allow test_transcribe to run on CPU when CUDA is not available

This commit is contained in:
Jong Wook Kim 2023-01-17 13:43:36 -08:00
parent 493dfffa37
commit b1d213c0c7
2 changed files with 5 additions and 3 deletions

View File

@@ -18,7 +18,7 @@ jobs:
pytorch-version: 1.10.2
steps:
- uses: conda-incubator/setup-miniconda@v2
-      - run: conda install -n test python=${{ matrix.python-version }} pytorch=${{ matrix.pytorch-version }} cpuonly -c pytorch
+      - run: conda install -n test ffmpeg python=${{ matrix.python-version }} pytorch=${{ matrix.pytorch-version }} cpuonly -c pytorch
- uses: actions/checkout@v2
- run: echo "$CONDA/envs/test/bin" >> $GITHUB_PATH
- run: pip install pytest

View File

@@ -1,13 +1,15 @@
import os
import pytest
import torch
import whisper
-@pytest.mark.parametrize('model_name', whisper.available_models())
+@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
-    model = whisper.load_model(model_name).cuda()
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    model = whisper.load_model(model_name).to(device)
audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
language = "en" if model_name.endswith(".en") else None