Merge 51c785f7c91b8c032a1fa79c0e8f862dea81b860 into c0d2f624c09dc18e709e37c2ad90c039a4eb72a2

commit 9bcd2ae22a
Dwarkesh Patel, 2025-06-26 11:38:12 +09:00 (committed by GitHub)
2 changed files with 8 additions and 1 deletion


@@ -128,7 +128,12 @@ def load_model(
     """
 
     if device is None:
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        if torch.cuda.is_available():
+            device = "cuda"
+        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+            device = "mps"
+        else:
+            device = "cpu"
     if download_root is None:
         default = os.path.join(os.path.expanduser("~"), ".cache")
         download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
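The fallback order here is CUDA first, then Apple's MPS backend, then CPU. The hasattr(torch.backends, "mps") guard matters because torch.backends.mps only exists on PyTorch 1.12+; on older builds the attribute lookup would raise. A minimal standalone sketch of the same selection logic (the pick_device name is illustrative, not part of this change):

import torch

def pick_device() -> str:
    # Prefer CUDA, fall back to Apple-silicon MPS, else CPU.
    if torch.cuda.is_available():
        return "cuda"
    # hasattr guards PyTorch builds that predate the MPS backend.
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"

print(pick_device())  # e.g. "mps" on an M-series Mac without CUDA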


@@ -128,6 +128,8 @@ def transcribe(
     if model.device == torch.device("cpu"):
         if torch.cuda.is_available():
             warnings.warn("Performing inference on CPU when CUDA is available")
+        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+            warnings.warn("Performing inference on CPU when MPS is available")
         if dtype == torch.float16:
             warnings.warn("FP16 is not supported on CPU; using FP32 instead")
             dtype = torch.float32
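Taken together, load_model now auto-selects MPS on Apple silicon, and transcribe nudges users still running on CPU. A rough usage sketch, assuming this patch is applied (the audio path is a placeholder):

import whisper

# Device auto-detection resolves to "mps" on Apple silicon without CUDA;
# loading with device="cpu" explicitly would now trigger the new MPS warning.
model = whisper.load_model("base")
result = model.transcribe("audio.mp3")
print(result["text"])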