Mirror of https://github.com/openai/whisper.git
Merge 51c785f7c91b8c032a1fa79c0e8f862dea81b860 into c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
Commit 9bcd2ae22a
@@ -128,7 +128,12 @@ def load_model(
     """

     if device is None:
-        device = "cuda" if torch.cuda.is_available() else "cpu"
+        if torch.cuda.is_available():
+            device = "cuda"
+        elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+            device = "mps"
+        else:
+            device = "cpu"
     if download_root is None:
         default = os.path.join(os.path.expanduser("~"), ".cache")
         download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper")
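The fallback added above prefers CUDA, then Apple's MPS backend, then CPU; the hasattr() check guards PyTorch builds that predate torch.backends.mps. A minimal standalone sketch of the same selection logic (the pick_device helper name is illustrative, not part of the patch):

import torch

def pick_device() -> str:
    # Mirrors the patched load_model() default: prefer CUDA, then Apple's
    # Metal Performance Shaders (MPS) backend, then fall back to CPU.
    if torch.cuda.is_available():
        return "cuda"
    # hasattr() guards older PyTorch builds without torch.backends.mps
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"

print(pick_device())  # e.g. "mps" on an Apple Silicon Mac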
@@ -128,6 +128,8 @@ def transcribe(
     if model.device == torch.device("cpu"):
         if torch.cuda.is_available():
             warnings.warn("Performing inference on CPU when CUDA is available")
+        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+            warnings.warn("Performing inference on CPU when MPS is available")
         if dtype == torch.float16:
             warnings.warn("FP16 is not supported on CPU; using FP32 instead")
             dtype = torch.float32
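The transcribe() change is warning-only: inference still runs on whatever device the caller chose, but users on Apple Silicon are now told that MPS was available. A sketch of what a caller would observe with this patch, assuming an MPS-capable machine ("audio.mp3" is a placeholder path):

import warnings
import whisper

model = whisper.load_model("base", device="cpu")  # force CPU inference
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = model.transcribe("audio.mp3")
for w in caught:
    print(w.message)
# Expected output on an MPS-capable machine:
#   Performing inference on CPU when MPS is available
#   FP16 is not supported on CPU; using FP32 instead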