hasattr check for torch.backends.mps

This commit is contained in:
Jong Wook Kim 2023-01-18 14:39:07 -08:00 committed by GitHub
parent 2c914999bd
commit 4b77a81c1f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@@ -75,7 +75,7 @@ def transcribe(
     if model.device == torch.device("cpu"):
         if torch.cuda.is_available():
             warnings.warn("Performing inference on CPU when CUDA is available")
-        if torch.backends.mps.is_available():
+        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
             warnings.warn("Performing inference on CPU when MPS is available")
         if dtype == torch.float16:
             warnings.warn("FP16 is not supported on CPU; using FP32 instead")