From 4b77a81c1fa29fcbcf470d16a4fdd7d09cb0fd3a Mon Sep 17 00:00:00 2001
From: Jong Wook Kim
Date: Wed, 18 Jan 2023 14:39:07 -0800
Subject: [PATCH] hasattr check for torch.backends.mps

---
 whisper/transcribe.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/whisper/transcribe.py b/whisper/transcribe.py
index 6b5993f..0d89a5d 100644
--- a/whisper/transcribe.py
+++ b/whisper/transcribe.py
@@ -75,7 +75,7 @@ def transcribe(
     if model.device == torch.device("cpu"):
         if torch.cuda.is_available():
             warnings.warn("Performing inference on CPU when CUDA is available")
-        if torch.backends.mps.is_available():
+        if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
             warnings.warn("Performing inference on CPU when MPS is available")
         if dtype == torch.float16:
             warnings.warn("FP16 is not supported on CPU; using FP32 instead")