diff --git a/whisper/transcribe.py b/whisper/transcribe.py
index 1c075a2..f46b868 100644
--- a/whisper/transcribe.py
+++ b/whisper/transcribe.py
@@ -504,14 +504,14 @@ def cli():
     def valid_model_name(name):
         if name in available_models() or os.path.exists(name):
             return name
-        raise ValueError(
+        raise argparse.ArgumentTypeError(
             f"model should be one of {available_models()} or path to a model checkpoint"
         )
 
     # fmt: off
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
-    parser.add_argument("--model", default="small", type=valid_model_name, help="name of the Whisper model to use")
+    parser.add_argument("--model", default="small", type=valid_model_name, help=f"name of the Whisper model to use. Available models are: {', '.join(available_models())}. You can also specify a path to a model checkpoint.")
    parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
     parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
     parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")