Mirror of https://github.com/openai/whisper.git
Synced 2025-11-24 06:26:03 +00:00
Fix critical code quality issues: remove unused imports, fix f-strings and whitespace
Co-authored-by: ariavn-byte <151469489+ariavn-byte@users.noreply.github.com>
commit 6c5c3b3d56
parent 6217f530f5
@@ -4,7 +4,6 @@ Configuration settings for Farsi Transcriber application
 Manages model selection, device settings, and other configuration options.
 """
 
-import os
 from pathlib import Path
 
 # Application metadata
@@ -4,7 +4,6 @@ Whisper Transcriber Module
 Handles Farsi audio/video transcription using OpenAI's Whisper model.
 """
 
-import os
 import warnings
 from pathlib import Path
 from typing import Dict, List, Optional
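Both hunks above drop an "import os" that nothing else in the module references, the pattern pyflakes/flake8 report as F401 ("imported but unused"). Purely as an illustration of how such dead imports can be found, here is a small self-contained sketch built on the standard-library ast module; the script and its helper are hypothetical and are not part of this repository or of flake8:

    # Hypothetical, simplified stand-in for the F401 ("imported but unused")
    # check that pyflakes/flake8 perform. Not part of this repository.
    import ast
    import sys


    def unused_imports(source: str):
        tree = ast.parse(source)
        imported = {}  # top-level imported name -> line number of the import
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    imported[alias.asname or alias.name.split(".")[0]] = node.lineno
            elif isinstance(node, ast.ImportFrom):
                for alias in node.names:
                    if alias.name != "*":
                        imported[alias.asname or alias.name] = node.lineno
        # Every bare name referenced anywhere in the module body.
        used = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
        return sorted((line, name) for name, line in imported.items() if name not in used)


    if __name__ == "__main__":
        for path in sys.argv[1:]:
            with open(path, encoding="utf-8") as handle:
                for line, name in unused_imports(handle.read()):
                    print(f"{path}:{line}: '{name}' imported but unused")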
@@ -53,7 +52,7 @@ class FarsiTranscriber:
             warnings.simplefilter("ignore")
             self.model = whisper.load_model(model_name, device=self.device)
 
-        print(f"Model loaded successfully")
+        print("Model loaded successfully")
 
     def transcribe(
         self,
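The change here only removes a pointless f-prefix: the literal contains no placeholders, so the prefix has no effect (flake8 reports this as F541) and the printed text is unchanged. A quick illustration:

    # An f-string with no placeholders is just a plain string.
    assert f"Model loaded successfully" == "Model loaded successfully"

    # The prefix only matters once something is actually interpolated:
    model_name = "small"  # placeholder value for the example
    print(f"Model '{model_name}' loaded successfully")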
@@ -281,7 +281,7 @@ def transcribe(
             time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
             window_end_time = float((seek + N_FRAMES) * HOP_LENGTH / SAMPLE_RATE)
             segment_size = min(N_FRAMES, content_frames - seek, seek_clip_end - seek)
-            mel_segment = mel[:, seek : seek + segment_size]
+            mel_segment = mel[:, seek:seek + segment_size]
             segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE
             mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype)
 
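This hunk, and the remaining transcribe() and SubtitlesWriter hunks below, only tighten the whitespace inside slice expressions; the slices themselves are unchanged. One caveat worth noting: Black intentionally writes spaces around the colon when a slice bound is a compound expression such as seek + segment_size, so re-running black over these files may restore the original spacing. A tiny equivalence check, using a NumPy array as a stand-in for the mel spectrogram:

    import numpy as np

    mel = np.arange(160, dtype=np.float32).reshape(2, 80)  # toy 2 x 80 "spectrogram"
    seek, segment_size = 10, 30

    # Identical slices, spelled with and without spaces around the colon.
    a = mel[:, seek : seek + segment_size]
    b = mel[:, seek:seek + segment_size]
    assert a.shape == (2, 30) and (a == b).all()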
@@ -444,7 +444,7 @@ def transcribe(
                             continue
                         if is_segment_anomaly(segment):
                             next_segment = next_words_segment(
-                                current_segments[si + 1 :]
+                                current_segments[si + 1:]
                             )
                             if next_segment is not None:
                                 hal_next_start = next_segment["words"][0]["start"]
@@ -508,7 +508,7 @@ def transcribe(
             pbar.update(min(content_frames, seek) - previous_seek)
 
     return dict(
-        text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]),
+        text=tokenizer.decode(all_tokens[len(initial_prompt_tokens):]),
         segments=all_segments,
         language=language,
     )
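For context, this dict is the public result of transcribe(); the decode skips the initial-prompt tokens so they do not leak into the returned text. A minimal usage sketch (the model size and audio path below are placeholders):

    import whisper

    model = whisper.load_model("small")
    result = model.transcribe("audio.mp3", language="fa")

    print(result["language"])           # language code used for decoding
    print(result["text"])               # the full transcript
    for segment in result["segments"]:  # per-segment timing and text
        print(f'{segment["start"]:7.2f} {segment["end"]:7.2f} {segment["text"]}')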
@@ -153,7 +153,7 @@ class SubtitlesWriter(ResultWriter):
                     if max_words_per_line > len(segment["words"]) - chunk_index:
                         words_count = remaining_words
                     for i, original_timing in enumerate(
-                        segment["words"][chunk_index : chunk_index + words_count]
+                        segment["words"][chunk_index:chunk_index + words_count]
                     ):
                         timing = original_timing.copy()
                         long_pause = (
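This loop in SubtitlesWriter splits each segment's word timings into subtitle lines of at most max_words_per_line words; again only the slice spacing changes. Stripped of the timing bookkeeping, the chunking works roughly as sketched below (the sample words are invented):

    words = ["this", "is", "a", "short", "made-up", "example", "sentence"]
    max_words_per_line = 3

    chunk_index = 0
    while chunk_index < len(words):
        words_count = min(max_words_per_line, len(words) - chunk_index)
        print(words[chunk_index:chunk_index + words_count])
        chunk_index += words_count
    # -> ['this', 'is', 'a'], ['short', 'made-up', 'example'], ['sentence']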