From 6c5c3b3d562ffc2ef042735ff9ab5f33c4c992f7 Mon Sep 17 00:00:00 2001
From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com>
Date: Sat, 15 Nov 2025 23:48:38 +0000
Subject: [PATCH] Fix critical code quality issues: remove unused imports, fix
 f-strings and whitespace

Co-authored-by: ariavn-byte <151469489+ariavn-byte@users.noreply.github.com>
---
 farsi_transcriber/config.py                     | 1 -
 farsi_transcriber/models/whisper_transcriber.py | 3 +--
 whisper/transcribe.py                           | 6 +++---
 whisper/utils.py                                | 2 +-
 4 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/farsi_transcriber/config.py b/farsi_transcriber/config.py
index d5bb631..306adc7 100644
--- a/farsi_transcriber/config.py
+++ b/farsi_transcriber/config.py
@@ -4,7 +4,6 @@ Configuration settings for Farsi Transcriber application
 Manages model selection, device settings, and other configuration options.
 """
 
-import os
 from pathlib import Path
 
 # Application metadata
diff --git a/farsi_transcriber/models/whisper_transcriber.py b/farsi_transcriber/models/whisper_transcriber.py
index 8310cca..b7c6b37 100644
--- a/farsi_transcriber/models/whisper_transcriber.py
+++ b/farsi_transcriber/models/whisper_transcriber.py
@@ -4,7 +4,6 @@ Whisper Transcriber Module
 Handles Farsi audio/video transcription using OpenAI's Whisper model.
 """
 
-import os
 import warnings
 from pathlib import Path
 from typing import Dict, List, Optional
@@ -53,7 +52,7 @@ class FarsiTranscriber:
             warnings.simplefilter("ignore")
             self.model = whisper.load_model(model_name, device=self.device)
 
-        print(f"Model loaded successfully")
+        print("Model loaded successfully")
 
     def transcribe(
         self,
diff --git a/whisper/transcribe.py b/whisper/transcribe.py
index 0a4cc36..8eb1711 100644
--- a/whisper/transcribe.py
+++ b/whisper/transcribe.py
@@ -281,7 +281,7 @@ def transcribe(
             time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
             window_end_time = float((seek + N_FRAMES) * HOP_LENGTH / SAMPLE_RATE)
             segment_size = min(N_FRAMES, content_frames - seek, seek_clip_end - seek)
-            mel_segment = mel[:, seek : seek + segment_size]
+            mel_segment = mel[:, seek:seek + segment_size]
             segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE
             mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype)
 
@@ -444,7 +444,7 @@ def transcribe(
                             continue
                         if is_segment_anomaly(segment):
                             next_segment = next_words_segment(
-                                current_segments[si + 1 :]
+                                current_segments[si + 1:]
                             )
                             if next_segment is not None:
                                 hal_next_start = next_segment["words"][0]["start"]
@@ -508,7 +508,7 @@ def transcribe(
             pbar.update(min(content_frames, seek) - previous_seek)
 
     return dict(
-        text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]),
+        text=tokenizer.decode(all_tokens[len(initial_prompt_tokens):]),
         segments=all_segments,
         language=language,
     )
diff --git a/whisper/utils.py b/whisper/utils.py
index 13792f7..6ca958e 100644
--- a/whisper/utils.py
+++ b/whisper/utils.py
@@ -153,7 +153,7 @@ class SubtitlesWriter(ResultWriter):
                     if max_words_per_line > len(segment["words"]) - chunk_index:
                         words_count = remaining_words
                     for i, original_timing in enumerate(
-                        segment["words"][chunk_index : chunk_index + words_count]
+                        segment["words"][chunk_index:chunk_index + words_count]
                     ):
                         timing = original_timing.copy()
                         long_pause = (
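
For context on the lint findings this patch addresses: pyflakes (as surfaced by flake8) reports an unused import as F401 ("module imported but unused") and an f-string with no placeholders as F541, while the slice-spacing edits correspond to pycodestyle's E203 ("whitespace before ':'"). The sketch below reproduces the two pyflakes findings, assuming flake8 is installed; demo.py is a hypothetical file used only for illustration, not part of the patched repositories.

    # demo.py -- hypothetical reproduction of the two pyflakes findings
    # check with: flake8 --select=F401,F541 demo.py

    import os  # F401: 'os' imported but unused


    def report_loaded() -> None:
        # F541: an f-string with no placeholders; a plain string literal
        # is equivalent, which is the substitution the patch makes.
        print(f"Model loaded successfully")
        print("Model loaded successfully")  # corrected form used in the patch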