Manjot Singh committed on
Commit
4a17bd1
•
1 Parent(s): c5c5d70

updated requirements, fixed decorators

Files changed (4)
  1. README.md +1 -1
  2. app.py +1 -1
  3. audio_processing.py +2 -2
  4. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 👀
 colorFrom: pink
 colorTo: green
 sdk: gradio
-sdk_version: 4.42.0
+sdk_version: 4.43.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -10,7 +10,7 @@ if torch.cuda.is_available():
 else:
     print("No CUDA GPUs available. Running on CPU.")
 
-@spaces.GPU(duration=180)
+# @spaces.GPU(duration=180)
 def transcribe_audio(audio_file, translate, model_size):
     language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size)
 
 
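The decorator toggled here comes from Hugging Face's `spaces` package (ZeroGPU): it requests a GPU for the wrapped function for up to `duration` seconds. A minimal sketch of the usage this commit disables, assuming a Space where the `spaces` package is installed; with the decorator commented out, `transcribe_audio` runs without a reserved GPU allocation.

```python
# Sketch of the spaces.GPU usage toggled in this commit (assumes a Hugging
# Face Space with the `spaces` package installed). The decorator asks ZeroGPU
# for a GPU for up to `duration` seconds while the function runs; commenting
# it out, as this commit does, means no GPU is requested for the call.
import spaces

@spaces.GPU(duration=180)
def transcribe_audio(audio_file, translate, model_size):
    # body unchanged from app.py
    ...
```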
audio_processing.py CHANGED
@@ -11,7 +11,7 @@ import time
 from difflib import SequenceMatcher
 hf_token = os.getenv("HF_TOKEN")
 
-CHUNK_LENGTH=5
+CHUNK_LENGTH=10
 OVERLAP=2
 import whisperx
 import torch
@@ -37,7 +37,7 @@ def process_audio(audio_file, translate=False, model_size="small"):
     try:
         device = "cuda" if torch.cuda.is_available() else "cpu"
         print(f"Using device: {device}")
-        compute_type = "float16"
+        compute_type = "float32"
         audio = whisperx.load_audio(audio_file)
         model = whisperx.load_model(model_size, device, compute_type=compute_type)
 
 
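The compute_type change pairs with disabling the GPU decorator above: "float16" generally requires a CUDA device, while "float32" also runs on CPU. A small device-aware sketch using only the whisperx calls already present in this file; the model size and the device-based selection are illustrative assumptions, not the repository's code.

```python
# Sketch: derive compute_type from the detected device instead of hard-coding it.
# "float16" generally needs a CUDA GPU; "float32" works on CPU as well.
import torch
import whisperx

device = "cuda" if torch.cuda.is_available() else "cpu"
compute_type = "float16" if device == "cuda" else "float32"

# Same loading calls as in audio_processing.py; "small" is an assumed model size.
model = whisperx.load_model("small", device, compute_type=compute_type)
```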
requirements.txt CHANGED
@@ -5,7 +5,7 @@ pandas
 pyannote.audio
 pyperclip
 sentencepiece
-gradio
+gradio==4.43.0
 speechbrain==0.5.16
 --extra-index-url https://download.pytorch.org/whl/cu113
 torch