linshoufan committed on
Commit
3de2b8e
1 Parent(s): 18a79ef

Upload 2 files

Files changed (2)
  1. app.py +74 -0
  2. requirements.txt +11 -0
app.py ADDED
@@ -0,0 +1,74 @@
+ import torch
+ import gradio as gr
+ from pytube import YouTube
+ from transformers import pipeline
+
+ MODEL_NAME = "linshoufan/linshoufan-whisper-small-nan-tw-pinyin-several-datasets"
+ lang = "chinese"
+
+ # Pick the device: CUDA if available, then Apple MPS, otherwise CPU
+ device = 0 if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
+
+ # Initialize the pipeline with the task, model, and device
+ pipe = pipeline(
+     task="automatic-speech-recognition",
+     chunk_length_s=15,
+     model=MODEL_NAME,
+     device=device,
+ )
+
+ # Set the model's language and task
+ pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
+
+ # Define the transcription function
+ def transcribe(microphone=None, file_upload=None):
+     warn_output = ""
+     if microphone is not None and file_upload is not None:
+         warn_output = "Warning: you used the microphone and uploaded an audio file at the same time; only the microphone recording will be used.\n"
+     elif microphone is None and file_upload is None:
+         return "Error: you must either use the microphone or upload an audio file."
+
+     file = microphone if microphone is not None else file_upload
+     text = pipe(file)["text"]
+     return warn_output + text
+
+ # Define the YouTube transcription function
+ def yt_transcribe(yt_url):
+     yt = YouTube(yt_url)
+     stream = yt.streams.filter(only_audio=True).first()
+     stream.download(filename="audio.mp3")
+     text = pipe("audio.mp3")["text"]
+     # Embed the YouTube video
+     video_id = yt_url.split("?v=")[-1]
+     html_embed = f'<center><iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"></iframe></center>'
+
+     return html_embed, text
+
+ # Initialize Gradio Blocks
+ demo = gr.Blocks()
+
+ # Define the two interfaces
+ mf_transcribe = gr.Interface(
+     fn=transcribe,
+     inputs=gr.Audio(label="audio", type="filepath"),
+     outputs="text",
+     title="Whisper Demo: Transcribe Audio",
+     description=f"Demo: transcribe audio files of arbitrary length with the fine-tuned checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers",
+     allow_flagging="manual",
+ )
+
+ yt_transcribe = gr.Interface(
+     fn=yt_transcribe,
+     inputs=[gr.Textbox(lines=1, placeholder="Paste the URL of a YouTube video here", label="YouTube URL")],
+     outputs=["html", "text"],
+     title="Whisper Demo: Transcribe YouTube",
+     description=f"Demo: transcribe YouTube videos of arbitrary length with the fine-tuned checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers",
+     allow_flagging="manual",
+ )
+
+ # Put the two interfaces into a tabbed layout
+ with demo:
+     gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"])
+
+ # Launch and share the Gradio interface
+ demo.launch(share=True)
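
For reference, the same pipeline can be exercised outside the Gradio UI. The following is a minimal sketch, not part of this commit: it assumes the dependencies from requirements.txt and a local audio file at the hypothetical path `sample.wav`.

```python
# Minimal smoke test for the ASR pipeline used in app.py.
# "sample.wav" is a hypothetical placeholder path, not shipped with this Space.
import torch
from transformers import pipeline

MODEL_NAME = "linshoufan/linshoufan-whisper-small-nan-tw-pinyin-several-datasets"
device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    chunk_length_s=15,
    model=MODEL_NAME,
    device=device,
)
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(
    language="chinese", task="transcribe"
)

print(pipe("sample.wav")["text"])
```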
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ accelerate==0.27.2
+ datasets==2.18.0
+ evaluate==0.4.1
+ gradio==4.20.1
+ librosa==0.10.1
+ pytube==15.0.0
+ soundfile==0.12.1
+ tensorboard==2.16.2
+ transformers==4.38.2
+ torch==2.2.1
+ jiwer==3.0.3