Spaces:
Sleeping
Sleeping
jinggujiwoo7
committed on
Commit
โข
41f4425
1
Parent(s):
96ee2ca
Update app.py
Browse files
app.py
CHANGED
@@ -1,77 +1,94 @@
|
|
1 |
import gradio as gr
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
|
3 |
-
#
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
-
|
7 |
-
def record_and_submit_voice(student_name, voice):
    """Store one voice recording for a student and return a status message.

    NOTE(review): relies on a module-level dict `recordings` (defined elsewhere
    in the file, not visible in this diff) mapping student name -> list of
    {"voice": ..., "comments": []} entries — confirm its initialization.

    student_name: name typed into the UI; must be non-empty.
    voice: audio payload as produced by the Gradio audio input.
    Returns a human-readable status string shown in the UI.
    """
    # Reject anonymous submissions before touching shared state.
    if not student_name:
        return "Please enter your name."
    # First submission for this student: create their recording list.
    if student_name not in recordings:
        recordings[student_name] = []
    # Each entry carries its own (initially empty) comment thread.
    recordings[student_name].append({"voice": voice, "comments": []})
    return f"Voice recorded and submitted successfully by {student_name}!"
|
14 |
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
voices = [rec["voice"] for rec in recordings[selected_student]]
|
24 |
-
comments = "\n".join(
|
25 |
-
[f"{c[0]}: {c[1]}" for rec in recordings[selected_student] for c in rec["comments"]]
|
26 |
-
)
|
27 |
-
return voices[0] if voices else None, comments
|
28 |
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
recordings[selected_student][0]["comments"].append((commenter_name, comment))
|
38 |
-
return f"Comment added successfully by {commenter_name}!"
|
39 |
|
40 |
-
|
41 |
-
with gr.Blocks() as app:
|
42 |
-
with gr.Tab("Record Voice"):
|
43 |
-
student_name_input = gr.Textbox(placeholder="Enter your name", label="Your Name")
|
44 |
-
voice_input = gr.Audio(type="filepath", label="Record your voice")
|
45 |
-
submit_voice_button = gr.Button("Submit Voice")
|
46 |
-
voice_output = gr.Textbox(label="Status")
|
47 |
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
|
|
53 |
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
)
|
69 |
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
75 |
|
76 |
-
|
77 |
-
app.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
import speech_recognition as sr
|
3 |
+
from Levenshtein import ratio
|
4 |
+
import tempfile
|
5 |
+
import numpy as np
|
6 |
+
import soundfile as sf
|
7 |
+
import pandas as pd
|
8 |
+
import language_tool_python
|
9 |
|
10 |
+
# Sample dataframe with sentences ordered from easy to hard
# (used to populate the sentence-selection dropdown in the UI).
data = {
    "Sentences": [
        "A stitch in time saves nine.",
        "To be or not to be, that is the question.",
        "Five cats were living in safe caves.",
        "Hives give shelter to bees in large caves.",
        "His decision to plant a rose was amazing.",
        "She sells sea shells by the sea shore.",
        "The colorful parrot likes rolling berries.",
        "Time flies like an arrow; fruit flies like a banana.",
        "Good things come to those who wait.",
        "All human beings are born free and equal in dignity and rights."
    ]
}
df = pd.DataFrame(data)

# Shared LanguageTool instance for US-English grammar checking; created once
# at import time and reused by every check_grammar() call.
tool = language_tool_python.LanguageTool('en-US')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
28 |
|
29 |
+
def transcribe_audio(file_info):
    """Transcribe a Gradio numpy-audio payload to text with Google Speech API.

    Parameters
    ----------
    file_info : tuple | None
        ``(sample_rate, samples)`` as produced by ``gr.Audio(type="numpy")``,
        or ``None`` when no audio was provided.

    Returns
    -------
    str
        The recognized text, or a human-readable error message when the audio
        is missing, unintelligible, or the recognition service is unreachable.
    """
    # Guard: clicking "Check Pronunciation" with no recording yields None,
    # which previously crashed on file_info[1].
    if file_info is None:
        return "Could not understand audio"

    sample_rate, samples = file_info
    r = sr.Recognizer()
    with tempfile.NamedTemporaryFile(delete=True, suffix=".wav") as tmpfile:
        # Write with the audio's actual capture rate. The previous hard-coded
        # 44100 Hz mislabeled recordings captured at other rates, distorting
        # playback speed and hurting recognition accuracy.
        # NOTE(review): reopening a NamedTemporaryFile by name may fail on
        # Windows — acceptable for a Linux-hosted Space, but confirm target OS.
        sf.write(tmpfile.name, data=samples, samplerate=sample_rate, format='WAV')
        tmpfile.seek(0)
        with sr.AudioFile(tmpfile.name) as source:
            audio_data = r.record(source)
    try:
        text = r.recognize_google(audio_data)
        return text
    except sr.UnknownValueError:
        return "Could not understand audio"
    except sr.RequestError as e:
        return f"Could not request results; {e}"
|
43 |
|
44 |
+
def pronunciation_correction(expected_text, file_info):
    """Compare the learner's spoken audio against the expected sentence.

    Parameters
    ----------
    expected_text : str
        The target sentence the learner was asked to read aloud.
    file_info : tuple | None
        Audio payload forwarded unchanged to ``transcribe_audio``.

    Returns
    -------
    tuple
        ``(feedback_message, score)`` where ``score`` is a float in [0, 1]
        suitable for the ``gr.Number`` output component.
    """
    user_spoken_text = transcribe_audio(file_info)

    # Transcription failures come back as sentinel messages; previously these
    # were scored against the expected text, producing a meaningless (and
    # sometimes non-zero) similarity. Surface the error instead.
    if user_spoken_text == "Could not understand audio" or user_spoken_text.startswith(
        "Could not request results"
    ):
        return user_spoken_text, 0.0

    similarity = ratio(expected_text.lower(), user_spoken_text.lower())
    # gr.Number expects a numeric value; the old code returned a string.
    score = round(similarity, 2)

    if similarity >= 0.9:
        feedback = "Excellent pronunciation!"
    elif similarity >= 0.7:
        feedback = "Good pronunciation!"
    elif similarity >= 0.5:
        feedback = "Needs improvement."
    else:
        feedback = "Poor pronunciation, try to focus more on clarity."

    return feedback, score
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
+
def check_grammar(text):
    """Run LanguageTool over *text* and summarize any rule violations.

    Returns a single status string: either a no-issues message or a
    semicolon-joined list of ``ruleId: message`` pairs.
    """
    issues = tool.check(text)
    if issues:
        details = "; ".join(f"{issue.ruleId}: {issue.message}" for issue in issues)
        return "Grammar issues found: " + details
    return "No grammar issues found."
|
66 |
|
67 |
+
with gr.Blocks() as app:
    # --- Sentence selection: dropdown mirrored into a read-only textbox ---
    with gr.Row():
        sentence_picker = gr.Dropdown(
            choices=df['Sentences'].tolist(), label="Select a Sentence"
        )
        chosen_sentence_box = gr.Textbox(label="Selected Text", interactive=False)

    # --- Free-form grammar checking on user-typed text ---
    with gr.Row():
        typed_sentence_box = gr.Textbox(label="Enter Your Sentence")
        grammar_button = gr.Button("Check Grammar")
        grammar_result_box = gr.Textbox(label="Grammar Feedback", interactive=False)

    # --- Pronunciation practice against the selected sentence ---
    recorded_audio = gr.Audio(label="Upload Audio File", type="numpy")
    pronounce_button = gr.Button("Check Pronunciation")
    pronounce_feedback_box = gr.Textbox(label="Pronunciation Feedback")
    pronounce_score_box = gr.Number(
        label="Pronunciation Accuracy Score: 0 (No Match) ~ 1 (Perfect)"
    )

    # Identity lambda echoes the dropdown choice into the read-only box.
    sentence_picker.change(
        lambda text: text, inputs=sentence_picker, outputs=chosen_sentence_box
    )
    pronounce_button.click(
        pronunciation_correction,
        inputs=[sentence_picker, recorded_audio],
        outputs=[pronounce_feedback_box, pronounce_score_box],
    )
    grammar_button.click(
        check_grammar,
        inputs=[typed_sentence_box],
        outputs=[grammar_result_box],
    )

app.launch(debug=True)
|
|