-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
117 lines (87 loc) · 3.06 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import os
import gradio as gr
# Project-local modules (not visible here): API presumably wraps the
# speech-to-text / LLM / TTS calls and Fetch retrieves LeetCode problem
# data -- confirm against api.py and fetch.py.
from api import API
from fetch import Fetch
# Single shared service instances used by both submission handlers.
api = API()
fetch = Fetch()
# Module-level state: populated by handle_first_submission and read by
# handle_second_submission on the user's follow-up recording.
question, title, question_body, question_code_stub = None, None, None, None
def handle_first_submission(option):
    """Look up the chosen LeetCode problem and return its statement text.

    Stores the fetched problem pieces in module-level state so the
    second-stage handler can use them later.
    """
    global question, title, question_body, question_code_stub
    # Fetch keys problems by their URL slug ("Two Sum" -> "two-sum").
    slug = option.lower().replace(" ", "-")
    question = fetch.get_question(slug)
    title, question_body, question_code_stub = question[:3]
    return f"{title}\n{question_body}"
def handle_second_submission(audio):
    """Evaluate the user's spoken approach to the current problem.

    Transcribes the recording, turns the explanation into code, compares it
    against the reference solution, and speaks feedback aloud.

    Returns:
        (transcript, feedback, path) -- the transcription text, the feedback
        string (with a problem link appended on success), and the absolute
        path of the generated TTS audio file.
    """
    transcript = api.listen(audio, gr=True)
    codegen = api.thinking_to_code(transcript, question_code_stub)
    solution = fetch.get_solution(title)
    compare_result = api.compare_code(solution, codegen, transcript, question_body)
    if compare_result == "Yes":
        # BUG FIX: build the URL slug in a local variable instead of
        # overwriting the module-level `title`. The old code clobbered the
        # shared state, so a repeat submission passed the slug (not the
        # original title) to fetch.get_solution().
        slug = title.replace(" ", "-").lower()
        feedback = "OK, that approach sounds good. Now, get ready to code it out."
        # Speak the feedback BEFORE appending the link, matching the
        # original ordering (the URL is shown, not read aloud).
        api.speak(feedback)
        feedback += f" http://leetcode.com/problems/{slug}/description/"
    else:
        feedback = api.evaluate_thinking(solution, codegen, transcript, question_body)
        api.speak(feedback)
    return transcript, feedback, os.path.abspath("openai_output.wav")
with gr.Blocks() as demo:
    gr.Markdown("# Welcome to DaVinci Solve!")

    # --- Stage 1: pick a problem -------------------------------------
    text_input = gr.Textbox(
        label="Enter a Leetcode Problem you wish to practice",
        placeholder="Type a Leetcode problem name...",
        lines=1,
    )
    prob_text = gr.Textbox(label="Problem Statement")
    submit_button_1 = gr.Button("Submit Problem")

    # --- Stage 2: record an explanation (hidden until stage 1 runs) --
    dynamic_audio = gr.Audio(
        type="filepath",
        label="Record Your Audio",
        sources=["microphone"],
        visible=False,
    )

    def display_audio():
        # Reveal the recorder, second submit button, transcript and
        # feedback boxes; keep the playback player hidden and silent.
        return (
            gr.update(visible=True),
            gr.update(visible=True),
            gr.update(visible=True),
            gr.update(visible=True),
            gr.update(visible=False, autoplay=False),
        )

    submit_button_2 = gr.Button("Submit Explanation", visible=False)
    transcript = gr.Textbox(label="Transcript", visible=False)
    feedback = gr.Textbox(label="Feedback", visible=False)
    audio_output = gr.Audio(
        value=os.path.abspath("openai_output.wav"),
        type="filepath",
        autoplay=False,
        visible=False,
    )

    # Stage-1 click: show the problem, then un-hide the stage-2 widgets.
    submit_button_1.click(
        handle_first_submission,
        inputs=[text_input],
        outputs=[prob_text],
    ).then(
        display_audio,
        inputs=[],
        outputs=[dynamic_audio, submit_button_2, transcript, feedback, audio_output],
    )

    def audio_play():
        # Surface the freshly generated TTS file and start playback.
        return gr.update(
            value=os.path.abspath("openai_output.wav"),
            visible=True,
            autoplay=True,
        )

    # Stage-2 click: evaluate the recording, then auto-play the feedback.
    submit_button_2.click(
        handle_second_submission,
        inputs=[dynamic_audio],
        outputs=[transcript, feedback, audio_output],
    ).then(
        audio_play,
        inputs=[],
        outputs=[audio_output],
    )

demo.launch(share=True)