Commit c8796f6
Parent(s): f5d5366

Show/save transcript

Files changed:
- discourse.py +10 -4
- gradio-ui.py +6 -3
- memory.py +0 -4
discourse.py
CHANGED

@@ -11,12 +11,16 @@ speech_config = speechsdk.SpeechConfig(subscription=os.environ['AZURE_SPEECH_KEY
 speech_config.speech_synthesis_voice_name = "nl-NL-ColetteNeural"
 speech_synthesizer = speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=None)
 
+context = [{"role": "system", "content": 'Je bent een slimme en behulpzame gesprekspartner. \
+Antwoord beknopt en ter zake.\
+Vermeld niet dat je een AI of een soort service bent.'}]
+
 def transcribe(model: str, audio: str):
     audio_file = open(audio, "rb")
     transcript = openai.Audio.transcribe(model, audio_file, language=LANGUAGE)
     return transcript
 
-def gen_response(model: str, context: list):
+def gen_response(model: str):
     response = openai.ChatCompletion.create(model=model, messages=context)
     return response["choices"][0]["message"]
 
@@ -25,7 +29,7 @@ def gen_voice(response, response_filename):
     stream = speechsdk.AudioDataStream(reponse_audio)
     stream.save_to_wav_file(response_filename)
 
-def respond(audio:str, context: list):
+def respond(audio:str):
     transcript = transcribe("whisper-1", audio)
     context.append({"role": "user", "content": transcript['text']})
 
@@ -36,8 +40,10 @@ def respond(audio:str, context: list):
 
     return AUDIO_FILE_NAME
 
-def transcript(context: list):
+def transcript():
     transcript = ""
     for m in context:
         if m["role"] != "system":
-            transcript += m["role"] + " : " + m["content"] + "\n\n"
+            transcript += m["role"] + " : " + m["content"] + "\n\n"
+
+    return transcript
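Net effect in discourse.py: the chat history `context` now lives at module level (seeded with the Dutch system prompt), so gen_response(), respond() and the new transcript() read and append to it directly instead of receiving it as a parameter, and transcript() renders every non-system turn as "role : content" blocks. A minimal usage sketch, assuming the parts of the file not shown in this diff (gen_voice, AUDIO_FILE_NAME, LANGUAGE) behave as their names suggest; the caller and audio path below are hypothetical:

# Hypothetical caller, not part of the commit.
import discourse as d

reply_wav = d.respond("question.wav")  # Whisper transcription -> ChatCompletion -> Azure TTS, returns AUDIO_FILE_NAME
print(d.transcript())                   # e.g. "user : ...\n\nassistant : ...\n\n"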
gradio-ui.py
CHANGED

@@ -12,16 +12,19 @@ with gr.Blocks(theme=theme) as ui:
     with gr.Column(scale=1):
         message = gr.Audio(source="microphone", type="filepath")
         with gr.Row():
-            btn1 = gr.Button("
+            btn1 = gr.Button("Generate Reponse")
     with gr.Row():
         with gr.Column(scale=1):
             audio_response = gr.Audio()
             with gr.Row():
                 text_response = gr.Textbox(label="Transcript", max_lines=10)
             with gr.Row():
-
+                btn3 = gr.Button("Show Transcript")
+            with gr.Row():
+                btn2 = gr.Button("Save Transcript")
 
-    btn1.click(fn=d.respond, inputs=
+    btn1.click(fn=d.respond, inputs=message, outputs=audio_response)
     btn2.click(fn=m.save_as_hf_dataset)
+    btn3.click(fn=d.transcript, output=text_response)
 
 ui.launch()
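One wiring detail worth flagging: Gradio event listeners take an `outputs=` keyword, so `btn3.click(fn=d.transcript, output=text_response)` as committed will most likely not route the returned string into the Transcript textbox. A corrected sketch (an assumption about the intent, not part of this commit):

# Sketch only: route discourse.transcript() into the existing textbox.
btn3.click(fn=d.transcript, outputs=text_response)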
memory.py
CHANGED

@@ -11,10 +11,6 @@ repo = Repository(
     local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
 )
 
-context = [{"role": "system", "content": 'Je bent een slimme en behulpzame gesprekspartner. \
-Antwoord beknopt en ter zake.\
-Vermeld niet dat je een AI of een soort service bent.'}]
-
 def save_as_hf_dataset():
     with open(DATA_FILE, "a") as csvfile:
         for message in context:
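With the system-prompt list deleted here and nothing added (+0 -4), memory.py no longer defines `context`, yet save_as_hf_dataset() still iterates over it. Unless `context` is already imported somewhere outside this hunk, saving would now raise a NameError; a minimal fix sketch (an assumption, not part of the commit) is to reuse the history that discourse.py now owns:

# Assumed import, outside the shown hunk: bind to the module-level history in discourse.py.
from discourse import context

Because the import binds to the same list object, turns appended by discourse.respond() would be visible here when the dataset is saved.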