Files changed (1)
  1. app.py +1 -5
app.py CHANGED
@@ -15,16 +15,12 @@ tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
 talking_face = gr.Blocks.load(name="spaces/gradio-tests/one-shot-talking-face", api_key=token)
 
 def infer(audio, openai_api_key):
-
     whisper_result = whisper(audio, None, "translate", fn_index=0)
-
     gpt_response = try_api(whisper_result, openai_api_key)
-
     audio_response = tts(gpt_response[0], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
-
     portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)
 
-    return gr.Textbox.update(value=whisper_result, visible=True), portrait_link, gr.Textbox.update(value=gpt_response[1], visible=True), gr.Group.update(visible=True), gr.Button.update(visible=True)
+    return whisper_result, portrait_link, gpt_response[1], True, True
 
 def try_api(message, openai_api_key):
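
For reference, a sketch of how `infer` reads after this change, reconstructed from the new side of the hunk. The module-level `whisper` client and `token` are defined earlier in app.py and not shown in this hunk, and the `import gradio as gr` line is assumed; the two remote-Space loads are taken from the hunk's context lines.

import gradio as gr

# Remote Spaces loaded at module level (from the context around this hunk).
# `whisper` and `token` are defined earlier in app.py (not shown here).
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/gradio-tests/one-shot-talking-face", api_key=token)

def infer(audio, openai_api_key):
    # Speech -> translated English text via the Whisper Space.
    whisper_result = whisper(audio, None, "translate", fn_index=0)
    # Transcript -> chat reply via the OpenAI API (try_api is defined below in app.py).
    gpt_response = try_api(whisper_result, openai_api_key)
    # Reply text -> synthesized English speech via the IMS-Toucan Space.
    audio_response = tts(gpt_response[0], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
    # Synthesized speech + portrait image -> talking-head video link.
    portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)

    # After this change the handler returns plain values instead of gr.*.update(...)
    # calls; the last two booleans go to the outputs that previously received the
    # visibility updates for the Group and the Button.
    return whisper_result, portrait_link, gpt_response[1], True, True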