amir22010 committed on
Commit
4abf19b
·
1 Parent(s): fa61cdd

removed audio

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -95,8 +95,8 @@ async def greet(product,description):
95
  warning_message = chat_completion.choices[0].message.content
96
  if warning_message != 'safe':
97
  chat = [chat_completion.choices[0].message.content]
98
- processed_audio = combine_audio_files(text_to_speech([chat_completion.choices[0].message.content]))
99
- yield processed_audio, chat[0]
100
  else:
101
  output = llm.create_chat_completion(
102
  messages=[
@@ -115,11 +115,11 @@ async def greet(product,description):
115
  for chunk in output:
116
  delta = chunk['choices'][0]['delta']
117
  if 'content' in delta:
118
- audio_list.append([text_to_speech(delta.get('content', ''))])
119
- processed_audio = combine_audio_files(audio_list)
120
  partial_message = partial_message + delta.get('content', '')
121
- yield processed_audio, partial_message
122
 
123
  audio = gr.Audio()
124
- demo = gr.Interface(fn=greet, inputs=["text","text"], concurrency_limit=10, outputs=[audio, "text"])
125
  demo.launch()
 
95
  warning_message = chat_completion.choices[0].message.content
96
  if warning_message != 'safe':
97
  chat = [chat_completion.choices[0].message.content]
98
+ #processed_audio = combine_audio_files(text_to_speech([chat_completion.choices[0].message.content]))
99
+ yield chat[0]
100
  else:
101
  output = llm.create_chat_completion(
102
  messages=[
 
115
  for chunk in output:
116
  delta = chunk['choices'][0]['delta']
117
  if 'content' in delta:
118
+ # audio_list.append([text_to_speech(delta.get('content', ''))])
119
+ # processed_audio = combine_audio_files(audio_list)
120
  partial_message = partial_message + delta.get('content', '')
121
+ yield partial_message
122
 
123
  audio = gr.Audio()
124
+ demo = gr.Interface(fn=greet, inputs=["text","text"], concurrency_limit=10, outputs=["text"])
125
  demo.launch()