KaiChen1998 committed on
Commit
91deaa2
·
1 Parent(s): 7925e97

change time limit

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -56,11 +56,11 @@ asr_format = "Please recognize the text corresponding to the follwing speech.\n"
56
  tts_format = "Please synthesize the speech corresponding to the follwing text.\n"
57
  chat_format = r'Please recognize the texts, emotion and pitch from the user question speech units and provide the texts, emotion, pitch and speech units for the assistant response. \nEmotion should be chosen from ["neutral", "happy", "sad", "angry", "surprised", "disgusted", "fearful"]. \nPitch should be chosen from ["low", "normal", "high"].\nYour output should be in json format.\nAn output example is:\n{"user question text": "", "user question emotion": "", "user question pitch": "", "assistant response text": "", "assistant response emotion": "", "assistant response pitch": "","assistant response speech": ""}\n\nuser question speech:'
58
 
59
- @spaces.GPU(duration=15)
60
  def s2u_asr(text, audio_file):
61
  return asr_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
62
 
63
- @spaces.GPU(duration=15)
64
  def s2u_chat(text, audio_file):
65
  return chat_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
66
 
@@ -192,7 +192,7 @@ def add_text(state, text, image, image_process_mode, audio_input, audio_mode):
192
  # Input: [state, temperature, top_p, max_output_tokens, speaker]
193
  # Return: [state, chatbot] + btn_list
194
  ############
195
- @spaces.GPU(duration=90)
196
  def http_bot(state, temperature, top_p, max_new_tokens, speaker):
197
  logging.info(f"http_bot.")
198
 
 
56
  tts_format = "Please synthesize the speech corresponding to the follwing text.\n"
57
  chat_format = r'Please recognize the texts, emotion and pitch from the user question speech units and provide the texts, emotion, pitch and speech units for the assistant response. \nEmotion should be chosen from ["neutral", "happy", "sad", "angry", "surprised", "disgusted", "fearful"]. \nPitch should be chosen from ["low", "normal", "high"].\nYour output should be in json format.\nAn output example is:\n{"user question text": "", "user question emotion": "", "user question pitch": "", "assistant response text": "", "assistant response emotion": "", "assistant response pitch": "","assistant response speech": ""}\n\nuser question speech:'
58
 
59
+ @spaces.GPU(duration=10)
60
  def s2u_asr(text, audio_file):
61
  return asr_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
62
 
63
+ @spaces.GPU(duration=10)
64
  def s2u_chat(text, audio_file):
65
  return chat_format + s2u_extract_unit_demo(s2u_model, audio_file, model_name=s2u_model_name, reduced=reduced)
66
 
 
192
  # Input: [state, temperature, top_p, max_output_tokens, speaker]
193
  # Return: [state, chatbot] + btn_list
194
  ############
195
+ @spaces.GPU
196
  def http_bot(state, temperature, top_p, max_new_tokens, speaker):
197
  logging.info(f"http_bot.")
198