Spaces:
Runtime error
Commit 91e84de · 1 Parent(s): 27a242f
Include option to transcribe and use same language as spoken
app.py CHANGED

@@ -8,7 +8,7 @@ import torch
 
 session_token = os.environ.get('SessionToken')
 # logger.info(f"session_token_: {session_token}")
-
+api = ChatGPT(session_token)
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
@@ -23,22 +23,21 @@ all_special_ids = whisper_model.tokenizer.all_special_ids
 transcribe_token_id = all_special_ids[-5]
 translate_token_id = all_special_ids[-6]
 
-def
-    task = "translate"
+def translate_or_transcribe(audio, task):
     whisper_model.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]]
     text = whisper_model(audio)["text"]
     return text
 
 def get_response_from_chatbot(text):
     try:
-
+        if reset_conversation:
+            api.refresh_auth()
+            api.reset_conversation()
         resp = api.send_message(text)
-        api.refresh_auth()
-        api.reset_conversation()
         response = resp['message']
         # logger.info(f"response_: {response}")
     except:
-        response = "Sorry,
+        response = "Sorry, chatGPT queue is full. Please try again in some time"
     return response
 
 def chat(message, chat_history):
@@ -162,8 +161,10 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
             )
             translate_btn = gr.Button("Check Whisper first ? 👍")
 
+            reset_conversation = gr.Checkbox(label="Reset conversation?", value=False)
+            whisper_task = gr.Radio(["Translate to English", "Transcribe in Spoken Language"], value="Translate to English", show_label=False)
             with gr.Row(elem_id="prompt_row"):
-                prompt_input = gr.Textbox(lines=2, label="Input text
+                prompt_input = gr.Textbox(lines=2, label="Input text",show_label=True)
                 chat_history = gr.Textbox(lines=4, label="prompt", visible=False)
                 submit_btn = gr.Button(value = "Send to chatGPT",elem_id="submit-btn").style(
                     margin=True,
@@ -173,8 +174,8 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
 
 
 
-    translate_btn.click(fn=
-                        inputs=prompt_input_audio,
+    translate_btn.click(fn=translate_or_transcribe,
+                        inputs=[prompt_input_audio,whisper_task],
                         outputs=prompt_input
                         )
 
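For reference, the task switch in translate_or_transcribe works by forcing Whisper's decoder to emit either the transcribe or the translate special token at position 2 of the decoder prompt. Below is a minimal, self-contained sketch of that mechanism outside the Gradio app. The openai/whisper-base checkpoint, the audio_path argument, and the string compared against task are illustrative assumptions, not taken from this commit; note that the committed function compares task against the literal "transcribe", whereas this sketch compares against the radio label added in the UI.

from transformers import pipeline

# An ASR pipeline backed by Whisper (the checkpoint here is an assumption, not from the commit).
whisper_model = pipeline("automatic-speech-recognition", model="openai/whisper-base")

# Same indexing as in app.py: the task tokens sit near the end of the special ids.
all_special_ids = whisper_model.tokenizer.all_special_ids
transcribe_token_id = all_special_ids[-5]  # <|transcribe|>: keep the spoken language
translate_token_id = all_special_ids[-6]   # <|translate|>: output English

def translate_or_transcribe(audio_path, task):
    # Force the decoder to use the chosen task token at position 2,
    # mirroring the forced_decoder_ids line in the diff above.
    token_id = transcribe_token_id if task == "Transcribe in Spoken Language" else translate_token_id
    whisper_model.model.config.forced_decoder_ids = [[2, token_id]]
    return whisper_model(audio_path)["text"]

# Example usage (hypothetical file):
# text = translate_or_transcribe("sample.wav", "Transcribe in Spoken Language")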