tarrasyed19472007 committed
Commit 2c8b5b1 · verified · 1 Parent(s): 74b741d

Update app.py

Files changed (1)
  1. app.py +12 -6
app.py CHANGED
@@ -1,18 +1,22 @@
 import streamlit as st
-import requests
-import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from gtts import gTTS
-import os
 import tempfile
+import os
 import speech_recognition as sr

 # Set your Hugging Face API key
 HUGGING_FACE_API_KEY = "voicebot"

 # Load the model and tokenizer
-tokenizer = AutoTokenizer.from_pretrained("declare-lab/tango-full")
-model = AutoModelForCausalLM.from_pretrained("declare-lab/tango-full")
+@st.cache_resource
+def load_model():
+    model_name = "declare-lab/tango-full"
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=HUGGING_FACE_API_KEY)
+    return model, tokenizer
+
+model, tokenizer = load_model()

 # Function to get a response from the chatbot
 def get_response(input_text):
@@ -26,7 +30,7 @@ def text_to_speech(text):
     tts = gTTS(text=text, lang='en')
     with tempfile.NamedTemporaryFile(delete=True) as fp:
         tts.save(f"{fp.name}.mp3")
-        os.system(f"start {fp.name}.mp3")  # For Windows, use 'open' for macOS
+        os.system(f"start {fp.name}.mp3")  # Adjust command based on OS

 # Speech Recognition Function
 def recognize_speech():
@@ -62,6 +66,8 @@ if user_input:
     st.write("Chatbot: ", chatbot_response)
     text_to_speech(chatbot_response)

+    text_to_speech(chatbot_response)
+
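
Note on the caching change above: @st.cache_resource makes Streamlit load the model and tokenizer once per process and reuse them across reruns, instead of reloading them on every user interaction. A minimal standalone sketch of the same pattern (the repo id is simply reused from this diff; the token argument is omitted here, and newer transformers releases accept token= in place of the use_auth_token= used in the commit):

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # runs once per process; the returned objects are shared across reruns
def load_model(model_name: str):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model("declare-lab/tango-full")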
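
The playback line keeps os.system(f"start {fp.name}.mp3"), which only works on Windows; the updated comment says to adjust the command per OS. One possible way to make that adjustment explicit, as a sketch using only the standard sys and subprocess modules (not part of this commit):

import subprocess
import sys

def play_audio(path):
    # Open the file with the platform's default handler.
    if sys.platform.startswith("win"):
        # 'start' is a cmd built-in, so it needs shell=True; the empty "" is the window title.
        subprocess.run(f'start "" "{path}"', shell=True, check=False)
    elif sys.platform == "darwin":
        subprocess.run(["open", path], check=False)
    else:
        subprocess.run(["xdg-open", path], check=False)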