arpita-23 committed on
Commit
189677e
·
verified ·
1 Parent(s): e2fe39b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -5
app.py CHANGED
@@ -6,6 +6,7 @@ from langchain_chroma import Chroma
6
  from langchain.memory import ConversationBufferMemory
7
  from langchain.chains import ConversationalRetrievalChain
8
  from vectorize_documents import embeddings # Import embeddings from the vectorization script
 
9
 
10
  # Set up working directory and API configuration
11
  working_dir = os.path.dirname(os.path.abspath(__file__))
@@ -46,21 +47,86 @@ def chat_chain(vectorstore):
46
  )
47
  return chain
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  # Streamlit UI
50
- st.title("Bhagavad Gita & Yoga Sutras Query Assistant")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  vectorstore = setup_vectorstore()
53
  chain = chat_chain(vectorstore)
54
 
55
- # User input
56
- user_query = st.text_input("Ask a question about the Bhagavad Gita or Yoga Sutras:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
  if user_query:
58
  # Use `__call__` to get all outputs as a dictionary
59
  response = chain({"question": user_query})
60
  answer = response.get("answer", "No answer found.")
61
  source_documents = response.get("source_documents", [])
62
 
63
- st.write(f"**Answer:** {answer}")
64
- st.write(f"**Source Documents:**")
 
65
  for doc in source_documents:
66
  st.write(doc)
 
6
  from langchain.memory import ConversationBufferMemory
7
  from langchain.chains import ConversationalRetrievalChain
8
  from vectorize_documents import embeddings # Import embeddings from the vectorization script
9
+ import speech_recognition as sr # For voice recognition
10
 
11
  # Set up working directory and API configuration
12
  working_dir = os.path.dirname(os.path.abspath(__file__))
 
47
  )
48
  return chain
49
 
50
def transcribe_audio(selected_language):
    """Capture one utterance from the microphone and transcribe it.

    Parameters
    ----------
    selected_language : str
        Language code (e.g. ``"en-US"``, ``"hi-IN"``) passed straight to the
        Google Web Speech API recognizer.

    Returns
    -------
    str or None
        The transcribed query on success, otherwise ``None`` — every failure
        path reports the problem via ``st.error`` instead of raising.
    """
    try:
        recognizer = sr.Recognizer()
        with sr.Microphone() as source:
            # NOTE: was a ribbon emoji (🎀) / mojibake in the original;
            # a microphone is clearly the intended glyph here.
            st.write("🎤 Listening... Please ask your question.")
            try:
                # Give the speaker up to 5 seconds to start talking.
                audio = recognizer.listen(source, timeout=5)
                # Transcribe via Google's free web API in the chosen language.
                query = recognizer.recognize_google(audio, language=selected_language)
                st.write(f"**🗣️ You said:** {query}")
                return query
            except sr.WaitTimeoutError:
                st.error("⏳ You didn't speak in time. Please try again.")
            except sr.UnknownValueError:
                st.error("❌ Sorry, could not understand the audio. Please try again.")
            except sr.RequestError as e:
                st.error(f"⚠️ Error with speech recognition service: {e}")
    except AttributeError:
        # speech_recognition raises AttributeError when PyAudio is absent.
        st.error("❌ Microphone or PyAudio not available. Please check installation.")
    except OSError as e:
        # No default input device, or the device is busy/unavailable.
        st.error(f"⚠️ Audio input error: {e}")
    return None
72
+
73
# Streamlit UI
# Inline CSS for the page title, subtitle and icon (mojibake emoji in the
# original headings/labels repaired below: 📚 📝 🎙️ ✅ 📄 🗣️).
st.markdown(
    """
    <style>
    .main-title {
        font-size: 36px;
        color: #FF8C00;
        font-weight: bold;
    }
    .sub-title {
        font-size: 24px;
        color: #FF8C00;
    }
    .icon {
        font-size: 50px;
        color: #FF8C00;
    }
    </style>
    """,
    unsafe_allow_html=True
)

st.markdown('<div class="icon">📚</div>', unsafe_allow_html=True)
st.markdown('<div class="main-title">Bhagavad Gita & Yoga Sutras Query Assistant</div>', unsafe_allow_html=True)
st.markdown('<div class="sub-title">Ask questions and explore timeless wisdom</div>', unsafe_allow_html=True)

# Build the retriever-backed conversational chain once per script run.
vectorstore = setup_vectorstore()
chain = chat_chain(vectorstore)

# User input options
st.write("You can either type your question or use voice search:")
st.markdown("### 📝 Type your query or 🎙️ Use voice search")

# Multilingual support: map display names to Google speech-recognition codes.
language_options = {
    "English": "en-US",
    "Hindi": "hi-IN",
    "Spanish": "es-ES",
    "French": "fr-FR",
    "German": "de-DE",
}
selected_language = st.selectbox("Select your language for voice search:", options=list(language_options.keys()))
language_code = language_options[selected_language]

# Voice search runs only on the rerun triggered by the button click;
# otherwise fall back to the text box.
if st.button("🎙️ Use Voice Search"):
    user_query = transcribe_audio(language_code)
else:
    user_query = st.text_input("Ask a question about the Bhagavad Gita or Yoga Sutras:")

if user_query:
    # Calling the chain returns a dict; "answer" always present,
    # "source_documents" only when the chain was built to return them.
    response = chain({"question": user_query})
    answer = response.get("answer", "No answer found.")
    source_documents = response.get("source_documents", [])

    st.markdown("### ✅ **Answer:**")
    st.write(answer)
    st.markdown("### 📄 **Source Documents:**")
    for doc in source_documents:
        st.write(doc)