Daaku-C5 committed on
Commit 79a67ac · verified · 1 Parent(s): 4481da4

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +158 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,160 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
+from openai import OpenAI
+import sounddevice as sd
+import scipy.io.wavfile
+import io
+import base64
+import os
+import time
 
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+
+st.set_page_config(page_title="Voice Bot", layout="wide")
+
+# Configuration
+SAMPLE_RATE = 44100
+RECORD_DURATION = 5
+TEMP_AUDIO_FILE = "temp_audio.wav"
+
+# Initialize OpenAI client
+api_key = st.secrets['openai']
+client = OpenAI(api_key=api_key)
+
+# Initialize session state variables if they don't exist
+if 'recorded_audio' not in st.session_state:
+    st.session_state.recorded_audio = None
+if 'user_text' not in st.session_state:
+    st.session_state.user_text = None
+if 'ai_reply' not in st.session_state:
+    st.session_state.ai_reply = None
+
+def load_context():
+    """Load the context from file."""
+    try:
+        with open("context.txt", "r") as f:
+            return f.read()
+    except FileNotFoundError:
+        st.error("Context file not found!")
+        return ""
+
+def record_audio():
+    """Record audio and return the buffer."""
+    progress_bar = st.progress(0)
+    recording = sd.rec(int(RECORD_DURATION * SAMPLE_RATE),
+                       samplerate=SAMPLE_RATE,
+                       channels=1)
+
+    # Update progress bar while recording
+    for i in range(RECORD_DURATION * 10):
+        progress_bar.progress((i + 1) / (RECORD_DURATION * 10))
+        time.sleep(0.1)
+
+    sd.wait()
+    progress_bar.empty()  # Remove progress bar after recording
+
+    buf = io.BytesIO()
+    scipy.io.wavfile.write(buf, SAMPLE_RATE, recording)
+    buf.seek(0)
+    return buf
+
+def transcribe_audio(audio_buffer):
+    """Transcribe audio using the Whisper API."""
+    with open(TEMP_AUDIO_FILE, "wb") as f:
+        f.write(audio_buffer.getvalue())
+
+    with open(TEMP_AUDIO_FILE, "rb") as audio_file:
+        transcript = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=audio_file
+        )
+    return transcript.text
+
+def get_ai_response(user_text, context):
+    """Get AI response using GPT-4."""
+    system_prompt = f"""
+    You are Prakhar.
+    You must respond **only using the following context**:
+
+    {context}
+
+    If the user's question cannot be answered using this context, respond with:
+    "I'm not sure about that based on what I know."
+    """
+
+    response = client.chat.completions.create(
+        model="gpt-4",
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_text}
+        ]
+    )
+    return response.choices[0].message.content
+
+def text_to_speech(text):
+    """Convert text to speech using OpenAI TTS."""
+    speech = client.audio.speech.create(
+        model="tts-1",
+        voice="onyx",
+        input=text
+    )
+    return base64.b64encode(speech.content).decode()
+
+def handle_record_button():
+    """Handle a recording-button click."""
+    st.session_state.processing = True
+    info_placeholder = st.empty()
+    info_placeholder.info("Recording...")
+    audio_buffer = record_audio()
+    info_placeholder.empty()
+    st.session_state.recorded_audio = audio_buffer
+
+def main():
+    st.title("Voice Bot")
+
+    if 'context' not in st.session_state:
+        st.session_state.context = load_context()
+    if 'processing' not in st.session_state:
+        st.session_state.processing = False
+
+    with st.container():
+
+        audio, script = st.columns(2, border=True)
+
+        with audio:
+            st.subheader("Audio Input")
+            st.button("🎙️ Record Voice", on_click=handle_record_button)
+
+            # Create a placeholder for processing status
+            process_placeholder = st.empty()
+
+            # Handle processing if recording just completed
+            if st.session_state.processing:
+                with process_placeholder.container():
+                    with st.spinner("Processing..."):
+                        st.session_state.user_text = transcribe_audio(st.session_state.recorded_audio)
+                        st.session_state.ai_reply = get_ai_response(st.session_state.user_text, st.session_state.context)
+                        audio_b64 = text_to_speech(st.session_state.ai_reply)
+                        st.session_state.ai_audio = audio_b64
+                        st.session_state.processing = False
+
+            # Display the recorded audio if it exists
+            if st.session_state.recorded_audio is not None:
+                st.audio(st.session_state.recorded_audio, format="audio/wav")
+            if hasattr(st.session_state, 'ai_audio'):
+                st.audio(f"data:audio/mp3;base64,{st.session_state.ai_audio}", format="audio/mp3")
+
+        with script:
+            st.subheader("Conversation")
+            if st.session_state.user_text is not None:
+                st.markdown("**You said:**")
+                st.markdown(f"{st.session_state.user_text}")
+                st.markdown("**AI Response:**")
+                st.markdown(f"{st.session_state.ai_reply}")
+
+    st.divider()
+
+    with st.container(border=True):
+        st.text_area("Context", value=st.session_state.context, height=270, disabled=False)
+        st.markdown("You can update the context in the `context.txt` file.")
+
+if __name__ == "__main__":
+    main()
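
Note on configuration: the added code reads its API key with `st.secrets['openai']`, so the deployment is expected to define a top-level `openai` entry in its Streamlit secrets. For running or testing the script where no secrets file is configured, an environment-variable fallback is a common pattern. The sketch below is a minimal, hypothetical variant; the `make_client` helper and the `OPENAI_API_KEY` variable name are assumptions for illustration, not part of this commit:

```python
# Hypothetical helper: prefer the app's Streamlit secret, fall back to an
# environment variable so the module can run without a .streamlit/secrets.toml.
import os

import streamlit as st
from openai import OpenAI

def make_client() -> OpenAI:
    try:
        # Same key name the app uses in its st.secrets lookup.
        api_key = st.secrets["openai"]
    except Exception:
        # No secrets file, or no 'openai' entry: try the environment instead.
        api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError("Set st.secrets['openai'] or OPENAI_API_KEY.")
    return OpenAI(api_key=api_key)
```

With a helper like this, `client = make_client()` would replace the two initialization lines at the top of the script, while deployments that already define `st.secrets['openai']` behave exactly as the committed code does.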