umair894 committed (verified)
Commit 01746d6 · Parent: c2461c1

Create app.py

Files changed (1):
  1. app.py +197 -0
app.py ADDED
import os
from io import BytesIO
from pathlib import Path
from typing import List

from openai import AsyncAssistantEventHandler, AsyncOpenAI, OpenAI

from literalai.helper import utc_now

import chainlit as cl
from chainlit.config import config
from chainlit.element import Element


# Async client for streaming runs and uploads; sync client only for the
# one-off assistant lookup at startup.
async_openai_client = AsyncOpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
sync_openai_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

assistant = sync_openai_client.beta.assistants.retrieve(os.environ.get("OPENAI_ASSISTANT_ID"))

config.ui.name = assistant.name


class EventHandler(AsyncAssistantEventHandler):
    """Streams assistant run events (text, tool calls, images) into the Chainlit UI."""

    def __init__(self, assistant_name: str) -> None:
        super().__init__()
        self.current_message: cl.Message = None
        self.current_step: cl.Step = None
        self.current_tool_call = None
        self.assistant_name = assistant_name

    async def on_text_created(self, text) -> None:
        self.current_message = await cl.Message(author=self.assistant_name, content="").send()

    async def on_text_delta(self, delta, snapshot):
        await self.current_message.stream_token(delta.value)

    async def on_text_done(self, text):
        await self.current_message.update()

    async def on_tool_call_created(self, tool_call):
        self.current_tool_call = tool_call.id
        self.current_step = cl.Step(name=tool_call.type, type="tool")
        self.current_step.language = "python"
        self.current_step.created_at = utc_now()
        await self.current_step.send()

    async def on_tool_call_delta(self, delta, snapshot):
        # A new tool call started mid-stream: open a fresh step for it.
        if snapshot.id != self.current_tool_call:
            self.current_tool_call = snapshot.id
            self.current_step = cl.Step(name=delta.type, type="tool")
            self.current_step.language = "python"
            self.current_step.start = utc_now()
            await self.current_step.send()

        if delta.type == "code_interpreter":
            if delta.code_interpreter.outputs:
                # Surface code interpreter log output as separate error steps.
                for output in delta.code_interpreter.outputs:
                    if output.type == "logs":
                        error_step = cl.Step(
                            name=delta.type,
                            type="tool"
                        )
                        error_step.is_error = True
                        error_step.output = output.logs
                        error_step.language = "markdown"
                        error_step.start = self.current_step.start
                        error_step.end = utc_now()
                        await error_step.send()
            else:
                # Stream the generated code into the current step.
                if delta.code_interpreter.input:
                    await self.current_step.stream_token(delta.code_interpreter.input)

    async def on_tool_call_done(self, tool_call):
        self.current_step.end = utc_now()
        await self.current_step.update()

    async def on_image_file_done(self, image_file):
        # Download the generated image and attach it to the current message.
        image_id = image_file.file_id
        response = await async_openai_client.files.with_raw_response.content(image_id)
        image_element = cl.Image(
            name=image_id,
            content=response.content,
            display="inline",
            size="large"
        )
        if not self.current_message.elements:
            self.current_message.elements = []
        self.current_message.elements.append(image_element)
        await self.current_message.update()


@cl.step(type="tool")
async def speech_to_text(audio_file):
    # Transcribe the recorded audio with Whisper.
    response = await async_openai_client.audio.transcriptions.create(
        model="whisper-1", file=audio_file
    )

    return response.text


async def upload_files(files: List[Element]):
    # Upload local files to OpenAI so they can be attached to the thread message.
    file_ids = []
    for file in files:
        uploaded_file = await async_openai_client.files.create(
            file=Path(file.path), purpose="assistants"
        )
        file_ids.append(uploaded_file.id)
    return file_ids


async def process_files(files: List[Element]):
    # Upload files if any and get file_ids
    file_ids = []
    if len(files) > 0:
        file_ids = await upload_files(files)

    return [
        {
            "file_id": file_id,
            "tools": [{"type": "code_interpreter"}, {"type": "file_search"}],
        }
        for file_id in file_ids
    ]


@cl.on_chat_start
async def start_chat():
    # Create a Thread
    thread = await async_openai_client.beta.threads.create()
    # Store thread ID in user session for later use
    cl.user_session.set("thread_id", thread.id)
    await cl.Avatar(name=assistant.name, path="./public/logo.png").send()
    await cl.Message(content=f"Hello, I'm {assistant.name}!", disable_feedback=True).send()


@cl.on_message
async def main(message: cl.Message):
    thread_id = cl.user_session.get("thread_id")

    attachments = await process_files(message.elements)

    # Add a Message to the Thread
    oai_message = await async_openai_client.beta.threads.messages.create(
        thread_id=thread_id,
        role="user",
        content=message.content,
        attachments=attachments,
    )

    # Create and Stream a Run
    async with async_openai_client.beta.threads.runs.stream(
        thread_id=thread_id,
        assistant_id=assistant.id,
        event_handler=EventHandler(assistant_name=assistant.name),
    ) as stream:
        await stream.until_done()


@cl.on_audio_chunk
async def on_audio_chunk(chunk: cl.AudioChunk):
    if chunk.isStart:
        buffer = BytesIO()
        # This is required for whisper to recognize the file type
        buffer.name = f"input_audio.{chunk.mimeType.split('/')[1]}"
        # Initialize the session for a new audio stream
        cl.user_session.set("audio_buffer", buffer)
        cl.user_session.set("audio_mime_type", chunk.mimeType)

    # Write the chunks to a buffer and transcribe the whole audio at the end
    cl.user_session.get("audio_buffer").write(chunk.data)


@cl.on_audio_end
async def on_audio_end(elements: list[Element]):
    # Get the audio buffer from the session
    audio_buffer: BytesIO = cl.user_session.get("audio_buffer")
    audio_buffer.seek(0)  # Move the file pointer to the beginning
    audio_file = audio_buffer.read()
    audio_mime_type: str = cl.user_session.get("audio_mime_type")

    # Echo the recording back to the user as an inline audio element.
    input_audio_el = cl.Audio(
        mime=audio_mime_type, content=audio_file, name=audio_buffer.name
    )
    await cl.Message(
        author="You",
        type="user_message",
        content="",
        elements=[input_audio_el, *elements],
    ).send()

    # Transcribe the whole recording, then handle it like a typed message.
    whisper_input = (audio_buffer.name, audio_file, audio_mime_type)
    transcription = await speech_to_text(whisper_input)

    msg = cl.Message(author="You", content=transcription, elements=elements)

    await main(message=msg)
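
The app assumes an existing assistant whose ID is supplied through OPENAI_ASSISTANT_ID (see the retrieve call near the top of app.py). If you do not have one yet, the helper below is a minimal sketch of creating a compatible assistant with the standard OpenAI Python SDK; the script name, assistant name, instructions, and model are placeholders, not values taken from this commit. Once the ID is exported alongside OPENAI_API_KEY, the app starts with "chainlit run app.py".

# create_assistant.py: one-off helper sketch (not part of this commit).
# Assumes OPENAI_API_KEY is set; name, instructions, and model are placeholders.
import os

from openai import OpenAI

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

assistant = client.beta.assistants.create(
    name="My Assistant",  # placeholder
    instructions="You are a helpful assistant.",  # placeholder
    model="gpt-4o",  # placeholder
    # Match the tools app.py attaches to uploaded files.
    tools=[{"type": "code_interpreter"}, {"type": "file_search"}],
)

# Export this value as OPENAI_ASSISTANT_ID, then run: chainlit run app.py
print(assistant.id)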