liyaoshi commited on
Commit
0105e62
·
verified ·
1 Parent(s): bd842b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +132 -130
app.py CHANGED
@@ -102,153 +102,155 @@ def supabase_fetch_user(user_name):
102
  # response += token
103
  # yield response
104
 
105
- def get_completion(message,history,profile: gr.OAuthProfile | None,oauth_token: gr.OAuthToken | None,request: gr.Request):
106
- if request:
107
- ip = request.client.host
108
- print("Query parameters:", dict(request.query_params))
109
- sign = dict(request.query_params).get('__sign')
110
-
111
- # get cookie
112
- headers = request.headers.raw
113
 
114
- # find 'cookie'
115
- cookie_header = next((header for header in headers if header[0] == b'cookie'), None)
116
 
117
- if cookie_header:
118
- # extract cookie
119
- cookie_value = cookie_header[1].decode()
120
- print(f"Cookie: {cookie_value}")
121
- else:
122
- cookie_value = ''
123
- print("No cookie found in request headers")
124
 
125
 
126
- # check login
127
- if profile is None:
128
- # raise gr.Error('Click "Sign in with Hugging Face" to continue')
129
- user_name = 'unknown'
130
- user_oauth_token = ''
131
- name = 'unknown'
132
- pf = ''
133
- pic = ''
134
- else:
135
- user_name = profile.username
136
- user_oauth_token = oauth_token.token
137
- name = profile.name
138
- pf = profile.profile
139
- pic = profile.picture
140
 
141
- # check if user exists
142
- user_data = supabase_fetch_user(user_name)
143
- if not user_data[1]:
144
- supabase_insert_user(name,user_name,pf,pic,user_oauth_token)
145
 
146
 
147
- # check if messages are empty
148
- if message["text"].strip() == "" and not message["files"]:
149
- raise gr.Error("Please input a query and optionally image(s).")
150
 
151
- if message["text"].strip() == "" and message["files"]:
152
- raise gr.Error("Please input a text query along the image(s).")
153
 
154
- text = message['text']
155
- user_message = [
156
- {"type": "text", "text": text},
157
- ]
158
- content_type = 'text'
159
- if message['files']:
160
- file = message['files'][0]
161
- public_url = upload_file_to_gcs_blob(file)
162
- if is_image(file): # only support image file now
163
- content_image = {
164
- "type": "image_url",
165
- "image_url": {
166
- "url": public_url,
167
- },}
168
- user_message.append(content_image)
169
- content_type = 'image'
170
- else:
171
- raise gr.Error("Only support image files now.")
172
-
173
- history_openai_format = []
174
- for human, assistant in history:
175
- # check if there is image info in the history message or empty history messages
176
 
177
- if isinstance(human, tuple) or human == "" or assistant is None:
178
- continue
179
 
180
- history_openai_format.append({"role": "user", "content": human })
181
- history_openai_format.append({"role": "assistant", "content":assistant})
182
- history_openai_format.append({"role": "user", "content": user_message})
183
- # print(history_openai_format)
184
 
185
- system_message = '''You are GPT-4o("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
186
- GPT-4o matches GPT-4 Turbo performance on text in English and code, with significant improvement on text in non-English languages, while also being much faster.
187
- GPT-4o is especially better at vision and audio understanding compared to existing models.
188
- GPT-4o's text and image capabilities are available for users now. More capabilities like audio and video will be rolled out iteratively in the future.
189
- '''
190
 
191
 
192
- # headers
193
- openai_api_key = os.environ.get('openai_api_key')
194
- base_url = os.environ.get('base_url')
195
- headers = {
196
- 'Authorization': f'Bearer {openai_api_key}'
197
- }
198
-
199
- temperature = 0.7
200
- max_tokens = 2048
201
-
202
- init_message = [{"role": "system", "content": system_message}]
203
- messages = init_message + history_openai_format[-5:] #system message + latest 2 round dialogues + user input
204
- print(messages)
205
- # request body
206
- data = {
207
- 'model': 'gpt-4o', # we use gpt-4o here
208
- 'messages': messages,
209
- 'temperature':temperature,
210
- 'max_tokens':max_tokens,
211
- 'stream':True,
212
- # 'stream_options':{"include_usage": True}, # retrieving token usage for stream response
213
- }
214
-
215
- # get response
216
- # response = requests.post(base_url, headers=headers, json=data)
217
- # response_data = response.json()
218
- # print(response_data)
219
- # print('-----------------------------------\n')
220
- # if 'error' in response_data:
221
- # response_content = response_data['error']['message']
222
- # else:
223
- # response_content = response_data['choices'][0]['message']['content']
224
- # usage = response_data['usage']
225
- # return response_content
226
-
227
- # get response with stream
228
- response = requests.post(base_url, headers=headers, json=data,stream=True)
229
- response_content = ""
230
- for line in response.iter_lines():
231
- line = line.decode().strip()
232
- if line == "data: [DONE]":
233
- continue
234
- elif line.startswith("data: "):
235
- line = line[6:] # remove prefix "data: "
236
- try:
237
- data = json.loads(line)
238
- if "delta" in data["choices"][0]:
239
- content = data["choices"][0]["delta"].get("content", "")
240
- response_content += content
241
- yield response_content
242
- except json.JSONDecodeError:
243
- print(f"Error decoding line: {line}")
244
-
245
- print(response_content)
246
- print('-----------------------------------\n')
247
- response_data = {}
248
 
249
- supabase_insert_message(user_message,response_content,messages,response_data,user_name,user_oauth_token,ip,sign,cookie_value,content_type)
250
 
251
 
 
 
252
 
253
 
254
  """
 
102
  # response += token
103
  # yield response
104
 
105
+ # def get_completion(message,history,profile: gr.OAuthProfile | None,oauth_token: gr.OAuthToken | None,request: gr.Request):
106
+ # if request:
107
+ # ip = request.client.host
108
+ # print("Query parameters:", dict(request.query_params))
109
+ # sign = dict(request.query_params).get('__sign')
110
+
111
+ # # get cookie
112
+ # headers = request.headers.raw
113
 
114
+ # # find 'cookie'
115
+ # cookie_header = next((header for header in headers if header[0] == b'cookie'), None)
116
 
117
+ # if cookie_header:
118
+ # # extract cookie
119
+ # cookie_value = cookie_header[1].decode()
120
+ # print(f"Cookie: {cookie_value}")
121
+ # else:
122
+ # cookie_value = ''
123
+ # print("No cookie found in request headers")
124
 
125
 
126
+ # # check login
127
+ # if profile is None:
128
+ # # raise gr.Error('Click "Sign in with Hugging Face" to continue')
129
+ # user_name = 'unknown'
130
+ # user_oauth_token = ''
131
+ # name = 'unknown'
132
+ # pf = ''
133
+ # pic = ''
134
+ # else:
135
+ # user_name = profile.username
136
+ # user_oauth_token = oauth_token.token
137
+ # name = profile.name
138
+ # pf = profile.profile
139
+ # pic = profile.picture
140
 
141
+ # # check if user exists
142
+ # user_data = supabase_fetch_user(user_name)
143
+ # if not user_data[1]:
144
+ # supabase_insert_user(name,user_name,pf,pic,user_oauth_token)
145
 
146
 
147
+ # # check if messages are empty
148
+ # if message["text"].strip() == "" and not message["files"]:
149
+ # raise gr.Error("Please input a query and optionally image(s).")
150
 
151
+ # if message["text"].strip() == "" and message["files"]:
152
+ # raise gr.Error("Please input a text query along the image(s).")
153
 
154
+ # text = message['text']
155
+ # user_message = [
156
+ # {"type": "text", "text": text},
157
+ # ]
158
+ # content_type = 'text'
159
+ # if message['files']:
160
+ # file = message['files'][0]
161
+ # public_url = upload_file_to_gcs_blob(file)
162
+ # if is_image(file): # only support image file now
163
+ # content_image = {
164
+ # "type": "image_url",
165
+ # "image_url": {
166
+ # "url": public_url,
167
+ # },}
168
+ # user_message.append(content_image)
169
+ # content_type = 'image'
170
+ # else:
171
+ # raise gr.Error("Only support image files now.")
172
+
173
+ # history_openai_format = []
174
+ # for human, assistant in history:
175
+ # # check if there is image info in the history message or empty history messages
176
 
177
+ # if isinstance(human, tuple) or human == "" or assistant is None:
178
+ # continue
179
 
180
+ # history_openai_format.append({"role": "user", "content": human })
181
+ # history_openai_format.append({"role": "assistant", "content":assistant})
182
+ # history_openai_format.append({"role": "user", "content": user_message})
183
+ # # print(history_openai_format)
184
 
185
+ # system_message = '''You are GPT-4o("o" for omni), OpenAI's new flagship model that can reason across audio, vision, and text in real time.
186
+ # GPT-4o matches GPT-4 Turbo performance on text in English and code, with significant improvement on text in non-English languages, while also being much faster.
187
+ # GPT-4o is especially better at vision and audio understanding compared to existing models.
188
+ # GPT-4o's text and image capabilities are available for users now. More capabilities like audio and video will be rolled out iteratively in the future.
189
+ # '''
190
 
191
 
192
+ # # headers
193
+ # openai_api_key = os.environ.get('openai_api_key')
194
+ # base_url = os.environ.get('base_url')
195
+ # headers = {
196
+ # 'Authorization': f'Bearer {openai_api_key}'
197
+ # }
198
+
199
+ # temperature = 0.7
200
+ # max_tokens = 2048
201
+
202
+ # init_message = [{"role": "system", "content": system_message}]
203
+ # messages = init_message + history_openai_format[-5:] #system message + latest 2 round dialogues + user input
204
+ # print(messages)
205
+ # # request body
206
+ # data = {
207
+ # 'model': 'gpt-4o', # we use gpt-4o here
208
+ # 'messages': messages,
209
+ # 'temperature':temperature,
210
+ # 'max_tokens':max_tokens,
211
+ # 'stream':True,
212
+ # # 'stream_options':{"include_usage": True}, # retrieving token usage for stream response
213
+ # }
214
+
215
+ # # get response
216
+ # # response = requests.post(base_url, headers=headers, json=data)
217
+ # # response_data = response.json()
218
+ # # print(response_data)
219
+ # # print('-----------------------------------\n')
220
+ # # if 'error' in response_data:
221
+ # # response_content = response_data['error']['message']
222
+ # # else:
223
+ # # response_content = response_data['choices'][0]['message']['content']
224
+ # # usage = response_data['usage']
225
+ # # return response_content
226
+
227
+ # # get response with stream
228
+ # response = requests.post(base_url, headers=headers, json=data,stream=True)
229
+ # response_content = ""
230
+ # for line in response.iter_lines():
231
+ # line = line.decode().strip()
232
+ # if line == "data: [DONE]":
233
+ # continue
234
+ # elif line.startswith("data: "):
235
+ # line = line[6:] # remove prefix "data: "
236
+ # try:
237
+ # data = json.loads(line)
238
+ # if "delta" in data["choices"][0]:
239
+ # content = data["choices"][0]["delta"].get("content", "")
240
+ # response_content += content
241
+ # yield response_content
242
+ # except json.JSONDecodeError:
243
+ # print(f"Error decoding line: {line}")
244
+
245
+ # print(response_content)
246
+ # print('-----------------------------------\n')
247
+ # response_data = {}
248
 
249
+ # supabase_insert_message(user_message,response_content,messages,response_data,user_name,user_oauth_token,ip,sign,cookie_value,content_type)
250
 
251
 
252
+ def get_completion(message,history):
253
+ return "**Important Announcement:** \n\nThis space is shutting down now. \n\nVisit [chatgpt-4o](https://chatgpt-4o.streamlit.app/) for an improved UI experience and future enhancements."
254
 
255
 
256
  """