Nithish310 committed
Commit 403cffd · verified · 1 parent: 80a1ef2

Update app.py

Files changed (1): app.py (+115, -108)
app.py CHANGED
@@ -85,127 +85,134 @@ client_yi = InferenceClient("01-ai/Yi-1.5-34B-Chat")
  def respond(message, history):
      func_caller = []

-     user_prompt = message
-     if message["files"]:
-         inputs = llava(message, history)
-         streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-         generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
-
-         thread = Thread(target=model.generate, kwargs=generation_kwargs)
-         thread.start()
-
-         buffer = ""
-         for new_text in streamer:
-             buffer += new_text
-             yield buffer
-     else:
-         functions_metadata = [
-             {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
-             {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER", "parameters": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed prompt"}}, "required": ["prompt"]}}},
-             {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "image generation prompt"}}, "required": ["query"]}}},
-             {"type": "function", "function": {"name": "image_qna", "description": "Answer question asked by user related to image", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "Question by user"}}, "required": ["query"]}}},
-         ]
-
-         for msg in history:
-             func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
-             func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
-
-         message_text = message["text"]
-         func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
-
-         response = client_gemma.chat_completion(func_caller, max_tokens=200)
-         response = str(response)
-         try:
-             response = response[int(response.find("{")):int(response.rindex("</"))]
-         except:
-             response = response[int(response.find("{")):(int(response.rfind("}"))+1)]
-         response = response.replace("\\n", "")
-         response = response.replace("\\'", "'")
-         response = response.replace('\\"', '"')
-         response = response.replace('\\', '')
-         print(f"\n{response}")
-
-         try:
-             json_data = json.loads(str(response))
-             if json_data["name"] == "web_search":
-                 query = json_data["arguments"]["query"]
-                 gr.Info("Searching Web")
-                 web_results = search(query)
-                 gr.Info("Extracting relevant Info")
-                 web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
-                 messages = f"system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
                  for msg in history:
                      messages += f"\nuser\n{str(msg[0])}"
                      messages += f"\nassistant\n{str(msg[1])}"
-                 messages += f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
-                 stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
                  output = ""
                  for response in stream:
                      if not response.token.text == "":
                          output += response.token.text
                          yield output
-             elif json_data["name"] == "image_generation":
-                 query = json_data["arguments"]["query"]
-                 gr.Info("Generating Image, Please wait 10 sec...")
-                 yield "Generating Image, Please wait 10 sec..."
-                 try:
-                     image = image_gen(f"{str(query)}")
-                     yield gr.Image(image[1])
-                 except:
-                     client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
-                     seed = random.randint(0, 999999)
-                     image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
-                     yield gr.Image(image)
-             elif json_data["name"] == "image_qna":
-                 if "files" in message:
-                     image = message["files"][0]
-                 else:
-                     for hist in history:
-                         if type(hist[0]) == tuple:
-                             image = hist[0][0]
-
-                 txt = json_data["arguments"]["query"]

-                 image = Image.open(image).convert("RGB")
-                 prompt = f"user <image>\n{txt}assistant"
-
-                 inputs = processor(prompt, image, return_tensors="pt")
-                 streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
-                 generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
-
-                 thread = Thread(target=model.generate, kwargs=generation_kwargs)
-                 thread.start()
-
-                 buffer = ""
-                 for new_text in streamer:
-                     buffer += new_text
-                     yield buffer
-         except:
-             messages = ""
-             for msg in history:
-                 messages += f"\nuser\n{str(msg[0])}"
-                 messages += f"\nassistant\n{str(msg[1])}"
-             messages += f"\nuser\n{str(message)}"
-             stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
-             output = ""
-             for response in stream:
-                 if not response.token.text == "":
-                     output += response.token.text
-                     yield output

  with gr.Blocks() as demo:
-     history = gr.Chatbot(label="ChatGPT Style Chatbot", height=500)
-     input_message = gr.Textbox(label="Message", placeholder="Type your message here...", show_label=False)
-     input_files = gr.File(label="Attach an image", file_types=["image"])

      with gr.Row():
-         submit_button = gr.Button("Submit")
-         clear_button = gr.Button("Clear")

-     def send_message(message, files):
-         return respond(message, history)
-
-     submit_button.click(send_message, inputs=[input_message, input_files], outputs=history)
-     clear_button.click(lambda: [], None, history)

- demo.launch()

  def respond(message, history):
      func_caller = []

+     # Ensure 'message' is a dictionary
+     if isinstance(message, dict):
+         user_prompt = message
+         if "files" in message and message["files"]:
+             inputs = llava(message, history)
+             streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
+             generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+
+             thread = Thread(target=model.generate, kwargs=generation_kwargs)
+             thread.start()
+
+             buffer = ""
+             for new_text in streamer:
+                 buffer += new_text
+                 yield buffer
+         else:
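+             # Function-calling path: describe the available tools and ask client_gemma to reply with a <functioncall> JSON blob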
+             functions_metadata = [
+                 {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
+                 {"type": "function", "function": {"name": "general_query", "description": "Reply general query of USER", "parameters": {"type": "object", "properties": {"prompt": {"type": "string", "description": "A detailed prompt"}}, "required": ["prompt"]}}},
+                 {"type": "function", "function": {"name": "image_generation", "description": "Generate image for user", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "image generation prompt"}}, "required": ["query"]}}},
+                 {"type": "function", "function": {"name": "image_qna", "description": "Answer question asked by user related to image", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "Question by user"}}, "required": ["query"]}}},
+             ]
+
+             for msg in history:
+                 func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
+                 func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})
+
+             message_text = message["text"]
+             func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_2": "value_2", ... }} }} </functioncall> [USER] {message_text}'})
+
+             response = client_gemma.chat_completion(func_caller, max_tokens=200)
+             response = str(response)
+             try:
+                 response = response[int(response.find("{")):int(response.rindex("</"))]
+             except:
+                 response = response[int(response.find("{")):(int(response.rfind("}"))+1)]
+             response = response.replace("\\n", "")
+             response = response.replace("\\'", "'")
+             response = response.replace('\\"', '"')
+             response = response.replace('\\', '')
+             print(f"\n{response}")
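+             # Parse the cleaned-up function call and dispatch on the tool name; any failure falls through to a plain chat reply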
+
+             try:
+                 json_data = json.loads(str(response))
+                 if json_data["name"] == "web_search":
+                     query = json_data["arguments"]["query"]
+                     yield "Searching Web"
+                     web_results = search(query)
+                     yield "Extracting relevant Info"
+                     web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
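+                     # Build a chat-style prompt that includes the web results and stream the answer from the Mixtral client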
+                     messages = f"system\nYou are OpenCHAT mini a helpful assistant made by Nithish. You are provided with WEB results from which you can find informations to answer users query in Structured and More better way. You do not say Unnecesarry things Only say thing which is important and relevant. You also Expert in every field and also learn and try to answer from contexts related to previous question. Try your best to give best response possible to user. You also try to show emotions using Emojis and reply like human, use short forms, friendly tone and emotions."
+                     for msg in history:
+                         messages += f"\nuser\n{str(msg[0])}"
+                         messages += f"\nassistant\n{str(msg[1])}"
+                     messages += f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
+                     stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
+                     output = ""
+                     for response in stream:
+                         if not response.token.text == "":
+                             output += response.token.text
+                             yield output
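+                 # Image generation: try the image_gen pipeline first, then fall back to Stable Diffusion 3 via the Inference API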
+                 elif json_data["name"] == "image_generation":
+                     query = json_data["arguments"]["query"]
+                     yield "Generating Image, Please wait 10 sec..."
+                     try:
+                         image = image_gen(f"{str(query)}")
+                         yield gr.Image(image[1])
+                     except:
+                         client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
+                         seed = random.randint(0, 999999)
+                         image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
+                         yield gr.Image(image)
+                 elif json_data["name"] == "image_qna":
+                     if "files" in message:
+                         image = message["files"][0]
+                     else:
+                         for hist in history:
+                             if type(hist[0]) == tuple:
+                                 image = hist[0][0]
+
+                     txt = json_data["arguments"]["query"]
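+                     # Open the chosen image and stream an answer from the local vision model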
+
+                     image = Image.open(image).convert("RGB")
+                     prompt = f"user <image>\n{txt}assistant"
+
+                     inputs = processor(prompt, image, return_tensors="pt")
+                     streamer = TextIteratorStreamer(processor, skip_prompt=True, **{"skip_special_tokens": True})
+                     generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024)
+
+                     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+                     thread.start()
+
+                     buffer = ""
+                     for new_text in streamer:
+                         buffer += new_text
+                         yield buffer
+             except:
+                 messages = ""
                  for msg in history:
                      messages += f"\nuser\n{str(msg[0])}"
                      messages += f"\nassistant\n{str(msg[1])}"
+                 messages += f"\nuser\n{str(message)}"
+                 stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
                  output = ""
                  for response in stream:
                      if not response.token.text == "":
                          output += response.token.text
                          yield output
+     else:
+         yield "Error: Message format is incorrect."

+ # Interface Layout
  with gr.Blocks() as demo:
+     chatbot = gr.Chatbot(label="ChatGPT Style Chatbot", height=500)
+
      with gr.Row():
+         upload_button = gr.UploadButton(label="Upload File", file_types=["image"], elem_id="upload-button")
+         with gr.Column(scale=8):
+             text_input = gr.Textbox(label="", placeholder="Type your message here...", lines=1)
+         submit_button = gr.Button("Send")
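+     # Stream each chunk yielded by respond() into the chatbot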
+
+     def update_chat(message, history):
+         history = (history or []) + [[message, ""]]
+         for chunk in respond({"text": message, "files": []}, history[:-1]):
+             history[-1][1] = chunk
+             yield history
+
+     text_input.submit(update_chat, inputs=[text_input, chatbot], outputs=chatbot)
+     submit_button.click(update_chat, inputs=[text_input, chatbot], outputs=chatbot)
+     upload_button.upload(lambda file: str(file), inputs=upload_button, outputs=text_input)
+
+ # Run the demo
+ demo.launch()