aiqcamp committed on
Commit
45e9cef
·
verified ·
1 Parent(s): 9cb71c2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -73
app.py CHANGED
@@ -27,62 +27,32 @@ def format_chat_history(messages: list) -> list:
27
  })
28
  return formatted_history
29
 
30
- def stream_gemini_response(message_input: str|gr.File, messages: list) -> Iterator[list]:
31
  """
32
- Streams thoughts and response with conversation history support, handling text or file input.
33
  """
34
- user_message = ""
35
- input_file = None
36
-
37
- if isinstance(message_input, str):
38
- user_message = message_input
39
- print(f"\n=== New Request (Text) ===")
40
- print(f"User message: {user_message}")
41
- if not user_message.strip(): # Robust check: if text message is empty or whitespace
42
- messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message or upload a file.")) # More specific message
43
- yield messages
44
- return
45
-
46
- elif isinstance(message_input, gr.File): #gr.File directly should be used with newer gradio versions (v4+)
47
- input_file = message_input.name # Access the temporary file path
48
- file_type = message_input.original_name.split('.')[-1].lower() #Get original filename's extension
49
- print(f"\n=== New Request (File) ===")
50
- print(f"File uploaded: {input_file}, type: {file_type}")
51
-
52
- try:
53
- with open(input_file, "rb") as f: #Open file in binary mode for universal handling
54
- file_data = f.read()
55
-
56
- if file_type in ['png', 'jpg', 'jpeg', 'gif']: #Example Image Types - expand as needed
57
- user_message = {"inline_data": {"mime_type": f"image/{file_type}", "data": file_data}} #Prepare image part for Gemini
58
- elif file_type == 'csv':
59
- user_message = {"inline_data": {"mime_type": "text/csv", "data": file_data}} #Prepare csv part
60
-
61
- except Exception as e:
62
- print(f"Error reading file: {e}")
63
- messages.append(ChatMessage(role="assistant", content=f"Error reading file: {e}"))
64
- yield messages
65
- return
66
- else:
67
- messages.append(ChatMessage(role="assistant", content="Sorry, I cannot understand this input format. Please use text or upload a valid file.")) # More informative error
68
  yield messages
69
  return
70
 
71
-
72
  try:
 
 
 
73
  # Format chat history for Gemini
74
  chat_history = format_chat_history(messages)
75
 
76
  # Initialize Gemini chat
77
  chat = model.start_chat(history=chat_history)
78
- response = chat.send_message(user_message, stream=True) #Send the message part as is
79
 
80
- # Initialize buffers and flags - same as before
81
  thought_buffer = ""
82
  response_buffer = ""
83
  thinking_complete = False
84
 
85
- # Add initial thinking message - same as before
86
  messages.append(
87
  ChatMessage(
88
  role="assistant",
@@ -91,7 +61,7 @@ def stream_gemini_response(message_input: str|gr.File, messages: list) -> Iterat
91
  )
92
  )
93
 
94
- for chunk in response: #streaming logic - same as before
95
  parts = chunk.candidates[0].content.parts
96
  current_chunk = parts[0].text
97
 
@@ -155,15 +125,15 @@ def stream_gemini_response(message_input: str|gr.File, messages: list) -> Iterat
155
  )
156
  yield messages
157
 
158
- def user_message(message_text, file_upload, history: list) -> tuple[str, None, list]:
159
  """Adds user message to chat history"""
160
- msg = message_text if message_text else file_upload
161
- history.append(ChatMessage(role="user", content=msg if isinstance(msg, str) else msg.name)) #Store message or filename in history.
162
- return "", None, history #clear both input fields
163
 
164
  # Create the Gradio interface
165
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
166
- gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")
167
 
168
  chatbot = gr.Chatbot(
169
  type="messages",
@@ -178,13 +148,12 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
178
  lines=1,
179
  label="Chat Message",
180
  placeholder="Type your message here...",
181
- scale=3
182
  )
183
- file_upload = gr.File(label="Upload File", file_types=["image", ".csv"], scale=2) # Allow image and CSV files
184
 
185
  clear_button = gr.Button("Clear Chat", scale=1)
186
 
187
- # Add example prompts
188
  example_prompts = [
189
  ["Write a short poem about the sunset."],
190
  ["Explain the theory of relativity in simple terms."],
@@ -195,8 +164,8 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
195
 
196
  gr.Examples(
197
  examples=example_prompts,
198
- inputs=[input_box],
199
- label="Examples: Get Gemini to show its thinking process with these prompts!",
200
  examples_per_page=5 # Adjust as needed
201
  )
202
 
@@ -204,37 +173,29 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
204
  # Set up event handlers
205
  msg_store = gr.State("") # Store for preserving user message
206
 
207
-
208
  input_box.submit(
209
- user_message,
210
- inputs=[input_box, file_upload, chatbot],
211
- outputs=[input_box, file_upload, chatbot],
212
  queue=False
213
  ).then(
214
- stream_gemini_response,
215
- inputs=[input_box, chatbot], # Input either from text box or file, logic inside stream_gemini_response
216
- outputs=chatbot
217
- )
218
-
219
- file_upload.upload(
220
- user_message,
221
- inputs=[input_box, file_upload, chatbot], # even textbox is input here so clearing both will work
222
- outputs=[input_box, file_upload, chatbot],
223
  queue=False
224
  ).then(
225
- stream_gemini_response,
226
- inputs=[file_upload, chatbot], # Input is now the uploaded file.
227
  outputs=chatbot
228
  )
229
 
230
-
231
  clear_button.click(
232
  lambda: ([], "", ""),
233
  outputs=[chatbot, input_box, msg_store],
234
  queue=False
235
  )
236
 
237
- gr.Markdown( # Description moved to the bottom
238
  """
239
  <br><br><br> <!-- Add some vertical space -->
240
  ---
@@ -248,15 +209,14 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", n
248
  * Powered by Google's **Gemini 2.0 Flash** model.
249
  * Shows the model's **thoughts** before the final answer (experimental feature).
250
  * Supports **conversation history** for multi-turn chats.
251
- * Supports **Image and CSV file uploads** for analysis.
252
  * Uses **streaming** for a more interactive experience.
253
  **Instructions:**
254
- 1. Type your message in the input box or Upload a file below.
255
- 2. Press Enter/Submit or Upload to send.
256
  3. Observe the chatbot's "Thinking" process followed by the final response.
257
  4. Use the "Clear Chat" button to start a new conversation.
258
 
259
- *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary. File analysis capabilities may be limited depending on the model's experimental features.
260
  """
261
  )
262
 
 
27
  })
28
  return formatted_history
29
 
30
+ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
31
  """
32
+ Streams thoughts and response with conversation history support for text input only.
33
  """
34
+ if not user_message.strip(): # Robust check: if text message is empty or whitespace
35
+ messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed.")) # More specific message
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  yield messages
37
  return
38
 
 
39
  try:
40
+ print(f"\n=== New Request (Text) ===")
41
+ print(f"User message: {user_message}")
42
+
43
  # Format chat history for Gemini
44
  chat_history = format_chat_history(messages)
45
 
46
  # Initialize Gemini chat
47
  chat = model.start_chat(history=chat_history)
48
+ response = chat.send_message(user_message, stream=True)
49
 
50
+ # Initialize buffers and flags
51
  thought_buffer = ""
52
  response_buffer = ""
53
  thinking_complete = False
54
 
55
+ # Add initial thinking message
56
  messages.append(
57
  ChatMessage(
58
  role="assistant",
 
61
  )
62
  )
63
 
64
+ for chunk in response:
65
  parts = chunk.candidates[0].content.parts
66
  current_chunk = parts[0].text
67
 
 
125
  )
126
  yield messages
127
 
128
+ def user_message(msg: str, history: list) -> tuple[str, list]:
129
  """Adds user message to chat history"""
130
+ history.append(ChatMessage(role="user", content=msg))
131
+ return "", history
132
+
133
 
134
  # Create the Gradio interface
135
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo: # Using Soft theme with adjusted hues for a refined look
136
+ gr.Markdown("# Chat with Gemini 2.0 Flash and See its Thoughts 💭")
137
 
138
  chatbot = gr.Chatbot(
139
  type="messages",
 
148
  lines=1,
149
  label="Chat Message",
150
  placeholder="Type your message here...",
151
+ scale=4
152
  )
 
153
 
154
  clear_button = gr.Button("Clear Chat", scale=1)
155
 
156
+ # Add example prompts - removed file upload examples. Kept text focused examples.
157
  example_prompts = [
158
  ["Write a short poem about the sunset."],
159
  ["Explain the theory of relativity in simple terms."],
 
164
 
165
  gr.Examples(
166
  examples=example_prompts,
167
+ inputs=input_box,
168
+ label="Examples: Try these prompts to see Gemini's thinking!",
169
  examples_per_page=5 # Adjust as needed
170
  )
171
 
 
173
  # Set up event handlers
174
  msg_store = gr.State("") # Store for preserving user message
175
 
 
176
  input_box.submit(
177
+ lambda msg: (msg, msg, ""), # Store message and clear input
178
+ inputs=[input_box],
179
+ outputs=[msg_store, input_box, input_box],
180
  queue=False
181
  ).then(
182
+ user_message, # Add user message to chat
183
+ inputs=[msg_store, chatbot],
184
+ outputs=[input_box, chatbot],
 
 
 
 
 
 
185
  queue=False
186
  ).then(
187
+ stream_gemini_response, # Generate and stream response
188
+ inputs=[msg_store, chatbot],
189
  outputs=chatbot
190
  )
191
 
 
192
  clear_button.click(
193
  lambda: ([], "", ""),
194
  outputs=[chatbot, input_box, msg_store],
195
  queue=False
196
  )
197
 
198
+ gr.Markdown( # Description moved to the bottom - updated for text-only
199
  """
200
  <br><br><br> <!-- Add some vertical space -->
201
  ---
 
209
  * Powered by Google's **Gemini 2.0 Flash** model.
210
  * Shows the model's **thoughts** before the final answer (experimental feature).
211
  * Supports **conversation history** for multi-turn chats.
 
212
  * Uses **streaming** for a more interactive experience.
213
  **Instructions:**
214
+ 1. Type your message in the input box below or select an example.
215
+ 2. Press Enter or click Submit to send.
216
  3. Observe the chatbot's "Thinking" process followed by the final response.
217
  4. Use the "Clear Chat" button to start a new conversation.
218
 
219
+ *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
220
  """
221
  )
222