Update app.py
app.py
CHANGED
@@ -125,31 +125,8 @@ def user_message(msg: str, history: list) -> tuple[str, list]:
 
 
 # Create the Gradio interface
-with gr.Blocks(theme=gr.themes.Citrus(), fill_height=True) as demo:
-    gr.Markdown(
-        """
-        # Gemini 2.0 Flash 'Thinking' Chatbot 💭
-
-        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
-        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
-
-        **Key Features:**
-
-        * Powered by Google's **Gemini 2.0 Flash** model.
-        * Shows the model's **thoughts** before the final answer (experimental feature).
-        * Supports **conversation history** for multi-turn chats.
-        * Uses **streaming** for a more interactive experience.
-
-        **Instructions:**
-
-        1. Type your message in the input box below.
-        2. Press Enter or click Submit to send.
-        3. Observe the chatbot's "Thinking" process followed by the final response.
-        4. Use the "Clear Chat" button to start a new conversation.
-
-        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
-        """
-    )
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:  # Using Soft theme with adjusted hues for a refined look
+    gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")
 
     chatbot = gr.Chatbot(
         type="messages",
@@ -194,6 +171,34 @@ with gr.Blocks(theme=gr.themes.Citrus(), fill_height=True) as demo:
         queue=False
     )
 
+    gr.Markdown(  # Description moved to the bottom
+        """
+        <br><br><br> <!-- Add some vertical space -->
+        ---
+        ### About this Chatbot
+
+        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
+        You can observe the model's thought process as it generates responses, displayed with the "⚙️ Thinking" prefix.
+
+        **Key Features:**
+
+        * Powered by Google's **Gemini 2.0 Flash** model.
+        * Shows the model's **thoughts** before the final answer (experimental feature).
+        * Supports **conversation history** for multi-turn chats.
+        * Uses **streaming** for a more interactive experience.
+
+        **Instructions:**
+
+        1. Type your message in the input box below.
+        2. Press Enter or click Submit to send.
+        3. Observe the chatbot's "Thinking" process followed by the final response.
+        4. Use the "Clear Chat" button to start a new conversation.
+
+        *Please note*: The 'thinking' feature is experimental and the quality of thoughts may vary.
+        """
+    )
+
+
 # Launch the interface
 if __name__ == "__main__":
     demo.launch(debug=True)
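For reference, here is a minimal sketch of how the restructured layout reads once both hunks are applied: the Soft theme and a one-line title at the top, the chatbot in the middle, and the long description moved below the controls. The textbox, buttons, and Gemini streaming handler live in the unchanged region between the two hunks, so the respond() placeholder and the input wiring below are assumptions for illustration only, not the repo's actual code.

import gradio as gr

# Hypothetical stand-in for the unchanged Gemini 2.0 Flash streaming handler.
def respond(msg: str, history: list) -> list:
    history = history + [
        {"role": "user", "content": msg},
        {"role": "assistant", "content": f"Echo: {msg}"},
    ]
    return history

with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal",
                                    secondary_hue="slate",
                                    neutral_hue="neutral")) as demo:
    gr.Markdown("# Gemini 2.0 Flash 'Thinking' Chatbot 💭")    # short title stays at the top

    chatbot = gr.Chatbot(type="messages")                       # conversation area
    msg = gr.Textbox(label="Message")                           # assumed input box (unchanged region)
    msg.submit(respond, [msg, chatbot], chatbot, queue=False)   # assumed wiring (unchanged region)

    gr.Markdown(                                                # full description now sits below the controls
        """
        ---
        ### About this Chatbot
        This chatbot demonstrates the experimental 'thinking' capability of the **Gemini 2.0 Flash** model.
        """
    )

if __name__ == "__main__":
    demo.launch(debug=True)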