luminoussg committed
Commit fdaf591 · verified · 1 Parent(s): 89c6fc8

Update app.py

Files changed (1)
  1. app.py +39 -46
app.py CHANGED
@@ -74,7 +74,7 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"

-def respond(message: str, history: List[List[str]], session_id: str) -> str:
+def respond(message: str, history: List[List[str]], session_id: str) -> Generator[str, None, None]:
     """Handle sequential model responses with context preservation"""
     # Load or initialize session
     session = session_manager.load_session(session_id)
@@ -97,9 +97,8 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": message
     })

-    responses = []
-
-    # First model response
+    # First model
+    yield "🔵 Qwen2.5-Coder-32B-Instruct is thinking..."
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -108,9 +107,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response1
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-Coder-32B-Instruct: {response1}"})
-    responses.append(f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}")
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}"

-    # Second model response
+    # Second model
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 Qwen2.5-72B-Instruct is thinking..."
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -119,9 +119,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response2
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-72B-Instruct: {response2}"})
-    responses.append(f"🟣 **Qwen2.5-72B-Instruct**\n{response2}")
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}"

-    # Final model response
+    # Final model
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 Llama3.3-70B-Instruct is thinking..."
     response3 = query_model("Llama3.3-70B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -130,50 +131,42 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response3
     })
     messages.append({"role": "assistant", "content": f"Llama3.3-70B-Instruct: {response3}"})
-    responses.append(f"🟡 **Llama3.3-70B-Instruct**\n{response3}")

     # Save final session state
     session_manager.save_session(session_id, session)

-    # Return responses
-    return "\n\n".join(responses)
-
-# Custom CSS for styling
-css = """
-.message { padding: 15px; margin: 10px 0; border-radius: 10px; }
-.assistant { background: #f8fafc; border-left: 4px solid #3b82f6; }
-.user { background: #eff6ff; border-left: 4px solid #60a5fa; }
-.model-name { font-weight: 600; color: #1e40af; margin-bottom: 8px; }
-.thinking { color: #6b7280; font-style: italic; }
-"""
+    # Return final combined response
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 **Llama3.3-70B-Instruct**\n{response3}"

 # Create the Gradio interface
-demo = gr.ChatInterface(
-    fn=respond,
-    title="Multi-LLM Collaboration Chat",
-    description="Experience collaborative AI thinking with three powerful language models",
-    examples=[
-        ["Explain how quantum computing works"],
-        ["Write a Python function to find prime numbers"],
-    ],
-    additional_inputs=[gr.State(session_manager.create_session)],
-    chatbot=gr.Chatbot(
-        height=600,
-        show_label=False,
-        bubble_full_width=False,
-        show_copy_button=True,
-        container=True,
-        sanitize_html=False,
-        render_markdown=True
-    ),
-    theme=gr.themes.Soft(
-        primary_hue="blue",
-        secondary_hue="indigo",
-        neutral_hue="slate",
-        font=("Inter", "sans-serif"),
-    ),
-    css=css,
-)
+with gr.Blocks() as demo:
+    gr.Markdown("## Multi-LLM Collaboration Chat")
+
+    with gr.Row():
+        session_id = gr.State(session_manager.create_session)
+        new_session = gr.Button("🔄 New Session")
+
+    chatbot = gr.Chatbot(height=600)
+    msg = gr.Textbox(label="Message")
+
+    def on_new_session():
+        new_id = session_manager.create_session()
+        return new_id, []
+
+    def user(message, history, session_id):
+        return "", history + [[message, None]]
+
+    def bot(history, session_id):
+        if history and history[-1][1] is None:
+            message = history[-1][0]
+            for response in respond(message, history[:-1], session_id):
+                history[-1][1] = response
+                yield history
+
+    msg.submit(user, [msg, chatbot, session_id], [msg, chatbot]).then(
+        bot, [chatbot, session_id], [chatbot]
+    )
+    new_session.click(on_new_session, None, [session_id, chatbot])

 if __name__ == "__main__":
     demo.launch(share=True)
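
One detail worth noting when reading the new signature: `Generator` must be in scope for the annotation to resolve, and no import change appears in the shown hunks. If app.py does not already import it, the file's typing import would presumably look something like:

```python
from typing import Dict, Generator, List  # Generator is required by the new respond() annotation
```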
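
For readers without the rest of app.py: the hunks call into a `session_manager` whose class is defined outside this diff. A minimal in-memory sketch, assuming only the three methods visible above (`create_session`, `load_session`, `save_session`), might look like this; the real implementation (for example, file-backed persistence on the Space) may differ:

```python
import uuid
from typing import Any, Dict

class SessionManager:
    """Hypothetical stand-in for the session store used by respond()."""

    def __init__(self) -> None:
        self._store: Dict[str, Dict[str, Any]] = {}

    def create_session(self) -> str:
        # Used by gr.State and the "New Session" button to mint a fresh id.
        session_id = str(uuid.uuid4())
        self._store[session_id] = {"history": []}
        return session_id

    def load_session(self, session_id: str) -> Dict[str, Any]:
        # respond() expects a dict with a "history" list, even for unknown ids.
        return self._store.setdefault(session_id, {"history": []})

    def save_session(self, session_id: str, session: Dict[str, Any]) -> None:
        self._store[session_id] = session

session_manager = SessionManager()
```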
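
Likewise, only the `except` tail of `query_model` is visible in the first hunk. Assuming the Space queries these models through the Hugging Face Inference API via `huggingface_hub` (an assumption, as is the short-name-to-repo-id mapping below), a sketch consistent with the visible error handling could be:

```python
import os
from typing import Dict, List

from huggingface_hub import InferenceClient

# Assumed mapping from the short names used in respond() to Hub repo ids.
MODEL_REPOS = {
    "Qwen2.5-Coder-32B-Instruct": "Qwen/Qwen2.5-Coder-32B-Instruct",
    "Qwen2.5-72B-Instruct": "Qwen/Qwen2.5-72B-Instruct",
    "Llama3.3-70B-Instruct": "meta-llama/Llama-3.3-70B-Instruct",
}

def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
    """Send the running conversation to one model and return its reply text."""
    try:
        client = InferenceClient(model=MODEL_REPOS[model_name], token=os.getenv("HF_TOKEN"))
        result = client.chat_completion(messages=messages, max_tokens=1024)
        return result.choices[0].message.content
    except Exception as e:
        return f"{model_name} error: {str(e)}"  # matches the tail shown in the diff
```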