Update app.py
app.py CHANGED
@@ -74,7 +74,7 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"

-def respond(message: str, history: List[List[str]], session_id: str) -> str:
+def respond(message: str, history: List[List[str]], session_id: str) -> Generator[str, None, None]:
     """Handle sequential model responses with context preservation"""
     # Load or initialize session
     session = session_manager.load_session(session_id)
@@ -97,9 +97,8 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": message
     })

-
-
-    # First model response
+    # First model
+    yield "🔵 Qwen2.5-Coder-32B-Instruct is thinking..."
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -108,9 +107,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response1
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-Coder-32B-Instruct: {response1}"})
-
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}"

-    # Second model
+    # Second model
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 Qwen2.5-72B-Instruct is thinking..."
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -119,9 +119,10 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response2
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-72B-Instruct: {response2}"})
-
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}"

-    # Final model
+    # Final model
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 Llama3.3-70B-Instruct is thinking..."
     response3 = query_model("Llama3.3-70B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -130,50 +131,42 @@ def respond(message: str, history: List[List[str]], session_id: str) -> str:
         "content": response3
     })
     messages.append({"role": "assistant", "content": f"Llama3.3-70B-Instruct: {response3}"})
-    responses.append(f"🟡 **Llama3.3-70B-Instruct**\n{response3}")

     # Save final session state
     session_manager.save_session(session_id, session)

-    # Return
-
-
-# Custom CSS for styling
-css = """
-.message { padding: 15px; margin: 10px 0; border-radius: 10px; }
-.assistant { background: #f8fafc; border-left: 4px solid #3b82f6; }
-.user { background: #eff6ff; border-left: 4px solid #60a5fa; }
-.model-name { font-weight: 600; color: #1e40af; margin-bottom: 8px; }
-.thinking { color: #6b7280; font-style: italic; }
-"""
+    # Return final combined response
+    yield f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 **Llama3.3-70B-Instruct**\n{response3}"

 # Create the Gradio interface
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-)
-
-
-
-
-
-
-
-
+with gr.Blocks() as demo:
+    gr.Markdown("## Multi-LLM Collaboration Chat")
+
+    with gr.Row():
+        session_id = gr.State(session_manager.create_session)
+        new_session = gr.Button("🔄 New Session")
+
+    chatbot = gr.Chatbot(height=600)
+    msg = gr.Textbox(label="Message")
+
+    def on_new_session():
+        new_id = session_manager.create_session()
+        return new_id, []
+
+    def user(message, history, session_id):
+        return "", history + [[message, None]]
+
+    def bot(history, session_id):
+        if history and history[-1][1] is None:
+            message = history[-1][0]
+            for response in respond(message, history[:-1], session_id):
+                history[-1][1] = response
+                yield history
+
+    msg.submit(user, [msg, chatbot, session_id], [msg, chatbot]).then(
+        bot, [chatbot, session_id], [chatbot]
+    )
+    new_session.click(on_new_session, None, [session_id, chatbot])

 if __name__ == "__main__":
     demo.launch(share=True)
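The change is easier to follow outside the diff: respond() becomes a generator, and each yield overwrites the assistant slot of the last chat turn, so the chatbot message grows in place as each model finishes. Below is a minimal, self-contained sketch of that pattern with the model calls stubbed out (time.sleep and the placeholder model names stand in for query_model; this is illustrative, not the committed code). It assumes the tuple-style history that gr.Chatbot used by default; note also that the new Generator[str, None, None] annotation needs Generator imported from typing, which the visible hunks don't add, so the full file presumably already has it.

import time
from typing import Generator

import gradio as gr


def respond(message: str) -> Generator[str, None, None]:
    """Yield progressively longer transcripts, one model at a time."""
    transcript = ""
    for label, model in (("🔵", "model-a"), ("🟣", "model-b"), ("🟡", "model-c")):
        yield transcript + f"{label} {model} is thinking..."  # interim status line
        time.sleep(1)                                         # stand-in for a real query_model(...) call
        transcript += f"{label} **{model}**\n(answer to: {message})\n\n"
        yield transcript                                      # all completed turns so far


with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Message")

    def user(message, history):
        # Show the user turn immediately; the assistant slot starts empty.
        return "", history + [[message, None]]

    def bot(history):
        message = history[-1][0]
        for update in respond(message):
            history[-1][1] = update  # overwrite the assistant slot on every yield
            yield history            # each yield triggers a chatbot re-render

    msg.submit(user, [msg, chatbot], [msg, chatbot]).then(bot, chatbot, chatbot)

if __name__ == "__main__":
    demo.launch()

Re-yielding the full accumulated transcript, rather than only the newest chunk, is what keeps all three model answers inside a single assistant message; the alternative design would append one chat turn per model.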