luminoussg committed on
Commit 89c6fc8 · verified · 1 Parent(s): 0fac2da

Update app.py

Files changed (1):
  1. app.py +9 -9
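
The diff below removes the streaming `yield`s from `respond` and changes its return type from a `Generator` to `str`: each model's reply is appended to a `responses` list and the function returns one combined markdown string. A minimal sketch of just that new return contract (the helper name `combine_responses` is hypothetical; the real function also queries the models and persists the session):

```python
def combine_responses(response1: str, response2: str, response3: str) -> str:
    # Mirrors what the updated respond() does with the three model replies:
    # one markdown block per model, joined by blank lines.
    responses = [
        f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}",
        f"🟣 **Qwen2.5-72B-Instruct**\n{response2}",
        f"🟡 **Llama3.3-70B-Instruct**\n{response3}",
    ]
    return "\n\n".join(responses)
```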
app.py CHANGED

```diff
@@ -74,7 +74,7 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> str:
     except Exception as e:
         return f"{model_name} error: {str(e)}"
 
-def respond(message: str, history: List[List[str]], session_id: str) -> Generator[tuple[str, str], None, None]:
+def respond(message: str, history: List[List[str]], session_id: str) -> str:
     """Handle sequential model responses with context preservation"""
     # Load or initialize session
     session = session_manager.load_session(session_id)
@@ -97,8 +97,9 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
         "content": message
     })
 
+    responses = []
+
     # First model response
-    yield message, "💭 *Qwen2.5-Coder-32B-Instruct is thinking...*"
     response1 = query_model("Qwen2.5-Coder-32B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -107,10 +108,9 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
         "content": response1
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-Coder-32B-Instruct: {response1}"})
-    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}"
+    responses.append(f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}")
 
     # Second model response
-    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n💭 *Qwen2.5-72B-Instruct is thinking...*"
     response2 = query_model("Qwen2.5-72B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -119,10 +119,9 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
         "content": response2
     })
     messages.append({"role": "assistant", "content": f"Qwen2.5-72B-Instruct: {response2}"})
-    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}"
+    responses.append(f"🟣 **Qwen2.5-72B-Instruct**\n{response2}")
 
     # Final model response
-    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n💭 *Llama3.3-70B-Instruct is thinking...*"
     response3 = query_model("Llama3.3-70B-Instruct", messages)
     session["history"].append({
         "timestamp": datetime.now().isoformat(),
@@ -131,12 +130,13 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
         "content": response3
     })
     messages.append({"role": "assistant", "content": f"Llama3.3-70B-Instruct: {response3}"})
+    responses.append(f"🟡 **Llama3.3-70B-Instruct**\n{response3}")
 
     # Save final session state
     session_manager.save_session(session_id, session)
 
-    # Return final combined response
-    yield message, f"🔵 **Qwen2.5-Coder-32B-Instruct**\n{response1}\n\n🟣 **Qwen2.5-72B-Instruct**\n{response2}\n\n🟡 **Llama3.3-70B-Instruct**\n{response3}"
+    # Return responses
+    return "\n\n".join(responses)
 
 # Custom CSS for styling
 css = """
@@ -160,11 +160,11 @@ demo = gr.ChatInterface(
     chatbot=gr.Chatbot(
         height=600,
         show_label=False,
-        avatar_images=("👤", "🤖"),
         bubble_full_width=False,
         show_copy_button=True,
         container=True,
         sanitize_html=False,
+        render_markdown=True
     ),
     theme=gr.themes.Soft(
         primary_hue="blue",
```
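
Only fragments of the `gr.ChatInterface` call are visible in the hunks above. A minimal sketch of how the non-streaming `respond` is presumably wired up, assuming (not shown in this diff) that `session_id` arrives via `additional_inputs` and that the `css` string defined earlier in app.py is passed through:

```python
import gradio as gr

def respond(message: str, history: list, session_id: str) -> str:
    # Stub standing in for the real function above: it queries
    # Qwen2.5-Coder-32B-Instruct, Qwen2.5-72B-Instruct and Llama3.3-70B-Instruct
    # in sequence and returns "\n\n".join(responses).
    return f"🔵 **Qwen2.5-Coder-32B-Instruct**\n(echo) {message}"

css = ""  # placeholder for the custom CSS block defined in app.py

demo = gr.ChatInterface(
    respond,  # now returns a single markdown string per turn instead of yielding
    additional_inputs=[gr.Textbox(label="Session ID")],  # assumed source of session_id
    chatbot=gr.Chatbot(
        height=600,
        show_label=False,
        bubble_full_width=False,
        show_copy_button=True,
        container=True,
        sanitize_html=False,
        render_markdown=True,  # added in this commit so the **model headers** render
    ),
    theme=gr.themes.Soft(primary_hue="blue"),
    css=css,
)

if __name__ == "__main__":
    demo.launch()
```

Because the function no longer yields intermediate states, the "thinking..." placeholders are gone: the chatbot shows nothing for the turn until all three models have answered, then renders the three replies as one markdown message.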