Create app.py
app.py
ADDED
@@ -0,0 +1,54 @@
import gradio as gr
import openai

# We assume the Hugging Face Inference API is OpenAI-compatible.
# For each LLM, set openai.api_base to the model's endpoint and then call openai.ChatCompletion.
# Note: openai.ChatCompletion is the legacy interface; it requires openai<1.0.

# Your Hugging Face API key
HF_API_KEY = "hf_1234"

# Model endpoints on Hugging Face
MODEL_ENDPOINTS = {
    "Qwen2.5-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
    "Llama3.3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct",
    "Qwen2.5-Coder-32B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct",
}

# Query a specific model through the OpenAI-compatible ChatCompletion interface
def query_model(prompt, model_endpoint):
    openai.api_key = HF_API_KEY
    openai.api_base = model_endpoint
    response = openai.ChatCompletion.create(
        model="any-model-placeholder",  # placeholder name, not actually used by the HF endpoint
        messages=[{"role": "user", "content": prompt}],
        max_tokens=512,
        temperature=0.7,
    )
    return response.choices[0].message["content"]

def chat_with_models(user_input, history):
    # Let each model provide its own contribution
    responses = []
    for model_name, endpoint in MODEL_ENDPOINTS.items():
        model_response = query_model(user_input, endpoint)
        responses.append(f"**{model_name}**: {model_response}")

    # Combine all responses into a single answer
    combined_answer = "\n\n".join(responses)
    history.append((user_input, combined_answer))
    return history

with gr.Blocks() as demo:
    gr.Markdown("# Multi-LLM Chatbot using Hugging Face Inference API")

    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Your Message")
    clear = gr.Button("Clear")

    def clear_chat():
        return []

    # The chatbot component doubles as the history state, so it only needs
    # to appear once in each outputs list.
    msg.submit(chat_with_models, [msg, chatbot], [chatbot])
    clear.click(fn=clear_chat, outputs=[chatbot])

demo.launch()
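
For the Space to actually run this file, its dependencies have to match the legacy interface used above. A minimal requirements.txt sketch under that assumption (this file is not part of the commit; the pin is illustrative):

# requirements.txt (hypothetical): openai must stay below 1.0,
# since openai.ChatCompletion was removed in the 1.x client
gradio
openai<1.0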
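
For reference, here is a sketch of the same query written against the openai>=1.0 client. The /v1 suffix on base_url is an assumption about how the endpoint exposes its OpenAI-compatible route, not something confirmed by this commit:

from openai import OpenAI

def query_model_v1(prompt, model_endpoint):
    # Assumption: the endpoint serves an OpenAI-style API under /v1.
    client = OpenAI(api_key=HF_API_KEY, base_url=f"{model_endpoint}/v1")
    response = client.chat.completions.create(
        model="any-model-placeholder",  # ignored by the endpoint, as above
        messages=[{"role": "user", "content": prompt}],
        max_tokens=512,
        temperature=0.7,
    )
    return response.choices[0].message.content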
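
An alternative that sidesteps the OpenAI-compatibility assumption entirely is huggingface_hub's own InferenceClient, which talks to the same Inference API natively. A sketch, assuming a recent huggingface_hub release with chat_completion support:

from huggingface_hub import InferenceClient

def query_model_hf(prompt, model_id):
    # model_id is a repo id such as "Qwen/Qwen2.5-72B-Instruct",
    # not a full endpoint URL as in MODEL_ENDPOINTS above.
    client = InferenceClient(model=model_id, token=HF_API_KEY)
    response = client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=512,
        temperature=0.7,
    )
    return response.choices[0].message.content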