Update app.py
app.py CHANGED

```diff
@@ -1,64 +1,88 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+# ---------------- CONFIG ----------------
+MODEL_REPO = "HuggingFaceH4/zephyr-7b-beta"
+SYSTEM_PROMPT_DEFAULT = "You are Zephyr, a helpful, concise and polite AI assistant."
+
+MAX_NEW_TOKENS_DEFAULT = 512
+TEMP_DEFAULT = 0.7
+TOP_P_DEFAULT = 0.95
+
+# Create client (calls Hugging Face Inference API, not local model)
+client = InferenceClient(MODEL_REPO)
+
+# ---------------- CHAT FUNCTION ----------------
+def stream_response(message, chat_history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
+    for user_msg, bot_msg in chat_history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if bot_msg:
+            messages.append({"role": "assistant", "content": bot_msg})
     messages.append({"role": "user", "content": message})
 
     response = ""
-
-    for message in client.chat_completion(
+    for msg in client.chat_completion(
         messages,
         max_tokens=max_tokens,
         stream=True,
         temperature=temperature,
         top_p=top_p,
     ):
-        token = message.choices[0].delta.content
-
+        token = msg.choices[0].delta.content or ""
         response += token
-        yield response
+        yield "", chat_history + [(message, response)]
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+# ---------------- UI ----------------
+with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet", secondary_hue="pink")) as demo:
+    gr.Markdown(
+        """
+        # 📱 Zephyr-7B (Hosted on Hugging Face Inference API)
+        Optimized for **mobile-friendly chat** ✨
+        <span style="opacity:0.7">Powered by HuggingFaceH4/zephyr-7b-beta</span>
+        """
+    )
+
+    chatbot = gr.Chatbot(
+        height=500,
+        bubble_full_width=False,
+        show_copy_button=True,
+        label="Chat"
+    )
+
+    with gr.Row():
+        msg = gr.Textbox(
+            label="💬 Message",
+            placeholder="Type your message…",
+            scale=6
+        )
+        send_btn = gr.Button("🚀", variant="primary", scale=1)
+        clear_btn = gr.Button("🧹", scale=1)
+
+    with gr.Accordion("⚙️ Settings", open=False):
+        system_prompt = gr.Textbox(
+            label="System Prompt",
+            value=SYSTEM_PROMPT_DEFAULT,
+            lines=3
+        )
+        temperature = gr.Slider(0.1, 1.5, value=TEMP_DEFAULT, step=0.1, label="Temperature")
+        top_p = gr.Slider(0.1, 1.0, value=TOP_P_DEFAULT, step=0.05, label="Top-p")
+        max_tokens = gr.Slider(32, 2048, value=MAX_NEW_TOKENS_DEFAULT, step=16, label="Max new tokens")
+
+    # Events (streaming response)
+    send_btn.click(
+        stream_response,
+        [msg, chatbot, system_prompt, max_tokens, temperature, top_p],
+        [msg, chatbot]
+    )
+    msg.submit(
+        stream_response,
+        [msg, chatbot, system_prompt, max_tokens, temperature, top_p],
+        [msg, chatbot]
+    )
+    clear_btn.click(lambda: None, None, chatbot, queue=False)
 
 if __name__ == "__main__":
     demo.launch()
```
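A quick way to sanity-check the new streaming loop without launching the UI is to call the Inference API directly. The sketch below is not part of the commit; it assumes `huggingface_hub` is installed and that the hosted `HuggingFaceH4/zephyr-7b-beta` endpoint is reachable from your account (pass `token=...` to `InferenceClient` if your access requires it):

```python
from huggingface_hub import InferenceClient

# Assumption: the public Inference API serves this model for your account;
# add token="hf_..." if authentication or rate limits require it.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are Zephyr, a helpful, concise and polite AI assistant."},
    {"role": "user", "content": "Say hello in one short sentence."},
]

response = ""
for chunk in client.chat_completion(messages, max_tokens=64, stream=True, temperature=0.7, top_p=0.95):
    token = chunk.choices[0].delta.content or ""  # delta.content can be None on some chunks
    response += token
    print(token, end="", flush=True)
print()
```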
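Design note on the event wiring: `stream_response` is a generator, and each yielded pair maps positionally onto the declared outputs `[msg, chatbot]`, so every `yield "", chat_history + [(message, response)]` clears the textbox and repaints the chat with the partial reply in one step. A minimal illustration of that generator-to-outputs contract, using a hypothetical `echo` handler invented for the example (not from this commit):

```python
import time
import gradio as gr

# Hypothetical handler: each yield fills the outputs [box, chat] in order,
# so "" keeps the textbox empty while the chatbot shows the growing reply.
def echo(message, history):
    partial = ""
    for ch in message:
        time.sleep(0.05)  # simulate per-token latency
        partial += ch
        yield "", history + [(message, partial)]

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    box = gr.Textbox()
    box.submit(echo, [box, chat], [box, chat])

if __name__ == "__main__":
    demo.launch()
```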