Update app.py
app.py CHANGED
@@ -1,60 +1,52 @@
 import gradio as gr
-import
+import spaces
+import transformers_gradio
+from functools import lru_cache

-#
-
-
-
-
-
-
-
-        print(f"Error loading model {model_name}: {e}")
-        return None
+# Cache model loading to optimize performance
+@lru_cache(maxsize=3)
+def load_hf_model(model_name):
+    return gr.load(
+        name=f"deepseek-ai/{model_name}",
+        src=transformers_gradio.registry,
+        api_name="/chat"
+    )

-# Load
-
-
-
+# Load all models at startup
+MODELS = {
+    "DeepSeek-R1-Distill-Qwen-32B": load_hf_model("DeepSeek-R1-Distill-Qwen-32B"),
+    "DeepSeek-R1": load_hf_model("DeepSeek-R1"),
+    "DeepSeek-R1-Zero": load_hf_model("DeepSeek-R1-Zero")
+}

 # --- Chatbot function ---
 def chatbot(input_text, history, model_choice, system_message, max_new_tokens, temperature, top_p):
     history = history or []
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # Call the model's 'predict' function.
+
+    # Get the selected model component
+    model_component = MODELS[model_choice]
+
+    # Create payload for the model
+    payload = {
+        "messages": [{"role": "user", "content": input_text}],
+        "system": system_message,
+        "max_tokens": max_new_tokens,
+        "temperature": temperature,
+        "top_p": top_p
+    }
+
+    # Run inference using the selected model
     try:
-
+        response = model_component(payload)
+        assistant_response = response[-1]["content"]
     except Exception as e:
-
-
-
-
-
-    # Check if model_output is iterable and has expected number of elements
-    if not isinstance(model_output, (list, tuple)) or len(model_output) < 2:
-        error_message = "Model output does not have the expected format."
-        history.append((input_text, error_message))
-        return history, history, "", model_choice, system_message, max_new_tokens, temperature, top_p
-
-    response = model_output[-1][1] if model_output[-1][1] else "Model did not return a response."
-    history.append((input_text, response))
-    return history, history, "", model_choice, system_message, max_new_tokens, temperature, top_p
+        assistant_response = f"Error: {str(e)}"
+
+    history.append((input_text, assistant_response))
+    return history, history, ""

 # --- Gradio Interface ---
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
+with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek Chatbot") as demo:
     gr.Markdown(
         """
         # DeepSeek Chatbot
@@ -73,11 +65,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         submit_btn = gr.Button("Submit", variant="primary")
         clear_btn = gr.ClearButton([msg, chatbot_output])

-    # Options moved below the chat interface
     with gr.Row():
         with gr.Accordion("Options", open=True):
             model_choice = gr.Radio(
-                choices=
+                choices=list(MODELS.keys()),
                 label="Choose a Model",
                 value="DeepSeek-R1"
             )
@@ -97,21 +88,24 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 minimum=0.10, maximum=1.00, value=0.90, label="Top-p (nucleus sampling)"
             )

-    # Maintain chat history
     chat_history = gr.State([])

     # Event handling
     submit_btn.click(
         chatbot,
         [msg, chat_history, model_choice, system_message, max_new_tokens, temperature, top_p],
-        [chatbot_output, chat_history, msg, model_choice, system_message, max_new_tokens, temperature, top_p]
+        [chatbot_output, chat_history, msg]
     )
     msg.submit(
         chatbot,
         [msg, chat_history, model_choice, system_message, max_new_tokens, temperature, top_p],
-        [chatbot_output, chat_history, msg, model_choice, system_message, max_new_tokens, temperature, top_p]
+        [chatbot_output, chat_history, msg]
     )

-#
+# Add GPU support for Hugging Face Spaces
+demo.fn = spaces.GPU()(demo.fn)
+for fn in demo.fns.values():
+    fn.api_name = False
+
 if __name__ == "__main__":
     demo.launch()
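Note on the caching change: load_hf_model is wrapped in functools.lru_cache, so each of the three DeepSeek endpoints is constructed once at startup and any repeated call with the same name is a cache hit. A minimal sketch of that behavior, with a stand-in loader in place of the real gr.load(...) call (the print and object() bodies are illustrative only):

from functools import lru_cache

@lru_cache(maxsize=3)
def load_hf_model(model_name):
    # Stand-in for the expensive gr.load(...) call; lru_cache ensures
    # this body runs at most once per distinct model name.
    print(f"loading {model_name}")
    return object()

first = load_hf_model("DeepSeek-R1")
second = load_hf_model("DeepSeek-R1")  # cache hit: the loader body does not run again
assert first is second
print(load_hf_model.cache_info())      # hits=1, misses=1, maxsize=3, currsize=1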
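On the spaces.GPU wiring at the end of the file: the commit decorates demo.fn after the Blocks is built. For comparison, the pattern the ZeroGPU docs describe is decorating the GPU-bound function itself, as in this minimal sketch (generate is a hypothetical stand-in for the app's real inference call):

import spaces

@spaces.GPU  # requests ZeroGPU hardware for the duration of each call
def generate(prompt):
    # Hypothetical body; the real app would run model inference here.
    return prompt.upper()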