xqt committed on
Commit
6efabbd
·
1 Parent(s): 3712b0a

ADD: initial implementation of ChatSeek using DeepSeek with Gradio interface

Browse files
Files changed (2) hide show
  1. README.md +3 -3
  2. main.py +170 -0
README.md CHANGED
@@ -1,12 +1,12 @@
1
  ---
2
  title: ChatSeek
3
- emoji: 🐨
4
  colorFrom: blue
5
  colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.13.2
8
- app_file: app.py
9
- pinned: false
10
  license: apache-2.0
11
  short_description: Chat interface to use DeepSeekV3 and DeepSeekR1
12
  ---
 
1
  ---
2
  title: ChatSeek
3
+ emoji: 🐳
4
  colorFrom: blue
5
  colorTo: gray
6
  sdk: gradio
7
  sdk_version: 5.13.2
8
+ app_file: main.py
9
+ pinned: true
10
  license: apache-2.0
11
  short_description: Chat interface to use DeepSeekV3 and DeepSeekR1
12
  ---
main.py ADDED
@@ -0,0 +1,170 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import datetime
import json
import os
import re
import sys
import tempfile
import uuid

import gradio
from huggingface_hub import InferenceClient
13
# Seed conversation: one (initially empty) system message that every new chat
# starts from.  call_chatbot() fills the content in at request time.
history = [
    {"role": "system", "content": ""},
]

# Directory used to persist chat transcripts as JSON.  Fall back to the
# platform temp dir so the value is never None (which would otherwise turn
# later f-string paths into the literal "None/<chat_id>.json").
tmp_dir = os.getenv('TMP') or os.getenv('TMPDIR') or tempfile.gettempdir()
18
+
19
def generate_uuid():
    """Build a unique chat identifier: a sortable timestamp prefix plus a random UUID4."""
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    return f"{timestamp}{uuid.uuid4()}"
22
+
23
def write_current_chat_to_file(current_chat, directory = None):
    """Persist *current_chat* as "<directory>/<chat_id>.json" so it can be downloaded later.

    Args:
        current_chat: dict carrying at least a "chat_id" key; serialized verbatim.
        directory: target directory; defaults to the module-level `tmp_dir`.

    Overwrites any previous snapshot of the same chat id.
    """
    target_dir = tmp_dir if directory is None else directory
    # Explicit UTF-8 so transcripts round-trip regardless of platform locale.
    with open(f"{target_dir}/{current_chat['chat_id']}.json", "w", encoding="utf-8") as f:
        json.dump(current_chat, f, indent = 4)
26
+
27
def process_input_message(message_box, current_chat):
    """Append the user's typed text to the running chat history and return the updated chat state."""
    user_turn = {"role": "user", "content": message_box["text"]}
    current_chat["chat_history"].append(user_turn)
    return current_chat
30
+
31
def get_text_between_tags(text, start_tag, end_tag):
    """Return the first span of *text* enclosed by start_tag/end_tag, or "" when absent.

    Matching is non-greedy and spans newlines (DOTALL).
    """
    opener = re.escape(start_tag)
    closer = re.escape(end_tag)
    found = re.search(f'{opener}(.*?){closer}', text, re.DOTALL)
    if found is None:
        return ""
    return found.group(1)
35
+
36
def remove_text_between_tags(text, start_tag, end_tag):
    """Return *text* with every start_tag...end_tag span (tags included) deleted."""
    tagged_span = re.escape(start_tag) + '.*?' + re.escape(end_tag)
    return re.sub(tagged_span, '', text, flags=re.DOTALL)
39
+
40
def call_chatbot(api_token, current_chat, system_message, max_tokens, temperature, top_p):
    """Stream a DeepSeek-R1 reply for the current chat, yielding UI updates per token.

    Args:
        api_token: Hugging Face API token (or the key mapped by process_token).
        current_chat: dict with "chat_id" and "chat_history" (OpenAI-style messages).
        system_message: text injected into the leading system message slot.
        max_tokens / temperature / top_p: sampling parameters forwarded to the API.

    Yields:
        (chat_history, current_chat) tuples so Gradio can refresh both the
        Chatbot component and the JSON state while the stream is in flight.
    """
    client = InferenceClient(
        provider = "hf-inference",
        api_key = api_token
    )

    # The first history entry is always the system slot (see module-level `history`).
    current_chat["chat_history"][0]["content"] = system_message
    # Placeholder assistant turn that the streaming loop fills in incrementally.
    current_chat["chat_history"].append({"content": "", "role": "assistant", "metadata": {}})

    stream = client.chat.completions.create(
        model = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        messages = current_chat["chat_history"],
        max_tokens = max_tokens,
        temperature = temperature,
        top_p = top_p,
        stream = True
    )

    response = ""
    for chunk in stream:
        # Guard against chunks whose delta carries no text (e.g. the final
        # chunk); concatenating None would raise a TypeError mid-stream.
        token = chunk.choices[0].delta.content
        if token:
            response = response + token
        current_chat["chat_history"][-1]["content"] = response
        yield current_chat["chat_history"], current_chat

    # R1 wraps its chain-of-thought in <think>...</think>.  Split it into a
    # collapsible "Thoughts" message plus a clean final answer message.
    current_chat["chat_history"][-1]["metadata"]["title"] = "💭 Thoughts:"
    current_chat["chat_history"][-1]["content"] = get_text_between_tags(response, "<think>", "</think>")
    current_chat["chat_history"].append({"content": remove_text_between_tags(response, "<think>", "</think>"), "role": "assistant"})

    yield current_chat["chat_history"], current_chat
70
+
71
def reset_chat():
    """Rebuild every interactive component to its pristine state for a brand-new chat.

    Returns:
        Fresh Chatbot, message box, chat-state JSON, download button, and the
        four parameter widgets, in the order wired into reset_button.click(...).
    """
    chatbot = gradio.Chatbot(history, type = "messages")
    message_box = gradio.MultimodalTextbox(
        value = "",
        interactive = True,
        file_count = "multiple",
        placeholder = "Enter message...",
        show_label = False,
        sources = [],
        stop_btn = True,
    )

    # New id -> new transcript file for the fresh conversation.
    current_chat_id = generate_uuid()
    current_chat = gradio.JSON(
        {
            "version": "0.1",
            "chat_id": current_chat_id,
            "chat_history": history
        }
    )
    # NOTE(review): the target file only exists once the first message is
    # persisted; clicking "Save" before then will fail — confirm intended.
    save_chat_button = gradio.DownloadButton(label = "Save", value = f"{tmp_dir}/{current_chat_id}.json")

    # Default text kept identical to the one shown on first load (it
    # previously differed by a missing trailing period).
    system_message = gradio.Textbox(label = "System Message", value = "You are a helpful bot. Be concise with your answers. Do not think with more than 3 lines. Answer in 2 lines. Only answer in English.")
    max_tokens = gradio.Slider(label = "Max Tokens", minimum = 500, maximum = 3000, step = 100, value = 1000)
    temperature = gradio.Slider(label = "Temperature", minimum = 0.1, maximum = 2.0, step = 0.1, value = 0.5)
    top_p = gradio.Slider(label = "Top P", minimum = 0.1, maximum = 1.0, step = 0.1, value = 0.9)

    return chatbot, message_box, current_chat, save_chat_button, system_message, max_tokens, temperature, top_p
99
+
100
def process_token(secret_token):
    """Map a shared password to the real HF API key; pass unknown tokens through.

    If *secret_token* matches one of the colon-separated entries in the
    PASSWORDS env var, return the HF_KEY env var instead.  Otherwise (or when
    PASSWORDS is unset) return the input unchanged, so users may also supply
    their own API token directly.
    """
    passwords_env = os.environ.get("PASSWORDS")
    # No password list configured -> treat the input as a raw API token.
    # (Replaces a bare `except:` that silently swallowed every error here.)
    if passwords_env is None:
        return secret_token

    if secret_token in passwords_env.split(":"):
        return os.environ.get("HF_KEY")
    return secret_token
111
+
112
+
113
+
114
# --- Gradio application ------------------------------------------------------
# Top-level UI wiring: authentication row, streaming chatbot, chat persistence
# and tunable sampling parameters.  fill_height lets the chat column stretch.
with gradio.Blocks(fill_height = True) as base_app:
    gradio.Markdown("# ChatSeek")
    gradio.Markdown("## ")

    # Authentication: the textbox holds either a shared password or a raw HF
    # API token; process_token rewrites known passwords into the real key
    # in place (the component updates itself).
    with gradio.Row():
        with gradio.Column(scale = 2):
            secret_token = gradio.Textbox(label = "API Key", placeholder = "Enter Password/API Token. The key is never stored.", type = "password")
        with gradio.Column():
            submit_token = gradio.Button(value = "Authenticate", scale = 10)

    submit_token_call = submit_token.click(process_token, [secret_token], [secret_token])

    # Main conversation view plus the multimodal input box.  `sources = []`
    # hides the upload sources, so only typed text is accepted for now.
    chatbot = gradio.Chatbot(history, type = "messages")
    message_box = gradio.MultimodalTextbox(
        interactive = True,
        file_count = "multiple",
        placeholder = "Enter message...",
        show_label = False,
        sources = [],
        stop_btn = True,
    )

    # One id per page load; it doubles as the on-disk transcript filename.
    current_chat_id = generate_uuid()

    with gradio.Row():
        with gradio.Column():
            reset_button = gradio.Button(value = "Start a New Chat")
        with gradio.Column():
            # NOTE(review): the file only exists after the first message is
            # persisted; "Save" before that point will fail — confirm intended.
            save_chat_button = gradio.DownloadButton(label = "Save", value = f"{tmp_dir}/{current_chat_id}.json")

    # Sampling controls forwarded verbatim to call_chatbot.
    with gradio.Accordion(label = "Advanced Parameters", open = False):
        system_message = gradio.Textbox(label = "System Message", value = "You are a helpful bot. Be concise with your answers. Do not think with more than 3 lines. Answer in 2 lines. Only answer in English.")
        max_tokens = gradio.Slider(label = "Max Tokens", minimum = 500, maximum = 3000, step = 100, value = 1000)
        temperature = gradio.Slider(label = "Temperature", minimum = 0.1, maximum = 2.0, step = 0.1, value = 0.5)
        top_p = gradio.Slider(label = "Top P", minimum = 0.1, maximum = 1.0, step = 0.1, value = 0.9)

    # Chat state lives in a JSON component so every handler can read/update it.
    with gradio.Accordion(label = "Metadata", open = False):
        current_chat = gradio.JSON(
            {
                "version": "0.1",
                "chat_id": current_chat_id,
                "chat_history": history
            },
            visible = True
        )

    # Submit pipeline: append the user turn -> snapshot to disk -> clear the
    # input box -> stream the model reply -> snapshot again.
    submit_message_call = message_box.submit(process_input_message, [message_box, current_chat], [current_chat], queue=False).then(write_current_chat_to_file, [current_chat], [])
    clear_message_box_call = submit_message_call.then(lambda: gradio.MultimodalTextbox(value = "", interactive = True) , None, [message_box])
    invoke_chatbot_call = clear_message_box_call.then(call_chatbot, [secret_token, current_chat, system_message, max_tokens, temperature, top_p], [chatbot, current_chat]).then(write_current_chat_to_file, [current_chat], [])

    reset_button_call = reset_button.click(reset_chat, [], [chatbot, message_box, current_chat, save_chat_button, system_message, max_tokens, temperature, top_p])

if __name__ == "__main__":
    # allowed_paths lets the DownloadButton serve transcripts from tmp_dir.
    base_app.launch(
        allowed_paths = [tmp_dir]
    )
170
+