yarenty committed
Commit cf47578 · verified · 1 Parent(s): c15b0c0

Update app.py

Files changed (1): app.py +407 -66

app.py CHANGED
@@ -1,70 +1,411 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
-
- def respond(
-     message,
-     history: list[dict[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     hf_token: gr.OAuthToken,
- ):
      """
-     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
      """
-     client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-     messages = [{"role": "system", "content": system_message}]
-
-     messages.extend(history)
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         choices = message.choices
-         token = ""
-         if len(choices) and choices[0].delta.content:
-             token = choices[0].delta.content
-
-         response += token
-         yield response
-
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- chatbot = gr.ChatInterface(
-     respond,
-     type="messages",
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
- with gr.Blocks() as demo:
-     with gr.Sidebar():
-         gr.LoginButton()
-     chatbot.render()
-
-
- if __name__ == "__main__":
      demo.launch()
+ import os
+ import time
+ import gc
+ import threading
+ from itertools import islice
+ from datetime import datetime
+ import re  # for parsing <think> blocks
  import gradio as gr
+ import torch
+ from transformers import pipeline, TextIteratorStreamer
+ from transformers import AutoTokenizer
+ from ddgs import DDGS
+ import spaces  # Import spaces early to enable ZeroGPU support
+
+ access_token = os.environ["HF_TOKEN"]
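+ # Note: HF_TOKEN must be available in the environment (e.g. as a Space secret);
+ # os.environ["HF_TOKEN"] raises KeyError if it is missing, and gated or private
+ # repos listed in MODELS below may fail to load without a valid token.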
+
+ # Optional: disable GPU visibility if you wish to force CPU usage
+ # os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+ # ------------------------------
+ # Global Cancellation Event
+ # ------------------------------
+ cancel_event = threading.Event()
+
+ # ------------------------------
+ # Torch-Compatible Model Definitions with Adjusted Descriptions
+ # ------------------------------
+ MODELS = {
+     # … your existing entries …
+     "gpt-oss-20b": {"repo_id": "openai/gpt-oss-20b", "description": "openai/gpt-oss-20b"},
+     "Qwen2.5-Taiwan-1.5B-Instruct": {"repo_id": "benchang1110/Qwen2.5-Taiwan-1.5B-Instruct", "description": "Qwen2.5-Taiwan-1.5B-Instruct"},
+     "parser_model_ner_gemma_v0.1": {
+         "repo_id": "myfi/parser_model_ner_gemma_v0.1",
+         "description": "A lightweight named-entity-recognition (NER) parser fine-tuned from Google's **Gemma-3-270M** model. The base Gemma-3-270M is a 270 M-parameter, hyper-efficient LLM designed for on-device inference, supporting >140 languages, a 128 k-token context window, and instruction following. This variant is further trained on standard NER corpora (e.g., CoNLL-2003, OntoNotes) to extract PERSON, ORG, LOC, and MISC entities with high precision while keeping the memory footprint low (≈240 MB VRAM in BF16 quantized form). It is released under the Apache-2.0 license and can be used for fast, cost-effective entity extraction in low-resource environments."
+     },
+     "Gemma-3-Taiwan-270M-it": {
+         "repo_id": "lianghsun/Gemma-3-Taiwan-270M-it",
+         "description": "google/gemma-3-270m-it fine-tuned on a Traditional Chinese (Taiwan) dataset"
+     },
+     "gemma-3-270m-it": {
+         "repo_id": "google/gemma-3-270m-it",
+         "description": "Gemma-3-270M-IT is the compact, 270-million-parameter instruction-tuned variant of Gemma 3, offering fast and efficient on-device text generation and comprehension.",
+     },
+     "SmolLM-135M-Taiwan-Instruct-v1.0": {
+         "repo_id": "benchang1110/SmolLM-135M-Taiwan-Instruct-v1.0",
+         "description": "135-million-parameter F32 safetensors instruction-finetuned variant of SmolLM-135M-Taiwan, trained on the 416 k-example ChatTaiwan dataset for Traditional Chinese conversational and instruction-following tasks"
+     },
+     "Llama-3.2-Taiwan-1B": {
+         "repo_id": "lianghsun/Llama-3.2-Taiwan-1B",
+         "description": "Llama-3.2-Taiwan base model with 1 B parameters"
+     },
+     "Qwen2.5-0.5B-Taiwan-Instruct": {
+         "repo_id": "ShengweiPeng/Qwen2.5-0.5B-Taiwan-Instruct",
+         "description": "Qwen2.5-Taiwan model with 0.5 B parameters, instruction-tuned"
+     },
+     "Qwen3-0.6B-Taiwan": {
+         "repo_id": "ShengweiPeng/Qwen3-0.6B-Taiwan",
+         "description": "Qwen3-Taiwan model with 0.6 B parameters"
+     },
+
+     "Qwen2.5-Taiwan-3B-Reason-GRPO": {
+         "repo_id": "benchang1110/Qwen2.5-Taiwan-3B-Reason-GRPO",
+         "description": "Qwen2.5-Taiwan model with 3 B parameters, Reason-GRPO fine-tuned"
+     },
+
+     # Gemma 3n "effective" variants (official Google repos)
+     "Gemma-3n-E2B": {
+         "repo_id": "google/gemma-3n-E2B",
+         "description": "Gemma 3n base model with effective 2 B parameters (≈2 GB VRAM)"
+     },
+     "Gemma-3n-E4B": {
+         "repo_id": "google/gemma-3n-E4B",
+         "description": "Gemma 3n base model with effective 4 B parameters (≈3 GB VRAM)"
+     },
+
+     # PowerInfer SmallThinker (instruction-tuned)
+     "SmallThinker-4BA0.6B-Instruct": {
+         "repo_id": "PowerInfer/SmallThinker-4BA0.6B-Instruct",
+         "description": "SmallThinker 4 B backbone with 0.6 B activated parameters, instruction-tuned"
+     },
+     # TIIUAE Falcon-H1 (instruction-tuned)
+     "Falcon-H1-1.5B-Instruct": {
+         "repo_id": "tiiuae/Falcon-H1-1.5B-Instruct",
+         "description": "Falcon-H1 model with 1.5 B parameters, instruction-tuned"
+     },
+     "Qwen/Qwen3-14B-FP8": {"repo_id": "Qwen/Qwen3-14B-FP8", "description": "Qwen/Qwen3-14B-FP8"},
+     # "Qwen/Qwen3-32B-FP8": {"repo_id": "Qwen/Qwen3-32B-FP8", "description": "Qwen/Qwen3-32B-FP8"},
+     "DeepSeek-R1-0528-Qwen3-8B": {"repo_id": "deepseek-ai/DeepSeek-R1-0528-Qwen3-8B", "description": "DeepSeek-R1-0528-Qwen3-8B"},
+     "Nemotron-Research-Reasoning-Qwen-1.5B": {"repo_id": "nvidia/Nemotron-Research-Reasoning-Qwen-1.5B", "description": "Nemotron-Research-Reasoning-Qwen-1.5B"},
+     "Taiwan-ELM-1_1B-Instruct": {"repo_id": "liswei/Taiwan-ELM-1_1B-Instruct", "description": "Taiwan-ELM-1_1B-Instruct"},
+     "Taiwan-ELM-270M-Instruct": {"repo_id": "liswei/Taiwan-ELM-270M-Instruct", "description": "Taiwan-ELM-270M-Instruct"},
+     # "Granite-4.0-Tiny-Preview": {"repo_id": "ibm-granite/granite-4.0-tiny-preview", "description": "Granite-4.0-Tiny-Preview"},
+     "Qwen3-0.6B": {"repo_id": "Qwen/Qwen3-0.6B", "description": "Dense causal language model with 0.6 B total parameters (0.44 B non-embedding), 28 transformer layers, 16 query heads & 8 KV heads, native 32 768-token context window, dual-mode generation, full multilingual & agentic capabilities."},
+     "Qwen3-1.7B": {"repo_id": "Qwen/Qwen3-1.7B", "description": "Dense causal language model with 1.7 B total parameters (1.4 B non-embedding), 28 layers, 16 query heads & 8 KV heads, 32 768-token context, stronger reasoning vs. 0.6 B variant, dual-mode inference, instruction following across 100+ languages."},
+     "Qwen3-4B": {"repo_id": "Qwen/Qwen3-4B", "description": "Dense causal language model with 4.0 B total parameters (3.6 B non-embedding), 36 layers, 32 query heads & 8 KV heads, native 32 768-token context (extendable to 131 072 via YaRN), balanced mid-range capacity & long-context reasoning."},
+     "Qwen3-8B": {"repo_id": "Qwen/Qwen3-8B", "description": "Dense causal language model with 8.2 B total parameters (6.95 B non-embedding), 36 layers, 32 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), excels at multilingual instruction following & zero-shot tasks."},
+     "Qwen3-14B": {"repo_id": "Qwen/Qwen3-14B", "description": "Dense causal language model with 14.8 B total parameters (13.2 B non-embedding), 40 layers, 40 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), enhanced human preference alignment & advanced agent integration."},
+     # "Qwen3-32B": {"repo_id": "Qwen/Qwen3-32B", "description": "Dense causal language model with 32.8 B total parameters (31.2 B non-embedding), 64 layers, 64 query heads & 8 KV heads, 32 768-token context (131 072 via YaRN), flagship variant delivering state-of-the-art reasoning & instruction following."},
+     # "Qwen3-30B-A3B": {"repo_id": "Qwen/Qwen3-30B-A3B", "description": "Mixture-of-Experts model with 30.5 B total parameters (29.9 B non-embedding, 3.3 B activated per token), 48 layers, 128 experts (8 activated per token), 32 query heads & 4 KV heads, 32 768-token context (131 072 via YaRN), MoE routing for scalable specialized reasoning."},
+     # "Qwen3-235B-A22B": {"repo_id": "Qwen/Qwen3-235B-A22B", "description": "Mixture-of-Experts model with 235 B total parameters (234 B non-embedding, 22 B activated per token), 94 layers, 128 experts (8 activated per token), 64 query heads & 4 KV heads, 32 768-token context (131 072 via YaRN), ultra-scale reasoning & agentic workflows."},
+     "Gemma-3-4B-IT": {"repo_id": "unsloth/gemma-3-4b-it", "description": "Gemma-3-4B-IT"},
+     "SmolLM2_135M_Grpo_Gsm8k": {"repo_id": "prithivMLmods/SmolLM2_135M_Grpo_Gsm8k", "description": "SmolLM2_135M_Grpo_Gsm8k"},
+     "SmolLM2-135M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-135M-Instruct-TaiwanChat", "description": "SmolLM2-135M Instruct fine-tuned on TaiwanChat"},
+     "SmolLM2-135M-Instruct": {"repo_id": "HuggingFaceTB/SmolLM2-135M-Instruct", "description": "Original SmolLM2-135M Instruct"},
+     "SmolLM2-360M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-360M-Instruct-TaiwanChat", "description": "SmolLM2-360M Instruct fine-tuned on TaiwanChat"},
+     "SmolLM2-360M-Instruct": {"repo_id": "HuggingFaceTB/SmolLM2-360M-Instruct", "description": "Original SmolLM2-360M Instruct"},
+     "Llama-3.2-Taiwan-3B-Instruct": {"repo_id": "lianghsun/Llama-3.2-Taiwan-3B-Instruct", "description": "Llama-3.2-Taiwan-3B-Instruct"},
+     "MiniCPM3-4B": {"repo_id": "openbmb/MiniCPM3-4B", "description": "MiniCPM3-4B"},
+     "Qwen2.5-3B-Instruct": {"repo_id": "Qwen/Qwen2.5-3B-Instruct", "description": "Qwen2.5-3B-Instruct"},
+     "Qwen2.5-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-7B-Instruct", "description": "Qwen2.5-7B-Instruct"},
+     "Phi-4-mini-Reasoning": {"repo_id": "microsoft/Phi-4-mini-reasoning", "description": "Phi-4-mini-Reasoning"},
+     # "Phi-4-Reasoning": {"repo_id": "microsoft/Phi-4-reasoning", "description": "Phi-4-Reasoning"},
+     "Phi-4-mini-Instruct": {"repo_id": "microsoft/Phi-4-mini-instruct", "description": "Phi-4-mini-Instruct"},
+     "Meta-Llama-3.1-8B-Instruct": {"repo_id": "MaziyarPanahi/Meta-Llama-3.1-8B-Instruct", "description": "Meta-Llama-3.1-8B-Instruct"},
+     "DeepSeek-R1-Distill-Llama-8B": {"repo_id": "unsloth/DeepSeek-R1-Distill-Llama-8B", "description": "DeepSeek-R1-Distill-Llama-8B"},
+     "Mistral-7B-Instruct-v0.3": {"repo_id": "MaziyarPanahi/Mistral-7B-Instruct-v0.3", "description": "Mistral-7B-Instruct-v0.3"},
+     "Qwen2.5-Coder-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-Coder-7B-Instruct", "description": "Qwen2.5-Coder-7B-Instruct"},
+     "Qwen2.5-Omni-3B": {"repo_id": "Qwen/Qwen2.5-Omni-3B", "description": "Qwen2.5-Omni-3B"},
+     "MiMo-7B-RL": {"repo_id": "XiaomiMiMo/MiMo-7B-RL", "description": "MiMo-7B-RL"},
+ }
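+ # Each entry maps the display name shown in the model dropdown to a Hugging Face
+ # repo id, e.g. MODELS["Qwen3-0.6B"]["repo_id"] == "Qwen/Qwen3-0.6B"; load_pipeline()
+ # below resolves that repo id and caches the resulting text-generation pipeline.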
+
+ # Global cache for pipelines to avoid re-loading.
+ PIPELINES = {}
+
+ def load_pipeline(model_name):
+     """
+     Load and cache a transformers pipeline for text generation.
+     Tries bfloat16, falls back to float16 or float32 if unsupported.
      """
+     global PIPELINES
+     if model_name in PIPELINES:
+         return PIPELINES[model_name]
+     repo = MODELS[model_name]["repo_id"]
+     tokenizer = AutoTokenizer.from_pretrained(repo, token=access_token)
+     for dtype in (torch.bfloat16, torch.float16, torch.float32):
+         try:
+             pipe = pipeline(
+                 task="text-generation",
+                 model=repo,
+                 tokenizer=tokenizer,
+                 trust_remote_code=True,
+                 torch_dtype=dtype,
+                 device_map="auto",
+                 use_cache=False,  # disable past-key-value caching
+                 token=access_token,
+             )
+             PIPELINES[model_name] = pipe
+             return pipe
+         except Exception:
+             continue
+     # Final fallback
+     pipe = pipeline(
+         task="text-generation",
+         model=repo,
+         tokenizer=tokenizer,
+         trust_remote_code=True,
+         device_map="auto",
+     )
+     PIPELINES[model_name] = pipe
+     return pipe
+
+
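+ # Usage sketch (illustrative; assumes the selected repo is accessible with HF_TOKEN):
+ #   pipe = load_pipeline("Qwen3-0.6B")
+ #   out = pipe("Hello", max_new_tokens=32, return_full_text=False)
+ #   print(out[0]["generated_text"])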
+ def retrieve_context(query, max_results=6, max_chars=600):
+     """
+     Retrieve search snippets from DuckDuckGo (the caller runs this in a background thread).
+     Returns a list of result strings.
+     """
+     try:
+         with DDGS() as ddgs:
+             return [f"{i+1}. {r.get('title', 'No Title')} - {r.get('body', '')[:max_chars]}"
+                     for i, r in enumerate(islice(ddgs.text(query, region="wt-wt", safesearch="off", timelimit="y"), max_results))]
+     except Exception:
+         return []
+
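+ # Example of the returned snippet format (illustrative):
+ #   ["1. Some Page Title - first 600 characters of the page body…", "2. …"]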
+ def format_conversation(history, system_prompt, tokenizer):
+     if hasattr(tokenizer, "chat_template") and tokenizer.chat_template:
+         messages = [{"role": "system", "content": system_prompt.strip()}] + history
+         return tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, enable_thinking=True)
+     else:
+         # Fallback for base LMs without a chat template
+         prompt = system_prompt.strip() + "\n"
+         for msg in history:
+             if msg['role'] == 'user':
+                 prompt += "User: " + msg['content'].strip() + "\n"
+             elif msg['role'] == 'assistant':
+                 prompt += "Assistant: " + msg['content'].strip() + "\n"
+         if not prompt.strip().endswith("Assistant:"):
+             prompt += "Assistant: "
+         return prompt
+
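+ # For a model without a chat template, the fallback prompt looks like (illustrative):
+ #   "You are a helpful assistant.\nUser: Hi there\nAssistant: "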
+ @spaces.GPU(duration=60)
+ def chat_response(user_msg, chat_history, system_prompt,
+                   enable_search, max_results, max_chars,
+                   model_name, max_tokens, temperature,
+                   top_k, top_p, repeat_penalty, search_timeout):
      """
+     Generates streaming chat responses, optionally with background web search.
+     """
+     cancel_event.clear()
+     history = list(chat_history or [])
+     history.append({'role': 'user', 'content': user_msg})
+
+     # Launch web search if enabled
+     debug = ''
+     search_results = []
+     if enable_search:
+         debug = 'Search task started.'
+         thread_search = threading.Thread(
+             target=lambda: search_results.extend(
+                 retrieve_context(user_msg, int(max_results), int(max_chars))
+             )
+         )
+         thread_search.daemon = True
+         thread_search.start()
+     else:
+         debug = 'Web search disabled.'
+
+     try:
+         cur_date = datetime.now().strftime('%Y-%m-%d')
+
+         # Wait up to `search_timeout` seconds for snippets, then surface them in the debug panel
+         if enable_search:
+             thread_search.join(timeout=float(search_timeout))
+             if search_results:
+                 debug = "### Search results merged into prompt\n\n" + "\n".join(
+                     f"- {r}" for r in search_results
+                 )
+             else:
+                 debug = "*No web search results found.*"
+
+         # Merge any fetched snippets into the system prompt
+         if search_results:
+             enriched = system_prompt.strip() + \
+                 f'''\n# The following contents are the search results related to the user's message:
+ {search_results}
+ In the search results I provide to you, each result is formatted as [webpage X begin]...[webpage X end], where X represents the numerical index of each article. Please cite the context at the end of the relevant sentence when appropriate. Use the citation format [citation:X] in the corresponding part of your answer. If a sentence is derived from multiple contexts, list all relevant citation numbers, such as [citation:3][citation:5]. Be sure not to cluster all citations at the end; instead, include them in the corresponding parts of the answer.
+ When responding, please keep the following points in mind:
+ - Today is {cur_date}.
+ - Not all content in the search results is closely related to the user's question. You need to evaluate and filter the search results based on the question.
+ - For listing-type questions (e.g., listing all flight information), try to limit the answer to 10 key points and inform the user that they can refer to the search sources for complete information. Prioritize providing the most complete and relevant items in the list. Avoid mentioning content not provided in the search results unless necessary.
+ - For creative tasks (e.g., writing an essay), ensure that references are cited within the body of the text, such as [citation:3][citation:5], rather than only at the end of the text. You need to interpret and summarize the user's requirements, choose an appropriate format, fully utilize the search results, extract key information, and generate an answer that is insightful, creative, and professional. Extend the length of your response as much as possible, addressing each point in detail and from multiple perspectives, ensuring the content is rich and thorough.
+ - If the response is lengthy, structure it well and summarize it in paragraphs. If a point-by-point format is needed, try to limit it to 5 points and merge related content.
+ - For objective Q&A, if the answer is very brief, you may add one or two related sentences to enrich the content.
+ - Choose an appropriate and visually appealing format for your response based on the user's requirements and the content of the answer, ensuring strong readability.
+ - Your answer should synthesize information from multiple relevant webpages and avoid repeatedly citing the same webpage.
+ - Unless the user requests otherwise, your response should be in the same language as the user's question.
+ # The user's message is:
+ '''
+         else:
+             enriched = system_prompt
+
+         pipe = load_pipeline(model_name)
+         prompt = format_conversation(history, enriched, pipe.tokenizer)
+         prompt_debug = f"\n\n--- Prompt Preview ---\n```\n{prompt}\n```"
+         streamer = TextIteratorStreamer(pipe.tokenizer,
+                                         skip_prompt=True,
+                                         skip_special_tokens=True)
+         gen_thread = threading.Thread(
+             target=pipe,
+             args=(prompt,),
+             kwargs={
+                 'max_new_tokens': max_tokens,
+                 'temperature': temperature,
+                 'top_k': top_k,
+                 'top_p': top_p,
+                 'repetition_penalty': repeat_penalty,
+                 'streamer': streamer,
+                 'return_full_text': False,
+             }
+         )
+         gen_thread.start()
+
+         # Buffers for thought vs. answer: a <think>…</think> block is shown as a
+         # separate "💭 Thought" message; everything after </think> is the answer.
+         thought_buf = ''
+         answer_buf = ''
+         in_thought = False
+
+         # Stream tokens
+         for chunk in streamer:
+             if cancel_event.is_set():
+                 break
+             text = chunk
+
+             # Detect start of thinking
+             if not in_thought and '<think>' in text:
+                 in_thought = True
+                 # Insert thought placeholder
+                 history.append({
+                     'role': 'assistant',
+                     'content': '',
+                     'metadata': {'title': '💭 Thought'}
+                 })
+                 # Capture text after the opening tag
+                 after = text.split('<think>', 1)[1]
+                 thought_buf += after
+                 # If the closing tag arrives in the same chunk
+                 if '</think>' in thought_buf:
+                     before, after2 = thought_buf.split('</think>', 1)
+                     history[-1]['content'] = before.strip()
+                     in_thought = False
+                     # Start the answer buffer
+                     answer_buf = after2
+                     history.append({'role': 'assistant', 'content': answer_buf})
+                 else:
+                     history[-1]['content'] = thought_buf
+                 yield history, debug
+                 continue
+
+             # Continue streaming the thought
+             if in_thought:
+                 thought_buf += text
+                 if '</think>' in thought_buf:
+                     before, after2 = thought_buf.split('</think>', 1)
+                     history[-1]['content'] = before.strip()
+                     in_thought = False
+                     # Start the answer buffer
+                     answer_buf = after2
+                     history.append({'role': 'assistant', 'content': answer_buf})
+                 else:
+                     history[-1]['content'] = thought_buf
+                 yield history, debug
+                 continue
+
+             # Stream the answer
+             if not answer_buf:
+                 history.append({'role': 'assistant', 'content': ''})
+             answer_buf += text
+             history[-1]['content'] = answer_buf
+             yield history, debug
+
+         gen_thread.join()
+         yield history, debug + prompt_debug
+     except Exception as e:
+         history.append({'role': 'assistant', 'content': f"Error: {e}"})
+         yield history, debug
+     finally:
+         gc.collect()
+
+
+ def cancel_generation():
+     cancel_event.set()
+     return 'Generation cancelled.'
+
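+ # Note: cancel_generation() only sets cancel_event; chat_response() checks the flag
+ # between streamed chunks and stops updating the chat, but the generation thread may
+ # keep running until its current pipeline call finishes.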
+
+ def update_default_prompt(enable_search):
+     # The default system prompt is currently the same whether or not web search is enabled.
+     return "You are a helpful assistant."
+
+ # ------------------------------
+ # Gradio UI
+ # ------------------------------
+ with gr.Blocks(title="LLM Inference") as demo:
+     gr.Markdown("## 🧠 LLM Inference with Web Search")
+     gr.Markdown("Interact with the model. Select parameters and chat below.")
+     with gr.Row():
+         with gr.Column(scale=3):
+             model_dd = gr.Dropdown(label="Select Model", choices=list(MODELS.keys()), value=list(MODELS.keys())[0])
+             search_chk = gr.Checkbox(label="Enable Web Search", value=True)
+             sys_prompt = gr.Textbox(label="System Prompt", lines=3, value=update_default_prompt(search_chk.value))
+             gr.Markdown("### Generation Parameters")
+             max_tok = gr.Slider(64, 16384, value=2048, step=32, label="Max Tokens")
+             temp = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
+             k = gr.Slider(1, 100, value=40, step=1, label="Top-K")
+             p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P")
+             rp = gr.Slider(1.0, 2.0, value=1.2, step=0.1, label="Repetition Penalty")
+             gr.Markdown("### Web Search Settings")
+             mr = gr.Number(value=6, precision=0, label="Max Results")
+             mc = gr.Number(value=600, precision=0, label="Max Chars/Result")
+             st = gr.Slider(minimum=0.0, maximum=30.0, step=0.5, value=5.0, label="Search Timeout (s)")
+             clr = gr.Button("Clear Chat")
+             cnl = gr.Button("Cancel Generation")
+         with gr.Column(scale=7):
+             chat = gr.Chatbot(type="messages")
+             txt = gr.Textbox(placeholder="Type your message and press Enter...")
+             dbg = gr.Markdown()
+
+     search_chk.change(fn=update_default_prompt, inputs=search_chk, outputs=sys_prompt)
+     clr.click(fn=lambda: ([], "", ""), outputs=[chat, txt, dbg])
+     cnl.click(fn=cancel_generation, outputs=dbg)
+     txt.submit(fn=chat_response,
+                inputs=[txt, chat, sys_prompt, search_chk, mr, mc,
+                        model_dd, max_tok, temp, k, p, rp, st],
+                outputs=[chat, dbg])
      demo.launch()