bobber committed on
Commit
dbefc37
·
verified ·
1 Parent(s): 511753b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -2
app.py CHANGED
@@ -3,7 +3,7 @@ import spaces
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import torch
5
 
6
- model_name = "Qwen/Qwen2.5-14B-Instruct-1M"
7
 
8
  model = AutoModelForCausalLM.from_pretrained(
9
  model_name,
@@ -11,11 +11,20 @@ model = AutoModelForCausalLM.from_pretrained(
11
  device_map="auto"
12
  )
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
 
 
 
 
 
 
 
14
 
15
  @spaces.GPU
16
  def generate(prompt, history):
17
  messages = [
18
- {"role": "system", "content": "Je bent een vriendelijke, behulpzame assistent."},
19
  {"role": "user", "content": prompt}
20
  ]
21
  text = tokenizer.apply_chat_template(
 
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
  import torch
5
 
6
+ model_name = "Qwen/Qwen2-0.5B"
7
 
8
  model = AutoModelForCausalLM.from_pretrained(
9
  model_name,
 
11
  device_map="auto"
12
  )
13
  tokenizer = AutoTokenizer.from_pretrained(model_name)
14
+ SYSTEM_PROMPT = """
15
+ Respond in the following format:
16
+ <reasoning>
17
+ ...
18
+ </reasoning>
19
+ <answer>
20
+ ...
21
+ </answer>
22
+ """
23
 
24
  @spaces.GPU
25
  def generate(prompt, history):
26
  messages = [
27
+ {"role": "system", "content": SYSTEM_PROMPT},
28
  {"role": "user", "content": prompt}
29
  ]
30
  text = tokenizer.apply_chat_template(