Update README.md
README.md (CHANGED)
@@ -22,7 +22,37 @@ It performed better than expected. It does reasoning first and then generates the response.
It does its reasoning separately; there are no special tokens, and the reasoning is not embedded in the response.

Below is the inference code.
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

MAX_REASONING_TOKENS = 1024
MAX_RESPONSE_TOKENS = 512

model_name = "KingNish/Reasoning-0.5b"

model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)

prompt = "Which is greater 9.9 or 9.11 ??"
messages = [
    {"role": "user", "content": prompt}
]

# Generate reasoning
reasoning_template = tokenizer.apply_chat_template(messages, tokenize=False, add_reasoning_prompt=True)
reasoning_inputs = tokenizer(reasoning_template, return_tensors="pt").to(model.device)
reasoning_ids = model.generate(**reasoning_inputs, max_new_tokens=MAX_REASONING_TOKENS)
reasoning_output = tokenizer.decode(reasoning_ids[0, reasoning_inputs.input_ids.shape[1]:], skip_special_tokens=True)

# print("REASONING: " + reasoning_output)

# Generate answer
messages.append({"role": "reasoning", "content": reasoning_output})
response_template = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
response_inputs = tokenizer(response_template, return_tensors="pt").to(model.device)
response_ids = model.generate(**response_inputs, max_new_tokens=MAX_RESPONSE_TOKENS)
response_output = tokenizer.decode(response_ids[0, response_inputs.input_ids.shape[1]:], skip_special_tokens=True)

print("ANSWER: " + response_output)
```
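The reasoning pass and the answer pass above follow the same build-template, tokenize, generate, decode pattern, so they can be folded into one helper. A minimal sketch reusing the `model`, `tokenizer`, and token budgets from the snippet above (the `generate_turn` helper is introduced here for illustration and is not part of the model card):

```python
def generate_turn(messages, max_new_tokens, **template_kwargs):
    # Render the chat template, generate, and decode only the newly generated tokens.
    text = tokenizer.apply_chat_template(messages, tokenize=False, **template_kwargs)
    inputs = tokenizer(text, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(output_ids[0, inputs.input_ids.shape[1]:], skip_special_tokens=True)

messages = [{"role": "user", "content": "Which is greater 9.9 or 9.11 ??"}]

# Pass 1: reasoning turn, triggered by the template's reasoning prompt.
reasoning = generate_turn(messages, MAX_REASONING_TOKENS, add_reasoning_prompt=True)
messages.append({"role": "reasoning", "content": reasoning})

# Pass 2: answer turn, conditioned on the reasoning appended above.
answer = generate_turn(messages, MAX_RESPONSE_TOKENS, add_generation_prompt=True)
print("ANSWER: " + answer)
```

The only difference between the two passes is the token budget and which flag the chat template receives (`add_reasoning_prompt` vs. `add_generation_prompt`).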
- **Trained by:** [Nishith Jain](https://huggingface.co/KingNish)