Update README.md
README.md (CHANGED)
````diff
@@ -34,16 +34,31 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 tokenizer = AutoTokenizer.from_pretrained("bysq/autism-assistant-qwen2")
 model = AutoModelForCausalLM.from_pretrained("bysq/autism-assistant-qwen2")
 
+
 # Usage example
-prompt =
-原始表达:"
-自闭症患者的表达:"
+prompt = """你是一个专门帮助理解自闭症患者表达的AI助手。
+原始表达:"可以把东西给我?"
+自闭症患者的表达:"不你"
 
 请分析并回答:
--
+- 情感分析:"""
 
+# Encode the input and move it to the correct device
 inputs = tokenizer(prompt, return_tensors="pt")
-
+
+# Check which device the model is on and move the inputs to the same device
+device = next(model.parameters()).device
+inputs = {k: v.to(device) for k, v in inputs.items()}
+
+# Generate the response
+outputs = model.generate(
+    **inputs,
+    max_new_tokens=200,
+    do_sample=True,  # sampling must be enabled for temperature to take effect
+    temperature=0.7,
+    pad_token_id=tokenizer.pad_token_id
+)
+
 response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 print(response)
 ```
````
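For reference, here is what the updated snippet assembles to as a single runnable script. This is a minimal sketch, not part of the commit itself: the `from transformers import ...` line is taken from the hunk context above, and loading the checkpoint with the default `from_pretrained()` settings is an assumption (add `device_map` or `torch_dtype` arguments if your setup needs them).

```python
# Minimal, self-contained sketch of the updated usage example.
# Assumption: the checkpoint loads with default from_pretrained() settings.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("bysq/autism-assistant-qwen2")
model = AutoModelForCausalLM.from_pretrained("bysq/autism-assistant-qwen2")

# Prompt taken verbatim from the README example above.
prompt = """你是一个专门帮助理解自闭症患者表达的AI助手。
原始表达:"可以把东西给我?"
自闭症患者的表达:"不你"

请分析并回答:
- 情感分析:"""

# Tokenize, then move the tensors to whichever device the model lives on.
inputs = tokenizer(prompt, return_tensors="pt")
device = next(model.parameters()).device
inputs = {k: v.to(device) for k, v in inputs.items()}

# do_sample=True is required for temperature to have any effect.
outputs = model.generate(
    **inputs,
    max_new_tokens=200,
    do_sample=True,
    temperature=0.7,
    pad_token_id=tokenizer.pad_token_id,
)

response = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(response)
```

The explicit `device = next(model.parameters()).device` step keeps the example working whether the model sits on CPU or GPU, and enabling sampling is what lets the `temperature=0.7` setting actually change the output distribution.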