# Call the large language model
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Load the pretrained base model
model_name = "Qwen/Qwen2-0.5B"
base_model = AutoModelForCausalLM.from_pretrained(model_name)
# Load the PEFT adapters (the peft package must be installed for load_adapter)
adapter_path1 = "test2023h5/wyw2xdw"  # Classical Chinese -> modern Chinese
adapter_path2 = "test2023h5/xdw2wyw"  # modern Chinese -> Classical Chinese
base_model.load_adapter(adapter_path1, adapter_name='adapter1')
base_model.load_adapter(adapter_path2, adapter_name='adapter2')
# adapter1 is active by default; predict() switches adapters per request
base_model.set_adapter("adapter1")
model = base_model.to(device)
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
print("model loading done")
def format_instruction(task, text):
    # Build the instruction-style prompt. The Chinese section headers
    # (指令/输入/输出 = instruction/input/output) are kept verbatim,
    # since the adapters presumably expect this exact template.
    string = f"""### 指令:
{task}
### 输入:
{text}
### 输出:
"""
    return string
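
# For illustration (not in the original app): format_instruction("翻译成现代文", "学而时习之")
# renders the following prompt, which generate_response() feeds to the model:
#
#   ### 指令:
#   翻译成现代文
#   ### 输入:
#   学而时习之
#   ### 输出: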
def generate_response(task, text):
    input_text = format_instruction(task, text)
    encoding = tokenizer(input_text, return_tensors="pt").to(device)
    with torch.no_grad():  # disable gradient computation for inference
        outputs = model.generate(**encoding, max_new_tokens=50)
    # Drop the prompt tokens; keep only the newly generated continuation.
    generated_ids = outputs[:, encoding.input_ids.shape[1]:]
    # skip_special_tokens=True strips markers such as <|endoftext|> from the text.
    generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # The model may keep generating past the answer; keep only the first line.
    return generated_texts[0].split('\n')[0]
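
# Example call (hypothetical input, assuming adapter1 is active):
#   generate_response("翻译成现代文", "温故而知新")
# returns the first line the model generates after "### 输出:",
# i.e. a modern-Chinese rendering of the Classical input.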
def predict(text, method):
    if method == 0:
        # "翻译成现代文" = "translate into modern Chinese"
        prompt = ["翻译成现代文", text]
        base_model.set_adapter("adapter1")
    else:
        # "翻译成古文" = "translate into Classical Chinese"
        prompt = ["翻译成古文", text]
        base_model.set_adapter("adapter2")
    print("debug", text)
    response = generate_response(prompt[0], prompt[1])
    print("debug2", response)
    return response
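
# Minimal smoke test; an added sketch, not part of the original app.
# method=0 translates Classical Chinese to modern Chinese (adapter1);
# any other value translates modern Chinese to Classical Chinese (adapter2).
if __name__ == "__main__":
    print(predict("学而时习之，不亦说乎？", 0))    # Classical -> modern
    print(predict("学习并时常温习，不也很愉快吗？", 1))  # modern -> Classical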