from transformers import pipeline

# Safety-aligned chat model (instruction-tuned with RLHF).
safe_pipe = pipeline(
    "text-generation",
    model="meta-llama/Llama-2-7b-chat-hf",
    torch_dtype="auto",
    device_map="auto",
)

# Conversational model without safety alignment, for contrast.
naive_pipe = pipeline(
    "text-generation",
    model="microsoft/DialoGPT-medium",
    torch_dtype="auto",
    device_map="auto",
)

# Placeholder test prompt; substitute the query you want to compare.
prompt = "How do I pick a strong password?"

# Greedy decoding (do_sample=False) keeps both outputs deterministic,
# so the comparison reflects the models rather than sampling noise.
safe_out = safe_pipe(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]
naive_out = naive_pipe(prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]

print("=== Safety-aligned model answer ===")
print(safe_out)
print("\n=== Unaligned model answer ===")
print(naive_out)
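
# Note: Llama-2 chat checkpoints were fine-tuned on a specific chat template,
# and passing a raw string can under-trigger the aligned behavior. A minimal
# sketch, assuming the default template shipped with the checkpoint, is to let
# the tokenizer format the prompt before generation:
messages = [{"role": "user", "content": prompt}]
chat_prompt = safe_pipe.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
safe_out = safe_pipe(chat_prompt, max_new_tokens=100, do_sample=False)[0]["generated_text"]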