from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned tokenizer and model from the local checkpoint directory.
tokenizer = AutoTokenizer.from_pretrained("./lockin_model")
model = AutoModelForCausalLM.from_pretrained("./lockin_model")

# Some causal-LM tokenizers (e.g. GPT-2 based) ship without a pad token;
# fall back to the EOS token so padding=True below does not raise an error.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


def generate_question(input_text):
    # Tokenize the prompt and build the attention mask.
    inputs = tokenizer(
        input_text,
        return_tensors="pt",
        padding=True,
        truncation=True,
        return_attention_mask=True,
    )

    # Sample a continuation; the high temperature plus top-p/top-k filtering
    # keeps the generated questions varied rather than deterministic.
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,
        do_sample=True,
        temperature=1.5,
        top_p=0.8,
        top_k=50,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Note: the decoded text includes the original prompt, since generate()
    # returns the prompt tokens followed by the newly sampled tokens.
    return tokenizer.decode(output[0], skip_special_tokens=True)


prompt = "What the fuck"
question = generate_question(prompt)
print("Generated Question:", question)
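
# Because do_sample=True with a high temperature is used, each call can return
# a different question. A minimal sketch (assuming the model and
# generate_question defined above) that draws several samples from one prompt
# to see how much the sampling settings vary the output:
for i in range(3):
    print(f"Sample {i + 1}:", generate_question(prompt))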