from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Path to a locally saved TinyLlama checkpoint.
model_path = "./tinyllama_model"
# Load the tokenizer and model weights from the local directory.
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Run inference on the CPU.
device = torch.device("cpu")
model.to(device)
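# Optional sketch (not in the original script): the device can also be picked at
# runtime so the same code uses a GPU when one is available and falls back to the CPU
# otherwise. Uncomment to use instead of the hard-coded CPU device above.
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model.to(device)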
# Prompt to complete.
prompt = "Hello, how can I assist you today?"

# Tokenize the prompt and move the input tensors to the same device as the model.
inputs = tokenizer(prompt, return_tensors="pt").to(device)
# Generate one completion; note that max_length counts the prompt tokens as well as the new ones.
outputs = model.generate(**inputs, max_length=100, num_return_sequences=1)

# Decode the generated token IDs back into text, dropping special tokens.
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(generated_text)
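# Optional sketch (not part of the original script): because max_length includes the
# prompt, long prompts can leave little room for new text. generate() also accepts
# max_new_tokens and sampling parameters, which usually give livelier completions.
# The specific values below are illustrative assumptions, not tuned settings.
sampled = model.generate(
    **inputs,
    max_new_tokens=100,                   # budget for newly generated tokens only
    do_sample=True,                       # sample instead of greedy decoding
    temperature=0.7,
    top_p=0.9,
    pad_token_id=tokenizer.eos_token_id,  # avoids the "no pad token" warning
)
print(tokenizer.decode(sampled[0], skip_special_tokens=True))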