Update README.md
README.md
CHANGED
@@ -43,8 +43,7 @@ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-14B-Instruct-1M")
 model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)

 event = 'The Magna Carta'
-prompt = f"Q: Craft a compelling first-person historical narrative that captures the significance of {event} and the essence of its era.
-Think step by step about the key events, historical accuracy, stylistic prose, emotions, and sensory details that define the period.\nA: "
+prompt = f"Q: Craft a compelling first-person historical narrative that captures the significance of {event} and the essence of its era. Think step by step about the key events, historical accuracy, stylistic prose, emotions, and sensory details that define the period.\nA: "

 inputs = tokenizer.encode(prompt, return_tensors = 'pt').to(model.device)
 output = model.generate(inputs, max_new_tokens = 750, pad_token_id = tokenizer.pad_token_id, do_sample = True)
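For reference, a minimal runnable sketch of the snippet as it reads after this change. The `model_name` assignment, the imports, and the final decode step fall outside the hunk and are assumptions here; the checkpoint name is taken from the tokenizer line in the hunk header.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed: model_name matches the checkpoint used for the tokenizer in the hunk context.
model_name = "Qwen/Qwen2.5-14B-Instruct-1M"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.bfloat16)

event = 'The Magna Carta'
prompt = f"Q: Craft a compelling first-person historical narrative that captures the significance of {event} and the essence of its era. Think step by step about the key events, historical accuracy, stylistic prose, emotions, and sensory details that define the period.\nA: "

inputs = tokenizer.encode(prompt, return_tensors = 'pt').to(model.device)
output = model.generate(inputs, max_new_tokens = 750, pad_token_id = tokenizer.pad_token_id, do_sample = True)

# Decoding is not shown in the hunk; added here so the sketch prints the generated narrative.
print(tokenizer.decode(output[0], skip_special_tokens=True))
```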