caedencode committed
Commit 730d5e1 · verified · 1 Parent(s): efe5fc3

Update README.md

Files changed (1)
  1. README.md +3 -7
README.md CHANGED
@@ -51,25 +51,21 @@ Here’s how you can load and use CaedenAI:
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load the model and tokenizer
 model = AutoModelForCausalLM.from_pretrained("caedencode/Caeden-o1")
 tokenizer = AutoTokenizer.from_pretrained("caedencode/Caeden-o1")
 
-# Use appropriate device
+# Move the model to the appropriate device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = model.to(device)
 
-# Generate an answer
 def generate_answer(question):
     prompt = f"Question: {question}\nReasoning:\n"
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
-    outputs = model.generate(
-        **inputs, max_length=200, num_beams=5, early_stopping=True
-    )
+    outputs = model.generate(**inputs, max_length=200, num_beams=5, early_stopping=True)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Example usage
 question = "What is the largest planet in our solar system?"
 answer = generate_answer(question)
 print(answer)
+
 ```
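
For readability, here is the snippet as it stands after this commit, reassembled from the diff above; the model ID and generation settings are taken verbatim from the README:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("caedencode/Caeden-o1")
tokenizer = AutoTokenizer.from_pretrained("caedencode/Caeden-o1")

# Move the model to the appropriate device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def generate_answer(question):
    # Build the prompt, tokenize it, and move the tensors to the same device as the model
    prompt = f"Question: {question}\nReasoning:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Beam search with 5 beams; generation stops early once all beams finish
    outputs = model.generate(**inputs, max_length=200, num_beams=5, early_stopping=True)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

question = "What is the largest planet in our solar system?"
answer = generate_answer(question)
print(answer)
```

One note on the settings kept by this commit: `max_length=200` caps the total sequence length, prompt tokens included, so long questions leave less room for the answer; `max_new_tokens` is the usual alternative when you want to bound only the generated text.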