Update README.md
README.md CHANGED
@@ -25,8 +25,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda" # for GPU usage or "cpu" for CPU usage
 
-tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/
-model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/
+tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo-1b")
+model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/cosmo-1b").to(device)
 prompt = "Generate a story involving a dog, an astronaut and a baker"
 prompt= tokenizer.apply_chat_template([{"role": "user", "content": prompt}], tokenize=False)
 inputs = tokenizer(prompt, return_tensors="pt").to(device)
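The snippet this hunk patches stops at tokenization; the header of the Limitations hunk below shows `print(tokenizer.decode(output[0]))` as surrounding context, so the README presumably ends the flow with a `generate` call. A minimal runnable sketch of the chat-style usage, assuming standard `transformers` generation defaults; `max_new_tokens=300` is an illustrative choice, not taken from the diff:

```python
# Sketch of the full chat-style flow the hunk patches; generation
# settings are illustrative assumptions, not taken from the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo-1b")
model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/cosmo-1b").to(device)

prompt = "Generate a story involving a dog, an astronaut and a baker"
# Wrap the user message in the model's chat template before tokenizing.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": prompt}], tokenize=False
)
inputs = tokenizer(prompt, return_tensors="pt").to(device)

output = model.generate(**inputs, max_new_tokens=300)  # illustrative length
print(tokenizer.decode(output[0]))
```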
@@ -52,8 +52,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 device = "cuda" # for GPU usage or "cpu" for CPU usage
 
-tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/
-model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/
+tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo-1b")
+model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/cosmo-1b").to(device)
 prompt = "Dark matter is"
 
 inputs = tokenizer(prompt, return_tensors="pt").to(device)
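The same fix lands in the plain-completion example, which skips the chat template and lets the model extend raw text. A self-contained sketch of that flow under the same assumptions (the length setting is again illustrative):

```python
# Sketch of the plain-completion usage from the second hunk; the
# max_new_tokens value is an illustrative assumption, not from the diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/cosmo-1b")
model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/cosmo-1b").to(device)

# Plain continuation: no chat template, the model simply extends the text.
prompt = "Dark matter is"
inputs = tokenizer(prompt, return_tensors="pt").to(device)

output = model.generate(**inputs, max_new_tokens=120)
print(tokenizer.decode(output[0]))
```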
@@ -63,7 +63,7 @@ print(tokenizer.decode(output[0]))
 
 # Limitations
 
-
+This is a small 1.8B model trained on synthetic data, so it might hallucinate, give incomplete or incorrect answers.
 
 # Training
 