Update utils.py
utils.py CHANGED
@@ -68,10 +68,10 @@ def generate_prompt_with_history(text, history, tokenizer, max_length=2048):
 
 #tokenizer = AutoTokenizer.from_pretrained("project-baize/baize-v2-7b")
 #model = AutoModelForCausalLM.from_pretrained("project-baize/baize-v2-7b")
-
-
-tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
-model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
+tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
+model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
+#tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
+#model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
 
 
 def load_tokenizer_and_model(base_model,load_8bit=False):
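The commit swaps the module-level default from EleutherAI/gpt-neo-2.7B to the smaller EleutherAI/gpt-neo-1.3B checkpoint, keeping the 2.7B lines as comments. A minimal, self-contained sketch of what the new lines do at import time; the import statement and the generation call below are assumptions added for illustration (standard transformers API), not part of the diff:

from transformers import AutoTokenizer, AutoModelForCausalLM

# Module-level default after this commit: the smaller 1.3B GPT-Neo checkpoint.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")

# Illustrative usage only (not in the commit): encode a prompt and generate.
inputs = tokenizer("Hello, how are you?", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))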