try llama 3
Browse files
app.py
CHANGED
@@ -12,8 +12,8 @@ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 12
 13
 14
-15  tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-storywriter", trust_remote_code=True)
-16  model = AutoModelForCausalLM.from_pretrained("mosaicml/mpt-7b-storywriter", trust_remote_code=True)
+15  # tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-storywriter", trust_remote_code=True)
+16  # model = AutoModelForCausalLM.from_pretrained("mosaicml/mpt-7b-storywriter", trust_remote_code=True)
 17
 18
 19  def respond(