Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -3,8 +3,7 @@ from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplat
|
|
| 3 |
from langchain.schema import SystemMessage
|
| 4 |
import streamlit as st
|
| 5 |
import torch
|
| 6 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 7 |
-
from transformers.models.llama import LlamaTokenizer
|
| 8 |
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
|
| 9 |
import nltk
|
| 10 |
import json
|
|
@@ -47,8 +46,8 @@ if dataset_file:
|
|
| 47 |
df = pd.read_csv(dataset_file)
|
| 48 |
|
| 49 |
# Initialize tokenizer and model
|
| 50 |
-
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) or LlamaTokenizer.from_pretrained(model_name)
|
| 51 |
-
model = AutoModelForCausalLM.from_pretrained(model_name)
|
| 52 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, return_full_text=True)
|
| 53 |
llm = HuggingFacePipeline(pipeline=pipe)
|
| 54 |
|
|
|
|
| 3 |
from langchain.schema import SystemMessage
|
| 4 |
import streamlit as st
|
| 5 |
import torch
|
| 6 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, LlamaForCausalLM, LlamaTokenizer
|
|
|
|
| 7 |
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
|
| 8 |
import nltk
|
| 9 |
import json
|
|
|
|
# --- Load dataset and build the LLM pipeline (final state of this hunk) ---
df = pd.read_csv(dataset_file)

# Initialize tokenizer and model.
# NOTE(review): the previous `AutoTokenizer.from_pretrained(...) or
# LlamaTokenizer.from_pretrained(...)` pattern was dead code — from_pretrained
# raises on failure and never returns a falsy value, so the right-hand
# "fallback" could never run (and a failing left-hand call raised anyway).
# A try/except is the only way to make the fallback real.
# NOTE(review): `use_auth_token` is deprecated in recent transformers in
# favor of `token=` — confirm the installed version before switching.
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN, use_fast=True)
except Exception:
    # Fall back to the Llama-specific (slow) tokenizer for repos whose config
    # does not resolve through AutoTokenizer. `use_fast` is dropped here: it
    # is an Auto-class dispatch flag, not a LlamaTokenizer kwarg.
    tokenizer = LlamaTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)

try:
    model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=HF_TOKEN)
except Exception:
    # Same rationale as the tokenizer fallback above.
    model = LlamaForCausalLM.from_pretrained(model_name, use_auth_token=HF_TOKEN)

# Wrap the HF pipeline for LangChain; return_full_text=True keeps the prompt
# in the generated output.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, return_full_text=True)
llm = HuggingFacePipeline(pipeline=pipe)