new code
src/pdfchatbot.py CHANGED (+2, -4)
@@ -56,7 +56,7 @@ class PDFChatBot:
        print("Vector store created")
    @spaces.GPU
    def load_tokenizer(self):
-       self.tokenizer =
+       self.tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

    @spaces.GPU
    def create_organic_pipeline(self):
@@ -66,7 +66,6 @@ class PDFChatBot:
            model_kwargs={"torch_dtype": torch.bfloat16},
            device="cuda",
        )
-       self.load_tokenizer()
        print("Model pipeline loaded")

    def get_organic_context(self, query):
@@ -78,7 +77,6 @@ class PDFChatBot:
    @spaces.GPU
    def create_organic_response(self, history, query):
        self.get_organic_context(query)
-       tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
        """
        pipe = pipeline(
            "text-generation",
@@ -92,7 +90,7 @@ class PDFChatBot:
            {"role": "user", "content": query},
        ]

-       prompt = tokenizer.apply_chat_template(
+       prompt = self.pipe.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
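
For context, below is a minimal standalone sketch of the pattern this commit settles on: build the text-generation pipeline once and reuse its bundled tokenizer for chat templating, rather than instantiating a fresh AutoTokenizer inside create_organic_response on every call. The model id and the apply_chat_template arguments are taken from the diff; the model= argument to pipeline(), the sample query, and the generation settings are assumptions, since those lines are not part of the hunks shown.

import torch
from transformers import pipeline

# Build the pipeline once; it loads and keeps the checkpoint's tokenizer.
pipe = pipeline(
    "text-generation",
    model="meta-llama/Meta-Llama-3-8B-Instruct",  # assumption: same checkpoint as the tokenizer in the diff
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",
)

# A chat in the shape the diff builds; the system/context message that
# get_organic_context presumably prepends is omitted here.
messages = [
    {"role": "user", "content": "What does the uploaded PDF say about embeddings?"},  # placeholder query
]

# Reuse pipe.tokenizer instead of a second AutoTokenizer.from_pretrained()
# call per request, mirroring the new self.pipe.tokenizer.apply_chat_template line.
prompt = pipe.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

output = pipe(prompt, max_new_tokens=256)  # generation settings are illustrative
print(output[0]["generated_text"])

Net effect of the commit: load_tokenizer now actually assigns self.tokenizer instead of leaving a dangling self.tokenizer =, the explicit self.load_tokenizer() call is dropped from create_organic_pipeline, and create_organic_response stops re-instantiating the Llama 3 tokenizer on every response in favor of the one self.pipe already holds.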