Update src/pdfchatbot.py
src/pdfchatbot.py CHANGED (+3 -3)

This change points the tokenizer and both text-generation pipelines at gradientai/Llama-3-8B-Instruct-Gradient-1048k, a long-context (1,048k-token) variant of Llama 3 8B Instruct.
```diff
@@ -60,13 +60,13 @@ class PDFChatBot:
         print("Vector store created")
 
     @spaces.GPU
     def load_tokenizer(self):
-        self.tokenizer = AutoTokenizer.from_pretrained("
+        self.tokenizer = AutoTokenizer.from_pretrained("gradientai/Llama-3-8B-Instruct-Gradient-1048k")
 
     @spaces.GPU
     def create_organic_pipeline(self):
         self.pipe = pipeline(
             "text-generation",
-            model="
+            model="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
             model_kwargs={"torch_dtype": torch.bfloat16},
             device="cuda",
         )
@@ -84,7 +84,7 @@ class PDFChatBot:
         """
         pipe = pipeline(
             "text-generation",
-            model="
+            model="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
             model_kwargs={"torch_dtype": torch.bfloat16},
             device="cuda",
         )
```
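For context, a minimal sketch of the two updated methods as they would read after this commit, plus a hypothetical usage example. The class wiring and the prompt below are illustrative, not from the repo; the sketch assumes the `transformers` pipeline API and the Hugging Face `spaces` ZeroGPU decorator that the diff already uses:

```python
import torch
import spaces
from transformers import AutoTokenizer, pipeline


class PDFChatBot:
    @spaces.GPU
    def load_tokenizer(self):
        # Tokenizer for the long-context Llama 3 8B Instruct variant.
        self.tokenizer = AutoTokenizer.from_pretrained(
            "gradientai/Llama-3-8B-Instruct-Gradient-1048k"
        )

    @spaces.GPU
    def create_organic_pipeline(self):
        # Text-generation pipeline loaded on the GPU in bfloat16.
        self.pipe = pipeline(
            "text-generation",
            model="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
            model_kwargs={"torch_dtype": torch.bfloat16},
            device="cuda",
        )


# Hypothetical usage (not part of the commit): render a chat prompt with the
# tokenizer's chat template and generate a reply with the pipeline.
bot = PDFChatBot()
bot.load_tokenizer()
bot.create_organic_pipeline()

messages = [{"role": "user", "content": "Summarize the uploaded PDF."}]
prompt = bot.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(bot.pipe(prompt, max_new_tokens=256)[0]["generated_text"])
```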