Yoxas committed on
Commit
e81afe2
·
verified ·
1 Parent(s): 1e3697b

Update src/pdfchatbot.py

Browse files
Files changed (1) hide show
  1. src/pdfchatbot.py +3 -3
src/pdfchatbot.py CHANGED
@@ -60,13 +60,13 @@ class PDFChatBot:
60
  print("Vector store created")
61
  @spaces.GPU
62
  def load_tokenizer(self):
63
- self.tokenizer = AutoTokenizer.from_pretrained("kiddothe2b/hierarchical-transformer-base-4096")
64
 
65
  @spaces.GPU
66
  def create_organic_pipeline(self):
67
  self.pipe = pipeline(
68
  "text-generation",
69
- model="meta-llama/Meta-Llama-3-8B-Instruct",
70
  model_kwargs={"torch_dtype": torch.bfloat16},
71
  device="cuda",
72
  )
@@ -84,7 +84,7 @@ class PDFChatBot:
84
  """
85
  pipe = pipeline(
86
  "text-generation",
87
- model="meta-llama/Meta-Llama-3-8B-Instruct",
88
  model_kwargs={"torch_dtype": torch.bfloat16},
89
  device="cuda",
90
  )
 
60
  print("Vector store created")
61
  @spaces.GPU
62
  def load_tokenizer(self):
63
+ self.tokenizer = AutoTokenizer.from_pretrained("gradientai/Llama-3-8B-Instruct-Gradient-1048k")
64
 
65
  @spaces.GPU
66
  def create_organic_pipeline(self):
67
  self.pipe = pipeline(
68
  "text-generation",
69
+ model="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
70
  model_kwargs={"torch_dtype": torch.bfloat16},
71
  device="cuda",
72
  )
 
84
  """
85
  pipe = pipeline(
86
  "text-generation",
87
+ model="gradientai/Llama-3-8B-Instruct-Gradient-1048k",
88
  model_kwargs={"torch_dtype": torch.bfloat16},
89
  device="cuda",
90
  )