Update app.py
app.py CHANGED
@@ -36,7 +36,7 @@ def get_text_chunks(text):
 
 # Function to create vectorstore from the text chunks
 def get_vectorstore(text_chunks):
-    embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-
+    embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-xl")  # Using lightweight instructor model
     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
     return vectorstore
 
@@ -49,7 +49,7 @@ def get_conversation_chain(vectorstore):
         model="google/flan-t5-base",  # Smaller model for low-end systems
         tokenizer="google/flan-t5-base",
         max_new_tokens=512,  # Increase the maximum token output
-        temperature=0.
+        temperature=0.7,  # Control creativity
         #do_sample=True,
         top_p=0.9,  # Nucleus sampling
         top_k=50,
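For context, a minimal sketch of how the two edited pieces could fit together in a LangChain-style chat app. Only the lines shown in the diff are confirmed by this commit; the imports, the text2text-generation pipeline wrapper, the memory object, and the ConversationalRetrievalChain below are assumptions about the surrounding code, not the file's actual contents:

# Sketch under assumptions; only the diffed lines are taken from the commit.
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from transformers import pipeline


# Function to create vectorstore from the text chunks
def get_vectorstore(text_chunks):
    embeddings = HuggingFaceEmbeddings(model_name="hkunlp/instructor-xl")  # Using lightweight instructor model
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore


def get_conversation_chain(vectorstore):
    # Assumed wrapper around the flan-t5 generation settings shown in the diff.
    pipe = pipeline(
        "text2text-generation",
        model="google/flan-t5-base",      # Smaller model for low-end systems
        tokenizer="google/flan-t5-base",
        max_new_tokens=512,               # Increase the maximum token output
        temperature=0.7,                  # Control creativity
        # do_sample=True,
        top_p=0.9,                        # Nucleus sampling
        top_k=50,
    )
    llm = HuggingFacePipeline(pipeline=pipe)
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )

One caveat on the sampling parameters: with do_sample left commented out, generation is greedy, so temperature, top_p, and top_k have no effect until sampling is re-enabled.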