MRK4863 committed
Commit 8b6b9c6 · verified · 1 Parent(s): 1b2673f

Update app.py

Files changed (1): app.py +3 -2
app.py CHANGED
@@ -52,6 +52,7 @@ def PROMPT():
     - Also, do not repeat the information that is already present in the context.
     - If you feel there is redundant information or a product is being described twice, specify that as well in the response.
     - The tone of the answer should be like a polite and friendly AI Assistant.
+    - Give a complete answer, never truncate your answer
     '''

     return PromptTemplate(
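For context, a minimal sketch of how the full PROMPT() helper plausibly looks after this commit, assuming LangChain's PromptTemplate. Only the four instruction bullets and the return call appear in the diff; the surrounding template text and the input variable names are assumptions.

from langchain.prompts import PromptTemplate

def PROMPT():
    # Only the instruction bullets and the PromptTemplate return are shown in
    # the diff; the rest of this template is hypothetical filler.
    template = '''Answer the user's question using only the context below.

    Context: {context}
    Question: {question}

    - Also, do not repeat the information that is already present in the context.
    - If you feel there is redundant information or a product is being described twice, specify that as well in the response.
    - The tone of the answer should be like a polite and friendly AI Assistant.
    - Give a complete answer, never truncate your answer
    '''
    return PromptTemplate(
        template=template,
        input_variables=["context", "question"],  # assumed variable names
    )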
@@ -90,7 +91,7 @@ def load_model():
         model=CONFIG['LLM_MODEL'],
         api_key=chat_api_key,
         base_url=CONFIG["LLM_BASE_URL"],
-        max_tokens = 5000,
+        max_tokens = 8000,
         temperature = 0.4,
         top_p = 0.7
     )
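The max_tokens bump from 5000 to 8000 raises the completion cap so answers are less likely to be cut off, matching the new "never truncate" prompt instruction. A hedged sketch of the surrounding call, assuming the client is langchain_openai's ChatOpenAI; the diff shows only the keyword arguments, and the CONFIG values and API key below are placeholders.

from langchain_openai import ChatOpenAI

# Placeholder config; the real values come from the app's CONFIG mapping.
CONFIG = {"LLM_MODEL": "gpt-4o-mini", "LLM_BASE_URL": "https://api.openai.com/v1"}
chat_api_key = "sk-..."  # placeholder

def load_model():
    # ChatOpenAI is an assumption: any OpenAI-compatible LangChain chat model
    # accepting these keyword arguments would fit the diff.
    return ChatOpenAI(
        model=CONFIG['LLM_MODEL'],
        api_key=chat_api_key,
        base_url=CONFIG["LLM_BASE_URL"],
        max_tokens=8000,      # raised from 5000 in this commit
        temperature=0.4,
        top_p=0.7,
    )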
@@ -112,7 +113,7 @@ def memory():
         return_messages=True,
         input_key="question",
         output_key='answer',
-        max_token_limit=2000 # Limit history to 2000 tokens
+        max_token_limit=1000 # Limit history to 1000 tokens
     )
     return st.session_state.memory
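Halving max_token_limit from 2000 to 1000 trades chat-history length for the larger completion budget above. The diff does not name the memory class, so the sketch below assumes LangChain's ConversationSummaryBufferMemory (one of the memory classes that accepts max_token_limit and needs an llm), cached in Streamlit session state so it survives reruns; the llm wiring and memory_key are assumptions.

import streamlit as st
from langchain.memory import ConversationSummaryBufferMemory

def memory():
    # Create the memory once per Streamlit session and reuse it across reruns.
    if "memory" not in st.session_state:
        st.session_state.memory = ConversationSummaryBufferMemory(
            llm=load_model(),           # summarizing LLM; assumed wiring
            memory_key="chat_history",  # assumed key
            return_messages=True,
            input_key="question",
            output_key='answer',
            max_token_limit=1000  # Limit history to 1000 tokens
        )
    return st.session_state.memory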
 