Jason St George committed on
Commit
3573bf1
·
1 Parent(s): 203e926

update gptapp

Browse files
Files changed (2) hide show
  1. app.py +12 -7
  2. appOG.py +1 -1
app.py CHANGED
@@ -19,8 +19,8 @@ parser.add_argument('-p', '--prompt', default='plant', help="String prompt templ
19
  parser.add_argument('-pp', '--prompt_path', type=str, help='Path to custom prompt template to use with LLM ChatBot + Vectorstore')
20
  parser.add_argument('-t', '--temperature', type=float, default=0.7, help='LLM temperature setting... lower == more deterministic')
21
  parser.add_argument('-m', '--max_tokens', type=int, default=384, help='LLM maximum number of output tokens')
22
- parser.add_argument('-f', '--font_size', type=int, default=20, help='Chatbot window font size (default: 20px)')
23
- parser.add_argument('-tp', '--use_terence_prompt', action='store_true', default=True, help="Default is to use chatGPT clone prompt")
24
  args = parser.parse_args()
25
 
26
 
@@ -149,10 +149,10 @@ def get_chain(prompt=None):
149
  )
150
 
151
  chatgpt_chain = LLMChain(
152
- llm=OpenAIChat(temperature=0.7),
153
  prompt=prompt or default_prompt,
154
- verbose=True,
155
- memory=ConversationalBufferWindowMemory(k=10),
156
  )
157
 
158
  return chatgpt_chain
@@ -160,7 +160,7 @@ def get_chain(prompt=None):
160
 
161
 
162
  def initialize_chain():
163
- chain = get_chain(prompt=PROMPT if args.use_terence_prompt else None)
164
  return chain
165
 
166
 
@@ -239,7 +239,12 @@ with block:
239
 
240
  with gr.Column():
241
  with gr.Row():
242
- gr.Image(type='filepath', value='McKenna3.jpg', shape=(200,100))
 
 
 
 
 
243
 
244
  gr.HTML(
245
  "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗 and Unicorn Farts 🦄💨</a></center>"
 
19
  parser.add_argument('-pp', '--prompt_path', type=str, help='Path to custom prompt template to use with LLM ChatBot + Vectorstore')
20
  parser.add_argument('-t', '--temperature', type=float, default=0.7, help='LLM temperature setting... lower == more deterministic')
21
  parser.add_argument('-m', '--max_tokens', type=int, default=384, help='LLM maximum number of output tokens')
22
+ parser.add_argument('-w', '--memory_window', type=int, default=10, help='Chatbot context memory window size')
23
+ parser.add_argument('-v', '--verbose', action='store_true', default=False)
24
  args = parser.parse_args()
25
 
26
 
 
149
  )
150
 
151
  chatgpt_chain = LLMChain(
152
+ llm=OpenAIChat(temperature=args.temperature, max_tokens=args.max_tokens),
153
  prompt=prompt or default_prompt,
154
+ verbose=args.verbose,
155
+ memory=ConversationalBufferWindowMemory(k=args.memory_window),
156
  )
157
 
158
  return chatgpt_chain
 
160
 
161
 
162
  def initialize_chain():
163
+ chain = get_chain(prompt=PROMPT)
164
  return chain
165
 
166
 
 
239
 
240
  with gr.Column():
241
  with gr.Row():
242
+ with gr.Column(min_width=200):
243
+ pass
244
+ with gr.Column():
245
+ gr.Image(type='filepath', value='McKenna3.jpg')
246
+ with gr.Column(min_width=200):
247
+ pass
248
 
249
  gr.HTML(
250
  "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗 and Unicorn Farts 🦄💨</a></center>"
appOG.py CHANGED
@@ -19,7 +19,7 @@ parser.add_argument('-d', '--data_directory', type=str, help='Path to directory
19
  parser.add_argument('-p', '--prompt', default='plant', help="String prompt template to use, must contain {question} and {context}", type=str)
20
  parser.add_argument('-pp', '--prompt_path', type=str, help='Path to custom prompt template to use with LLM ChatBot + Vectorstore')
21
  parser.add_argument('-t', '--temperature', type=float, default=0.7, help='LLM temperature setting... lower == more deterministic')
22
- parser.add_argument('-m', '--max_tokens', type=int, default=384, help='LLM maximum number of output tokens')
23
  parser.add_argument('-v', '--vectorstore_path', default=vecpath, type=str, help='Path to saved index')
24
  parser.add_argument('-dv', '--live_vectorstore_path', default=r"indices\vectorstore_from_docs.pkl", type=str, help='Path to save temporary index')
25
  parser.add_argument('-f', '--font_size', type=int, default=20, help='Chatbot window font size (default: 20px)')
 
19
  parser.add_argument('-p', '--prompt', default='plant', help="String prompt template to use, must contain {question} and {context}", type=str)
20
  parser.add_argument('-pp', '--prompt_path', type=str, help='Path to custom prompt template to use with LLM ChatBot + Vectorstore')
21
  parser.add_argument('-t', '--temperature', type=float, default=0.7, help='LLM temperature setting... lower == more deterministic')
22
+ parser.add_argument('-m', '--max_tokens', type=int, default=500, help='LLM maximum number of output tokens')
23
  parser.add_argument('-v', '--vectorstore_path', default=vecpath, type=str, help='Path to saved index')
24
  parser.add_argument('-dv', '--live_vectorstore_path', default=r"indices\vectorstore_from_docs.pkl", type=str, help='Path to save temporary index')
25
  parser.add_argument('-f', '--font_size', type=int, default=20, help='Chatbot window font size (default: 20px)')