laurenssam committed on
Commit
611d68a
·
1 Parent(s): b3a3d05
Files changed (1) hide show
  1. app.py +32 -32
app.py CHANGED
@@ -131,12 +131,12 @@ def get_response(params):
131
  images = None
132
  image_args = {}
133
 
134
- temperature = float(params.get("temperature", 1.0))
135
- top_p = float(params.get("top_p", 1.0))
136
  max_context_length = getattr(model.config, "max_position_embeddings", 2048)
137
- max_new_tokens = min(int(params.get("max_new_tokens", 256)), 1024)
138
  stop_str = params.get("stop", None)
139
- do_sample = False if temperature > 0.001 else False
140
  logger.info(prompt)
141
  input_ids = (
142
  tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
@@ -266,9 +266,9 @@ def http_bot(state, temperature, top_p, max_new_tokens):
266
  pload = {
267
  "model": model_name,
268
  "prompt": prompt,
269
- "temperature": float(0.),
270
- "top_p": float(top_p),
271
- "max_new_tokens": min(int(max_new_tokens), 1536),
272
  "stop": (
273
  state.sep
274
  if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT]
@@ -341,31 +341,31 @@ def build_demo():
341
  # inputs=[imagebox, textbox],
342
  # )
343
 
344
- with gr.Accordion("Parameters", open=False) as _:
345
- temperature = gr.Slider(
346
- minimum=0.0,
347
- maximum=1.0,
348
- value=0.2,
349
- step=0.1,
350
- interactive=True,
351
- label="Temperature",
352
- )
353
- top_p = gr.Slider(
354
- minimum=0.0,
355
- maximum=1.0,
356
- value=0.7,
357
- step=0.1,
358
- interactive=True,
359
- label="Top P",
360
- )
361
- max_output_tokens = gr.Slider(
362
- minimum=0,
363
- maximum=1024,
364
- value=512,
365
- step=64,
366
- interactive=True,
367
- label="Max output tokens",
368
- )
369
 
370
  with gr.Column(scale=8):
371
  chatbot = gr.Chatbot(elem_id="chatbot", label="Chatbot", height=550)
 
131
  images = None
132
  image_args = {}
133
 
134
+ temperature = 0.0
135
+ top_p = 1.0
136
  max_context_length = getattr(model.config, "max_position_embeddings", 2048)
137
+ max_new_tokens = 512
138
  stop_str = params.get("stop", None)
139
+ do_sample = False
140
  logger.info(prompt)
141
  input_ids = (
142
  tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
 
266
  pload = {
267
  "model": model_name,
268
  "prompt": prompt,
269
+ "temperature": temperature,
270
+ "top_p": top_p,
271
+ "max_new_tokens": max_new_tokens,
272
  "stop": (
273
  state.sep
274
  if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT]
 
341
  # inputs=[imagebox, textbox],
342
  # )
343
 
344
+ # with gr.Accordion("Parameters", open=False) as _:
345
+ # temperature = gr.Slider(
346
+ # minimum=0.0,
347
+ # maximum=1.0,
348
+ # value=0.2,
349
+ # step=0.1,
350
+ # interactive=True,
351
+ # label="Temperature",
352
+ # )
353
+ # top_p = gr.Slider(
354
+ # minimum=0.0,
355
+ # maximum=1.0,
356
+ # value=0.7,
357
+ # step=0.1,
358
+ # interactive=True,
359
+ # label="Top P",
360
+ # )
361
+ # max_output_tokens = gr.Slider(
362
+ # minimum=0,
363
+ # maximum=1024,
364
+ # value=512,
365
+ # step=64,
366
+ # interactive=True,
367
+ # label="Max output tokens",
368
+ # )
369
 
370
  with gr.Column(scale=8):
371
  chatbot = gr.Chatbot(elem_id="chatbot", label="Chatbot", height=550)