danhtran2mind committed (verified)
Commit: a63317c
Parent(s): b13a847

Update app.py

Files changed (1): app.py (+2 −2)
app.py CHANGED
@@ -29,7 +29,7 @@ def load_model_and_tokenizer(model_path):
         raise Exception(f"Failed to load model or tokenizer from {model_path}: {str(e)}")
 
 def generate_text(tokenizer, model, device, prompt, max_length=100,
-                  num_return_sequences=1, top_p=0.95, temperature=0.7, seed=42):
+                  num_return_sequences=1, top_p=0.95, temperature=0.7, seed=123):
     # Set the random seed for reproducibility
     torch.manual_seed(seed)
     if device.type == "cuda":
@@ -86,7 +86,7 @@ with gr.Blocks() as demo:
             max_length = gr.Slider(minimum=10, maximum=768, value=32, label="Max Length", step=1)
             top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label="Top-p Sampling", step=0.01)
             temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature", step=0.01)
-            seed = gr.Slider(minimum=0, maximum=10000, value=42, label="Seed", step=1)
+            seed = gr.Slider(minimum=0, maximum=10000, value=123, label="Seed", step=1)
             num_return_sequences = gr.Slider(minimum=1, maximum=5, value=1, label="Number of Sequences", step=1)
             submit_button = gr.Button("Generate")
         with gr.Column():
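
For context, the change only swaps the default seed (42 → 123) in the generate_text signature and the matching Gradio slider. Below is a minimal sketch of what a generate_text body consistent with the visible hunks could look like; the parts not shown in the diff (tokenization and the model.generate call) are assumptions based on the standard Hugging Face transformers API, not the author's exact code.

```python
import torch

def generate_text(tokenizer, model, device, prompt, max_length=100,
                  num_return_sequences=1, top_p=0.95, temperature=0.7, seed=123):
    # Set the random seed for reproducibility (these lines appear in the diff context)
    torch.manual_seed(seed)
    if device.type == "cuda":
        torch.cuda.manual_seed_all(seed)

    # Assumed continuation: tokenize the prompt and sample with nucleus (top-p) sampling
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=num_return_sequences,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )
    return [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]
```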