akash418 committed
Commit 519a5c7 · 1 Parent(s): 1ecee6a
Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -19,9 +19,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, set_seed
 
 
 def inference(input_sentence, max_length, sample_or_greedy, seed=42):
-
-    print("input_sentence", input_sentence)
-
+    #print("input_sentence", input_sentence)
     if sample_or_greedy == "Sample":
        parameters = {
            "max_new_tokens": max_length,
@@ -63,7 +61,7 @@ def inference(input_sentence, max_length, sample_or_greedy, seed=42):
    #generation = data[0]["generated_text"].split(input_sentence, 1)[1]
 
    generation = res[0]["generated_text"].split(input_sentence, 1)[1]
-    #print(generation)
+    print(generation)
 
    return (
        before_prompt
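
For context on the line this commit uncomments: a transformers text-generation pipeline returns the prompt followed by the continuation in res[0]["generated_text"], so the code splits on input_sentence once and keeps the tail to isolate the newly generated text. Below is a minimal sketch of that pattern only, not the full app.py; the gpt2 model name and the sampling parameters are assumptions for illustration.

# Sketch of the prompt-stripping pattern used in the diff above.
# Assumptions: model "gpt2" and the sampling settings shown here
# stand in for whatever app.py actually configures.
from transformers import pipeline, set_seed

set_seed(42)  # matches the seed=42 default in inference()
generator = pipeline("text-generation", model="gpt2")

input_sentence = "The quick brown fox"
res = generator(input_sentence, max_new_tokens=20, do_sample=True)

# generated_text = prompt + continuation; split once on the prompt
# and take the tail to keep only the continuation.
generation = res[0]["generated_text"].split(input_sentence, 1)[1]
print(generation)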