suriya7 committed
Commit d9871df · verified · 1 Parent(s): 641846f

Update README.md

Files changed (1): README.md (+2 -2)
README.md CHANGED
@@ -13,7 +13,7 @@ widget:
 inference:
   parameters:
     max_new_tokens: 100
-    do_sample: false
+    do_sample: True
 pipeline_tag: text2text-generation
 ---
 # Gemma-2B Fine-Tuned Python Model
@@ -65,7 +65,7 @@ inputs = encodeds.to(device)
 
 
 # Increase max_new_tokens if needed
-generated_ids = model.generate(inputs, max_new_tokens=1000, do_sample=False, pad_token_id=tokenizer.eos_token_id)
+generated_ids = model.generate(inputs, max_new_tokens=1000, do_sample=True, pad_token_id=tokenizer.eos_token_id)
 ans = ''
 for i in tokenizer.decode(generated_ids[0], skip_special_tokens=True).split('<end_of_turn>')[:2]:
     ans += i
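
For context, here is a minimal sketch of how the updated `do_sample=True` call slots into the README's generation snippet. It assumes the usual transformers loading path; the checkpoint id and the example prompt below are placeholders rather than values taken from this commit.

```python
# Minimal sketch (assumptions: placeholder checkpoint id and prompt; the README's
# own loading code is not part of this diff).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "suriya7/Gemma-2B-Finetuned-Python-Model"  # placeholder repo id
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(device)

# Build a chat-style prompt using the tokenizer's chat template.
chat = [{"role": "user", "content": "Write a Python function that reverses a string."}]
encodeds = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True)
inputs = encodeds.to(device)

# The commit flips do_sample to True, so generation samples from the output
# distribution instead of decoding greedily. Increase max_new_tokens if needed.
generated_ids = model.generate(
    inputs,
    max_new_tokens=1000,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,
)

# Join the first two segments split on Gemma's '<end_of_turn>' marker, as in the README snippet.
ans = ''
for i in tokenizer.decode(generated_ids[0], skip_special_tokens=True).split('<end_of_turn>')[:2]:
    ans += i
print(ans)
```

With `do_sample=True` the output varies from run to run; if more control is wanted, sampling can be tuned with standard `generate` arguments such as `temperature` or `top_p`.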