Spaces:
vericava
/
Running on Zero

metastable-void committed on
Commit
30790ee
·
1 Parent(s): 45c9a85

workaround

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -7,8 +7,9 @@ from threading import Thread
7
  import gradio as gr
8
  import spaces
9
  import torch
10
- from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, pipeline
11
  from peft import PeftModel
 
 
12
 
13
  DESCRIPTION = "# 真空ジェネレータ (v3)\n<p>Imitate 真空 (@vericava)'s posts interactively</p>"
14
 
@@ -51,11 +52,11 @@ def generate(
51
 
52
  output = my_pipeline(
53
  user_input,
54
- temperature=temperature,
55
  max_new_tokens=max_new_tokens,
56
- repetition_penalty=repetition_penalty,
57
  top_k=top_k,
58
- top_p=top_p,
59
  )[-1]["generated_text"]
60
  print(output)
61
  gen_text = output[len(user_input):]
 
7
  import gradio as gr
8
  import spaces
9
  import torch
 
10
  from peft import PeftModel
11
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
12
+ TextIteratorStreamer, pipeline)
13
 
14
  DESCRIPTION = "# 真空ジェネレータ (v3)\n<p>Imitate 真空 (@vericava)'s posts interactively</p>"
15
 
 
52
 
53
  output = my_pipeline(
54
  user_input,
55
+ temperature=temperature * 1.0,
56
  max_new_tokens=max_new_tokens,
57
+ repetition_penalty=repetition_penalty * 1.0,
58
  top_k=top_k,
59
+ top_p=top_p * 1.0,
60
  )[-1]["generated_text"]
61
  print(output)
62
  gen_text = output[len(user_input):]