Update app.py
app.py CHANGED
@@ -1,4 +1,5 @@
-import
+import gradio as gr
 from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 import torch
@@ -25,10 +26,9 @@ model = PeftModel.from_pretrained(model, model_id)
 
 def greet(text):
     with torch.no_grad():  # Disable gradient calculation for inference
-        batch = tokenizer(f'
+        batch = tokenizer(f'### Human: {text}', return_tensors='pt')  # Move tensors to device
         with torch.cuda.amp.autocast():  # Enable mixed-precision if available
-            output_tokens = model.generate(**batch
-            , max_new_tokens=15)
+            output_tokens = model.generate(**batch, max_new_tokens=25)
     return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
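For context, a minimal self-contained sketch of how the full app.py presumably fits together after this change. Only the greet function and the gr.Interface wiring come from the diff itself; the setup in the elided lines 5-24 (implied by the BitsAndBytesConfig import and the "model = PeftModel.from_pretrained(model, model_id)" hunk-header context line) is an assumption, as are the placeholder model_id value, the 4-bit quantization settings, and the .to(model.device) call suggested by the "# Move tensors to device" comment.

    import gradio as gr
    from peft import PeftModel, PeftConfig
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
    import torch

    model_id = "your-username/your-peft-adapter"  # hypothetical placeholder; the real id is in the elided lines

    # Resolve the base model from the adapter's config (assumed setup).
    config = PeftConfig.from_pretrained(model_id)

    # Load the base model quantized, as the BitsAndBytesConfig import suggests (settings assumed).
    bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    model = AutoModelForCausalLM.from_pretrained(
        config.base_model_name_or_path,
        quantization_config=bnb_config,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

    # Attach the fine-tuned adapter; this line appears verbatim in the hunk header.
    model = PeftModel.from_pretrained(model, model_id)

    def greet(text):
        with torch.no_grad():  # disable gradient tracking for inference
            # .to(model.device) is assumed from the "# Move tensors to device" comment in the diff
            batch = tokenizer(f'### Human: {text}', return_tensors='pt').to(model.device)
            with torch.cuda.amp.autocast():  # mixed precision on CUDA; warns and disables itself on CPU
                output_tokens = model.generate(**batch, max_new_tokens=25)
        return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

    iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
    iface.launch()

The trailing iface.launch() is the usual final step in a Gradio Space's app.py; whether the original file includes it is not visible in this diff.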