Spaces:
Runtime error
youssef227
committed on
Update app.py
app.py
CHANGED
@@ -1,35 +1,55 @@
 from transformers import AutoTokenizer, AutoModelForCausalLM
-from peft import LoraConfig, get_peft_model
+from peft import LoraConfig, get_peft_model, PeftModel, PeftConfig
 import torch
+import streamlit as st
 
-
-from transformers import AutoModelForCausalLM
-
+# Suppress the warning if necessary
+import warnings
+warnings.filterwarnings("ignore", category=FutureWarning)
 
+# Load PEFT configuration
 config = PeftConfig.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
-print("step 1
+print("step 1")
+
+# Load the base model
 base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
 print("step 2")
+
+# Apply PEFT configuration to the base model
 model = PeftModel.from_pretrained(base_model, "youssef227/llama-3-8b-Instruct-bnb-telcom-3")
 print("step 3")
 
-# Load the tokenizer
-
-
-#
-
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
+
+# Define the prompt template
+alpaca_prompt = "{instruction} {input} {output}"
+
 def generator(text):
-
-
-
-
-
-
-
-
-
-
-
+    # Define the context if it's used in the prompt
+    context = " "
+
+    # Prepare the inputs
+    inputs = tokenizer(
+        [
+            alpaca_prompt.format(
+                instruction="انت ممثل خدمة العملاء لدى شركة فودافون. ودي معلومات ممكن تفيدك", # instruction
+                input=text, # input
+                output="" # output - leave this blank for generation!
+            )
+        ], return_tensors="pt"
+    ).to("cuda")
+
+    # Generate the output
+    outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
+
+    # Decode the output
+    return tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+# Streamlit application
+st.title("Text Generator")
+text = st.text_area('Enter some text!')
+
 if text:
-
-
+    out = generator(text)
+    st.json(out)
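The updated generator can be exercised directly once the Space's dependencies are installed. Below is a minimal smoke-test sketch, not part of the commit, assuming a CUDA GPU, the bitsandbytes package the 4-bit base checkpoint needs, and that app.py is importable from the working directory (its module-level Streamlit calls only emit bare-mode warnings outside a running Space); the sample question is hypothetical.

```python
# Hypothetical local smoke test for the updated app.py (not part of this commit).
# Importing app downloads the base model and the PEFT adapter, so the first run
# is slow and requires a GPU plus bitsandbytes for the 4-bit weights.
from app import generator

# Any customer-service style question works here; this one is made up.
print(generator("What prepaid bundles are available?"))
```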