File size: 1,721 Bytes
39a15c5
cc6fe85
d41889b
cc6fe85
67b99f2
cc6fe85
 
 
67b99f2
cc6fe85
67b99f2
cc6fe85
 
 
67b99f2
 
cc6fe85
 
67b99f2
 
 
cc6fe85
 
 
 
 
 
d41889b
cc6fe85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d41889b
cc6fe85
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, PeftModel, PeftConfig
import torch
import streamlit as st

# Suppress the warning if necessary
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)

@st.cache_resource
def _load_pipeline():
    """Load the PEFT config, base model, LoRA adapter, and tokenizer once.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the 8B model would be reloaded on each rerun.
    ``st.cache_resource`` keeps one shared copy per server process.

    Returns:
        tuple: (PeftConfig, base model, adapted PeftModel, tokenizer).
    """
    # Load PEFT configuration for the fine-tuned adapter
    cfg = PeftConfig.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
    print("step 1")

    # Load the 4-bit quantized base model
    base = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
    print("step 2")

    # Apply the LoRA adapter weights on top of the base model
    adapted = PeftModel.from_pretrained(base, "youssef227/llama-3-8b-Instruct-bnb-telcom-3")
    print("step 3")

    # Tokenizer matching the base checkpoint
    tok = AutoTokenizer.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
    return cfg, base, adapted, tok


# Keep the original module-level names so any external references still work.
config, base_model, model, tokenizer = _load_pipeline()

# Prompt template: instruction, user input, and an (empty) output slot.
alpaca_prompt = "{instruction} {input} {output}"

def generator(text):
    """Generate a customer-service reply for *text* with the fine-tuned model.

    Args:
        text: The user's message, inserted into the prompt's input slot.

    Returns:
        list[str]: Batch-decoded generation(s); one element for a single input.
    """
    # Build the prompt; the output slot is left empty so the model completes it.
    prompt = alpaca_prompt.format(
        # Arabic system instruction, roughly: "You are a customer-service
        # representative at Vodafone, and this is information that may help you."
        instruction="ุงู†ุช ู…ู…ุซู„ ุฎุฏู…ุฉ ุงู„ุนู…ู„ุงุก ู„ุฏู‰ ุดุฑูƒุฉ ููˆุฏุงููˆู†.ูˆ ุฏูŠ ู…ุนู„ูˆู…ุงุช ู…ู…ูƒู† ุชููŠุฏูƒ",
        input=text,
        output="",  # leave blank for generation
    )

    # Tokenize and move tensors to wherever the model actually lives.
    # (The original hard-coded "cuda", which crashes on CPU-only hosts and
    # can mismatch the model's device; it also defined an unused `context`.)
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)

    # Generate up to 64 new tokens; KV-cache enabled for speed.
    outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)

    # Decode the full batch back to text, dropping special tokens.
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)

# --- Streamlit UI -----------------------------------------------------------
# Title, a free-text input box, and the generated output rendered as JSON.
st.title("Text Generator")

user_input = st.text_area('Enter some text!')

# Only run generation once the user has typed something (empty string is falsy).
if user_input:
    st.json(generator(user_input))