youssef227 committed
Commit cc6fe85 · verified · 1 Parent(s): 67b99f2

Update app.py

Files changed (1)
  1. app.py +43 -23
app.py CHANGED
@@ -1,35 +1,55 @@
  from transformers import AutoTokenizer, AutoModelForCausalLM
- from peft import LoraConfig, get_peft_model
  import torch


- from peft import PeftModel, PeftConfig
- from transformers import AutoModelForCausalLM
-
  config = PeftConfig.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
- print("step 1 ")
  base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
  print("step 2")
  model = PeftModel.from_pretrained(base_model, "youssef227/llama-3-8b-Instruct-bnb-telcom-3")
  print("step 3")

- # Load the tokenizer and model
- # print("step 1 ")
- # tokenizer = AutoTokenizer.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
- # print("step 2 ")
- # model = AutoModelForCausalLM.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
  def generator(text):
-     inputs = tokenizer(
-         [
-             alpaca_prompt.format(
-                 f" {context}انت ممثل خدمة العملاء لدى شركة فودافون.و دي معلومات ممكن تفيدك", # instruction
-                 text, # input
-                 "", # output - leave this blank for generation!
-             )
-         ], return_tensors = "pt").to("cuda")
-     outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
-     return tokenizer.batch_decode(outputs)
- text = st.text_area('enter some text!')
  if text:
-     out = generator(text)
-     st.json(out)

  from transformers import AutoTokenizer, AutoModelForCausalLM
+ from peft import LoraConfig, get_peft_model, PeftModel, PeftConfig
  import torch
+ import streamlit as st
+
+ # Suppress the warning if necessary
+ import warnings
+ warnings.filterwarnings("ignore", category=FutureWarning)
+
+ # Load PEFT configuration
  config = PeftConfig.from_pretrained("youssef227/llama-3-8b-Instruct-bnb-telcom-3")
+ print("step 1")
+
+ # Load the base model
  base_model = AutoModelForCausalLM.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
  print("step 2")
+
+ # Apply PEFT configuration to the base model
  model = PeftModel.from_pretrained(base_model, "youssef227/llama-3-8b-Instruct-bnb-telcom-3")
  print("step 3")

+ # Load the tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("unsloth/llama-3-8b-Instruct-bnb-4bit")
+
+ # Define the prompt template
+ alpaca_prompt = "{instruction} {input} {output}"
+
  def generator(text):
+     # Define the context if it's used in the prompt
+     context = " "
+
+     # Prepare the inputs
+     inputs = tokenizer(
+         [
+             alpaca_prompt.format(
+                 instruction="انت ممثل خدمة العملاء لدى شركة فودافون.و دي معلومات ممكن تفيدك", # instruction
+                 input=text, # input
+                 output="" # output - leave this blank for generation!
+             )
+         ], return_tensors="pt"
+     ).to("cuda")
+
+     # Generate the output
+     outputs = model.generate(**inputs, max_new_tokens=64, use_cache=True)
+
+     # Decode the output
+     return tokenizer.batch_decode(outputs, skip_special_tokens=True)
+
+ # Streamlit application
+ st.title("Text Generator")
+ text = st.text_area('Enter some text!')
+
  if text:
+     out = generator(text)
+     st.json(out)
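
Note on the updated app.py: it loads the LoRA adapter youssef227/llama-3-8b-Instruct-bnb-telcom-3 on top of the 4-bit base model unsloth/llama-3-8b-Instruct-bnb-4bit, fills the alpaca_prompt template with an Arabic instruction (roughly: "You are a customer service representative at Vodafone, and here is some information that may help you"), and serves generation through a small Streamlit page (started with "streamlit run app.py"). The snippet below is not part of the commit; it is a minimal sketch of a device-aware variant of generator() that assumes the model, tokenizer, and alpaca_prompt objects defined above, and reads the model's actual device instead of hard-coding .to("cuda").

# Not part of the commit: a minimal sketch of a device-aware generator(),
# assuming the `model`, `tokenizer`, and `alpaca_prompt` objects from app.py exist.
import torch

def generate_reply(text, instruction, max_new_tokens=64):
    # Send inputs to whichever device the (possibly 4-bit) model was loaded onto,
    # rather than hard-coding "cuda".
    device = next(model.parameters()).device

    prompt = alpaca_prompt.format(
        instruction=instruction,  # e.g. the Arabic Vodafone prompt used in app.py
        input=text,
        output="",  # left blank so the model generates the answer
    )
    inputs = tokenizer([prompt], return_tensors="pt").to(device)

    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, use_cache=True)
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]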