danrdoran committed on
Commit
192c399
·
verified ·
1 Parent(s): f600e4e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -10
app.py CHANGED
@@ -1,15 +1,5 @@
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
- from peft import get_peft_model, LoraConfig
4
-
5
- # Define the same LoRA configuration used during fine-tuning
6
- lora_config = LoraConfig(
7
- r=8, # Low-rank parameter
8
- lora_alpha=32, # Scaling parameter
9
- lora_dropout=0.1, # Dropout rate
10
- target_modules=["q", "v"], # The attention layers to apply LoRA to
11
- bias="none"
12
- )
13
 
14
  # Load the model and tokenizer from Hugging Face's hub
15
  tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
 
1
  import streamlit as st
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
 
 
 
 
 
 
 
 
 
 
3
 
4
  # Load the model and tokenizer from Hugging Face's hub
5
  tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")