cheberle committed on
Commit
29eca75
·
1 Parent(s): 57b74e3
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
@@ -13,10 +13,20 @@ print("PEFT Base Model:", peft_config.base_model_name_or_path)
13
 
14
  # 2. Load the tokenizer & base model
15
  tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
16
- base_model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, revision="4831ee1375be5b4ff5a4abf7984e13628db44e35", ignore_mismatched_sizes=True, trust_remote_code=True)
 
 
 
 
 
 
17
 
18
  # 3. Load your LoRA adapter weights onto the base model
19
- model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
 
 
 
 
20
 
21
  def classify_text(text):
22
  """
 
13
 
14
  # 2. Load the tokenizer & base model
15
  tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
16
+ base_model = AutoModelForCausalLM.from_pretrained(
17
+ BASE_MODEL,
18
+ revision="4831ee1375be5b4ff5a4abf7984e13628db44e35",
19
+ ignore_mismatched_sizes=True,
20
+ trust_remote_code=True,
21
+ device_map="auto",
22
+ )
23
 
24
  # 3. Load your LoRA adapter weights onto the base model
25
+ model = PeftModel.from_pretrained(
26
+ base_model,
27
+ ADAPTER_REPO,
28
+ ignore_mismatched_sizes=True, # Add this parameter
29
+ )
30
 
31
  def classify_text(text):
32
  """