Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -26,7 +26,7 @@ ADAPTER_ID = "Reubencf/gemma3-goan-finetuned"
|
|
26 |
|
27 |
# Base model - MUST match what you used for fine-tuning!
|
28 |
# Check your adapter's config.json for "base_model_name_or_path"
|
29 |
-
BASE_MODEL_ID = "google/gemma-2b-it"
|
30 |
# Common options:
|
31 |
# - "google/gemma-2b-it" (2B parameters, easier on memory)
|
32 |
# - "unsloth/gemma-2-2b-it-bnb-4bit" (quantized version)
|
@@ -181,7 +181,7 @@ def load_model_and_tokenizer():
|
|
181 |
print("[Fallback] Trying with gemma-2b-it...")
|
182 |
try:
|
183 |
base_model = AutoModelForCausalLM.from_pretrained(
|
184 |
-
"google/gemma-2b-it",
|
185 |
token=HF_TOKEN,
|
186 |
trust_remote_code=True,
|
187 |
low_cpu_mem_usage=True,
|
@@ -190,7 +190,7 @@ def load_model_and_tokenizer():
|
|
190 |
).to("cpu")
|
191 |
|
192 |
tokenizer = AutoTokenizer.from_pretrained(
|
193 |
-
"google/gemma-2b-it",
|
194 |
token=HF_TOKEN,
|
195 |
trust_remote_code=True,
|
196 |
)
|
|
|
26 |
|
27 |
# Base model - MUST match what you used for fine-tuning!
|
28 |
# Check your adapter's config.json for "base_model_name_or_path"
|
29 |
+
BASE_MODEL_ID = "google/gemma-3-4b-it" # Change this to your actual base model
|
30 |
# Common options:
|
31 |
# - "google/gemma-2b-it" (2B parameters, easier on memory)
|
32 |
# - "unsloth/gemma-2-2b-it-bnb-4bit" (quantized version)
|
|
|
181 |
print("[Fallback] Trying with gemma-2b-it...")
|
182 |
try:
|
183 |
base_model = AutoModelForCausalLM.from_pretrained(
|
184 |
+
"google/gemma-3-4b-it",
|
185 |
token=HF_TOKEN,
|
186 |
trust_remote_code=True,
|
187 |
low_cpu_mem_usage=True,
|
|
|
190 |
).to("cpu")
|
191 |
|
192 |
tokenizer = AutoTokenizer.from_pretrained(
|
193 |
+
"google/gemma-3-4b-it",
|
194 |
token=HF_TOKEN,
|
195 |
trust_remote_code=True,
|
196 |
)
|