Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -311,27 +311,18 @@ class OptimizedGazaRAGSystem:
|
|
311 |
|
312 |
|
313 |
def _initialize_llm(self):
|
314 |
-
|
315 |
-
|
316 |
-
|
317 |
-
|
318 |
-
|
319 |
-
|
320 |
-
|
321 |
-
|
322 |
-
|
323 |
-
"
|
324 |
-
|
325 |
-
|
326 |
-
return_full_text=False
|
327 |
-
)
|
328 |
-
|
329 |
-
logger.info("β
FLAN-T5 model loaded successfully")
|
330 |
-
|
331 |
-
except Exception as e:
|
332 |
-
logger.error(f"β Error loading FLAN-T5 model: {e}")
|
333 |
-
self.llm = None
|
334 |
-
self.generation_pipeline = None
|
335 |
|
336 |
|
337 |
|
|
|
311 |
|
312 |
|
313 |
def _initialize_llm(self):
    """Initialize the fallback FLAN-T5 text-generation model (CPU-friendly).

    Loads `google/flan-t5-base` and builds a `text2text-generation`
    pipeline. On any failure, logs the error and degrades gracefully by
    setting `self.llm` and `self.generation_pipeline` to None so callers
    can detect that no LLM is available.
    """
    # Local import so this fix stands alone even if the top-of-file
    # import list only pulls in AutoModelForCausalLM.
    from transformers import AutoModelForSeq2SeqLM

    model_name = "google/flan-t5-base"
    try:
        logger.info(f"🔄 Loading fallback CPU model: {model_name}")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        # FLAN-T5 is an encoder-decoder (seq2seq) model; loading it via
        # AutoModelForCausalLM raises a ValueError, which previously made
        # this method always fall into the except branch.
        self.llm = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        # NOTE: `return_full_text` is a text-generation-only parameter and
        # is not accepted by the text2text-generation pipeline, so it is
        # intentionally omitted here.
        self.generation_pipeline = pipeline(
            "text2text-generation",
            model=self.llm,
            tokenizer=self.tokenizer,
        )
        logger.info("✅ FLAN-T5 model loaded successfully")
    except Exception as e:
        # Best-effort fallback: record the failure and leave the system
        # without an LLM rather than crashing initialization.
        logger.error(f"❌ Error loading FLAN-T5 model: {e}")
        self.llm = None
        self.generation_pipeline = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
326 |
|
327 |
|
328 |
|