rivapereira123 committed on
Commit
dac4ddd
·
verified ·
1 Parent(s): f67b75b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -21
app.py CHANGED
@@ -311,27 +311,18 @@ class OptimizedGazaRAGSystem:
311
 
312
 
313
  def _initialize_llm(self):
314
- """Initialize FLAN-T5 model (CPU-friendly)"""
315
- model_name = "google/flan-t5-base"
316
- try:
317
- logger.info(f"πŸ”„ Loading fallback CPU model: {model_name}")
318
-
319
- self.tokenizer = AutoTokenizer.from_pretrained(model_name)
320
- self.llm = AutoModelForCausalLM.from_pretrained(model_name)
321
-
322
- self.generation_pipeline = pipeline(
323
- "text2text-generation", # <-- Important for T5!
324
- model=self.llm,
325
- tokenizer=self.tokenizer,
326
- return_full_text=False
327
- )
328
-
329
- logger.info("βœ… FLAN-T5 model loaded successfully")
330
-
331
- except Exception as e:
332
- logger.error(f"❌ Error loading FLAN-T5 model: {e}")
333
- self.llm = None
334
- self.generation_pipeline = None
335
 
336
 
337
 
 
311
 
312
 
313
  def _initialize_llm(self):
314
+ """Initialize FLAN-T5 model (CPU-friendly)"""
315
+ model_name = "google/flan-t5-base"
316
+ try:
317
+ logger.info(f"πŸ”„ Loading fallback CPU model: {model_name}")
318
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
319
+ self.llm = AutoModelForCausalLM.from_pretrained(model_name)
320
+ self.generation_pipeline = pipeline("text2text-generation", model=self.llm,tokenizer=self.tokenizer,return_full_text=False)
321
+ logger.info("βœ… FLAN-T5 model loaded successfully")
322
+ except Exception as e:
323
+ logger.error(f"❌ Error loading FLAN-T5 model: {e}")
324
+ self.llm = None
325
+ self.generation_pipeline = None
 
 
 
 
 
 
 
 
 
326
 
327
 
328