rivapereira123 committed
Commit a135d5f · verified · 1 Parent(s): 4c38b03

Delete app.py

Files changed (1): app.py +0 -1341
app.py DELETED
@@ -1,1341 +0,0 @@
import os
import sys
import json
import logging
import warnings
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple
import hashlib
import pickle
from datetime import datetime
import time
import asyncio
from concurrent.futures import ThreadPoolExecutor

# Suppress warnings for cleaner output
warnings.filterwarnings("ignore")

# Core dependencies
import gradio as gr
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
import faiss
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    pipeline
)

# Document processing
from llama_index.core import Document, VectorStoreIndex, Settings
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.faiss import FaissVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.core import StorageContext

# PDF processing
from unstructured.partition.pdf import partition_pdf
from llama_index.core.schema import Document as LlamaDocument

# Medical knowledge validation
import re

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class MedicalFactChecker:
    """Enhanced medical fact checker with fast validation"""

    def __init__(self):
        self.medical_facts = self._load_medical_facts()
        self.contraindications = self._load_contraindications()
        self.dosage_patterns = self._compile_dosage_patterns()
        self.definitive_patterns = [
            re.compile(r, re.IGNORECASE) for r in [
                r'always\s+(?:use|take|apply)',
                r'never\s+(?:use|take|apply)',
                r'will\s+(?:cure|heal|fix)',
                r'guaranteed\s+to',
                r'completely\s+(?:safe|effective)'
            ]
        ]

    def _load_medical_facts(self) -> Dict[str, Any]:
        """Pre-loaded medical facts for the Gaza context"""
        return {
            "burn_treatment": {
                "cool_water": "Use clean, cool (not ice-cold) water for 10-20 minutes",
                "no_ice": "Never apply ice directly to burns",
                "clean_cloth": "Cover with clean, dry cloth if available"
            },
            "wound_care": {
                "pressure": "Apply direct pressure to control bleeding",
                "elevation": "Elevate injured limb if possible",
                "clean_hands": "Clean hands before treating wounds when possible"
            },
            "infection_signs": {
                "redness": "Increasing redness around wound",
                "warmth": "Increased warmth at wound site",
                "pus": "Yellow or green discharge",
                "fever": "Fever may indicate systemic infection"
            }
        }

    def _load_contraindications(self) -> Dict[str, List[str]]:
        """Pre-loaded contraindications for common treatments"""
        return {
            "aspirin": ["children under 16", "bleeding disorders", "stomach ulcers"],
            "ibuprofen": ["kidney disease", "heart failure", "stomach bleeding"],
            "hydrogen_peroxide": ["deep wounds", "closed wounds", "eyes"],
            "tourniquets": ["non-life-threatening bleeding", "without proper training"]
        }

    def _compile_dosage_patterns(self) -> List[re.Pattern]:
        """Pre-compiled dosage patterns"""
        patterns = [
            r'\d+\s*mg\b',                       # milligrams
            r'\d+\s*g\b',                        # grams
            r'\d+\s*ml\b',                       # milliliters
            r'\d+\s*tablets?\b',                 # tablets
            r'\d+\s*times?\s+(?:per\s+)?day\b',  # frequency
            r'every\s+\d+\s+hours?\b'            # intervals
        ]
        return [re.compile(pattern, re.IGNORECASE) for pattern in patterns]

    def check_medical_accuracy(self, response: str, context: str) -> Dict[str, Any]:
        """Enhanced medical accuracy check with Gaza-specific considerations"""
        issues = []
        warnings = []
        accuracy_score = 0.0

        # Check for contraindications (fast keyword matching)
        response_lower = response.lower()
        for medication, contra_list in self.contraindications.items():
            if medication in response_lower:
                for contra in contra_list:
                    if any(word in response_lower for word in contra.split()):
                        issues.append(f"Potential contraindication: {medication} with {contra}")
                        accuracy_score -= 0.3
                        break

        # Context alignment using Jaccard similarity
        if context:
            resp_words = set(response_lower.split())
            ctx_words = set(context.lower().split())
            context_similarity = len(resp_words & ctx_words) / len(resp_words | ctx_words) if ctx_words else 0.0
            if context_similarity < 0.5:  # Lowered threshold for Gaza context
                warnings.append(f"Low context similarity: {context_similarity:.2f}")
                accuracy_score -= 0.1
        else:
            context_similarity = 0.0

        # Gaza-specific resource checks
        gaza_resources = ["clean water", "sterile", "hospital", "ambulance", "electricity"]
        if any(resource in response_lower for resource in gaza_resources):
            warnings.append("Consider resource limitations in Gaza context")
            accuracy_score -= 0.05

        # Unsupported claims check
        for pattern in self.definitive_patterns:
            if pattern.search(response):
                issues.append("Unsupported definitive claim detected")
                accuracy_score -= 0.4
                break

        # Dosage validation
        for pattern in self.dosage_patterns:
            if pattern.search(response):
                warnings.append("Dosage detected - verify with professional")
                accuracy_score -= 0.1
                break

        confidence_score = max(0.0, min(1.0, 0.8 + accuracy_score))

        return {
            "confidence_score": confidence_score,
            "issues": issues,
            "warnings": warnings,
            "context_similarity": context_similarity,
            "is_safe": len(issues) == 0 and confidence_score > 0.5
        }
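
# --- Editor's sketch (illustrative; not part of the deleted file): how the
# scoring above behaves. Confidence starts from a 0.8 baseline, each matched
# pattern subtracts its penalty, and any recorded issue forces is_safe to False.
def _demo_fact_check() -> None:
    checker = MedicalFactChecker()
    result = checker.check_medical_accuracy(
        "This will cure the wound. Take 500 mg every 6 hours.",
        context="Apply direct pressure to control bleeding",
    )
    # "will cure" matches a definitive-claim pattern (-0.4), "500 mg" a dosage
    # pattern (-0.1), and the low word overlap with the context adds -0.1,
    # leaving confidence_score at roughly 0.2 with is_safe False.
    print(result["confidence_score"], result["is_safe"])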

class EnhancedGazaKnowledgeBase:
    """Handles mismatched index/chunks by loading only matching pairs"""

    def __init__(self, data_dir: str = "./data"):
        self.data_dir = Path(data_dir)
        self.vector_store = None
        self.faiss_index = None   # raw FAISS index, kept for direct searches
        self.index = None
        self.text_chunks = []
        self.chunk_metadata = []
        self._embed_model = None  # lazily-loaded query embedder
        # Keyword patterns for _assess_priority. (Defaults inferred from the
        # priority names used below; tune these to the actual corpus.)
        self.medical_priorities = {
            "emergency": [r'\bbleed', r'\bairway\b', r'\bshock\b', r'\bunconscious\b'],
            "trauma": [r'\bburn', r'\bfracture', r'\bwound', r'\bgunshot\b'],
            "gaza_specific": [r'\bsiege\b', r'\bblockade\b', r'\bpower\s+outage', r'\blimited\s+water'],
            "infectious": [r'\binfect', r'\bfever\b', r'\bpus\b'],
            "chronic": [r'\bdiabet', r'\bhypertension\b', r'\basthma\b']
        }

    def initialize(self):
        """Loads available data with mismatch handling"""
        try:
            # 1. Load FAISS index
            faiss_path = self.data_dir / "index.faiss"
            if not faiss_path.exists():
                raise FileNotFoundError(f"Missing FAISS index at {faiss_path}")

            faiss_index = faiss.read_index(str(faiss_path))
            self.faiss_index = faiss_index
            self.vector_store = FaissVectorStore(faiss_index=faiss_index)
            vector_count = faiss_index.ntotal

            # 2. Load available chunks
            chunks_path = self.data_dir / "chunks.txt"
            if not chunks_path.exists():
                raise FileNotFoundError(f"Missing text chunks at {chunks_path}")

            with open(chunks_path, 'r', encoding='utf-8') as f:
                all_chunks = [line.strip() for line in f if line.strip()]

            # 3. Handle mismatch by taking the first N chunks
            if len(all_chunks) != vector_count:
                logger.warning(
                    f"Using first {vector_count}/{len(all_chunks)} chunks "
                    f"to match index size"
                )
                self.text_chunks = all_chunks[:vector_count]
            else:
                self.text_chunks = all_chunks

            # 4. Create index structure
            self.index = VectorStoreIndex.from_documents(
                [Document(text="dummy")],  # Placeholder
                storage_context=StorageContext.from_defaults(
                    vector_store=self.vector_store
                ),
                embed_model=None  # Skip re-embedding
            )

            logger.info(f"Loaded {len(self.text_chunks)} chunks with {vector_count} vectors")

        except Exception as e:
            logger.error(f"Initialization failed: {str(e)}")
            raise

    def search_raw(self, query: str, k: int = 5) -> List[Dict[str, Any]]:
        """Search the raw FAISS index directly using existing embeddings.

        Note: the retriever-based search() below is the entry point the RAG
        system calls; this direct path is kept as a lower-level alternative.
        """
        if self.faiss_index is None:
            raise RuntimeError("Index not initialized")

        # Get raw FAISS results
        query_embedding = self._embed_query(query)
        distances, indices = self.faiss_index.search(
            np.array([query_embedding], dtype=np.float32), k
        )

        # Map to text chunks
        results = []
        for i, idx in enumerate(indices[0]):
            if idx >= 0:  # FAISS returns -1 for invalid indices
                results.append({
                    "text": self.text_chunks[idx],
                    "metadata": self.chunk_metadata[idx] if idx < len(self.chunk_metadata) else {},
                    "score": float(distances[0][i])
                })
        return results

    def _embed_query(self, query: str) -> np.ndarray:
        """Embed queries with the same sentence-transformer family used for the index"""
        # Cache the model so it is not reloaded on every query
        if self._embed_model is None:
            self._embed_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
        return self._embed_model.encode(query)

    def _load_vector_store(self):
        """Load an existing persisted vector store (alternative loader; not called by initialize())"""
        try:
            from llama_index.core import load_index_from_storage

            faiss_index = faiss.read_index(str(self.data_dir / "index.faiss"))
            vector_store = FaissVectorStore(faiss_index=faiss_index)

            storage_context = StorageContext.from_defaults(
                vector_store=vector_store,
                persist_dir=str(self.data_dir)
            )

            self.index = load_index_from_storage(storage_context=storage_context)

            metadata_path = self.data_dir / "metadata.pkl"
            if metadata_path.exists():
                with open(metadata_path, 'rb') as f:
                    self.chunk_metadata = pickle.load(f)

            logger.info(f"Loaded existing vector store with {len(self.chunk_metadata)} chunks")

        except Exception as e:
            logger.error(f"Error loading vector store: {e}")
            raise

    def search(self, query: str, k: int = 5) -> List[Dict[str, Any]]:
        """Enhanced search with better error handling and result processing"""
        if not self.index:
            logger.warning("Index not available for search")
            return []

        try:
            retriever = self.index.as_retriever(similarity_top_k=k)
            results = retriever.retrieve(query)

            # Handle the tuple-object error by properly extracting node and score
            processed_results = []
            for result in results:
                try:
                    # Handle both tuple and direct node results
                    if isinstance(result, tuple):
                        node, score = result
                    else:
                        node = result
                        score = getattr(result, 'score', 0.0)

                    # Extract text safely
                    text = getattr(node, 'text', str(node))
                    source = node.metadata.get("source", "unknown") if hasattr(node, 'metadata') else "unknown"

                    processed_results.append({
                        "text": text,
                        "source": source,
                        "score": float(score) if score is not None else 0.0,
                        "medical_priority": self._assess_priority(text)
                    })
                except Exception as e:
                    logger.error(f"Error processing search result: {e}")
                    continue

            # Sort by score (higher is better)
            processed_results.sort(key=lambda x: x['score'], reverse=True)

            logger.info(f"Search returned {len(processed_results)} results for query: {query[:50]}...")
            return processed_results

        except Exception as e:
            logger.error(f"Error during search: {e}")
            return []

    def _assess_priority(self, text: str) -> str:
        """Enhanced medical priority assessment"""
        text_lower = text.lower()

        # Check priorities in order of importance
        priority_order = ["emergency", "trauma", "gaza_specific", "infectious", "chronic"]

        for priority in priority_order:
            keywords = self.medical_priorities.get(priority, [])
            if any(re.search(keyword, text_lower) for keyword in keywords):
                return priority

        return "general"
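
# --- Editor's sketch (illustrative; not part of the deleted file): the on-disk
# layout initialize() expects — a FAISS index plus one text chunk per line in
# chunks.txt, embedded with the same all-MiniLM-L6-v2 model that _embed_query()
# loads (384-dimensional vectors).
def _build_demo_knowledge_base(data_dir: str = "./data") -> None:
    chunks = [
        "Apply direct pressure to control bleeding.",
        "Cool burns with clean, cool water for 10-20 minutes.",
    ]
    model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
    embeddings = np.asarray(model.encode(chunks), dtype=np.float32)
    demo_index = faiss.IndexFlatL2(embeddings.shape[1])
    demo_index.add(embeddings)

    out_dir = Path(data_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    faiss.write_index(demo_index, str(out_dir / "index.faiss"))
    (out_dir / "chunks.txt").write_text("\n".join(chunks), encoding='utf-8')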

class EnhancedGazaRAGSystem:
    """Enhanced RAG system with better performance and error handling"""

    def __init__(self):
        self.knowledge_base = EnhancedGazaKnowledgeBase()
        self.fact_checker = MedicalFactChecker()
        self.llm = None
        self.tokenizer = None
        self.system_prompt = self._create_system_prompt()
        self.generation_pipeline = None
        self.response_cache = {}  # Simple response caching
        self.executor = ThreadPoolExecutor(max_workers=2)  # For async processing

    def initialize(self):
        """Initialize the RAG system by initializing the knowledge base"""
        self.knowledge_base.initialize()
        logger.info("Enhanced Gaza RAG System initialized")

    def _initialize_llm(self):
        """Enhanced LLM initialization with better error handling"""
        if self.llm is not None:
            return

        model_name = "microsoft/Phi-3-mini-4k-instruct"
        try:
            logger.info(f"Loading LLM: {model_name}")

            # Enhanced quantization configuration
            quantization_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16,
            )

            self.tokenizer = AutoTokenizer.from_pretrained(
                model_name,
                trust_remote_code=True,
                padding_side="left"
            )

            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.llm = AutoModelForCausalLM.from_pretrained(
                model_name,
                quantization_config=quantization_config,
                device_map="auto",
                trust_remote_code=True,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True
            )

            self.generation_pipeline = pipeline(
                "text-generation",
                model=self.llm,
                tokenizer=self.tokenizer,
                device_map="auto",
                torch_dtype=torch.float16,
                return_full_text=False
            )

            logger.info("LLM loaded successfully")

        except Exception as e:
            logger.error(f"Error loading primary model: {e}")
            self._initialize_fallback_llm()

    def _initialize_fallback_llm(self):
        """Enhanced fallback model with better error handling"""
        try:
            logger.info("Loading fallback model...")

            fallback_model = "microsoft/DialoGPT-small"
            self.tokenizer = AutoTokenizer.from_pretrained(fallback_model)
            self.llm = AutoModelForCausalLM.from_pretrained(
                fallback_model,
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True
            )

            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.generation_pipeline = pipeline(
                "text-generation",
                model=self.llm,
                tokenizer=self.tokenizer,
                return_full_text=False
            )

            logger.info("Fallback model loaded successfully")

        except Exception as e:
            logger.error(f"Fallback model failed: {e}")
            self.llm = None
            self.generation_pipeline = None
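
    # --- Editor's note (illustrative arithmetic, not from the original file):
    # 4-bit NF4 stores roughly half a byte per weight, so Phi-3-mini's ~3.8B
    # parameters occupy about 3.8e9 * 0.5 / 1e9 ≈ 1.9 GB as quantized weights,
    # versus ~7.6 GB in float16. That gap is what makes the "minimum 4GB RAM"
    # guidance printed in main() plausible on modest hardware.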

    def _create_system_prompt(self) -> str:
        """Enhanced system prompt for the Gaza context"""
        return """You are a medical AI assistant specifically designed for Gaza healthcare workers operating under siege conditions.

CRITICAL GUIDELINES:
- Provide practical first aid guidance considering limited resources (water, electricity, medical supplies)
- Always prioritize patient safety and recommend professional medical help when available
- Consider Gaza's specific challenges: blockade, limited hospitals, frequent power outages
- Suggest alternative treatments when standard medical supplies are unavailable
- Never provide definitive diagnoses - only supportive care guidance
- Be culturally sensitive and aware of the humanitarian crisis context

RESOURCE CONSTRAINTS TO CONSIDER:
- Limited clean water availability
- Frequent electricity outages
- Restricted medical supply access
- Overwhelmed healthcare facilities
- Limited transportation for medical emergencies

Provide clear, actionable advice while emphasizing the need for professional medical care when possible."""

    async def generate_response_async(self, query: str, progress_callback=None) -> Dict[str, Any]:
        """Async response generation with progress tracking"""
        start_time = time.time()

        if progress_callback:
            progress_callback(0.1, "Checking cache...")

        # Check cache first
        query_hash = hashlib.md5(query.encode()).hexdigest()
        if query_hash in self.response_cache:
            cached_response = self.response_cache[query_hash]
            cached_response["cached"] = True
            cached_response["response_time"] = 0.1
            if progress_callback:
                progress_callback(1.0, "Retrieved from cache!")
            return cached_response

        try:
            if progress_callback:
                progress_callback(0.2, "Initializing LLM...")

            # Initialize LLM only when needed
            if self.llm is None:
                await asyncio.get_event_loop().run_in_executor(
                    self.executor, self._initialize_llm
                )

            if progress_callback:
                progress_callback(0.4, "Searching knowledge base...")

            # Enhanced knowledge retrieval
            search_results = await asyncio.get_event_loop().run_in_executor(
                self.executor, self.knowledge_base.search, query, 3
            )

            if progress_callback:
                progress_callback(0.6, "Preparing context...")

            context = self._prepare_context(search_results)

            if progress_callback:
                progress_callback(0.8, "Generating response...")

            # Generate response
            response = await asyncio.get_event_loop().run_in_executor(
                self.executor, self._generate_response, query, context
            )

            if progress_callback:
                progress_callback(0.9, "Validating safety...")

            # Enhanced safety check
            safety_check = self.fact_checker.check_medical_accuracy(response, context)

            # Prepare final response
            final_response = self._prepare_final_response(
                response,
                search_results,
                safety_check,
                time.time() - start_time
            )

            # Cache the response (limit cache size)
            if len(self.response_cache) < 100:
                self.response_cache[query_hash] = final_response

            if progress_callback:
                progress_callback(1.0, "Complete!")

            return final_response

        except Exception as e:
            logger.error(f"Error generating response: {e}")
            if progress_callback:
                progress_callback(1.0, f"Error: {str(e)}")
            return self._create_error_response(str(e))

    def _generate_response(self, query: str, context: str) -> str:
        """Enhanced response generation using model.generate() to avoid DynamicCache errors"""
        if self.llm is None or self.tokenizer is None:
            return self._generate_fallback_response(query, context)

        # Build prompt with Gaza-specific context
        prompt = f"""{self.system_prompt}

MEDICAL KNOWLEDGE CONTEXT:
{context}

PATIENT QUESTION: {query}

RESPONSE (provide practical, Gaza-appropriate medical guidance):"""

        try:
            # Tokenize and move to the correct device
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.llm.device)

            # Generate the response
            outputs = self.llm.generate(
                **inputs,
                max_new_tokens=800,
                temperature=0.5,
                pad_token_id=self.tokenizer.eos_token_id,
                do_sample=True,
                repetition_penalty=1.15,
            )

            # Decode only the newly generated tokens (generate() returns the
            # prompt tokens followed by the completion)
            new_tokens = outputs[0][inputs['input_ids'].shape[1]:]
            response_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)

            # Remove repeated lines
            lines = response_text.split('\n')
            unique_lines = []
            for line in lines:
                line = line.strip()
                if line and line not in unique_lines:
                    unique_lines.append(line)
            return '\n'.join(unique_lines)

        except Exception as e:
            logger.error(f"Error in LLM generate(): {e}")
            # Fall back to the pipeline path, then to the static fallback
            return self._generate_response_pipeline(query, context)

    def _prepare_context(self, search_results: List[Dict[str, Any]]) -> str:
        """Enhanced context preparation with better formatting"""
        if not search_results:
            return "No specific medical guidance found in knowledge base. Provide general first aid principles."

        context_parts = []
        for i, result in enumerate(search_results, 1):
            source = result.get('source', 'unknown')
            text = result.get('text', '')
            priority = result.get('medical_priority', 'general')

            # Truncate long text but preserve important information
            if len(text) > 400:
                text = text[:400] + "..."

            context_parts.append(f"[Source {i}: {source} - Priority: {priority}]\n{text}")

        return "\n\n".join(context_parts)

    def _generate_response_pipeline(self, query: str, context: str) -> str:
        """Pipeline-based generation, kept as a secondary path behind
        _generate_response() above"""
        if not self.generation_pipeline:
            return self._generate_fallback_response(query, context)

        # Enhanced prompt structure
        prompt = f"""{self.system_prompt}

MEDICAL KNOWLEDGE CONTEXT:
{context}

PATIENT QUESTION: {query}

RESPONSE (provide practical, Gaza-appropriate medical guidance):"""

        try:
            # Enhanced generation parameters
            response = self.generation_pipeline(
                prompt,
                max_new_tokens=300,  # Increased for more detailed responses
                temperature=0.2,     # Lower for more consistent medical advice
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.15,
                truncation=True,
                num_return_sequences=1
            )

            if response and len(response) > 0:
                generated_text = response[0]['generated_text'].strip()

                # Remove any repetitive patterns
                lines = generated_text.split('\n')
                unique_lines = []
                for line in lines:
                    if line.strip() and line.strip() not in unique_lines:
                        unique_lines.append(line.strip())

                return '\n'.join(unique_lines)
            else:
                return self._generate_fallback_response(query, context)

        except Exception as e:
            logger.error(f"Error in LLM generation: {e}")
            return self._generate_fallback_response(query, context)

    def _generate_fallback_response(self, query: str, context: str) -> str:
        """Enhanced fallback response with Gaza-specific guidance"""
        gaza_guidance = {
            "burn": "For burns: Use clean, cool water if available. If water is scarce, use clean cloth. Avoid ice. Seek medical help urgently.",
            "bleeding": "For bleeding: Apply direct pressure with clean cloth. Elevate if possible. If severe, seek immediate medical attention.",
            "wound": "For wounds: Clean hands if possible. Apply pressure to stop bleeding. Cover with clean material. Watch for infection signs.",
            "infection": "Signs of infection: Redness, warmth, swelling, pus, fever. Seek medical care immediately if available.",
            "pain": "For pain management: Rest, elevation, cold/warm compress as appropriate. Avoid aspirin in children."
        }

        query_lower = query.lower()
        for condition, guidance in gaza_guidance.items():
            if condition in query_lower:
                return f"{guidance}\n\nContext from medical sources:\n{context[:200]}..."

        return f"Medical guidance for: {query}\n\nGeneral advice: Prioritize safety, seek professional help when available, consider resource limitations in Gaza.\n\nRelevant information:\n{context[:300]}..."

    def _prepare_final_response(
        self,
        response: str,
        search_results: List[Dict[str, Any]],
        safety_check: Dict[str, Any],
        response_time: float
    ) -> Dict[str, Any]:
        """Enhanced final response preparation with more metadata"""

        # Add safety warnings if needed
        if not safety_check["is_safe"]:
            response = f"⚠️ MEDICAL CAUTION: {response}\n\n🚨 Please verify this guidance with a medical professional when possible."

        # Add Gaza-specific disclaimer
        response += "\n\n📍 Gaza Context: This guidance considers resource limitations. Adapt based on available supplies and seek professional medical care when accessible."

        # Extract unique sources
        sources = list(set(res.get("source", "unknown") for res in search_results)) if search_results else []

        # Calculate confidence based on multiple factors
        base_confidence = safety_check.get("confidence_score", 0.5)
        context_bonus = 0.1 if search_results else 0.0
        safety_penalty = 0.2 if not safety_check.get("is_safe", True) else 0.0

        final_confidence = max(0.0, min(1.0, base_confidence + context_bonus - safety_penalty))

        return {
            "response": response,
            "confidence": final_confidence,
            "sources": sources,
            "search_results_count": len(search_results),
            "safety_issues": safety_check.get("issues", []),
            "safety_warnings": safety_check.get("warnings", []),
            "response_time": round(response_time, 2),
            "timestamp": datetime.now().isoformat()[:19],
            "cached": False
        }

    def _create_error_response(self, error_msg: str) -> Dict[str, Any]:
        """Enhanced error response with helpful information"""
        return {
            "response": f"⚠️ System Error: Unable to process your medical query at this time.\n\nError: {error_msg}\n\n🚨 For immediate medical emergencies, seek professional help directly.\n\n📞 Gaza Emergency Numbers:\n- Palestinian Red Crescent: 101\n- Civil Defense: 102",
            "confidence": 0.0,
            "sources": [],
            "search_results_count": 0,
            "safety_issues": ["System error occurred"],
            "safety_warnings": ["Unable to validate medical accuracy"],
            "response_time": 0.0,
            "timestamp": datetime.now().isoformat()[:19],
            "cached": False,
            "error": True
        }
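
# --- Editor's sketch (illustrative; not part of the deleted file): driving the
# async pipeline directly, mirroring what process_medical_query_with_progress
# below does without the Gradio progress wiring.
def _demo_rag_query(question: str) -> Dict[str, Any]:
    system = EnhancedGazaRAGSystem()
    system.initialize()  # expects ./data/index.faiss and ./data/chunks.txt
    return asyncio.run(system.generate_response_async(question))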

# Global system instance
enhanced_rag_system = None

def initialize_enhanced_system():
    """Initialize enhanced system with better error handling"""
    global enhanced_rag_system
    if enhanced_rag_system is None:
        try:
            enhanced_rag_system = EnhancedGazaRAGSystem()
            enhanced_rag_system.initialize()
            logger.info("Enhanced Gaza RAG System initialized successfully")
        except Exception as e:
            logger.error(f"Failed to initialize enhanced system: {e}")
            raise
    return enhanced_rag_system

def process_medical_query_with_progress(query: str, progress=gr.Progress()) -> Tuple[str, str, str]:
    """Enhanced query processing with detailed progress tracking and status updates"""
    if not query.strip():
        return "Please enter a medical question.", "", "⚠️ No query provided"

    try:
        # Initialize system with progress
        progress(0.05, desc="🔧 Initializing system...")
        system = initialize_enhanced_system()

        # Create async event loop for progress tracking
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        def progress_callback(value, desc):
            progress(value, desc=desc)

        try:
            # Run async generation with progress
            result = loop.run_until_complete(
                system.generate_response_async(query, progress_callback)
            )
        finally:
            loop.close()

        # Prepare response with enhanced metadata
        response = result["response"]

        # Prepare detailed metadata
        metadata_parts = [
            f"🎯 Confidence: {result['confidence']:.1%}",
            f"⏱️ Response: {result['response_time']}s",
            f"📚 Sources: {result['search_results_count']} found"
        ]

        if result.get('cached'):
            metadata_parts.append("💾 Cached")

        if result.get('sources'):
            metadata_parts.append(f"📖 Refs: {', '.join(result['sources'][:2])}")

        metadata = " | ".join(metadata_parts)

        # Prepare status with warnings/issues
        status_parts = []
        if result.get('safety_warnings'):
            status_parts.append(f"⚠️ {len(result['safety_warnings'])} warnings")
        if result.get('safety_issues'):
            status_parts.append(f"🚨 {len(result['safety_issues'])} issues")
        if not status_parts:
            status_parts.append("✅ Safe response")

        status = " | ".join(status_parts)

        return response, metadata, status

    except Exception as e:
        logger.error(f"Error processing query: {e}")
        error_response = f"⚠️ Error processing your query: {str(e)}\n\n🚨 For medical emergencies, seek immediate professional help."
        error_metadata = f"❌ Error at {datetime.now().strftime('%H:%M:%S')}"
        error_status = "🚨 System error occurred"
        return error_response, error_metadata, error_status

def create_advanced_gradio_interface():
    """Create advanced Gradio interface with modern design and enhanced UX"""

    # Advanced CSS with medical theme and animations
    css = """
    @import url('https://fonts.googleapis.com/css2?family=Love+Ya+Like+A+Sister&display=swap');

    * {
        font-family: 'Love Ya Like A Sister', cursive !important;
    }

    .gradio-container {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        min-height: 100vh;
    }

    .main-container {
        background: rgba(255, 255, 255, 0.95);
        backdrop-filter: blur(10px);
        border-radius: 20px;
        padding: 30px;
        margin: 20px;
        box-shadow: 0 20px 40px rgba(0,0,0,0.1);
        border: 1px solid rgba(255,255,255,0.2);
    }

    .header-section {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        border-radius: 15px;
        padding: 25px;
        margin-bottom: 25px;
        text-align: center;
        box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3);
    }

    .query-container {
        background: linear-gradient(135deg, #f8f9ff 0%, #e8f2ff 100%);
        border-radius: 15px;
        padding: 20px;
        margin: 15px 0;
        border: 2px solid #667eea;
        transition: all 0.3s ease;
    }

    .query-container:hover {
        transform: translateY(-2px);
        box-shadow: 0 10px 25px rgba(102, 126, 234, 0.2);
    }

    .query-input {
        border: none !important;
        background: white !important;
        border-radius: 12px !important;
        padding: 15px !important;
        font-size: 16px !important;
        box-shadow: 0 4px 15px rgba(0,0,0,0.1) !important;
        transition: all 0.3s ease !important;
    }

    .query-input:focus {
        transform: scale(1.02) !important;
        box-shadow: 0 8px 25px rgba(102, 126, 234, 0.3) !important;
    }

    .response-container {
        background: linear-gradient(135deg, #fff 0%, #f8f9ff 100%);
        border-radius: 15px;
        padding: 20px;
        margin: 15px 0;
        border: 2px solid #4CAF50;
        min-height: 300px;
    }

    .response-output {
        border: none !important;
        background: transparent !important;
        font-size: 15px !important;
        line-height: 1.7 !important;
        color: #2c3e50 !important;
    }

    .metadata-container {
        background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%);
        border-radius: 12px;
        padding: 15px;
        margin: 10px 0;
        border-left: 5px solid #2196F3;
    }

    .metadata-output {
        border: none !important;
        background: transparent !important;
        font-size: 13px !important;
        color: #1565c0 !important;
        font-weight: 500 !important;
    }

    .status-container {
        background: linear-gradient(135deg, #e8f5e8 0%, #c8e6c9 100%);
        border-radius: 12px;
        padding: 15px;
        margin: 10px 0;
        border-left: 5px solid #4CAF50;
    }

    .status-output {
        border: none !important;
        background: transparent !important;
        font-size: 13px !important;
        color: #2e7d32 !important;
        font-weight: 500 !important;
    }

    .submit-btn {
        background: linear-gradient(135deg, #4CAF50 0%, #45a049 100%) !important;
        color: white !important;
        border: none !important;
        border-radius: 12px !important;
        padding: 15px 30px !important;
        font-size: 16px !important;
        font-weight: 600 !important;
        cursor: pointer !important;
        transition: all 0.3s ease !important;
        box-shadow: 0 6px 20px rgba(76, 175, 80, 0.3) !important;
    }

    .submit-btn:hover {
        transform: translateY(-3px) !important;
        box-shadow: 0 10px 30px rgba(76, 175, 80, 0.4) !important;
    }

    .clear-btn {
        background: linear-gradient(135deg, #ff7043 0%, #ff5722 100%) !important;
        color: white !important;
        border: none !important;
        border-radius: 12px !important;
        padding: 15px 25px !important;
        font-size: 14px !important;
        font-weight: 500 !important;
        transition: all 0.3s ease !important;
    }

    .clear-btn:hover {
        transform: translateY(-2px) !important;
        box-shadow: 0 8px 20px rgba(255, 87, 34, 0.3) !important;
    }

    .emergency-notice {
        background: linear-gradient(135deg, #ffebee 0%, #ffcdd2 100%);
        border: 2px solid #f44336;
        border-radius: 15px;
        padding: 20px;
        margin: 20px 0;
        color: #c62828;
        font-weight: 600;
        animation: pulse 2s infinite;
    }

    @keyframes pulse {
        0% { box-shadow: 0 0 0 0 rgba(244, 67, 54, 0.4); }
        70% { box-shadow: 0 0 0 10px rgba(244, 67, 54, 0); }
        100% { box-shadow: 0 0 0 0 rgba(244, 67, 54, 0); }
    }

    .gaza-context {
        background: linear-gradient(135deg, #e8f5e8 0%, #c8e6c9 100%);
        border: 2px solid #4caf50;
        border-radius: 15px;
        padding: 20px;
        margin: 20px 0;
        color: #2e7d32;
        font-weight: 500;
    }

    .sidebar-container {
        background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
        border-radius: 15px;
        padding: 20px;
        margin: 10px 0;
        border: 1px solid rgba(0,0,0,0.1);
    }

    .example-container {
        background: white;
        border-radius: 12px;
        padding: 20px;
        margin: 15px 0;
        box-shadow: 0 4px 15px rgba(0,0,0,0.1);
    }

    .progress-container {
        margin: 15px 0;
        padding: 10px;
        background: rgba(255,255,255,0.8);
        border-radius: 10px;
    }

    .footer-section {
        background: linear-gradient(135deg, #37474f 0%, #263238 100%);
        color: white;
        border-radius: 15px;
        padding: 20px;
        margin-top: 30px;
        text-align: center;
    }

    /* GLOBAL TEXT FIXES */
    .gradio-container,
    .query-container,
    .response-container,
    .metadata-container,
    .status-container {
        color: white !important;
    }

    .query-input,
    .response-output,
    .metadata-output,
    .status-output {
        color: white !important;
        background-color: rgba(0, 0, 0, 0.2) !important;
    }

    /* BANNER-INSPIRED PANEL BACKGROUNDS */
    .query-container,
    .response-container,
    .metadata-container,
    .status-container {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
        border: 2px solid #ffffff22 !important;
        border-radius: 15px !important;
        box-shadow: 0 10px 30px rgba(102, 126, 234, 0.3);
    }

    /* EXAMPLE SECTION BUTTON STYLING */
    .example-container .example {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
        color: white !important;
        font-weight: 600 !important;
        border-radius: 12px !important;
        padding: 15px !important;
        margin: 10px !important;
        text-align: center !important;
        box-shadow: 0 6px 20px rgba(0, 0, 0, 0.1);
        transition: all 0.3s ease;
        cursor: pointer;
    }

    .example-container .example:hover {
        transform: scale(1.03);
        box-shadow: 0 10px 30px rgba(102, 126, 234, 0.4);
    }

    /* MAKE HEADER + EXAMPLES MORE PROMINENT */
    .header-section {
        color: white !important;
        text-shadow: 0px 0px 6px rgba(0,0,0,0.4);
    }

    .example-container {
        margin-top: -20px !important;
    }
    """

    with gr.Blocks(
        css=css,
        title="🏥 Advanced Gaza First Aid Assistant",
        theme=gr.themes.Soft(
            primary_hue="blue",
            secondary_hue="green",
            neutral_hue="slate"
        )
    ) as interface:

        # Header Section
        with gr.Row(elem_classes=["main-container"]):
            gr.HTML("""
            <div class="header-section">
                <h1 style="margin: 0; font-size: 2.5em; font-weight: 700;">
                    🏥 Advanced Gaza First Aid Assistant
                </h1>
                <h2 style="margin: 10px 0 0 0; font-size: 1.2em; font-weight: 400; opacity: 0.9;">
                    AI-Powered Medical Guidance for Gaza Healthcare Workers
                </h2>
                <p style="margin: 15px 0 0 0; font-size: 1em; opacity: 0.8;">
                    Enhanced with 768-dimensional medical embeddings • Advanced FAISS indexing • Real-time safety validation
                </p>
            </div>
            """)

        # Main Interface
        with gr.Row(elem_classes=["main-container"]):
            with gr.Column(scale=2):
                # Query Input Section
                with gr.Group(elem_classes=["query-container"]):
                    gr.Markdown("### 🩺 Medical Query Input")
                    query_input = gr.Textbox(
                        label="Describe your medical situation",
                        placeholder="Enter your first aid question or describe the medical emergency...",
                        lines=4,
                        elem_classes=["query-input"]
                    )

                    with gr.Row():
                        submit_btn = gr.Button(
                            "🔍 Get Medical Guidance",
                            variant="primary",
                            elem_classes=["submit-btn"],
                            scale=3
                        )
                        clear_btn = gr.Button(
                            "🗑️ Clear",
                            variant="secondary",
                            elem_classes=["clear-btn"],
                            scale=1
                        )

            with gr.Column(scale=1):
                # Sidebar with Quick Access
                with gr.Group(elem_classes=["sidebar-container"]):
                    gr.Markdown("""
                    ### 🎯 Quick Access Guide

                    **🚨 Emergency Priorities:**
                    - Severe bleeding control
                    - Burn treatment protocols
                    - Airway management
                    - Trauma stabilization
                    - Shock prevention

                    **🏥 Gaza-Specific Scenarios:**
                    - Limited water situations
                    - Power outage medical care
                    - Supply shortage alternatives
                    - Mass casualty protocols
                    - Improvised medical tools

                    **📊 System Status:**
                    - ✅ Enhanced embeddings active
                    - ✅ Advanced indexing enabled
                    - ✅ Safety validation online
                    - ✅ Gaza context aware
                    """)

        # Response Section
        with gr.Row(elem_classes=["main-container"]):
            with gr.Column():
                # Main Response
                with gr.Group(elem_classes=["response-container"]):
                    gr.Markdown("### 🩹 Medical Guidance Response")
                    response_output = gr.Textbox(
                        label="AI Medical Guidance",
                        lines=15,
                        elem_classes=["response-output"],
                        interactive=False,
                        placeholder="Your medical guidance will appear here..."
                    )

                # Metadata and Status
                with gr.Row():
                    with gr.Column(scale=1):
                        with gr.Group(elem_classes=["metadata-container"]):
                            metadata_output = gr.Textbox(
                                label="📊 Response Metadata",
                                lines=2,
                                elem_classes=["metadata-output"],
                                interactive=False,
                                placeholder="Response metadata will appear here..."
                            )

                    with gr.Column(scale=1):
                        with gr.Group(elem_classes=["status-container"]):
                            status_output = gr.Textbox(
                                label="🛡️ Safety Status",
                                lines=2,
                                elem_classes=["status-output"],
                                interactive=False,
                                placeholder="Safety validation status will appear here..."
                            )

        # Important Notices
        with gr.Row(elem_classes=["main-container"]):
            gr.HTML("""
            <div class="emergency-notice">
                <h3 style="margin: 0 0 10px 0;">🚨 CRITICAL EMERGENCY DISCLAIMER</h3>
                <p style="margin: 0; font-size: 1.1em;">
                    For life-threatening emergencies, seek immediate professional medical attention.<br>
                    📞 <strong>Gaza Emergency Contacts:</strong> Palestinian Red Crescent (101) | Civil Defense (102)
                </p>
            </div>
            """)

        with gr.Row(elem_classes=["main-container"]):
            gr.HTML("""
            <div class="gaza-context">
                <h3 style="margin: 0 0 10px 0;">📍 Gaza Context Awareness</h3>
                <p style="margin: 0; font-size: 1em;">
                    This advanced AI system is specifically designed for Gaza's challenging conditions including
                    limited resources, frequent power outages, and restricted medical supply access. All guidance
                    considers these constraints and provides practical alternatives when standard treatments are unavailable.
                </p>
            </div>
            """)

        # Examples Section
        with gr.Row(elem_classes=["main-container"]):
            with gr.Group(elem_classes=["example-container"]):
                gr.Markdown("### 💡 Example Medical Scenarios")

                example_queries = [
                    "How to treat severe burns when clean water is extremely limited?",
                    "Managing gunshot wounds with only basic household supplies",
                    "Recognizing and treating infection in wounds without antibiotics",
                    "Emergency care for children during extended power outages",
                    "Treating compound fractures without proper medical equipment",
                    "Managing diabetic emergencies when insulin is unavailable",
                    "Stopping arterial bleeding with improvised tourniquets",
                    "Recognizing and treating shock in mass casualty situations",
                    "Airway management for unconscious patients without equipment",
                    "Preventing infection in surgical wounds during siege conditions"
                ]

                gr.Examples(
                    examples=example_queries,
                    inputs=query_input,
                    label="Click any example to try it:",
                    examples_per_page=5
                )

        # Event Handlers
        submit_btn.click(
            process_medical_query_with_progress,
            inputs=query_input,
            outputs=[response_output, metadata_output, status_output],
            show_progress=True
        )

        query_input.submit(
            process_medical_query_with_progress,
            inputs=query_input,
            outputs=[response_output, metadata_output, status_output],
            show_progress=True
        )

        clear_btn.click(
            lambda: ("", "", "", ""),
            outputs=[query_input, response_output, metadata_output, status_output]
        )

        # Footer
        with gr.Row(elem_classes=["main-container"]):
            gr.HTML("""
            <div class="footer-section">
                <h3 style="margin: 0 0 15px 0;">🔬 Advanced Technical Features</h3>
                <div style="display: grid; grid-template-columns: repeat(auto-fit, minmax(250px, 1fr)); gap: 20px; margin-bottom: 20px;">
                    <div>
                        <strong>🧠 Enhanced AI:</strong><br>
                        768-dimensional medical embeddings<br>
                        Advanced FAISS IVF indexing<br>
                        Optimized LLM quantization
                    </div>
                    <div>
                        <strong>🛡️ Safety Systems:</strong><br>
                        Real-time medical validation<br>
                        Contraindication detection<br>
                        Gaza-specific risk assessment
                    </div>
                    <div>
                        <strong>⚡ Performance:</strong><br>
                        Async processing pipeline<br>
                        Intelligent response caching<br>
                        Progressive loading indicators
                    </div>
                </div>
                <hr style="border: 1px solid rgba(255,255,255,0.2); margin: 20px 0;">
                <p style="margin: 0; opacity: 0.8;">
                    <strong>⚕️ Medical Disclaimer:</strong> This AI assistant provides educational guidance based on established medical protocols.
                    It is designed to support, not replace, medical professionals. Always consult qualified healthcare providers for definitive care.
                </p>
            </div>
            """)

    return interface
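
# --- Editor's note (an assumption; behavior varies by Gradio version):
# gr.Progress updates are delivered over Gradio's queue, so if progress bars
# do not appear at runtime, enable queuing before launching:
#     interface = create_advanced_gradio_interface()
#     interface.queue().launch(server_name="0.0.0.0", server_port=7860)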

def main():
    """Enhanced main function with comprehensive error handling and system monitoring"""
    logger.info("🚀 Starting Advanced Gaza First Aid Assistant")

    try:
        # System initialization with detailed logging
        logger.info("🔧 Loading precomputed knowledge base...")
        system = initialize_enhanced_system()

        # Verify system components
        logger.info("✅ Precomputed knowledge base loaded")
        logger.info("✅ Medical fact checker ready")
        logger.info("✅ FAISS indexing active")

        # Create and launch advanced interface
        logger.info("🎨 Creating advanced Gradio interface...")
        interface = create_advanced_gradio_interface()

        logger.info("🌐 Launching advanced interface...")
        interface.launch(
            server_name="0.0.0.0",
            server_port=7860,
            share=False,
            max_threads=6,  # Increased for better async performance
            show_error=True,
            quiet=False,
            favicon_path=None,
            ssl_verify=False
        )

    except Exception as e:
        logger.error(f"❌ Failed to start Advanced Gaza First Aid Assistant: {e}")
        print(f"\n🚨 STARTUP ERROR: {e}")
        print("\n🔧 Troubleshooting Steps:")
        print("1. Check if all dependencies are installed: pip install -r requirements.txt")
        print("2. Ensure sufficient memory is available (minimum 4GB RAM recommended)")
        print("3. Verify data directory exists and contains medical documents")
        print("4. Check system logs for detailed error information")
        print("\n📞 For technical support, check the application logs above.")
        sys.exit(1)

if __name__ == "__main__":
    main()
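
# --- Editor's note: a requirements.txt sketch inferred from the imports at the
# top of this file (unpinned and illustrative rather than authoritative):
#   gradio
#   numpy
#   pandas
#   sentence-transformers
#   faiss-cpu
#   torch
#   transformers
#   bitsandbytes
#   llama-index
#   llama-index-vector-stores-faiss
#   llama-index-embeddings-huggingface
#   unstructured[pdf]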