rivapereira123 committed
Commit 67b8dd4 · verified · 1 Parent(s): 53082ed

Update app.py

Files changed (1)
  1. app.py +11 -17
app.py CHANGED
@@ -710,24 +710,18 @@ Provide clear, actionable advice while emphasizing the need for professional med
 
 
     def _generate_response(self, query: str, context: str) -> str:
-        """Enhanced response generation using model.generate() to avoid DynamicCache errors"""
-        if self.llm is None or self.tokenizer is None:
-            return self._generate_fallback_response(query, context)
-
-        prompt = f"""{self.system_prompt}
-
-MEDICAL KNOWLEDGE CONTEXT:
-{context}
-
-PATIENT QUESTION: {query}
-
-RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
-
-        try:
-            # Tokenize input and move to correct device
-            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.llm.device)
+        """Enhanced response generation using model.generate() to avoid DynamicCache errors"""
+        if self.llm is None or self.tokenizer is None:
+            return self._generate_fallback_response(query, context)
+        prompt = f"""{self.system_prompt}
+MEDICAL KNOWLEDGE CONTEXT:
+{context}
+PATIENT QUESTION: {query}
+RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
+
+        try:
+            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.llm.device)
 
-            # Generate output
             outputs = self.llm.generate(
                 **inputs,
                 max_new_tokens=800,
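
The hunk ends mid-call, so the rest of the method is not visible in this diff. For reference, here is a minimal sketch of how a generate()-based method like this is typically completed with standard Hugging Face transformers APIs. The names self.llm, self.tokenizer, self.system_prompt, and _generate_fallback_response come from the diff above; the sampling arguments, the torch.no_grad() wrapper, and the except path are assumptions, not the file's actual code.

import torch

# Sketch only - a method body assuming the class attributes visible in the diff.
def _generate_response(self, query: str, context: str) -> str:
    """Generate a response with model.generate(), falling back on any error."""
    if self.llm is None or self.tokenizer is None:
        return self._generate_fallback_response(query, context)
    prompt = f"""{self.system_prompt}
MEDICAL KNOWLEDGE CONTEXT:
{context}
PATIENT QUESTION: {query}
RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
    try:
        # Tokenize and move tensors to the model's device.
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.llm.device)
        with torch.no_grad():  # inference only, no gradients needed
            outputs = self.llm.generate(
                **inputs,
                max_new_tokens=800,  # value visible in the diff
                do_sample=True,      # assumed sampling settings (not shown in the hunk)
                temperature=0.7,
                pad_token_id=self.tokenizer.eos_token_id,
            )
        # Decode only the newly generated tokens so the prompt is not echoed back.
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
    except Exception:
        return self._generate_fallback_response(query, context)

Slicing off the prompt tokens before decoding keeps the system prompt and retrieved context out of the returned answer, and routing any generation error through the fallback matches the guard the diff adds at the top of the method.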