Update app.py
app.py
CHANGED
@@ -530,7 +530,7 @@ RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
         with torch.no_grad():
             outputs = self.llm.generate(
                 **inputs,
-                max_new_tokens=
+                max_new_tokens=600,
                 temperature=0.3,
                 pad_token_id=self.tokenizer.eos_token_id,
                 do_sample=True,
@@ -553,7 +553,7 @@ RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
                 if line and line not in unique_lines and len(line) > 10:  # Filter out very short lines
                     unique_lines.append(line)

-            return '\n'.join(unique_lines
+            return '\n'.join(unique_lines)  # Limit to 10 lines

         except Exception as e:
             logger.error(f"❌ Error in LLM generate(): {e}")
@@ -574,7 +574,7 @@ RESPONSE (provide practical, Gaza-appropriate medical guidance):"""
             if condition in query_lower:
                 return f"{guidance}\n\nContext from medical sources:\n{context[:200]}..."

-        return f"Medical guidance for: {query}\n\nGeneral advice: Prioritize safety, seek professional help when available, consider resource limitations in Gaza.\n\nRelevant information:\n{context[:
+        return f"Medical guidance for: {query}\n\nGeneral advice: Prioritize safety, seek professional help when available, consider resource limitations in Gaza.\n\nRelevant information:\n{context[:600]}..."

     def _prepare_final_response(
         self,
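For reference, a minimal self-contained sketch of the generation call these settings feed into, assuming a standard `transformers` causal LM. The model name below is a tiny stand-in chosen only so the snippet runs; the Space's actual model and the surrounding class are not shown in this diff:

```python
# Hedged sketch: reproduces this commit's generate() settings against a
# stand-in model (sshleifer/tiny-gpt2), not the Space's own LLM.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "sshleifer/tiny-gpt2"  # hypothetical stand-in
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

inputs = tokenizer("Example prompt", return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(
        **inputs,
        max_new_tokens=600,  # cap set by this commit
        temperature=0.3,     # low temperature: near-greedy sampling
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
    )
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

With `do_sample=True`, `temperature=0.3` keeps sampling close to greedy while still avoiding repetition loops, and `max_new_tokens=600` bounds the response length rather than relying on the model to stop on its own.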
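The second hunk's post-processing also reads cleanly in isolation; a hedged reconstruction, where the function wrapper is assumed and the variable names follow the diff:

```python
# Hedged reconstruction of the diff's line-deduplication step; the
# dedupe_lines wrapper is assumed, not part of the actual app.py.
def dedupe_lines(text: str) -> str:
    unique_lines = []
    for line in (raw.strip() for raw in text.split("\n")):
        if line and line not in unique_lines and len(line) > 10:  # Filter out very short lines
            unique_lines.append(line)
    return '\n'.join(unique_lines)
```

Note that the retained `# Limit to 10 lines` comment no longer describes the new behavior: `'\n'.join(unique_lines)` joins every deduplicated line, with no `[:10]` slice.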