Commit · 4a82770
Parent(s): c29409a

Upd logger memory
memory.py CHANGED
@@ -6,10 +6,12 @@ from collections import defaultdict, deque
 from typing import List
 from sentence_transformers import SentenceTransformer
 from google import genai  # must be configured in app.py and imported globally
+import logging
 
 _LLM = "gemini-2.5-flash-lite-preview-06-17"  # Small model for NLP simple tasks
 # Load embedding model
 embedding_model = SentenceTransformer("/app/model_cache", device="cpu").half()
+logger = logging.getLogger("medical-chatbot")
 
 class MemoryManager:
     def __init__(self, max_users=1000, history_per_user=10):
@@ -24,7 +26,7 @@ class MemoryManager:
             oldest = self.user_queue.popleft()
             self._drop_user(oldest)
         self.user_queue.append(user_id)
-
+        # Normalize
         self.text_cache[user_id].append((query.strip(), response.strip()))
         # Use Gemini to summarize and chunk smartly
         chunks = self.chunk_response(response, lang)
@@ -90,6 +92,7 @@ class MemoryManager:
                 generation_config={"temperature": 0.4}
             )
             output = result.text.strip()
+            logger.info(f"Reasoned RAG result: {output}")
             return [chunk.strip() for chunk in output.split('---') if chunk.strip()]
         except Exception as e:
             print(f"❌ Gemini chunking failed: {e}")
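
Note that the new logging.getLogger("medical-chatbot") call only fetches a named logger; the inline comment on the genai import suggests the actual configuration lives in app.py, which this commit does not touch. A minimal sketch of what that setup might look like, assuming a plain stdlib logging.basicConfig (the level and format here are illustrative, not taken from the repo):

# Hypothetical logging setup for app.py (not part of this commit).
# memory.py only calls logging.getLogger("medical-chatbot"); until some
# module attaches a handler, the new logger.info(...) line produces no output.
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)
logger = logging.getLogger("medical-chatbot")
logger.info("Logger configured")  # smoke test: basicConfig logs to stderr

With something like that in place, logger.info(f"Reasoned RAG result: {output}") surfaces each chunked Gemini response in the Space's container logs, which is presumably the point of this "Upd logger memory" commit.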