|
|
|
|
|
import os |
|
|
import httpx |
|
|
from openai import OpenAI |
|
|
from .embeddings import EmbeddingManager |
|
|
from .store import VectorStore |
|
|
|
|
|
|
|
|
class RAGApp:
    """Retrieval-augmented generation (RAG) application.

    Embeds user notes with ``EmbeddingManager``, stores them in
    ``VectorStore``, and answers questions through the Hugging Face
    OpenAI-compatible router endpoint.
    """

    def __init__(self):
        # All three stay None unless initialization fully succeeds;
        # callers can check ``self.client is None`` to detect a failed init.
        self.embedder = None
        self.vectorstore = None
        self.client = None

        try:
            self.embedder = EmbeddingManager()
            self.vectorstore = VectorStore()

            api_key = os.getenv("HF_TOKEN")
            if not api_key:
                raise ValueError("HF_TOKEN not found in environment variables")

            # The HF router speaks the OpenAI API; the explicit 60 s timeout
            # guards against requests hanging on model cold-starts.
            self.client = OpenAI(
                base_url="https://router.huggingface.co/v1",
                api_key=api_key,
                http_client=httpx.Client(timeout=60.0),
            )

            print("✅ RAGApp initialized successfully with Hugging Face router.")
        except Exception as e:
            # Best-effort init: leave the app constructed but unusable rather
            # than crashing the host process at startup.
            print("❌ RAGApp init error:", e)
            self.client = None

    def add_notes(self, text):
        """Chunk *text*, embed the chunks, and add them to the vector store.

        Chunking uses 1000-character windows advanced by 800 characters
        (200-character overlap) so sentences straddling a boundary appear
        in two chunks.

        Returns the number of chunks stored (0 for empty input).
        Raises RuntimeError if initialization failed (embedder/store unset).
        """
        if self.embedder is None or self.vectorstore is None:
            raise RuntimeError("RAGApp is not initialized; cannot add notes.")
        if not text:
            # Avoid sending an empty batch to the embedding backend.
            return 0
        chunks = [text[i:i + 1000] for i in range(0, len(text), 800)]
        embeddings = self.embedder.generate_embeddings(chunks)
        self.vectorstore.add_documents(chunks, embeddings)
        return len(chunks)

    def ask(self, query):
        """Answer *query* using retrieved note context and the LLM router.

        Returns the model's answer as a Markdown string, or an
        ``"Error: ..."`` string on any failure — this method never raises.
        """
        try:
            if not self.client:
                return "Error: API client not initialized."

            # Embed the query and pull the closest note chunks as context.
            q_embed = self.embedder.generate_embeddings([query])[0]
            docs = self.vectorstore.retrieve_similar_docs(q_embed, top_k=3)
            context = "\n\n".join(docs) if docs else "No context found in notes."

            messages = [
                {
                    "role": "system",
                    "content": (
                        "You are a world-class engineering tutor specializing in Electronics, Embedded Systems, and Programming.\n"
                        "Your responses must be clear, technically accurate, and engaging.\n\n"
                        "### Behavior:\n"
                        "1. If the question is conceptual → explain with clarity and real-world relevance.\n"
                        "2. If it involves code → analyze, correct, and explain fixes.\n"
                        "3. If hardware-related → explain theory + circuit/signal behavior.\n"
                        "4. If theory-based from uploaded notes → connect with practical examples.\n\n"
                        "### Output Style:\n"
                        "- Use Markdown.\n"
                        "- Highlight key terms with bold text.\n"
                        "- Use emojis and structured headings for readability.\n"
                        "- Avoid phrases like 'as an AI model'."
                    ),
                },
                {
                    "role": "user",
                    "content": f"Context:\n{context}\n\nQuestion: {query}\nAnswer clearly and in detail below:",
                },
            ]

            # Low temperature keeps tutoring answers focused and repeatable.
            completion = self.client.chat.completions.create(
                model="openai/gpt-oss-20b",
                messages=messages,
                temperature=0.4,
                max_tokens=800,
            )

            return completion.choices[0].message.content

        except Exception as e:
            # Boundary method for the UI layer: report the failure as a
            # string instead of propagating.
            print("❌ Error in ask():", e)
            return f"Error: {e}"
|
|
|
|
|
|