File size: 2,716 Bytes
b068b13
 
25aecaf
 
 
b068b13
0905dfa
 
 
e95e88b
 
 
b068b13
 
0905dfa
b068b13
0905dfa
25aecaf
 
 
 
0905dfa
b068b13
 
0905dfa
 
 
 
 
 
 
b068b13
 
 
 
 
 
0905dfa
953dc61
b068b13
0905dfa
 
7876697
 
 
0905dfa
 
 
 
 
b068b13
0905dfa
3f394e4
 
 
 
0905dfa
3f394e4
 
 
 
b068b13
7876697
 
0905dfa
b068b13
9a87d6c
4c771b4
0905dfa
b068b13
 
25aecaf
 
b068b13
 
 
 
 
 
 
0905dfa
dd9bc92
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
from fastapi import FastAPI, Request, Form
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates

import os
from tqdm import tqdm

# LangChain imports
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import FAISS
from langchain_community.chat_models import ChatOpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain_community.embeddings import HuggingFaceEmbeddings

# FastAPI setup: app instance, static file mount, and HTML templates.
app = FastAPI()
# NOTE(review): mounting the project root ('.') as /static serves EVERY file
# in the directory over HTTP — including this source file and the PDFs.
# Consider moving static assets into a dedicated static/ subdirectory.
app.mount("/static", StaticFiles(directory="."), name="static")
# Templates are also loaded from the project root (expects index.html there).
templates = Jinja2Templates(directory=".")

# Load the PDF documents to be indexed.
def cargar_docs(pdf_files=None):
    """Load every page of the given PDFs into a flat document list.

    Args:
        pdf_files: Optional iterable of PDF file paths. Defaults to the
            five Arabic course files this app was built around, so
            existing callers (``cargar_docs()``) are unaffected.

    Returns:
        A list of LangChain ``Document`` objects, one per PDF page,
        in file order.
    """
    if pdf_files is None:
        # Default corpus; kept byte-identical to the original file names
        # (including the spelling of the third entry).
        pdf_files = [
            "1 مساعد ممارس ملف المحور.pdf",
            "2 مساعد ممارس ملف المحور.pdf",
            "3 مساعد ممارس املف المحور.pdf",
            "4 مساعد ممارس ملف المحور.pdf",
            "ملف المحور 5 مساعد ممارس.pdf"
        ]
    all_docs = []
    for pdf_file in pdf_files:
        loader = PyPDFLoader(pdf_file)
        all_docs.extend(loader.load())
    return all_docs

# Load all PDF pages once at import time.
docs = cargar_docs()

# Split into overlapping chunks for retrieval.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
split_docs = text_splitter.split_documents(docs)

# Debug: print a 300-char preview of the first five chunks.
for i, doc in enumerate(split_docs[:5]):
    print(f"Chunk {i+1}:\n{doc.page_content[:300]}\n{'-'*40}")

# ===================
# Arabic embeddings
# ===================
# BERT model pretrained on Arabic text (runs locally via HuggingFace).
model_name = "asafaya/bert-base-arabic"
embeddings = HuggingFaceEmbeddings(model_name=model_name)

# Build the FAISS vectorstore incrementally, in batches, so tqdm can
# show progress and memory use stays bounded.
batch_size = 100
vectorstore = None

for i in tqdm(range(0, len(split_docs), batch_size)):
    batch = split_docs[i:i + batch_size]
    if vectorstore is None:
        # First batch creates the index; later batches are appended.
        vectorstore = FAISS.from_documents(batch, embeddings)
    else:
        vectorstore.add_documents(batch)

print(f"Se han indexado {len(split_docs)} chunks.")

# Question-answering chain: retrieve the top-5 chunks and "stuff" them
# into a single prompt for the LLM.
# NOTE(review): `langchain_community.chat_models.ChatOpenAI` is deprecated
# in recent LangChain releases in favor of `langchain_openai.ChatOpenAI`
# — verify against the pinned langchain version before upgrading.
qa_chain = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(temperature=0.2, model_name="gpt-4o-mini"),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(search_kwargs={"k": 5})
)

@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
    """Serve the chat front-end (index.html) from the templates directory."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

@app.post("/preguntar")
def preguntar(request: Request, pregunta: str = Form(...)):
    """Answer a form-posted question against the indexed PDFs.

    Declared as a plain (sync) ``def`` on purpose: ``qa_chain.run`` is a
    blocking call (network round-trip to OpenAI plus retrieval), and the
    original ``async def`` ran it directly on the event loop, stalling
    every other request. FastAPI executes sync endpoints in a worker
    threadpool, so the HTTP interface is unchanged.

    Returns:
        JSONResponse with a single ``respuesta`` key holding the answer.
    """
    respuesta = qa_chain.run(pregunta)
    return JSONResponse({"respuesta": respuesta})

# Local development entry point.
if __name__ == "__main__":
    # Imported here (not at module top) so the app can be served by an
    # external ASGI server (e.g. `uvicorn app:app` or gunicorn+uvicorn
    # workers) without requiring this import to succeed at module load.
    import uvicorn

    uvicorn.run("app:app", host="0.0.0.0", port=7860)