Jakaria committed
Commit 5d796ae · 1 Parent(s): d0896fa

Add Bangla model API

Files changed (2):
  1. app.py +5 -6
  2. rag.py +13 -7
app.py CHANGED
@@ -1,9 +1,10 @@
 import os
 import gradio as gr
-from config import DATA_DIR, UPLOADED_PDF_PATH, VECTOR_DIR, GROQ_API_KEY
+from config import DATA_DIR, VECTOR_DIR, GROQ_API_KEY
 from ingestion import ingest_pdf
 from rag import answer, reset_memory
 
+# ---- Runtime folders ----
 os.makedirs(DATA_DIR, exist_ok=True)
 os.makedirs(VECTOR_DIR, exist_ok=True)
 
@@ -13,10 +14,8 @@ def do_ingest(pdf_file):
     if pdf_file is None:
         return "Please upload a PDF."
 
-    with open(UPLOADED_PDF_PATH, "wb") as f:
-        f.write(pdf_file.read())
-
-    msg = ingest_pdf(UPLOADED_PDF_PATH)
+    # pdf_file is a path string from Gradio
+    msg = ingest_pdf(pdf_file)
     reset_memory()
     return f"✅ {msg}"
 
@@ -32,7 +31,7 @@ with gr.Blocks() as demo:
     gr.Markdown("## 📚 Chat with Your PDF — FAISS + Groq + Memory (LangChain)")
 
     with gr.Row():
-        pdf = gr.File(label="Upload PDF", file_types=[".pdf"], type="file")
+        pdf = gr.File(label="Upload PDF", file_types=[".pdf"], type="filepath")
         ingest_btn = gr.Button("Ingest PDF")
 
     status = gr.Textbox(label="Status", interactive=False)
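
Note on the app.py change: with type="filepath", gr.File hands the callback the uploaded file's temporary path as a plain string, so do_ingest can pass it straight to ingest_pdf instead of re-writing the bytes to UPLOADED_PDF_PATH. The event hookup sits outside this hunk; a minimal sketch of how the pieces would typically connect, assuming the standard Gradio click pattern (illustrative, not part of the commit):

    # Assumed wiring, not shown in this diff: the filepath string from the
    # File component flows into do_ingest via the button's click event.
    ingest_btn.click(fn=do_ingest, inputs=pdf, outputs=status)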
rag.py CHANGED
@@ -2,14 +2,15 @@ from langchain_community.vectorstores import FAISS
 from langchain_huggingface import HuggingFaceEmbeddings
 from langchain.memory import ConversationBufferMemory
 from langchain_groq import ChatGroq
-from langchain.chains import conversational_retrieval
+from langchain.chains import ConversationalRetrievalChain
 from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
 from config import VECTOR_DIR, EMBED_MODEL, GROQ_API_KEY, GROQ_MODEL
 
-_memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+# ---- Memory ----
+memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
 def reset_memory():
-    _memory.clear()
+    memory.clear()
 
 def build_chain():
     embeddings = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
@@ -18,15 +19,20 @@ def build_chain():
 
     llm = ChatGroq(model=GROQ_MODEL, api_key=GROQ_API_KEY, temperature=0.1)
 
+    # Simple combined prompt
     chat_prompt = ChatPromptTemplate.from_messages([
-        SystemMessagePromptTemplate.from_template("You are a helpful assistant. Answer using the context."),
-        HumanMessagePromptTemplate.from_template("Context:\n{context}\n\nQuestion:\n{question}\nAnswer clearly and concisely:")
+        SystemMessagePromptTemplate.from_template(
+            "You are a helpful assistant. Use the context to answer user questions."
+        ),
+        HumanMessagePromptTemplate.from_template(
+            "Context:\n{context}\n\nQuestion:\n{question}\nAnswer clearly and concisely:"
+        )
     ])
 
-    chain = conversational_retrieval.from_llm(
+    chain = ConversationalRetrievalChain.from_llm(
         llm=llm,
         retriever=retriever,
-        memory=_memory,
+        memory=memory,
         combine_docs_chain_kwargs={"prompt": chat_prompt}
     )
     return chain
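
Note on the rag.py change: conversational_retrieval is a module, so the old code failed on from_llm; ConversationalRetrievalChain is the class that actually exposes the from_llm constructor. The answer() helper that app.py imports is untouched by this commit; a minimal sketch of how such a chain is typically invoked, assuming answer(question) returns a string (hypothetical, not part of the diff):

    # Hypothetical usage: ConversationalRetrievalChain takes a "question" key
    # and, with memory attached, returns the reply under the "answer" key.
    def answer(question: str) -> str:
        chain = build_chain()  # rebuilds and reloads FAISS on every call
        result = chain.invoke({"question": question})
        return result["answer"]

In practice the chain would be cached rather than rebuilt per call, since build_chain() reloads the embeddings and vector store each time.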