# DL_Bot / app.py
# (Hugging Face Space file — viewer chrome removed: "ohalkhateeb's picture /
#  Create app.py / 3d77d1e verified / raw / history blame / 2.37 kB")
import os
import pickle

import faiss
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM
# --- Startup: configuration and heavy resources (run once at import) --------

# Hugging Face access token, injected through the Space's secret settings;
# required to download the gated/model files below.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set. Please configure it in Space settings.")

# Load precomputed chunks and FAISS index
# `chunks.pkl` holds the raw text chunks embedded offline; `index.faiss` is
# the matching vector index (row i of the index corresponds to chunks[i] —
# assumed from the lookup in get_response; verify against the preprocessing script).
with open("chunks.pkl", "rb") as f:
    chunks = pickle.load(f)
index = faiss.read_index("index.faiss")

# Load embedding model (same as used in preprocessing)
embedding_model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")

# Load Jais model and tokenizer
# NOTE(review): comment says "Jais" but the checkpoint is AraGPT2 — confirm intent.
model_name = "aubmindlab/aragpt2-base"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
# RAG function to retrieve and generate a response
def get_response(query: str, k: int = 3) -> str:
    """Answer *query* with retrieval-augmented generation.

    Embeds the query, retrieves the ``k`` nearest text chunks from the
    FAISS index, builds a context-stuffed prompt, and returns the causal
    LM's completion.

    Args:
        query: The user's question.
        k: Number of context chunks to retrieve.

    Returns:
        The generated answer (only the newly generated text, stripped).
    """
    # Embed with the same model used to build the index so distances are meaningful.
    query_embedding = embedding_model.encode([query])
    # FAISS requires float32 input vectors.
    distances, indices = index.search(np.asarray(query_embedding, dtype=np.float32), k)
    # FAISS pads with -1 when the index holds fewer than k vectors; skip those.
    retrieved_chunks = [chunks[i] for i in indices[0] if i != -1]
    context = " ".join(retrieved_chunks)

    prompt = f"Based on the following documents: {context}, answer the question: {query}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9
    )
    # Decode only the tokens generated AFTER the prompt.  The previous
    # `response.split(query)[-1]` broke whenever the query was truncated away
    # at max_length=512 or round-tripped through the tokenizer differently,
    # leaking prompt text into the answer.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()
# Gradio interface: chat transcript + question box + clear button.
with gr.Blocks(title="Dubai Legislation Chatbot") as demo:
    gr.Markdown("# Dubai Legislation Chatbot\nAsk any question about Dubai legislation")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type your question here...")
    clear = gr.Button("Clear")

    def user(user_message, history):
        # Append the question with an empty answer slot, and blank the textbox.
        updated_history = history + [[user_message, None]]
        return "", updated_history

    def bot(history):
        # Generate an answer for the most recent question and fill its slot.
        question = history[-1][0]
        answer = get_response(question)
        history[-1][1] = answer
        return history

    # Submitting first echoes the question (fast, unqueued), then streams
    # the bot's answer into the same transcript.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Clearing simply resets the transcript component to empty.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()