# Spaces: Sleeping (Hugging Face Space status banner captured during page extraction; not code)
# Standard library
import os
import pickle

# Third-party
import faiss
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM

# A Hugging Face access token is required to download the models below.
# Fail fast at startup with an actionable message rather than at first download.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set. Please configure it in Space settings.")
# Load precomputed chunks and FAISS index (built offline by the preprocessing step).
# `chunks` is expected to be a sequence of text chunks aligned with the index vectors.
with open("chunks.pkl", "rb") as f:
    chunks = pickle.load(f)
index = faiss.read_index("index.faiss")

# Embedding model — must be the same model used during preprocessing so that
# query vectors live in the same space as the indexed chunk vectors.
embedding_model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")

# Generator model and tokenizer. NOTE: the original comment said "Jais", but the
# model id is AraGPT2-base (an Arabic GPT-2 causal LM).
model_name = "aubmindlab/aragpt2-base"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
# RAG function to retrieve and generate a response | |
def get_response(query, k=3):
    """Answer *query* using retrieval-augmented generation.

    Embeds the query, retrieves the ``k`` nearest text chunks from the FAISS
    index, and prompts the causal LM with that context.

    Args:
        query: The user's question.
        k: Number of chunks to retrieve from the index.

    Returns:
        str: The generated answer, with the echoed prompt/question stripped.
    """
    # Retrieve the k most similar chunks. FAISS requires float32 query vectors.
    query_embedding = embedding_model.encode([query])
    distances, indices = index.search(np.asarray(query_embedding, dtype="float32"), k)
    retrieved_chunks = [chunks[i] for i in indices[0]]
    context = " ".join(retrieved_chunks)

    # Build the prompt; truncate to the model's usable context window.
    prompt = f"Based on the following documents: {context}, answer the question: {query}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        # GPT-2 tokenizers define no pad token; without this, generate()
        # warns and may pad with an arbitrary id.
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Causal LMs echo the prompt; keep only the text after the question.
    # If the query does not appear verbatim, this falls back to the full text.
    return response.split(query)[-1].strip()
# --- Gradio interface ---
import gradio as gr  # NOTE(review): redundant (already imported at top); kept to avoid breaking partial runs

# Log the Gradio version — useful when debugging Space build/runtime mismatches.
gradio_version = gr.__version__
print(f"Using Gradio version: {gradio_version}")

# Load custom CSS for the Blocks UI from an adjacent file.
css_path = "custom.css"
with open(css_path, "r", encoding="utf-8") as f:
    custom_css = f.read()
# Chat UI: an RTL Arabic chatbot over the RAG pipeline above.
with gr.Blocks(title="المتحدث الآلي للتشريعات المحلية لإمارة دبي", css=custom_css) as demo:
    gr.Markdown("# فريق الذكاء الاصطناعي\nاسأل أي سؤال حول تشريعات دبي - نسخة تجريبية (تصميم وتنفيذ م. أسامة الخطيب)", elem_id="title")
    # type="messages" -> history is a list of {"role": ..., "content": ...} dicts.
    chatbot = gr.Chatbot(elem_id="chatbot", type="messages")
    msg = gr.Textbox(placeholder="اكتب سؤالك هنا...", rtl=True, elem_id="input-box")
    clear = gr.Button("مسح", elem_id="clear-btn")

    def user(user_message, history):
        """Append the user's turn to the history and clear the textbox."""
        history = history or []
        history.append({"role": "user", "content": user_message})
        return "", history

    def bot(history):
        """Generate and append the assistant reply for the latest user turn."""
        user_message = history[-1]["content"]
        history.append({"role": "assistant", "content": get_response(user_message)})
        return history

    # Show the user's message immediately, then run the (slow) generation step.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    clear.click(lambda: [], None, chatbot, queue=False)

# NOTE(review): share=True has no effect when running on Hugging Face Spaces.
demo.launch(share=True)