import os
import gradio as gr
import faiss
import numpy as np
import pickle
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM


HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable not set. Please configure it in Space settings.")


# Load precomputed chunks and FAISS index
with open("chunks.pkl", "rb") as f:
    chunks = pickle.load(f)
index = faiss.read_index("index.faiss")

# Load embedding model (same as used in preprocessing)
embedding_model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
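
# For reference only: a minimal sketch of the (assumed) preprocessing step that
# produced chunks.pkl and index.faiss. The real script is not part of this file;
# `raw_chunks` (a list of passage strings) is an assumption.
def build_index(raw_chunks, index_path="index.faiss", chunks_path="chunks.pkl"):
    # Embed every chunk with the same model used for query-time retrieval
    embeddings = embedding_model.encode(raw_chunks, convert_to_numpy=True)
    # Exact (brute-force) L2 index; adequate for a small legislative corpus
    idx = faiss.IndexFlatL2(embeddings.shape[1])
    idx.add(np.asarray(embeddings, dtype="float32"))
    faiss.write_index(idx, index_path)
    with open(chunks_path, "wb") as f:
        pickle.dump(raw_chunks, f)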


# Load the AraGPT2 model and tokenizer (note: the model ID below is AraGPT2, not Jais)
model_name = "aubmindlab/aragpt2-base"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, token=HF_TOKEN, trust_remote_code=True)
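
# Optional sketch (assumption: a GPU Space). The model could be moved to CUDA;
# the tokenized inputs in get_response would then also need .to(model.device):
#     import torch
#     model = model.to("cuda" if torch.cuda.is_available() else "cpu")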


# RAG function: retrieve the top-k most similar chunks, then generate an answer
def get_response(query, k=3):
    # Embed the query and search the FAISS index for its k nearest chunks
    query_embedding = embedding_model.encode([query])
    distances, indices = index.search(np.array(query_embedding), k)
    retrieved_chunks = [chunks[i] for i in indices[0] if i != -1]  # -1 means no hit
    context = " ".join(retrieved_chunks)
    prompt = f"Based on the following documents: {context}, answer the question: {query}"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2-style models have no pad token
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded output echoes the prompt; keep only the text after the question
    return response.split(query)[-1].strip()
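
# Quick smoke test of the full retrieve-then-generate path (hypothetical query and
# opt-in env var; disabled by default so the Space starts up fast):
if os.getenv("RAG_SMOKE_TEST"):
    print(get_response("ما هي اختصاصات بلدية دبي؟"))  # "What are the competencies of Dubai Municipality?"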

# Gradio interface (gradio is already imported above as gr)
print(f"Using Gradio version: {gr.__version__}")


# Load custom CSS from file
css_path = "custom.css"
with open(css_path, "r", encoding="utf-8") as f:
    custom_css = f.read()
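
# custom.css ships with the Space; a hypothetical example of the kind of rules it
# might contain for this right-to-left Arabic UI (selectors match the elem_ids below):
#     #chatbot { direction: rtl; }
#     #input-box textarea { text-align: right; }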

# UI strings are Arabic; English glosses are given in the comments.
with gr.Blocks(title="المتحدث الآلي للتشريعات المحلية لإمارة دبي", css=custom_css) as demo:  # "Automated assistant for the local legislation of the Emirate of Dubai"
    # "AI Team: Ask any question about Dubai legislation - trial version (designed and implemented by Eng. Osama Al-Khatib)"
    gr.Markdown("# فريق الذكاء الاصطناعي\nاسأل أي سؤال حول تشريعات دبي - نسخة تجريبية (تصميم وتنفيذ م. أسامة الخطيب)", elem_id="title")
    chatbot = gr.Chatbot(elem_id="chatbot", type="messages")
    msg = gr.Textbox(placeholder="اكتب سؤالك هنا...", rtl=True, elem_id="input-box")  # "Type your question here..."
    clear = gr.Button("مسح", elem_id="clear-btn")  # "Clear"

    def user(user_message, history):
        # Append the user's turn in Gradio's "messages" format and clear the textbox
        history = history or []
        history.append({"role": "user", "content": user_message})
        return "", history

    def bot(history):
        # Answer the latest user message and append it as the assistant's turn
        user_message = history[-1]["content"]
        history.append({"role": "assistant", "content": get_response(user_message)})
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    clear.click(lambda: [], None, chatbot, queue=False)

demo.launch()  # share=True is not supported on Hugging Face Spaces, so it is omitted