import streamlit as st
from transformers import pipeline, AutoTokenizer, T5ForConditionalGeneration

# Load the FLAN-T5-large model and tokenizer for Arabic and ESL tutoring.
# st.cache_resource keeps the loaded weights in memory across Streamlit
# reruns, so the model is downloaded and initialized only once instead of
# on every widget interaction.
@st.cache_resource
def load_pipeline():
    model_name = "google/flan-t5-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = T5ForConditionalGeneration.from_pretrained(model_name)
    # Set up the Hugging Face pipeline for text-to-text generation,
    # the correct task for a seq2seq model like FLAN-T5.
    return pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        device=-1,  # Run on CPU; set device=0 to use the first GPU
    )

model_pipeline = load_pipeline()

# Streamlit app UI
st.title("AI Arabic and ESL Tutor")
st.write("Ask me a question in English or Arabic, and I will help you.")

# Sidebar for user to control model generation parameters
st.sidebar.title("Model Parameters")
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1)  # Default 1.0
top_p = st.sidebar.slider("Top-p (Nucleus Sampling)", 0.0, 1.0, 0.9, 0.05)  # Default 0.9
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 1)  # Default 50
do_sample = st.sidebar.checkbox("Enable Random Sampling", value=True)  # Enable sampling
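# Note: temperature, top_p, and top_k only take effect while sampling is
# enabled; with do_sample=False the pipeline falls back to greedy decoding,
# and recent transformers versions may warn that these flags are unused.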

# Input field for the student
student_question = st.text_input("Ask your question in English or Arabic!")

# Generate and display response using the FLAN-T5 model
if student_question:
    # Adjust prompt to encourage student-friendly responses
    prompt = f"Q: {student_question}\nA: Explain it simply to a young student in no more than 3 sentences."
    
    # Call the pipeline with adjusted parameters
    response = model_pipeline(
        prompt,
        max_length=75,  # Adjust this based on desired response length
        temperature=temperature,  # Control randomness
        top_p=top_p,  # Nucleus sampling
        top_k=top_k,  # Top-k sampling
        do_sample=do_sample  # Enable or disable sampling
    )
    
    st.write("Tutor's Answer:", response[0]['generated_text'])