import streamlit as st
from transformers import pipeline, AutoTokenizer, T5ForConditionalGeneration
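
# Assumed dependencies (not pinned in the original): streamlit, transformers,
# and a backend such as torch; the slow T5 tokenizer may also need sentencepiece.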
# Load the FLAN-T5-large model and tokenizer for Arabic and ESL tutoring
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)
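
# Note (sketch, not in the original code): Streamlit reruns this script on every
# interaction, so the model is reloaded each time. Caching the loader avoids that:
#
#   @st.cache_resource
#   def load_model():
#       tok = AutoTokenizer.from_pretrained(model_name)
#       mdl = T5ForConditionalGeneration.from_pretrained(model_name)
#       return tok, mdl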
# Set up the Hugging Face pipeline for text-to-text generation
model_pipeline = pipeline(
    "text2text-generation",  # The correct task for FLAN-T5
    model=model,
    tokenizer=tokenizer,
    device=-1  # Ensure it runs on CPU (adjust if using GPU)
)
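# device=-1 keeps the pipeline on CPU; device=0 would select the first CUDA GPU.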
# Streamlit app UI
st.title("AI Arabic and ESL Tutor")
st.write("Ask me a question in English or Arabic, and I will help you.")

# Sidebar for user to control model generation parameters
st.sidebar.title("Model Parameters")
temperature = st.sidebar.slider("Temperature", 0.1, 1.5, 1.0, 0.1)  # Default 1.0
top_p = st.sidebar.slider("Top-p (Nucleus Sampling)", 0.0, 1.0, 0.9, 0.05)  # Default 0.9
top_k = st.sidebar.slider("Top-k", 0, 100, 50, 1)  # Default 50
do_sample = st.sidebar.checkbox("Enable Random Sampling", value=True)  # Enable sampling
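# With sampling disabled, decoding is greedy and the temperature/top-p/top-k
# values above have no effect (transformers may warn about unused sampling flags).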

# Input field for the student
student_question = st.text_input("Ask your question in English or Arabic!")

# Generate and display the response using the FLAN-T5 model
if student_question:
    # Adjust prompt to encourage student-friendly responses
    prompt = f"Q: {student_question}\nA: Explain it simply to a young student in no more than 3 sentences."

    # Call the pipeline with the adjusted parameters
    response = model_pipeline(
        prompt,
        max_length=75,            # Adjust this based on desired response length
        temperature=temperature,  # Control randomness
        top_p=top_p,              # Nucleus sampling
        top_k=top_k,              # Top-k sampling
        do_sample=do_sample       # Enable or disable sampling
    )
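    # The pipeline returns a list of dicts; 'generated_text' holds the decoded
    # answer. Note that max_length is measured in tokens, not words; newer
    # transformers versions also accept max_new_tokens, which caps only the
    # generated tokens rather than the whole sequence.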
st.write("Tutor's Answer:", response[0]['generated_text']) | |