# quizzz / app.py
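"""Gradio app that generates quiz questions (and answer options) from an
uploaded text file using the allenai/Molmo-7B-D-0924 model."""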
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Replace with your Hugging Face API token
hf_api_token = "YOUR_API_TOKEN"
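# Alternatively, read the token from the environment so it is not hard-coded
# (HF_TOKEN is the variable the huggingface_hub library checks by default):
# import os; hf_api_token = os.environ.get("HF_TOKEN", "YOUR_API_TOKEN")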
# Load the Molmo model and tokenizer once at startup rather than on every
# request. Note: Molmo is a causal (decoder-only) multimodal model, so it must
# be loaded with AutoModelForCausalLM and trust_remote_code=True, not
# AutoModelForSeq2SeqLM.
model_name = "allenai/Molmo-7B-D-0924"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_token, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_api_token, trust_remote_code=True)

def generate_questions(file_content):
    # gr.File(type="binary") passes the upload as raw bytes; decode to text
    try:
        text = file_content.decode("utf-8")
    except Exception as e:
        return f"Error decoding file: {e}", ""
    # Build a prompt asking the model for questions about the text
    prompt = f"Here is a text: {text}. Please generate a set of questions based on the content."

    # Generate questions (cap output length so generation terminates promptly)
    inputs = tokenizer(prompt, return_tensors="pt")
    try:
        outputs = model.generate(**inputs, max_new_tokens=512)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error generating questions: {e}", ""
    # Extract questions and options (basic heuristic: a line starting with "Q"
    # is a question, anything else is treated as an option; customize as needed).
    # Splitting on newlines rather than "." avoids breaking on question marks.
    questions = []
    options = []
    for line in generated_text.splitlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith("Q"):
            questions.append(line)
        else:
            options.append(line)

    if not questions:
        return "No questions found in the generated text.", ""

    # Join into newline-separated strings so each list renders in its Textbox
    return "\n".join(questions), "\n".join(options)
# Create the Gradio interface
question_box = gr.Textbox(label="Questions")
option_box = gr.Textbox(label="Options")

iface = gr.Interface(
    fn=generate_questions,
    inputs=gr.File(label="Upload File", type="binary"),
    outputs=[question_box, option_box],
    title="Question and Option Generator",
)

iface.launch()
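# To expose a temporary public URL while testing from another machine,
# launch with share=True instead: iface.launch(share=True)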