File size: 1,806 Bytes
d2db563
4c45bb2
5639b64
4c45bb2
 
5639b64
4c45bb2
 
a517614
5639b64
 
 
 
 
4c45bb2
a517614
 
 
 
4c45bb2
5639b64
 
 
 
 
 
 
 
 
 
4c45bb2
5639b64
4c45bb2
 
5639b64
4c45bb2
 
 
 
 
5639b64
 
 
4c45bb2
 
5639b64
4c45bb2
a517614
4c45bb2
 
 
 
 
 
 
 
 
5639b64
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Replace with your Hugging Face API token
hf_api_token = "YOUR_API_TOKEN"


def generate_questions(file_content):
    """Generate quiz questions (and answer options) from an uploaded file.

    Parameters
    ----------
    file_content : bytes | str
        Raw content of the uploaded file. Accepted as either bytes
        (decoded as UTF-8) or already-decoded text, since the exact type
        depends on how the Gradio File component is configured.

    Returns
    -------
    tuple[str, str]
        (questions, options) as newline-joined strings, suitable for the
        two output Textboxes. On failure, an error message is returned in
        the first slot and an empty string in the second.
    """
    # Load MolMo model and tokenizer with the API token.
    # NOTE(review): loading the model on every call is very slow — consider
    # caching at module level. Also verify that this checkpoint is actually
    # a seq2seq model; Molmo may require a different Auto* class.
    model_name = "allenai/Molmo-7B-D-0924"
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=hf_api_token)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name, use_auth_token=hf_api_token)

    # Normalize the uploaded content to text: accept str directly, decode
    # bytes as UTF-8, and report decode failures to the user.
    if isinstance(file_content, str):
        text = file_content
    else:
        try:
            text = file_content.decode("utf-8")
        except Exception as e:
            return f"Error decoding file: {str(e)}", ""

    # Guard against empty uploads before invoking the model.
    if not text.strip():
        return "Uploaded file is empty.", ""

    # Create a prompt
    prompt = f"Here is a text: {text}. Please generate a set of questions based on the content."

    # Generate questions
    inputs = tokenizer(prompt, return_tensors="pt")
    try:
        outputs = model.generate(**inputs)
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error processing file: {str(e)}", ""

    # Split generated text into questions (segments starting with "Q") and
    # everything else (treated as options). Basic heuristic — customize.
    # BUG FIX: strip each segment BEFORE testing startswith("Q"); splitting
    # on "." leaves a leading space, so the original check almost never hit.
    questions = []
    options = []
    for segment in generated_text.split("."):
        segment = segment.strip()
        if segment.startswith("Q"):
            questions.append(segment)
        elif segment:  # drop empty fragments produced by the split
            options.append(segment)

    if not questions:
        return "No questions found in the uploaded text.", ""

    # BUG FIX: the outputs are Textboxes, which expect strings — join the
    # lists so the return type matches the error paths and the UI contract.
    return "\n".join(questions), "\n".join(options)


# Create Gradio interface: one file input, two text outputs for the
# generated questions and their answer options.
question_box = gr.Textbox(label="Questions")
option_box = gr.Textbox(label="Options")

iface = gr.Interface(
    fn=generate_questions,
    # BUG FIX: type="binary" delivers the file content as bytes, which is
    # what generate_questions decodes; the default ("filepath") would pass
    # a path string and break the .decode() call.
    inputs=gr.File(label="Upload File", type="binary"),
    outputs=[question_box, option_box],
    title="Question and Option Generator"
)

# Launch only when run as a script, so importing this module does not
# start a web server as a side effect.
if __name__ == "__main__":
    iface.launch()