sanarawal7 commited on
Commit
4c45bb2
·
1 Parent(s): 041a38b
Files changed (1) hide show
  1. app.py +43 -101
app.py CHANGED
@@ -1,102 +1,44 @@
1
- import os
2
- import requests
3
  import gradio as gr
4
- from dotenv import load_dotenv
5
-
6
- # Load environment variables from .env file
7
- load_dotenv()
8
-
9
- # Get the Hugging Face API key from environment variables
10
- hf_api_key = os.getenv("HF_API_KEY")
11
-
12
- # Ensure the API key is loaded correctly
13
- if hf_api_key is None:
14
- raise ValueError("Hugging Face API key not found. Please set it in the .env file.")
15
-
16
- # Hugging Face Inference API URL for the model
17
- api_url = "https://api-inference.huggingface.co/models/allenai/Molmo-7B-D-0924"
18
-
19
- # Function to generate questions and options using the Hugging Face API
20
- def generate_question(text):
21
- headers = {
22
- "Authorization": f"Bearer {hf_api_key}",
23
- "Content-Type": "application/json",
24
- }
25
-
26
- # Prompt to generate a question and options
27
- prompt = f"Generate a multiple-choice quiz question based on the following content: {text}"
28
-
29
- # Payload for the request
30
- data = {
31
- "inputs": prompt,
32
- "parameters": {"max_length": 100, "do_sample": False},
33
- }
34
-
35
- # Make the request to the Hugging Face API
36
- response = requests.post(api_url, headers=headers, json=data)
37
-
38
- if response.status_code == 200:
39
- result = response.json()
40
-
41
- # Here we assume the model returns a structured result with question, options, and correct answer
42
- return {
43
- "question": result[0]["question"],
44
- "options": result[0]["options"],
45
- "correct_answer": result[0]["correct_answer"]
46
- }
47
- else:
48
- return f"Error: {response.status_code} - {response.text}"
49
-
50
- # Function to handle file input, generate the question, and display options
51
- def handle_file(file):
52
- # Read the content of the file
53
- file_text = file.decode("utf-8") # Assuming it's a text file
54
- # Generate the question with options
55
- result = generate_question(file_text)
56
-
57
- # Return the question and options to be displayed as buttons
58
- if isinstance(result, dict):
59
- return result["question"], result["options"]
60
- else:
61
- return result, [] # In case of error, no options are returned
62
-
63
- # Function to check if the user's answer is correct
64
- def check_answer(selected_option, correct_answer):
65
- if selected_option == correct_answer:
66
- return "Correct!"
67
- else:
68
- return "Incorrect."
69
-
70
- # Gradio interface for handling the file upload and generating the quiz
71
- with gr.Blocks() as interface:
72
- with gr.Row():
73
- # File uploader
74
- file_input = gr.File(label="Upload a text file")
75
-
76
- # Question display
77
- question_display = gr.Textbox(label="Question", interactive=False)
78
-
79
- # Option buttons (initialized empty)
80
- options = [gr.Button("", visible=False) for _ in range(4)]
81
-
82
- # Output for displaying whether the answer is correct
83
- result_display = gr.Textbox(label="Result", interactive=False)
84
-
85
- def update_ui(file):
86
- question, options_list = handle_file(file)
87
- # Update the question and buttons with options
88
- question_display.update(value=question)
89
- for btn, option in zip(options, options_list):
90
- btn.update(value=option, visible=True)
91
- return question, options_list
92
-
93
- # Hook up the file upload to generate questions and options
94
- file_input.change(fn=update_ui, inputs=file_input, outputs=[question_display] + options)
95
-
96
- # Each button click checks if the selected answer is correct
97
- for i, btn in enumerate(options):
98
- btn.click(fn=check_answer, inputs=[btn, "correct_answer"], outputs=result_display)
99
-
100
- # Launch the Gradio app
101
- if __name__ == "__main__":
102
- interface.launch()
 
 
 
1
import functools
import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
4
+
5
# Hugging Face access token, read from the environment rather than hard-coded.
# BUG FIX: the original assigned the literal placeholder string "HF_API_KEY",
# which can never authenticate against the Hub; the prior revision of this
# file loaded the secret with os.getenv("HF_API_KEY"), restored here.
# Falls back to None so public models still load without a token.
hf_api_token = os.environ.get("HF_API_KEY")
8
@functools.lru_cache(maxsize=1)
def _load_qa_components(model_name="molmo/molmo-qa-base"):
    """Load and cache the QA tokenizer and model (one download per process).

    The original reloaded both on every call, which is prohibitively slow.
    NOTE(review): "molmo/molmo-qa-base" may not exist on the Hub — the prior
    revision of this file used "allenai/Molmo-7B-D-0924"; verify the repo id.
    """
    # `use_auth_token=` is deprecated in recent transformers; `token=` replaces it.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_api_token)
    model = AutoModelForQuestionAnswering.from_pretrained(model_name, token=hf_api_token)
    return tokenizer, model


def _read_text(file_content):
    """Return the uploaded file's text.

    Gradio may deliver raw bytes, a filesystem path (str), or a tempfile-like
    object with a ``.name`` attribute depending on version/configuration; the
    original only handled bytes and crashed otherwise.
    """
    if isinstance(file_content, bytes):
        return file_content.decode("utf-8")
    path = getattr(file_content, "name", file_content)
    with open(path, encoding="utf-8") as fh:
        return fh.read()


def generate_questions(file_content):
    """Generate quiz questions and options from an uploaded file.

    Parameters:
        file_content: the uploaded file (bytes, path string, or file object).

    Returns:
        (questions, options): two lists of strings, split heuristically from
        the model's decoded answer span — sentences starting with "Q" are
        treated as questions, everything else as options.
    """
    tokenizer, model = _load_qa_components()
    text = _read_text(file_content)

    # Truncate so over-long documents don't exceed the model's context window.
    inputs = tokenizer(text, return_tensors="pt", truncation=True)
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)

    # BUG FIX: the original decoded ``start_logits.argmax()`` directly, i.e.
    # treated a *position index* as a token id, producing garbage. Extractive
    # QA decodes the span of input tokens between the argmax of the start and
    # end logits instead.
    start = int(outputs.start_logits.argmax())
    end = int(outputs.end_logits.argmax())
    if end < start:  # guard against a degenerate (end before start) prediction
        end = start
    answers = tokenizer.decode(
        inputs["input_ids"][0][start : end + 1], skip_special_tokens=True
    )

    # Heuristic split of the decoded text into questions vs. options.
    questions = []
    options = []
    for sentence in answers.split("."):
        sentence = sentence.strip()
        if sentence.startswith("Q"):
            questions.append(sentence)
        else:
            options.append(sentence)

    return questions, options
32
+
33
# --- Gradio UI -------------------------------------------------------------
# Two text boxes display the generated questions and options; Gradio
# stringifies the lists returned by generate_questions.
question_box = gr.Textbox(label="Question")
option_box = gr.Textbox(label="Options")

iface = gr.Interface(
    fn=generate_questions,
    inputs=gr.File(label="Upload File"),
    outputs=[question_box, option_box],
    title="Question and Option Generator",
)

# BUG FIX: the original called iface.launch() unconditionally, so merely
# importing this module started a web server. Guard the launch the same way
# the prior revision of this file did.
if __name__ == "__main__":
    iface.launch()