BillBojangeles2000 committed on
Commit 57e83ff · 1 Parent(s): a1f74de

Update app.py

Files changed (1)
  1. app.py +72 -133
app.py CHANGED
@@ -1,4 +1,3 @@
- import streamlit as st
  import random
  import spacy
  import requests
@@ -6,63 +5,22 @@ from bs4 import BeautifulSoup
  import re
  import spacy
  import language_tool_python
- import json
- from gradio_client import Client
-
- API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-mnli"
- headers = {"Authorization": "Bearer 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"}
-
- def query(payload):
-     response = requests.post(API_URL, headers=headers, json=payload)
-     return response.json()
-
- # Define the grammar_sense function
- def grammar_sense(sentence):
-     sense = query({
-         "inputs": sentence,
-         "parameters": {"candidate_labels": ["Make Sense", "Not Make Sense"]},
-     })
-     grammar = query({
-         "inputs": sentence,
-         "parameters": {"candidate_labels": ["Correct Grammar", "Incorrect Grammar"]},
-     })
-     objects = ["Sense", "Grammar"]
-     ans = []
-     for i in objects:
-         if i == "Sense":
-             response_data = json.loads(json.dumps(sense))
-             labels = response_data['labels']
-             scores = response_data['scores']
-             index_of_highest_score = scores.index(max(scores))
-             highest_score_label = labels[index_of_highest_score]
-             ans.append(highest_score_label)
-         else:
-             response_data = json.loads(json.dumps(grammar))
-             labels = response_data['labels']
-             scores = response_data['scores']
-             index_of_highest_score = scores.index(max(scores))
-             highest_score_label = labels[index_of_highest_score]
-             ans.append(highest_score_label)
-
-     if not 'Not' in ans[0] and ans[1] == 'Correct Grammar':
-         return True
-     else:
-         return False
-
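The removed helper above calls the facebook/bart-large-mnli zero-shot endpoint twice and then round-trips each already-parsed response through json.loads(json.dumps(...)). A minimal sketch of the same check, assuming the usual zero-shot response shape of {"labels": [...], "scores": [...]} and with the bearer token replaced by a placeholder:

import requests

API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-mnli"
HEADERS = {"Authorization": "Bearer <your_hf_token>"}  # placeholder, not the token from the commit

def top_label(sentence, candidate_labels):
    # Zero-shot classification via the Inference API; pick the highest-scoring label.
    response = requests.post(
        API_URL,
        headers=HEADERS,
        json={"inputs": sentence, "parameters": {"candidate_labels": candidate_labels}},
    )
    data = response.json()  # expected: {"sequence": ..., "labels": [...], "scores": [...]}
    return data["labels"][data["scores"].index(max(data["scores"]))]

def grammar_sense(sentence):
    # Same decision as the removed function: both classifications must come back positive.
    sense = top_label(sentence, ["Make Sense", "Not Make Sense"])
    grammar = top_label(sentence, ["Correct Grammar", "Incorrect Grammar"])
    return sense == "Make Sense" and grammar == "Correct Grammar"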
  # Initialize LanguageTool
- tool = language_tool_python.LanguageToolPublicAPI('en-US')

- # Define the Streamlit app
- st.title("NLP Testing and Scoring App")
-
- # Ask for the topic at the start
- topic = st.text_input("Enter a topic:")

  # Web scraping and text cleaning
- entity = "Florida" # You can replace this with the user's topic input
- if topic:
-     entity = topic # Use the user's input as the entity
-
  prefix = "https://wiki.kidzsearch.com/wiki/"
  page = requests.get(f'{prefix}{entity}')
  res = BeautifulSoup(page.content, 'html.parser')
@@ -74,13 +32,14 @@ cleaned_text = re.sub(r'[^a-zA-Z0-9.,]', ' ', cleaned_text)
  paragraphs = [p.strip() for p in re.split(r'\n', cleaned_text) if p.strip()]

  # Process text using SpaCy
- nlp = spacy.load("en_core_web_sm")
  doc = nlp(cleaned_text)

  sentences = [sent.text for sent in doc.sents]

  # Combine sentences into paragraphs
  paragraphs = [f"{sentences[i]} {sentences[i + 1]}" if i + 1 < len(sentences) else sentences[i] for i in range(0, len(sentences), 2)]

  class SubjectiveTest:

      def __init__(self, data, noOfQues):
@@ -90,11 +49,11 @@ class SubjectiveTest:

      def adjust_question_pattern(self, entity_label, topic_placeholder=True):
          question_patterns = {
-             "PERSON": ["Who is {entity}?", "Tell me about {entity}", "What do you know about {entity}"],
-             "ORG": ["What is {entity}?", "Tell me about {entity}", "What do you know about {entity}"],
-             "GPE": ["Tell me about {entity}", "What do you know about {entity}", "Where is {entity}"],
-             "MONEY": ["How much is {entity}?", "Tell me the value of {entity}"],
-             "DATE": ["Why was {entity} important?"],
              # Add more entity-label to question-pattern mappings as needed
          }
@@ -102,7 +61,7 @@ class SubjectiveTest:
          for key in question_patterns:
              question_patterns[key] = [pattern + " {topic}" for pattern in question_patterns[key]]

-         return question_patterns.get(entity_label, "Explain")

      def generate_test(self, topic=None):
          doc = self.nlp(self.summary)
@@ -112,7 +71,7 @@ class SubjectiveTest:
              for ent in sentence.ents:
                  entity_label = ent.label_
                  entity_text = ent.text
-                 question_patterns = self.adjust_question_pattern(entity_label, "")
                  for pattern in question_patterns:
                      question = pattern.format(entity=entity_text, topic=topic)
                      if entity_label in question_answer_dict:
@@ -124,81 +83,61 @@ class SubjectiveTest:

          for entity_label, entity_questions in question_answer_dict.items():
              entity_questions = entity_questions[:self.noOfQues]
-             if "Explain" in entity_questions:
-                 continue
-             else:
-                 questions.extend(entity_questions)

          return questions

- with st.form("quiz_form"):
-     # Create a button to initiate quiz generation
-     generate_quiz = st.form_submit_button("Generate Quiz")
-
-     if generate_quiz:
-         st.write("Generating the quiz...")
-         data = ' '.join(paragraphs)
-         noOfQues = 5
-
-         subjective_generator = SubjectiveTest(data, noOfQues)
-         questions = subjective_generator.generate_test(topic) # Use the user's input topic here
-
-         # Filter out invalid and empty questions
-         x = 0
-         while x > len(questions):
-             for i in questions:
-                 if len(i) == 1:
-                     questions.pop(x)
-                     x = x + 1
-                 else:
-                     x = x + 1
-         # Ensure you have valid questions
-         if not questions:
-             st.write("No valid questions to process.")
          else:
-             answers = {} # Dictionary to store answers
-
-             # Use the filtered questions in your code
-             for i, question in enumerate(questions):
-                 res = st.text_input(f'Q{i + 1}: {question}') # Get user input for each question
-                 answers[f'Q{i + 1}'] = res # Store the user's answer
-
-             scores = []
-             client = Client("https://billbojangeles2000-zephyr-7b-alpha-chatbot-karki.hf.space/")
-
-             question_list = subjective_generator.generate_test(topic) # Define 'questions' here
-             questions = []
-             for i, question in enumerate(question_list):
-                 if (question != "") and (len(tool.check(question)) == 0) and (grammar_sense(question)):
-                     questions.append(f"Question: {question}")
-
-             for i, question in enumerate(questions):
-                 res = answers[f'Q{i + 1}']
-                 if res:
-                     result = client.predict(
-                         f'What would you rate this answer to the question: "{question}" as a percentage? Here is the answer: {res}. Make sure to write your answer as "Score" and then write your score of the response.',
-                         0.9,
-                         256,
-                         0.9,
-                         1.2,
-                         api_name="/chat"
-                     )
-                     pattern = r'(\d+)%'
-                     match = re.search(pattern, result)
-                     if match:
-                         score = int(match.group(1))
-                         scores.append(score)
-                     else:
-                         scores.append(85)
-
- def calculate_average(numbers):
-     if not numbers:
-         return 0 # Return 0 for an empty list to avoid division by zero.
-
-     total = sum(numbers)
-     average = total / len(numbers)
-     return average
-
- # Calculate and display the average score
- average_score = calculate_average(scores)
- st.write(f'Your average score is {average_score}')
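The grading path removed above depends on the chat model volunteering a percentage in its reply; a small helper that mirrors that parsing (the r'(\d+)%' pattern and the 85 fallback are taken from the code above) makes the assumption explicit:

import re

def extract_percentage(reply: str, fallback: int = 85) -> int:
    # Look for the first "NN%" in the model's reply; use the fallback if none is found.
    match = re.search(r'(\d+)%', reply)
    return int(match.group(1)) if match else fallback

print(extract_percentage("Score: I would rate this answer 80%."))  # 80
print(extract_percentage("No numeric rating given."))              # 85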
 
 

  import random
  import spacy
  import requests

  import re
  import spacy
  import language_tool_python
+ import streamlit as st
  # Initialize LanguageTool
+ tool = language_tool_python.LanguageTool('en-US')

+ # Helper function to check grammar and sense
+ def grammar_sense(sentence):
+     sense = tool.correct(sentence)
+     grammar = "Correct Grammar" if not tool.check(sentence) else "Incorrect Grammar"
+     return "Make Sense" if "Not" not in sense and grammar == "Correct Grammar" else "Not Make Sense"
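In this new helper, tool.correct() returns the corrected sentence as a string and tool.check() returns a list of rule matches, so testing for the substring "Not" in the corrected text is not equivalent to the old model-based "Make Sense" label. A minimal sketch of how the language_tool_python calls behave (the sample sentence is illustrative only):

import language_tool_python

tool = language_tool_python.LanguageTool('en-US')

text = "He go to school yesterday."  # illustrative input, not from the commit
matches = tool.check(text)           # list of Match objects, one per detected issue
corrected = tool.correct(text)       # corrected string with suggested replacements applied
print(len(matches) == 0)             # likely False here, since "He go" should be flagged
print(corrected)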

  # Web scraping and text cleaning
+ Quiz_Gen = st.form("Quiz Generation")
+ Quiz_Gen.write("What topic do you want to get tested on?")
+ res = Quiz_Gen.text_box()
+ Quiz_Gen.form_submit_button("Submit")
+ entity = res
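Streamlit does not provide a text_box widget, so the topic field above would likely need text_input instead. A minimal sketch of the intended form, assuming text_input is what was meant and keeping the names from the diff:

import streamlit as st

# Hypothetical corrected form: collect the topic and submit it together.
Quiz_Gen = st.form("Quiz Generation")
Quiz_Gen.write("What topic do you want to get tested on?")
topic_input = Quiz_Gen.text_input("Topic")          # st.text_box does not exist
submitted = Quiz_Gen.form_submit_button("Submit")
entity = topic_input if submitted else ""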
  prefix = "https://wiki.kidzsearch.com/wiki/"
  page = requests.get(f'{prefix}{entity}')
  res = BeautifulSoup(page.content, 'html.parser')

  paragraphs = [p.strip() for p in re.split(r'\n', cleaned_text) if p.strip()]

  # Process text using SpaCy
+ nlp = spacy.load("en_core_web_lg")
  doc = nlp(cleaned_text)

  sentences = [sent.text for sent in doc.sents]

  # Combine sentences into paragraphs
  paragraphs = [f"{sentences[i]} {sentences[i + 1]}" if i + 1 < len(sentences) else sentences[i] for i in range(0, len(sentences), 2)]
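The commit switches from en_core_web_sm to en_core_web_lg; the larger model is not bundled with spaCy, so the Space needs it installed (for example via requirements.txt or python -m spacy download en_core_web_lg), otherwise spacy.load raises OSError. A small fallback sketch, assuming the small model remains available:

import spacy

def load_spacy_model(name="en_core_web_lg", fallback="en_core_web_sm"):
    # Try the large model first; fall back to the small one if it is not installed.
    try:
        return spacy.load(name)
    except OSError:
        return spacy.load(fallback)

nlp = load_spacy_model()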
+
  class SubjectiveTest:

      def __init__(self, data, noOfQues):

      def adjust_question_pattern(self, entity_label, topic_placeholder=True):
          question_patterns = {
+             "PERSON": ["Who is {entity}?", "Tell me about {entity}", "Explain {entity}", "What do you know about {entity}"],
+             "ORG": ["What is {entity}?", "Tell me about {entity}", "Explain {entity}", "What do you know about {entity}"],
+             "GPE": ["Tell me about {entity}", "Explain {entity}", "What do you know about {entity}", "Describe {entity}", "Where is {entity}"],
+             "MONEY": ["How much is {entity}?", "Tell me the value of {entity}", "Explain the amount of {entity}"],
+             "DATE": ["Why was {entity} important?", "Explain what happened on {entity}"],
              # Add more entity-label to question-pattern mappings as needed
          }

          for key in question_patterns:
              question_patterns[key] = [pattern + " {topic}" for pattern in question_patterns[key]]

+         return question_patterns.get(entity_label, ["Explain {entity} {topic}"])

      def generate_test(self, topic=None):
          doc = self.nlp(self.summary)

              for ent in sentence.ents:
                  entity_label = ent.label_
                  entity_text = ent.text
+                 question_patterns = self.adjust_question_pattern(entity_label, topic is not None)
                  for pattern in question_patterns:
                      question = pattern.format(entity=entity_text, topic=topic)
                      if entity_label in question_answer_dict:

          for entity_label, entity_questions in question_answer_dict.items():
              entity_questions = entity_questions[:self.noOfQues]
+             questions.extend(entity_questions)

          return questions

+ # Example usage
+ data = ' '.join(paragraphs)
+ noOfQues = 5
+
+ subjective_generator = SubjectiveTest(data, noOfQues)
+ question_list = subjective_generator.generate_test("")
+ questions = []
+ Quiz = st.form("Quiz")
+ for i, question in enumerate(question_list):
+     if "Explain" not in question and len(tool.check(question)) == 0 and grammar_sense(question) == "Make Sense":
+         questions.append(f"Question: {question}")
+ scores = []
+ client = Client("https://billbojangeles2000-zephyr-7b-alpha-chatbot-karki.hf.space/")
+ for i in questions:
+     res = Quiz.text_input(i)
+     result = client.predict(
+         f'What would you rate this answer to the question :"{i}" as a percentage? Here is the answer: {res}. Make sure to write your answer as "Score" and then write your score of the response.', # Fixed formatting issue
+         0.9,
+         256,
+         0.9,
+         1.2,
+         api_name="/chat"
+     )
+     print(result)
+     pattern = r'(\d+)%'
+
+     match = re.search(pattern, result)
+     if match:
+         score = match.group(1)
+         scores.append(f'{int(score)}')
      else:
+         scores.append(f'N/A')

+ Quiz_Gen.form_submit_button("Submit")
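Note that the new code still calls Client(...).predict(..., api_name="/chat") even though the from gradio_client import Client line is removed at the top of this diff, so app.py as committed would raise a NameError. A minimal sketch of the import and call it appears to rely on, with the argument values copied from the diff and a placeholder prompt:

from gradio_client import Client  # import dropped by this commit but still needed

client = Client("https://billbojangeles2000-zephyr-7b-alpha-chatbot-karki.hf.space/")
result = client.predict(
    "grading prompt built from the question and answer",  # placeholder text
    0.9,   # positional generation parameters as used in the diff
    256,
    0.9,
    1.2,
    api_name="/chat",
)
print(result)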
 
 
 

+ x = 0
+ new_scores = []
+ for i in scores:
+     if i == 'N/A':
+         scores.pop(x)
+         scores.append(85)
+         x = x+1
+     else:
+         x = x+1

+ def calculate_average(numbers):
+     if not numbers:
+         return 0 # Return 0 for an empty list to avoid division by zero.
+
+     total = sum(numbers)
+     average = total / len(numbers)
+     return average
+
+ st.write(f'Your average score is {calculate_average(scores)}')
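As committed, scores mixes types: successful parses are appended as strings (f'{int(score)}'), failures as the string 'N/A' and later replaced by the integer 85, so sum(numbers) inside calculate_average would fail on the string entries. A small sketch of a purely numeric version, keeping the 85 fallback from the diff:

def normalize_scores(raw_scores, fallback=85):
    # Convert string scores to ints and substitute the fallback for 'N/A' entries.
    return [fallback if s == 'N/A' else int(s) for s in raw_scores]

def calculate_average(numbers):
    # Average of the numeric scores; 0 for an empty list to avoid division by zero.
    return sum(numbers) / len(numbers) if numbers else 0

scores = normalize_scores(['90', 'N/A', '75'])  # illustrative values
print(f'Your average score is {calculate_average(scores)}')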