Spaces:
Update app.py
app.py
CHANGED
@@ -1,12 +1,11 @@
 import os
-import re
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
-
+import gradio as gr
 
+# Model name and Hugging Face token
 MODEL_NAME = "Pisethan/sangapac-math"
-
-# Get the token from the environment variable
 TOKEN = os.getenv("HF_API_TOKEN")
+
 if not TOKEN:
     raise ValueError("Hugging Face API token not found. Set it as an environment variable.")
 
@@ -19,48 +18,19 @@ except Exception as e:
     classifier = None
     print(f"Error loading model or tokenizer: {e}")
 
-# Load dataset dynamically from Hugging Face or locally
-try:
-    dataset = load_dataset("Pisethan/sangapac-math-dataset", token=TOKEN)["train"]
-    dataset_dict = {re.sub(r'\s+', ' ', entry["input"].strip()): entry for entry in dataset}
-except Exception as e:
-    dataset_dict = {}
-    print(f"Error loading dataset: {e}")
-
-def normalize_input(text):
-    return re.sub(r'\s+', ' ', text.strip())
-
 def predict(input_text):
     if classifier is None:
         return "Model not loaded properly.", {"Error": "Model not loaded properly."}
 
     try:
-        input_text = normalize_input(input_text)
         result = classifier(input_text)
         label = result[0]["label"]
         score = result[0]["score"]
 
-
-        output = data["output"]
-        metadata = data["metadata"]
-
-        difficulty = metadata.get("difficulty", "Unknown")
-        steps = metadata.get("steps", ["No steps available"])
-
-        steps_text = "\n".join(steps)
-        simple_result = (
-            f"Category: {label}\n"
-            f"Confidence: {score:.2f}\n"
-            f"Result: {output}\n"
-            f"Difficulty: {difficulty}\n"
-            f"Steps:\n{steps_text}"
-        )
-
+        simple_result = f"Category: {label}\nConfidence: {score:.2f}"
         detailed_result = {
             "Category": label,
             "Confidence": score,
-            "Output (Result)": output,
-            "Metadata": metadata,
         }
 
         return simple_result, detailed_result
@@ -68,8 +38,6 @@ def predict(input_text):
         return "An error occurred.", {"Error": str(e)}
 
 # Gradio interface
-import gradio as gr
-
 sample_inputs = [
     ["1 + 1 = ?"],
     ["(5 + 3) × 2 = ?"],