Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import torch
|
3 |
+
from transformers import AutoModel, AutoTokenizer
|
4 |
+
|
5 |
+
# --- Page setup ---------------------------------------------------------
st.set_page_config(page_title="ME2-BERT Moralities", layout="centered")

# Cosmetic override for the file-uploader widget: hide Streamlit's default
# size-limit helper text and show a custom "max 10 MB" message instead.
# NOTE(review): the <script> block below is injected via st.markdown;
# scripts inserted as HTML this way are typically NOT executed by the
# browser — confirm it runs, the <style> rules may be doing all the work.
st.markdown(
    """
    <style>
    [data-testid="stFileUploadDropzone"] p,
    [data-baseweb="file-uploader-dropzone"] p {
        visibility: hidden !important;
        display: none !important;
    }
    [data-testid="stFileUploadDropzone"] p:before,
    [data-baseweb="file-uploader-dropzone"] p:before {
        content: "Drag and drop file here or browse (max 10 MB)";
        visibility: visible;
        display: block;
        color: rgb(120, 119, 119);
    }
    </style>
    <script>
    document.addEventListener('DOMContentLoaded', function() {
        let elements = document.querySelectorAll('span, p, div');
        elements.forEach(el => {
            if (el.textContent.includes('200 MB') || el.textContent.includes('200MB')) {
                el.style.display = 'none';
            }
        });
    });
    </script>
    """,
    unsafe_allow_html=True
)
|
36 |
+
|
37 |
+
# Hugging Face Hub id of the pretrained ME2-BERT checkpoint.
MODEL_NAME = "lorenzozan/ME2-BERT"

# Maps the model's short output labels to full Moral Foundation names
# (Moral Foundation Theory axes) for display.
MF_MAPPING = {
    "CH": "CARE/HARM",
    "FC": "FAIRNESS/CHEATING",
    "LB": "LOYALTY/BETRAYAL",
    "AS": "AUTHORITY/SUBVERSION",
    "PD": "PURITY/DEGRADATION"
}
|
45 |
+
|
46 |
+
@st.cache_resource
def load_model_and_tokenizer(model_name: str):
    """Load the ME2-BERT model and its tokenizer from the Hub.

    Decorated with st.cache_resource so the weights are downloaded and
    instantiated once per server process, not on every script rerun.

    Returns:
        (model, tokenizer) tuple, with the model switched to eval mode.
    """
    tok = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    net = AutoModel.from_pretrained(model_name, trust_remote_code=True)
    # Inference only: disable dropout and other training-time behavior.
    net.eval()
    return net, tok


model, tokenizer = load_model_and_tokenizer(MODEL_NAME)
|
54 |
+
|
55 |
+
def predict_moralities(text_list, max_seq_length=200):
    """Score each input text against the five moral foundations.

    Args:
        text_list: list of raw input strings to classify.
        max_seq_length: token length every text is padded/truncated to.

    Returns:
        A list with one dict per input text, mapping full foundation names
        (e.g. "CARE/HARM") to float scores.
    """
    batch = tokenizer(
        text_list,
        max_length=max_seq_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    # Pure inference: no gradients needed.
    with torch.no_grad():
        outputs = model(
            input_ids=batch["input_ids"],
            attention_mask=batch["attention_mask"],
            return_dict=True
        )
    # outputs[i] holds a per-text mapping of short labels to scores; rename
    # the short labels via MF_MAPPING (unknown labels pass through as-is).
    return [
        {MF_MAPPING.get(lbl, lbl): float(score) for lbl, score in outputs[i].items()}
        for i in range(len(text_list))
    ]
|
77 |
+
|
78 |
+
st.title("ME2-BERT: A BERT-based model for Moral Foundations Prediction")
# Intro copy: paper abstract, usage instructions, and paper/code links.
# Fix: removed duplicated word in the user-facing text ("web app app").
st.markdown(
    """
    > Moralities, emotions, and events are complex aspects of human cognition, which are often treated separately since capturing their combined effects is challenging, especially due to the lack of annotated data. Leveraging their interrelations hence becomes crucial for advancing the understanding of human moral behaviors.
    In this work, we propose ME²-BERT, the first holistic framework for fine-tuning a pre-trained language model like BERT to the task of moral foundation prediction. ME²-BERT integrates events and emotions for learning domain-invariant morality-relevant text representations.
    Our extensive experiments show that ME²-BERT outperforms existing state-of-the-art methods for moral foundation prediction, with an average percentage increase up to 35% in the out-of-domain scenario.

    **ME2-BERT** is a model for **Moral Foundation Prediction** based on [Moral Foundation Theory (MFT)](https://moralfoundations.org/).
    Use this web app to predict moral foundations (CARE/HARM, FAIRNESS/CHEATING, AUTHORITY/SUBVERSION, LOYALTY/BETRAYAL, PURITY/DEGRADATION)
    by entering a text or by uploading a csv file with at least a 'text' column.

    [Paper](https://aclanthology.org/2025.coling-main.638.pdf) | [Source code](https://github.com/lorenzozangari/ME2-BERT).
    """
)
|
92 |
+
|
93 |
+
def do_prediction():
    """on_change callback for the text area.

    Runs the model on non-blank input and stores the single result dict in
    session state; clears it to None when the box is empty or whitespace so
    the results section is hidden on the next rerun.
    """
    text = st.session_state["user_text_box"]
    if not text.strip():
        st.session_state["prediction_result"] = None
    else:
        st.session_state["prediction_result"] = predict_moralities([text])[0]
|
99 |
+
|
100 |
+
# Initialise the result slot on the first script run.
if "prediction_result" not in st.session_state:
    st.session_state["prediction_result"] = None

# Input widget; Streamlit invokes do_prediction when the committed value
# changes (Ctrl+Enter or focus loss), before the script reruns.
st.text_area(
    "Enter your text here (Ctrl+Enter to apply):",
    height=150,
    key="user_text_box",
    on_change=do_prediction
)

# Render the latest prediction, if any (None renders nothing).
if st.session_state["prediction_result"]:
    st.write("**Predicted moral foundations:**")
    st.json(st.session_state["prediction_result"])
|