Spaces:
Runtime error
Runtime error
Trent
committed on
Commit
·
15f2759
1
Parent(s):
0d51b77
Gender samples
Browse files
app.py
CHANGED
@@ -41,8 +41,8 @@ if menu == "Contributions & Evaluation":
|
|
41 |
| Model | [FullEvaluation](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=1809754143) Average | 20Newsgroups Clustering | StackOverflow DupQuestions | Twitter SemEval2015 |
|
42 |
|-----------|---------------------------------------|-------|-------|-------|
|
43 |
| paraphrase-mpnet-base-v2 (previous SOTA) | 67.97 | 47.79 | 49.03 | 72.36 |
|
44 |
-
| all_datasets_v3_roberta-large (400k steps) | **70.22** | 50.12 | 52.18 | 75.28 |
|
45 |
-
| all_datasets_v3_mpnet-base (440k steps) | **70.01** | 50.22 | 52.24 | 76.27 |
|
46 |
''')
|
47 |
elif menu == "Sentence Similarity":
|
48 |
st.header('Sentence Similarity')
|
@@ -172,11 +172,22 @@ Hopefully the evaluation performed here can proceed towards improving Gender-neu
|
|
172 |
For more cool information on sentence embeddings, see the [sBert project](https://www.sbert.net/examples/applications/computing-embeddings/README.html).
|
173 |
''')
|
174 |
|
175 |
-
select_models = st.multiselect("Choose models", options=list(MODELS_ID), default=list(MODELS_ID)
|
|
|
|
|
176 |
|
177 |
-
|
178 |
-
|
179 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
180 |
|
181 |
enter = st.button("Compare")
|
182 |
if enter:
|
@@ -188,7 +199,7 @@ For more cool information on sentence embeddings, see the [sBert project](https:
|
|
188 |
softmax = [round(ts.item(), 4) for ts in torch.nn.functional.softmax(torch.from_numpy(value['score'].values))]
|
189 |
if softmax[0] > softmax[1]:
|
190 |
gender = "male"
|
191 |
-
elif abs(softmax[0] - softmax[1]) < 1e-
|
192 |
gender = "neutral"
|
193 |
else:
|
194 |
gender = "female"
|
|
|
41 |
| Model | [FullEvaluation](https://docs.google.com/spreadsheets/d/1vXJrIg38cEaKjOG5y4I4PQwAQFUmCkohbViJ9zj_Emg/edit#gid=1809754143) Average | 20Newsgroups Clustering | StackOverflow DupQuestions | Twitter SemEval2015 |
|
42 |
|-----------|---------------------------------------|-------|-------|-------|
|
43 |
| paraphrase-mpnet-base-v2 (previous SOTA) | 67.97 | 47.79 | 49.03 | 72.36 |
|
44 |
+
| **all_datasets_v3_roberta-large (400k steps)** | **70.22** | 50.12 | 52.18 | 75.28 |
|
45 |
+
| **all_datasets_v3_mpnet-base (440k steps)** | **70.01** | 50.22 | 52.24 | 76.27 |
|
46 |
''')
|
47 |
elif menu == "Sentence Similarity":
|
48 |
st.header('Sentence Similarity')
|
|
|
172 |
For more cool information on sentence embeddings, see the [sBert project](https://www.sbert.net/examples/applications/computing-embeddings/README.html).
|
173 |
''')
|
174 |
|
175 |
+
select_models = st.multiselect("Choose models", options=list(MODELS_ID), default=list(MODELS_ID))
|
176 |
+
|
177 |
+
samples = st.radio("Samples", options=["President of United States", "Professor", "Nurse", "Custom"])
|
178 |
|
179 |
+
if samples == "President of United States":
|
180 |
+
base_text = st.text_input("Gender Neutral Text", "President of the United States promised relief to Hurricane survivors.")
|
181 |
+
male_text = st.text_input("Male-assumed Text", "He promised relief to Hurricane survivors.")
|
182 |
+
female_text = st.text_input("Female-assumed Text", "She promised relief to Hurricane survivors.")
|
183 |
+
elif samples == "Professor":
|
184 |
+
base_text = st.text_input("Gender Neutral Text", "Professor ended the class earlier than usual.")
|
185 |
+
male_text = st.text_input("Male-assumed Text", "He ended the class earlier than usual.")
|
186 |
+
female_text = st.text_input("Female-assumed Text", "She ended the class earlier than usual.")
|
187 |
+
elif samples == "Nurse":
|
188 |
+
base_text = st.text_input("Gender Neutral Text", "Nurse administered the vaccine and rubbed alcohol.")
|
189 |
+
male_text = st.text_input("Male-assumed Text", "He administered the vaccine and rubbed alcohol.")
|
190 |
+
female_text = st.text_input("Female-assumed Text", "She administered the vaccine and rubbed alcohol.")
|
191 |
|
192 |
enter = st.button("Compare")
|
193 |
if enter:
|
|
|
199 |
softmax = [round(ts.item(), 4) for ts in torch.nn.functional.softmax(torch.from_numpy(value['score'].values))]
|
200 |
if softmax[0] > softmax[1]:
|
201 |
gender = "male"
|
202 |
+
elif abs(softmax[0] - softmax[1]) < 1e-3:
|
203 |
gender = "neutral"
|
204 |
else:
|
205 |
gender = "female"
|