import gradio as gr
from transformers import pipeline

# Load the fine-tuned Italian misogyny-detection model from the Hugging Face Hub.
model_name = "maiurilorenzo/misogyny-detection-it"
classifier = pipeline("text-classification", model=model_name)
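
# The text-classification pipeline returns a list with one prediction per
# input; the score below is a hypothetical value for illustration:
#   classifier("Testo di esempio")
#   -> [{"label": "LABEL_1", "score": 0.93}]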


def detect_misogyny(text):
    # Run the classifier; take the first (and only) prediction.
    result = classifier(text)
    label = result[0]["label"]
    score = result[0]["score"]
    # Map the raw model labels to human-readable names.
    label_readable = "Misogynistic" if label == "LABEL_1" else "Non-Misogynistic"
    return f"Label: {label_readable} (Confidence: {score:.2f})"


demo = gr.Interface(
    fn=detect_misogyny,
    inputs=gr.Textbox(lines=3, placeholder="Enter Italian text here..."),
    outputs="text",
    title="Misogyny Detection in Italian",
    description=(
        "This demo uses a fine-tuned BERT model to detect misogynistic content in Italian text. "
        "Enter a phrase or sentence, and the model will classify it as 'Misogynistic' or "
        "'Non-Misogynistic' along with a confidence score."
    ),
    article="""
### About the Model

This model is fine-tuned on the AMI (Automatic Misogyny Identification) dataset for binary classification of misogynistic content in Italian.

- **Labels:**
  - `1`: Misogynistic
  - `0`: Non-Misogynistic
- **Source Model:** [dbmdz/bert-base-italian-xxl-uncased](https://huggingface.co/dbmdz/bert-base-italian-xxl-uncased)
""",
)

demo.launch()
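
# Optional: standard Gradio launch parameters, e.g. to get a temporary
# public share link or to bind to a specific host/port:
#   demo.launch(share=True)
#   demo.launch(server_name="0.0.0.0", server_port=7860)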