Commit ee99c8e · Parent: 5b4e679 · Create README.md

README.md (ADDED)
## Overview

Original dataset [here](https://github.com/felipessalvatore/NLI_datasets).


## Dataset curation

One hypothesis in the dev set and three hypotheses in the train set are empty and have been filled in with the empty string `""`. Labels are encoded with a custom NLI mapping:

```
{"entailment": 0, "neutral": 1, "contradiction": 2}
```
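For reference, a minimal sketch of applying and inverting this mapping with the `datasets.ClassLabel` feature (the label names below simply restate the mapping above):

```python
from datasets import ClassLabel

# same label set as the mapping above
nli_labels = ClassLabel(names=["entailment", "neutral", "contradiction"])

nli_labels.str2int("neutral")   # 1
nli_labels.int2str(2)           # "contradiction"
```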
## Code to create the dataset

```python
import pandas as pd
from datasets import Features, Value, ClassLabel, Dataset, DatasetDict, load_dataset
from pathlib import Path


# load datasets
path = Path("<path to folder>/nli_datasets")
datasets = {}
for dataset_path in path.iterdir():
    datasets[dataset_path.name] = {}
    for name in dataset_path.iterdir():
        df = pd.read_csv(name)
        datasets[dataset_path.name][name.name.split(".")[0]] = df


ds = {}
for name, df_ in datasets["fracas"].items():

    df = df_.copy()
    assert df["label"].isna().sum() == 0

    # fill in empty hypotheses
    df = df.fillna("")

    # encode labels
    df["label"] = df["label"].map({"entailment": 0, "neutral": 1, "contradiction": 2})

    # cast to dataset
    features = Features({
        "premise": Value(dtype="string", id=None),
        "hypothesis": Value(dtype="string", id=None),
        "label": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]),
    })
    ds[name] = Dataset.from_pandas(df, features=features)

dataset = DatasetDict(ds)
dataset.push_to_hub("fracas", token="<token>")
```
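Once pushed, the dataset can be loaded back from the Hub. A minimal usage sketch; the `<user>/fracas` repository id is a placeholder that depends on the account used with `push_to_hub` above:

```python
from datasets import load_dataset

# "<user>/fracas" stands in for the repository created by push_to_hub above
fracas = load_dataset("<user>/fracas")

print(fracas)                     # DatasetDict with one entry per split
print(fracas["train"].features)   # premise/hypothesis strings and the ClassLabel
```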