|
--- |
|
dataset_info: |
|
- config_name: default |
|
features: |
|
- name: utterance |
|
dtype: string |
|
- name: label |
|
sequence: int64 |
|
splits: |
|
- name: train |
|
num_bytes: 7169122 |
|
num_examples: 9042 |
|
- name: test |
|
num_bytes: 450937 |
|
num_examples: 358 |
|
download_size: 8973442 |
|
dataset_size: 7620059 |
|
- config_name: intents |
|
features: |
|
- name: id |
|
dtype: int64 |
|
- name: name |
|
dtype: string |
|
- name: tags |
|
sequence: 'null' |
|
- name: regex_full_match |
|
sequence: 'null' |
|
- name: regex_partial_match |
|
sequence: 'null' |
|
- name: description |
|
dtype: 'null' |
|
splits: |
|
- name: intents |
|
num_bytes: 291 |
|
num_examples: 10 |
|
download_size: 3034 |
|
dataset_size: 291 |
|
configs: |
|
- config_name: default |
|
data_files: |
|
- split: train |
|
path: data/train-* |
|
- split: test |
|
path: data/test-* |
|
- config_name: intents |
|
data_files: |
|
- split: intents |
|
path: intents/intents-* |
|
task_categories: |
|
- text-classification |
|
language: |
|
- en |
|
--- |
|
|
|
# reuters |
|
|
|
This is a text classification dataset. It is intended for machine learning research and experimentation. |
|
|
|
This dataset is obtained by formatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).
|
|
|
## Usage |
|
|
|
It is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html): |
|
|
|
```python |
|
from autointent import Dataset |
|
|
|
reuters = Dataset.from_hub("AutoIntent/reuters") |
|
``` |
|
|
|
## Source |
|
|
|
This dataset is taken from the publicly available `ucirvine/reuters21578` dataset and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):
|
|
|
```python |
|
from autointent import Dataset |
|
import datasets |
|
|
|
|
|
|
|
def get_intents_info(ds: datasets.DatasetDict) -> list[str]:
    """Collect all topic names occurring in the train split, sorted alphabetically."""
    unique_names = {name for topics in ds["train"]["topics"] for name in topics}
    return sorted(unique_names)
|
|
|
def parse(ds: datasets.Dataset, intent_names: list[str]) -> list[dict]:
    """Convert raw records into AutoIntent samples with multi-hot label vectors.

    Each output dict has an ``"utterance"`` (the record text) and a ``"label"``
    list with one 0/1 indicator per name in ``intent_names``.
    """
    samples = []
    for record in ds:
        topics = record["topics"]
        multi_hot = [int(name in topics) for name in intent_names]
        samples.append({"utterance": record["text"], "label": multi_hot})
    return samples
|
|
|
|
|
def get_low_resource_classes_mask(ds: list[dict], intent_names: list[str], fraction_thresh: float = 0.01) -> list[bool]:
    """Flag classes whose fraction of positive samples falls below a threshold.

    :param ds: parsed samples, each carrying a multi-hot ``"label"`` list.
    :param intent_names: class names; fixes the length of the returned mask.
    :param fraction_thresh: minimum fraction of positives a class needs to be kept.
    :return: one boolean per class; True means the class is low-resource.
    """
    # Guard against an empty dataset: the original division by len(ds) would
    # raise ZeroDivisionError; with zero samples every class has zero support.
    if not ds:
        return [True] * len(intent_names)
    counts = [0] * len(intent_names)
    for sample in ds:
        for i, indicator in enumerate(sample["label"]):
            counts[i] += indicator
    total = len(ds)
    return [count / total < fraction_thresh for count in counts]
|
|
|
def remove_low_resource_classes(ds: datasets.Dataset, mask: list[bool]) -> list[dict]: |
|
res = [] |
|
for sample in ds: |
|
if sum(sample["label"]) == 1 and mask[sample["label"].index(1)]: |
|
continue |
|
sample["label"] = [ |
|
indicator for indicator, low_resource in |
|
zip(sample["label"], mask, strict=True) if not low_resource |
|
] |
|
res.append(sample) |
|
return res |
|
|
|
def remove_oos(ds: list[dict]) -> list[dict]:
    """Drop out-of-scope samples, i.e. those with no positive class in the label.

    Adds the return annotation the sibling functions already carry, and uses
    ``any`` instead of ``sum(...) != 0`` for the 0/1 indicator check.
    """
    return [sample for sample in ds if any(sample["label"])]
|
|
|
|
|
if __name__ == "__main__":
    # Fetch the ModHayes split of the original Reuters-21578 corpus
    # (trust_remote_code is required because it ships a loading script).
    raw = datasets.load_dataset("ucirvine/reuters21578", "ModHayes", trust_remote_code=True)

    names = get_intents_info(raw)
    parsed_train = parse(raw["train"], names)
    parsed_test = parse(raw["test"], names)

    # Compute the rare-class mask on the train split, then drop those classes
    # (and the now-empty samples) from both splits.
    low_resource = get_low_resource_classes_mask(parsed_train, names)
    names = [name for idx, name in enumerate(names) if not low_resource[idx]]
    filtered_train = remove_oos(remove_low_resource_classes(parsed_train, low_resource))
    filtered_test = remove_oos(remove_low_resource_classes(parsed_test, low_resource))

    intents = [{"id": i, "name": name} for i, name in enumerate(names)]
    converted = Dataset.from_dict({"intents": intents, "train": filtered_train, "test": filtered_test})
|
``` |
|
|