import json

from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator
from datasets.features import Features, Value


# GeneratorBasedBuilder (rather than the abstract DatasetBuilder) supplies
# _prepare_split, so the class only needs to implement _generate_examples.
class MyDataset(GeneratorBasedBuilder):

    VERSION = "0.1.0"

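    # Declares the schema: each example holds a text string and its language
    # label, with (text, language) exposed as the supervised input/target pair.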
    def _info(self):
        print("Calling _info")
        return DatasetInfo(
            features=Features({
                'text': Value('string'),
                'language': Value('string'),
            }),
            supervised_keys=('text', 'language'),
        )

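    # Downloads the three JSONL files and hands each local path to
    # _generate_examples via one SplitGenerator per split.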
    def _split_generators(self, dl_manager):
        print("Calling _split_generators")
        urls = {
            'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
            'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
            'test': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/test.jsonl',
        }

        downloaded_files = dl_manager.download(urls)
        print(f"Downloaded files: {downloaded_files}")

        return [
            SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
            for split in urls
        ]

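    # Streams (key, example) pairs from a JSONL file, one example per line.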
    def _generate_examples(self, filepath):
        print(f"Calling _generate_examples with filepath: {filepath}")
        with open(filepath, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                data = json.loads(line)
                yield idx, {
                    'text': data['text'],
                    'language': data['language'],
                }
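
# A minimal sketch of how the builder might be exercised directly, as an
# alternative to pointing load_dataset() at this script; this usage is an
# assumption about a typical workflow, not part of the original example.
# download_and_prepare() triggers _info and _split_generators, then runs
# _generate_examples once per split; as_dataset() returns a DatasetDict keyed
# by the split names defined above.
if __name__ == '__main__':
    builder = MyDataset()
    builder.download_and_prepare()
    dataset = builder.as_dataset()
    print(dataset)              # DatasetDict with 'train', 'dev', and 'test' splits
    print(dataset['train'][0])  # e.g. {'text': '...', 'language': '...'}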