from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator
from datasets.features import Features, Value
import json

# Subclass GeneratorBasedBuilder (rather than the abstract DatasetBuilder),
# which implements _prepare_split and drives _generate_examples below.
class MyDataset(GeneratorBasedBuilder):

    VERSION = "0.1.0"

    def _info(self):
        # Declares the dataset schema and the (input, target) columns used
        # when the dataset is loaded with as_supervised=True.
        print("Calling _info")
        return DatasetInfo(
            features=Features({
                'text': Value('string'),
                'language': Value('string'),
            }),
            supervised_keys=('text', 'language')
        )

    def _split_generators(self, dl_manager):
        # Downloads the raw JSON Lines files and defines one split per file.
        print("Calling _split_generators")
        urls = {
            'train': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/train.jsonl',
            'dev': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/dev.jsonl',
            'test': 'https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main/test.jsonl',
        }

        downloaded_files = dl_manager.download(urls)
        print(f"Downloaded files: {downloaded_files}")

        return [
            SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
            for split in urls.keys()
        ]

    def _generate_examples(self, filepath):
        # Reads one JSON Lines file and yields (key, example) pairs.
        print(f"Calling _generate_examples with filepath: {filepath}")
        with open(filepath, 'r', encoding='utf-8') as f:
            for idx, line in enumerate(f):
                data = json.loads(line)
                yield idx, {
                    'text': data['text'],
                    'language': data['language']
                }
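

# --- Usage sketch (illustrative, not part of the original script) ---
# Assuming this file is saved locally as, say, my_dataset.py, load_dataset()
# imports it and calls _info, _split_generators and _generate_examples in that
# order, which is what the print statements above make visible. The file name
# is an assumption, and newer versions of the datasets library may also require
# trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("my_dataset.py")   # path to this loading script
    print(dataset)              # DatasetDict with the train/dev/test splits
    print(dataset["train"][0])  # e.g. {'text': ..., 'language': ...}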