Datasets:
data
Browse files- dev.jsonl +3 -0
- nbnn_language_detection.py +37 -0
- test.jsonl +3 -0
- train.jsonl +3 -0
dev.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:56e780418936de3c847b9b2b34c44c894a313b9dc19a548aa69508ce3e80f6a4
|
3 |
+
size 309921
|
nbnn_language_detection.py
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from datasets import DatasetBuilder, GenerateMode, SplitGenerator, load_dataset
|
2 |
+
from datasets.features import Features, Sequence, Value
|
3 |
+
|
4 |
+
class MyDataset(DatasetBuilder):
    """Loader for the NB/NN language-detection corpus stored as local JSONL splits.

    Each line of ``train.jsonl`` / ``dev.jsonl`` / ``test.jsonl`` is a JSON
    object with at least the fields ``text`` and ``language`` (both strings).
    """

    VERSION = "0.1.0"

    def _info(self):
        """Describe the dataset's features and supervised keys.

        Returns a dict with string keys. The original code used the bare
        names ``features`` and ``supervised_keys`` as dict keys, which
        raises ``NameError`` the moment this method is called.
        """
        return {
            'features': Features({
                'text': Value('string'),
                'language': Value('string'),
            }),
            'supervised_keys': ('text', 'language'),
        }

    def _split_generators(self, dl_manager):
        """Return one SplitGenerator per local JSONL split file.

        ``dl_manager`` is unused because the files ship alongside the
        loading script rather than being downloaded.
        """
        files = {
            'train': 'train.jsonl',
            'dev': 'dev.jsonl',
            'test': 'test.jsonl',
        }
        return [
            SplitGenerator(name=split, gen_kwargs={'filepath': path})
            for split, path in files.items()
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example)`` pairs from one JSONL file.

        Each example is a dict with ``text`` and ``language`` keys taken
        verbatim from the corresponding JSON record.
        """
        import json  # stdlib; was used but never imported in the original module

        # JSONL corpora are UTF-8; don't rely on the platform default encoding.
        with open(filepath, 'r', encoding='utf-8') as f:
            # `idx` instead of the original `id`, which shadowed the builtin.
            for idx, line in enumerate(f):
                record = json.loads(line)
                yield idx, {
                    'text': record['text'],
                    'language': record['language'],
                }
test.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:807b532d715c62a8a8784793851033c1c3595d87cb824ed42ca118beca0ef521
|
3 |
+
size 315124
|
train.jsonl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c4db4e93df2a18b933d001982ea58a3bdb758090165399fee150dce13d41785d
|
3 |
+
size 249745815
|