Datasets:
debug
Browse files- README.md +11 -1
- nbnn_language_detection.py +0 -46
README.md
CHANGED
@@ -8,6 +8,16 @@ language:
|
|
8 |
- nn
|
9 |
size_categories:
|
10 |
- 100K<n<1M
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
---
|
12 |
# Dataset Card for Bokmål-Nynorsk Language Detection (main_train_split)
|
13 |
|
@@ -30,4 +40,4 @@ This dataset is intended for language detection for Bokmål to Nynorsk and vice versa.
|
|
30 |
|
31 |
## Usage
|
32 |
|
33 |
-
Intended for training Bokmål-Nynorsk detection models. For more details, refer to the repository where the dataset preparation script and the actual dataset reside.
|
|
|
8 |
- nn
|
9 |
size_categories:
|
10 |
- 100K<n<1M
|
11 |
+
configs:
|
12 |
+
- config_name: default
|
13 |
+
data_files:
|
14 |
+
- split: train
|
15 |
+
path: "train.jsonl"
|
16 |
+
- split: dev
|
17 |
+
path: "dev.jsonl"
|
18 |
+
- split: test
|
19 |
+
path: "test.jsonl"
|
20 |
+
|
21 |
---
|
22 |
# Dataset Card for Bokmål-Nynorsk Language Detection (main_train_split)
|
23 |
|
|
|
40 |
|
41 |
## Usage
|
42 |
|
43 |
+
Intended for training Bokmål-Nynorsk detection models. For more details, refer to the repository where the dataset preparation script and the actual dataset reside.
|
nbnn_language_detection.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
"""Loading script for the NbAiLab Bokmål/Nynorsk language-detection dataset.

Downloads three JSON-lines files (train/dev/test) from the Hugging Face Hub
and yields ``{"text", "language"}`` examples for each split.
"""

import json

# NOTE: `GeneratorBasedBuilder` (not the abstract `DatasetBuilder`) is required
# so that the `datasets` library actually drives `_generate_examples`.
from datasets import DatasetInfo, GeneratorBasedBuilder, SplitGenerator
from datasets.features import Features, Value

# Remote JSONL files, one per split.
_BASE_URL = "https://huggingface.co/datasets/NbAiLab/nbnn_language_detection/resolve/main"
_SPLIT_URLS = {
    "train": f"{_BASE_URL}/train.jsonl",
    "dev": f"{_BASE_URL}/dev.jsonl",
    "test": f"{_BASE_URL}/test.jsonl",
}


class NbnnLanguageDetection(GeneratorBasedBuilder):
    """Builder for Bokmål vs. Nynorsk text classification.

    Each line of the source files is a JSON object with a ``text`` field and
    a ``language`` label (presumably ``nb``/``nn`` — confirm against the data
    files; the README frontmatter lists ``nn``).
    """

    VERSION = "0.1.0"

    def _info(self):
        """Declare the dataset schema: plain text plus a language label."""
        return DatasetInfo(
            features=Features({
                'text': Value('string'),
                'language': Value('string'),
            }),
            supervised_keys=('text', 'language'),
        )

    def _split_generators(self, dl_manager):
        """Download the per-split JSONL files and emit one generator per split.

        Args:
            dl_manager: the `datasets` download manager supplied by the library.

        Returns:
            A list of `SplitGenerator`, one for each of train/dev/test.
        """
        downloaded_files = dl_manager.download(_SPLIT_URLS)
        return [
            SplitGenerator(name=split, gen_kwargs={'filepath': downloaded_files[split]})
            for split in _SPLIT_URLS
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one JSON-lines file.

        Args:
            filepath: local path to a downloaded ``*.jsonl`` file.

        Yields:
            Tuples of ``(line_index, {"text": str, "language": str})``.
        """
        # Explicit UTF-8: the texts contain Norwegian characters (å, ø, æ),
        # so we must not depend on the platform default encoding.
        with open(filepath, 'r', encoding='utf-8') as f:
            # `idx` rather than `id` — avoid shadowing the builtin.
            for idx, line in enumerate(f):
                record = json.loads(line)
                yield idx, {
                    'text': record['text'],
                    'language': record['language'],
                }
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|