cryptexcode committed
Commit 9bf21a6 · 1 Parent(s): 469632b
Update multiconer_v2.py

Files changed: multiconer_v2.py (+35, -41)
multiconer_v2.py
CHANGED
@@ -47,7 +47,7 @@ For more details see https://multiconer.github.io/
 
 """
 
-_URL = "https://huggingface.co/datasets/MultiCoNER/multiconer_v2/
+_URL = "https://huggingface.co/datasets/MultiCoNER/multiconer_v2/raw/main"
 
 _BN_TRAINING_FILE = "BN-Bangla/bn_train.conll"
 _BN_DEV_FILE = "BN-Bangla/bn_dev.conll"
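The only change in this hunk is the download base, which now points at the repository's raw/main tree. A minimal sketch of how that base combines with the per-language paths defined just below it, assuming the usual f-string join in _split_generators (that code is not part of this diff):

# Sketch only: the join performed in _split_generators is assumed, not shown in this diff.
_URL = "https://huggingface.co/datasets/MultiCoNER/multiconer_v2/raw/main"
_BN_TRAINING_FILE = "BN-Bangla/bn_train.conll"

bn_train_url = f"{_URL}/{_BN_TRAINING_FILE}"
print(bn_train_url)
# -> https://huggingface.co/datasets/MultiCoNER/multiconer_v2/raw/main/BN-Bangla/bn_train.conll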
@@ -285,49 +285,43 @@ class MultiCoNER2(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
-
 
-        with open(filepath) as f:
-            guid =
+        with open(filepath) as f:
+            guid = -1
             s_id = None
             tokens = []
             ner_tags = []
 
             for line in f:
-                [old loop body, original lines 297-328, not captured in this view]
-                # yield guid, {
-                #     "id": s_id,
-                #     "tokens": tokens,
-                #     "ner_tags": ner_tags,
-                # }
+                if line.strip().startswith("# id"):
+                    s_id = line.split('\t')[0].split(' ')[-1].strip()
+                    guid += 1
+                    tokens = []
+                    ner_tags = []
+                elif '_ _' in line:
+                    # Separator is " _ _ "
+                    splits = line.split("_ _")
+                    tokens.append(splits[0].strip())
+                    ner_tags.append(splits[1].strip())
+                elif len(line.strip()) == 0:
+                    if s_id and len(tokens) >= 1 and len(tokens) == len(ner_tags):
+                        yield guid, {
+                            "id": guid,
+                            "sample_id": s_id,
+                            "tokens": tokens,
+                            "ner_tags": ner_tags,
+                        }
+                        s_id = None
+                        tokens = []
+                        ner_tags = []
+                else:
+                    continue
+
+            if s_id and len(tokens) >= 1 and len(tokens) == len(ner_tags):
+                yield guid, {
+                    "id": guid,
+                    "sample_id": s_id,
+                    "tokens": tokens,
+                    "ner_tags": ner_tags,
+                }
+
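The rewritten generator recognizes three kinds of lines in the .conll files: a sentence header starting with "# id", token lines where the surface token and its tag are separated by " _ _ ", and blank lines that flush a completed sentence. A self-contained sketch of that parsing logic on a made-up snippet (only the "# id" prefix and the " _ _ " separator come from the code above; the id, tokens, and tags are invented):

import io

# Hypothetical input; the layout mirrors what the parser above expects,
# but these particular lines are invented for illustration.
sample = (
    "# id 1a2b3c\tdomain=bn\n"
    "sachin _ _ B-PER\n"
    "tendulkar _ _ I-PER\n"
    "khelen _ _ O\n"
    "\n"
)

s_id, tokens, ner_tags = None, [], []
for line in io.StringIO(sample):
    if line.strip().startswith("# id"):
        # the sentence id is the last space-separated field before the first tab
        s_id = line.split('\t')[0].split(' ')[-1].strip()
    elif '_ _' in line:
        token, tag = (part.strip() for part in line.split("_ _"))
        tokens.append(token)
        ner_tags.append(tag)
    elif not line.strip() and tokens:
        print(s_id, tokens, ner_tags)
        # -> 1a2b3c ['sachin', 'tendulkar', 'khelen'] ['B-PER', 'I-PER', 'O']
        s_id, tokens, ner_tags = None, [], []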
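With _generate_examples filled in, the script should again work end to end through the datasets library. A hypothetical usage example; the config name "Bangla (BN)" is a guess based on the BN-Bangla file paths above and is not confirmed by this diff:

from datasets import load_dataset

# Config name is an assumption; trust_remote_code is required on recent
# versions of datasets to run a repository's loading script.
ds = load_dataset("MultiCoNER/multiconer_v2", "Bangla (BN)", trust_remote_code=True)
print(ds["train"][0])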