bugfix

- enhanced-cobald-dataset.py +4 -4
- parsing.py +83 -36

enhanced-cobald-dataset.py
CHANGED
@@ -1,5 +1,5 @@
-from datasets import GeneratorBasedBuilder, BuilderConfig
-from datasets import Sequence, Value
+import datasets
+from datasets import GeneratorBasedBuilder, BuilderConfig, Sequence, Value
 
 from .parsing import parse_incr
 
@@ -39,7 +39,7 @@ class EnhancedCobaldDataset(GeneratorBasedBuilder):
             "xpos": Sequence(Value("string")),
             # huggingface datasets can't handle dicts with dynamic keys, so represent feats as string
             "feats": Sequence(Value("string")),
-            "heads": Sequence(Value("string")),
+            "heads": Sequence(Value("int32")),
             "deprels": Sequence(Value("string")),
             "deps": Sequence(Value("string")),
             "miscs": Sequence(Value("string")),
@@ -64,4 +64,4 @@ class EnhancedCobaldDataset(GeneratorBasedBuilder):
         Generator function that reads a CoNLL-U file and yields one sentence at a time.
         Each sentence is represented as a dictionary where each field is a list.
         """
-        yield from parse_incr(filepath)
+        yield from enumerate(parse_incr(filepath))
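Both fixes in this file concern the contract with the huggingface `datasets` library: `_generate_examples` on a `GeneratorBasedBuilder` must yield (key, example) pairs, which the added `enumerate` supplies, and heads are integer indices, so `Value("int32")` matches what the parser actually produces. A minimal sketch of that contract, with a hypothetical two-sentence stand-in for the `parse_incr` stream:

from datasets import Features, Sequence, Value

# Heads are integer indices into the sentence, hence int32 rather than string.
features = Features({"heads": Sequence(Value("int32"))})

# _generate_examples must yield (key, example) pairs; enumerate() turns a
# plain stream of examples into exactly that.
sentences = [{"heads": [0, 1]}, {"heads": [2, 0, 1]}]  # hypothetical parse_incr output
for key, example in enumerate(sentences):
    features.encode_example(example)  # checks the example against the declared types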
parsing.py
CHANGED
@@ -3,6 +3,13 @@ import json
 
 ROOT_HEAD = 0
 
+# ========================= Field level =========================
+
+def parse_nullable(field: str) -> str | None:
+    if field in ["", "_"]:
+        return None
+    return field
+
 
 def is_null_index(x: str) -> bool:
     try:
@@ -11,11 +18,16 @@ def is_null_index(x: str) -> bool:
     except ValueError:
         return False
 
-
+def is_range_index(x: str) -> bool:
+    try:
+        a, b = x.split('-')
+        return a.isdecimal() and b.isdecimal()
+    except ValueError:
+        return False
 
-def parse_nullable(field: str) -> str | None:
-    if field in ["", "_"]:
-        return None
+def parse_id(field: str) -> str:
+    if not field.isdecimal() and not is_null_index(field) and not is_range_index(field):
+        raise ValueError(f"Incorrect token id: {field}.")
     return field
 
 
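The two added helpers let `parse_id` accept all three id shapes that CoNLL-U defines: plain decimal ids for regular tokens ("5"), a-b ranges for multiword tokens ("5-6"), and a.b ids for null (empty) nodes ("5.1"). A quick check against a local copy of the new range helper:

# Local copy of the is_range_index added above.
def is_range_index(x: str) -> bool:
    try:
        a, b = x.split('-')
        return a.isdecimal() and b.isdecimal()
    except ValueError:
        return False

assert is_range_index("5-6")       # multiword-token range
assert not is_range_index("5")     # regular token id
assert not is_range_index("5.1")   # null (empty) node id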
@@ -73,7 +85,7 @@ def parse_deps(field: str) -> str | None:
     if field in ["", "_"]:
         return None
 
-    token_deps = parse_joint_field(field, inner_sep='
+    token_deps = parse_joint_field(field, inner_sep=':', outer_sep='|')
 
     # Validate deps.
     if len(token_deps) == 0:
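The separator fix matters because an enhanced-dependencies field such as 2:nsubj|4:conj joins head:relation pairs with '|' and separates head from relation with ':'. `parse_joint_field` itself is defined outside this diff, so the snippet below is an illustrative stand-in that only demonstrates the separators:

# Illustrative stand-in for parse_joint_field(field, inner_sep=':', outer_sep='|').
def split_joint_field(field: str, inner_sep: str, outer_sep: str) -> dict[str, str]:
    return dict(part.split(inner_sep, 1) for part in field.split(outer_sep))

assert split_joint_field("2:nsubj|4:conj", ':', '|') == {"2": "nsubj", "4": "conj"}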
@@ -87,27 +99,53 @@ def parse_deps(field: str) -> str | None:
 
 Token = dict[str, str]
 
+def validate_null_token(token: Token):
+    if token['word'] != "#NULL":
+        raise ValueError(f"Null token must have #NULL form, not {token['word']}.")
+    if token['head'] is not None:
+        raise ValueError(f"Null token must have no head, but found {token['head']}.")
+    if token['deprel'] is not None:
+        raise ValueError(f"Null token must have no deprel, but found {token['deprel']}.")
+    if token['misc'] != 'ellipsis':
+        raise ValueError(f"Null token must have 'ellipsis' misc, not {token['misc']}.")
+
+def validate_range_token(token: Token):
+    if token['lemma'] != '_':
+        raise ValueError(f"Range token lemma must be _, but found {token['lemma']}.")
+    if token['upos'] is not None:
+        raise ValueError(f"Range token upos must be _, but found {token['upos']}.")
+    if token['xpos'] is not None:
+        raise ValueError(f"Range token xpos must be _, but found {token['xpos']}.")
+    if token['feats'] is not None:
+        raise ValueError(f"Range token feats must be _, but found {token['feats']}.")
+    if token['head'] is not None:
+        raise ValueError(f"Range token head must be _, but found {token['head']}.")
+    if token['deprel'] is not None:
+        raise ValueError(f"Range token deprel must be _, but found {token['deprel']}.")
+    if token['misc'] is not None:
+        raise ValueError(f"Range token misc must be _, but found {token['misc']}.")
+    if token['deepslot'] is not None:
+        raise ValueError(f"Range token deepslot must be _, but found {token['deepslot']}.")
+    if token['semclass'] is not None:
+        raise ValueError(f"Range token semclass must be _, but found {token['semclass']}.")
+
+def validate_regular_token(token: Token):
+    if token['head'] == token['id']:
+        raise ValueError(f"Self-loop detected in head.")
+    for head in json.loads(token['deps']):
+        if head == token['id']:
+            raise ValueError(f"Self-loop detected in deps. head: {head}, id: {token['id']}")
+
 def validate_token(token: Token):
-
-    if is_null_index(token['id']):
-        if token['word'] != "#NULL":
-            raise ValueError(f"Null token must have #NULL form, not {token['word']}.")
-        if token['head'] is not None:
-            raise ValueError(f"Null token must have no head, but found {token['head']}.")
-        if token['deprel'] is not None:
-            raise ValueError(f"Null token must have no deprel, but found {token['deprel']}.")
-        if token['misc'] != 'ellipsis':
-            raise ValueError(f"Null token must have 'ellipsis' misc, not {token['misc']}.")
+    idtag = token['id']
+    if idtag.isdecimal():
+        validate_regular_token(token)
+    elif is_range_index(idtag):
+        validate_range_token(token)
+    elif is_null_index(idtag):
+        validate_null_token(token)
     else:
-        if not token['id'].isdecimal():
-            raise ValueError(f"Regular token must have integer id, but found {token['id']}.")
-        if not token['head'].isdecimal() and token['head'] is not None:
-            raise ValueError(f"Regular token must have integer head, not {token['head']}.")
-        if token['head'] == token['id']:
-            raise ValueError(f"Self-loop detected in head.")
-        for head in token['deps']:
-            if head == token['id']:
-                raise ValueError(f"Self-loop detected in deps.")
+        raise ValueError(f"Incorrect token id: {idtag}.")
 
 
 def parse_token(line: str) -> Token:
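The rewritten `validate_token` dispatches on the id shape, one validator per token kind. Note also that `validate_regular_token` now iterates `json.loads(token['deps'])` where the old code iterated the raw deps string character by character. A small mirror of the dispatch, assuming `parsing.py` is importable so the two helpers can be reused:

from parsing import is_range_index, is_null_index  # assumes parsing.py is on the path

def token_kind(idtag: str) -> str:
    # Mirrors validate_token's dispatch order.
    if idtag.isdecimal():
        return "regular"   # -> validate_regular_token
    if is_range_index(idtag):
        return "range"     # -> validate_range_token, e.g. "5-6"
    if is_null_index(idtag):
        return "null"      # -> validate_null_token, e.g. "5.1"
    raise ValueError(f"Incorrect token id: {idtag}.")

assert [token_kind(i) for i in ("5", "5-6", "5.1")] == ["regular", "range", "null"]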
@@ -119,10 +157,10 @@ def parse_token(line: str) -> Token:
     token = {
         "id": parse_id(fields[0]),
         "word": parse_word(fields[1]),
-        "lemma": parse_nullable(fields[2]),
+        "lemma": fields[2] if fields[2] != "" else None,
         "upos": parse_nullable(fields[3]),
         "xpos": parse_nullable(fields[4]),
-        "feats": parse_feats(
+        "feats": parse_feats(fields[5]),
         "head": parse_head(fields[6]),
         "deprel": parse_nullable(fields[7]),
         "deps": parse_deps(fields[8]),
@@ -133,7 +171,7 @@ def parse_token(line: str) -> Token:
         validate_token(token)
 
     except Exception as e:
-        raise ValueError(f"Validation failed on token {
+        raise ValueError(f"Validation failed on token {fields[0]}: {e}")
 
     return token
 
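Unlike `parse_nullable`, the new inline lemma expression maps only the empty string to None and keeps a literal '_', which is exactly what `validate_range_token` relies on when it checks token['lemma'] != '_'. A local mirror of the expression:

def parse_lemma(field: str) -> str | None:
    # Mirrors the new inline expression: only "" becomes None; "_" survives.
    return field if field != "" else None

assert parse_lemma("_") == "_"   # parse_nullable would have returned None here
assert parse_lemma("") is None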
@@ -171,7 +209,7 @@ def validate_sentence(sentence: Sentence):
 
     # Validate sentence heads and ids agreement.
     ids = set(sentence["ids"])
-    int_ids = {idtag for idtag in sentence["ids"] if idtag.isdecimal()}
+    int_ids = {int(idtag) for idtag in sentence["ids"] if idtag.isdecimal()}
 
     contiguous_int_ids = set(range(min(int_ids), max(int_ids) + 1))
     if int_ids != contiguous_int_ids:
@@ -187,10 +225,11 @@ def validate_sentence(sentence: Sentence):
     if excess_heads:
         raise ValueError(f"Heads are inconsistent with sentence ids. Excessive heads: {excess_heads}.")
 
-    sentence_deps_heads = {
-        head
-        for deps in sentence["deps"] if deps is not None
-        for head in json.loads(deps)}
+    sentence_deps_heads = {
+        head
+        for deps in sentence["deps"] if deps is not None
+        for head in json.loads(deps) if head != str(ROOT_HEAD)
+    }
     excess_deps_heads = sentence_deps_heads - ids
     if excess_deps_heads:
         raise ValueError(f"Deps heads are inconsistent with sentence ids. Excessive heads: {excess_deps_heads}.")
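The int() cast is the substance of the first hunk: with string ids, min and max compare lexicographically and range() rejects string endpoints outright, so the contiguity check could not work. The second hunk additionally drops str(ROOT_HEAD) from sentence_deps_heads, so the root pseudo-head "0" no longer shows up as an "excessive" head. A short demonstration of the cast:

ids = ["1", "2", "10"]
assert max(ids) == "2"  # lexicographic: "2" > "10"

int_ids = {int(i) for i in ids if i.isdecimal()}
contiguous = set(range(min(int_ids), max(int_ids) + 1))
assert contiguous == set(range(1, 11))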
@@ -199,7 +238,7 @@ def parse_sentence(token_lines: list[str], metadata: dict) -> Sentence:
 def parse_sentence(token_lines: list[str], metadata: dict) -> Sentence:
     try:
         sentence = {
-            "ids": []
+            "ids": [],
             "words": [],
             "lemmas": [],
             "upos": [],
@@ -213,7 +252,7 @@ def parse_sentence(token_lines: list[str], metadata: dict) -> Sentence:
             "semclasses": []
         }
         for token_line in token_lines:
-            token = parse_token(
+            token = parse_token(token_line)
             sentence["ids"].append(token["id"])
             sentence["words"].append(token["word"])
             sentence["lemmas"].append(token["lemma"])
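The added comma after "ids": [] is load-bearing: assuming the comma was the only difference on the old line, the old dict literal did not even parse. The snippet below confirms the failure mode:

# Two adjacent dict entries with no comma between them are a SyntaxError.
try:
    compile('{"ids": [] "words": []}', "<demo>", "eval")
except SyntaxError:
    pass  # exactly what the added comma fixes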
@@ -244,8 +283,16 @@ def parse_incr(file_path: str):
             "text": <text from metadata or None>,
             "ids": list of sentence tokens ids,
             "words": list of tokens forms,
-
-
+            "lemmas",
+            "upos",
+            "xpos",
+            "feats",
+            "heads",
+            "deprels",
+            "deps",
+            "miscs",
+            "deepslots",
+            "semclasses"
         }
 
     The input file should have metadata lines starting with '#' (e.g., "# sent_id = 1")
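Taken together, a quick smoke test of the fixed pipeline might look like this (the file name is hypothetical, and parsing.py is assumed importable):

from parsing import parse_incr  # assumes parsing.py is on the path

# parse_incr yields one sentence dict per CoNLL-U block; enumerate supplies
# the (key, example) pairs that the dataset builder now yields.
for key, sentence in enumerate(parse_incr("train.conllu")):  # hypothetical file
    print(key, sentence["words"][:5])
    if key >= 2:
        break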
|