parquet-converter committed on
Commit e5f9694 · 1 Parent(s): 30c2e24

Update parquet files

.gitattributes DELETED
@@ -1,38 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
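The deleted .gitattributes rules above routed large binary formats through Git LFS. As a rough, hypothetical sketch of what they cover (real .gitattributes matching uses gitignore-style semantics, so this basename-only fnmatch approximation is illustrative, not exact):

import fnmatch
import os

# Subset of the deleted LFS patterns; basename matching only, so the path rule
# "saved_model/**/*" is not reproduced here (illustrative sketch only).
LFS_PATTERNS = ["*.parquet", "*.arrow", "*.bin", "*.h5", "*.onnx", "*.pt",
                "*.zip", "*tfevents*", "*.flac", "*.mp3", "*.wav"]

def would_be_lfs_tracked(path):
    # Check the file's basename against each glob pattern.
    name = os.path.basename(path)
    return any(fnmatch.fnmatch(name, pattern) for pattern in LFS_PATTERNS)

print(would_be_lfs_tracked("default/genia-term-corpus-train.parquet"))  # True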
GENIA-Term-Corpus.py DELETED
@@ -1,192 +0,0 @@
- import random
- import re
- import xml.etree.ElementTree as ET
- from typing import Tuple, List, Set
- from tqdm import tqdm
-
- import csv
- import json
- import os
-
- import datasets
-
- # TODO: Add BibTeX citation
- # Find for instance the citation on arxiv or on the dataset repo/website
- _CITATION = """\
- @InProceedings{huggingface:dataset,
- title = {A great new dataset},
- author={huggingface, Inc.
- },
- year={2020}
- }
- """
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- GENIA Term corpus
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = "http://www.geniaproject.org/genia-corpus/term-corpus"
-
- # TODO: Add the licence for the dataset here if you can find it
- _LICENSE = ""
-
- # TODO: Add link to the official dataset URLs here
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URLS = "http://www.nactem.ac.uk/GENIA/current/GENIA-corpus/Term/GENIAcorpus3.02.tgz"
-
- def _split_files(data_dir):
-     root = ET.parse(os.path.join(data_dir, "GENIA_term_3.02", "GENIAcorpus3.02.xml")).getroot()
-     articles = root.findall(".//article")
-
-     train_root = ET.Element("set")
-     dev_root = ET.Element("set")
-     test_root = ET.Element("set")
-
-     for a in articles:
-         root.remove(a)
-
-     random.shuffle(articles)
-
-     for a in articles[:1600]:
-         train_root.append(a)
-
-     for a in articles[1600:1800]:
-         dev_root.append(a)
-
-     for a in articles[1800:]:
-         test_root.append(a)
-
-     ET.ElementTree(train_root).write(os.path.join(data_dir, "train.xml"))
-     ET.ElementTree(dev_root).write(os.path.join(data_dir, "dev.xml"))
-     ET.ElementTree(test_root).write(os.path.join(data_dir, "test.xml"))
-
- class GENIATermCorpus(datasets.GeneratorBasedBuilder):
-
-     VERSION = datasets.Version("0.9.0")
-
-     pattern = re.compile(r"[,\.;:\[\]\(\)]")
-
-     def _info(self):
-
-         features = datasets.Features(
-             {
-                 "tokens": datasets.Sequence(datasets.Value("string")),
-                 "folded_tokens": datasets.Sequence(datasets.Value("string")),
-                 "labels": datasets.Sequence(datasets.Value("string"))
-                 # datasets.features.ClassLabel(
-                 #     names=["O", ]
-                 # )
-                 # )
-             }
-         )
-
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features, # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
-             homepage=_HOMEPAGE,
-             # License for the dataset if available
-             license=_LICENSE,
-             # Citation for the dataset
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         data_dir = dl_manager.download_and_extract(_URLS)
-         # Split the dataset files in train/dev/test
-         _split_files(data_dir)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train.xml"),
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "test.xml"),
-                     "split": "test"
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "dev.xml"),
-                     "split": "dev",
-                 },
-             ),
-         ]
-
-
-     def _generate_examples(self, filepath:str, split):
-         root = ET.parse(filepath)
-         articles = root.findall(".//article")
-         for idx, article in enumerate(articles):
-             article_id, data= self.parse_article(article)
-             for sen_ix, (tokens, entities) in enumerate(data):
-                 yield f"{split}_{idx}_{sen_ix}", {
-                     "tokens": tokens,
-                     "folded_tokens": [t.lower() for t in tokens],
-                     "labels": entities
-                 }
-
-     def parse_article(self, article:ET):
-         # Get the id of the article
-         article_id = article.find("./articleinfo/bibliomisc").text
-         # Select all sentences in the article object
-         sentences = article.findall(".//sentence")
-         data = list()
-         for sentence in sentences:
-             data.append(self. build_bio_tags(*self.flatten_tree(sentence)))
-
-         return article_id, data
-
-     def build_bio_tags(self, text_segments:List[str], entities:List[str]) -> Tuple[List[str], List[str]]:
-
-         # Hacky tokenizer
-         tokens, tags = list(), list()
-         for seg, entity in zip(text_segments, entities):
-             # Insert whitespaces
-             seg = self.pattern.sub(r" \g<0> ", seg).strip() # Remove trailing whitespaces
-             t = seg.split()
-             tokens.extend(t)
-             tags.extend( [f"B-{entity}"] + [f"I-{entity}"] * (len(t) - 1) if entity != "O" else ["O"] * len(t))
-         return tokens, tags
-
-
-     def flatten_tree(self, elem:ET) -> Tuple[List[str], List[str]]:
-         # Just keep the simple (not the nested) annotations
-         text_segments, entities = list(), list()
-         if elem.text:
-             text_segments.append(elem.text)
-             if elem.tag == "cons" and "sem" in elem.attrib:
-                 tag = elem.attrib['sem'].replace("G#", "")
-             else:
-                 tag = "O"
-             entities.append(tag)
-         for child in elem:
-             c_segments, c_entities = self.flatten_tree(child)
-             text_segments.extend(c_segments)
-             entities.extend(c_entities)
-         if elem.tail and elem.tail != '\n':
-             text_segments.append(elem.tail)
-             entities.append("O")
-
-
-         return text_segments, entities
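With the loading script removed, the dataset is served from the pre-built Parquet splits added below, so `datasets.load_dataset` can read it without executing repository code. A minimal sketch, assuming a hypothetical repository id ("user/GENIA-Term-Corpus" is a placeholder, not taken from this diff):

from datasets import load_dataset

# "user/GENIA-Term-Corpus" is a placeholder repository id for illustration.
ds = load_dataset("user/GENIA-Term-Corpus")
print(ds)                     # expected: DatasetDict with train, validation, and test splits
print(ds["train"][0].keys())  # columns defined by the old script: tokens, folded_tokens, labels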
default/genia-term-corpus-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36d6bf588daa351c896acb9873cc78e214e23348c5c5b6c6f0398013ce0db00b
+ size 301119
default/genia-term-corpus-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4dc2b932f6f482b6958af6fe5d4cba276db7ae3606c5053139040bb9ec191362
+ size 2372566
default/genia-term-corpus-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0b3a375142b2053584b041b34c596487de6003cdf7a8f3218e829564ae5c646
+ size 310779
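Each file added above is a Git LFS pointer (spec version, SHA-256 oid, and byte size) rather than the Parquet data itself; the actual bytes live in LFS storage. Once a split has been fetched locally (for example with `git lfs pull`), it can be inspected directly. A small sketch, assuming the validation file has been downloaded to the path shown:

import pyarrow.parquet as pq

# Assumes the LFS-backed file has been materialized locally at this relative path.
table = pq.read_table("default/genia-term-corpus-validation.parquet")
print(table.schema)    # expected columns: tokens, folded_tokens, labels
print(table.num_rows)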