parquet-converter committed on
Commit a1bbab7 · 1 Parent(s): cbc67fd

Update parquet files

.gitignore DELETED
@@ -1 +0,0 @@
- .venv/
2018thresh20dev.csv → 2018thresh10corpus/wiki-entity-similarity-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e80d37c92672e2b2041d47035c737db5c7dc0a9c5938ede28c46cdc25321be5a
- size 37218821
+ oid sha256:0e128de93fb002d2888e4f351c897ef997729ff2a8ac51c9dae19008a37390ef
+ size 60887054
2018thresh20test.csv → 2018thresh10pairs/wiki-entity-similarity-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7204244b6dc46350616835b741f925b98f50f6e9b4d5585d7815f226d5afde95
- size 24867451
+ oid sha256:6cf19eab5b15d9a1c63f7b25d9eecebce1d8402741f71a94bd55f55b052413d1
+ size 31682534
2018thresh10train.csv → 2018thresh10pairs/wiki-entity-similarity-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a4c19706eb30b58641b06fa0e5efe53cf545f6bf49e3b5a30e62d96243517c8
- size 255547715
+ oid sha256:825b641521a426759efb4269083864daed69516640f489ec269bf88bee61035b
+ size 239758776
2018thresh10dev.csv → 2018thresh10pairs/wiki-entity-similarity-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6690985884c8b4a58d0377ef2152bb1fbb233355df68d662539da856e697a414
- size 51406267
+ oid sha256:fb26a1f4d19dfcba19997a8144370e4b63f62ffa2a6f7b9b1e5a47ef89935f34
+ size 47799775
2018thresh20corpus.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6baf503753198445a388b650edb59f7faef2ecc95c52b0f39edac5d4d40da5cc
- size 136658267
2018thresh10test.csv → 2018thresh20corpus/wiki-entity-similarity-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87ba47d64b3223a9ceda6c4c414312c4c8fa412c023d63ba8495909206f908fd
- size 34147126
+ oid sha256:20f72f54378317b06476a14d34ac069ecdf8e473fa2afe67efa98cf8aa1920d3
+ size 42226880
2018thresh20pairs/wiki-entity-similarity-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc36fa2d5dd6fe009e052e810c1547368e5e44816fb7b737b17bd88abb531693
+ size 22626152
2018thresh10corpus.csv → 2018thresh20pairs/wiki-entity-similarity-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:da0986792d65bb2639a77c29ca6e785ffd971edbf49943c4987be68a29bd2330
- size 188776976
+ oid sha256:a90b82f23c5b1d694f9f8e9489d23dd3a2bdc409e57bbfca62c12450e5eb16a2
+ size 171905597
2018thresh20pairs/wiki-entity-similarity-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13a726e38e6ac48be2055203535354450a942672aa2a1318508447066124e44e
+ size 34176777
2018thresh20train.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9fcda4ee523a653d09f34c72e2134d56466d145c56a95596468ca7a1f4f9bf9c
- size 185091962
2018thresh5corpus.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8421a796ff5cf834c0b5ad50afe500ec28f86be825e901ae12a7dcffd580bed9
- size 246919376
2018thresh5corpus/wiki-entity-similarity-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7695b0a9f2c35e129c8d7989fc6899217b29bb2549feb37bbbef847ca8df2fc8
+ size 84556237
2018thresh5dev.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:abf0f45844c3aa91747be22502d9d91570365aa0d085a1faaec17a04a84028c5
- size 67296490
2018thresh5pairs/wiki-entity-similarity-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff9257327cccecced6039b688dbac68649d960f2a37fcb5065e2697f2c0cd63a
+ size 42103893
2018thresh5pairs/wiki-entity-similarity-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb6a2bc4d3c9e225b83e257b09689c0adfa1232e80e3a91aca890def9b27408c
+ size 316754105
2018thresh5pairs/wiki-entity-similarity-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9eaa224b7e7440ff6d2e92b9e74091a6b81a3cf56fbbb3bdf888b9f69dbbb8d8
+ size 63317528
2018thresh5test.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7d9c39063394851308a52c71de4cae38386bfe7e16f73a355221dafe46a74b0f
- size 44759191
2018thresh5train.csv DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e7b71f64c53d1aea33ec8bd6067b99c766625897904fd7cccdd82d60eac17b5c
- size 334315073
README.md DELETED
@@ -1,67 +0,0 @@
- ---
- annotations_creators:
- - found
- language:
- - en
- language_creators:
- - found
- license:
- - mit
- multilinguality:
- - monolingual
- pretty_name: 'Wiki Entity Similarity
-
- '
- size_categories:
- - 10M<n<100M
- source_datasets:
- - original
- tags:
- - named entities
- - similarity
- - paraphrasing
- - synonyms
- - wikipedia
- task_categories: []
- task_ids: []
- ---
-
- # Wiki Entity Similarity
-
- Usage:
- ```py
- from datasets import load_dataset
-
- corpus = load_dataset('Exr0n/wiki-entity-similarity', '2018thresh20corpus', split='train')
- assert corpus[0] == {'article': 'A1000 road', 'link_text': 'A1000', 'is_same': 1}
-
- pairs = load_dataset('Exr0n/wiki-entity-similarity', '2018thresh20pairs', split='train')
- assert pairs[0] == {'article': 'Rhinobatos', 'link_text': 'Ehinobatos beurleni', 'is_same': 1}
- assert len(pairs) == 4_793_180
- ```
-
- ## Corpus (`name=*corpus`)
-
- The corpora in this dataset are generated by aggregating the link text that refers to each article in context. For instance, if wiki article A refers to article B as C, then C is added to the list of aliases for article B, and the pair (B, C) is included in the dataset.
-
- Following DPR (https://arxiv.org/pdf/2004.04906.pdf), we use the English Wikipedia dump from Dec. 20, 2018 as the source documents for link collection.
-
- The dataset includes three quality levels, distinguished by the minimum number of inbound links required to include an article in the dataset. This filtering is motivated by the heuristic "better articles have more citations."
-
- | Min. Inbound Links | Number of Articles | Number of Distinct Links |
- |--------------------|--------------------|--------------------------|
- | 5                  | 1,080,073          | 5,787,081                |
- | 10                 | 605,775            | 4,407,409                |
- | 20                 | 324,949            | 3,195,545                |
-
- ## Training Pairs (`name=*pairs`)
- This dataset also includes training pair datasets (with both positive and negative examples) intended for training classifiers. The train/dev/test split is 75/15/10% of each corpus.
-
- ### Training Data Generation
- The training pairs in this dataset are generated by taking each example from the corpus as a positive example, and creating a new negative example from the article title of the positive example and a random link text from a different article.
-
- The articles featured in each split are disjoint from the other splits, and each split has the same number of positive (semantically the same) and negative (semantically different) examples.
-
- For more details on the dataset motivation, see [the paper](https://arxiv.org/abs/2202.13581). If you use this dataset in your work, please cite it using the ArXiv reference.
-
- Generation scripts can be found [in the GitHub repo](https://github.com/Exr0nProjects/wiki-entity-similarity).
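
Since this commit converts the CSV splits into per-config parquet directories, the usage in the deleted README should keep working without the custom loading script. A minimal sketch, assuming the Hub exposes the renamed directories (e.g. `2018thresh10pairs/`) as config names:

```py
from datasets import load_dataset

# Sketch (assumption: the new parquet directories map directly to config names).
pairs = load_dataset('Exr0n/wiki-entity-similarity', '2018thresh10pairs', split='train')
print(pairs[0])  # expected keys: 'article', 'link_text', 'is_same'
```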
generate_wes_data.py DELETED
@@ -1,86 +0,0 @@
- from datasets import load_dataset
- import pandas as pd
- from nltk.corpus import words
- from nltk import WordNetLemmatizer
- import numpy as np
- from tqdm import tqdm
-
- from collections import defaultdict
- from operator import itemgetter as ig
- from itertools import islice, chain, repeat
- from random import seed, sample, choice, shuffle
- from gc import collect
-
- filter_dict = set(words.words())
- ltize = WordNetLemmatizer().lemmatize
-
- def generate_splits(subset, split=[0.75, 0.15, 0.1]):
-     assert abs(sum(split) - 1.0) < 0.0001
-     # get the data in dictionary form: article -> list of link texts
-     groups = defaultdict(list)
-     ds = load_dataset('Exr0n/wiki-entity-similarity', subset, split='train')
-     ds = list(tqdm(ds, total=len(ds)))
-     for article, link in tqdm(map(ig('article', 'link_text'), ds), total=len(ds)):
-         if (ltize(article.lower()) not in filter_dict) and (ltize(link.lower()) in filter_dict):
-             # print(article, link, 'not quite right!')
-             continue  # remove if the link text is a dictionary word but the article is not
-         groups[article].append(link)
-     del ds
-
-     # greedily allocate splits, largest group first
-     order = sorted(groups.keys(), reverse=True, key=lambda e: len(groups[e]))
-     splits = [[] for _ in split]
-     sizes = [0.001] * len(split)  # avoid division-by-zero error
-     for group in order:
-         impoverished = np.argmax([ s - (x/sum(sizes)) for x, s in zip(sizes, split) ])
-         splits[impoverished].append(group)
-         sizes[impoverished] += len(groups[group])
-
-     sizes = [ int(x) for x in sizes ]
-     print('final sizes', sizes, [x/sum(sizes) for x in sizes])
-
-     # generate positive examples
-     ret = [ [[(k, t) for t in groups[k]] for k in keys] for keys in splits ]
-
-     # generate negative examples randomly (TODO: probably a more elegant swapping soln)
-     for i, keys in enumerate(splits):
-         for key in keys:
-             try:
-                 got = sample(keys, len(groups[key])+1)  # sample n+1 keys
-                 ret[i].append(
-                     [(key, choice(groups[k])) for k in got if k != key]  # take a random link text from each other key
-                     [:len(groups[key])]  # ensure we don't have too many
-                 )
-             except ValueError:
-                 raise ValueError("one group is bigger than all the others combined; try sampling one key at a time")
-
-     collect()
-     return [(chain(*s), chain(repeat(1, z), repeat(0, z))) for z, s in zip(sizes, ret)]
-
-
- if __name__ == '__main__':
-     seed(0x326ccc)
-     year = 2018
-     for size in [5, 10, 20]:
-         x = generate_splits(subset=f'{year}thresh' + str(size) + 'corpus')
-
-         for (data, labels), split in zip(x, ['train', 'dev', 'test']):
-             articles, lts = list(zip(*data))
-             df = pd.DataFrame({ 'article': articles, 'link_text': lts, 'is_same': list(labels) })
-             df = df.sample(frac=1).reset_index(drop=True)
-             df.to_csv(f'{year}thresh' + str(size) + split + '.csv', index=False)
-             # print(df.head(30), df.tail(30))
-
-     # tests
-     # for data, labels in x[2:]:
-     #     data = list(data)
-     #     labels = list(labels)
-     #
-     #     assert sum(labels) * 2 == len(labels)
-     #     num = sum(labels)
-     #
-     #     before = [ a for a, _ in data[:num] ]
-     #     after = [ a for a, _ in data[num:] ]
-     #     assert before == after
-     #
-     #     print(data[num:])
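
The greedy allocation in `generate_splits` above is easiest to see in isolation: each article's alias group goes, largest first, to whichever split is currently furthest below its target fraction. A toy illustration with made-up group sizes:

```py
import numpy as np

groups = {'A': 5, 'B': 4, 'C': 3, 'D': 2, 'E': 1}  # article -> alias count (toy data)
split = [0.75, 0.15, 0.10]                         # train / dev / test targets

sizes = [0.001] * len(split)                       # epsilon avoids division by zero
assignment = {}
for article in sorted(groups, key=groups.get, reverse=True):
    # pick the split whose current share is furthest below its target
    deficit = [s - x / sum(sizes) for x, s in zip(sizes, split)]
    i = int(np.argmax(deficit))
    assignment[article] = i
    sizes[i] += groups[article]

print(assignment)  # {'A': 0, 'B': 1, 'C': 0, 'D': 2, 'E': 0}
```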
requirements.txt DELETED
@@ -1,268 +0,0 @@
- acme==1.21.0
- aiohttp==3.8.1
- aiosignal==1.2.0
- alabaster==0.7.12
- apparmor==3.0.3
- appdirs==1.4.4
- arandr==0.1.10
- argcomplete==1.12.3
- async-timeout==4.0.2
- attrs==21.2.0
- Babel==2.9.1
- beautifulsoup4==4.10.0
- bleach==3.3.0
- blessed==1.17.6
- blis==0.7.4
- Brlapi==0.8.3
- btrfsutil==5.15.1
- CacheControl==0.12.6
- catalogue==2.0.4
- certbot==1.21.0
- certbot-nginx==1.19.0
- certifi==2020.12.5
- cffi==1.15.0
- chardet==4.0.0
- charset-normalizer==2.0.9
- click==7.1.2
- click-completion==0.5.2
- cloudpickle==1.6.0
- colorama==0.4.4
- coloredlogs==15.0.1
- ConfigArgParse==1.5
- configobj==5.1.0.dev0
- contextlib2==0.6.0.post1
- coverage==5.5
- crcmod==1.7
- cryptography==35.0.0
- cycler==0.10.0
- cymem==2.0.5
- datasets==1.17.0
- defusedxml==0.7.1
- dill==0.3.4
- distlib==0.3.3
- distro==1.6.0
- dnspython==2.1.0
- docopt==0.6.2
- docutils==0.17.1
- dotty-dict==1.3.0
- emoji==1.6.1
- en-core-web-lg @ https://github.com/explosion/spacy-models/releases/download/en_core_web_lg-3.1.0/en_core_web_lg-3.1.0-py3-none-any.whl
- file-magic==0.4.0
- filelock==3.4.2
- flake8==3.9.2
- frozenlist==1.2.0
- fsspec==2021.11.1
- future==0.18.2
- gh-md-to-html==1.21.1
- Glances==3.2.1
- greenlet==1.1.0
- gym==0.18.3
- halo==0.0.31
- hid==1.0.4
- hjson==3.0.2
- html5lib==1.1
- httplib2==0.19.1
- huggingface-hub==0.2.1
- humanfriendly==10.0
- idna==2.10
- imagesize==1.3.0
- img2pdf==0.4.1
- importlib-metadata==4.0.1
- importlib-resources==5.2.2
- inquirer==2.7.0
- isc==2.0
- iterfzf==0.5.0.20.0
- jedi==0.17.2
- jeepney==0.6.0
- Jinja2==3.0.1
- joblib==1.0.1
- josepy==1.10.0
- jsonschema==3.2.0
- kaggle==1.5.12
- keyring==23.0.1
- kiwisolver==1.3.1
- lazr.config==2.2.3
- lazr.delegates==2.0.4
- lazr.restfulclient==0.14.2
- lazr.uri==1.0.5
- LibAppArmor==3.0.3
- libarchive-c==3.2
- libfdt==1.6.1
- libvirt-python==7.8.0
- llvmlite==0.36.0
- log-symbols==0.0.14
- loguru==0.5.3
- louis==3.20.0
- lxml==4.6.3
- Markdown==3.3.6
- MarkupSafe==2.0.1
- matplotlib==3.4.2
- mccabe==0.6.1
- meson==0.60.2
- milc==1.4.2
- mock==3.0.5
- more-itertools==8.10.0
- MouseInfo==0.1.3
- msgpack==1.0.2
- multidict==5.2.0
- multiprocess==0.70.12.2
- murmurhash==1.0.5
- mypy-extensions==0.4.3
- neovim==0.3.1
- nftables==0.1
- nltk==3.6.2
- nose==1.3.7
- nose2==0.10.0
- notify-py==0.3.1
- npyscreen==4.10.5
- numba==0.53.1
- numpy==1.20.3
- oauthlib==3.1.1
- ocrmypdf==12.6.0
- openpyscad==0.4.0
- ordered-set==4.0.2
- packaging==20.9
- pacman-mirrors==4.23.1
- pandas==1.3.4
- parsedatetime==2.6
- parso==0.7.1
- pathy==0.6.0
- pbr==5.5.1
- pdfminer.six==20201018
- pendulum==2.1.2
- pep517==0.12.0
- petname==2.0
- pikepdf==3.1.1
- Pillow==8.2.0
- Pit2ya==0.4.2
- pkginfo==1.7.0
- pluggy==0.13.1
- ply==3.11
- preshed==3.0.5
- progress==1.6
- progressbar2==3.55.0
- protobuf==3.17.3
- PTable==0.9.2
- pyarrow==6.0.1
- PyAutoGUI==0.9.52
- pybind11==2.8.1
- pycairo==1.20.1
- pycodestyle==2.7.0
- pycparser==2.21
- pydantic==1.8.2
- pyelftools==0.27
- pyenchant==3.2.1
- pyflakes==2.3.1
- PyGetWindow==0.0.9
- pyglet==1.5.15
- Pygments==2.9.0
- PyGObject==3.42.0
- pylxd==2.3.1
- pymacaroons==0.13.0
- PyMsgBox==1.0.9
- PyMuPDF==1.18.14
- PyNaCl==1.4.0
- pynvim==0.4.3
- pyOpenSSL==21.0.0
- pyparsing==2.4.7
- PyPDF2==1.26.0
- pyperclip==1.8.2
- PyRect==0.1.4
- pyRFC3339==1.1
- pyrsistent==0.18.0
- PyScreeze==0.1.27
- pysha3==1.0.2
- python-apt==0.0.0
- python-dateutil==2.8.1
- python-debian==0.1.42+git20211018
- python-distutils-extra==2.39
- python-editor==1.0.4
- python-jsonrpc-server==0.4.0
- python-language-server==0.36.2
- python-slugify==5.0.2
- python-utils==2.5.6
- python-xlib==0.31
- python3-xlib==0.15
- PyTweening==1.0.3
- pytz==2021.3
- pytzdata==2020.1
- pyusb==1.1.1
- PyYAML==6.0
- ranger-fm==1.9.3
- raven==6.10.0
- readchar==2.0.1
- readme-renderer==29.0
- regex==2021.4.4
- reportlab==3.6.1
- requests==2.26.0
- requests-oauthlib==1.3.0
- requests-toolbelt==0.9.1
- requests-unixsocket==0.2.0
- resolvelib==0.5.5
- retrying==1.3.3
- rfc3986==1.5.0
- scipy==1.7.0
- SecretStorage==3.3.1
- setuptools-scm==6.0.1
- setuptools-scm-git-archive==1.1
- shellescape==3.8.1
- shellingham==1.4.0
- simplejson==3.17.6
- six==1.16.0
- smart-open==5.1.0
- snowballstemmer==2.2.0
- sortedcontainers==2.4.0
- soupsieve==2.2.1
- spacy==3.1.0
- spacy-legacy==3.0.8
- speedtest-cli==2.1.3
- Sphinx==4.3.1
- sphinxcontrib-applehelp==1.0.2
- sphinxcontrib-devhelp==1.0.2
- sphinxcontrib-htmlhelp==2.0.0
- sphinxcontrib-jsmath==1.0.1
- sphinxcontrib-qthelp==1.0.3
- sphinxcontrib-serializinghtml==1.1.5
- spinners==0.0.24
- srsly==2.4.1
- ssoclient==2.1.1
- tabulate==0.8.9
- TBB==0.2
- team==1.0
- termcolor==1.1.0
- text-unidecode==1.3
- thinc==8.0.7
- togglCli==2.4.2
- toml==0.10.2
- tomli==1.2.2
- torch==1.9.1+cu111
- torchaudio==0.9.1
- torchvision==0.10.1+cu111
- tqdm==4.62.3
- twine==3.4.1
- typer==0.3.2
- typing-extensions==3.10.0.2
- ueberzug==18.1.9
- ufw==0.36
- ujson==4.0.2
- urllib3==1.26.7
- uWSGI==2.0.19.1
- validate==5.1.0.dev0
- validate-email==1.3
- wadllib==1.3.4
- wasabi==0.8.2
- wcwidth==0.2.5
- webcolors==1.11.1
- webencodings==0.5.1
- ws4py==0.5.1
- xxhash==2.0.2
- yapf==0.31.0
- yarl==1.7.2
- zipp==3.4.1
- zope.component==5.0.1
- zope.deferredimport==4.3.1
- zope.deprecation==4.4.0
- zope.event==4.5.0
- zope.hookable==5.1.0
- zope.interface==5.4.0
- zope.proxy==4.5.0
wiki-entity-similarity.py DELETED
@@ -1,115 +0,0 @@
- import datasets
-
- from dataclasses import dataclass
- import csv
-
- _DESCRIPTION = '''WES: Learning Semantic Similarity from 6M Names for 1M Entities'''
- _CITE = '''\
- @inproceedings{exr0n2022WES,
-   author={Exr0n},
-   title={WES: Learning Semantic Similarity from 6M Names for 1M Entities},
-   year={2022}
- }
- '''
-
- _HUGGINGFACE_REPO = "https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/"
-
- @dataclass
- class WikiEntitySimilarityConfig(datasets.BuilderConfig):
-     """BuilderConfig for WES."""
-     year: int = None
-     type: str = None
-     threshhold: int = None
-
- class WikiEntitySimilarity(datasets.GeneratorBasedBuilder):
-     """WES: Learning semantic similarity from 6M names for 1M entities"""
-     BUILDER_CONFIG_CLASS = WikiEntitySimilarityConfig
-     BUILDER_CONFIGS = [
-         WikiEntitySimilarityConfig(
-             name='2018thresh5corpus',
-             description='raw link corpus (all true): min 5 inbound links, lowest quality',
-             year=2018,
-             type='corpus',
-             threshhold=5,
-         ),
-         WikiEntitySimilarityConfig(
-             name='2018thresh10corpus',
-             description='raw link corpus (all true): min 10 inbound links, medium quality',
-             year=2018,
-             type='corpus',
-             threshhold=10,
-         ),
-         WikiEntitySimilarityConfig(
-             name='2018thresh20corpus',
-             description='raw link corpus (all true): min 20 inbound links, high quality',
-             year=2018,
-             type='corpus',
-             threshhold=20,
-         ),
-         WikiEntitySimilarityConfig(
-             name='2018thresh5pairs',
-             description='training pairs based on min 5 inbound links, lowest quality',
-             year=2018,
-             type='pairs',
-             threshhold=5,
-         ),
-         WikiEntitySimilarityConfig(
-             name='2018thresh10pairs',
-             description='training pairs based on min 10 inbound links, medium quality',
-             year=2018,
-             type='pairs',
-             threshhold=10,
-         ),
-         WikiEntitySimilarityConfig(
-             name='2018thresh20pairs',
-             description='training pairs based on min 20 inbound links, high quality',
-             year=2018,
-             type='pairs',
-             threshhold=20,
-         ),
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     'article': datasets.Value('string'),
-                     'link_text': datasets.Value('string'),
-                     'is_same': datasets.Value('uint8'),
-                 }
-             ),
-             citation=_CITE,
-             homepage="https://github.com/Exr0nProjects/wiki-entity-similarity",
-         )
-
-     def _split_generators(self, dl_manager):
-         path = _HUGGINGFACE_REPO + f"{self.config.year}thresh{self.config.threshhold}"
-         if self.config.type == 'corpus':
-             filepath = dl_manager.download(path + 'corpus.csv')
-             return [ datasets.SplitGenerator(name=datasets.Split.TRAIN,
-                                              gen_kwargs={ 'path': filepath }) ]
-         elif self.config.type == 'pairs':
-             ret = []
-             for n, e in zip(['train', 'dev', 'test'],
-                             [datasets.Split.TRAIN,
-                              datasets.Split.VALIDATION,
-                              datasets.Split.TEST]):
-                 fp = dl_manager.download(path + n + '.csv')
-                 ret.append( datasets.SplitGenerator(name=e, gen_kwargs={ 'path': fp }) )
-             return ret
-         else:
-             raise ValueError(f"invalid dataset type '{self.config.type}', expected 'corpus' for raw links or 'pairs' for trainable pairs with negative examples")
-
-     def _generate_examples(self, path):
-         with open(path, 'r') as rf:
-             reader = csv.DictReader(rf)
-             for i, row in enumerate(reader):
-                 yield i, row
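
With the script loader deleted, a single converted shard can also be fetched and inspected directly. A sketch using `huggingface_hub` and `pandas`, with the file name taken from the renames above:

```py
from huggingface_hub import hf_hub_download
import pandas as pd

# Sketch: download one parquet shard from the dataset repo and read it locally.
path = hf_hub_download(
    repo_id='Exr0n/wiki-entity-similarity',
    filename='2018thresh10pairs/wiki-entity-similarity-train.parquet',
    repo_type='dataset',
)
df = pd.read_parquet(path)
print(df.head())
```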