Add 'stsb' config data files
- README.md +30 -0
- dataset_infos.json +16 -28
- stsb/test-00000-of-00001.parquet +3 -0
- stsb/train-00000-of-00001.parquet +3 -0
- stsb/validation-00000-of-00001.parquet +3 -0
README.md
CHANGED
@@ -124,6 +124,28 @@ dataset_info:
     num_examples: 1821
   download_size: 3305163
   dataset_size: 5004495
+- config_name: stsb
+  features:
+  - name: sentence1
+    dtype: string
+  - name: sentence2
+    dtype: string
+  - name: label
+    dtype: float32
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 754791
+    num_examples: 5749
+  - name: validation
+    num_bytes: 216064
+    num_examples: 1500
+  - name: test
+    num_bytes: 169974
+    num_examples: 1379
+  download_size: 761235
+  dataset_size: 1140829
 configs:
 - config_name: cola
   data_files:
@@ -157,6 +179,14 @@ configs:
     path: sst2/validation-*
   - split: test
     path: sst2/test-*
+- config_name: stsb
+  data_files:
+  - split: train
+    path: stsb/train-*
+  - split: validation
+    path: stsb/validation-*
+  - split: test
+    path: stsb/test-*
 train-eval-index:
 - config: cola
   task: text-classification
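With the stsb entry declared in the YAML above, the standard loader can resolve the parquet shards directly. A minimal sketch, assuming the repo id "glue" on the Hub; the metadata in this commit names the builder "glue-ci", so substitute this repository's actual id:

from datasets import load_dataset

# Split sizes should match the YAML above:
# train=5749, validation=1500, test=1379 examples.
ds = load_dataset("glue", "stsb")
print(ds["train"][0])  # keys: sentence1, sentence2, label, idx
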
dataset_infos.json
CHANGED
@@ -233,34 +233,29 @@
   },
   "stsb": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
+    "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
     "license": "",
     "features": {
       "sentence1": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence2": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
         "dtype": "float32",
-        "id": null,
         "_type": "Value"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "glue",
+    "builder_name": "glue-ci",
+    "dataset_name": "glue-ci",
     "config_name": "stsb",
     "version": {
       "version_str": "1.0.0",
@@ -270,35 +265,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 170847,
-        "num_examples": 1379,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes":
+        "num_bytes": 754791,
         "num_examples": 5749,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes":
+        "num_bytes": 216064,
         "num_examples": 1500,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "
-      "
-      "
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 169974,
+        "num_examples": 1379,
+        "dataset_name": null
      }
    },
-    "download_size":
-    "post_processing_size": null,
-    "dataset_size":
-    "size_in_bytes": 1949125
+    "download_size": 761235,
+    "dataset_size": 1140829,
+    "size_in_bytes": 1902064
   },
   "mnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
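For reference, the feature block declared above corresponds to the following `datasets` schema. A minimal sketch; the names and dtypes are taken from the JSON, the construction itself is the standard `datasets.Features` API:

from datasets import Features, Value

# stsb schema as declared in dataset_infos.json: two sentences,
# a float32 similarity score, and an int32 example index.
stsb_features = Features({
    "sentence1": Value("string"),
    "sentence2": Value("string"),
    "label": Value("float32"),
    "idx": Value("int32"),
})
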
stsb/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f0c58c00eebf43f70cd4668fadadf5e541415bd9fb95809fc958690d8eeea07c
+size 113005
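The three lines above are a Git LFS pointer, not the parquet payload itself: oid is the SHA-256 of the real file and size is its byte count. One way to fetch the actual shard is through huggingface_hub; a sketch, with the repo id assumed:

from huggingface_hub import hf_hub_download

# Downloads the real parquet shard that the LFS pointer stands in for.
# repo_id "glue" is an assumption; use this repository's actual id.
path = hf_hub_download(
    repo_id="glue",
    repo_type="dataset",
    filename="stsb/test-00000-of-00001.parquet",
)
print(path)
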
stsb/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e63fac541b6227ea1e7a3335f49c1772dcc8d58a953cb3a8f1c4e6c0daf2db2
+size 498786
stsb/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9495d89f7272d26d28e2d446c962258a499cf38a26df0925b4823cf992f7808a
+size 149444
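To sanity-check the shards against the metadata committed above, a short pyarrow sketch; the paths assume the three parquet files have been materialized locally (i.e. pulled through LFS rather than left as pointers):

import pyarrow.parquet as pq

# Row counts should line up with the num_examples values declared
# in README.md and dataset_infos.json.
expected = {"train": 5749, "validation": 1500, "test": 1379}
for split, n in expected.items():
    table = pq.read_table(f"stsb/{split}-00000-of-00001.parquet")
    assert table.num_rows == n, (split, table.num_rows)
    print(split, table.num_rows, table.schema.names)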