---
dataset_info:
- config_name: default
  features:
  - name: id
    dtype: int64
  - name: query
    dtype: string
  - name: document_text
    dtype: string
  - name: acid
    dtype: string
  splits:
  - name: train
    num_bytes: 3902798162
    num_examples: 100000
  - name: validation
    num_bytes: 77013209
    num_examples: 1968
  - name: test
    num_bytes: 288050960
    num_examples: 7830
  download_size: 2465211077
  dataset_size: 4267862331
- config_name: nq-100-tokenized
  features:
  - name: input_ids
    sequence: int32
  - name: attention_mask
    sequence: int8
  - name: labels
    sequence: int64
  splits:
  - name: train
    num_bytes: 1315687778
    num_examples: 1556605
  - name: validation
    num_bytes: 1191406
    num_examples: 1968
  - name: test
    num_bytes: 4743398
    num_examples: 7830
  download_size: 121309967
  dataset_size: 1321622582
- config_name: nq-100k-raw
  features:
  - name: id
    dtype: int64
  - name: BM25_keywords
    sequence: string
  - name: query
    dtype: string
  - name: document_text
    dtype: string
  splits:
  - name: train
    num_bytes: 3918423056
    num_examples: 100000
  - name: validation
    num_bytes: 77322775
    num_examples: 1968
  - name: test
    num_bytes: 289276447
    num_examples: 7830
  download_size: 2277058620
  dataset_size: 4285022278
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
- config_name: nq-100-tokenized
  data_files:
  - split: train
    path: nq-100-tokenized/train-*
  - split: validation
    path: nq-100-tokenized/validation-*
  - split: test
    path: nq-100-tokenized/test-*
- config_name: nq-100k-raw
  data_files:
  - split: train
    path: nq-100k-raw/train-*
  - split: validation
    path: nq-100k-raw/validation-*
  - split: test
    path: nq-100k-raw/test-*
---
# Abstractive Content-Based Document IDs for Generative Retrieval
This repository contains the dataset for [Summarization-Based Document IDs for Generative Retrieval with Language Models](https://arxiv.org/abs/2311.08593).
## Update
**[03/04/2025]** Uploaded the validation and test sets of ACID. Added a tokenized subset.
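## Usage
A minimal loading sketch with the 🤗 `datasets` library. The repository ID below is a placeholder (substitute the actual Hub path of this dataset); the config names, split names, and fields match the schema in the metadata above.
```python
from datasets import load_dataset

# Placeholder -- replace with this dataset's actual Hub repository ID.
REPO_ID = "<user>/<dataset-name>"

# Default config: raw retrieval data with the abstractive content-based
# document ID (`acid`) attached to each (query, document) pair.
acid = load_dataset(REPO_ID, "default")
example = acid["train"][0]
print(example["query"])
print(example["acid"])

# nq-100-tokenized: pre-tokenized sequences (input_ids, attention_mask,
# labels) ready for seq2seq training.
tokenized = load_dataset(REPO_ID, "nq-100-tokenized", split="train")

# nq-100k-raw: raw NQ-100k data with BM25 keywords per document.
raw = load_dataset(REPO_ID, "nq-100k-raw", split="validation")
```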
## Citation
```bibtex
@misc{li2024summarizationbaseddocumentidsgenerative,
title={Summarization-Based Document IDs for Generative Retrieval with Language Models},
author={Haoxin Li and Daniel Cheng and Phillip Keung and Jungo Kasai and Noah A. Smith},
year={2024},
eprint={2311.08593},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2311.08593},
}
```