import os
import json
import csv
import pickle
import gzip
import logging

import numpy as np
import transformers
from src.indicies.index_utils import get_passage_pos_ids


############################## Training ##############################
def fast_load_jsonl_shard(args, shard_index, return_all_passages=True):
    """
    This function is designed to handle large datasets by only loading the specific portion of data (shard) that
    corresponds to the given shard index.

    Shards are determined by dividing the total size of all files in the directory evenly by `num_shards`.
    This function reads only the data portion of the `shard_index` shard, chunks the text from each line
    based on `chunk_sz`, and appends each chunk to a list with an incremental ID.
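
    Example `args` fields (a sketch; the exact config schema is repo-specific):
        raw_data_path: a .jsonl file, or a directory of .jsonl files
        num_shards: total number of shards the corpus is split into
        chunk_size: target chunk length in whitespace-separated words
            (interpreted as a token budget under the "semantic" strategy)
        passages_dir: optional cache directory for the built shards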
    """
    raw_data_path = args.raw_data_path
    raw_data_key = args.get("raw_data_key", "text")
    num_shards = args.num_shards
    chunk_sz = args.chunk_size
    min_chunk_sz = args.get("min_chunk_sz", 0)
    keep_last = args.get("keep_last_chunk", True)
    chunking_strategy = args.get("chunking_strategy", "fixed_size")
    keep_raw_metadata = args.get("keep_raw_metadata", True)
    use_passage_pos_id_map = args.get("use_passage_pos_id_map", False)

    if not return_all_passages:
        assert use_passage_pos_id_map, (
            f"You must set `use_passage_pos_id_map=True` to enable efficient passage loading!"
        )

    if use_passage_pos_id_map:
        passage_shard_save_path = os.path.join(
            args.passages_dir, f"raw_passages-{shard_index}-of-{num_shards}.jsonl"
        )
        pos_map_save_path = os.path.join(args.passages_dir, "passage_pos_id_map.pkl")

        if not return_all_passages:
            if os.path.exists(pos_map_save_path):
                with open(pos_map_save_path, "rb") as f:
                    passage_pos_ids = pickle.load(f)
                return passage_pos_ids
            else:
                # If all jsonl data has been built, construct passage_pos_ids and return it
                all_data_exist = True
                for _shard_index in range(num_shards):
                    passage_shard_save_path_to_check = os.path.join(
                        args.passages_dir,
                        f"raw_passages-{_shard_index}-of-{num_shards}.jsonl",
                    )
                    if not os.path.exists(passage_shard_save_path_to_check):
                        all_data_exist = False
                if all_data_exist:
                    passage_pos_ids = get_passage_pos_ids(
                        args.passages_dir, pos_map_save_path
                    )
                    return passage_pos_ids

        elif os.path.exists(passage_shard_save_path):
            passages = []
            with open(passage_shard_save_path, "r") as fin:
                for line in fin:
                    passages.append(json.loads(line))
            return passages

    else:
        passage_shard_save_path = os.path.join(
            args.passages_dir, f"raw_passages-{shard_index}-of-{num_shards}.pkl"
        )

        if os.path.exists(passage_shard_save_path):
            logging.info(f"Loading from {passage_shard_save_path}...")
            with open(passage_shard_save_path, "rb") as file:
                passages = pickle.load(file)
            return passages

    if not os.path.exists(raw_data_path):
        logging.info(f"{raw_data_path} does not exist")
        return

    if os.path.isdir(raw_data_path):
        # sort for a deterministic file order so shard boundaries are stable
        # across runs (os.listdir order is arbitrary)
        all_file_paths = sorted(
            os.path.join(raw_data_path, file) for file in os.listdir(raw_data_path)
        )
    else:
        all_file_paths = [raw_data_path]

    file_sizes = [os.path.getsize(file_path) for file_path in all_file_paths]
    total_size = sum(file_sizes)

    shard_size = total_size / num_shards
    shard_start = shard_size * shard_index
    # the last shard absorbs any remainder left by the division
    shard_end = shard_start + shard_size if shard_index < num_shards - 1 else total_size

    current_pos = 0
    shard_files = []
    for file_path, file_size in zip(all_file_paths, file_sizes):
        next_pos = current_pos + file_size
        if next_pos > shard_start and current_pos < shard_end:
            # this file overlaps the requested shard's byte range
            start_in_file = max(shard_start - current_pos, 0)
            end_in_file = min(shard_end - current_pos, file_size)
            shard_files.append((file_path, start_in_file, end_in_file))
        current_pos = next_pos
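
    # Worked example (illustrative): with two files of 60 and 40 bytes
    # (total_size=100) and num_shards=4, each shard spans 25 bytes; shard 2
    # covers bytes [50, 75), i.e. (file_1, start=50, end=60) + (file_2, start=0, end=15).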

    passages = []
    idx = 0
    for file_path, start_in_file, end_in_file in shard_files:
        with open(file_path, "r", encoding="utf-8") as file:
            file.seek(int(start_in_file))
            # Skip the rest of the partial line after seeking, if not at the start of the file
            if start_in_file != 0:
                file.readline()

            while file.tell() < end_in_file:
                line = file.readline().strip()
                if line:
                    ex = json.loads(line)
                    chunks = split_data_into_chunks(
                        ex[raw_data_key].strip(),
                        chunk_sz,
                        min_chunk_sz,
                        keep_last,
                        chunking_strategy,
                    )
                    for chunk in chunks:
                        if keep_raw_metadata:
                            # keep the original metadata fields in the emitted passage
                            passage = dict(ex)
                            passage.update(
                                {
                                    "text": chunk,
                                    "id": idx,
                                    "shard_id": shard_index,
                                    "num_shards": num_shards,
                                }
                            )
                            passages.append(passage)
                        else:
                            # raw metadata will be discarded
                            passages.append(
                                {
                                    "text": chunk,
                                    "id": idx,
                                    "shard_id": shard_index,
                                    "num_shards": num_shards,
                                }
                            )
                        idx += 1
                else:
                    # readline() returned an empty string: EOF reached
                    break

    if args.get("passages_dir", None):
        os.makedirs(args.passages_dir, exist_ok=True)
        if use_passage_pos_id_map:
            with open(passage_shard_save_path, "w") as file:
                for passage in passages:
                    file.write(json.dumps(passage) + "\n")

            # If all jsonl data has been built, construct passage_pos_ids and return it
            all_data_exist = True
            for _shard_index in range(num_shards):
                passage_shard_save_path_to_check = os.path.join(
                    args.passages_dir,
                    f"raw_passages-{_shard_index}-of-{num_shards}.jsonl",
                )
                if not os.path.exists(passage_shard_save_path_to_check):
                    all_data_exist = False
            if all_data_exist:
                passage_pos_ids = get_passage_pos_ids(
                    args.passages_dir, pos_map_save_path
                )
            else:
                # not all shards are built yet; the position map cannot be
                # constructed until every shard's jsonl file exists
                passage_pos_ids = None

        else:
            with open(passage_shard_save_path, "wb") as file:
                pickle.dump(passages, file)

    if return_all_passages:
        return passages
    else:
        return passage_pos_ids
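

# A minimal usage sketch (assumption: `args` is an OmegaConf DictConfig, which
# matches the mixed attribute-style and .get() access above; all paths and
# values below are placeholders):
#
#   from omegaconf import OmegaConf
#
#   args = OmegaConf.create({
#       "raw_data_path": "data/corpus",   # a .jsonl file or a directory of .jsonl files
#       "passages_dir": "data/passages",  # cache directory for the built shards
#       "num_shards": 8,
#       "chunk_size": 256,                # words per chunk (fixed_size strategy)
#   })
#   passages = fast_load_jsonl_shard(args, shard_index=0)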


# Used for passage retrieval (legacy; inefficient because it loads the whole dataset)
def load_passages(
    path, chunk_sz=None, min_chunk_sz=0, keep_last=True, num_load_files=None
):
    if not os.path.exists(path):
        logging.info(f"{path} does not exist")
        return
    passages = []
    idx = 0
    # load all files together if a directory is passed
    if not os.path.isdir(path):
        paths = [path]
    else:
        paths = [os.path.join(path, file) for file in os.listdir(path)]
        # todo: support subsampling
        try:
            paths = sorted(
                paths, key=lambda x: int(x.split("-")[-1].split(".jsonl")[0])
            )
        except (ValueError, IndexError):
            logging.info("No sorting on the raw data paths.")
        if num_load_files:
            paths = paths[:num_load_files]
        logging.info(f"Loading files: {paths}")
    for path in paths:
        logging.info(f"Loading passages from: {path}")
        with open(path) as fin:
            if path.endswith(".jsonl"):
                for k, line in enumerate(fin):
                    ex = json.loads(line)
                    chunks = split_data_into_chunks(
                        ex["text"].strip(), chunk_sz, min_chunk_sz, keep_last
                    )
                    for chunk in chunks:
                        passages.append(
                            {
                                "text": chunk,
                                "id": idx,
                            }
                        )
                        idx += 1
            elif path.endswith(".csv"):
                # the dpr wiki is pre-chunked to 100 words
                reader = csv.reader(fin, delimiter="\t")
                for k, row in enumerate(reader):
                    if row[0] != "id":  # skip the header row
                        ex = {"id": row[0], "title": row[2], "text": row[1]}
                        passages.append(ex)
            elif path.endswith(".parquet"):
                import pandas as pd

                df = pd.read_parquet(path, engine="fastparquet")

                if "wikitext" in path:
                    idx = 0
                    for ex_text in df.text:
                        if (
                            ex_text
                        ):  # skip empty string (1467/4358 is empty in the test set)
                            chunks = split_data_into_chunks(
                                ex_text, chunk_sz, keep_last
                            )
                            for chunk in chunks:
                                passages.append(
                                    {
                                        "text": chunk,
                                        "id": idx,
                                    }
                                )
                                idx += 1
            elif path.endswith(".json.gz"):
                with gzip.open(path, "rb") as gz_file:
                    file_content = gz_file.read()
                    decoded_content = file_content.decode("utf-8")
                    json_strings = decoded_content.split("\n")
                    json_strings = [js for js in json_strings if js]
                    data = [
                        json.loads(js) for js in json_strings
                    ]  # dict_keys(['added', 'attributes', 'created', 'id', 'metadata', 'source', 'text', 'version'])
                    for ex in data:
                        chunks = split_data_into_chunks(
                            ex["text"], chunk_sz, min_chunk_sz, keep_last
                        )
                        for chunk in chunks:
                            passages.append(
                                {
                                    "text": chunk,
                                    "id": idx,
                                }
                            )
                            idx += 1

    return passages


def split_data_into_chunks(
    text, chunk_sz, min_chunk_sz, keep_last, chunking_strategy="fixed_size"
):
    # returns chunks of size <= chunk_sz + min_chunk_sz
    if chunk_sz is None:
        return [text]

    if chunking_strategy == "fixed_size":
        text = text.split()
        N = len(text) if keep_last else len(text) - len(text) % chunk_sz
        chunks = [" ".join(text[i : i + chunk_sz]) for i in range(0, N, chunk_sz)]

        if len(chunks) > 1 and len(chunks[-1].split(" ")) < min_chunk_sz:
            # merge the too-short last chunk into the previous chunk
            last_chunk = chunks.pop()
            chunks[-1] += " " + last_chunk
    elif chunking_strategy == "semantic":
        from semantic_text_splitter import TextSplitter

        # here `chunk_sz` is interpreted as a token budget rather than a word count
        splitter = TextSplitter.from_tiktoken_model("gpt-3.5-turbo", chunk_sz)
        chunks = splitter.chunks(text)
    else:
        raise ValueError(f"Unknown chunking_strategy: {chunking_strategy}")

    return chunks
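

# Illustrative example of the fixed_size strategy (a sketch, not executed here):
#   split_data_into_chunks("a b c d e f g", chunk_sz=3, min_chunk_sz=2, keep_last=True)
#   -> ["a b c", "d e f g"]
# The raw chunks are ["a b c", "d e f", "g"]; the one-word tail is shorter than
# min_chunk_sz=2, so it is merged into the previous chunk.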


############################## Evaluation ##############################
def load_eval_data(cfg):
    path = cfg.evaluation.data.eval_data
    task_name = cfg.tasks.eval.task_name

    # use lm_tokenizer to keep the token counts consistent with the ones used for PPL computation
    tokenizer = transformers.AutoTokenizer.from_pretrained(cfg.model.lm_model)

    if path.endswith(".jsonl"):
        data = load_jsonl(path)  # each example has keys 'text', 'meta'
    elif path.endswith(".parquet"):
        data = load_parquet(path)
    else:
        raise ValueError(f"Unsupported eval data file format: {path}")

    if task_name == "perplexity":
        eval_data_args = cfg.evaluation.data

        data = prepare_ppl_eval_data(
            data,
            tokenizer,
            eval_data_args.max_eval_data_seq_length,
            eval_data_args.eval_stride,
            eval_data_args.merge,
            eval_data_args.num_eval_samples,
            eval_data_args.seed,
        )

    elif task_name == "lm-eval":
        # prepare data for lm-evaluate-harness
        data = prepare_lm_eval_data(data)

    elif task_name == "mmlu":
        # (test case) prepare mmlu for instruct-eval
        data = prepare_mmlu_eval_data(data)

    else:
        raise AttributeError(f"Unknown task_name: {task_name}")

    return data


def prepare_lm_eval_data(data):
    """
    Use the question as the query. (0-shot)
    """
    new_data = []
    for ex in data:
        ex.update({"raw_query": ex["query"]})
        new_data.append(ex)
    return new_data


def prepare_mmlu_eval_data(data):
    """
    Use the question as the query. (0-shot)
    """
    new_data = []
    for ex in data:
        ex.update({"raw_query": ex["prompt_end"]})
        new_data.append(ex)
    return new_data


def prepare_ppl_eval_data(
    data, tokenizer, max_seq_length, stride, merge, num_eval_samples=None, seed=310
):
    if tokenizer is None:
        logging.info(
            f"Constructing evaluation samples from {len(data)} raw documents for closed-book evaluation..."
        )
        return [{"raw_inputs": ex["text"]} for ex in data]

    input_ids = [tokenizer(ex["text"])["input_ids"] for ex in data]

    # prefer the EOS id for padding; fall back to pad_token_id if EOS is undefined
    pad_token_id = (
        tokenizer.pad_token_id
        if tokenizer.eos_token_id is None
        else tokenizer.eos_token_id
    )
    if merge:
        # todo: cluster similar context together before merging
        flatten_input_ids = np.array([_id for ids in input_ids for _id in ids])
        all_input_ids, all_targets = batch_merged(
            flatten_input_ids,
            max_seq_length=max_seq_length,
            stride=stride,
            pad_token_id=pad_token_id,
        )  # padding targets with -100 would be ignored by the HF loss, but -100 cannot be decoded
    else:
        # todo: no tokens used for query when length < stride
        all_input_ids, all_targets = batch(
            input_ids,
            max_seq_length=max_seq_length,
            stride=stride,
            pad_token_id=pad_token_id,
        )

    if num_eval_samples:
        np.random.seed(seed)
        indices = np.random.permutation(len(all_input_ids))[:num_eval_samples]
        all_input_ids, all_targets = all_input_ids[indices], all_targets[indices]

    new_data = []
    logging.info(
        f"Constructing evaluation samples from {len(all_input_ids)} batched windows..."
    )
    for input_ids, targets in zip(all_input_ids, all_targets):
        input_ids, targets = input_ids.tolist(), targets.tolist()
        query_ids = [
            int(_id) for _id, t in zip(input_ids, targets) if t == pad_token_id
        ]
        new_data.append(
            {
                # 'input_ids': input_ids,
                # 'targets': targets,  # not used for HF models
                "raw_inputs": tokenizer.decode(
                    input_ids, skip_special_tokens=True
                ),  #  <- removing [CLS] will cause inputs to be shorter than targets
                "raw_query": tokenizer.decode(query_ids, skip_special_tokens=True),
            }
        )
    logging.info(f"Finished construction with {len(new_data)} evaluation samples.")

    return new_data
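

# Note on the output format: each evaluation sample is a dict with "raw_inputs"
# (the decoded full window) and "raw_query" (the decoded context-only positions,
# i.e. tokens whose target was masked to pad_token_id by batch/batch_merged).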


def load_jsonl(data_path):
    assert os.path.exists(data_path)
    data = []
    with open(data_path, "r") as file:
        for line in file:
            ex = json.loads(line)
            data.append(ex)
    return data


def load_parquet(data_path):
    import pandas as pd

    df = pd.read_parquet(data_path, engine="fastparquet")
    data = []
    for ex_text in df.text:
        if ex_text:
            data.append({"text": ex_text})
    return data


def batch_merged(
    flatten_input_ids, max_seq_length, stride, pad_token_id, flatten_masks=None
):
    all_input_ids = []
    all_targets = []
    prev_end_loc = 0

    for begin_loc in range(0, len(flatten_input_ids) - 1, stride):
        end_loc = min(begin_loc + max_seq_length, len(flatten_input_ids) - 1)
        trg_len = end_loc - prev_end_loc

        # we feed begin_loc ~ end_loc to the model,
        # but calculate loss only for prev_end_loc ~ end_loc
        input_ids = flatten_input_ids[begin_loc:end_loc].copy()
        target_ids = flatten_input_ids[begin_loc + 1 : end_loc + 1].copy()

        if flatten_masks is not None:
            for i, m in enumerate(flatten_masks[begin_loc + 1 : end_loc + 1]):
                if not m:
                    target_ids[i] = pad_token_id

        target_ids[:-trg_len] = pad_token_id
        assert input_ids.shape == target_ids.shape

        if (
            end_loc == len(flatten_input_ids) - 1
            and len(input_ids) == len(target_ids) < max_seq_length
        ):
            pads = np.array(
                [pad_token_id for _ in range(max_seq_length - len(input_ids))]
            )
            input_ids = np.concatenate([input_ids, pads])
            target_ids = np.concatenate([target_ids, pads])

        assert len(input_ids) == len(target_ids) == max_seq_length, (
            begin_loc,
            end_loc,
            len(flatten_input_ids),
        )

        all_input_ids.append(input_ids)
        all_targets.append(target_ids)

        prev_end_loc = end_loc

        if end_loc == len(flatten_input_ids) - 1:
            break

    assert np.all([len(input_ids) == max_seq_length for input_ids in all_input_ids])
    assert np.all([len(targets) == max_seq_length for targets in all_targets])
    return np.stack(all_input_ids), np.stack(all_targets)
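

# Worked example (illustrative): with 10 token ids, max_seq_length=4, and stride=2,
# the loop emits windows [0:4], [2:6], [4:8], [6:9]; the final window reaches index
# len-1 and is right-padded to length 4. Within each window, the targets are the
# inputs shifted by one position, and positions already scored by an earlier window
# are masked to pad_token_id, so every token's loss is counted exactly once.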


def batch(input_ids, max_seq_length, stride, pad_token_id):
    all_input_ids, all_targets = [], []
    for _input_ids in input_ids:
        _all_input_ids, _all_targets = batch_merged(
            np.array(_input_ids), max_seq_length, stride, pad_token_id
        )
        all_input_ids.append(_all_input_ids)
        all_targets.append(_all_targets)
    return np.concatenate(all_input_ids, 0), np.concatenate(all_targets, 0)
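

# Illustrative call (a sketch): given per-document token id lists, e.g.
#   batch([[1, 2, 3, 4, 5], [6, 7, 8]], max_seq_length=4, stride=2, pad_token_id=0)
# each document is windowed independently via batch_merged, and the per-document
# (num_windows, max_seq_length) arrays are concatenated along axis 0.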