Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
jchevallard committed
Commit df57a76 · 1 parent: ce94b0a

feat: converting CRAG sampler to a package for simpler install and use

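The change below packages the sampler so it can be installed and imported directly. A minimal sketch of the intended workflow, assuming the repository is cloned locally and the CRAG dev file sits under ./local_data (the subsamples.json output name is only illustrative):

    # pip install -e .   (editable install from the repository root)
    from crag_sampler import CragSampler

    sampler = CragSampler(input_file="./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2")
    info = sampler.create_subsamples(n_subsamples=5, output_path="./local_data/subsamples.json")
    sampler.write_subsamples(subsamples_file="./local_data/subsamples.json", compress=True)
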
.gitignore ADDED
@@ -0,0 +1,5 @@
+.DS_Store
+.env
+.venv
+*.egg-info*
+*__pycache__*
crag_sampler/__init__.py ADDED
@@ -0,0 +1,3 @@
+from .sampler import CragSampler
+
+__all__ = ["CragSampler"]
crag_sampler/sampler.py ADDED
@@ -0,0 +1,118 @@
+import bz2
+from typing import Iterator, Dict, Any, List, Optional
+import pandas as pd
+import os
+import hashlib
+import json
+from sklearn.model_selection import StratifiedKFold
+import numpy as np
+from multiprocessing import Pool, cpu_count
+from functools import partial
+import subprocess
+
+from .utils import (
+    read_jsonl_fields_fast,
+    process_answer_types,
+    create_stratified_subsamples,
+    subsample_jsonl_file,
+)
+
+
+class CragSampler:
+    """Main class for handling CRAG dataset sampling operations."""
+
+    def __init__(
+        self,
+        input_file: str,
+        required_fields: Optional[List[str]] = None,
+        use_cache: bool = True,
+    ):
+        """Initialize CragSampler.
+
+        Args:
+            input_file: Path to input JSONL file (can be bz2 compressed)
+            required_fields: List of field names to extract. If None, uses default fields
+            use_cache: Whether to use/create cache file
+        """
+        self.input_file = input_file
+        self.required_fields = required_fields or [
+            "domain",
+            "answer",
+            "question_type",
+            "static_or_dynamic",
+        ]
+        self.use_cache = use_cache
+        self.df = self._load_data()
+
+    def _load_data(self) -> pd.DataFrame:
+        """Load and process data from JSONL file."""
+        df = read_jsonl_fields_fast(
+            self.input_file, self.required_fields, self.use_cache
+        )
+        return process_answer_types(df)
+
+    def create_subsamples(
+        self,
+        n_subsamples: int = 5,
+        stratify_columns: Optional[List[str]] = None,
+        output_path: Optional[str] = None,
+        force_compute: bool = False,
+    ) -> Dict:
+        """Create stratified subsamples of the dataset.
+
+        Args:
+            n_subsamples: Number of subsamples to create
+            stratify_columns: Columns to use for stratification. If None, uses defaults
+            output_path: Path to save/load the JSON output
+            force_compute: If True, always compute subsamples even if file exists
+
+        Returns:
+            Dictionary containing the subsamples information
+        """
+        if stratify_columns is None:
+            stratify_columns = [
+                "domain",
+                "answer_type",
+                "question_type",
+                "static_or_dynamic",
+            ]
+
+        if output_path is None:
+            output_path = os.path.join(
+                os.path.dirname(self.input_file),
+                f"{os.path.splitext(os.path.basename(self.input_file))[0]}_subsamples.json",
+            )
+
+        return create_stratified_subsamples(
+            self.df,
+            n_subsamples=n_subsamples,
+            stratify_columns=stratify_columns,
+            output_path=output_path,
+            force_compute=force_compute,
+        )
+
+    def write_subsamples(
+        self,
+        subsamples_file: str,
+        output_dir: Optional[str] = None,
+        compress: bool = True,
+        n_processes: Optional[int] = None,
+        overwrite: bool = False,
+    ) -> None:
+        """Write subsamples to separate files.
+
+        Args:
+            subsamples_file: Path to JSON file containing subsample indices
+            output_dir: Directory to save subsample files
+            compress: Whether to compress output files with bz2
+            n_processes: Number of processes to use
+            overwrite: If False, skip existing output files
+        """
+        subsample_jsonl_file(
+            self.input_file,
+            subsamples_file,
+            output_dir=output_dir,
+            compress=compress,
+            n_processes=n_processes,
+            overwrite=overwrite,
+        )
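
When output_path is omitted, create_subsamples derives it from the input file name. A small standalone sketch of that derivation (reusing the dev-set path from the example script further down) shows that only the final .bz2 extension is stripped:

    import os

    input_file = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
    default_output = os.path.join(
        os.path.dirname(input_file),
        f"{os.path.splitext(os.path.basename(input_file))[0]}_subsamples.json",
    )
    print(default_output)  # ./local_data/crag_task_1_and_2_dev_v4.jsonl_subsamples.json
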
crag_to_subsamples.py → crag_sampler/utils.py RENAMED
@@ -1,17 +1,16 @@
-import bz2
-from typing import Iterator, Dict, Any
-import pandas as pd
 import os
 import hashlib
+import bz2
 import json
+import subprocess
+from typing import Dict, List, Optional, Any
+import pandas as pd
-from sklearn.model_selection import StratifiedKFold
 import numpy as np
 from multiprocessing import Pool, cpu_count
-from functools import partial
-import subprocess
+from sklearn.model_selection import StratifiedKFold
 
 
-def get_cache_path(file_path: str, required_fields: list[str]) -> str:
+def get_cache_path(file_path: str, required_fields: List[str]) -> str:
     """
     Generate a unique cache file path based on input file and fields.
 
@@ -36,7 +35,7 @@ def get_cache_path(file_path: str, required_fields: list[str]) -> str:
 
 
 def read_jsonl_fields_fast(
-    file_path: str, required_fields: list[str], use_cache: bool = True
+    file_path: str, required_fields: List[str], use_cache: bool = True
 ) -> pd.DataFrame:
     """
     Quickly extract specific fields from a compressed JSONL file using string operations.
@@ -171,7 +170,7 @@ def process_answer_types(df: pd.DataFrame) -> pd.DataFrame:
 def create_stratified_subsamples(
     df: pd.DataFrame,
     n_subsamples: int,
-    stratify_columns: list[str] = [
+    stratify_columns: List[str] = [
         "domain",
         "answer_type",
         "question_type",
@@ -179,7 +178,7 @@ def create_stratified_subsamples(
     ],
     output_path: str = "subsamples.json",
     force_compute: bool = False,
-) -> dict:
+) -> Dict[str, Any]:
     """
     Create stratified subsamples of the dataset and save them to a JSON file.
     Each subsample gets a unique ID based on its indices.
@@ -270,7 +269,7 @@
 
 
 def write_subsample(
-    input_file: str, indices: list[int], output_file: str, compress: bool = True
+    input_file: str, indices: List[int], output_file: str, compress: bool = True
 ) -> None:
     """
     Write a single subsample to a file using awk.
@@ -334,9 +333,9 @@ def write_subsample(
 def subsample_jsonl_file(
     input_file: str,
     subsamples_file: str,
-    output_dir: str = None,
+    output_dir: Optional[str] = None,
     compress: bool = True,
-    n_processes: int = None,
+    n_processes: Optional[int] = None,
     overwrite: bool = False,
 ) -> None:
     """
@@ -395,52 +394,3 @@ def subsample_jsonl_file(
         pool.starmap(write_subsample, write_args)
     else:
         print("No files to process - all files exist and overwrite=False")
-
-
-def run_crag_task_1_and_2(
-    file_path: str,
-    fields_to_extract: list[str],
-    n_subsamples: int = 5,
-    output_dir: str = None,
-    compress: bool = True,
-    n_processes: int = None,
-    overwrite: bool = False,
-):
-    # Load and process data
-    df = read_jsonl_fields_fast(file_path, fields_to_extract)
-    df = process_answer_types(df)
-    print(df.head())
-
-    output_path = os.path.join(
-        os.path.dirname(file_path),
-        os.path.basename(file_path).split(".")[0] + "_subsamples.json",
-    )
-
-    # This will load from file if it exists and parameters match
-    subsamples_data = create_stratified_subsamples(
-        df, n_subsamples=5, output_path=output_path
-    )
-
-    # Example of how to read and use the subsamples
-    with open(output_path, "r") as f:
-        subsamples_data = json.load(f)
-
-    # Print some information about the subsamples
-    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
-    print("\nGlobal statistics:")
-    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
-
-    # Print statistics for first subsample
-    print("\nFirst subsample statistics:")
-    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))
-
-    # This will use all available CPU cores
-    subsample_jsonl_file(file_path, output_path, compress=True)
-
-
-# Example usage
-if __name__ == "__main__":
-    file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
-    fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
-
-    run_crag_task_1_and_2(file_path, fields_to_extract)
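The old run_crag_task_1_and_2 driver leaves this module (it reappears, reworked around CragSampler, in examples/basic_sampling.py below), but the renamed utils functions can still be called directly. A rough sketch against the signatures shown above, with placeholder paths and output file name:

    from crag_sampler.utils import (
        read_jsonl_fields_fast,
        process_answer_types,
        create_stratified_subsamples,
    )

    df = read_jsonl_fields_fast(
        "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2",
        ["domain", "answer", "question_type", "static_or_dynamic"],
    )
    df = process_answer_types(df)
    info = create_stratified_subsamples(df, n_subsamples=5, output_path="./local_data/subsamples.json")
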
examples/basic_sampling.py ADDED
@@ -0,0 +1,65 @@
+# Example usage
+from crag_sampler import CragSampler
+import json
+import os
+
+
+def run_crag_task_1_and_2(
+    file_path: str,
+    fields_to_extract: list[str] = None,
+    n_subsamples: int = 5,
+    output_dir: str = None,
+    compress: bool = True,
+    n_processes: int = None,
+    overwrite: bool = False,
+):
+    """Run the CRAG sampling pipeline for tasks 1 and 2.
+
+    Args:
+        file_path: Path to input JSONL file
+        fields_to_extract: List of fields to extract from JSONL
+        n_subsamples: Number of subsamples to create
+        output_dir: Directory for output files
+        compress: Whether to compress output files
+        n_processes: Number of processes for parallel processing
+        overwrite: Whether to overwrite existing files
+    """
+    # Initialize sampler
+    sampler = CragSampler(
+        input_file=file_path, required_fields=fields_to_extract, use_cache=True
+    )
+
+    # Create output path for subsamples
+    output_path = os.path.join(
+        os.path.dirname(file_path),
+        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsamples.json",
+    )
+
+    # Create subsamples
+    subsamples_data = sampler.create_subsamples(
+        n_subsamples=n_subsamples, output_path=output_path
+    )
+
+    # Print statistics
+    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
+    print("\nGlobal statistics:")
+    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
+    print("\nFirst subsample statistics:")
+    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))
+
+    # Write subsamples to files
+    sampler.write_subsamples(
+        subsamples_file=output_path,
+        output_dir=output_dir,
+        compress=compress,
+        n_processes=n_processes,
+        overwrite=overwrite,
+    )
+
+
+# Example usage
+if __name__ == "__main__":
+    file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
+    fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
+
+    run_crag_task_1_and_2(file_path, fields_to_extract)
pyproject.toml DELETED
@@ -1,29 +0,0 @@
-[project]
-name = "lejuge"
-version = "0.1.0"
-description = "Add your description here"
-authors = [
-    { name = "Jacopo Chevallard", email = "[email protected]" }
-]
-dependencies = [
-    "ipykernel>=6.29.5",
-    "pandas>=2.2.3",
-    "fastparquet>=2024.11.0",
-    "scikit-learn>=1.6.1",
-]
-readme = "README.md"
-requires-python = ">= 3.11"
-
-[build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
-
-[tool.rye]
-managed = true
-dev-dependencies = []
-
-[tool.hatch.metadata]
-allow-direct-references = true
-
-[tool.hatch.build.targets.wheel]
-packages = ["src/lejuge"]
requirements-dev.lock DELETED
@@ -1,109 +0,0 @@
-# generated by rye
-# use `rye lock` or `rye sync` to update this lockfile
-#
-# last locked with the following flags:
-#   pre: false
-#   features: []
-#   all-features: false
-#   with-sources: false
-#   generate-hashes: false
-#   universal: false
-
--e file:.
-appnope==0.1.4
-    # via ipykernel
-asttokens==3.0.0
-    # via stack-data
-comm==0.2.2
-    # via ipykernel
-cramjam==2.9.1
-    # via fastparquet
-debugpy==1.8.12
-    # via ipykernel
-decorator==5.1.1
-    # via ipython
-executing==2.2.0
-    # via stack-data
-fastparquet==2024.11.0
-    # via lejuge
-fsspec==2024.12.0
-    # via fastparquet
-ipykernel==6.29.5
-    # via lejuge
-ipython==8.31.0
-    # via ipykernel
-jedi==0.19.2
-    # via ipython
-joblib==1.4.2
-    # via scikit-learn
-jupyter-client==8.6.3
-    # via ipykernel
-jupyter-core==5.7.2
-    # via ipykernel
-    # via jupyter-client
-matplotlib-inline==0.1.7
-    # via ipykernel
-    # via ipython
-nest-asyncio==1.6.0
-    # via ipykernel
-numpy==2.2.2
-    # via fastparquet
-    # via pandas
-    # via scikit-learn
-    # via scipy
-packaging==24.2
-    # via fastparquet
-    # via ipykernel
-pandas==2.2.3
-    # via fastparquet
-    # via lejuge
-parso==0.8.4
-    # via jedi
-pexpect==4.9.0
-    # via ipython
-platformdirs==4.3.6
-    # via jupyter-core
-prompt-toolkit==3.0.50
-    # via ipython
-psutil==6.1.1
-    # via ipykernel
-ptyprocess==0.7.0
-    # via pexpect
-pure-eval==0.2.3
-    # via stack-data
-pygments==2.19.1
-    # via ipython
-python-dateutil==2.9.0.post0
-    # via jupyter-client
-    # via pandas
-pytz==2024.2
-    # via pandas
-pyzmq==26.2.0
-    # via ipykernel
-    # via jupyter-client
-scikit-learn==1.6.1
-    # via lejuge
-scipy==1.15.1
-    # via scikit-learn
-six==1.17.0
-    # via python-dateutil
-stack-data==0.6.3
-    # via ipython
-threadpoolctl==3.5.0
-    # via scikit-learn
-tornado==6.4.2
-    # via ipykernel
-    # via jupyter-client
-traitlets==5.14.3
-    # via comm
-    # via ipykernel
-    # via ipython
-    # via jupyter-client
-    # via jupyter-core
-    # via matplotlib-inline
-typing-extensions==4.12.2
-    # via ipython
-tzdata==2025.1
-    # via pandas
-wcwidth==0.2.13
-    # via prompt-toolkit
requirements.lock DELETED
@@ -1,109 +0,0 @@
-# generated by rye
-# use `rye lock` or `rye sync` to update this lockfile
-#
-# last locked with the following flags:
-#   pre: false
-#   features: []
-#   all-features: false
-#   with-sources: false
-#   generate-hashes: false
-#   universal: false
-
--e file:.
-appnope==0.1.4
-    # via ipykernel
-asttokens==3.0.0
-    # via stack-data
-comm==0.2.2
-    # via ipykernel
-cramjam==2.9.1
-    # via fastparquet
-debugpy==1.8.12
-    # via ipykernel
-decorator==5.1.1
-    # via ipython
-executing==2.2.0
-    # via stack-data
-fastparquet==2024.11.0
-    # via lejuge
-fsspec==2024.12.0
-    # via fastparquet
-ipykernel==6.29.5
-    # via lejuge
-ipython==8.31.0
-    # via ipykernel
-jedi==0.19.2
-    # via ipython
-joblib==1.4.2
-    # via scikit-learn
-jupyter-client==8.6.3
-    # via ipykernel
-jupyter-core==5.7.2
-    # via ipykernel
-    # via jupyter-client
-matplotlib-inline==0.1.7
-    # via ipykernel
-    # via ipython
-nest-asyncio==1.6.0
-    # via ipykernel
-numpy==2.2.2
-    # via fastparquet
-    # via pandas
-    # via scikit-learn
-    # via scipy
-packaging==24.2
-    # via fastparquet
-    # via ipykernel
-pandas==2.2.3
-    # via fastparquet
-    # via lejuge
-parso==0.8.4
-    # via jedi
-pexpect==4.9.0
-    # via ipython
-platformdirs==4.3.6
-    # via jupyter-core
-prompt-toolkit==3.0.50
-    # via ipython
-psutil==6.1.1
-    # via ipykernel
-ptyprocess==0.7.0
-    # via pexpect
-pure-eval==0.2.3
-    # via stack-data
-pygments==2.19.1
-    # via ipython
-python-dateutil==2.9.0.post0
-    # via jupyter-client
-    # via pandas
-pytz==2024.2
-    # via pandas
-pyzmq==26.2.0
-    # via ipykernel
-    # via jupyter-client
-scikit-learn==1.6.1
-    # via lejuge
-scipy==1.15.1
-    # via scikit-learn
-six==1.17.0
-    # via python-dateutil
-stack-data==0.6.3
-    # via ipython
-threadpoolctl==3.5.0
-    # via scikit-learn
-tornado==6.4.2
-    # via ipykernel
-    # via jupyter-client
-traitlets==5.14.3
-    # via comm
-    # via ipykernel
-    # via ipython
-    # via jupyter-client
-    # via jupyter-core
-    # via matplotlib-inline
-typing-extensions==4.12.2
-    # via ipython
-tzdata==2025.1
-    # via pandas
-wcwidth==0.2.13
-    # via prompt-toolkit
requirements.txt ADDED
@@ -0,0 +1,3 @@
+pandas>=1.0.0
+scikit-learn>=0.24.0
+fastparquet>=2024.11.0
setup.py ADDED
@@ -0,0 +1,20 @@
+from setuptools import setup, find_packages
+
+setup(
+    name="crag-sampler",
+    version="0.1.0",
+    packages=find_packages(),
+    install_requires=["pandas>=1.0.0", "scikit-learn>=0.24.0"],
+    author="Jacopo Chevallard",
+    author_email="[email protected]",
+    description="A tool for sampling CRAG datasets",
+    long_description=open("README.md").read(),
+    long_description_content_type="text/markdown",
+    url="https://huggingface.co/Quivr/CRAG",
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: MIT License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires=">=3.7",
+)
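
setup.py declares the crag-sampler distribution and its runtime dependencies. One plausible way to try the package end-to-end (an assumption; this commit ships no install instructions) is an editable install followed by the bundled example, driven from Python here so the snippet is self-contained:

    import subprocess
    import sys

    # editable install of crag-sampler from the repository root
    subprocess.run([sys.executable, "-m", "pip", "install", "-e", "."], check=True)
    # run the bundled example (expects the CRAG dev file under ./local_data)
    subprocess.run([sys.executable, "examples/basic_sampling.py"], check=True)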