Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
jchevallard committed · Commit 4a87d90 · 1 Parent(s): fe440e6

feat: renaming output files and fixing a bug

crag_sampler/sampler.py CHANGED
@@ -13,8 +13,8 @@ import subprocess
 from .utils import (
     read_jsonl_fields_fast,
     process_answer_types,
-    create_stratified_subsamples,
-    subsample_jsonl_file,
+    create_stratified_subsets,
+    subset_jsonl_file,
 )
 
 
@@ -51,23 +51,23 @@ class CragSampler:
         )
         return process_answer_types(df)
 
-    def create_subsamples(
+    def create_subsets(
         self,
-        n_subsamples: int = 5,
+        n_subsets: int = 5,
         stratify_columns: Optional[List[str]] = None,
         output_path: Optional[str] = None,
         force_compute: bool = False,
     ) -> Dict:
-        """Create stratified subsamples of the dataset.
+        """Create stratified subsets of the dataset.
 
         Args:
-            n_subsamples: Number of subsamples to create
+            n_subsets: Number of subsets to create
            stratify_columns: Columns to use for stratification. If None, uses defaults
             output_path: Path to save/load the JSON output
-            force_compute: If True, always compute subsamples even if file exists
+            force_compute: If True, always compute subsets even if file exists
 
         Returns:
-            Dictionary containing the subsamples information
+            Dictionary containing the subsets information
         """
         if stratify_columns is None:
             stratify_columns = [
@@ -80,37 +80,37 @@ class CragSampler:
         if output_path is None:
             output_path = os.path.join(
                 os.path.dirname(self.input_file),
-                f"{os.path.splitext(os.path.basename(self.input_file))[0]}_subsamples.json",
+                f"{os.path.splitext(os.path.basename(self.input_file))[0]}_subsets.json",
             )
 
-        return create_stratified_subsamples(
+        return create_stratified_subsets(
             self.df,
-            n_subsamples=n_subsamples,
+            n_subsets=n_subsets,
             stratify_columns=stratify_columns,
             output_path=output_path,
             force_compute=force_compute,
         )
 
-    def write_subsamples(
+    def write_subsets(
         self,
-        subsamples_file: str,
+        subsets_file: str,
         output_dir: Optional[str] = None,
         compress: bool = True,
         n_processes: Optional[int] = None,
         overwrite: bool = False,
     ) -> None:
-        """Write subsamples to separate files.
+        """Write subsets to separate files.
 
         Args:
-            subsamples_file: Path to JSON file containing subsample indices
-            output_dir: Directory to save subsample files
+            subsets_file: Path to JSON file containing subset indices
+            output_dir: Directory to save subset files
             compress: Whether to compress output files with bz2
             n_processes: Number of processes to use
             overwrite: If False, skip existing output files
         """
-        subsample_jsonl_file(
+        subset_jsonl_file(
             self.input_file,
-            subsamples_file,
+            subsets_file,
             output_dir=output_dir,
             compress=compress,
             n_processes=n_processes,
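
Note on the renamed API: `create_subsamples`/`write_subsamples` on `CragSampler` become `create_subsets`/`write_subsets`. A minimal usage sketch of the new names follows; the import path and constructor arguments are assumptions taken from examples/basic_sampling.py further down this diff, and the paths are illustrative only.

    # Sketch only: assumes CragSampler is exposed by the crag_sampler package and
    # accepts the constructor arguments used in examples/basic_sampling.py.
    from crag_sampler import CragSampler

    sampler = CragSampler(
        input_file="./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2",
        required_fields=["domain", "answer", "question_type", "static_or_dynamic"],
        use_cache=True,
    )

    subsets_path = "./local_data/crag_subsets.json"  # illustrative output path
    # create_subsets() writes/loads the subsets JSON and returns its contents.
    subsets_data = sampler.create_subsets(n_subsets=5, output_path=subsets_path)
    # write_subsets() splits the JSONL into one (optionally bz2-compressed) file per subset.
    sampler.write_subsets(subsets_file=subsets_path, compress=True)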
crag_sampler/utils.py CHANGED
@@ -167,82 +167,80 @@ def process_answer_types(df: pd.DataFrame) -> pd.DataFrame:
     return df
 
 
-def create_stratified_subsamples(
+def create_stratified_subsets(
     df: pd.DataFrame,
-    n_subsamples: int,
+    n_subsets: int,
     stratify_columns: List[str] = [
         "domain",
         "answer_type",
         "question_type",
         "static_or_dynamic",
     ],
-    output_path: str = "subsamples.json",
+    output_path: str = "subsets.json",
     force_compute: bool = False,
 ) -> Dict[str, Any]:
     """
-    Create stratified subsamples of the dataset and save them to a JSON file.
-    Each subsample gets a unique ID based on its indices.
+    Create stratified subsets of the dataset and save them to a JSON file.
+    Each subset gets a unique ID based on its indices.
 
     Args:
         df: Input DataFrame
-        n_subsamples: Number of subsamples to create
+        n_subsets: Number of subsets to create
         stratify_columns: Columns to use for stratification
         output_path: Path to save/load the JSON output
-        force_compute: If True, always compute subsamples even if file exists
+        force_compute: If True, always compute subsets even if file exists
 
     Returns:
-        Dictionary containing the subsamples information
+        Dictionary containing the subsets information
     """
     # Check if file exists and we can use it
     if not force_compute and os.path.exists(output_path):
         try:
             with open(output_path, "r") as f:
-                subsamples_data = json.load(f)
+                subsets_data = json.load(f)
 
             # Validate the loaded data has the expected structure
             if (
-                subsamples_data.get("metadata", {}).get("n_subsamples") == n_subsamples
-                and subsamples_data.get("metadata", {}).get("stratify_columns")
+                subsets_data.get("metadata", {}).get("n_subsets") == n_subsets
+                and subsets_data.get("metadata", {}).get("stratify_columns")
                 == stratify_columns
             ):
-                print(f"Loading existing subsamples from {output_path}")
-                return subsamples_data
+                print(f"Loading existing subsets from {output_path}")
+                return subsets_data
             else:
-                print(
-                    "Existing subsamples file has different parameters, recomputing..."
-                )
+                print("Existing subsets file has different parameters, recomputing...")
         except Exception as e:
-            print(f"Error loading existing subsamples file: {e}, recomputing...")
+            print(f"Error loading existing subsets file: {e}, recomputing...")
 
     # Create a combined category for stratification
     df["strat_category"] = df[stratify_columns].astype(str).agg("_".join, axis=1)
 
-    # Initialize the subsampleter
-    skf = StratifiedKFold(n_splits=n_subsamples, shuffle=True, random_state=42)
+    # Initialize the subsetter
+    skf = StratifiedKFold(n_splits=n_subsets, shuffle=True, random_state=42)
 
-    # Create subsamples
-    subsamples_info = []
-    for subsample_idx, (_, subsample_indices) in enumerate(
+    # Create subsets
+    subsets_info = []
+    for subset_idx, (_, subset_indices) in enumerate(
         skf.split(df, df["strat_category"])
     ):
         # Sort indices for consistent hashing
-        sorted_indices = sorted(subsample_indices.tolist())
+        sorted_indices = sorted(subset_indices.tolist())
 
         # Create a deterministic ID from the indices
-        subsample_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8]
+        subset_id = hashlib.md5(str(sorted_indices).encode()).hexdigest()[:8]
 
-        # Calculate statistics for this subsample
+        # Calculate statistics for this subset
         stats = {}
-        subsample_df = df.iloc[subsample_indices]
+        subset_df = df.iloc[subset_indices]
         for col in stratify_columns:
-            stats[col] = subsample_df[col].value_counts().to_dict()
+            stats[col] = subset_df[col].value_counts().to_dict()
 
-        subsamples_info.append(
+        subsets_info.append(
             {
-                "id": subsample_id,
+                "index": subset_idx,
                 "statistics": stats,
                 "indices": sorted_indices,
-                "size": len(subsample_indices),
+                "size": len(subset_indices),
             }
         )
 
@@ -253,12 +251,12 @@ def create_stratified_subsamples(
 
     output_data = {
         "metadata": {
-            "n_subsamples": n_subsamples,
+            "n_subsets": n_subsets,
             "total_samples": len(df),
             "stratify_columns": stratify_columns,
             "global_statistics": global_stats,
         },
-        "subsamples": subsamples_info,
+        "subsets": subsets_info,
     }
 
     # Save to JSON
@@ -268,11 +266,10 @@ def create_stratified_subsamples(
     return output_data
 
 
-def write_subsample(
+def write_subset(
     input_file: str, indices: List[int], output_file: str, compress: bool = True
 ) -> None:
-    """
-    Write a single subsample to a file using awk.
+    """Write a single subset to a file using awk.
 
     Args:
         input_file: Path to input JSONL file
@@ -280,15 +277,10 @@ def write_subsample(
         output_file: Path to output file
         compress: Whether to compress output
     """
-    # Convert indices to awk condition
-    # NR is the current line number in awk
+    # Convert indices to 1-based indexing and create NR condition
    indices_set = set(i + 1 for i in indices)  # Convert to 1-based indexing
-    indices_str = ",".join(str(i) for i in sorted(indices_set))
-
-    # Create awk script with escaped curly braces
-    awk_script = (
-        f'BEGIN {{subsample("{indices_str}",a,","); for(i in a) n[a[i]];}} NR in n'
-    )
+    nr_conditions = " || ".join(f"NR == {i}" for i in sorted(indices_set))
+    awk_script = f"{nr_conditions}"
 
     if input_file.endswith(".bz2"):
         if compress:
@@ -301,7 +293,7 @@ def write_subsample(
     else:
         cmd = f"awk '{awk_script}' '{input_file}' > '{output_file}'"
 
-    print(f"Process {os.getpid()} - Starting subsample to {output_file}")
+    print(f"Process {os.getpid()} - Starting subset to {output_file}")
     try:
         result = subprocess.run(
             cmd,
@@ -311,12 +303,12 @@ def write_subsample(
             stdout=subprocess.PIPE,
             text=True,
         )
-        print(f"Process {os.getpid()} - Finished subsample to {output_file}")
+        print(f"Process {os.getpid()} - Finished subset to {output_file}")
 
-        # Verify the output file exists and has content
         if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
             print(
-                f"Process {os.getpid()} - Successfully created {output_file} ({os.path.getsize(output_file)} bytes)"
+                f"Process {os.getpid()} - Successfully created {output_file} "
+                f"({os.path.getsize(output_file)} bytes)"
             )
         else:
             raise Exception(f"Output file {output_file} is empty or doesn't exist")
@@ -330,33 +322,33 @@ def write_subsample(
         raise
 
 
-def subsample_jsonl_file(
+def subset_jsonl_file(
     input_file: str,
-    subsamples_file: str,
+    subsets_file: str,
     output_dir: Optional[str] = None,
     compress: bool = True,
     n_processes: Optional[int] = None,
     overwrite: bool = False,
 ) -> None:
     """
-    subsample a large JSONL file into multiple files using sed for maximum performance.
+    subset a large JSONL file into multiple files using sed for maximum performance.
 
     Args:
         input_file: Path to input JSONL file (can be bz2 compressed)
-        subsamples_file: Path to JSON file containing subsample indices
-        output_dir: Directory to save subsample files (defaults to input file directory)
+        subsets_file: Path to JSON file containing subset indices
+        output_dir: Directory to save subset files (defaults to input file directory)
         compress: Whether to compress output files with bz2
-        n_processes: Number of processes to use (defaults to min(n_subsamples, cpu_count))
+        n_processes: Number of processes to use (defaults to min(n_subsets, cpu_count))
         overwrite: If False, skip existing output files (default: False)
     """
-    # Load subsamples information
-    with open(subsamples_file, "r") as f:
-        subsamples_data = json.load(f)
+    # Load subsets information
+    with open(subsets_file, "r") as f:
+        subsets_data = json.load(f)
 
     # Determine optimal number of processes
-    n_subsamples = len(subsamples_data["subsamples"])
+    n_subsets = len(subsets_data["subsets"])
     if n_processes is None:
-        n_processes = min(n_subsamples, cpu_count())
+        n_processes = min(n_subsets, cpu_count())
 
     if output_dir is None:
         output_dir = os.path.dirname(input_file)
@@ -369,9 +361,9 @@ def subsample_jsonl_file(
     # Prepare arguments for parallel processing
     write_args = []
     skipped_files = []
-    for subsample in subsamples_data["subsamples"]:
-        subsample_id = subsample["id"]
-        output_name = f"{base_name}_subsample_{subsample_id}.jsonl"
+    for subset in subsets_data["subsets"]:
+        subset_idx = subset["index"]
+        output_name = f"{base_name}_subset_{subset_idx+1}.jsonl"
         if compress:
             output_name += ".bz2"
         output_path = os.path.join(output_dir, output_name)
@@ -381,7 +373,7 @@ def subsample_jsonl_file(
             skipped_files.append(output_path)
             continue
 
-        write_args.append((input_file, subsample["indices"], output_path, compress))
+        write_args.append((input_file, subset["indices"], output_path, compress))
 
     if skipped_files:
         print(f"Skipping {len(skipped_files)} existing files:")
@@ -389,8 +381,8 @@ def subsample_jsonl_file(
             print(f" - {file}")
 
     if write_args:
-        print(f"Processing {len(write_args)} subsamples using {n_processes} processes")
+        print(f"Processing {len(write_args)} subsets using {n_processes} processes")
         with Pool(processes=n_processes) as pool:
-            pool.starmap(write_subsample, write_args)
+            pool.starmap(write_subset, write_args)
     else:
         print("No files to process - all files exist and overwrite=False")
examples/basic_sampling.py CHANGED
@@ -7,7 +7,7 @@ import os
 def run_crag_task_1_and_2(
     file_path: str,
     fields_to_extract: list[str] = None,
-    n_subsamples: int = 5,
+    n_subsets: int = 5,
     output_dir: str = None,
     compress: bool = True,
     n_processes: int = None,
@@ -18,7 +18,7 @@ def run_crag_task_1_and_2(
     Args:
         file_path: Path to input JSONL file
         fields_to_extract: List of fields to extract from JSONL
-        n_subsamples: Number of subsamples to create
+        n_subsets: Number of subsets to create
         output_dir: Directory for output files
         compress: Whether to compress output files
         n_processes: Number of processes for parallel processing
@@ -29,27 +29,25 @@ def run_crag_task_1_and_2(
         input_file=file_path, required_fields=fields_to_extract, use_cache=True
     )
 
-    # Create output path for subsamples
+    # Create output path for subsets
     output_path = os.path.join(
         os.path.dirname(file_path),
-        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsamples.json",
+        f"{os.path.splitext(os.path.basename(file_path))[0]}_subsets.json",
     )
 
-    # Create subsamples
-    subsamples_data = sampler.create_subsamples(
-        n_subsamples=n_subsamples, output_path=output_path
-    )
+    # Create subsets
+    subsets_data = sampler.create_subsets(n_subsets=n_subsets, output_path=output_path)
 
     # Print statistics
-    print(f"Created {subsamples_data['metadata']['n_subsamples']} subsamples")
+    print(f"Created {subsets_data['metadata']['n_subsets']} subsets")
     print("\nGlobal statistics:")
-    print(json.dumps(subsamples_data["metadata"]["global_statistics"], indent=2))
-    print("\nFirst subsample statistics:")
-    print(json.dumps(subsamples_data["subsamples"][0]["statistics"], indent=2))
+    print(json.dumps(subsets_data["metadata"]["global_statistics"], indent=2))
+    print("\nFirst subset statistics:")
+    print(json.dumps(subsets_data["subsets"][0]["statistics"], indent=2))
 
-    # Write subsamples to files
-    sampler.write_subsamples(
-        subsamples_file=output_path,
+    # Write subsets to files
+    sampler.write_subsets(
+        subsets_file=output_path,
         output_dir=output_dir,
         compress=compress,
         n_processes=n_processes,
@@ -61,5 +59,9 @@ def run_crag_task_1_and_2(
 if __name__ == "__main__":
     file_path = "./local_data/crag_task_1_and_2_dev_v4.jsonl.bz2"
     fields_to_extract = ["domain", "answer", "question_type", "static_or_dynamic"]
+    n_subsets = 20
+    output_dir = "./subset/crag_task_1_and_2"
 
-    run_crag_task_1_and_2(file_path, fields_to_extract)
+    run_crag_task_1_and_2(
+        file_path, fields_to_extract, n_subsets=n_subsets, overwrite=True
+    )