mskrt committed on
Commit
f0542de
·
verified ·
1 Parent(s): 703f076

Upload PAIR.py

Browse files
Files changed (1) hide show
  1. PAIR.py +42 -16
PAIR.py CHANGED
@@ -16,6 +16,7 @@
16
  import json
17
 
18
  import datasets
 
19
 
20
  # TODO: Add BibTeX citation
21
  # Find for instance the citation on arxiv or on the dataset repo/website
@@ -49,19 +50,43 @@ _URLS = {
49
 
50
 
51
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
52
- annotation2type = {"names": datasets.Value("string"),
53
- "EC": datasets.Sequence(datasets.Value("string")),
54
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  class PAIRDataset(datasets.GeneratorBasedBuilder):
57
  """PAIRDataset."""
58
- def __init__(self, *args, annotation_type=None, **kwargs):
59
- super().__init__(*args, **kwargs)
60
- self.annotation_type = annotation_type # Save the custom argument for later use
61
- print(self.annotation_type)
62
- exit()
 
 
 
 
 
63
  def _info(self):
64
  """_info."""
 
 
65
  return datasets.DatasetInfo(
66
  description="My custom dataset.",
67
  features=datasets.Features(
@@ -73,8 +98,8 @@ class PAIRDataset(datasets.GeneratorBasedBuilder):
73
  ),
74
  supervised_keys=None,
75
  )
76
- #"annotation_type": datasets.Value("string"),
77
- #"annotation": datasets.Value("string"),
78
 
79
  def _split_generators(self, dl_manager):
80
  """_split_generators.
@@ -86,6 +111,7 @@ class PAIRDataset(datasets.GeneratorBasedBuilder):
86
  """
87
  # Implement logic to download and extract data files
88
  # For simplicity, assume data_files is a dict with paths to your data
 
89
  data_files = {
90
  "train": "train.json",
91
  "test": "test.json",
@@ -110,26 +136,26 @@ class PAIRDataset(datasets.GeneratorBasedBuilder):
110
  filepath
111
  """
112
  # Implement your data reading logic here
 
113
  with open(filepath, encoding="utf-8") as f:
114
  data = json.load(f)
115
  counter = 0
116
  for idx, annotation_type in enumerate(data.keys()):
117
- if annotation_type != self.annotation_type: continue
 
 
118
  # Parse your line into the appropriate fields
119
  samples = data[annotation_type]
120
  for idx_2, elem in enumerate(samples):
121
  # example = parse_line_to_example(line)
122
  if elem["content"] != [None]:
123
- unique_id = f"{elem['pid']}_{idx}"
124
  content = elem["content"][0]
125
  # print(literal_eval(content), "done")
126
- yield unique_id, {
127
  "sequence": elem["seq"],
128
  "pid": elem["pid"],
129
  annotation_type: content,
130
  }
131
  counter += 1
132
 
133
-
134
- #"annotation_type": annotation_type,
135
-
 
16
  import json
17
 
18
  import datasets
19
+ from datasets import BuilderConfig
20
 
21
  # TODO: Add BibTeX citation
22
  # Find for instance the citation on arxiv or on the dataset repo/website
 
50
 
51
 
52
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
53
# Feature-type lookup: maps each annotation key to the `datasets` feature
# spec used to decode it ("names" is a single string, "EC" a string list).
annotation2type = {
    "names": datasets.Value("string"),
    "EC": datasets.Sequence(datasets.Value("string")),
}
57
+
58
+
59
class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig carrying a custom ``annotation_type`` option.

    ``annotation_type`` selects which annotation key of the raw JSON the
    builder should yield (defaults to ``"function"``).
    """

    def __init__(self, **kwargs):
        """Extract ``annotation_type`` and forward the rest to the base config.

        Parameters
        ----------
        kwargs :
            Keyword arguments for ``datasets.BuilderConfig``; may include
            ``annotation_type``.
        """
        # Pop before delegating so the custom key is not forwarded to the
        # base BuilderConfig constructor as an unexpected keyword.
        self.annotation_type = kwargs.pop("annotation_type", "function")
        # Py3 zero-argument super() replaces the legacy two-argument form.
        super().__init__(**kwargs)
73
 
74
  class PAIRDataset(datasets.GeneratorBasedBuilder):
75
  """PAIRDataset."""
76
+
77
+ BUILDER_CONFIGS = [
78
+ CustomConfig(
79
+ name="custom_config",
80
+ version="1.0.0",
81
+ description="your description",
82
+ ),
83
+ ] # Configs initialization
84
+ BUILDER_CONFIG_CLASS = CustomConfig # Must specify this to use custom config
85
+
86
  def _info(self):
87
  """_info."""
88
+ self.annotation_type = self.config_kwargs["annotation_type"]
89
+ # Confirm annotation_type is set before continuing
90
  return datasets.DatasetInfo(
91
  description="My custom dataset.",
92
  features=datasets.Features(
 
98
  ),
99
  supervised_keys=None,
100
  )
101
+ # "annotation_type": datasets.Value("string"),
102
+ # "annotation": datasets.Value("string"),
103
 
104
  def _split_generators(self, dl_manager):
105
  """_split_generators.
 
111
  """
112
  # Implement logic to download and extract data files
113
  # For simplicity, assume data_files is a dict with paths to your data
114
+ print("in generator self.annotation", self.annotation_type)
115
  data_files = {
116
  "train": "train.json",
117
  "test": "test.json",
 
136
  filepath
137
  """
138
  # Implement your data reading logic here
139
+ print("in generator 2 self.annotation", self.annotation_type)
140
  with open(filepath, encoding="utf-8") as f:
141
  data = json.load(f)
142
  counter = 0
143
  for idx, annotation_type in enumerate(data.keys()):
144
+ print(annotation_type, self.annotation_type)
145
+ if annotation_type != self.annotation_type:
146
+ continue
147
  # Parse your line into the appropriate fields
148
  samples = data[annotation_type]
149
  for idx_2, elem in enumerate(samples):
150
  # example = parse_line_to_example(line)
151
  if elem["content"] != [None]:
 
152
  content = elem["content"][0]
153
  # print(literal_eval(content), "done")
154
+ yield counter, {
155
  "sequence": elem["seq"],
156
  "pid": elem["pid"],
157
  annotation_type: content,
158
  }
159
  counter += 1
160
 
161
+ # "annotation_type": annotation_type,