File size: 2,168 Bytes
d56e828 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, DownloadManager
from typing import Any, Dict, List, Tuple
import os
import json
class ProcessedImageDataset(GeneratorBasedBuilder):
    """Dataset builder pairing image files with captions from ``metadata.jsonl``.

    Expected layout of ``data_dir``::

        data_dir/
            metadata.jsonl   # one JSON object per line: {"file_name": ..., "text": ...}
            image0001.png
            ...

    Usage: ``load_dataset(script_path, data_dir=data_dir_path)``.
    """

    VERSION = "1.0.0"

    # File extensions recognized as images when scanning data_dir.
    _IMAGE_EXTENSIONS = (".png", ".jpg", ".jpeg")

    def _info(self) -> DatasetInfo:
        """Return dataset metadata: the feature schema and supervised keys."""
        return DatasetInfo(
            features=self._features(),
            # Must name actual features: input is "image_file", target is "text".
            # (The original said "image", which does not exist in the schema.)
            supervised_keys=("image_file", "text"),
        )

    def _features(self):
        """Define the feature schema: an image plus its caption string."""
        from datasets import Features, Image, Value
        return Features({"image_file": Image(), "text": Value("string")})

    def _split_generators(self, dl_manager) -> List[SplitGenerator]:
        """Validate the user-supplied data directory and declare the splits.

        Raises:
            ValueError: if ``data_dir`` was not passed to ``load_dataset``.
        """
        if self.config.data_dir is None:
            raise ValueError(
                "Data directory unspecified. Correct usage is: "
                "load_dataset(script_path, data_dir=data_dir_path)"
            )
        return [SplitGenerator(name="train", gen_kwargs={"data_dir": self.config.data_dir})]

    def _generate_examples(self, data_dir: str):
        """Yield ``(key, example)`` pairs for every image that has a caption.

        Args:
            data_dir: Directory containing the images and ``metadata.jsonl``.

        Yields:
            ``(filename, {"image_file": path, "text": caption})`` tuples.
            Images with no entry in ``metadata.jsonl`` are silently skipped.
        """
        metadata_file_path = os.path.join(data_dir, "metadata.jsonl")
        # Index metadata by file name for O(1) lookup while scanning the directory.
        metadata = {}
        with open(metadata_file_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines in the jsonl file
                item = json.loads(line)
                metadata[item["file_name"]] = item
        for filename in os.listdir(data_dir):
            if filename.endswith(self._IMAGE_EXTENSIONS) and filename in metadata:
                yield filename, {
                    "image_file": os.path.join(data_dir, filename),
                    "text": metadata[filename]["text"],
                }
|