Dataset card metadata:
  Tasks: Object Detection
  Formats: webdataset
  Languages: English
  Size: < 1K
  Tags: webdataset
  ArXiv: (none listed)
  License: (unspecified)
# Standard library
import io
import os
import re
import unittest

# Third-party
import lmdb
import torch
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from tqdm import tqdm

# Use the file-system sharing strategy so many DataLoader workers do not
# exhaust file descriptors when passing tensors between processes.
torch.multiprocessing.set_sharing_strategy('file_system')
class LMDBImageDataset(Dataset):
    """Dataset of JPEG crops stored in an LMDB database.

    Keys are filenames of the form ``..._frame_<F>_cow_<C>.jpg``; items are
    served sorted numerically by ``(frame, cow)`` and each item is a
    ``(image, cow_id)`` pair.
    """

    # Compiled once at class level instead of on every sort call and every
    # __getitem__ call; matches e.g. "pmfeed_4_3_16_frame_10000_cow_1.jpg".
    _KEY_RE = re.compile(r'frame_(\d+)_cow_(\d+)')

    def __init__(self, lmdb_path, transform=None, limit=None):
        """
        Args:
            lmdb_path (str): Path to the LMDB directory.
            transform (callable, optional): Optional transform to be applied on an image.
            limit (int or float, optional): If a float between 0 and 1, keeps that fraction of keys.
                                            If an int, keeps that many keys.

        Raises:
            ValueError: If ``limit`` is a float outside ``[0, 1]``.
            TypeError: If ``limit`` is neither a float nor an int.
        """
        # Open the LMDB environment in read-only mode; lock and readahead are
        # disabled so multiple DataLoader workers can read concurrently.
        self.env = lmdb.open(lmdb_path, readonly=True, lock=False, readahead=False)
        self.transform = transform

        # Retrieve all keys from the LMDB database.
        with self.env.begin() as txn:
            keys = [key.decode('utf-8') for key, _ in txn.cursor()]

        def sort_key(filename):
            # Sort numerically by (frame, cow); names that do not match the
            # expected pattern sort to the end.
            match = self._KEY_RE.search(filename)
            if match:
                return (int(match.group(1)), int(match.group(2)))
            return (float('inf'), float('inf'))

        keys.sort(key=sort_key)

        # Apply the limit if provided. The float check comes first so that a
        # fractional limit is never mistaken for a count.
        if limit is not None:
            if isinstance(limit, float):
                if 0 <= limit <= 1:
                    keys = keys[:int(len(keys) * limit)]
                else:
                    raise ValueError("If limit is a float, it must be between 0 and 1.")
            elif isinstance(limit, int):
                keys = keys[:limit]
            else:
                raise TypeError("limit must be either a float or an integer.")
        self.keys = keys

    def __getitem__(self, index):
        """Return ``(image, cow_id)`` for the key at ``index``.

        Raises:
            KeyError: If the key is no longer present in the database.
        """
        key_str = self.keys[index]
        with self.env.begin() as txn:
            image_bytes = txn.get(key_str.encode('utf-8'))
        if image_bytes is None:
            # txn.get returns None for a missing key; fail loudly instead of
            # letting BytesIO(None) raise an opaque TypeError.
            raise KeyError(f"Key not found in LMDB: {key_str}")

        # Convert binary image data to a PIL Image.
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        if self.transform:
            image = self.transform(image)

        # Extract the cow id from the filename; -1 marks an unparseable name.
        match = self._KEY_RE.search(key_str)
        cow_id = int(match.group(2)) if match else -1
        return image, cow_id

    def __len__(self):
        """Return the number of (possibly limited) keys."""
        return len(self.keys)
class TestLMDBImageDataset(unittest.TestCase):
    """Integration tests against the pmfeed_4_3_16 crops LMDB database."""

    def test_dataset_length(self):
        # Resize to a fixed size and convert to tensor.
        preprocess = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
        ])
        db_path = '../lmdb_all_crops_pmfeed_4_3_16'

        # Keys are sorted (frame, cow); each frame holds cows 1..8, so the
        # first 20 keys span frames 1-2 fully plus cows 1-4 of frame 3.
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess, limit=20)
        self.assertEqual(len(ds), 20)
        expected_first = [
            f'pmfeed_4_3_16_frame_{frame}_cow_{cow}.jpg'
            for frame in (1, 2, 3)
            for cow in range(1, 9)
        ][:20]
        self.assertEqual(ds.keys, expected_first)

        # With limit=100 the tail covers cows 3-8 of frame 12 and 1-4 of 13.
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess, limit=100)
        self.assertEqual(len(ds), 100)
        expected_last = (
            [f'pmfeed_4_3_16_frame_12_cow_{cow}.jpg' for cow in range(3, 9)]
            + [f'pmfeed_4_3_16_frame_13_cow_{cow}.jpg' for cow in range(1, 5)]
        )
        self.assertEqual(ds.keys[-10:], expected_last)

        # No limit: the full database.
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess)
        self.assertEqual(len(ds), 537908)

        # Fractional limits keep the corresponding prefix of the keys.
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess, limit=0.5)
        self.assertEqual(len(ds), 268954)
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess, limit=0.3)
        self.assertEqual(len(ds), 161372)

    def test_data_loading(self):
        # Resize to a fixed size and convert to tensor.
        preprocess = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
        ])
        db_path = '../lmdb_all_crops_pmfeed_4_3_16'
        ds = LMDBImageDataset(lmdb_path=db_path, transform=preprocess)

        loader = DataLoader(
            ds,
            batch_size=256,
            shuffle=False,
            num_workers=8,
        )

        # Collect the cow-id labels from every batch.
        label_batches = [labels for _, labels in tqdm(loader, unit='batch')]
        labels = torch.cat(label_batches, dim=0)
        self.assertEqual(len(labels), 537908)
        self.assertEqual(set(labels.tolist()), {1, 2, 3, 4, 5, 6, 7, 8})
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()