Datasets:

Modalities:
Image
ArXiv:
Libraries:
Datasets
License:
RealCQA / dataset.py
sal4ahm's picture
added dataset.py
abe92bb
raw
history blame
6.03 kB
import os
import json
import time
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
class RQADataset(Dataset):
    """Chart question-answering dataset for RealCQA.

    Each sample pairs a chart image (``<img_dir>/<PMC_ID>.jpg``) with one
    QA record read from the per-chart JSON files in ``json_dir``. In test
    mode (``data_config.train`` falsy) only the file names listed in
    ``data_config.filter_list`` are processed; in training mode every
    ``*.json`` file in ``json_dir`` is used.
    """

    # Every key that __getitem__ reads from a record; records lacking any
    # of these are dropped up front instead of crashing during iteration.
    REQUIRED_KEYS = ('PMC_ID', 'qa_id', 'question', 'answer')

    def __init__(self, data_config, transform=None):
        """
        Initializes the dataset.

        Args:
            data_config: Object exposing ``img_dir``, ``json_dir``,
                ``filter_list`` and ``train`` attributes.
            transform: Optional transform applied to each PIL image.
                Defaults to a plain 512x512 resize.
        """
        self.img_dir = data_config.img_dir
        self.json_dir = data_config.json_dir
        self.filter_list_file = data_config.filter_list
        self.train = data_config.train
        self.transform = transform or transforms.Compose([
            transforms.Resize((512, 512))
        ])
        self.questions = []
        # Load file names for testing or use all files for training.
        self.file_names = self._load_file_names()
        self._create_questions()
        print(f"Total Questions Loaded: {len(self.questions)}")

    def _load_file_names(self):
        """
        Loads the list of file names to be processed.

        Returns:
            A list of file names without extensions.
        """
        if not self.train and self.filter_list_file:
            with open(self.filter_list_file, 'r') as f:
                # Skip blank lines so a trailing newline in the filter
                # file does not create a ghost entry.
                file_names = [line.strip() for line in f if line.strip()]
            print(f"Loaded {len(file_names)} test files from {self.filter_list_file}")
            return file_names
        # Training mode: every JSON annotation file in the directory.
        return [os.path.splitext(file)[0]
                for file in os.listdir(self.json_dir)
                if file.endswith('.json')]

    def _create_questions(self):
        """
        Builds ``self.questions`` from the JSON annotation files.

        A record is kept only when it carries every key in
        ``REQUIRED_KEYS`` and its chart image exists on disk.
        """
        start_time = time.time()
        missing_json = 0      # annotation files listed but absent on disk
        skipped_records = 0   # records dropped (incomplete or image missing)
        for file_name in self.file_names:
            json_path = os.path.join(self.json_dir, file_name + '.json')
            if not os.path.exists(json_path):
                missing_json += 1
                continue
            with open(json_path, 'r') as f:
                json_data = json.load(f)
            for item in json_data:
                # Validate ALL fields __getitem__ needs; the previous
                # check covered only PMC_ID/qa_id, so records without
                # question/answer raised KeyError later in _load_data.
                if any(key not in item for key in self.REQUIRED_KEYS):
                    skipped_records += 1
                    continue
                item['image_path'] = os.path.join(self.img_dir, item['PMC_ID'] + '.jpg')
                if os.path.exists(item['image_path']):
                    self.questions.append(item)
                else:
                    skipped_records += 1
        elapsed_time = time.time() - start_time
        print(f"Elapsed time to create questions: {elapsed_time:.2f} seconds = {elapsed_time/60:.2f} minutes")
        # Report files and records separately; the old message folded both
        # into one counter, producing misleading "used" numbers.
        print(f"Missing JSON files: {missing_json}; skipped records: {skipped_records}; kept records: {len(self.questions)}")

    def __len__(self):
        return len(self.questions)

    def __getitem__(self, idx):
        return self._load_data(idx)

    def _load_data(self, idx):
        """
        Loads a single data point.

        Args:
            idx: Index of the data point.

        Returns:
            A dictionary containing the image, question, and answer data.
        """
        question_block = self.questions[idx]
        image_path = question_block['image_path']
        image = Image.open(image_path).convert("RGB")
        # Apply transformation if available.
        if self.transform:
            image = self.transform(image)
        return {
            'image': image,
            'question': question_block['question'],
            'answer': question_block['answer'],
            'qa_id': question_block['qa_id'],
            'PMC_ID': question_block['PMC_ID']
        }

    @staticmethod
    def custom_collate(batch):
        """
        Custom collate function to handle batch processing.

        Keeps each field as a plain Python list (no tensor stacking),
        which tolerates variable image sizes and string answers.

        Args:
            batch: A batch of data points.

        Returns:
            A dictionary containing the collated batch data.
        """
        images = [item['image'] for item in batch]
        questions = [item['question'] for item in batch]
        answers = [item['answer'] for item in batch]
        qa_ids = [item['qa_id'] for item in batch]
        pmc_ids = [item['PMC_ID'] for item in batch]
        return {
            'images': images,
            'questions': questions,
            'answers': answers,
            'qa_ids': qa_ids,
            'PMC_IDs': pmc_ids
        }
if __name__ == "__main__":
    def _run_smoke_test(train):
        """Build the dataset in the given mode and load one sample and one batch."""
        # Simple data structure holding the dataset paths; `train` selects
        # between the filtered test split and the full training split.
        class DataConfig:
            img_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/images'
            json_dir = '/home/jupyter/RealCQA/code/data/RQA_V0/qa'
            filter_list = '/home/jupyter/RealCQA/code/data/RQA_V0/test_filenames.txt'
        DataConfig.train = train

        # Initialize dataset
        dataset = RQADataset(DataConfig)

        # Test loading a single item
        print(f"Number of samples in dataset: {len(dataset)}")
        sample = dataset[0]
        print("Sample data:", sample)

        # Initialize DataLoader and test a single batch
        dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)
        for batch in dataloader:
            print("Batch data:", batch)
            break  # Load only one batch for testing

    # The original script duplicated this entire drive twice with only the
    # `train` flag flipped; run both modes through one helper instead.
    _run_smoke_test(train=False)
    _run_smoke_test(train=True)