# RealCQA / dataset.py
import os
import json
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms


class RQADataset(Dataset):
def __init__(self, data_dir, split='train', transform=None):
"""
Initializes the dataset.
Args:
data_dir: Base directory of the dataset on the Hugging Face Hub.
split: Split of the dataset ('train' or 'test').
transform: Optional transform to be applied on a sample.
"""
self.data_dir = data_dir
self.split = split
self.transform = transform or transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor()
])
# Initialize lists to hold image and question data
self.questions = []
self.file_names = self._load_file_names()
self._create_questions()
print(f"Total Questions Loaded: {len(self.questions)}")
def _load_file_names(self):
"""
Loads the list of file names to be processed based on the split.
Returns:
A list of file names without extensions.
"""
if self.split == 'test':
# Load test file names from the list provided on Hugging Face
filter_list_file = os.path.join(self.data_dir, 'test_filenames.txt')
with open(filter_list_file, 'r') as f:
                file_names = [line.strip() for line in f if line.strip()]
print(f"Loaded {len(file_names)} test files from {filter_list_file}")
else:
# For training, use all JSON files from all directories
file_names = []
for json_dir in ['jsons', 'jsons2', 'jsons3']:
json_dir_path = os.path.join(self.data_dir, json_dir)
json_files = [os.path.splitext(file)[0] for file in os.listdir(json_dir_path) if file.endswith('.json')]
file_names.extend(json_files)
return file_names
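
    # Expected repository layout, inferred from the loading logic above
    # (an assumption about the Hub repo, not verified against it):
    #
    #   data_dir/
    #       test_filenames.txt            # one file stem per line (test split)
    #       jsons/  jsons2/  jsons3/      # question/answer JSON files
    #       images/ images2/ images3/     # chart images named <PMC_ID>.jpg
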
def _create_questions(self):
"""
Creates the list of questions from JSON files.
"""
        unused_count = 0
        for file_name in self.file_names:
            # Determine which folder contains the current JSON file; file
            # names are stored without extension, so probe for the .json
            # file directly rather than scanning directory listings.
            if os.path.exists(os.path.join(self.data_dir, 'jsons', f"{file_name}.json")):
                json_path = os.path.join(self.data_dir, 'jsons', f"{file_name}.json")
                img_dir = 'images'
            elif os.path.exists(os.path.join(self.data_dir, 'jsons2', f"{file_name}.json")):
                json_path = os.path.join(self.data_dir, 'jsons2', f"{file_name}.json")
                img_dir = 'images2'
            else:
                json_path = os.path.join(self.data_dir, 'jsons3', f"{file_name}.json")
                img_dir = 'images3'
            # Load questions from the JSON file
            with open(json_path, 'r') as f:
                json_data = json.load(f)
            for item in json_data:
                if 'PMC_ID' not in item or 'qa_id' not in item:
                    continue  # skip entries missing required fields
                item['image_path'] = os.path.join(self.data_dir, img_dir, f"{item['PMC_ID']}.jpg")
                if os.path.exists(item['image_path']):
                    self.questions.append(item)
                else:
                    unused_count += 1
        print(f"Questions kept: {len(self.questions)}; skipped (missing image): {unused_count}")

def __len__(self):
        return len(self.questions)

def __getitem__(self, idx):
"""
Loads a single data point.
Args:
idx: Index of the data point.
Returns:
A dictionary containing the image, question, and answer data.
"""
question_block = self.questions[idx]
image_path = question_block['image_path']
image = Image.open(image_path).convert("RGB")
        # Apply the transform (a default resize + ToTensor is set in __init__)
if self.transform:
image = self.transform(image)
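        # The returned sample is a plain dict; with the default transform the
        # image is a FloatTensor of shape (3, 512, 512) and the remaining
        # fields are strings (illustrative, based on the fields used below).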
return {
'image': image,
'question': question_block['question'],
'answer': question_block['answer'],
'qa_id': question_block['qa_id'],
'PMC_ID': question_block['PMC_ID']
        }

@staticmethod
def custom_collate(batch):
"""
Custom collate function to handle batch processing.
Args:
batch: A batch of data points.
Returns:
A dictionary containing the collated batch data.
"""
images = [item['image'] for item in batch]
questions = [item['question'] for item in batch]
answers = [item['answer'] for item in batch]
qa_ids = [item['qa_id'] for item in batch]
pmc_ids = [item['PMC_ID'] for item in batch]
return {
'images': images,
'questions': questions,
'answers': answers,
'qa_ids': qa_ids,
'PMC_IDs': pmc_ids
}
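
# Note: `custom_collate` returns images as a Python list rather than a stacked
# tensor, so a batch can hold variable-size images. If every sample uses the
# default 512x512 resize, callers can stack the list themselves; a sketch,
# assuming `import torch` is added at the top of the file:
#
#   batch = next(iter(dataloader))
#   pixels = torch.stack(batch['images'])  # shape (batch_size, 3, 512, 512)
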
if __name__ == "__main__":
# Initialize dataset for training
dataset = RQADataset(data_dir='.', split='train')
# Test loading a single item
print(f"Number of samples in dataset: {len(dataset)}")
sample = dataset[0]
print("Sample data:", sample)
# Initialize DataLoader
dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)
# Test DataLoader
for batch in dataloader:
print("Batch data:", batch)
break # Load only one batch for testing
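
    # Example (a sketch, assuming the same directory layout is present):
    # load the test split with a custom transform and the same collate fn.
    # The 384x384 size is an illustrative choice, not a dataset requirement.
    # test_dataset = RQADataset(
    #     data_dir='.',
    #     split='test',
    #     transform=transforms.Compose([
    #         transforms.Resize((384, 384)),
    #         transforms.ToTensor(),
    #     ]),
    # )
    # test_loader = DataLoader(test_dataset, batch_size=4,
    #                          collate_fn=RQADataset.custom_collate)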