import os
import json
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
class RQADataset(Dataset):
    def __init__(self, data_dir, split='train', transform=None):
        """
        Initializes the dataset.
        Args:
            data_dir: Base directory of a local copy of the dataset
                (e.g., as downloaded from the Hugging Face Hub).
            split: Dataset split ('train' or 'test').
            transform: Optional transform applied to each image.
        """
        self.data_dir = data_dir
        self.split = split
        self.transform = transform or transforms.Compose([
            transforms.Resize((512, 512)),
            transforms.ToTensor()
        ])
        # Collect question entries, one per QA pair
        self.questions = []
        self.file_names = self._load_file_names()
        self._create_questions()
        print(f"Total questions loaded: {len(self.questions)}")
    def _load_file_names(self):
        """
        Loads the list of file names to be processed for the current split.
        Returns:
            A list of file names without extensions.
        """
        if self.split == 'test':
            # Load test file names from the list provided with the dataset
            filter_list_file = os.path.join(self.data_dir, 'test_filenames.txt')
            with open(filter_list_file, 'r') as f:
                file_names = [line.strip() for line in f]
            print(f"Loaded {len(file_names)} test files from {filter_list_file}")
        else:
            # For training, use all JSON files from all three directories
            file_names = []
            for json_dir in ['jsons', 'jsons2', 'jsons3']:
                json_dir_path = os.path.join(self.data_dir, json_dir)
                json_files = [os.path.splitext(file)[0]
                              for file in os.listdir(json_dir_path)
                              if file.endswith('.json')]
                file_names.extend(json_files)
        return file_names
    def _create_questions(self):
        """
        Builds the list of questions from the JSON files.
        """
        skipped_count = 0
        for file_name in self.file_names:
            # Determine which folder contains the current JSON file. Check for
            # the actual "<name>.json" file on disk; a bare membership test
            # against os.listdir() would never match, since os.listdir()
            # returns names with their extensions.
            if os.path.exists(os.path.join(self.data_dir, 'jsons', f"{file_name}.json")):
                json_path = os.path.join(self.data_dir, 'jsons', f"{file_name}.json")
                img_dir = 'images'
            elif os.path.exists(os.path.join(self.data_dir, 'jsons2', f"{file_name}.json")):
                json_path = os.path.join(self.data_dir, 'jsons2', f"{file_name}.json")
                img_dir = 'images2'
            else:
                json_path = os.path.join(self.data_dir, 'jsons3', f"{file_name}.json")
                img_dir = 'images3'
            # Load questions from the JSON file
            with open(json_path, 'r') as f:
                json_data = json.load(f)
            for item in json_data:
                if 'PMC_ID' not in item or 'qa_id' not in item:
                    continue  # Skip entries missing required fields
                item['image_path'] = os.path.join(self.data_dir, img_dir, f"{item['PMC_ID']}.jpg")
                if os.path.exists(item['image_path']):
                    self.questions.append(item)
                else:
                    skipped_count += 1
        print(f"Questions kept / skipped (missing image): {len(self.questions)} / {skipped_count}")
    def __len__(self):
        return len(self.questions)

    def __getitem__(self, idx):
        """
        Loads a single data point.
        Args:
            idx: Index of the data point.
        Returns:
            A dictionary containing the image, question, and answer data.
        """
        question_block = self.questions[idx]
        image_path = question_block['image_path']
        image = Image.open(image_path).convert("RGB")
        # Apply transformation if available
        if self.transform:
            image = self.transform(image)
        return {
            'image': image,
            'question': question_block['question'],
            'answer': question_block['answer'],
            'qa_id': question_block['qa_id'],
            'PMC_ID': question_block['PMC_ID']
        }
    @staticmethod
    def custom_collate(batch):
        """
        Custom collate function to handle batch processing.
        Args:
            batch: A batch of data points.
        Returns:
            A dictionary containing the collated batch data.
        """
        images = [item['image'] for item in batch]
        questions = [item['question'] for item in batch]
        answers = [item['answer'] for item in batch]
        qa_ids = [item['qa_id'] for item in batch]
        pmc_ids = [item['PMC_ID'] for item in batch]
        return {
            'images': images,
            'questions': questions,
            'answers': answers,
            'qa_ids': qa_ids,
            'PMC_IDs': pmc_ids
        }
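
# Note: custom_collate deliberately keeps the images as a Python list rather
# than stacking them, which tolerates user-supplied transforms that produce
# variable-size outputs. With the default 512x512 Resize/ToTensor transform,
# the list can be stacked into one batch tensor if a model expects that; a
# minimal sketch (assumes `import torch` and the default transform):
#
#   batch['images'] = torch.stack(batch['images'])  # shape (B, 3, 512, 512)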
if __name__ == "__main__":
    # Initialize dataset for training
    dataset = RQADataset(data_dir='.', split='train')

    # Test loading a single item
    print(f"Number of samples in dataset: {len(dataset)}")
    sample = dataset[0]
    print("Sample data:", sample)

    # Initialize DataLoader
    dataloader = DataLoader(dataset, batch_size=4, collate_fn=RQADataset.custom_collate)

    # Test DataLoader
    for batch in dataloader:
        print("Batch data:", batch)
        break  # Load only one batch for testing
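
    # A minimal sketch of exercising the held-out split as well; this assumes
    # a `test_filenames.txt` file is present under data_dir, as expected by
    # _load_file_names.
    test_dataset = RQADataset(data_dir='.', split='test')
    print(f"Number of test samples: {len(test_dataset)}")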