Update vit_Training.py
vit_Training.py  CHANGED  (+8, -11)
@@ -19,13 +19,13 @@ class CustomDataset(Dataset):
         return len(self.dataframe)

     def __getitem__(self, idx):
-        image_path = self.dataframe.iloc[idx, 0]
+        image_path = self.dataframe.iloc[idx, 0]
         image = Image.open(image_path).convert('RGB')  # Convert to RGB format

         if self.transform:
             image = self.transform(image)

-        label = self.dataframe.iloc[idx, 1]
+        label = self.dataframe.iloc[idx, 1]
         return image, label

 def shuffle_and_split_data(dataframe, test_size=0.2, random_state=59):
@@ -35,23 +35,23 @@ def shuffle_and_split_data(dataframe, test_size=0.2, random_state=59):

 class Custom_VIT_Model:
     def __init__(self):
-        #
+        # Use the GPU (CUDA) if available, otherwise fall back to the CPU
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

-        # Load the pre-trained ViT model
+        # Load the pre-trained ViT model
         self.model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(self.device)

         # Freeze pre-trained layers
         for param in self.model.parameters():
             param.requires_grad = False

-        # Define a new classifier
+        # Define a new classifier head with 2 outputs (0, 1)
         self.model.classifier = nn.Linear(self.model.config.hidden_size, 2).to(self.device)

-        #
+        # Set up the optimizer
         self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)

-        #
+        # Image preprocessing: resize to 224x224 and convert the PIL image to a tensor
         self.preprocess = transforms.Compose([
             transforms.Resize((224, 224)),
             transforms.ToTensor()
@@ -103,7 +103,7 @@ class Custom_VIT_Model:
         # Define the loss function
         criterion = nn.CrossEntropyLoss().to(self.device)

-        # Training loop
+        # Training loop
         num_epochs = 10
         for epoch in range(num_epochs):
             self.model.train()
@@ -139,8 +139,5 @@ class Custom_VIT_Model:
         print("Model retrained and updated!")

 if __name__ == "__main__":
-    # Initialize the model
     custom_model = Custom_VIT_Model()

-    # Example usage: adding a new image and label
-    # custom_model.add_data('path/to/image.jpg', 0)  # 0 for real, 1 for fake
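For readers skimming the diff, the setup these comments describe is the standard frozen-backbone fine-tuning pattern: load a pre-trained ViT, freeze its weights, swap in a fresh 2-output classifier head, and train with Adam on 224x224 image tensors. Below is a minimal, self-contained sketch of that pattern, assuming the same google/vit-base-patch16-224 checkpoint and the transformers/torchvision APIs the file already uses; the random dummy image and the label value are placeholders, not data from the commit.

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from transformers import ViTForImageClassification
from PIL import Image

# Use the GPU (CUDA) if available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the pre-trained ViT and freeze its backbone
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(device)
for param in model.parameters():
    param.requires_grad = False

# Replace the classifier head with a new 2-output linear layer (only this layer will train)
model.classifier = nn.Linear(model.config.hidden_size, 2).to(device)

# Optimizer and loss, mirroring the script
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss().to(device)

# Same preprocessing: resize to the ViT input size and convert to a tensor
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

# One illustrative training step on a dummy image (stands in for a real photo)
dummy = Image.fromarray(np.random.randint(0, 255, (300, 300, 3), dtype=np.uint8))
image = preprocess(dummy).unsqueeze(0).to(device)   # shape: [1, 3, 224, 224]
label = torch.tensor([0]).to(device)                # 0 for real, 1 for fake, per the original example comment

model.train()
optimizer.zero_grad()
outputs = model(pixel_values=image)   # ViTForImageClassification returns an output object with .logits
loss = criterion(outputs.logits, label)
loss.backward()
optimizer.step()

Because every backbone parameter has requires_grad=False, only the new classifier layer receives gradients, so Adam effectively updates just the head even though it is constructed over model.parameters().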
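Likewise, the CustomDataset.__getitem__ shown in the first hunk reads the image path from column 0 and the label from column 1 of a dataframe. The sketch below shows how it could be fed to a DataLoader; it assumes the constructor takes the dataframe and an optional transform (which the self.dataframe/self.transform attributes suggest), and the paths and labels are illustrative placeholders only.

import pandas as pd
from torch.utils.data import DataLoader
from torchvision import transforms
from vit_Training import CustomDataset  # assumes the script's classes are importable from the repo

# Illustrative dataframe: column 0 = image path, column 1 = integer label
# (paths and labels below are placeholders, not data from the repository)
df = pd.DataFrame({
    'image_path': ['data/real_001.jpg', 'data/fake_001.jpg'],
    'label': [0, 1],  # 0 for real, 1 for fake, per the original example comment
})

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])

dataset = CustomDataset(df, transform=preprocess)  # assumed constructor signature
loader = DataLoader(dataset, batch_size=2, shuffle=True)

for images, labels in loader:
    print(images.shape, labels)  # e.g. torch.Size([2, 3, 224, 224]) and a label tensor
    break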