arssite committed on
Commit
ffbb48e
·
verified ·
1 Parent(s): e1d4cae

Upload 28 files

Browse files
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
37
  sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
 
 
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
37
  sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
38
+ CV[[:space:]]final[[:space:]]report.pdf filter=lfs diff=lfs merge=lfs -text
AD_Dataset.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+
5
class AD_Dataset(Dataset):
    """Whole-volume Alzheimer's-disease MRI dataset.

    Each line of ``data_file`` holds "<image filename> <label>" where the
    label is one of Normal / AD / MCI.  (NOTE(review): the original docstring
    said "labeled Faces in the Wild" — a copy-paste leftover.)
    """

    # text label -> integer class id
    LABEL_MAP = {'Normal': 0, 'AD': 1, 'MCI': 2}

    def __init__(self, root_dir, data_file, transform=None):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            transform (callable, optional): Optional transform applied to a sample.
        """
        self.root_dir = root_dir
        self.data_file = data_file
        self.transform = transform

    def __len__(self):
        # Context manager closes the split file promptly (the original left
        # the handle open until garbage collection).
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            lines = df.readlines()
        fields = lines[idx].split()
        img_name, img_label = fields[0], fields[1]
        image_path = os.path.join(self.root_dir, img_name)
        image = nib.load(image_path)

        try:
            label = self.LABEL_MAP[img_label]
        except KeyError:
            # The original if/elif chain silently fell through and crashed
            # later with a NameError; fail fast with a clear message.
            raise ValueError("unknown label %r at line %d of %s"
                             % (img_label, idx, self.data_file))

        if self.transform:
            image = self.transform(image)

        return {'image': image, 'label': label}
AD_Standard_2DRandomSlicesData.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from skimage.transform import resize
7
+ from PIL import Image
8
+ import random
9
+
10
+
11
+ AX_INDEX = 78
12
+ COR_INDEX = 79
13
+ SAG_INDEX = 57
14
+ AX_SCETION = "[:, :, slice_i]"
15
+ COR_SCETION = "[:, slice_i, :]"
16
+ SAG_SCETION = "[slice_i, :, :]"
17
+
18
+
19
class AD_Standard_2DRandomSlicesData(Dataset):
    """2D-slice dataset over 3D MRI volumes.

    ``__getitem__`` returns a shuffled list of {'image', 'label'} samples
    built from randomly shifted axial, coronal and sagittal 3-channel
    slices of one volume.  Each line of ``data_file`` holds
    "<image filename> <label>" (Normal / AD / MCI).
    """

    # text label -> integer class id
    LABEL_MAP = {'Normal': 0, 'AD': 1, 'MCI': 2}

    def __init__(self, root_dir, data_file, transform=None, slice=slice):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            transform (callable, optional): Transform applied to each 2D slice.
            slice: unused (shadows the builtin); kept so existing callers
                that pass it keep working.
        """
        self.root_dir = root_dir
        self.data_file = data_file
        self.transform = transform

    def __len__(self):
        # close the split file promptly (original leaked the handle)
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            lines = df.readlines()
        fields = lines[idx].split()
        img_name, img_label = fields[0], fields[1]
        image = nib.load(os.path.join(self.root_dir, img_name))

        try:
            label = self.LABEL_MAP[img_label]
        except KeyError:
            # original fell through to a NameError on unknown labels
            raise ValueError("unknown label %r in %s" % (img_label, self.data_file))

        samples = []
        for img2DList in (axRandomSlice(image),
                          corRandomSlice(image),
                          sagRandomSlice(image)):
            for image2D in img2DList:
                if self.transform:
                    image2D = self.transform(image2D)
                samples.append({'image': image2D, 'label': label})
        random.shuffle(samples)
        return samples
65
+
66
+
67
def getRandomSlice(image_array, keyIndex, section, step=1):
    """Extract one 3-channel 2D image around a randomly shifted key slice.

    Args:
        image_array (np.ndarray): 3D volume.
        keyIndex (int): anatomical key slice index; a uniform shift in
            [-9, 9] is applied as augmentation.
        section (str): indexing template evaluated with ``slice_i`` bound,
            e.g. "[:, :, slice_i]".  NOTE: evaluated with ``eval`` — only
            pass the trusted, module-defined templates.
        step (int): distance between the three stacked slices.

    Returns:
        list containing a single (H, W, 3) ndarray.
    """
    # The original used exec("slice_select_%d = ...") which does NOT bind
    # function locals in Python 3, so np.stack received (None, None, None)
    # and crashed.  Collect the slices in a list instead.
    slice_p = keyIndex + random.randint(-9, 9)
    channels = []
    for slice_i in range(slice_p - step, slice_p + step + 1, step):
        channels.append(eval("image_array" + section))
    return [np.stack(channels, axis=2)]
84
+
85
def axRandomSlice(image):
    """Randomly shifted axial 3-channel slice(s) of a nibabel image."""
    # image.get_data() was removed in nibabel 4.0; np.asarray(image.dataobj)
    # is the drop-in replacement that keeps the on-disk dtype (get_fdata
    # would silently upcast to float64).
    image_array = np.asarray(image.dataobj)
    return getRandomSlice(image_array, AX_INDEX, AX_SCETION)


def corRandomSlice(image):
    """Randomly shifted coronal 3-channel slice(s) of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getRandomSlice(image_array, COR_INDEX, COR_SCETION)


def sagRandomSlice(image):
    """Randomly shifted sagittal 3-channel slice(s) of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getRandomSlice(image_array, SAG_INDEX, SAG_SCETION)
98
+
99
+
100
+
101
+
AD_Standard_2DSlicesData.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from skimage.transform import resize
7
+ from PIL import Image
8
+ import random
9
+
10
+
11
+ AX_SCETION = "[:, :, slice_i]"
12
+ COR_SCETION = "[:, slice_i, :]"
13
+ SAG_SCETION = "[slice_i, :, :]"
14
+ AX_INDEX = 78
15
+ COR_INDEX = 79
16
+ SAG_INDEX = 57
17
+
18
class AD_Standard_2DSlicesData(Dataset):
    """Deterministic 2D-slice dataset over 3D MRI volumes.

    ``__getitem__`` returns a shuffled list of {'image', 'label'} samples
    built from the fixed axial, coronal and sagittal key slices of one
    volume.  Each line of ``data_file`` holds "<image filename> <label>"
    (Normal / AD / MCI).
    """

    # text label -> integer class id
    LABEL_MAP = {'Normal': 0, 'AD': 1, 'MCI': 2}

    def __init__(self, root_dir, data_file, transform=None, slice=slice):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            transform (callable, optional): Transform applied to each 2D slice.
            slice: unused (shadows the builtin); kept for caller compatibility.
        """
        self.root_dir = root_dir
        self.data_file = data_file
        self.transform = transform

    def __len__(self):
        # close the split file promptly (original leaked the handle)
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            lines = df.readlines()
        fields = lines[idx].split()
        img_name, img_label = fields[0], fields[1]
        image = nib.load(os.path.join(self.root_dir, img_name))

        try:
            label = self.LABEL_MAP[img_label]
        except KeyError:
            # original fell through to a NameError on unknown labels
            raise ValueError("unknown label %r in %s" % (img_label, self.data_file))

        samples = []
        for img2DList in (axKeySlice(image),
                          corKeySlice(image),
                          sagKeySlice(image)):
            for image2D in img2DList:
                if self.transform:
                    image2D = self.transform(image2D)
                samples.append({'image': image2D, 'label': label})
        random.shuffle(samples)
        return samples
64
+
65
+
66
def getSlice(image_array, keyIndex, section, step=1):
    """Extract one 3-channel 2D image centered on the key slice.

    Args:
        image_array (np.ndarray): 3D volume.
        keyIndex (int): center slice index.
        section (str): indexing template evaluated with ``slice_i`` bound,
            e.g. "[:, :, slice_i]".  NOTE: evaluated with ``eval`` — only
            pass the trusted, module-defined templates.
        step (int): distance between the three stacked slices.

    Returns:
        list containing a single (H, W, 3) ndarray.
    """
    # The original used exec("slice_select_%d = ...") which does NOT bind
    # function locals in Python 3, leaving the three slots as None and
    # crashing np.stack.  Collect the slices in a list instead.
    channels = []
    for slice_i in range(keyIndex - step, keyIndex + step + 1, step):
        channels.append(eval("image_array" + section))
    return [np.stack(channels, axis=2)]
80
+
81
+
82
def axKeySlice(image):
    """Axial 3-channel key slice of a nibabel image."""
    # image.get_data() was removed in nibabel 4.0; np.asarray(image.dataobj)
    # is the drop-in replacement that keeps the on-disk dtype.
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, AX_INDEX, AX_SCETION)


def corKeySlice(image):
    """Coronal 3-channel key slice of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, COR_INDEX, COR_SCETION)


def sagKeySlice(image):
    """Sagittal 3-channel key slice of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, SAG_INDEX, SAG_SCETION)
95
+
AD_Standard_2DTestingSlices.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from skimage.transform import resize
7
+ from PIL import Image
8
+ import random
9
+
10
+
11
+ AX_INDEX = 78
12
+ COR_INDEX = 79
13
+ SAG_INDEX = 57
14
+ AX_SCETION = "[:, :, slice_i]"
15
+ COR_SCETION = "[:, slice_i, :]"
16
+ SAG_SCETION = "[slice_i, :, :]"
17
+
18
+
19
class AD_Standard_2DTestingSlices(Dataset):
    """Testing-time 2D-slice dataset.

    ``__getitem__`` returns ``size`` {'image', 'label'} samples per volume:
    3 key slices (one per anatomical plane) when ``size == 3``, or 3 shifted
    slices per plane when ``size == 9``.  Each line of ``data_file`` holds
    "<image filename> <label>" (Normal / AD / MCI).
    """

    # text label -> integer class id
    LABEL_MAP = {'Normal': 0, 'AD': 1, 'MCI': 2}

    def __init__(self, root_dir, data_file, transform=None, slice=slice, size=9):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            transform (callable, optional): Transform applied to each 2D slice.
            slice: unused (shadows the builtin); kept for caller compatibility.
            size (int): number of slices per volume, 3 or 9.
        """
        self.root_dir = root_dir
        self.data_file = data_file
        self.transform = transform
        self.size = size

    def __len__(self):
        # close the split file promptly (original leaked the handle)
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            lines = df.readlines()
        fields = lines[idx].split()
        img_name, img_label = fields[0], fields[1]
        image = nib.load(os.path.join(self.root_dir, img_name))

        try:
            label = self.LABEL_MAP[img_label]
        except KeyError:
            # original fell through to a NameError on unknown labels
            raise ValueError("unknown label %r in %s" % (img_label, self.data_file))

        if self.size == 3:
            slice_lists = (axKeySlice(image), corKeySlice(image), sagKeySlice(image))
        elif self.size == 9:
            slice_lists = (ax3Slices(image), cor3Slices(image), sag3Slices(image))
        else:
            # original left the three lists as None and crashed iterating them
            raise ValueError("size must be 3 or 9, got %r" % (self.size,))

        samples = []
        for img2DList in slice_lists:
            for image2D in img2DList:
                if self.transform:
                    image2D = self.transform(image2D)
                samples.append({'image': image2D, 'label': label})
        assert len(samples) == self.size
        random.shuffle(samples)
        return samples
77
+
78
+
79
+
80
def getSlice(image_array, keyIndex, section, step=1):
    """Extract one 3-channel 2D image centered on the key slice.

    Args:
        image_array (np.ndarray): 3D volume.
        keyIndex (int): center slice index.
        section (str): indexing template evaluated with ``slice_i`` bound,
            e.g. "[:, :, slice_i]".  NOTE: evaluated with ``eval`` — only
            pass the trusted, module-defined templates.
        step (int): distance between the three stacked slices.

    Returns:
        list containing a single (H, W, 3) ndarray.
    """
    # The original used exec("slice_select_%d = ...") which does NOT bind
    # function locals in Python 3, leaving the slots as None and crashing
    # np.stack.  Collect the slices in a list instead.
    channels = []
    for slice_i in range(keyIndex - step, keyIndex + step + 1, step):
        channels.append(eval("image_array" + section))
    return [np.stack(channels, axis=2)]
94
+
95
+
96
def axKeySlice(image):
    """Axial 3-channel key slice of a nibabel image."""
    # image.get_data() was removed in nibabel 4.0; np.asarray(image.dataobj)
    # is the drop-in replacement that keeps the on-disk dtype.
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, AX_INDEX, AX_SCETION)


def corKeySlice(image):
    """Coronal 3-channel key slice of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, COR_INDEX, COR_SCETION)


def sagKeySlice(image):
    """Sagittal 3-channel key slice of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return getSlice(image_array, SAG_INDEX, SAG_SCETION)
109
+
110
+
111
+
112
def get3Slices(image_array, keyIndex, section, step=1):
    """Extract three 3-channel 2D images at shifts -5, 0, +5 from the key slice.

    Args:
        image_array (np.ndarray): 3D volume.
        keyIndex (int): anatomical key slice index.
        section (str): indexing template evaluated with ``slice_i`` bound,
            e.g. "[:, :, slice_i]".  NOTE: evaluated with ``eval`` — only
            pass the trusted, module-defined templates.
        step (int): distance between the three stacked channel slices.

    Returns:
        list of three (H, W, 3) ndarrays.
    """
    # The original used exec("slice_select_%d = ...") which does NOT bind
    # function locals in Python 3 — np.stack would have received None slots.
    slice_2Dimgs = []
    for shift in (-5, 0, 5):
        center = keyIndex + shift
        channels = []
        for slice_i in range(center - step, center + step + 1, step):
            channels.append(eval("image_array" + section))
        slice_2Dimgs.append(np.stack(channels, axis=2))
    return slice_2Dimgs
131
+
132
+
133
+
134
def ax3Slices(image):
    """Three shifted axial 3-channel slices of a nibabel image."""
    # image.get_data() was removed in nibabel 4.0; np.asarray(image.dataobj)
    # is the drop-in replacement that keeps the on-disk dtype.
    image_array = np.asarray(image.dataobj)
    return get3Slices(image_array, AX_INDEX, AX_SCETION)


def cor3Slices(image):
    """Three shifted coronal 3-channel slices of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return get3Slices(image_array, COR_INDEX, COR_SCETION)


def sag3Slices(image):
    """Three shifted sagittal 3-channel slices of a nibabel image."""
    image_array = np.asarray(image.dataobj)
    return get3Slices(image_array, SAG_INDEX, SAG_SCETION)
147
+
148
+
AD_Standard_3DRandomPatch.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+ from skimage.transform import resize
7
+ from PIL import Image
8
+ import random
9
+ import torch
10
+
11
+ NON_AX = (0, 1)
12
+ NON_COR = (0, 2)
13
+ NON_SAG = (1, 2)
14
+
15
+
16
class AD_Standard_3DRandomPatch(Dataset):
    """Dataset of random 3D patches: each item is {'patch': [tensor, ...]}.

    The split file lists one image filename per line (first whitespace-
    separated field); any further fields (labels) are ignored here.
    """

    def __init__(self, root_dir, data_file):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
        """
        self.root_dir = root_dir
        self.data_file = data_file

    def __len__(self):
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            img_name = df.readlines()[idx].split()[0]
        image = nib.load(os.path.join(self.root_dir, img_name))
        # image.get_data() was removed in nibabel 4.0; asarray(dataobj)
        # is the drop-in replacement that keeps the on-disk dtype.
        image_array = np.asarray(image.dataobj)
        return {"patch": getRandomPatches(image_array)}
45
+
46
+
47
def customToTensor(pic):
    """Wrap an ndarray patch as a float32 tensor with a leading channel dim."""
    if isinstance(pic, np.ndarray):
        img = torch.from_numpy(pic)
        img = torch.unsqueeze(img, 0)
        return img.float()
    # non-ndarray inputs fall through to None, as in the original


def getRandomPatches(image_array):
    """Sample 1000 random non-empty 7x7x7 patches from a 3D volume.

    Fixes two Python-3 breakages in the original:
      * ``filter(...)[0]`` — filter is a lazy, non-subscriptable iterator
        in Python 3;
      * ``xrange`` — removed in Python 3 (the original also drew 10000
        coordinates per axis while using only 1000).

    Returns:
        list of 1000 float tensors of shape (1, 7, 7, 7) (possibly smaller
        near the volume boundary, as in the original slicing).
    """

    def positive_span(mean_profile):
        # indices of the first and last strictly positive mean intensities
        nz = np.nonzero(mean_profile > 0)[0]
        return int(nz[0]), int(nz[-1])

    # Per-axis mean profiles; axis tuples mirror the module constants
    # NON_AX=(0, 1), NON_COR=(0, 2), NON_SAG=(1, 2).
    first_ax, last_ax = positive_span(image_array.mean(axis=(0, 1)))
    first_cor, last_cor = positive_span(image_array.mean(axis=(0, 2)))
    first_sag, last_sag = positive_span(image_array.mean(axis=(1, 2)))

    # trim the axial range away from the skull top/bottom
    first_ax = first_ax + 20
    last_ax = last_ax - 5

    patches = []
    for _ in range(1000):
        ax_i = random.randint(first_ax - 3, last_ax - 3)
        cor_i = random.randint(first_cor - 3, last_cor - 3)
        sag_i = random.randint(first_sag - 3, last_sag - 3)
        patch = image_array[ax_i - 3:ax_i + 4, cor_i - 3:cor_i + 4, sag_i - 3:sag_i + 4]
        # re-draw until the patch contains some tissue (non-zero voxels)
        while np.sum(patch) == 0:
            ax_i = random.randint(first_ax - 3, last_ax - 4)
            cor_i = random.randint(first_cor - 3, last_cor - 4)
            sag_i = random.randint(first_sag - 3, last_sag - 4)
            patch = image_array[ax_i - 3:ax_i + 4, cor_i - 3:cor_i + 4, sag_i - 3:sag_i + 4]
        patches.append(customToTensor(patch))
    return patches
87
+
88
+
89
+ # plt.imshow(array[i][3,:,:], cmap = 'gray')
90
+ # plt.savefig('./section.png', dpi=100)
91
+
92
+
93
+
94
+
AD_Standard_CNN_Dataset.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import nibabel as nib
2
+ import os
3
+ from torch.utils.data import Dataset
4
+ import numpy as np
5
+ import torch
6
+ import random
7
+
8
class AD_Standard_CNN_Dataset(Dataset):
    """Whole-volume dataset for 3D CNN training with optional Gaussian noise.

    Each line of ``data_file`` holds "<image filename> <label>"
    (Normal / AD / MCI).  Items are {'image': (1, D, H, W) float tensor,
    'label': int}.
    """

    # text label -> integer class id
    LABEL_MAP = {'Normal': 0, 'AD': 1, 'MCI': 2}

    def __init__(self, root_dir, data_file, transform=None, noise=True):
        """
        Args:
            root_dir (string): Directory of all the images.
            data_file (string): File name of the train/test split file.
            transform (callable, optional): unused here; kept for interface
                compatibility with the other dataset classes.
            noise (boolean): add random Gaussian noise as augmentation.
        """
        self.root_dir = root_dir
        self.data_file = data_file
        self.transform = transform
        self.noise = noise

    def __len__(self):
        # close the split file promptly (original leaked the handle)
        with open(self.data_file) as df:
            return sum(1 for _ in df)

    def __getitem__(self, idx):
        with open(self.data_file) as df:
            lines = df.readlines()
        fields = lines[idx].split()
        img_name, img_label = fields[0], fields[1]
        image = nib.load(os.path.join(self.root_dir, img_name))

        try:
            label = self.LABEL_MAP[img_label]
        except KeyError:
            # original fell through to a NameError on unknown labels
            raise ValueError("unknown label %r in %s" % (img_label, self.data_file))

        # image.get_data() was removed in nibabel 4.0
        image_array = np.asarray(image.dataobj)
        if self.noise:
            image_array = gaussianNoise(image_array)
        image_array = customToTensor(image_array)
        return {'image': image_array, 'label': label}
50
+
51
def customToTensor(pic):
    """Convert an ndarray volume to a float32 tensor with a leading channel dim."""
    if isinstance(pic, np.ndarray):
        tensor = torch.from_numpy(pic)
        # prepend the channel dimension expected by Conv3d
        return tensor.unsqueeze(0).float()
    # non-ndarray input implicitly yields None, matching the original
57
+
58
def gaussianNoise(img_array):
    """Add zero-mean Gaussian noise with a variance drawn from a fixed list.

    A variance of 0 (one of the choices) leaves the image unchanged.
    """
    variance_choices = [0, 0.0005, 0.00075, 0.001, 0.0025, 0.005]
    width, height, depth = img_array.shape
    # draw the noise level first, then the noise field (call order matters
    # for reproducibility under seeded RNGs)
    sigma = random.choice(variance_choices) ** 0.5
    noise_field = np.random.normal(0, sigma, (width, height, depth))
    noise_field = noise_field.reshape(width, height, depth)
    return img_array + noise_field
68
+
69
+
AlexNet2D.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+ import torch.utils.model_zoo as model_zoo
3
+ import math
4
+
5
+
6
+ __all__ = ['AlexNet', 'alexnet']
7
+
8
+
9
+ model_urls = {
10
+ 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
11
+ }
12
+
13
+
14
class AlexNet(nn.Module):
    """Standard 2D AlexNet (features + classifier).

    Layer layout matches torchvision's AlexNet so the pretrained checkpoint
    in ``model_urls`` loads without any key remapping.
    """

    def __init__(self, num_classes=1000):
        super(AlexNet, self).__init__()
        conv_stack = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        mlp_stack = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.features = nn.Sequential(*conv_stack)
        self.classifier = nn.Sequential(*mlp_stack)

    def forward(self, x):
        feats = self.features(x)
        # flatten to (batch, 256*6*6) for the fully-connected head
        flat = feats.view(feats.size(0), 256 * 6 * 6)
        return self.classifier(flat)
48
+
49
+
50
def alexnet(pretrained=False, **kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Args:
        pretrained (bool): If True, loads the ImageNet checkpoint, freezes
            all feature layers except the last conv block, and appends a
            2-way LogSoftmax head for fine-tuning.
    """
    model = AlexNet(**kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
        # freeze the whole feature extractor ...
        for p in model.features.parameters():
            p.requires_grad = False
        # ... then fine-tune only the last convolution layer
        for p in model.features[10].parameters():
            p.requires_grad = True

        # 2-class fine-tuning head.  The module is (mis)named 'sigmoid' —
        # kept for checkpoint-key compatibility — but is a LogSoftmax.
        # dim=1 normalizes across the class dimension; the original omitted
        # dim, which is deprecated and ambiguous in modern PyTorch.
        model.classifier.add_module('fc_out', nn.Linear(1000, 2))
        model.classifier.add_module('sigmoid', nn.LogSoftmax(dim=1))

        stdv = 1.0 / math.sqrt(1000)
        for p in model.classifier.fc_out.parameters():
            p.data.uniform_(-stdv, stdv)

    return model
AlexNet3D.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+
3
class AlexNet(nn.Module):
    """3D AlexNet-style classifier for single-channel volumes.

    Mirrors the 2D AlexNet layout with Conv3d/MaxPool3d blocks.  The
    classifier's first Linear expects the features to flatten to
    256*6*6*6, which ties the network to one specific input volume size
    (not visible here — TODO confirm against the data loader).
    """

    def __init__(self, num_classes=3):
        super(AlexNet, self).__init__()
        # Feature extractor: five conv blocks with three 3x3x3 max-pool stages.
        self.features = nn.Sequential(
            nn.Conv3d(1, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=3, stride=2),
            nn.Conv3d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=3, stride=2),
            nn.Conv3d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv3d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv3d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=3, stride=2),
        )
        # Fully-connected head; note the very large first Linear
        # (256*216 inputs x 4096 outputs).
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-0.1, 0.1] for every weight and bias tensor.
        for weight in self.parameters():
            weight.data.uniform_(-0.1, 0.1)

    def forward(self, x):
        # Flatten conv features to (batch, 256*6*6*6) before the MLP head.
        x = self.features(x)
        x = x.view(x.size(0), 256 * 6 * 6 * 6)
        x = self.classifier(x)
        return x
CV final report.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5099301619a38b57ba1afc6e67e92f2dfe0c18e759b4b47e4388bfc321fac146
3
+ size 1664143
autoencoder.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
class AutoEncoder(nn.Module):
    """One-hidden-layer sparse autoencoder for 343-element (7x7x7) patches.

    ``forward`` returns both the reconstruction and the sigmoid hidden
    code, so a sparsity penalty can be applied to the code during training.
    """

    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = nn.Linear(343, 410)   # patch -> 410-dim code
        self.sparsify = nn.Sigmoid()
        self.decoder = nn.Linear(410, 343)   # code -> reconstructed patch

    def forward(self, out):
        flattened = out.view(-1, 343)
        code = self.sparsify(self.encoder(flattened))
        reconstruction = self.decoder(code)
        return reconstruction, code
cnn_3d_with_ae.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from autoencoder import AutoEncoder
3
+ import torch.nn as nn
4
+ import math
5
+
6
class CNN(nn.Module):
    """Single-conv-layer 3D CNN classifier (autoencoder-pretrainable stack).

    conv(7, stride 7) -> ReLU -> maxpool(7, stride 7) -> dropout ->
    fc(80) -> dropout -> fc(num_classes) -> LogSoftmax.
    """

    def __init__(self, num_classes=2):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv3d(1, 410, kernel_size=7, stride=7, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool3d(kernel_size=7, stride=7)
        self.dropout1 = nn.Dropout(0.5)
        # 2*3*2 is the spatial size after conv+pool for the expected input
        # volume (NOTE(review): depends on the loader's resize — confirm).
        self.fc1 = nn.Linear(2 * 3 * 2 * 410, 80)
        self.dropout2 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(80, num_classes)
        # dim=1: normalize across classes.  The original omitted dim, which
        # is deprecated and emits a warning in modern PyTorch.
        self.softmax = nn.LogSoftmax(dim=1)
        self.parameter_initialization()

    def forward(self, out):
        out = self.pool1(self.relu1(self.conv1(out)))
        out = self.dropout1(out)
        out = out.view(-1, 2 * 3 * 2 * 410)
        out = self.fc1(out)
        out = self.dropout2(out)
        out = self.fc2(out)
        return self.softmax(out)

    def parameter_initialization(self):
        """Uniform init in [-1/sqrt(410), 1/sqrt(410)] for every parameter."""
        stdv = 1.0 / math.sqrt(410)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
41
+
42
+
43
+
cnn_3d_wtih_ae.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn

from autoencoder import AutoEncoder
4
class CNN(nn.Module):
    """Draft single-conv 3D CNN meant to be seeded from a trained sparse
    autoencoder (see ``load_ae``).

    NOTE(review): the conv produces 410 channels, so the (1, 15*15*15)
    flatten in ``forward`` cannot match its output for any input size —
    this file looks like an abandoned draft superseded by cnn_3d_with_ae.py.
    """

    def __init__(self, num_classes=2):
        super(CNN, self).__init__()
        self.conv = nn.Conv3d(1, 410, kernel_size=7, stride=1, padding=1)
        self.pool = nn.MaxPool3d(kernel_size=7, stride=7)
        self.fc1 = nn.Linear(15 * 15 * 15, 800)
        self.fc2 = nn.Linear(800, num_classes)
        # dim=1: normalize across classes (omitting dim is deprecated)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, out):
        out = self.conv(out)
        out = self.pool(out)
        out = out.view(1, 15 * 15 * 15)
        out = self.fc1(out)
        out = self.fc2(out)
        return self.softmax(out)

    def load_ae(self, ae):
        """Copy the autoencoder's encoder weights into the conv layer.

        The original assigned into ``cnn.state_dict()[...]``, which both
        referenced an undefined global ``cnn`` and mutated a detached dict
        (no effect on the module's parameters).  Copy in-place instead.
        """
        with torch.no_grad():
            self.conv.weight.copy_(ae.state_dict()['encoder.weight'].view(410, 1, 7, 7, 7))
            self.conv.bias.copy_(ae.state_dict()['encoder.bias'])
        return self
custom_transform.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import random
3
+ import math
4
+ from PIL import Image
5
+ from skimage.transform import resize
6
+ import skimage
7
+ import torch
8
+ import matplotlib.pyplot as plt
9
+
10
+
11
class CustomResize(object):
    """Resize a nibabel image to ``trg_size`` and return an ndarray.

    PIL cannot handle 3D images, so the result stays an ndarray, which
    the downstream ToTensor transform accepts.
    """

    def __init__(self, network_type, trg_size):
        self.trg_size = trg_size
        # kept for interface compatibility; not used in this transform
        self.network_type = network_type

    def __call__(self, img):
        resized_img = self.resize_image(img, self.trg_size)
        return resized_img

    def resize_image(self, img, trg_size):
        # img.get_data() was removed in nibabel 4.0; asarray(dataobj) is the
        # drop-in replacement that keeps the on-disk dtype.
        img_array = np.asarray(img.dataobj)
        res = resize(img_array, trg_size, mode='reflect', anti_aliasing=False, preserve_range=True)

        # The original `raise "type error!"` is itself a TypeError in
        # Python 3 (only exceptions may be raised); raise a real one.
        if not isinstance(res, np.ndarray):
            raise TypeError("resize returned %s, expected ndarray" % type(res))

        return res
31
+
32
class CustomToTensor(object):
    """Convert an HWC ndarray to a CHW float tensor scaled into [0, 1]."""

    def __init__(self, network_type):
        # kept for interface compatibility; not used in this transform
        self.network_type = network_type

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # HWC -> CHW, then scale 8-bit intensities to [0, 1]
            chw = pic.transpose((2, 0, 1))
            tensor = torch.from_numpy(chw)
            return tensor.float().div(255)
45
+
46
+
47
+
48
+
49
+
custom_transform2D.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import random
3
+ import math
4
+ from PIL import Image
5
+ from skimage.transform import resize
6
+ import skimage
7
+ import torch
8
+ import matplotlib.pyplot as plt
9
+
10
+
11
class CustomResize(object):
    """Resize a 2D/3D ndarray to ``trg_size`` and return an ndarray.

    PIL cannot handle 3D images, so the result stays an ndarray, which
    the downstream ToTensor transform accepts.
    """

    def __init__(self, trg_size):
        self.trg_size = trg_size

    def __call__(self, img):
        resized_img = self.resize_image(img, self.trg_size)
        return resized_img

    def resize_image(self, img_array, trg_size):
        res = resize(img_array, trg_size, mode='reflect', preserve_range=True, anti_aliasing=False)

        # The original `raise "type error!"` is itself a TypeError in
        # Python 3 (only exceptions may be raised); raise a real one.
        if not isinstance(res, np.ndarray):
            raise TypeError("resize returned %s, expected ndarray" % type(res))

        return res
30
+
31
class CustomToTensor(object):
    """Convert an HWC ndarray to a CHW float tensor (no rescaling)."""

    def __init__(self):
        pass

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # HWC -> CHW for Conv2d consumption
            chw = pic.transpose((2, 0, 1))
            return torch.from_numpy(chw).float()
43
+
44
+
45
+
46
+
47
+
imageExtract.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
import shutil
import os

# Flatten ./Image/<subdir>/*.nii into ./Image1/<subdir>, then delete ./Image.
# NOTE(review): assumes ./Image1/<subdir> already exists for every subdir.
for d in os.listdir('./Image'):
    if d.startswith('.'):
        # skip hidden entries such as .DS_Store
        continue
    subdir = os.path.join('./Image', d)
    for f in os.listdir(subdir):
        # original checked f[-3:] == 'nii', which also matched names like
        # "scan.snii"; require the full ".nii" extension
        if f.endswith('.nii'):
            shutil.move(os.path.join(subdir, f), os.path.join('./Image1', d))
shutil.rmtree('./Image')
10
+
main_alexnet.py ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import cuda
7
+ from torch.autograd import Variable
8
+ from torch.utils.data import DataLoader, Dataset
9
+
10
+ import torchvision
11
+ import torchvision.datasets as dset
12
+ import torchvision.transforms as transforms
13
+ import torchvision.utils
14
+ from PIL import Image
15
+
16
+ import torch.nn.functional as F
17
+
18
+ import matplotlib.pyplot as plt
19
+ import matplotlib.ticker as ticker
20
+ import numpy as np
21
+ import random
22
+ from collections import Counter
23
+
24
+ from custom_transform2D import CustomResize
25
+ from custom_transform2D import CustomToTensor
26
+
27
+ from AD_Dataset import AD_Dataset
28
+ from AD_Standard_2DSlicesData import AD_Standard_2DSlicesData
29
+ from AD_Standard_2DRandomSlicesData import AD_Standard_2DRandomSlicesData
30
+ from AD_Standard_2DTestingSlices import AD_Standard_2DTestingSlices
31
+
32
+ from AlexNet2D import alexnet
33
+
34
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)


def _str2bool(value):
    """Parse a command-line boolean.

    argparse's ``type=bool`` treats ANY non-empty string — including
    "False" — as True, so ``--augmentation False`` silently enabled
    augmentation in the original.
    """
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes', 'y')


parser = argparse.ArgumentParser(description="Starter code for JHU CS661 Computer Vision HW3.")

parser.add_argument("--load",
                    help="Load saved network weights.")
parser.add_argument("--save", default="AlexNet",
                    help="Save network weights.")
parser.add_argument("--augmentation", default=True, type=_str2bool,
                    # original help text was a copy-paste of --save's
                    help="Use data augmentation. (default=True)")
parser.add_argument("--epochs", default=20, type=int,
                    help="Epochs through the data. (default=20)")
parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
                    # original help claimed default=0.01; actual default is 1e-3
                    help="Learning rate of the optimization. (default=1e-3)")
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--estop", default=1e-2, type=float,
                    help="Early stopping criteria on the development set. (default=1e-2)")
parser.add_argument("--batch_size", default=1, type=int,
                    help="Batch size for training. (default=1)")
parser.add_argument("--optimizer", default="Adam", choices=["SGD", "Adadelta", "Adam"],
                    help="Optimizer of choice for training. (default=Adam)")
parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
                    help="ID of gpu device to use. Empty implies cpu usage.")
62
+
63
+
64
+ # feel free to add more arguments as you need
65
+
66
+
67
def main(options):
    """Train and evaluate the 2-D-slice AlexNet classifier driven by CLI `options`."""
    # Path configuration
    TRAINING_PATH = 'train_2classes.txt'
    TESTING_PATH = 'test_2classes.txt'
    IMG_PATH = './Image'

    trg_size = (224, 224)  # AlexNet input resolution

    transformations = transforms.Compose([CustomResize(trg_size),
                                          CustomToTensor()
                                          ])
    dset_train = AD_Standard_2DRandomSlicesData(IMG_PATH, TRAINING_PATH, transformations)
    dset_test = AD_Standard_2DSlicesData(IMG_PATH, TESTING_PATH, transformations)

    # Use argument load to distinguish training and testing
    if options.load is None:
        train_loader = DataLoader(dset_train,
                                  batch_size=options.batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  drop_last=True
                                  )
    else:
        # Only shuffle the data when doing training
        train_loader = DataLoader(dset_train,
                                  batch_size=options.batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  drop_last=True
                                  )

    test_loader = DataLoader(dset_test,
                             batch_size=options.batch_size,
                             shuffle=False,
                             num_workers=4,
                             drop_last=True
                             )

    use_cuda = (len(options.gpuid) >= 1)
    if options.gpuid:
        cuda.set_device(options.gpuid[0])

    # Initial the model
    model = alexnet(pretrained=True)
    # model.load_state_dict(torch.load(options.load))

    if use_cuda > 0:
        model.cuda()
    else:
        model.cpu()

    # NLLLoss assumes the model emits log-probabilities -- TODO confirm the
    # alexnet variant used here ends in log_softmax
    # criterion = torch.nn.CrossEntropyLoss()
    criterion = torch.nn.NLLLoss()

    lr = options.learning_rate
    # Only optimize parameters that are not frozen
    optimizer = eval("torch.optim." + options.optimizer)(filter(lambda x: x.requires_grad, model.parameters()), lr)

    best_accuracy = float("-inf")

    train_loss_f = open("train_loss.txt", "w")
    test_acu_f = open("test_accuracy.txt", "w")

    for epoch_i in range(options.epochs):

        logging.info("At {0}-th epoch.".format(epoch_i))
        train_loss, correct_cnt = train(model, train_loader, use_cuda, criterion, optimizer, train_loss_f)
        # each instance in one batch has 3 views
        train_avg_loss = train_loss / (len(dset_train) * 3 / options.batch_size)
        train_avg_acu = float(correct_cnt) / (len(dset_train) * 3)
        logging.info(
            "Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch_i))
        logging.info("Average training accuracy is {0:.5f} at the end of epoch {1}".format(train_avg_acu, epoch_i))

        correct_cnt = validate(model, test_loader, use_cuda, criterion)
        dev_avg_acu = float(correct_cnt) / len(dset_test)
        logging.info("Average validation accuracy is {0:.5f} at the end of epoch {1}".format(dev_avg_acu, epoch_i))

        # write validation accuracy to file
        test_acu_f.write("{0:.5f}\n".format(dev_avg_acu))

        # checkpoint only when dev accuracy improves
        if dev_avg_acu > best_accuracy:
            best_accuracy = dev_avg_acu
            torch.save(model.state_dict(), open(options.save, 'wb'))

    train_loss_f.close()
    test_acu_f.close()
157
def train(model, train_loader, use_cuda, criterion, optimizer, train_loss_f):
    """Run one training epoch; return (summed loss, number of correct predictions)."""
    # main training loop
    train_loss = 0.0
    correct_cnt = 0.0
    model.train()
    for it, train_data in enumerate(train_loader):
        # each loader item yields several view-dicts per scan (3 views per instance)
        for data_dic in train_data:
            if use_cuda:
                imgs, labels = Variable(data_dic['image']).cuda(), Variable(data_dic['label']).cuda()
            else:
                imgs, labels = Variable(data_dic['image']), Variable(data_dic['label'])
            integer_encoded = labels.data.cpu().numpy()
            # target should be LongTensor in loss function
            ground_truth = Variable(torch.from_numpy(integer_encoded)).long()
            if use_cuda:
                ground_truth = ground_truth.cuda()
            train_output = model(imgs)
            _, predict = train_output.topk(1)
            loss = criterion(train_output, ground_truth)
            # NOTE(review): accumulating the loss Variable keeps every batch's
            # graph alive -- consider summing loss.data[0] instead
            train_loss += loss
            correct_this_batch = (predict.squeeze(1) == ground_truth).sum().float()
            correct_cnt += correct_this_batch
            accuracy = float(correct_this_batch) / len(ground_truth)
            logging.info("batch {0} training loss is : {1:.5f}".format(it, loss.data[0]))
            logging.info("batch {0} training accuracy is : {1:.5f}".format(it, accuracy))

            # write the training loss to file
            train_loss_f.write("{0:.5f}\n".format(loss.data[0]))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    return train_loss, correct_cnt
191
+
192
+
193
+
194
def validate(model, test_loader, use_cuda, criterion):
    """Evaluate by majority vote over each scan's views; return correct count."""
    # validation -- this is a crude estimation because there might be some paddings at the end
    correct_cnt = 0.0
    model.eval()
    for it, test_data in enumerate(test_loader):
        vote = []
        for data_dic in test_data:
            if use_cuda:
                imgs, labels = Variable(data_dic['image'], volatile=True).cuda(), Variable(data_dic['label'],
                                                                                           volatile=True).cuda()
            else:
                imgs, labels = Variable(data_dic['image'], volatile=True), Variable(data_dic['label'],
                                                                                    volatile=True)
            test_output = model(imgs)
            _, predict = test_output.topk(1)
            vote.append(predict)

        # majority vote across the per-scan view predictions
        vote = torch.cat(vote, 1)
        final_vote, _ = torch.mode(vote, 1)
        # labels are identical across views, so take them from the first view
        ground_truth = test_data[0]['label']
        correct_this_batch = (final_vote.cpu().data == ground_truth).sum()
        correct_cnt += correct_this_batch
        accuracy = float(correct_this_batch) / len(ground_truth)

        logging.info("batch {0} dev accuracy is : {1:.5f}".format(it, accuracy))

    return correct_cnt
221
+
222
+
223
+
224
+
225
def show_plot(points):
    """Plot *points* on fresh axes with y-ticks every 0.2 units."""
    plt.figure()
    fig, ax = plt.subplots()
    tick_spacing = ticker.MultipleLocator(base=0.2)  # ticks at regular intervals
    ax.yaxis.set_major_locator(tick_spacing)
    plt.plot(points)
231
+
232
+
233
if __name__ == "__main__":
    # parse_known_args tolerates unrecognized flags instead of erroring out;
    # they are logged below as a warning
    ret = parser.parse_known_args()
    options = ret[0]
    if ret[1]:
        logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
    main(options)
main_autoencoder.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import cuda
7
+ from torch.autograd import Variable
8
+ from torch.utils.data import DataLoader,Dataset
9
+ import torchvision
10
+
11
+ from autoencoder import AutoEncoder
12
+ from AD_Standard_3DRandomPatch import AD_Standard_3DRandomPatch
13
+
14
# Logging and CLI configuration for the autoencoder pre-training script.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

parser = argparse.ArgumentParser(description="Starter code for AutoEncoder")

parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
                    help="Learning rate of the optimization. (default=0.001)")  # BUG FIX: help said 0.01
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--batch_size", default=1, type=int,
                    help="Batch size for training. (default=1)")
parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
                    help="ID of gpu device to use. Empty implies cpu usage.")
parser.add_argument("--num_classes", default=2, type=int,
                    help="Number of classes.")
parser.add_argument("--epochs", default=20, type=int,
                    help="Epochs through the data. (default=20)")
parser.add_argument("--estop", default=1e-4, type=float,
                    help="Early stopping criteria on the development set. (default=1e-4)")
37
def main(options):
    """Pre-train the sparse autoencoder on random 3-D patches.

    Saves a checkpoint whenever the per-epoch average loss stops improving by
    more than `options.estop`, or every 20 epochs.
    """
    # Choose the training split by class count
    if options.num_classes == 2:
        TRAINING_PATH = 'train_2classes.txt'
    else:
        TRAINING_PATH = 'train.txt'
    IMG_PATH = './Whole'

    dset_train = AD_Standard_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True
                              )
    sparsity = 0.05  # target mean activation for the KL sparsity penalty
    beta = 0.5       # weight of the sparsity penalty

    mean_square_loss = nn.MSELoss()

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    if use_gpu:
        autoencoder = autoencoder.cuda()
    else:
        autoencoder = autoencoder.cpu()

    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=options.learning_rate,
                                 weight_decay=options.weight_decay)

    last_train_loss = 1e-4
    f = open("autoencoder_loss", 'a')
    for epoch in range(options.epochs):
        train_loss = 0.
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            patch = patches['patch']
            for b, batch in enumerate(patch):
                # NOTE(review): .cuda() is applied even when use_gpu is False
                # -- confirm this script is only run on GPU hosts
                batch = Variable(batch).cuda()
                output, s_ = autoencoder(batch)
                loss1 = mean_square_loss(output, batch)
                # KL(s || s_) sparsity penalty; the 1e-8 terms guard log(0)
                s = Variable(torch.ones(s_.shape) * sparsity).cuda()
                loss2 = (s * torch.log(s / (s_ + 1e-8))
                         + (1 - s) * torch.log((1 - s) / ((1 - s_ + 1e-8)))).sum() / options.batch_size
                loss = loss1 + beta * loss2
                train_loss += loss
                logging.info("batch {0} training loss is : {1:.5f}, {2:.5f}".format(i*1000+b, loss1.data[0], loss2.data[0]))
                f.write("batch {0} training loss is : {1:.3f}\n".format(i*1000+b, loss.data[0]))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        # each loader item contributes 1000 patches
        train_avg_loss = train_loss / (len(train_loader) * 1000)
        print("Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch))
        # BUG FIX: original compared against undefined name `last_training_loss`
        # (NameError on the first epoch); the variable is `last_train_loss`.
        if (abs(train_avg_loss.data[0] - last_train_loss) <= options.estop) or ((epoch + 1) % 20 == 0):
            torch.save(autoencoder.state_dict(), open("autoencoder_pretrained_model" + str(epoch), 'wb'))
        last_train_loss = train_avg_loss.data[0]
    f.close()
98
if __name__ == "__main__":
    # parse_known_args tolerates unrecognized flags; they are logged below
    ret = parser.parse_known_args()
    options = ret[0]
    if ret[1]:
        logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
    main(options)
main_cnn_autoencoder.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import cuda
7
+ from torch.autograd import Variable
8
+ from torch.utils.data import DataLoader,Dataset
9
+
10
+ import torchvision
11
+ import torchvision.datasets as dset
12
+ import torchvision.transforms as transforms
13
+ import torchvision.utils
14
+ from PIL import Image
15
+
16
+ import torch.nn.functional as F
17
+
18
+ import matplotlib.pyplot as plt
19
+ import numpy as np
20
+ import random
21
+
22
+ from custom_transform import CustomResize
23
+ from custom_transform import CustomToTensor
24
+
25
+ from AD_Standard_CNN_Dataset import AD_Standard_CNN_Dataset
26
+ from cnn_3d_with_ae import CNN
27
+
28
# Logging and CLI configuration for the CNN-with-autoencoder training script.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

parser = argparse.ArgumentParser(description="Starter code for CNN .")

parser.add_argument("--epochs", default=20, type=int,
                    help="Epochs through the data. (default=20)")
parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
                    help="Learning rate of the optimization. (default=0.001)")  # BUG FIX: help said 0.01
parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--batch_size", default=1, type=int,
                    help="Batch size for training. (default=1)")
parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
                    help="ID of gpu device to use. Empty implies cpu usage.")
# NOTE: argparse `type=bool` treats any non-empty string as True
parser.add_argument("--autoencoder", default=True, type=bool,
                    help="Whether to use the parameters from pretrained autoencoder.")
parser.add_argument("--num_classes", default=2, type=int,
                    help="The number of classes, 2 or 3.")
parser.add_argument("--estop", default=1e-5, type=float,
                    help="Early stopping criteria on the development set. (default=1e-5)")  # BUG FIX: help said 1e-2
parser.add_argument("--noise", default=True, type=bool,
                    help="Whether to add gaussian noise to scans.")
# feel free to add more arguments as you need
56
def main(options):
    """Train the 3-D CNN classifier, optionally seeded from a pretrained autoencoder."""
    # Path configuration: split files for the 2- or 3-class task
    if options.num_classes == 2:
        TRAINING_PATH = 'train_2C_new.txt'
        TESTING_PATH = 'validation_2C_new.txt'
    else:
        TRAINING_PATH = 'train.txt'
        TESTING_PATH = 'test.txt'
    IMG_PATH = './NewWhole'

    trg_size = (121, 145, 121)  # expected scan volume -- TODO confirm against dataset

    # training scans get gaussian noise augmentation; validation scans do not
    dset_train = AD_Standard_CNN_Dataset(IMG_PATH, TRAINING_PATH, noise=True)
    dset_test = AD_Standard_CNN_Dataset(IMG_PATH, TESTING_PATH, noise=False)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True
                              )

    test_loader = DataLoader(dset_test,
                             batch_size=options.batch_size,
                             shuffle=False,
                             num_workers=4,
                             drop_last=True
                             )

    use_cuda = (len(options.gpuid) >= 1)

    # Training process
    model = CNN(options.num_classes)

    if use_cuda > 0:
        model = model.cuda()
    else:
        model.cpu()

    if options.autoencoder:
        pretrained_ae = torch.load("./autoencoder_pretrained_model39")
        # BUG FIX: assigning into the dict returned by state_dict() does NOT
        # modify the live parameters -- the pretrained weights were silently
        # discarded. Copy into the parameter tensors instead.
        model.conv1.weight.data.copy_(pretrained_ae['encoder.weight'].view(410, 1, 7, 7, 7))
        model.conv1.bias.data.copy_(pretrained_ae['encoder.bias'])

        # freeze the autoencoder-initialized first layer
        for p in model.conv1.parameters():
            p.requires_grad = False

    # NLLLoss assumes the model emits log-probabilities -- TODO confirm
    criterion = torch.nn.NLLLoss()

    lr = options.learning_rate
    # only optimize unfrozen parameters
    optimizer = torch.optim.Adam(filter(lambda x: x.requires_grad, model.parameters()), lr, weight_decay=options.weight_decay)

    # main training loop
    last_dev_loss = 1e-4
    max_acc = 0
    max_epoch = 0
    f1 = open("cnn_autoencoder_loss_train", 'a')
    f2 = open("cnn_autoencoder_loss_dev", 'a')
    for epoch_i in range(options.epochs):
        logging.info("At {0}-th epoch.".format(epoch_i))
        train_loss = 0.0
        correct_cnt = 0.0
        # BUG FIX: model.eval() set during validation was never undone, so
        # every epoch after the first trained in eval mode
        model.train()
        for it, train_data in enumerate(train_loader):
            data_dic = train_data

            if use_cuda:
                imgs, labels = Variable(data_dic['image']).cuda(), Variable(data_dic['label']).cuda()
            else:
                imgs, labels = Variable(data_dic['image']), Variable(data_dic['label'])

            # 3D convolution requires 5D tensors (batch, 1, D, H, W); the
            # dataset already supplies the channel dim, so no unsqueeze here
            img_input = imgs

            integer_encoded = labels.data.cpu().numpy()
            # target should be LongTensor in loss function
            ground_truth = Variable(torch.from_numpy(integer_encoded)).long()
            if use_cuda:
                ground_truth = ground_truth.cuda()
            train_output = model(img_input)
            train_prob_predict = F.softmax(train_output, dim=1)
            _, predict = train_prob_predict.topk(1)
            loss = criterion(train_output, ground_truth)

            train_loss += loss
            correct_this_batch = (predict.squeeze(1) == ground_truth).sum().float()
            correct_cnt += correct_this_batch
            accuracy = float(correct_this_batch) / len(ground_truth)
            logging.info("batch {0} training loss is : {1:.5f}".format(it, loss.data[0]))
            logging.info("batch {0} training accuracy is : {1:.5f}".format(it, accuracy))
            f1.write("batch {0} training loss is : {1:.5f}\n".format(it, loss.data[0]))
            # BUG FIX: the accuracy line previously wrote the loss value
            f1.write("batch {0} training accuracy is : {1:.5f}\n".format(it, accuracy))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        train_avg_loss = train_loss / (len(dset_train) / options.batch_size)
        train_avg_acu = float(correct_cnt) / len(dset_train)
        logging.info("Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch_i))
        logging.info("Average training accuracy is {0:.5f} at the end of epoch {1}".format(train_avg_acu, epoch_i))

        # validation -- this is a crude esitmation because there might be some paddings at the end
        dev_loss = 0.0
        correct_cnt = 0.0
        model.eval()
        for it, test_data in enumerate(test_loader):
            data_dic = test_data

            if use_cuda:
                imgs, labels = Variable(data_dic['image'], volatile=True).cuda(), Variable(data_dic['label'], volatile=True).cuda()
            else:
                imgs, labels = Variable(data_dic['image'], volatile=True), Variable(data_dic['label'], volatile=True)

            img_input = imgs
            integer_encoded = labels.data.cpu().numpy()
            ground_truth = Variable(torch.from_numpy(integer_encoded), volatile=True).long()
            if use_cuda:
                ground_truth = ground_truth.cuda()
            test_output = model(img_input)
            test_prob_predict = F.softmax(test_output, dim=1)
            _, predict = test_prob_predict.topk(1)
            loss = criterion(test_output, ground_truth)
            dev_loss += loss
            correct_this_batch = (predict.squeeze(1) == ground_truth).sum().float()
            correct_cnt += (predict.squeeze(1) == ground_truth).sum()
            accuracy = float(correct_this_batch) / len(ground_truth)
            logging.info("batch {0} dev loss is : {1:.5f}".format(it, loss.data[0]))
            logging.info("batch {0} dev accuracy is : {1:.5f}".format(it, accuracy))
            f2.write("batch {0} dev loss is : {1:.5f}\n".format(it, loss.data[0]))
            f2.write("batch {0} dev accuracy is : {1:.5f}\n".format(it, accuracy))

        dev_avg_loss = dev_loss / (len(dset_test) / options.batch_size)
        dev_avg_acu = float(correct_cnt) / len(dset_test)
        logging.info("Average validation loss is {0:.5f} at the end of epoch {1}".format(dev_avg_loss.data[0], epoch_i))
        logging.info("Average validation accuracy is {0:.5f} at the end of epoch {1}".format(dev_avg_acu, epoch_i))

        if dev_avg_acu > max_acc:
            max_acc = dev_avg_acu
            max_epoch = epoch_i

        # checkpoint whenever the best dev accuracy so far is good enough
        if max_acc >= 0.75:
            torch.save(model.state_dict(), open("3DCNN_model_" + str(epoch_i) + '_' + str(max_acc), 'wb'))
        last_dev_loss = dev_avg_loss.data[0]
        logging.info("Maximum accuracy on dev set is {0:.5f} for now".format(max_acc))
    logging.info("Maximum accuracy on dev set is {0:.5f} at the end of epoch {1}".format(max_acc, max_epoch))
    f1.close()
    f2.close()
212
+
213
if __name__ == "__main__":
    # parse_known_args tolerates unrecognized flags; they are logged below
    ret = parser.parse_known_args()
    options = ret[0]
    if ret[1]:
        logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
    main(options)
main_resnet.py ADDED
@@ -0,0 +1,241 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import cuda
7
+ from torch.autograd import Variable
8
+ from torch.utils.data import DataLoader,Dataset
9
+
10
+ import torchvision
11
+ import torchvision.datasets as dset
12
+ import torchvision.transforms as transforms
13
+ import torchvision.utils
14
+ from PIL import Image
15
+
16
+ import torch.nn.functional as F
17
+
18
+ import matplotlib.pyplot as plt
19
+ import numpy as np
20
+ import random
21
+
22
+ from custom_transform import CustomResize
23
+ from custom_transform import CustomToTensor
24
+
25
+ from AD_Dataset import AD_Dataset
26
+ from AD_2DSlicesData import AD_2DSlicesData
27
+
28
+ from AlexNet2D import alexnet
29
+ from AlexNet3D import AlexNet
30
+
31
+ import ResNet2D
32
+ import ResNet3D
33
+
34
+
35
# Logging and CLI configuration for the multi-backbone (AlexNet/ResNet) script.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

parser = argparse.ArgumentParser(description="Starter code for JHU CS661 Computer Vision HW3.")

parser.add_argument("--network_type", "--nt", default="AlexNet2D", choices=["AlexNet2D", "AlexNet3D", "ResNet2D", "ResNet3D"],
                    help="Deep network type. (default=AlexNet)")
parser.add_argument("--load",
                    help="Load saved network weights.")
parser.add_argument("--save", default="best_model",
                    help="Save network weights.")
# NOTE: argparse `type=bool` treats any non-empty string as True
parser.add_argument("--augmentation", default=True, type=bool,
                    help="Use data augmentation.")  # BUG FIX: help text was copy-pasted from --save
parser.add_argument("--epochs", default=20, type=int,
                    help="Epochs through the data. (default=20)")
parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
                    help="Learning rate of the optimization. (default=0.001)")  # BUG FIX: help said 0.01
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--estop", default=1e-2, type=float,
                    help="Early stopping criteria on the development set. (default=1e-2)")
parser.add_argument("--batch_size", default=1, type=int,
                    help="Batch size for training. (default=1)")
parser.add_argument("--optimizer", default="Adam", choices=["SGD", "Adadelta", "Adam"],
                    help="Optimizer of choice for training. (default=Adam)")
parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
                    help="ID of gpu device to use. Empty implies cpu usage.")
# feel free to add more arguments as you need
69
def main(options):
    """Train the AlexNet/ResNet (2-D or 3-D) classifier selected by `options.network_type`."""
    # Path configuration
    TRAINING_PATH = 'train.txt'
    TESTING_PATH = 'test.txt'
    IMG_PATH = './Image'

    # Input resolution depends on the backbone
    if options.network_type == 'AlexNet3D':
        trg_size = (224, 224, 224)
    elif options.network_type == 'AlexNet2D':
        trg_size = (224, 224)
    elif options.network_type == 'ResNet3D':
        trg_size = (110, 110, 110)
    elif options.network_type == 'ResNet2D':
        trg_size = (224, 224)

    # BUG FIX: the original conditions read `== "AlexNet3D" or "ResNet3D"`,
    # which is always truthy (a non-empty string), so the 2-D transform
    # branch below was unreachable for every network type.
    if options.network_type in ("AlexNet3D", "ResNet3D"):
        transformations = transforms.Compose([CustomResize(options.network_type, trg_size),
                                              CustomToTensor(options.network_type)
                                              ])
        dset_train = AD_2DSlicesData(IMG_PATH, TRAINING_PATH, transformations)
        dset_test = AD_2DSlicesData(IMG_PATH, TESTING_PATH, transformations)

    elif options.network_type in ("AlexNet2D", "ResNet2D"):
        transformations = transforms.Compose([transforms.Resize(trg_size, Image.BICUBIC),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.ToTensor()
                                              ])
        dset_train = AD_2DSlicesData(IMG_PATH, TRAINING_PATH, transformations)
        dset_test = AD_2DSlicesData(IMG_PATH, TESTING_PATH, transformations)

    # Use argument load to distinguish training and testing
    if options.load is None:
        train_loader = DataLoader(dset_train,
                                  batch_size=options.batch_size,
                                  shuffle=True,
                                  num_workers=4,
                                  drop_last=True
                                  )
    else:
        # Only shuffle the data when doing training
        train_loader = DataLoader(dset_train,
                                  batch_size=options.batch_size,
                                  shuffle=False,
                                  num_workers=4,
                                  drop_last=True
                                  )

    test_loader = DataLoader(dset_test,
                             batch_size=options.batch_size,
                             shuffle=False,
                             num_workers=4,
                             drop_last=True
                             )

    use_cuda = (len(options.gpuid) >= 1)

    # Training process
    if options.load is None:
        # Initial the model
        if options.network_type == 'AlexNet3D':
            model = AlexNet()
        elif options.network_type == 'AlexNet2D':
            model = alexnet(pretrained=True)
        elif options.network_type == 'ResNet2D':
            model = ResNet2D.resnet152(pretrained=True)
        elif options.network_type == 'ResNet3D':
            model = ResNet3D.ResNet()

        if use_cuda > 0:
            model = nn.DataParallel(model, device_ids=options.gpuid).cuda()
        else:
            model.cpu()

        # Multi-class cross-entropy loss (expects raw logits)
        criterion = torch.nn.CrossEntropyLoss()

        lr = options.learning_rate
        optimizer = eval("torch.optim." + options.optimizer)(model.parameters(), lr,
                                                             weight_decay=options.weight_decay)
        # Prepare for label encoding
        last_dev_avg_loss = float("inf")
        best_accuracy = float("-inf")

        # main training loop
        for epoch_i in range(options.epochs):
            logging.info("At {0}-th epoch.".format(epoch_i))
            train_loss = 0.0
            correct_cnt = 0.0
            model.train()
            for it, train_data in enumerate(train_loader):
                data_dic = train_data

                if use_cuda:
                    imgs, labels = Variable(data_dic['image']).cuda(), Variable(data_dic['label']).cuda()
                else:
                    imgs, labels = Variable(data_dic['image']), Variable(data_dic['label'])

                # 3D convolution requires 5D tensors (batch, 1, D, H, W);
                # the dataset already supplies the channel dim, so no unsqueeze
                img_input = imgs

                integer_encoded = labels.data.cpu().numpy()
                # target should be LongTensor in loss function
                ground_truth = Variable(torch.from_numpy(integer_encoded)).long()
                if use_cuda:
                    ground_truth = ground_truth.cuda()
                train_output = model(img_input)
                train_prob_predict = F.softmax(train_output, dim=1)
                _, predict = train_prob_predict.topk(1)
                loss = criterion(train_output, ground_truth)

                train_loss += loss
                correct_this_batch = (predict.squeeze(1) == ground_truth).sum()
                correct_cnt += correct_this_batch
                accuracy = float(correct_this_batch) / len(ground_truth)
                logging.info("batch {0} training loss is : {1:.5f}".format(it, loss.data[0]))
                logging.info("batch {0} training accuracy is : {1:.5f}".format(it, accuracy))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            train_avg_loss = train_loss / (len(dset_train) / options.batch_size)
            train_avg_acu = float(correct_cnt) / len(dset_train)
            logging.info("Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch_i))
            logging.info("Average training accuracy is {0:.5f} at the end of epoch {1}".format(train_avg_acu, epoch_i))

            # validation -- this is a crude esitmation because there might be some paddings at the end
            dev_loss = 0.0
            correct_cnt = 0.0
            model.eval()
            for it, test_data in enumerate(test_loader):
                data_dic = test_data

                if use_cuda:
                    imgs, labels = Variable(data_dic['image'], volatile=True).cuda(), Variable(data_dic['label'], volatile=True).cuda()
                else:
                    imgs, labels = Variable(data_dic['image'], volatile=True), Variable(data_dic['label'], volatile=True)

                img_input = imgs
                integer_encoded = labels.data.cpu().numpy()
                ground_truth = Variable(torch.from_numpy(integer_encoded), volatile=True).long()
                if use_cuda:
                    ground_truth = ground_truth.cuda()
                test_output = model(img_input)
                test_prob_predict = F.softmax(test_output, dim=1)
                _, predict = test_prob_predict.topk(1)
                loss = criterion(test_output, ground_truth)
                dev_loss += loss
                correct_this_batch = (predict.squeeze(1) == ground_truth).sum()
                correct_cnt += (predict.squeeze(1) == ground_truth).sum()
                accuracy = float(correct_this_batch) / len(ground_truth)
                logging.info("batch {0} dev loss is : {1:.5f}".format(it, loss.data[0]))
                logging.info("batch {0} dev accuracy is : {1:.5f}".format(it, accuracy))

            dev_avg_loss = dev_loss / (len(dset_test) / options.batch_size)
            dev_avg_acu = float(correct_cnt) / len(dset_test)
            logging.info("Average validation loss is {0:.5f} at the end of epoch {1}".format(dev_avg_loss.data[0], epoch_i))
            logging.info("Average validation accuracy is {0:.5f} at the end of epoch {1}".format(dev_avg_acu, epoch_i))

            # checkpoint every epoch, tagged with its validation loss
            torch.save(model.state_dict(), open(options.save + ".nll_{0:.3f}.epoch_{1}".format(dev_avg_loss.data[0], epoch_i), 'wb'))

            last_dev_avg_loss = dev_avg_loss
234
+
235
+
236
if __name__ == "__main__":
    # parse_known_args tolerates unrecognized flags; they are logged below
    ret = parser.parse_known_args()
    options = ret[0]
    if ret[1]:
        logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
    main(options)
test.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.autograd import Variable
7
+ from torch.utils.data import DataLoader,Dataset
8
+ import torchvision
9
+
10
+ from autoencoder import AutoEncoder
11
+ from AD_3DRandomPatch import AD_3DRandomPatch
12
+
13
# Logging and CLI configuration for the autoencoder smoke-test script.
logging.basicConfig(
    format='%(asctime)s %(levelname)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)

parser = argparse.ArgumentParser(description="Starter code for AutoEncoder")

parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
                    help="Learning rate of the optimization. (default=0.001)")  # BUG FIX: help said 0.01
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument("--batch_size", default=1, type=int,
                    help="Batch size for training. (default=1)")
parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
                    help="ID of gpu device to use. Empty implies cpu usage.")
parser.add_argument("--num_classes", default=2, type=int,
                    help="Number of classes.")
parser.add_argument("--epochs", default=20, type=int,
                    help="Epochs through the data. (default=20)")
33
+
34
def main(options):
    """CPU smoke-test of the sparse autoencoder training loop."""
    if options.num_classes == 2:
        TRAINING_PATH = 'train_2classes.txt'
    else:
        TRAINING_PATH = 'train.txt'
    IMG_PATH = '/Users/waz/JHU/CV-ADNI/ImageNoSkull'

    dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)

    train_loader = DataLoader(dset_train,
                              batch_size=options.batch_size,
                              shuffle=True,
                              num_workers=4,
                              drop_last=True
                              )

    sparsity = 0.05  # target mean activation for the sparsity penalty
    beta = 0.5

    mean_square_loss = nn.MSELoss()
    kl_div_loss = nn.KLDivLoss()

    use_gpu = len(options.gpuid) >= 1
    autoencoder = AutoEncoder()

    # this script deliberately runs on CPU regardless of --gpuid
    autoencoder = autoencoder.cpu()

    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=options.learning_rate, weight_decay=options.weight_decay)

    for epoch in range(options.epochs):
        # BUG FIX: the running loss was initialized once outside the epoch
        # loop, so the reported per-epoch average accumulated across epochs
        train_loss = 0.
        print("At {0}-th epoch.".format(epoch))
        for i, patches in enumerate(train_loader):
            for b, batch in enumerate(patches):
                batch = Variable(batch)
                output, mean_activitaion = autoencoder(batch)
                loss1 = mean_square_loss(output, batch)
                loss2 = kl_div_loss(mean_activitaion, Variable(torch.Tensor([sparsity])))
                # BUG FIX: Python 2 print statements replaced with py3 calls
                print("loss1", loss1)
                print("loss2", loss2)
                loss = loss1 + loss2
                train_loss += loss
                # BUG FIX: second placeholder was {1:.5f}, printing loss1 twice
                logging.info("batch {0} training loss is : {1:.5f}, {2:.5f}".format(b, loss1.data[0], loss2.data[0]))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
        # BUG FIX: len(train_loader*1000) raised TypeError; multiply the length
        train_avg_loss = train_loss / (len(train_loader) * 1000)
        print("Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch))
        # BUG FIX: `model` was undefined here; save the autoencoder's weights
        torch.save(autoencoder.state_dict(), open("autoencoder_model", 'wb'))
85
+
86
if __name__ == "__main__":
    # parse_known_args tolerates unrecognized flags; they are logged below
    ret = parser.parse_known_args()
    options = ret[0]
    if ret[1]:
        logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
    main(options)
test.txt ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 128_S_1409.nii AD
2
+ 116_S_0834.nii MCI
3
+ 109_S_0967.nii Normal
4
+ 009_S_1354.nii AD
5
+ 032_S_0479.nii Normal
6
+ 133_S_0792.nii MCI
7
+ 126_S_0680.nii Normal
8
+ 137_S_0301.nii Normal
9
+ 073_S_1207.nii AD
10
+ 010_S_0067.nii Normal
11
+ 018_S_0155.nii MCI
12
+ 130_S_0783.nii MCI
13
+ 126_S_0891.nii AD
14
+ 007_S_0041.nii MCI
15
+ 036_S_0976.nii MCI
16
+ 018_S_0450.nii MCI
17
+ 082_S_0641.nii MCI
18
+ 094_S_0711.nii Normal
19
+ 021_S_0231.nii MCI
20
+ 002_S_0816.nii AD
21
+ 128_S_0863.nii Normal
22
+ 068_S_0476.nii MCI
23
+ 128_S_0230.nii Normal
24
+ 062_S_1091.nii MCI
25
+ 094_S_1015.nii MCI
26
+ 005_S_1341.nii AD
27
+ 012_S_1292.nii MCI
28
+ 032_S_0095.nii Normal
29
+ 141_S_1244.nii MCI
30
+ 126_S_0865.nii MCI
31
+ 033_S_0513.nii MCI
32
+ 012_S_1133.nii Normal
33
+ 133_S_0488.nii Normal
34
+ 109_S_1343.nii MCI
35
+ 013_S_0699.nii AD
36
+ 114_S_0979.nii AD
37
+ 082_S_1079.nii AD
38
+ 133_S_0913.nii MCI
39
+ 136_S_0300.nii AD
40
+ 027_S_1082.nii AD
41
+ 114_S_0374.nii AD
42
+ 094_S_1027.nii AD
43
+ 109_S_1013.nii Normal
44
+ 027_S_0403.nii Normal
45
+ 132_S_0987.nii MCI
46
+ 100_S_0015.nii Normal
47
+ 036_S_1135.nii MCI
48
+ 002_S_1018.nii AD
49
+ 109_S_1157.nii AD
50
+ 099_S_0958.nii MCI
51
+ 094_S_1090.nii AD
52
+ 127_S_0684.nii Normal
53
+ 027_S_0118.nii Normal
54
+ 057_S_1217.nii MCI
55
+ 082_S_0761.nii Normal
56
+ 141_S_0810.nii Normal
57
+ 100_S_0747.nii AD
58
+ 010_S_0786.nii AD
59
+ 099_S_0054.nii MCI
60
+ 133_S_0771.nii MCI
61
+ 128_S_0701.nii AD
62
+ 133_S_0912.nii MCI
63
+ 098_S_0896.nii Normal
64
+ 023_S_0083.nii AD
65
+ 131_S_0441.nii Normal
66
+ 033_S_0889.nii AD
67
+ 031_S_0568.nii MCI
68
+ 128_S_0272.nii Normal
69
+ 016_S_1263.nii AD
70
+ 114_S_0601.nii Normal
71
+ 130_S_0886.nii Normal
72
+ 006_S_0547.nii AD
73
+ 041_S_0898.nii Normal
74
+ 027_S_1277.nii MCI
75
+ 033_S_0739.nii AD
76
+ 131_S_1301.nii Normal
77
+ 099_S_0111.nii MCI
78
+ 029_S_0824.nii Normal
79
+ 141_S_1024.nii AD
80
+ 022_S_0044.nii MCI
81
+ 027_S_0417.nii MCI
82
+ 022_S_0543.nii AD
83
+ 137_S_0796.nii AD
84
+ 094_S_1188.nii MCI
85
+ 114_S_0173.nii Normal
86
+ 052_S_1054.nii MCI
87
+ 131_S_0436.nii Normal
88
+ 062_S_1294.nii MCI
89
+ 126_S_0405.nii Normal
90
+ 041_S_0282.nii MCI
91
+ 099_S_0880.nii MCI
92
+ 032_S_0400.nii AD
93
+ 021_S_0753.nii AD
94
+ 033_S_0734.nii Normal
95
+ 136_S_0196.nii Normal
96
+ 031_S_0773.nii AD
97
+ 002_S_0955.nii AD
98
+ 021_S_0984.nii Normal
99
+ 033_S_0724.nii AD
100
+ 013_S_1035.nii Normal
101
+ 094_S_1397.nii AD
102
+ 082_S_0469.nii MCI
103
+ 109_S_0840.nii Normal
104
+ 011_S_0856.nii MCI
105
+ 012_S_1212.nii Normal
106
+ 033_S_1098.nii Normal
107
+ 941_S_1194.nii Normal
108
+ 009_S_0842.nii Normal
109
+ 136_S_0086.nii Normal
110
+ 027_S_1387.nii MCI
111
+ 126_S_1221.nii AD
112
+ 082_S_0363.nii Normal
113
+ 067_S_0045.nii MCI
114
+ 098_S_0172.nii Normal
115
+ 072_S_1211.nii MCI
116
+ 109_S_0876.nii Normal
117
+ 141_S_0851.nii MCI
118
+ 011_S_0053.nii AD
119
+ 073_S_0565.nii AD
120
+ 029_S_0878.nii MCI
121
+ 023_S_0061.nii Normal
122
+ 027_S_1385.nii AD
123
+ 100_S_0743.nii AD
124
+ 067_S_0336.nii MCI
125
+ 013_S_0996.nii AD
126
+ 099_S_0090.nii Normal
127
+ 099_S_0060.nii MCI
128
+ 099_S_0470.nii AD
129
+ 023_S_0030.nii MCI
130
+ 057_S_0643.nii Normal
131
+ 033_S_1086.nii Normal
132
+ 100_S_0190.nii MCI
133
+ 006_S_0731.nii Normal
134
+ 100_S_0069.nii Normal
135
+ 141_S_0852.nii AD
136
+ 130_S_1200.nii Normal
137
+ 062_S_1299.nii MCI
138
+ 067_S_0243.nii MCI
139
+ 007_S_0070.nii Normal
140
+ 012_S_1033.nii MCI
141
+ 027_S_0120.nii Normal
142
+ 032_S_0718.nii MCI
143
+ 094_S_1241.nii Normal
144
+ 053_S_0389.nii MCI
145
+ 067_S_0176.nii MCI
146
+ 123_S_0091.nii AD
147
+ 003_S_1059.nii AD
148
+ 123_S_0094.nii AD
149
+ 100_S_1113.nii AD
150
+ 020_S_0899.nii Normal
151
+ 136_S_0195.nii MCI
152
+ 057_S_0779.nii Normal
153
+ 131_S_0123.nii Normal
154
+ 116_S_0392.nii AD
155
+ 010_S_0419.nii Normal
156
+ 012_S_1321.nii MCI
157
+ 116_S_0890.nii MCI
158
+ 023_S_0604.nii MCI
159
+ 094_S_0531.nii MCI
160
+ 126_S_0506.nii Normal
161
+ 130_S_0969.nii Normal
162
+ 023_S_1190.nii Normal
163
+ 114_S_0410.nii MCI
164
+ 007_S_0414.nii MCI
165
+ 052_S_0951.nii Normal
166
+ 141_S_0340.nii AD
167
+ 128_S_0805.nii AD
168
+ 023_S_1104.nii MCI
169
+ 024_S_1400.nii MCI
170
+ 073_S_0445.nii MCI
171
+ 012_S_0712.nii AD
172
+ 128_S_0608.nii MCI
173
+ 018_S_0406.nii MCI
174
+ 129_S_0778.nii Normal
175
+ 100_S_1154.nii MCI
176
+ 031_S_0554.nii AD
177
+ 137_S_0841.nii AD
178
+ 018_S_0633.nii AD
179
+ 033_S_1285.nii AD
180
+ 099_S_1144.nii AD
181
+ 010_S_0829.nii AD
182
+ 006_S_0521.nii MCI
183
+ 033_S_1016.nii Normal
184
+ 141_S_1051.nii MCI
185
+ 114_S_0458.nii MCI
186
+ 128_S_0545.nii Normal
187
+ 094_S_0964.nii MCI
188
+ 022_S_0544.nii MCI
189
+ 941_S_1363.nii MCI
190
+ 037_S_0627.nii AD
191
+ 130_S_1201.nii AD
192
+ 057_S_1007.nii MCI
193
+ 022_S_0219.nii AD
194
+ 016_S_0354.nii MCI
195
+ 002_S_0938.nii AD
196
+ 014_S_0356.nii AD
197
+ 082_S_0640.nii Normal
198
+ 130_S_1337.nii AD
test_2C_new.txt ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 131_S_1301.nii Normal
2
+ 109_S_4499.nii Normal
3
+ 128_S_0701.nii AD
4
+ 016_S_4583.nii AD
5
+ 073_S_5016.nii AD
6
+ 070_S_4856.nii Normal
7
+ 023_S_0061.nii Normal
8
+ 109_S_0967.nii Normal
9
+ 036_S_5210.nii AD
10
+ 126_S_0680.nii Normal
11
+ 070_S_4719.nii AD
12
+ 128_S_0230.nii Normal
13
+ 016_S_5032.nii AD
14
+ 109_S_1157.nii AD
15
+ 009_S_0842.nii Normal
16
+ 128_S_0272.nii Normal
17
+ 032_S_4755.nii AD
18
+ 128_S_1409.nii AD
19
+ 114_S_0374.nii AD
20
+ 027_S_4801.nii AD
21
+ 116_S_4453.nii Normal
22
+ 123_S_0094.nii AD
23
+ 022_S_0543.nii AD
24
+ 082_S_4224.nii Normal
25
+ 082_S_4428.nii Normal
26
+ 128_S_4609.nii Normal
27
+ 100_S_0747.nii AD
28
+ 033_S_1086.nii Normal
29
+ 012_S_1212.nii Normal
30
+ 011_S_4912.nii AD
31
+ 130_S_1200.nii Normal
32
+ 011_S_4222.nii Normal
33
+ 051_S_5005.nii AD
34
+ 099_S_4124.nii AD
35
+ 099_S_0090.nii Normal
36
+ 021_S_4276.nii Normal
37
+ 137_S_4482.nii Normal
38
+ 027_S_4938.nii AD
39
+ 023_S_5241.nii AD
40
+ 027_S_0120.nii Normal
41
+ 016_S_4688.nii Normal
42
+ 014_S_4080.nii Normal
43
+ 082_S_0761.nii Normal
44
+ 021_S_0753.nii AD
45
+ 129_S_4369.nii Normal
46
+ 016_S_4121.nii Normal
47
+ 153_S_4125.nii Normal
48
+ 098_S_0172.nii Normal
49
+ 037_S_4410.nii Normal
50
+ 057_S_0779.nii Normal
51
+ 032_S_0095.nii Normal
52
+ 033_S_0724.nii AD
53
+ 114_S_0601.nii Normal
54
+ 013_S_0699.nii AD
55
+ 137_S_0796.nii AD
56
+ 082_S_4208.nii Normal
57
+ 099_S_0470.nii AD
58
+ 023_S_4020.nii Normal
59
+ 006_S_0731.nii Normal
60
+ 094_S_0711.nii Normal
61
+ 128_S_0863.nii Normal
62
+ 006_S_0547.nii AD
63
+ 021_S_4335.nii Normal
64
+ 005_S_1341.nii AD
65
+ 033_S_0734.nii Normal
66
+ 003_S_1059.nii AD
test_2classes.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 128_S_1409.nii AD
2
+ 109_S_0967.nii Normal
3
+ 009_S_1354.nii AD
4
+ 032_S_0479.nii Normal
5
+ 126_S_0680.nii Normal
6
+ 137_S_0301.nii Normal
7
+ 073_S_1207.nii AD
8
+ 010_S_0067.nii Normal
9
+ 126_S_0891.nii AD
10
+ 094_S_0711.nii Normal
11
+ 002_S_0816.nii AD
12
+ 128_S_0863.nii Normal
13
+ 128_S_0230.nii Normal
14
+ 005_S_1341.nii AD
15
+ 032_S_0095.nii Normal
16
+ 012_S_1133.nii Normal
17
+ 133_S_0488.nii Normal
18
+ 013_S_0699.nii AD
19
+ 114_S_0979.nii AD
20
+ 082_S_1079.nii AD
21
+ 136_S_0300.nii AD
22
+ 027_S_1082.nii AD
23
+ 114_S_0374.nii AD
24
+ 094_S_1027.nii AD
25
+ 109_S_1013.nii Normal
26
+ 027_S_0403.nii Normal
27
+ 100_S_0015.nii Normal
28
+ 002_S_1018.nii AD
29
+ 109_S_1157.nii AD
30
+ 094_S_1090.nii AD
31
+ 127_S_0684.nii Normal
32
+ 027_S_0118.nii Normal
33
+ 082_S_0761.nii Normal
34
+ 141_S_0810.nii Normal
35
+ 100_S_0747.nii AD
36
+ 010_S_0786.nii AD
37
+ 128_S_0701.nii AD
38
+ 098_S_0896.nii Normal
39
+ 023_S_0083.nii AD
40
+ 131_S_0441.nii Normal
41
+ 033_S_0889.nii AD
42
+ 128_S_0272.nii Normal
43
+ 016_S_1263.nii AD
44
+ 114_S_0601.nii Normal
45
+ 130_S_0886.nii Normal
46
+ 006_S_0547.nii AD
47
+ 041_S_0898.nii Normal
48
+ 033_S_0739.nii AD
49
+ 131_S_1301.nii Normal
50
+ 029_S_0824.nii Normal
51
+ 141_S_1024.nii AD
52
+ 022_S_0543.nii AD
53
+ 137_S_0796.nii AD
54
+ 114_S_0173.nii Normal
55
+ 131_S_0436.nii Normal
56
+ 126_S_0405.nii Normal
57
+ 032_S_0400.nii AD
58
+ 021_S_0753.nii AD
59
+ 033_S_0734.nii Normal
60
+ 136_S_0196.nii Normal
61
+ 031_S_0773.nii AD
62
+ 002_S_0955.nii AD
63
+ 021_S_0984.nii Normal
64
+ 033_S_0724.nii AD
65
+ 013_S_1035.nii Normal
66
+ 094_S_1397.nii AD
67
+ 109_S_0840.nii Normal
68
+ 012_S_1212.nii Normal
69
+ 033_S_1098.nii Normal
70
+ 941_S_1194.nii Normal
71
+ 009_S_0842.nii Normal
72
+ 136_S_0086.nii Normal
73
+ 126_S_1221.nii AD
74
+ 082_S_0363.nii Normal
75
+ 098_S_0172.nii Normal
76
+ 109_S_0876.nii Normal
77
+ 011_S_0053.nii AD
78
+ 073_S_0565.nii AD
79
+ 023_S_0061.nii Normal
80
+ 027_S_1385.nii AD
81
+ 100_S_0743.nii AD
82
+ 013_S_0996.nii AD
83
+ 099_S_0090.nii Normal
84
+ 099_S_0470.nii AD
85
+ 057_S_0643.nii Normal
86
+ 033_S_1086.nii Normal
87
+ 006_S_0731.nii Normal
88
+ 100_S_0069.nii Normal
89
+ 141_S_0852.nii AD
90
+ 130_S_1200.nii Normal
91
+ 007_S_0070.nii Normal
92
+ 027_S_0120.nii Normal
93
+ 094_S_1241.nii Normal
94
+ 123_S_0091.nii AD
95
+ 003_S_1059.nii AD
96
+ 123_S_0094.nii AD
97
+ 100_S_1113.nii AD
98
+ 020_S_0899.nii Normal
99
+ 057_S_0779.nii Normal
100
+ 131_S_0123.nii Normal
test_encoder.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import cuda
7
+ from torch.autograd import Variable
8
+ from torch.utils.data import DataLoader,Dataset
9
+ import torchvision
10
+
11
+ from autoencoder import AutoEncoder
12
+ from AD_3DRandomPatch import AD_3DRandomPatch
13
+
14
+ logging.basicConfig(
15
+ format='%(asctime)s %(levelname)s: %(message)s',
16
+ datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO)
17
+
18
+ parser = argparse.ArgumentParser(description="Starter code for AutoEncoder")
19
+
20
+ parser.add_argument("--learning_rate", "-lr", default=1e-3, type=float,
21
+ help="Learning rate of the optimization. (default=0.01)")
22
+ parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
23
+ help='momentum')
24
+ parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
25
+ metavar='W', help='weight decay (default: 1e-4)')
26
+ parser.add_argument("--batch_size", default=1, type=int,
27
+ help="Batch size for training. (default=1)")
28
+ parser.add_argument("--gpuid", default=[0], nargs='+', type=int,
29
+ help="ID of gpu device to use. Empty implies cpu usage.")
30
+ parser.add_argument("--num_classes", default=2, type=int,
31
+ help="Number of classes.")
32
+ parser.add_argument("--epochs", default=20, type=int,
33
+ help="Epochs through the data. (default=20)")
34
+
35
+ def main(options):
36
+
37
+ if options.num_classes == 2:
38
+ TRAINING_PATH = 'train_2classes.txt'
39
+ else:
40
+ TRAINING_PATH = 'train.txt'
41
+ IMG_PATH = './Image'
42
+
43
+ dset_train = AD_3DRandomPatch(IMG_PATH, TRAINING_PATH)
44
+
45
+ train_loader = DataLoader(dset_train,
46
+ batch_size = options.batch_size,
47
+ shuffle = True,
48
+ num_workers = 4,
49
+ drop_last = True
50
+ )
51
+
52
+ sparsity = 0.05
53
+ beta = 0.5
54
+
55
+ mean_square_loss = nn.MSELoss()
56
+ kl_div_loss = nn.KLDivLoss(reduce=False)
57
+
58
+ use_gpu = len(options.gpuid)>=1
59
+ autoencoder = AutoEncoder()
60
+
61
+ if(use_gpu):
62
+ autoencoder = autoencoder.cuda()
63
+ else:
64
+ autoencoder = autoencoder.cpu()
65
+
66
+ optimizer = torch.optim.Adam(autoencoder.parameters(), lr=options.learning_rate, weight_decay=options.weight_decay)
67
+
68
+ train_loss = 0.
69
+ for epoch in range(options.epochs):
70
+ print("At {0}-th epoch.".format(epoch))
71
+ for i, patches in enumerate(train_loader):
72
+ print i
73
+ print len(patches)
74
+ # for batch in patches:
75
+ # batch = Variable(batch).cuda()
76
+ # output, mean_activitaion = autoencoder(batch)
77
+ # loss = mean_square_loss(batch, output) + kl_div_loss(mean_activitaion, sparsity)
78
+ # train_loss += loss
79
+ # logging.info("batch {0} training loss is : {1:.5f}".format(i, loss.data[0]))
80
+ # optimizer.zero_grad()
81
+ # loss.backward()
82
+ # optimizer.step()
83
+ # train_avg_loss = train_loss/len(train_loader*1000)
84
+ # print("Average training loss is {0:.5f} at the end of epoch {1}".format(train_avg_loss.data[0], epoch))
85
+ # torch.save(model.state_dict(), open("autoencoder_model", 'wb'))
86
+
87
+ if __name__ == "__main__":
88
+ ret = parser.parse_known_args()
89
+ options = ret[0]
90
+ if ret[1]:
91
+ logging.warning("unknown arguments: {0}".format(parser.parse_known_args()[1]))
92
+ main(options)
train.txt ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 011_S_0005.nii Normal
2
+ 022_S_0014.nii Normal
3
+ 016_S_0769.nii MCI
4
+ 127_S_1382.nii AD
5
+ 033_S_0725.nii MCI
6
+ 067_S_0098.nii MCI
7
+ 023_S_1262.nii AD
8
+ 068_S_0210.nii Normal
9
+ 137_S_0158.nii MCI
10
+ 067_S_0110.nii AD
11
+ 133_S_0629.nii MCI
12
+ 116_S_0657.nii Normal
13
+ 123_S_0113.nii Normal
14
+ 094_S_1102.nii AD
15
+ 137_S_0825.nii MCI
16
+ 003_S_0981.nii Normal
17
+ 114_S_0228.nii AD
18
+ 057_S_0839.nii MCI
19
+ 941_S_1197.nii Normal
20
+ 022_S_0066.nii Normal
21
+ 024_S_1063.nii Normal
22
+ 137_S_1041.nii AD
23
+ 067_S_1185.nii AD
24
+ 035_S_0555.nii Normal
25
+ 068_S_1191.nii Normal
26
+ 116_S_0360.nii Normal
27
+ 010_S_0662.nii MCI
28
+ 033_S_1281.nii AD
29
+ 023_S_0613.nii MCI
30
+ 099_S_0352.nii Normal
31
+ 032_S_0677.nii Normal
32
+ 094_S_0692.nii Normal
33
+ 011_S_0002.nii Normal
34
+ 126_S_0606.nii AD
35
+ 141_S_0696.nii AD
36
+ 131_S_0691.nii AD
37
+ 023_S_0084.nii AD
38
+ 136_S_0299.nii AD
39
+ 023_S_0093.nii AD
40
+ 012_S_1009.nii Normal
41
+ 127_S_0397.nii MCI
42
+ 141_S_0717.nii Normal
43
+ 941_S_1203.nii Normal
44
+ 137_S_0481.nii MCI
45
+ 057_S_1373.nii AD
46
+ 016_S_0991.nii AD
47
+ 128_S_0167.nii AD
48
+ 027_S_0179.nii MCI
49
+ 094_S_0489.nii Normal
50
+ 127_S_0622.nii Normal
51
+ 029_S_1073.nii MCI
52
+ 116_S_1083.nii AD
53
+ 032_S_0147.nii AD
54
+ 027_S_0948.nii MCI
55
+ 037_S_1421.nii MCI
56
+ 073_S_0518.nii MCI
57
+ 018_S_0425.nii Normal
58
+ 128_S_0740.nii AD
59
+ 031_S_0618.nii Normal
60
+ 068_S_0109.nii AD
61
+ 141_S_0853.nii AD
62
+ 011_S_0861.nii MCI
63
+ 057_S_0464.nii MCI
64
+ 073_S_0089.nii Normal
65
+ 137_S_0459.nii Normal
66
+ 067_S_0056.nii Normal
67
+ 014_S_0357.nii AD
68
+ 036_S_0656.nii MCI
69
+ 098_S_0884.nii AD
70
+ 033_S_0516.nii Normal
71
+ 024_S_0985.nii Normal
72
+ 128_S_0310.nii AD
73
+ 024_S_1171.nii AD
74
+ 141_S_0767.nii Normal
75
+ 100_S_1286.nii Normal
76
+ 012_S_0637.nii Normal
77
+ 013_S_1120.nii MCI
78
+ 131_S_0409.nii MCI
79
+ 133_S_0525.nii Normal
80
+ 133_S_1170.nii AD
81
+ 033_S_0511.nii MCI
82
+ 041_S_1002.nii Normal
83
+ 082_S_0928.nii MCI
84
+ 128_S_1181.nii MCI
85
+ 014_S_0328.nii AD
86
+ 018_S_0369.nii Normal
87
+ 029_S_1384.nii MCI
88
+ 029_S_1056.nii AD
89
+ 033_S_1087.nii AD
90
+ 073_S_0386.nii Normal
91
+ 051_S_1040.nii MCI
92
+ 099_S_0551.nii MCI
93
+ 041_S_0446.nii MCI
94
+ 067_S_0284.nii MCI
95
+ 007_S_1222.nii Normal
96
+ 024_S_1307.nii AD
97
+ 032_S_1101.nii AD
98
+ 109_S_0950.nii MCI
99
+ 136_S_0579.nii MCI
100
+ 041_S_0314.nii MCI
101
+ 013_S_0240.nii MCI
102
+ 116_S_0487.nii AD
103
+ 067_S_0077.nii MCI
104
+ 137_S_0972.nii Normal
105
+ 051_S_1131.nii MCI
106
+ 057_S_1379.nii AD
107
+ 011_S_0008.nii Normal
108
+ 041_S_1010.nii MCI
109
+ 123_S_0390.nii MCI
110
+ 018_S_0087.nii MCI
111
+ 012_S_0803.nii AD
112
+ 022_S_0924.nii MCI
113
+ 013_S_0575.nii Normal
114
+ 033_S_1309.nii MCI
115
+ 136_S_0186.nii Normal
116
+ 035_S_0033.nii MCI
117
+ 137_S_0669.nii MCI
118
+ 073_S_1357.nii MCI
119
+ 100_S_0995.nii MCI
120
+ 137_S_0443.nii MCI
121
+ 016_S_1138.nii MCI
122
+ 014_S_0548.nii Normal
123
+ 037_S_0467.nii Normal
124
+ 011_S_0326.nii MCI
125
+ 021_S_0647.nii Normal
126
+ 127_S_0431.nii AD
127
+ 014_S_0519.nii Normal
128
+ 041_S_1391.nii AD
129
+ 029_S_0866.nii Normal
130
+ 033_S_1284.nii MCI
131
+ 033_S_1283.nii AD
132
+ 029_S_1184.nii AD
133
+ 037_S_1225.nii MCI
134
+ 033_S_0567.nii MCI
135
+ 094_S_1164.nii AD
136
+ 053_S_0621.nii MCI
137
+ 020_S_0213.nii AD
138
+ 021_S_1109.nii AD
139
+ 005_S_0221.nii AD
140
+ 022_S_0750.nii MCI
141
+ 114_S_0416.nii Normal
142
+ 006_S_0653.nii AD
143
+ 041_S_1420.nii MCI
144
+ 005_S_0222.nii MCI
145
+ 005_S_0929.nii AD
146
+ 036_S_0672.nii Normal
147
+ 082_S_1256.nii Normal
148
+ 053_S_0507.nii MCI
149
+ 022_S_0130.nii Normal
150
+ 012_S_1175.nii MCI
151
+ 109_S_1183.nii MCI
152
+ 141_S_1231.nii MCI
153
+ 128_S_1430.nii AD
154
+ 141_S_0982.nii MCI
155
+ 005_S_0814.nii AD
156
+ 012_S_0720.nii AD
157
+ 007_S_0128.nii MCI
158
+ 016_S_0359.nii Normal
159
+ 022_S_1366.nii MCI
160
+ 013_S_0325.nii MCI
161
+ 029_S_1038.nii MCI
162
+ 121_S_1322.nii MCI
163
+ 100_S_0006.nii MCI
164
+ 029_S_0871.nii MCI
165
+ 021_S_0642.nii AD
166
+ 137_S_1426.nii MCI
167
+ 057_S_0818.nii Normal
168
+ 073_S_0909.nii MCI
169
+ 137_S_0631.nii MCI
170
+ 023_S_0078.nii MCI
171
+ 116_S_1315.nii MCI
172
+ 012_S_0932.nii MCI
173
+ 062_S_0730.nii AD
174
+ 068_S_0473.nii Normal
175
+ 136_S_0184.nii Normal
176
+ 035_S_0341.nii AD
177
+ 133_S_0727.nii MCI
178
+ 100_S_1062.nii AD
179
+ 123_S_0298.nii Normal
180
+ 022_S_0096.nii Normal
181
+ 067_S_0177.nii Normal
182
+ 130_S_0102.nii MCI
183
+ 109_S_0777.nii AD
184
+ 941_S_1311.nii MCI
185
+ 041_S_0407.nii MCI
186
+ 127_S_1140.nii MCI
187
+ 006_S_0675.nii MCI
188
+ 035_S_0156.nii Normal
189
+ 014_S_0520.nii Normal
190
+ 016_S_1028.nii MCI
191
+ 018_S_0103.nii MCI
192
+ 128_S_0517.nii AD
193
+ 011_S_0016.nii Normal
194
+ 141_S_1094.nii Normal
195
+ 136_S_0426.nii AD
196
+ 021_S_0273.nii MCI
197
+ 067_S_0038.nii MCI
198
+ 068_S_0442.nii MCI
199
+ 128_S_0266.nii AD
200
+ 941_S_1295.nii MCI
201
+ 127_S_0393.nii MCI
202
+ 057_S_0934.nii Normal
203
+ 035_S_0048.nii Normal
204
+ 051_S_1338.nii MCI
205
+ 023_S_0081.nii Normal
206
+ 136_S_0874.nii MCI
207
+ 002_S_0685.nii Normal
208
+ 116_S_1232.nii Normal
209
+ 041_S_0598.nii MCI
210
+ 131_S_0457.nii AD
211
+ 011_S_0362.nii MCI
212
+ 062_S_0793.nii AD
213
+ 027_S_0074.nii Normal
214
+ 133_S_0493.nii Normal
215
+ 057_S_0474.nii AD
216
+ 136_S_0194.nii AD
217
+ 131_S_0497.nii AD
218
+ 010_S_0904.nii MCI
219
+ 062_S_0690.nii AD
220
+ 023_S_0926.nii Normal
221
+ 023_S_0855.nii MCI
222
+ 011_S_0023.nii Normal
223
+ 062_S_0768.nii Normal
224
+ 941_S_1202.nii Normal
225
+ 128_S_0611.nii MCI
226
+ 005_S_0602.nii Normal
227
+ 032_S_1169.nii Normal
228
+ 137_S_0686.nii Normal
229
+ 023_S_1247.nii MCI
230
+ 053_S_1044.nii AD
231
+ 005_S_0610.nii Normal
232
+ 031_S_0821.nii MCI
233
+ 013_S_0860.nii MCI
234
+ 128_S_0522.nii Normal
235
+ 941_S_1195.nii Normal
236
+ 016_S_1149.nii MCI
237
+ 005_S_1224.nii MCI
238
+ 032_S_1037.nii AD
239
+ 067_S_0607.nii MCI
240
+ 007_S_1339.nii AD
241
+ 123_S_0162.nii AD
242
+ 029_S_0845.nii Normal
243
+ 012_S_1165.nii MCI
244
+ 136_S_1227.nii MCI
245
+ 036_S_0577.nii AD
246
+ 003_S_0907.nii Normal
247
+ 033_S_0741.nii Normal
248
+ 018_S_0057.nii MCI
249
+ 099_S_0534.nii Normal
250
+ 035_S_0204.nii MCI
251
+ 073_S_0311.nii Normal
252
+ 082_S_0304.nii Normal
253
+ 027_S_1254.nii AD
254
+ 011_S_0022.nii Normal
255
+ 003_S_0931.nii Normal
256
+ 136_S_0873.nii MCI
257
+ 020_S_0883.nii Normal
258
+ 016_S_0590.nii MCI
259
+ 021_S_0141.nii MCI
260
+ 052_S_1250.nii Normal
261
+ 100_S_0035.nii Normal
262
+ 002_S_0954.nii MCI
263
+ 033_S_0888.nii AD
264
+ 010_S_0420.nii Normal
265
+ 127_S_0754.nii AD
266
+ 099_S_0492.nii AD
267
+ 013_S_0592.nii AD
268
+ 027_S_1081.nii AD
269
+ 007_S_0249.nii MCI
270
+ 128_S_0188.nii MCI
271
+ 041_S_0721.nii MCI
272
+ 116_S_0382.nii Normal
273
+ 009_S_1334.nii AD
274
+ 033_S_0723.nii MCI
275
+ 123_S_0106.nii Normal
276
+ 099_S_0533.nii Normal
277
+ 011_S_0021.nii Normal
278
+ 094_S_0921.nii MCI
279
+ 018_S_0286.nii AD
280
+ 067_S_0076.nii AD
281
+ 029_S_0836.nii AD
282
+ 067_S_0059.nii Normal
283
+ 121_S_1350.nii MCI
284
+ 127_S_0394.nii MCI
285
+ 037_S_0454.nii Normal
286
+ 067_S_0020.nii AD
287
+ 130_S_1290.nii AD
288
+ 041_S_1412.nii MCI
289
+ 094_S_1402.nii AD
290
+ 002_S_0619.nii AD
291
+ 067_S_0257.nii Normal
292
+ 072_S_0315.nii Normal
293
+ 036_S_0748.nii MCI
294
+ 036_S_0576.nii Normal
295
+ 018_S_0682.nii AD
296
+ 027_S_1213.nii MCI
297
+ 033_S_1308.nii AD
298
+ 021_S_0332.nii MCI
299
+ 100_S_0893.nii AD
300
+ 068_S_0127.nii Normal
301
+ 114_S_0166.nii Normal
302
+ 041_S_0125.nii Normal
303
+ 068_S_0401.nii MCI
304
+ 141_S_1137.nii AD
305
+ 094_S_1398.nii MCI
306
+ 082_S_1119.nii MCI
307
+ 067_S_0029.nii AD
308
+ 141_S_1245.nii MCI
309
+ 033_S_0733.nii AD
310
+ 041_S_1411.nii MCI
311
+ 012_S_0689.nii AD
312
+ 098_S_0149.nii AD
313
+ 013_S_1161.nii AD
314
+ 029_S_1215.nii MCI
315
+ 013_S_1275.nii MCI
316
+ 098_S_0171.nii Normal
317
+ 141_S_0726.nii Normal
318
+ 128_S_0500.nii Normal
319
+ 128_S_0216.nii AD
320
+ 007_S_1248.nii AD
321
+ 021_S_0159.nii Normal
322
+ 022_S_0007.nii AD
323
+ 023_S_0139.nii AD
324
+ 121_S_0953.nii AD
325
+ 033_S_0920.nii Normal
326
+ 099_S_0040.nii Normal
327
+ 002_S_1070.nii MCI
328
+ 062_S_0535.nii AD
329
+ 127_S_1210.nii MCI
330
+ 051_S_1296.nii AD
331
+ 005_S_0324.nii MCI
332
+ 011_S_0003.nii AD
333
+ 041_S_1435.nii AD
334
+ 020_S_1288.nii Normal
335
+ 137_S_0973.nii MCI
336
+ 002_S_0782.nii MCI
337
+ 012_S_0917.nii MCI
338
+ 023_S_0058.nii Normal
339
+ 127_S_0259.nii Normal
340
+ 127_S_0844.nii AD
341
+ 141_S_1152.nii AD
342
+ 029_S_1218.nii MCI
343
+ 036_S_0813.nii Normal
344
+ 067_S_1253.nii AD
345
+ 032_S_0187.nii MCI
346
+ 136_S_0429.nii MCI
347
+ 073_S_0312.nii Normal
348
+ 002_S_0295.nii Normal
349
+ 022_S_0004.nii MCI
350
+ 011_S_0183.nii AD
351
+ 036_S_1023.nii Normal
352
+ 036_S_0760.nii AD
353
+ 128_S_0528.nii AD
354
+ 123_S_0072.nii Normal
355
+ 016_S_1121.nii MCI
356
+ 123_S_0088.nii AD
357
+ 010_S_0422.nii MCI
358
+ 067_S_0812.nii AD
359
+ 109_S_1014.nii Normal
360
+ 023_S_0963.nii Normal
361
+ 027_S_0461.nii MCI
362
+ 014_S_0558.nii Normal
363
+ 051_S_1123.nii Normal
364
+ 057_S_1371.nii AD
365
+ 010_S_0788.nii MCI
366
+ 007_S_1206.nii Normal
367
+ 141_S_0790.nii AD
368
+ 052_S_1251.nii Normal
369
+ 137_S_0438.nii AD
370
+ 041_S_1423.nii MCI
371
+ 005_S_0553.nii Normal
372
+ 002_S_1280.nii Normal
373
+ 126_S_0784.nii AD
374
+ 137_S_0366.nii AD
375
+ 007_S_0316.nii AD
376
+ 022_S_0961.nii MCI
377
+ 011_S_0010.nii AD
378
+ 068_S_1075.nii MCI
379
+ 023_S_0625.nii MCI
380
+ 100_S_0047.nii Normal
381
+ 002_S_1261.nii Normal
382
+ 133_S_1031.nii MCI
383
+ 007_S_0344.nii MCI
384
+ 005_S_0223.nii Normal
385
+ 094_S_1293.nii MCI
386
+ 062_S_1099.nii Normal
387
+ 136_S_0695.nii MCI
388
+ 012_S_0634.nii MCI
389
+ 062_S_1182.nii MCI
390
+ 018_S_0277.nii AD
391
+ 010_S_0472.nii Normal
392
+ 020_S_0097.nii Normal
393
+ 009_S_1199.nii MCI
394
+ 014_S_1095.nii AD
395
+ 094_S_0434.nii MCI
396
+ 109_S_1192.nii AD
397
+ 027_S_0485.nii MCI
398
+ 094_S_0526.nii Normal
399
+ 057_S_0957.nii MCI
400
+ 094_S_1314.nii MCI
401
+ 131_S_0319.nii Normal
402
+ 021_S_0337.nii Normal
403
+ 011_S_0241.nii MCI
404
+ 027_S_1335.nii MCI
405
+ 126_S_0605.nii Normal
406
+ 132_S_0339.nii MCI
407
+ 037_S_0182.nii MCI
408
+ 007_S_0293.nii MCI
409
+ 007_S_0068.nii Normal
410
+ 114_S_1103.nii MCI
411
+ 013_S_0502.nii Normal
412
+ 029_S_0999.nii AD
413
+ 027_S_0404.nii AD
414
+ 027_S_0850.nii AD
415
+ 023_S_0031.nii Normal
416
+ 116_S_1249.nii Normal
417
+ 041_S_1368.nii AD
418
+ 099_S_0372.nii AD
419
+ 007_S_1304.nii AD
420
+ 041_S_0262.nii Normal
421
+ 137_S_0283.nii Normal
422
+ 023_S_1289.nii AD
423
+ 067_S_0290.nii MCI
424
+ 133_S_1055.nii AD
425
+ 031_S_1209.nii AD
426
+ 029_S_0843.nii Normal
427
+ 094_S_1267.nii Normal
428
+ 130_S_0423.nii MCI
429
+ 116_S_0370.nii AD
430
+ 041_S_0549.nii MCI
431
+ 033_S_0923.nii Normal
432
+ 094_S_1330.nii MCI
433
+ 022_S_0129.nii AD
434
+ 057_S_0941.nii MCI
435
+ 130_S_0232.nii Normal
436
+ 036_S_1001.nii AD
437
+ 082_S_1377.nii AD
438
+ 133_S_0433.nii Normal
439
+ 021_S_0343.nii AD
440
+ 011_S_0168.nii MCI
441
+ 023_S_0916.nii AD
442
+ 128_S_0245.nii Normal
443
+ 003_S_1257.nii AD
444
+ 016_S_0538.nii Normal
445
+ 126_S_0708.nii MCI
446
+ 036_S_0759.nii AD
447
+ 018_S_0080.nii MCI
448
+ 033_S_1279.nii MCI
449
+ 006_S_0681.nii Normal
450
+ 067_S_0024.nii Normal
451
+ 016_S_1092.nii MCI
452
+ 037_S_0327.nii Normal
453
+ 031_S_0321.nii AD
454
+ 037_S_0303.nii Normal
455
+ 126_S_1077.nii MCI
456
+ 009_S_0862.nii Normal
457
+ 002_S_0413.nii Normal
458
+ 100_S_0930.nii MCI
459
+ 126_S_1340.nii MCI
460
+ 127_S_0260.nii Normal
461
+ 133_S_0638.nii MCI
462
+ 018_S_0335.nii AD
463
+ 002_S_0559.nii Normal
464
+ 128_S_1242.nii Normal
465
+ 129_S_1204.nii MCI
466
+ 130_S_0956.nii AD
467
+ 024_S_1393.nii MCI
468
+ 006_S_0322.nii MCI
469
+ 013_S_1205.nii AD
470
+ 067_S_0828.nii AD
471
+ 036_S_1240.nii MCI
472
+ 067_S_0019.nii Normal
473
+ 003_S_1021.nii Normal
474
+ 130_S_0460.nii MCI
475
+ 052_S_1168.nii MCI
476
+ 123_S_0050.nii MCI
477
+ 009_S_0751.nii Normal
478
+ 006_S_0498.nii Normal
479
+ 021_S_0424.nii MCI
480
+ 062_S_0578.nii Normal
481
+ 116_S_0648.nii Normal
482
+ 023_S_1306.nii Normal
483
+ 057_S_1265.nii MCI
484
+ 013_S_1276.nii Normal
485
+ 018_S_0043.nii Normal
486
+ 011_S_1282.nii MCI
train_2C_new.txt ADDED
@@ -0,0 +1,519 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 006_S_0681.nii Normal
2
+ 129_S_0778.nii Normal
3
+ 099_S_1144.nii AD
4
+ 011_S_0008.nii Normal
5
+ 032_S_4348.nii Normal
6
+ 011_S_0016.nii Normal
7
+ 141_S_0696.nii AD
8
+ 127_S_5028.nii AD
9
+ 067_S_1253.nii AD
10
+ 021_S_1109.nii AD
11
+ 033_S_4508.nii Normal
12
+ 036_S_5149.nii AD
13
+ 041_S_4083.nii Normal
14
+ 098_S_4002.nii Normal
15
+ 098_S_0884.nii AD
16
+ 127_S_1382.nii AD
17
+ 016_S_4951.nii Normal
18
+ 137_S_4632.nii Normal
19
+ 021_S_0337.nii Normal
20
+ 062_S_0535.nii AD
21
+ 023_S_1190.nii Normal
22
+ 128_S_0528.nii AD
23
+ 114_S_4379.nii AD
24
+ 029_S_4385.nii Normal
25
+ 116_S_1083.nii AD
26
+ 141_S_0726.nii Normal
27
+ 009_S_4612.nii Normal
28
+ 003_S_4872.nii Normal
29
+ 009_S_0751.nii Normal
30
+ 041_S_1002.nii Normal
31
+ 128_S_0245.nii Normal
32
+ 116_S_0657.nii Normal
33
+ 005_S_4707.nii AD
34
+ 128_S_4607.nii Normal
35
+ 033_S_1087.nii AD
36
+ 062_S_0730.nii AD
37
+ 005_S_0553.nii Normal
38
+ 010_S_0420.nii Normal
39
+ 024_S_1063.nii Normal
40
+ 002_S_0619.nii AD
41
+ 037_S_4028.nii Normal
42
+ 014_S_4093.nii Normal
43
+ 023_S_0081.nii Normal
44
+ 023_S_0058.nii Normal
45
+ 013_S_1276.nii Normal
46
+ 094_S_1102.nii AD
47
+ 016_S_4952.nii Normal
48
+ 033_S_5017.nii AD
49
+ 941_S_4376.nii Normal
50
+ 136_S_0186.nii Normal
51
+ 121_S_0953.nii AD
52
+ 135_S_4446.nii Normal
53
+ 098_S_0171.nii Normal
54
+ 128_S_0517.nii AD
55
+ 100_S_1062.nii AD
56
+ 013_S_0502.nii Normal
57
+ 067_S_5205.nii AD
58
+ 098_S_4018.nii Normal
59
+ 116_S_4855.nii Normal
60
+ 014_S_0328.nii AD
61
+ 128_S_0310.nii AD
62
+ 036_S_5112.nii AD
63
+ 011_S_0002.nii Normal
64
+ 141_S_0717.nii Normal
65
+ 021_S_4924.nii AD
66
+ 005_S_0929.nii AD
67
+ 024_S_1171.nii AD
68
+ 123_S_4526.nii AD
69
+ 005_S_0223.nii Normal
70
+ 098_S_4215.nii AD
71
+ 116_S_4732.nii AD
72
+ 011_S_4906.nii AD
73
+ 029_S_1184.nii AD
74
+ 073_S_5090.nii AD
75
+ 029_S_4307.nii AD
76
+ 003_S_4900.nii Normal
77
+ 007_S_4637.nii Normal
78
+ 020_S_0097.nii Normal
79
+ 032_S_4277.nii Normal
80
+ 128_S_0805.nii AD
81
+ 013_S_0575.nii Normal
82
+ 073_S_0089.nii Normal
83
+ 018_S_0277.nii AD
84
+ 130_S_0956.nii AD
85
+ 135_S_4676.nii AD
86
+ 051_S_1123.nii Normal
87
+ 024_S_1307.nii AD
88
+ 136_S_0299.nii AD
89
+ 005_S_0610.nii Normal
90
+ 029_S_0843.nii Normal
91
+ 022_S_4266.nii Normal
92
+ 003_S_0907.nii Normal
93
+ 141_S_1152.nii AD
94
+ 141_S_0340.nii AD
95
+ 014_S_0558.nii Normal
96
+ 068_S_0473.nii Normal
97
+ 082_S_0304.nii Normal
98
+ 051_S_1296.nii AD
99
+ 072_S_4103.nii Normal
100
+ 153_S_4139.nii Normal
101
+ 116_S_0487.nii AD
102
+ 012_S_0712.nii AD
103
+ 033_S_1016.nii Normal
104
+ 067_S_0029.nii AD
105
+ 128_S_4774.nii AD
106
+ 116_S_4010.nii Normal
107
+ 100_S_1286.nii Normal
108
+ 094_S_4649.nii Normal
109
+ 067_S_0828.nii AD
110
+ 041_S_1391.nii AD
111
+ 014_S_0520.nii Normal
112
+ 129_S_4422.nii Normal
113
+ 022_S_4196.nii Normal
114
+ 037_S_0327.nii Normal
115
+ 021_S_4558.nii Normal
116
+ 024_S_4280.nii AD
117
+ 109_S_4378.nii AD
118
+ 002_S_0685.nii Normal
119
+ 029_S_0999.nii AD
120
+ 127_S_4645.nii Normal
121
+ 024_S_4223.nii AD
122
+ 031_S_0554.nii AD
123
+ 011_S_4827.nii AD
124
+ 021_S_0159.nii Normal
125
+ 073_S_0386.nii Normal
126
+ 012_S_0637.nii Normal
127
+ 127_S_4749.nii AD
128
+ 023_S_1289.nii AD
129
+ 011_S_0021.nii Normal
130
+ 035_S_4082.nii Normal
131
+ 005_S_0602.nii Normal
132
+ 062_S_0578.nii Normal
133
+ 003_S_4840.nii Normal
134
+ 068_S_5206.nii AD
135
+ 018_S_0043.nii Normal
136
+ 052_S_0951.nii Normal
137
+ 130_S_1337.nii AD
138
+ 029_S_4290.nii Normal
139
+ 022_S_0007.nii AD
140
+ 116_S_4195.nii AD
141
+ 067_S_0110.nii AD
142
+ 128_S_0740.nii AD
143
+ 023_S_1306.nii Normal
144
+ 016_S_0991.nii AD
145
+ 116_S_4537.nii AD
146
+ 029_S_4279.nii Normal
147
+ 021_S_4718.nii AD
148
+ 057_S_0818.nii Normal
149
+ 032_S_4304.nii Normal
150
+ 002_S_0559.nii Normal
151
+ 009_S_4388.nii Normal
152
+ 012_S_1009.nii Normal
153
+ 057_S_0934.nii Normal
154
+ 036_S_1001.nii AD
155
+ 141_S_0790.nii AD
156
+ 016_S_4353.nii AD
157
+ 068_S_4174.nii Normal
158
+ 018_S_0633.nii AD
159
+ 073_S_5023.nii Normal
160
+ 029_S_0866.nii Normal
161
+ 007_S_4911.nii AD
162
+ 057_S_4110.nii AD
163
+ 022_S_0130.nii Normal
164
+ 009_S_1334.nii AD
165
+ 003_S_5187.nii AD
166
+ 128_S_0545.nii Normal
167
+ 032_S_4921.nii Normal
168
+ 037_S_0467.nii Normal
169
+ 027_S_4962.nii AD
170
+ 009_S_0862.nii Normal
171
+ 941_S_1197.nii Normal
172
+ 137_S_4258.nii AD
173
+ 003_S_4892.nii AD
174
+ 137_S_4211.nii AD
175
+ 116_S_0648.nii Normal
176
+ 137_S_0686.nii Normal
177
+ 127_S_5056.nii AD
178
+ 035_S_4783.nii AD
179
+ 011_S_4105.nii Normal
180
+ 136_S_0184.nii Normal
181
+ 094_S_1164.nii AD
182
+ 123_S_0298.nii Normal
183
+ 099_S_0492.nii AD
184
+ 062_S_0690.nii AD
185
+ 094_S_4282.nii AD
186
+ 022_S_0014.nii Normal
187
+ 014_S_4576.nii Normal
188
+ 024_S_0985.nii Normal
189
+ 131_S_0457.nii AD
190
+ 127_S_4604.nii Normal
191
+ 094_S_1267.nii Normal
192
+ 035_S_0555.nii Normal
193
+ 011_S_4845.nii AD
194
+ 032_S_4386.nii Normal
195
+ 037_S_0454.nii Normal
196
+ 068_S_0109.nii AD
197
+ 002_S_0295.nii Normal
198
+ 029_S_4585.nii Normal
199
+ 036_S_0760.nii AD
200
+ 002_S_1261.nii Normal
201
+ 128_S_4586.nii Normal
202
+ 137_S_0841.nii AD
203
+ 067_S_0056.nii Normal
204
+ 036_S_0759.nii AD
205
+ 941_S_4255.nii Normal
206
+ 006_S_0653.nii AD
207
+ 007_S_1339.nii AD
208
+ 135_S_4954.nii AD
209
+ 137_S_0972.nii Normal
210
+ 033_S_4505.nii Normal
211
+ 006_S_0498.nii Normal
212
+ 127_S_0844.nii AD
213
+ 127_S_0754.nii AD
214
+ 116_S_0370.nii AD
215
+ 014_S_0548.nii Normal
216
+ 062_S_1099.nii Normal
217
+ 100_S_0893.nii AD
218
+ 029_S_0845.nii Normal
219
+ 068_S_0210.nii Normal
220
+ 072_S_4391.nii Normal
221
+ 027_S_1254.nii AD
222
+ 003_S_0981.nii Normal
223
+ 032_S_0147.nii AD
224
+ 037_S_4001.nii AD
225
+ 027_S_0850.nii AD
226
+ 023_S_5120.nii AD
227
+ 009_S_5224.nii AD
228
+ 014_S_4039.nii AD
229
+ 007_S_1222.nii Normal
230
+ 137_S_0366.nii AD
231
+ 067_S_0019.nii Normal
232
+ 094_S_0692.nii Normal
233
+ 073_S_4853.nii AD
234
+ 098_S_0149.nii AD
235
+ 037_S_0303.nii Normal
236
+ 041_S_1435.nii AD
237
+ 023_S_0084.nii AD
238
+ 037_S_4879.nii AD
239
+ 082_S_5029.nii AD
240
+ 021_S_0343.nii AD
241
+ 016_S_4887.nii AD
242
+ 099_S_0040.nii Normal
243
+ 024_S_4905.nii AD
244
+ 099_S_0352.nii Normal
245
+ 013_S_1161.nii AD
246
+ 005_S_0221.nii AD
247
+ 011_S_4075.nii Normal
248
+ 014_S_0519.nii Normal
249
+ 007_S_4387.nii Normal
250
+ 116_S_1249.nii Normal
251
+ 141_S_0767.nii Normal
252
+ 137_S_4466.nii Normal
253
+ 016_S_0359.nii Normal
254
+ 067_S_0020.nii AD
255
+ 141_S_1094.nii Normal
256
+ 067_S_1185.nii AD
257
+ 073_S_4739.nii Normal
258
+ 127_S_0622.nii Normal
259
+ 114_S_0416.nii Normal
260
+ 073_S_4552.nii Normal
261
+ 128_S_4832.nii Normal
262
+ 020_S_1288.nii Normal
263
+ 003_S_1257.nii AD
264
+ 037_S_4071.nii Normal
265
+ 137_S_4756.nii AD
266
+ 082_S_1256.nii Normal
267
+ 109_S_1014.nii Normal
268
+ 011_S_0183.nii AD
269
+ 023_S_1262.nii AD
270
+ 041_S_1368.nii AD
271
+ 035_S_0156.nii Normal
272
+ 018_S_0369.nii Normal
273
+ 114_S_0166.nii Normal
274
+ 009_S_4337.nii Normal
275
+ 127_S_4148.nii Normal
276
+ 041_S_4060.nii Normal
277
+ 116_S_0360.nii Normal
278
+ 007_S_0316.nii AD
279
+ 941_S_1203.nii Normal
280
+ 007_S_1304.nii AD
281
+ 067_S_0812.nii AD
282
+ 068_S_0127.nii Normal
283
+ 068_S_4968.nii AD
284
+ 027_S_0404.nii AD
285
+ 068_S_4424.nii Normal
286
+ 032_S_1037.nii AD
287
+ 035_S_0341.nii AD
288
+ 018_S_0335.nii AD
289
+ 023_S_0963.nii Normal
290
+ 098_S_4506.nii Normal
291
+ 073_S_0312.nii Normal
292
+ 007_S_4516.nii Normal
293
+ 005_S_0814.nii AD
294
+ 021_S_0647.nii Normal
295
+ 021_S_0642.nii AD
296
+ 129_S_4396.nii Normal
297
+ 126_S_0784.nii AD
298
+ 135_S_4598.nii Normal
299
+ 133_S_1170.nii AD
300
+ 116_S_0382.nii Normal
301
+ 123_S_0088.nii AD
302
+ 116_S_1232.nii Normal
303
+ 007_S_1248.nii AD
304
+ 130_S_1290.nii AD
305
+ 123_S_0113.nii Normal
306
+ 116_S_0392.nii AD
307
+ 109_S_1192.nii AD
308
+ 130_S_1201.nii AD
309
+ 128_S_0500.nii Normal
310
+ 014_S_1095.nii AD
311
+ 036_S_0576.nii Normal
312
+ 057_S_0474.nii AD
313
+ 018_S_0682.nii AD
314
+ 041_S_4014.nii Normal
315
+ 041_S_4041.nii Normal
316
+ 023_S_0031.nii Normal
317
+ 002_S_1280.nii Normal
318
+ 027_S_1081.nii AD
319
+ 003_S_4839.nii Normal
320
+ 126_S_0606.nii AD
321
+ 036_S_1023.nii Normal
322
+ 128_S_1242.nii Normal
323
+ 123_S_0106.nii Normal
324
+ 116_S_4092.nii Normal
325
+ 114_S_0228.nii AD
326
+ 031_S_0321.nii AD
327
+ 128_S_0266.nii AD
328
+ 016_S_4009.nii AD
329
+ 051_S_4980.nii AD
330
+ 116_S_4483.nii Normal
331
+ 126_S_0506.nii Normal
332
+ 133_S_0525.nii Normal
333
+ 035_S_0048.nii Normal
334
+ 135_S_5275.nii AD
335
+ 128_S_1430.nii AD
336
+ 036_S_0577.nii AD
337
+ 014_S_4615.nii AD
338
+ 041_S_4200.nii Normal
339
+ 003_S_4644.nii Normal
340
+ 131_S_0497.nii AD
341
+ 022_S_0066.nii Normal
342
+ 033_S_1285.nii AD
343
+ 007_S_1206.nii Normal
344
+ 057_S_1371.nii AD
345
+ 126_S_4686.nii AD
346
+ 123_S_4362.nii Normal
347
+ 021_S_4254.nii Normal
348
+ 032_S_1169.nii Normal
349
+ 010_S_0472.nii Normal
350
+ 037_S_4308.nii Normal
351
+ 041_S_0262.nii Normal
352
+ 036_S_0813.nii Normal
353
+ 011_S_4120.nii Normal
354
+ 037_S_4770.nii AD
355
+ 023_S_0916.nii AD
356
+ 941_S_1202.nii Normal
357
+ 127_S_0259.nii Normal
358
+ 002_S_0938.nii AD
359
+ 007_S_4620.nii Normal
360
+ 062_S_0768.nii Normal
361
+ 014_S_4577.nii Normal
362
+ 005_S_4910.nii AD
363
+ 100_S_0047.nii Normal
364
+ 033_S_4179.nii Normal
365
+ 037_S_0627.nii AD
366
+ 130_S_0969.nii Normal
367
+ 133_S_1055.nii AD
368
+ 128_S_5123.nii AD
369
+ 094_S_4560.nii Normal
370
+ 013_S_1205.nii AD
371
+ 131_S_0319.nii Normal
372
+ 012_S_0803.nii AD
373
+ 126_S_0605.nii Normal
374
+ 022_S_0096.nii Normal
375
+ 022_S_0219.nii AD
376
+ 029_S_0836.nii AD
377
+ 073_S_4559.nii Normal
378
+ 010_S_0419.nii Normal
379
+ 016_S_4591.nii AD
380
+ 073_S_4795.nii Normal
381
+ 033_S_0741.nii Normal
382
+ 057_S_1379.nii AD
383
+ 137_S_1041.nii AD
384
+ 033_S_1281.nii AD
385
+ 068_S_4340.nii Normal
386
+ 057_S_1373.nii AD
387
+ 022_S_0129.nii AD
388
+ 099_S_0534.nii Normal
389
+ 099_S_4076.nii Normal
390
+ 127_S_0431.nii AD
391
+ 033_S_5013.nii AD
392
+ 128_S_0522.nii Normal
393
+ 012_S_0689.nii AD
394
+ 033_S_1283.nii AD
395
+ 072_S_0315.nii Normal
396
+ 098_S_4050.nii Normal
397
+ 137_S_0283.nii Normal
398
+ 098_S_4275.nii Normal
399
+ 116_S_4338.nii AD
400
+ 133_S_0493.nii Normal
401
+ 011_S_0003.nii AD
402
+ 141_S_0853.nii AD
403
+ 029_S_1056.nii AD
404
+ 135_S_4657.nii AD
405
+ 009_S_5027.nii AD
406
+ 082_S_1377.nii AD
407
+ 133_S_0433.nii Normal
408
+ 053_S_1044.nii AD
409
+ 002_S_0413.nii Normal
410
+ 031_S_0618.nii Normal
411
+ 067_S_0177.nii Normal
412
+ 011_S_4278.nii Normal
413
+ 016_S_0538.nii Normal
414
+ 062_S_0793.nii AD
415
+ 023_S_0139.nii AD
416
+ 033_S_1308.nii AD
417
+ 135_S_5015.nii AD
418
+ 009_S_5252.nii AD
419
+ 023_S_0093.nii AD
420
+ 127_S_4992.nii AD
421
+ 014_S_0356.nii AD
422
+ 073_S_0311.nii Normal
423
+ 094_S_4737.nii AD
424
+ 099_S_4994.nii AD
425
+ 011_S_0023.nii Normal
426
+ 135_S_4863.nii AD
427
+ 003_S_0931.nii Normal
428
+ 099_S_4104.nii Normal
429
+ 012_S_0720.nii AD
430
+ 027_S_4964.nii AD
431
+ 031_S_1209.nii AD
432
+ 041_S_0125.nii Normal
433
+ 067_S_0024.nii Normal
434
+ 137_S_0438.nii AD
435
+ 007_S_4568.nii AD
436
+ 073_S_4155.nii Normal
437
+ 129_S_4371.nii Normal
438
+ 009_S_5037.nii AD
439
+ 023_S_4501.nii AD
440
+ 052_S_1251.nii Normal
441
+ 052_S_1250.nii Normal
442
+ 128_S_0167.nii AD
443
+ 003_S_1021.nii Normal
444
+ 131_S_0691.nii AD
445
+ 098_S_4003.nii Normal
446
+ 941_S_4066.nii Normal
447
+ 036_S_0672.nii Normal
448
+ 941_S_1195.nii Normal
449
+ 130_S_0232.nii Normal
450
+ 136_S_0194.nii AD
451
+ 011_S_0022.nii Normal
452
+ 022_S_4291.nii Normal
453
+ 033_S_5087.nii AD
454
+ 128_S_4599.nii Normal
455
+ 022_S_4173.nii Normal
456
+ 024_S_5054.nii AD
457
+ 010_S_0829.nii AD
458
+ 033_S_0888.nii AD
459
+ 094_S_0489.nii Normal
460
+ 123_S_0162.nii AD
461
+ 011_S_0010.nii AD
462
+ 073_S_4382.nii Normal
463
+ 153_S_4151.nii Normal
464
+ 032_S_0677.nii Normal
465
+ 100_S_0035.nii Normal
466
+ 094_S_0526.nii Normal
467
+ 109_S_0777.nii AD
468
+ 018_S_0425.nii Normal
469
+ 136_S_0426.nii AD
470
+ 005_S_5038.nii AD
471
+ 116_S_4043.nii Normal
472
+ 099_S_0533.nii Normal
473
+ 041_S_4037.nii Normal
474
+ 099_S_4086.nii Normal
475
+ 007_S_0068.nii Normal
476
+ 067_S_0257.nii Normal
477
+ 094_S_1402.nii AD
478
+ 033_S_4177.nii Normal
479
+ 033_S_0516.nii Normal
480
+ 082_S_4339.nii Normal
481
+ 099_S_0372.nii AD
482
+ 128_S_0216.nii AD
483
+ 013_S_0592.nii AD
484
+ 141_S_1137.nii AD
485
+ 033_S_0733.nii AD
486
+ 094_S_4460.nii Normal
487
+ 033_S_0923.nii Normal
488
+ 137_S_4587.nii Normal
489
+ 067_S_0059.nii Normal
490
+ 016_S_4638.nii Normal
491
+ 067_S_4728.nii AD
492
+ 033_S_0920.nii Normal
493
+ 127_S_4500.nii AD
494
+ 018_S_0286.nii AD
495
+ 014_S_0357.nii AD
496
+ 137_S_4520.nii Normal
497
+ 023_S_0926.nii Normal
498
+ 128_S_4772.nii AD
499
+ 137_S_0459.nii Normal
500
+ 007_S_5196.nii AD
501
+ 073_S_4393.nii Normal
502
+ 067_S_0076.nii AD
503
+ 041_S_4427.nii Normal
504
+ 073_S_4762.nii Normal
505
+ 011_S_0005.nii Normal
506
+ 123_S_0072.nii Normal
507
+ 020_S_0883.nii Normal
508
+ 082_S_0640.nii Normal
509
+ 127_S_4198.nii Normal
510
+ 127_S_0260.nii Normal
511
+ 027_S_0074.nii Normal
512
+ 041_S_4509.nii Normal
513
+ 024_S_4084.nii Normal
514
+ 153_S_4172.nii AD
515
+ 068_S_1191.nii Normal
516
+ 032_S_1101.nii AD
517
+ 020_S_0213.nii AD
518
+ 032_S_4429.nii Normal
519
+ 094_S_4459.nii Normal
train_2classes.txt ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 011_S_0005.nii Normal
2
+ 022_S_0014.nii Normal
3
+ 127_S_1382.nii AD
4
+ 023_S_1262.nii AD
5
+ 068_S_0210.nii Normal
6
+ 067_S_0110.nii AD
7
+ 116_S_0657.nii Normal
8
+ 123_S_0113.nii Normal
9
+ 094_S_1102.nii AD
10
+ 003_S_0981.nii Normal
11
+ 114_S_0228.nii AD
12
+ 941_S_1197.nii Normal
13
+ 022_S_0066.nii Normal
14
+ 024_S_1063.nii Normal
15
+ 137_S_1041.nii AD
16
+ 067_S_1185.nii AD
17
+ 035_S_0555.nii Normal
18
+ 068_S_1191.nii Normal
19
+ 116_S_0360.nii Normal
20
+ 033_S_1281.nii AD
21
+ 099_S_0352.nii Normal
22
+ 032_S_0677.nii Normal
23
+ 094_S_0692.nii Normal
24
+ 011_S_0002.nii Normal
25
+ 126_S_0606.nii AD
26
+ 141_S_0696.nii AD
27
+ 131_S_0691.nii AD
28
+ 023_S_0084.nii AD
29
+ 136_S_0299.nii AD
30
+ 023_S_0093.nii AD
31
+ 012_S_1009.nii Normal
32
+ 141_S_0717.nii Normal
33
+ 941_S_1203.nii Normal
34
+ 057_S_1373.nii AD
35
+ 016_S_0991.nii AD
36
+ 128_S_0167.nii AD
37
+ 094_S_0489.nii Normal
38
+ 127_S_0622.nii Normal
39
+ 116_S_1083.nii AD
40
+ 032_S_0147.nii AD
41
+ 018_S_0425.nii Normal
42
+ 128_S_0740.nii AD
43
+ 031_S_0618.nii Normal
44
+ 068_S_0109.nii AD
45
+ 141_S_0853.nii AD
46
+ 073_S_0089.nii Normal
47
+ 137_S_0459.nii Normal
48
+ 067_S_0056.nii Normal
49
+ 014_S_0357.nii AD
50
+ 098_S_0884.nii AD
51
+ 033_S_0516.nii Normal
52
+ 024_S_0985.nii Normal
53
+ 128_S_0310.nii AD
54
+ 024_S_1171.nii AD
55
+ 141_S_0767.nii Normal
56
+ 100_S_1286.nii Normal
57
+ 012_S_0637.nii Normal
58
+ 133_S_0525.nii Normal
59
+ 133_S_1170.nii AD
60
+ 041_S_1002.nii Normal
61
+ 014_S_0328.nii AD
62
+ 018_S_0369.nii Normal
63
+ 029_S_1056.nii AD
64
+ 033_S_1087.nii AD
65
+ 073_S_0386.nii Normal
66
+ 007_S_1222.nii Normal
67
+ 024_S_1307.nii AD
68
+ 032_S_1101.nii AD
69
+ 116_S_0487.nii AD
70
+ 137_S_0972.nii Normal
71
+ 057_S_1379.nii AD
72
+ 011_S_0008.nii Normal
73
+ 012_S_0803.nii AD
74
+ 013_S_0575.nii Normal
75
+ 136_S_0186.nii Normal
76
+ 014_S_0548.nii Normal
77
+ 037_S_0467.nii Normal
78
+ 021_S_0647.nii Normal
79
+ 127_S_0431.nii AD
80
+ 014_S_0519.nii Normal
81
+ 041_S_1391.nii AD
82
+ 029_S_0866.nii Normal
83
+ 033_S_1283.nii AD
84
+ 029_S_1184.nii AD
85
+ 094_S_1164.nii AD
86
+ 020_S_0213.nii AD
87
+ 021_S_1109.nii AD
88
+ 005_S_0221.nii AD
89
+ 114_S_0416.nii Normal
90
+ 006_S_0653.nii AD
91
+ 005_S_0929.nii AD
92
+ 036_S_0672.nii Normal
93
+ 082_S_1256.nii Normal
94
+ 022_S_0130.nii Normal
95
+ 128_S_1430.nii AD
96
+ 005_S_0814.nii AD
97
+ 012_S_0720.nii AD
98
+ 016_S_0359.nii Normal
99
+ 021_S_0642.nii AD
100
+ 057_S_0818.nii Normal
101
+ 062_S_0730.nii AD
102
+ 068_S_0473.nii Normal
103
+ 136_S_0184.nii Normal
104
+ 035_S_0341.nii AD
105
+ 100_S_1062.nii AD
106
+ 123_S_0298.nii Normal
107
+ 022_S_0096.nii Normal
108
+ 067_S_0177.nii Normal
109
+ 109_S_0777.nii AD
110
+ 035_S_0156.nii Normal
111
+ 014_S_0520.nii Normal
112
+ 128_S_0517.nii AD
113
+ 011_S_0016.nii Normal
114
+ 141_S_1094.nii Normal
115
+ 136_S_0426.nii AD
116
+ 128_S_0266.nii AD
117
+ 057_S_0934.nii Normal
118
+ 035_S_0048.nii Normal
119
+ 023_S_0081.nii Normal
120
+ 002_S_0685.nii Normal
121
+ 116_S_1232.nii Normal
122
+ 131_S_0457.nii AD
123
+ 062_S_0793.nii AD
124
+ 027_S_0074.nii Normal
125
+ 133_S_0493.nii Normal
126
+ 057_S_0474.nii AD
127
+ 136_S_0194.nii AD
128
+ 131_S_0497.nii AD
129
+ 062_S_0690.nii AD
130
+ 023_S_0926.nii Normal
131
+ 011_S_0023.nii Normal
132
+ 062_S_0768.nii Normal
133
+ 941_S_1202.nii Normal
134
+ 005_S_0602.nii Normal
135
+ 032_S_1169.nii Normal
136
+ 137_S_0686.nii Normal
137
+ 053_S_1044.nii AD
138
+ 005_S_0610.nii Normal
139
+ 128_S_0522.nii Normal
140
+ 941_S_1195.nii Normal
141
+ 032_S_1037.nii AD
142
+ 007_S_1339.nii AD
143
+ 123_S_0162.nii AD
144
+ 029_S_0845.nii Normal
145
+ 036_S_0577.nii AD
146
+ 003_S_0907.nii Normal
147
+ 033_S_0741.nii Normal
148
+ 099_S_0534.nii Normal
149
+ 073_S_0311.nii Normal
150
+ 082_S_0304.nii Normal
151
+ 027_S_1254.nii AD
152
+ 011_S_0022.nii Normal
153
+ 003_S_0931.nii Normal
154
+ 020_S_0883.nii Normal
155
+ 052_S_1250.nii Normal
156
+ 100_S_0035.nii Normal
157
+ 033_S_0888.nii AD
158
+ 010_S_0420.nii Normal
159
+ 127_S_0754.nii AD
160
+ 099_S_0492.nii AD
161
+ 013_S_0592.nii AD
162
+ 027_S_1081.nii AD
163
+ 116_S_0382.nii Normal
164
+ 009_S_1334.nii AD
165
+ 123_S_0106.nii Normal
166
+ 099_S_0533.nii Normal
167
+ 011_S_0021.nii Normal
168
+ 018_S_0286.nii AD
169
+ 067_S_0076.nii AD
170
+ 029_S_0836.nii AD
171
+ 067_S_0059.nii Normal
172
+ 037_S_0454.nii Normal
173
+ 067_S_0020.nii AD
174
+ 130_S_1290.nii AD
175
+ 094_S_1402.nii AD
176
+ 002_S_0619.nii AD
177
+ 067_S_0257.nii Normal
178
+ 072_S_0315.nii Normal
179
+ 036_S_0576.nii Normal
180
+ 018_S_0682.nii AD
181
+ 033_S_1308.nii AD
182
+ 100_S_0893.nii AD
183
+ 068_S_0127.nii Normal
184
+ 114_S_0166.nii Normal
185
+ 041_S_0125.nii Normal
186
+ 141_S_1137.nii AD
187
+ 067_S_0029.nii AD
188
+ 033_S_0733.nii AD
189
+ 012_S_0689.nii AD
190
+ 098_S_0149.nii AD
191
+ 013_S_1161.nii AD
192
+ 098_S_0171.nii Normal
193
+ 141_S_0726.nii Normal
194
+ 128_S_0500.nii Normal
195
+ 128_S_0216.nii AD
196
+ 007_S_1248.nii AD
197
+ 021_S_0159.nii Normal
198
+ 022_S_0007.nii AD
199
+ 023_S_0139.nii AD
200
+ 121_S_0953.nii AD
201
+ 033_S_0920.nii Normal
202
+ 099_S_0040.nii Normal
203
+ 062_S_0535.nii AD
204
+ 051_S_1296.nii AD
205
+ 011_S_0003.nii AD
206
+ 041_S_1435.nii AD
207
+ 020_S_1288.nii Normal
208
+ 023_S_0058.nii Normal
209
+ 127_S_0259.nii Normal
210
+ 127_S_0844.nii AD
211
+ 141_S_1152.nii AD
212
+ 036_S_0813.nii Normal
213
+ 067_S_1253.nii AD
214
+ 073_S_0312.nii Normal
215
+ 002_S_0295.nii Normal
216
+ 011_S_0183.nii AD
217
+ 036_S_1023.nii Normal
218
+ 036_S_0760.nii AD
219
+ 128_S_0528.nii AD
220
+ 123_S_0072.nii Normal
221
+ 123_S_0088.nii AD
222
+ 067_S_0812.nii AD
223
+ 109_S_1014.nii Normal
224
+ 023_S_0963.nii Normal
225
+ 014_S_0558.nii Normal
226
+ 051_S_1123.nii Normal
227
+ 057_S_1371.nii AD
228
+ 007_S_1206.nii Normal
229
+ 141_S_0790.nii AD
230
+ 052_S_1251.nii Normal
231
+ 137_S_0438.nii AD
232
+ 005_S_0553.nii Normal
233
+ 002_S_1280.nii Normal
234
+ 126_S_0784.nii AD
235
+ 137_S_0366.nii AD
236
+ 007_S_0316.nii AD
237
+ 011_S_0010.nii AD
238
+ 100_S_0047.nii Normal
239
+ 002_S_1261.nii Normal
240
+ 005_S_0223.nii Normal
241
+ 062_S_1099.nii Normal
242
+ 018_S_0277.nii AD
243
+ 010_S_0472.nii Normal
244
+ 020_S_0097.nii Normal
245
+ 014_S_1095.nii AD
246
+ 109_S_1192.nii AD
247
+ 094_S_0526.nii Normal
248
+ 131_S_0319.nii Normal
249
+ 021_S_0337.nii Normal
250
+ 126_S_0605.nii Normal
251
+ 007_S_0068.nii Normal
252
+ 013_S_0502.nii Normal
253
+ 029_S_0999.nii AD
254
+ 027_S_0404.nii AD
255
+ 027_S_0850.nii AD
256
+ 023_S_0031.nii Normal
257
+ 116_S_1249.nii Normal
258
+ 041_S_1368.nii AD
259
+ 099_S_0372.nii AD
260
+ 007_S_1304.nii AD
261
+ 041_S_0262.nii Normal
262
+ 137_S_0283.nii Normal
263
+ 023_S_1289.nii AD
264
+ 133_S_1055.nii AD
265
+ 031_S_1209.nii AD
266
+ 029_S_0843.nii Normal
267
+ 094_S_1267.nii Normal
268
+ 116_S_0370.nii AD
269
+ 033_S_0923.nii Normal
270
+ 022_S_0129.nii AD
271
+ 130_S_0232.nii Normal
272
+ 036_S_1001.nii AD
273
+ 082_S_1377.nii AD
274
+ 133_S_0433.nii Normal
275
+ 021_S_0343.nii AD
276
+ 023_S_0916.nii AD
277
+ 128_S_0245.nii Normal
278
+ 003_S_1257.nii AD
279
+ 016_S_0538.nii Normal
280
+ 036_S_0759.nii AD
281
+ 006_S_0681.nii Normal
282
+ 067_S_0024.nii Normal
283
+ 037_S_0327.nii Normal
284
+ 031_S_0321.nii AD
285
+ 037_S_0303.nii Normal
286
+ 009_S_0862.nii Normal
287
+ 002_S_0413.nii Normal
288
+ 127_S_0260.nii Normal
289
+ 018_S_0335.nii AD
290
+ 002_S_0559.nii Normal
291
+ 128_S_1242.nii Normal
292
+ 130_S_0956.nii AD
293
+ 013_S_1205.nii AD
294
+ 067_S_0828.nii AD
295
+ 067_S_0019.nii Normal
296
+ 003_S_1021.nii Normal
297
+ 009_S_0751.nii Normal
298
+ 006_S_0498.nii Normal
299
+ 062_S_0578.nii Normal
300
+ 116_S_0648.nii Normal
301
+ 023_S_1306.nii Normal
302
+ 013_S_1276.nii Normal
303
+ 018_S_0043.nii Normal
304
+ 116_S_0392.nii AD
305
+ 010_S_0419.nii Normal
306
+ 126_S_0506.nii Normal
307
+ 130_S_0969.nii Normal
308
+ 023_S_1190.nii Normal
309
+ 052_S_0951.nii Normal
310
+ 141_S_0340.nii AD
311
+ 128_S_0805.nii AD
312
+ 012_S_0712.nii AD
313
+ 129_S_0778.nii Normal
314
+ 031_S_0554.nii AD
315
+ 137_S_0841.nii AD
316
+ 018_S_0633.nii AD
317
+ 033_S_1285.nii AD
318
+ 099_S_1144.nii AD
319
+ 010_S_0829.nii AD
320
+ 033_S_1016.nii Normal
321
+ 128_S_0545.nii Normal
322
+ 037_S_0627.nii AD
323
+ 130_S_1201.nii AD
324
+ 022_S_0219.nii AD
325
+ 002_S_0938.nii AD
326
+ 014_S_0356.nii AD
327
+ 082_S_0640.nii Normal
328
+ 130_S_1337.nii AD
validation_2C_new.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 012_S_1133.nii Normal
2
+ 021_S_4421.nii Normal
3
+ 029_S_4652.nii Normal
4
+ 100_S_0743.nii AD
5
+ 126_S_4494.nii AD
6
+ 007_S_4488.nii Normal
7
+ 010_S_0786.nii AD
8
+ 136_S_0086.nii Normal
9
+ 128_S_4792.nii AD
10
+ 127_S_4940.nii AD
11
+ 009_S_1354.nii AD
12
+ 137_S_4672.nii AD
13
+ 024_S_4158.nii Normal
14
+ 109_S_1013.nii Normal
15
+ 016_S_1263.nii AD
16
+ 021_S_0984.nii Normal
17
+ 070_S_4692.nii AD
18
+ 068_S_5146.nii AD
19
+ 131_S_0436.nii Normal
20
+ 011_S_4949.nii AD
21
+ 002_S_0955.nii AD
22
+ 029_S_0824.nii Normal
23
+ 082_S_4090.nii Normal
24
+ 014_S_4401.nii Normal
25
+ 073_S_1207.nii AD
26
+ 082_S_0363.nii Normal
27
+ 033_S_1098.nii Normal
28
+ 010_S_0067.nii Normal
29
+ 114_S_0173.nii Normal
30
+ 153_S_4372.nii Normal
31
+ 094_S_1090.nii AD
32
+ 094_S_1027.nii AD
33
+ 027_S_1385.nii AD
34
+ 109_S_0876.nii Normal
35
+ 094_S_1397.nii AD
36
+ 022_S_4320.nii Normal
37
+ 027_S_0118.nii Normal
38
+ 141_S_1024.nii AD
39
+ 941_S_4100.nii Normal
40
+ 094_S_4503.nii Normal
41
+ 126_S_0891.nii AD
42
+ 027_S_0403.nii Normal
43
+ 131_S_0123.nii Normal
44
+ 136_S_0196.nii Normal
45
+ 094_S_1241.nii Normal
46
+ 126_S_0405.nii Normal
47
+ 100_S_1113.nii AD
48
+ 123_S_0091.nii AD
49
+ 127_S_4843.nii Normal
50
+ 057_S_0643.nii Normal
51
+ 141_S_0852.nii AD
52
+ 136_S_0300.nii AD
53
+ 068_S_4859.nii AD
54
+ 023_S_0083.nii AD
55
+ 020_S_0899.nii Normal
56
+ 016_S_5057.nii AD
57
+ 098_S_4201.nii AD
58
+ 013_S_1035.nii Normal
59
+ 023_S_4448.nii Normal
60
+ 114_S_0979.nii AD
61
+ 094_S_4089.nii AD
62
+ 035_S_4464.nii Normal
63
+ 033_S_0889.nii AD
64
+ 029_S_4384.nii Normal
65
+ 137_S_0301.nii Normal
66
+ 011_S_0053.nii AD
67
+ 041_S_0898.nii Normal
68
+ 002_S_1018.nii AD
69
+ 131_S_0441.nii Normal
70
+ 032_S_0400.nii AD
71
+ 003_S_5165.nii AD
72
+ 082_S_1079.nii AD
73
+ 126_S_1221.nii AD
74
+ 109_S_0840.nii Normal
75
+ 016_S_5251.nii AD
76
+ 013_S_0996.nii AD
77
+ 127_S_0684.nii Normal
78
+ 116_S_4625.nii AD
79
+ 133_S_0488.nii Normal
80
+ 082_S_5184.nii AD
81
+ 100_S_0015.nii Normal
82
+ 023_S_4164.nii Normal
83
+ 016_S_4963.nii AD
84
+ 941_S_4292.nii Normal
85
+ 033_S_0739.nii AD
86
+ 141_S_0810.nii Normal
87
+ 031_S_0773.nii AD
88
+ 094_S_4234.nii Normal
89
+ 033_S_4176.nii Normal
90
+ 073_S_0565.nii AD
91
+ 002_S_0816.nii AD
92
+ 007_S_0070.nii Normal
93
+ 016_S_4097.nii Normal
94
+ 941_S_1194.nii Normal
95
+ 100_S_0069.nii Normal
96
+ 027_S_1082.nii AD
97
+ 027_S_4802.nii AD
98
+ 130_S_0886.nii Normal
99
+ 098_S_0896.nii Normal
100
+ 032_S_0479.nii Normal