Upload 26 files
- Model_S_h1_m2/Model_Evaluation/Read_me.m +11 -0
- Model_S_h1_m2/Model_Evaluation/Run_Evaluation_Confusion_Matrix.m +54 -0
- Model_S_h1_m2/Model_Evaluation/Run_Evaluation_Model_Individual.m +92 -0
- Model_S_h1_m2/Model_Evaluation/Run_Evaluation_main_Model.m +242 -0
- Model_S_h1_m2/Model_Evaluation/Run_Evaluation_post_Model.m +40 -0
- Model_S_h1_m2/Model_Evaluation/fun_ANN_Model.m +77 -0
- Model_S_h1_m2/Model_Evaluation/fun_activation.m +21 -0
- Model_S_h1_m2/Model_Evaluation/fun_confusion_matrix.m +30 -0
- Model_S_h1_m2/Model_Evaluation/fun_majority_rule_prediction.m +74 -0
- Model_S_h1_m2/Model_Evaluation/fun_predicted_vector_2_label.m +36 -0
- Model_S_h1_m2/Model_Evaluation/fun_proc_batch.m +18 -0
- Model_S_h1_m2/Model_Evaluation/fun_proc_batch_update.m +19 -0
- Model_S_h1_m2/Model_Evaluation/fun_save_model_performance.m +7 -0
- Model_S_h1_m2/Model_Evaluation/fun_softmax.m +39 -0
- Model_S_h1_m2/Model_Evaluation/fun_top_n_label_rate.m +11 -0
- Model_S_h1_m2/Model_Evaluation/fun_transform_data_rgbfeatures.m +107 -0
- Model_S_h1_m2/Training_Evaluation/Run_Training_Evaluation.m +11 -0
- Model_S_h1_m2/Training_Evaluation/S_h1_m2_1_performance.mat +0 -0
- Model_S_h1_m2/Training_Evaluation/fun_activation.m +21 -0
- Model_S_h1_m2/Training_Evaluation/fun_predicted_vector_2_label.m +36 -0
- Model_S_h1_m2/Training_Evaluation/fun_primecolor_2_features.m +42 -0
- Model_S_h1_m2/Training_Evaluation/fun_save_result.m +8 -0
- Model_S_h1_m2/Training_Evaluation/fun_softmax.m +39 -0
- Model_S_h1_m2/Training_Evaluation/fun_training_evaulation.m +98 -0
- Model_S_h1_m2/Training_Evaluation/fun_transform_data_rgbfeatures.m +91 -0
- Model_S_h1_m2/Training_Evaluation/read_me.m +12 -0
Model_S_h1_m2/Model_Evaluation/Read_me.m
ADDED
@@ -0,0 +1,11 @@
%
% Run this file first:
%    Run_Evaluation_main_Model.m
%    (only once for model='GDT' or 'SGD')
% These files can be run anytime afterwards:
% -- Run_Evaluation_post_Model.m
% -- Run_Evaluation_Confusion_Matrix.m
% This file can be run at any time:
% -- Run_Evaluation_Model_Individual.m
%
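A minimal sketch of the run order described above, assuming the scripts are on the MATLAB path and the folders required by Run_Evaluation_main_Model.m already exist (this driver is illustrative and not part of the upload):

% Hypothetical driver illustrating the run order (not part of the upload)
Run_Evaluation_main_Model            % run once per model type; Step 3 inside it is the slow part
Run_Evaluation_post_Model            % accuracy and Top-1 rates per feature-aggregated model
Run_Evaluation_Confusion_Matrix      % per-label positive rates via confusion matrices
Run_Evaluation_Model_Individual      % spot-check a chosen batch of images, any time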
Model_S_h1_m2/Model_Evaluation/Run_Evaluation_Confusion_Matrix.m
ADDED
@@ -0,0 +1,54 @@
%clear all
%%%%% Find positive rate by confusion matrices
%
model='S_h1_m2'; % or 'ANN'
nmb_of_lab=1000;
nmb_of_batches=10;
nmb_of_ft_models=6;
nmb_of_image_set=zeros(1,10);
pr_set=zeros(1,10);
model_accuracy_comparison=zeros(1,nmb_of_ft_models);
pm.nmb_of_lab=nmb_of_lab;
edges=1:1:(nmb_of_lab+1);
top_n_labels=10;

confusion_matrixes=zeros(nmb_of_lab,nmb_of_lab,nmb_of_batches,nmb_of_ft_models);
model_c_matrixes=zeros(nmb_of_lab,nmb_of_lab,nmb_of_ft_models);
top_100_pr_by_batch=zeros(nmb_of_batches,nmb_of_ft_models); % number of labels with a 100% positive rate
top_100_pr_by_model=zeros(1,nmb_of_ft_models); % number of labels with a 100% positive rate
batch_nmbs=zeros(1,nmb_of_batches);
for fs=1:nmb_of_ft_models
    top_100_accnt=[];
    c_matrix=zeros(nmb_of_lab,nmb_of_lab);
    for imgnt1kdataset=1:nmb_of_batches
        reportname1 = sprintf('../Evaluation_Data/Model_Accuracy/training_data_batch_%d_feature_module_performance_%s_var.mat', imgnt1kdataset,model);
        aa=sprintf('classification_data_%d',fs);
        bb=load(reportname1,aa);
        c_data=bb.(aa);
        true_lab=c_data(:,1);
        pred_lab=c_data(:,2);
        nmb_of_data=length(true_lab);
        c_mtx_output=fun_confusion_matrix(true_lab,pred_lab,pm);
        bb=c_mtx_output.conf_matrix;
        confusion_matrixes(:,:,imgnt1kdataset,fs)=bb;
        c_matrix=c_matrix+nmb_of_data*bb;
        top_100_pr_by_batch(imgnt1kdataset,fs)=length(c_mtx_output.lab_100);
        top_100_accnt=[top_100_accnt,c_mtx_output.lab_100];
        batch_nmbs(imgnt1kdataset)=nmb_of_data;
    end
    model_c_matrixes(:,:,fs)=c_matrix/sum(batch_nmbs);
    histN = histcounts(top_100_accnt,edges);
    idx=(histN==nmb_of_batches);
    top_100_pr_by_model(fs)=sum(1*idx);
end
mean(top_100_pr_by_batch,1) % average number of 100%-rate labels per data batch
top_100_pr_by_model % number of labels at a 100% rate in every batch, per feature-aggregated model
%%
fs=6; % 1 to 6
[rates,labs]=fun_top_n_label_rate(model_c_matrixes(:,:,fs),top_n_labels);
[rates,labs]'

imgnt1kdataset=1; %1 to 10 by data batch
[rates,labs]=fun_top_n_label_rate(confusion_matrixes(:,:,imgnt1kdataset,fs),259);
[rates,labs]'
%%
Model_S_h1_m2/Model_Evaluation/Run_Evaluation_Model_Individual.m
ADDED
@@ -0,0 +1,92 @@
% clear all
model='S_h1_m2';
nmb_of_hidden_layers=1;
% model_type='GDT'; % or 'SGD'
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%% Choose any combination to run %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
imgnt1kdataset=2; % in [1,10]
image_batch=1:100; % randomly choose in [1, nmb_of_images]

reportname1 = sprintf('/work/mathbiology/lheath2/data/imagenet1k/mat/train_data_batch_%d.mat', imgnt1kdataset);
% or choose validation data
% reportname1 = sprintf('/work/mathbiology/lheath2/data/imagenet1k/mat/val_data.mat');
% image_batch=1:100;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%% Input Model Parameters
nmb_of_modules=40;
nmb_of_module_subsets=2;
nmb_of_labs_per_module=25;
cross_entropy=1;
channels_names={'R','G','B','RGg1','RBg1','GBg1','RGg2','RBg2','GBg2','RB','RG','GB','eRGB','BW','X','Y','Z'};
feature_RGB=[1 0 0
    0 1 0
    0 0 1
    0.618 0.382 0
    0.618 0 0.382
    0 0.618 0.382
    0.382 0.618 0
    0.382 0 0.618
    0 0.382 0.618
    0.5 0.5 0
    0.5 0 0.5
    0 0.5 0.5
    1/3 1/3 1/3
    0.299 0.587 0.114
    0.4125 0.3576 0.1804
    0.2126 0.7152 0.0722
    0.0193 0.1192 0.9502];
nmb_of_colors=length(channels_names);

param.model=model;
param.nmb_of_hidden_layers=nmb_of_hidden_layers;
param.image_size=[64,64];
param.nmb_of_colors=nmb_of_colors;
param.downsizing=2;
param.x_trim=1;
param.y_trim=1;
param.compute_decimal_place=4;
param.dwnsz_on=1;

param.patch=0;
param.nmb_of_modules=nmb_of_modules;
param.nmb_of_module_subsets=nmb_of_module_subsets;
param.channels_names=channels_names;
param.cross_entropy=cross_entropy;
param.nmb_of_labs_per_module=nmb_of_labs_per_module;
param.feature_RGB=feature_RGB;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

tic
data_load=load(reportname1,'data','labels');

input_image=data_load.data(image_batch,:); % Input images to the trained model
true_label=double(data_load.labels(image_batch));
t_img=fun_transform_data_rgbfeatures(input_image,param); % Transform the input
image=t_img.transformed_image;
mpout=fun_ANN_Model(image,param); % Output of the model's ANN

toc
%%
% Choose any combination for 'param.channel_sel' to build
% the a-la-carte, feature-aggregated model's accuracy rate
%

param.channel_sel=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17];
% param.channel_sel=[1,2,3,4,5,6,7,8,9];
% param.channel_sel=[1,2,3];

[pred_label,likelyhood]=fun_majority_rule_prediction(mpout,param); % Output of the model

nmb_of_images=length(true_label);
idx=(abs(true_label-pred_label)==0);
aa=sum(1*idx);
pr=aa/nmb_of_images*100 % Positive rate, i.e., the accuracy of the model
%%
% Top-1 rate
bb=likelyhood(idx);
cc=length(bb);
idx1=(bb==length(param.channel_sel));
aa=sum(idx1*1);
top_1=aa/cc*100
Model_S_h1_m2/Model_Evaluation/Run_Evaluation_main_Model.m
ADDED
@@ -0,0 +1,242 @@
%%%%%%%%%%% Model Evaluation %%%%%%%%%%%
% The model is defined by its parameters W and b, which are saved in folder:
%     ../Model_Parameter
%
% Choose model = 'any_name'.
%
% Step 3 can take a long time.
%
% The last part/section of Step 4 can be run anytime once this script
% is executed.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%% Step 1: Create two folders: ../Evaluation_Data/Model_Performance
%                                   ../Evaluation_Data/Model_Accuracy
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%% Step 2: Initialize storage for the performance results (run only once).
%
model='S_h1_m2'; %'ANN' % for no distinction
nmb_of_hidden_layers=1;

nmb_of_data_batches=10;
nmb_of_modules=40;
nmb_of_module_subsets=2;
nmb_of_feature_processes=6;
nmb_of_feature_sel=3;
nmb_of_features=17;

for imgnt1kdataset=1:nmb_of_data_batches
    reportname1 = sprintf('/work/mathbiology/lheath2/data/imagenet1k/mat/train_data_batch_%d.mat', imgnt1kdataset);
    data_load=load(reportname1,'labels');
    nmb_of_images=length(data_load.labels);
    aa=mod(nmb_of_features,nmb_of_feature_sel);
    for feature=1:nmb_of_feature_processes
        if feature<nmb_of_feature_processes
            nmb_of_sel_features=nmb_of_feature_sel;
        else
            if aa==0
                nmb_of_sel_features=nmb_of_feature_sel;
            else
                nmb_of_sel_features=aa;
            end
        end
        training_performance=zeros(2,nmb_of_modules,nmb_of_module_subsets,nmb_of_sel_features,nmb_of_images);
        image0_perf=1;
        reportname1 = sprintf('../Evaluation_Data/Model_Performance/training_performance_batch_%d_feature_%d_performance_%s_var.mat',...
            imgnt1kdataset, feature, model);
        str=struct('training_performance',training_performance,'image0_perf',image0_perf);
        save(reportname1,"-fromstruct",str);
    end
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%%%%%%% Step 3: Obtain the output of the model and the input, i.e., the training images
% This part can be run repeatedly until it is completed
patch=0;
nmb_of_labs_per_module=25;
cross_entropy=1;

param.model=model;
param.nmb_of_hidden_layers=nmb_of_hidden_layers;

param.image_size=[64,64];
param.downsizing=2;
param.x_trim=1;
param.y_trim=1;
param.compute_decimal_place=4;
param.dwnsz_on=1;

param.patch=patch;
param.nmb_of_modules=nmb_of_modules;
param.nmb_of_module_subsets=nmb_of_module_subsets;

param.cross_entropy=cross_entropy;
param.nmb_of_labs_per_module=nmb_of_labs_per_module;
param.channel_sel=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17];
nmb_of_features=length(param.channel_sel);
nmb_of_batches=100;

param.channels_names=[];
param.nmb_of_colors=0;
param.feature_RGB=[];

parfor imgnt1kdataset=1:nmb_of_data_batches
    channels_names={'R','G','B','RGg1','RBg1','GBg1','RGg2','RBg2','GBg2','RB','RG','GB','eRGB','BW','X','Y','Z'};
    feature_RGB=[1 0 0
        0 1 0
        0 0 1
        0.618 0.382 0
        0.618 0 0.382
        0 0.618 0.382
        0.382 0.618 0
        0.382 0 0.618
        0 0.382 0.618
        0.5 0.5 0
        0.5 0 0.5
        0 0.5 0.5
        1/3 1/3 1/3
        0.299 0.587 0.114
        0.4125 0.3576 0.1804
        0.2126 0.7152 0.0722
        0.0193 0.1192 0.9502];
    pm=param;
    %%%%%%%%%%% Model's input, i.e., the training images
    reportname1 = sprintf('/work/mathbiology/lheath2/data/imagenet1k/mat/train_data_batch_%d.mat', imgnt1kdataset);
    data_load=load(reportname1,'data');
    [nmb_of_images,~]=size(data_load.data);
    for feature=1:nmb_of_feature_processes
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        reportname1 = sprintf('../Evaluation_Data/Model_Performance/training_performance_batch_%d_feature_%d_performance_%s_var.mat',...
            imgnt1kdataset, feature, model);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        saved_load=load(reportname1);
        training_performance=saved_load.training_performance;
        % training_performance=zeros(2,nmb_of_modules,nmb_of_module_subsets,nmb_of_colors,nmb_of_images);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        image0_perf=saved_load.image0_perf;
        if feature<nmb_of_feature_processes
            feature_seq=(1:nmb_of_feature_sel)+(feature-1)*nmb_of_feature_sel;
        else
            feature_seq=(1+(feature-1)*nmb_of_feature_sel):nmb_of_features;
        end
        pm.channels_names=channels_names(feature_seq);
        pm.nmb_of_colors=length(pm.channels_names);
        pm.feature_RGB=feature_RGB(feature_seq,:);
        if image0_perf<nmb_of_images
            for batch_nmb=1:nmb_of_batches
                image_batch=fun_proc_batch(nmb_of_images,nmb_of_batches,batch_nmb,image0_perf);
                if ~isempty(image_batch)
                    inputimage=data_load.data(image_batch,:);
                    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                    output=fun_transform_data_rgbfeatures(inputimage,pm);
                    image=output.transformed_image;
                    batch_training_performance=fun_ANN_Model(image,pm);
                    %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                    training_performance(:,:,:,:,image_batch)=batch_training_performance;
                    image0_perf=image_batch(end);
                end
                [imgnt1kdataset, feature, batch_nmb]
                out=fun_save_model_performance(model,imgnt1kdataset,feature,...
                    training_performance,image0_perf);
            end
        end
        fprintf('Model %s imgnt1kdataset = %d, pf = %d\n',model,imgnt1kdataset,feature);
    end
end
%%%%%%%%%%%%%%%%%
% Combine the performances of all features for the aggregated model
%
for imgnt1kdataset=1:nmb_of_data_batches
    aa=[];
    for feature=1:nmb_of_feature_processes
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        reportname1 = sprintf('../Evaluation_Data/Model_Performance/training_performance_batch_%d_feature_%d_performance_%s_var.mat',...
            imgnt1kdataset, feature, model);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        saved_load=load(reportname1);
        bb=saved_load.training_performance;
        aa=cat(4,aa,bb);
    end
    training_performance=aa;
    reportname1 = sprintf('../Evaluation_Data/Model_Accuracy/training_data_batch_%d_feature_module_performance_%s_var.mat',...
        imgnt1kdataset, model);
    save(reportname1,'training_performance','-v7.3');
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%%%%%%% Step 4: Obtain the accuracy of the model
%
%%%%%%% Obtain the output of the model, i.e., the label prediction
%       of the input images
nmb_of_batches=12;
%%%%%%%%%%%%%% Key Input %%%%%%%%%%%%%
% six different feature-aggregates:
feature_sel=[1 2 3 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
    1 2 3 4 5 6 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
    1 2 3 4 5 6 7 8 9 NaN NaN NaN NaN NaN NaN NaN NaN
    1 2 3 4 5 6 7 8 9 10 11 12 NaN NaN NaN NaN NaN
    1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 NaN NaN
    1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17];
nmb_of_ft_models=length(feature_sel(:,1));
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for fs=1:nmb_of_ft_models
    aa=feature_sel(fs,:);
    idx=~isnan(aa);
    param.channel_sel=aa(idx);
    for imgnt1kdataset=1:nmb_of_data_batches
        reportname1 = sprintf('/work/mathbiology/lheath2/data/imagenet1k/mat/train_data_batch_%d.mat', imgnt1kdataset);
        data_load=load(reportname1,'labels');
        nmb_of_images=length(data_load.labels);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        reportname1 = sprintf('../Evaluation_Data/Model_Accuracy/training_data_batch_%d_feature_module_performance_%s_var.mat',...
            imgnt1kdataset, model);
        %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        saved_load=load(reportname1);
        training_performance=saved_load.training_performance;
        classification_data=zeros(nmb_of_images,4);
        for batch_nmb=1:nmb_of_batches
            image_batch=fun_proc_batch_update(nmb_of_images,nmb_of_batches,batch_nmb);
            if ~isempty(image_batch)
                true_label=double(data_load.labels(image_batch));
                training_performance_temp=training_performance(:,:,:,:,image_batch);
                [predicted_label,likelyhood]=fun_majority_rule_prediction(training_performance_temp,param);
                top_1_majority=length(param.channel_sel);
                classification_data(image_batch,:)=[true_label;predicted_label;likelyhood;0*likelyhood+top_1_majority]';
            end
        end
        reportname1 = sprintf('../Evaluation_Data/Model_Accuracy/training_data_batch_%d_feature_module_performance_%s_var.mat', imgnt1kdataset,model);
        aa=sprintf('classification_data_%d',fs);
        str=struct(aa,classification_data);
        save(reportname1,"-fromstruct",str,'-append');
    end
    fprintf('Model %s feature = %d\n',model, fs);
end
%%
%%%%%%%%% Find the accuracy rates for featured models
% This part can be run anytime once the main scripts above are executed.
%
% % nmb_of_ft_models=5;
% % nmb_of_image_set=zeros(1,10);
% % pr_set=zeros(1,10);
% % model_accuracy_comparison=zeros(1,nmb_of_ft_models);
% % for fs=1:nmb_of_ft_models
% %     for imgnt1kdataset=1:10
% %         reportname1 = sprintf('../Evaluation_Data/Model_Accuracy_1/training_data_batch_%d_feature_module_performance_%s_var.mat', imgnt1kdataset,model);
% %         aa=sprintf('classification_data_%d',fs);
% %         bb=load(reportname1,aa);
% %         c_data=bb.(aa);
% %         true_lab=c_data(:,1);
% %         pred_lab=c_data(:,2);
% %         nmb_of_images=length(true_lab);
% %         nmb_of_image_set(imgnt1kdataset)=nmb_of_images;
% %         idx=(abs(true_lab-pred_lab)==0);
% %         aa=sum(1*idx);
% %         pr=aa/nmb_of_images*100;
% %         pr_set(imgnt1kdataset)=pr;
% %     end
% %     model_accuracy_comparison(fs)=nmb_of_image_set*pr_set'/sum(nmb_of_image_set);
% % end
% % %%%% This shows the accuracy rates for the five featured models:
% % model_accuracy_comparison
%%
Model_S_h1_m2/Model_Evaluation/Run_Evaluation_post_Model.m
ADDED
@@ -0,0 +1,40 @@
%%%%%%%% Find the accuracy rates for featured models
% This part can be run anytime once the main scripts above are executed.
%
model='S_h1_m2'; % or 'ANN'
nmb_of_ft_models=6;
nmb_of_image_set=zeros(1,10);
pr_set=zeros(1,10);
top_1_set=zeros(1,10);
model_accuracy_comparison=zeros(2,nmb_of_ft_models);
for fs=1:nmb_of_ft_models
    for imgnt1kdataset=1:10
        reportname1 = sprintf('../Evaluation_Data/Model_Accuracy/training_data_batch_%d_feature_module_performance_%s_var.mat',...
            imgnt1kdataset, model);
        aa=sprintf('classification_data_%d',fs);
        bb=load(reportname1,aa);
        c_data=bb.(aa);
        true_lab=c_data(:,1);
        pred_lab=c_data(:,2);
        likelyhood=c_data(:,3);
        top_1_majority=c_data(1,4); % column 4 stores the number of channels in the majority vote
        nmb_of_images=length(true_lab);
        nmb_of_image_set(imgnt1kdataset)=nmb_of_images;
        idx=(abs(true_lab-pred_lab)==0);
        aa=sum(1*idx);
        pr=aa/nmb_of_images*100;
        pr_set(imgnt1kdataset)=pr;
        %%%%%%%% Top-1 rate %%%%%%%%%%%
        bb=likelyhood(idx);
        cc=length(bb);
        idx1=(bb==top_1_majority);
        aa=sum(idx1*1);
        top_1=aa/cc*100;
        top_1_set(imgnt1kdataset)=top_1;
    end
    model_accuracy_comparison(1,fs)=nmb_of_image_set*pr_set'/sum(nmb_of_image_set);
    model_accuracy_comparison(2,fs)=nmb_of_image_set*top_1_set'/sum(nmb_of_image_set);
end
%%%% This shows the accuracy rates for the multiple featured models
% together with the Top-1 rate:
model_accuracy_comparison
Model_S_h1_m2/Model_Evaluation/fun_ANN_Model.m
ADDED
@@ -0,0 +1,77 @@
function batch_training_performance=fun_ANN_Model(image,param)
% Forward pass of every trained (module, subset, channel) network; returns
% the predicted absolute label and its cross-entropy distance per network
% (and per image when a batch is given).
% model=param.model;
nmb_of_hidden_layers=param.nmb_of_hidden_layers;

patch=param.patch;
nmb_of_modules=param.nmb_of_modules;
nmb_of_module_subsets=param.nmb_of_module_subsets;
nmb_of_colors=param.nmb_of_colors;
channels_names=param.channels_names;
% cross_entropy=param.cross_entropy;
nmb_of_labs_per_module=param.nmb_of_labs_per_module;
sz=size(image);
if length(sz)==2
    nmb_of_data=1;
else
    nmb_of_data=sz(2);
end

if nmb_of_data>1
    batch_training_performance=zeros(2,nmb_of_data,nmb_of_modules,nmb_of_module_subsets,nmb_of_colors);
else
    batch_training_performance=zeros(2,nmb_of_modules,nmb_of_module_subsets,nmb_of_colors);
end
for module=1:nmb_of_modules
    for subset=1:nmb_of_module_subsets
        for color=1:nmb_of_colors
            reportname1 = sprintf('../Model_Parameter/Trained_Parameter_patch_%d_module_%d_subset_%d_ch_%s.mat',...
                patch, module, subset, char(channels_names(color)));
            % str=struct('W', W, 'b', b);
            temp_load=load(reportname1);
            W=temp_load.W;
            b=temp_load.b;
            % Assign the trained parameters
            %
            W1=W.LayerName1;
            W2=W.LayerName2;
            b1=b.LayerName1;
            b2=b.LayerName2;

            if nmb_of_data>1
                a_0=image(:,:,color);
            else
                a_0=image(:,color);
            end
            nmb_of_labels=length(b2);
            z1=W1*a_0+b1;
            [a1,~]=fun_activation(z1);

            z2=W2*a1+b2;
            if nmb_of_hidden_layers==1
                [a2,~]=fun_softmax(z2);
                predicted_vector=a2;
            else
                W3=W.LayerName3;
                b3=b.LayerName3;
                nmb_of_labels=length(b3);
                [a2,~]=fun_activation(z2);

                z3=W3*a2+b3;
                [a3,~]=fun_softmax(z3);
                predicted_vector=a3;
            end
            prediction=fun_predicted_vector_2_label(predicted_vector,nmb_of_labels);
            abs_label=prediction.label+(module-1)*nmb_of_labs_per_module;
            if nmb_of_data>1
                batch_training_performance(:,:,module,subset,color)=[abs_label;prediction.distance];
            else
                batch_training_performance(:,module,subset,color)=[abs_label;prediction.distance];
            end
        end
    end
end
if nmb_of_data>1
    batch_training_performance=permute(batch_training_performance,[1,3,4,5,2]);
end
end
Model_S_h1_m2/Model_Evaluation/fun_activation.m
ADDED
@@ -0,0 +1,21 @@
function [out1,out2]=fun_activation(x)
%
%
%
[sz1,sz2]=size(x);
A=zeros(sz1,sz2);
B=zeros(sz1,sz1,sz2);
for zz=1:sz2
    [A(:,zz),B(:,:,zz)]=ReLU(x(:,zz));
end

out1=A;
out2=B;
%%%%%%%%
    function [y,dy]=ReLU(s)
        aa=(s>0).*1;
        y=aa.*s;
        dy=diag(aa);
    end
%%%%%%%%
end
Model_S_h1_m2/Model_Evaluation/fun_confusion_matrix.m
ADDED
@@ -0,0 +1,30 @@
function output=fun_confusion_matrix(true_lab,pred_lab,pm)
% Row-normalized confusion matrix (in percent) over pm.nmb_of_lab labels,
% plus the list of labels classified correctly 100% of the time.
nmb_of_images=length(true_lab);
nmb_of_lab=pm.nmb_of_lab;
% top_n=pm.top_n;
edges=1:1:(nmb_of_lab+1);
c_matrix=zeros(nmb_of_lab,nmb_of_lab);
seq_lab=1:nmb_of_lab;
% top_n_seq=1:top_n;
p_seq=(1:nmb_of_images)';
for lb=1:nmb_of_lab
    idx1=(true_lab==lb);
    aa=p_seq(idx1);
    nmb=length(aa);
    aa=pred_lab(aa);
    [N,~]=histcounts(aa,edges);
    rt=N/nmb*100;
    c_matrix(lb,:)=rt;
end
aa=diag(c_matrix);
% [~,cc]=sort(aa);
idx100=(aa==100);
output.lab_100=seq_lab(idx100);
% idx=flip(cc);
% c_matrix=c_matrix(idx, idx);
output.conf_matrix=c_matrix;
% top_n_lb=idx(top_n_seq);
% top_n_rt=diag(c_matrix(top_n_seq,top_n_seq));
% output.top_n_inf=[top_n_rt,top_n_lb];
end
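A minimal usage sketch with made-up labels (three classes, six images), showing the two output fields; the numbers are illustrative only:

% Hypothetical example, not part of the upload
pm.nmb_of_lab = 3;
true_lab = [1 1 2 2 3 3]';
pred_lab = [1 1 2 3 3 3]';
out = fun_confusion_matrix(true_lab, pred_lab, pm);
out.conf_matrix   % rows are true labels, entries in percent; row 2 is [0 50 50]
out.lab_100       % labels predicted with a 100% positive rate, here [1 3]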
Model_S_h1_m2/Model_Evaluation/fun_majority_rule_prediction.m
ADDED
@@ -0,0 +1,74 @@
function [predicted_label,likelyhood]=fun_majority_rule_prediction(training_performance_temp,param)
% training_performance_temp=mpout;
% batch_training_performance([lab;dis],batch_of_data,module,subset,color)
% batch_training_performance(:,:,5,2,3)=[[1 2 0];[3 4 -1]];
var_on=1; %0 for minmax, 1 for variance, which is the best; range-2nd best
% channels_names={'R','G','B','BW','RGB'};
% channel_sel=[1,2,3,4,5];
% channel_sel=[1,2,3];
% batch_training_performance=permute(training_performance,[1,5,2,3,4]);
channel_sel=param.channel_sel;

sz=size(training_performance_temp);
if length(sz)==5
    batch_training_performance=permute(training_performance_temp,[1,5,2,3,4]);
    aa=squeeze(batch_training_performance(:,:,:,1,channel_sel)); % to ([lab;dis],batch_of_data,module,color)
    bb=squeeze(batch_training_performance(:,:,:,2,channel_sel));
    cc=cat(3,aa,bb); % cat in module dimension to ([lab;dis],batch_of_data,module,color)
    trnpf=permute(cc,[3,4,2,1]); % to (module,color,batch_of_data,[lab,dis])
    % AB=squeeze(range(trnpf(:,:,:,1),2)); % to (module,batch_of_data) in labels
    AB=trnpf(:,:,:,1); % to (module,color,batch_of_data) in labels
    dtsz=sz(5);
    mdzs=2*sz(2);
else
    batch_training_performance=training_performance_temp;
    aa=squeeze(batch_training_performance(:,:,1,:)); % to ([lab;dis],module,color)
    bb=squeeze(batch_training_performance(:,:,2,:));
    cc=cat(2,aa,bb); % cat in module dimension to ([lab;dis],module,color)
    trnpf=permute(cc,[2,3,1]); % to (module,color,[lab,dis])
    % AB=squeeze(range(trnpf(:,:,1),2)); % to (module) in labels
    AB=trnpf(:,:,1); % to (module,color) in labels
    dtsz=1;
    mdzs=2*sz(2);
end
predicted_label=zeros(1,dtsz);
likelyhood=zeros(1,dtsz);
seq=1:mdzs;

channel_rel_seq=1:length(channel_sel);
if dtsz>1
    for m=1:dtsz
        [~,bb]=mode(squeeze(AB(:,channel_rel_seq,m)),2); %majority rule start
        aa=max(bb);
        idx=(bb==aa);
        seq_0=seq(idx); % majority rule end
        if var_on==1
            seq_1=var(trnpf(seq_0,channel_rel_seq,m,2),0,2);
        else
            % seq_1=max(trnpf(seq_0,channel_sel,m,2),[],2).*var(trnpf(seq_0,channel_sel,m,2),0,2); %var(trnpf(seq_0,channel_sel,m,2),0,2).*sum(trnpf(seq_0,channel_sel,m,2).^2,2);
            % seq_1=range(trnpf(seq_0,channel_sel,m,2),2);
            seq_1=range(trnpf(seq_0,channel_rel_seq,m,2),2).*var(trnpf(seq_0,channel_rel_seq,m,2),0,2);
        end
        [~,idx]=min(seq_1);
        ps=seq_0(idx);
        predicted_label(m)=trnpf(ps,1,m,1);
        likelyhood(m)=aa;
    end
else
    [~,bb]=mode(squeeze(AB(:,channel_rel_seq)),2);
    aa=max(bb);
    idx=(bb==aa);
    seq_0=seq(idx);
    if var_on==1
        seq_1=var(trnpf(seq_0,channel_rel_seq,2),0,2);
    else
        % seq_1=max(trnpf(seq_0,channel_sel,m,2),[],2).*var(trnpf(seq_0,channel_sel,m,2),0,2); %var(trnpf(seq_0,channel_sel,m,2),0,2).*sum(trnpf(seq_0,channel_sel,m,2).^2,2);
        % seq_1=range(trnpf(seq_0,channel_sel,m,2),2);
        seq_1=range(trnpf(seq_0,channel_rel_seq,2),2).*var(trnpf(seq_0,channel_rel_seq,2),0,2);
    end
    [~,idx]=min(seq_1);
    ps=seq_0(idx);
    predicted_label=trnpf(ps,1,1);
    likelyhood=aa;
end
end
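A minimal usage sketch, assuming `mpout` was produced by fun_ANN_Model for a batch of images (a 5-D array of size 2 x modules x subsets x colors x images) and that `param.channel_sel` picks the feature channels that vote:

% Hypothetical call, not part of the upload
param.channel_sel = [1,2,3];   % let only the R, G, B channels vote
[pred_label, likelyhood] = fun_majority_rule_prediction(mpout, param);
% pred_label(k): majority-vote label for image k; ties between module rows are
% broken by the smallest variance of the cross-entropy distances (var_on==1).
% likelyhood(k): number of selected channels that agreed on the winning label;
% a unanimous vote equals length(param.channel_sel), which is what the Top-1
% rate in the evaluation scripts checks.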
Model_S_h1_m2/Model_Evaluation/fun_predicted_vector_2_label.m
ADDED
@@ -0,0 +1,36 @@
function out=fun_predicted_vector_2_label(vector,nmb_of_labels)
%
% predicted vector of length 25 to digit label 1, ..., 25
% vector=predicted_vector;
[~,dtsz]=size(vector);
digit=zeros(1,dtsz);
distance=zeros(1,dtsz);
nn=nmb_of_labels;
v=vector(1:nn,:);
I=eye(nn);
vv=zeros(nn,1);
for j=1:dtsz
    for i=1:nn
        [cc,~]=loss_function(v(:,j),I(:,i));
        vv(i)=cc;
    end
    [aa,idx]=min(vv);
    digit(j)=idx;
    distance(j)=aa;
end
out.label=digit;
out.distance=distance;
%%%%%%%%%%
    function [loss,dloss]=loss_function(predicted_x,true_y)
        % cross_entropy loss
        %
        eps=1e-8;
        [n, N]=size(true_y);
        loss=sum(sum((true_y+eps).*log((true_y+eps)./(predicted_x+eps))))/N;
        % ind_loss=sum((true_y+eps).*log((true_y+eps)./(predicted_x+eps)),1);
        bb=-(true_y(1:end-1,:)+eps)./(predicted_x(1:end-1,:)+eps);
        w=bb+ones(n-1,1)*(true_y(end,:)+eps)./(predicted_x(end,:)+eps);
        dloss=w'/N;
    end
%%%%%%%%%%
end
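A small sketch of what the function returns: the label is the index of the one-hot vector that minimizes the cross-entropy distance to the softmax output, which for a proper probability vector is simply the index of its largest entry (the numbers below are made up):

% Hypothetical example, not part of the upload: two softmax columns over 4 labels
v = [0.7 0.1; 0.1 0.2; 0.1 0.6; 0.1 0.1];
out = fun_predicted_vector_2_label(v, 4);
out.label      % [1 3], the argmax of each column
out.distance   % the corresponding minimal cross-entropy losses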
Model_S_h1_m2/Model_Evaluation/fun_proc_batch.m
ADDED
@@ -0,0 +1,18 @@
function image_batch=fun_proc_batch(nmb_of_images,nmb_of_batches,batch_nmb,image0)
% Index range of images to process for batch batch_nmb, resuming from image0
% (the last image already processed); returns [] when this batch starts
% before image0 or nothing remains to be processed.
image_batch=[];
nmb_of_batch_inc=floor(nmb_of_images/nmb_of_batches);
bstrt=1+(batch_nmb-1)*nmb_of_batch_inc;
bend=batch_nmb*nmb_of_batch_inc;
if batch_nmb<nmb_of_batches && image0<nmb_of_images
    if bstrt>=image0 && image0<bend
        image_batch=image0:bend;
    end
end
if batch_nmb==nmb_of_batches && image0<nmb_of_images
    if bstrt>=image0
        image_batch=image0:nmb_of_images;
    end
end
end
Model_S_h1_m2/Model_Evaluation/fun_proc_batch_update.m
ADDED
@@ -0,0 +1,19 @@
function image_batch=fun_proc_batch_update(nmb_of_images,nmb_of_batches,batch_nmb)
% Index range of images belonging to batch batch_nmb when nmb_of_images is
% split into nmb_of_batches batches; the last batch absorbs the remainder.
image0=1;
image_batch=[];
nmb_of_batch_inc=floor(nmb_of_images/nmb_of_batches);
bstrt=1+(batch_nmb-1)*nmb_of_batch_inc;
bend=batch_nmb*nmb_of_batch_inc;
if batch_nmb<nmb_of_batches && image0<nmb_of_images
    if bstrt>=image0 && image0<bend
        image_batch=bstrt:bend; %image0:bend;
    end
end
if batch_nmb==nmb_of_batches && image0<nmb_of_images
    if bstrt>=image0
        image_batch=bstrt:nmb_of_images; %image0:nmb_of_images;
    end
end
end
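A small sketch of how the two batching helpers partition the image indices, with hypothetical sizes:

% Hypothetical example, not part of the upload: 1000 images in 4 batches
fun_proc_batch_update(1000, 4, 1)     % returns 1:250
fun_proc_batch_update(1000, 4, 4)     % returns 751:1000 (last batch takes the remainder)
% fun_proc_batch additionally resumes from the last processed image, image0:
fun_proc_batch(1000, 4, 2, 300)       % returns [], since batch 2 starts before image 300
fun_proc_batch(1000, 4, 3, 300)       % returns 300:750, processing resumes here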
Model_S_h1_m2/Model_Evaluation/fun_save_model_performance.m
ADDED
@@ -0,0 +1,7 @@
function out=fun_save_model_performance(model,imgnt1kdataset,pf,...
    training_performance,image0_perf)
reportname1 = sprintf('../Evaluation_Data/Model_Performance/training_performance_batch_%d_feature_%d_performance_%s_var.mat', imgnt1kdataset,pf,model);
str=struct('training_performance',training_performance,'image0_perf',image0_perf);
save(reportname1,"-fromstruct",str);
out=[];
end
Model_S_h1_m2/Model_Evaluation/fun_softmax.m
ADDED
@@ -0,0 +1,39 @@
function [out1,out2]=fun_softmax(S)
%
% Column-wise normalization
%
[sz1,sz2]=size(S);
A=zeros(sz1,sz2);
B=zeros(sz1-1,sz1,sz2);
for zz=1:sz2
    [aaa,bbb]=softmax(S(:,zz));
    A(:,zz)=aaa;
    B(:,:,zz)=bbb;
end
out1=A;
out2=B;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    function [y,dy]=softmax(S)
        % bb=1e+2;
        bb=1;
        S=S/bb;
        aa=exp(S);
        sm=sum(exp(S))+1e-16;
        y=aa./sm;

        [n,~]=size(S);
        m=n-1;
        dy=zeros(m,n);
        for i=1:m
            for j=1:n
                if j~=i
                    dy(i,j)=-exp(S(i)).*exp(S(j));
                else
                    dy(i,j)=exp(S(i)).*sm-exp(S(i)).*exp(S(j));
                end
            end
        end
        dy=dy/sm^2/bb;
    end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end
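A quick sanity check on the two outputs (random logits, hypothetical sizes): each column of the first output is a probability vector, and the second output stacks the (n-1) x n Jacobian of each column:

% Hypothetical check, not part of the upload
S = randn(5, 3);           % 5 labels, 3 samples
[p, J] = fun_softmax(S);
sum(p, 1)                  % each entry is approximately 1
size(J)                    % 4 x 5 x 3, one Jacobian slice per sample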
Model_S_h1_m2/Model_Evaluation/fun_top_n_label_rate.m
ADDED
@@ -0,0 +1,11 @@
function [rates,labs]=fun_top_n_label_rate(c_mtrx,top_n)
% Top_n per-label positive rates (the confusion-matrix diagonal, sorted in
% descending order) and the corresponding label indices.
aa=c_mtrx;
bb=diag(aa);
[~,cc]=sort(bb);
cc=flip(cc);
dd=bb(cc);
seq=1:top_n;
rates=round(dd(seq),4);
labs=round(cc(seq),0);
end
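A minimal usage sketch, assuming the aggregated confusion matrices `model_c_matrixes` computed by Run_Evaluation_Confusion_Matrix.m are already in the workspace:

% Hypothetical call, not part of the upload
fs = 6;                                                   % feature-aggregated model index
[rates, labs] = fun_top_n_label_rate(model_c_matrixes(:,:,fs), 5);
[rates, labs]   % 5x2 listing: positive rate (%) and label index of the 5 best labels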
Model_S_h1_m2/Model_Evaluation/fun_transform_data_rgbfeatures.m
ADDED
@@ -0,0 +1,107 @@
function output=fun_transform_data_rgbfeatures(img_data,param)
%
%img_data=data_load.data;
output.transformed_image=[];
output.mnsv=[];
output.maxsv=[];
output.ipvsz=[];

image_size=param.image_size;
img_x_dim=image_size(1);
img_y_dim=image_size(1);
% nmb_of_colors=3;%param.nmb_of_colors; %=5;
% nmb_of_colors=length(param.channel_sel);

compute_decimal_place=param.compute_decimal_place;
feature_RGB=param.feature_RGB;
[nmb_of_colors,~]=size(feature_RGB);

[data_size,~]=size(img_data);
reshaped_images=zeros(data_size,img_x_dim,img_y_dim,3);
for m=1:data_size
    aa=double(img_data(m,:));
    reshaped_images(m,:,:,:)=reshape(aa,img_x_dim,img_y_dim,3);
end

if param.dwnsz_on==1
    x_trim=param.x_trim;
    y_trim=param.y_trim;
    downsizing=param.downsizing;
    x_dwnsz_dim=fix(img_x_dim/downsizing);
    y_dwnsz_dim=fix(img_y_dim/downsizing);

    ipvsz=(x_dwnsz_dim-2*x_trim)*(y_dwnsz_dim-2*y_trim);
    xsq=1:downsizing;
    ysq=1:downsizing;
    output.transformed_image=zeros(ipvsz,data_size,nmb_of_colors);
    output.mnsv=zeros(data_size,nmb_of_colors);
    output.maxsv=zeros(data_size,nmb_of_colors);
    output.ipvsz=ipvsz;

    temp_img=zeros(ipvsz,3);
    for m=1:data_size
        img=squeeze(reshaped_images(m,:,:,:));
        for color=1:3
            img1=squeeze(img(:,:,color));
            aa=zeros(ipvsz,1);
            for ii=(1+x_trim):(x_dwnsz_dim-x_trim)
                for jj=(1+y_trim):(y_dwnsz_dim-y_trim)
                    aa((jj-y_trim)+((ii-x_trim)-1)*(y_dwnsz_dim-2*y_trim),1)=mean(img1(xsq+2*(ii-1),ysq+2*(jj-1)),'all');
                end
            end
            temp_img(:,color)=aa;
            % mnsv=mean(aa);
            % aa=aa-mnsv;
            % maxsv=max(abs(aa));
            % output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            % output.mnsv(m,color)=mnsv;
            % output.maxsv(m,color)=maxsv;
        end
        for color=1:nmb_of_colors
            c1=feature_RGB(color,1);
            c2=feature_RGB(color,2);
            c3=feature_RGB(color,3);
            aa=c1*temp_img(:,1)+c2*temp_img(:,2)+c3*temp_img(:,3);
            mnsv=mean(aa);
            aa=aa-mnsv;
            maxsv=max(abs(aa));
            output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            output.mnsv(m,color)=mnsv;
            output.maxsv(m,color)=maxsv;
        end
    end
else
    temp_img=zeros(img_x_dim*img_y_dim,3);
    for m=1:data_size
        img=squeeze(reshaped_images(m,:,:,:));
        for color=1:3
            aa=img(:,:,color);
            aa=aa(:);
            temp_img(:,color)=aa;
            % mnsv=mean(aa);
            % aa=aa-mnsv;
            % maxsv=max(abs(aa));
            % output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            % output.mnsv(m,color)=mnsv;
            % output.maxsv(m,color)=maxsv;
        end
        for color=1:nmb_of_colors
            c1=feature_RGB(color,1);
            c2=feature_RGB(color,2);
            c3=feature_RGB(color,3);
            aa=c1*temp_img(:,1)+c2*temp_img(:,2)+c3*temp_img(:,3);
            mnsv=mean(aa);
            aa=aa-mnsv;
            maxsv=max(abs(aa));
            output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            output.mnsv(m,color)=mnsv;
            output.maxsv(m,color)=maxsv;
        end
    end
end
if data_size==1
    output.transformed_image=squeeze(output.transformed_image);
    % output.mnsv(m,:,color)=squeeze(output.mnsv(m,:,color));
    % output.maxsv(m,:,color)=squeeze(output.maxsv(m,:,color));
end
end
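A minimal sketch of calling the transform on synthetic data, using the same parameter values as the evaluation scripts; the image data here are random and purely illustrative:

% Hypothetical call, not part of the upload
param.image_size = [64,64];
param.downsizing = 2;  param.x_trim = 1;  param.y_trim = 1;
param.compute_decimal_place = 4;  param.dwnsz_on = 1;
param.feature_RGB = [1 0 0; 0 1 0; 0 0 1];     % R, G, B channels only
img_data = randi(255, 10, 64*64*3);            % 10 flattened 64x64x3 images
out = fun_transform_data_rgbfeatures(img_data, param);
size(out.transformed_image)                    % 900 x 10 x 3: 30*30 downsized pixels per image and channel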
Model_S_h1_m2/Training_Evaluation/Run_Training_Evaluation.m
ADDED
@@ -0,0 +1,11 @@
% clear all
%
model='S_h1_m2';
nmb_of_hidden_layers=1;

fun_training_evaulation(model,nmb_of_hidden_layers)
%%
% rtmax=max(pstvrt_model,[],'all')
% rtmin=min(pstvrt_model,[],'all')
% rtmed=median(pstvrt_model(:),'all')
% rtmean=mean(pstvrt_model(:),'all')
Model_S_h1_m2/Training_Evaluation/S_h1_m2_1_performance.mat
ADDED
Binary file (6.3 kB)
Model_S_h1_m2/Training_Evaluation/fun_activation.m
ADDED
@@ -0,0 +1,21 @@
function [out1,out2]=fun_activation(x)
%
%
%
[sz1,sz2]=size(x);
A=zeros(sz1,sz2);
B=zeros(sz1,sz1,sz2);
for zz=1:sz2
    [A(:,zz),B(:,:,zz)]=ReLU(x(:,zz));
end

out1=A;
out2=B;
%%%%%%%%
    function [y,dy]=ReLU(s)
        aa=(s>0).*1;
        y=aa.*s;
        dy=diag(aa);
    end
%%%%%%%%
end
Model_S_h1_m2/Training_Evaluation/fun_predicted_vector_2_label.m
ADDED
@@ -0,0 +1,36 @@
function out=fun_predicted_vector_2_label(vector,nmb_of_labels)
%
% predicted vector of length 25 to digit label 1, ..., 25
% vector=predicted_vector;
[~,dtsz]=size(vector);
digit=zeros(1,dtsz);
distance=zeros(1,dtsz);
nn=nmb_of_labels;
v=vector(1:nn,:);
I=eye(nn);
vv=zeros(nn,1);
for j=1:dtsz
    for i=1:nn
        [cc,~]=loss_function(v(:,j),I(:,i));
        vv(i)=cc;
    end
    [aa,idx]=min(vv);
    digit(j)=idx;
    distance(j)=aa;
end
out.label=digit;
out.distance=distance;
%%%%%%%%%%
    function [loss,dloss]=loss_function(predicted_x,true_y)
        % cross_entropy loss
        %
        eps=1e-8;
        [n, N]=size(true_y);
        loss=sum(sum((true_y+eps).*log((true_y+eps)./(predicted_x+eps))))/N;
        % ind_loss=sum((true_y+eps).*log((true_y+eps)./(predicted_x+eps)),1);
        bb=-(true_y(1:end-1,:)+eps)./(predicted_x(1:end-1,:)+eps);
        w=bb+ones(n-1,1)*(true_y(end,:)+eps)./(predicted_x(end,:)+eps);
        dloss=w'/N;
    end
%%%%%%%%%%
end
Model_S_h1_m2/Training_Evaluation/fun_primecolor_2_features.m
ADDED
@@ -0,0 +1,42 @@
function images_bw=fun_primecolor_2_features(images,data_param,color,feature_RGB)
%
%
sz=size(images);
vone=ones(sz(1),1);
ori_images=0*images;
% feature_RGB=[1 0 0
%     0 1 0
%     0 0 1
%     0.618 0.382 0
%     0.618 0 0.382
%     0 0.618 0.382
%     0.382 0.618 0
%     0.382 0 0.618
%     0 0.382 0.618
%     0.5 0.5 0
%     0.5 0 0.5
%     0 0.5 0.5
%     1/3 1/3 1/3
%     0.2126 0.7152 0.0722
%     0.299 0.587 0.114];
c1=feature_RGB(color,1);
c2=feature_RGB(color,2);
c3=feature_RGB(color,3);

for cl=1:3
    aa=squeeze(images(:,:,cl));
    bb=squeeze(data_param.maxsv(:,1,cl));
    cc=vone*(squeeze(data_param.mnsv(:,1,cl))');
    ori_images(:,:,cl)=aa*diag(bb)+cc;
end
if color>=4
    aa=c1*ori_images(:,:,1)+...
        c2*ori_images(:,:,2)+c3*ori_images(:,:,3);
    bb=mean(aa,1);
    aa=aa-vone*bb;
    cc=1./max(abs(aa),[],1);
    images_bw=aa*diag(cc);
else
    images_bw=squeeze(images(:,:,color));
end
end
Model_S_h1_m2/Training_Evaluation/fun_save_result.m
ADDED
@@ -0,0 +1,8 @@
function out=fun_save_result(model,patch, module, subset,color,channels_names,training_performance)
%
reportname1 = sprintf('../Evaluation_Data/Model_Performance/Trained_Model_%s_patch_%d_module_%d_subset_%d_ch_%s.mat',...
    model,patch, module, subset, char(channels_names(color)));
% save(reportname1, 'training_performance', '-append');
save(reportname1, 'training_performance');
out=[];
end
Model_S_h1_m2/Training_Evaluation/fun_softmax.m
ADDED
@@ -0,0 +1,39 @@
function [out1,out2]=fun_softmax(S)
%
% Column-wise normalization
%
[sz1,sz2]=size(S);
A=zeros(sz1,sz2);
B=zeros(sz1-1,sz1,sz2);
for zz=1:sz2
    [aaa,bbb]=softmax(S(:,zz));
    A(:,zz)=aaa;
    B(:,:,zz)=bbb;
end
out1=A;
out2=B;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
    function [y,dy]=softmax(S)
        % bb=1e+2;
        bb=1;
        S=S/bb;
        aa=exp(S);
        sm=sum(exp(S))+1e-16;
        y=aa./sm;

        [n,~]=size(S);
        m=n-1;
        dy=zeros(m,n);
        for i=1:m
            for j=1:n
                if j~=i
                    dy(i,j)=-exp(S(i)).*exp(S(j));
                else
                    dy(i,j)=exp(S(i)).*sm-exp(S(i)).*exp(S(j));
                end
            end
        end
        dy=dy/sm^2/bb;
    end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
end
Model_S_h1_m2/Training_Evaluation/fun_training_evaulation.m
ADDED
@@ -0,0 +1,98 @@
function out=fun_training_evaulation(model,nmb_of_hidden_layers)
%
%
nmb_of_modules=40;
nmb_of_module_subsets=2;

channels_names={'R','G','B','RGg1','RBg1','GBg1','RGg2','RBg2','GBg2','RB','RG','GB','eRGB','BW','X','Y','Z'};
% feature_RGB=[1 0 0
%     0 1 0
%     0 0 1
%     0.618 0.382 0
%     0.618 0 0.382
%     0 0.618 0.382
%     0.382 0.618 0
%     0.382 0 0.618
%     0 0.382 0.618
%     0.5 0.5 0
%     0.5 0 0.5
%     0 0.5 0.5
%     1/3 1/3 1/3
%     0.299 0.587 0.114
%     0.4125 0.3576 0.1804
%     0.2126 0.7152 0.0722
%     0.0193 0.1192 0.9502];

nmb_of_colors=length(channels_names);
patch=0;
nmb_of_labs_per_module=25;

pstvrt_model=zeros(nmb_of_colors,nmb_of_modules,nmb_of_module_subsets);
%%
parfor module=1:nmb_of_modules
    % pm=param;
    channels_names={'R','G','B','RGg1','RBg1','GBg1','RGg2','RBg2','GBg2','RB','RG','GB','eRGB','BW','X','Y','Z'};
    for subset=1:nmb_of_module_subsets
        reportname1 = sprintf('../../Data_Transformation/Transformed_IN1k_Data/Transformed_Data_for_SGD/train_data_patch_%d_module_%d_subset_%d_for_%d_labels_per_module.mat', ...
            patch,module,subset,nmb_of_labs_per_module);
        data_load=load(reportname1);
        % % reportname1 = sprintf('../%s.mat',model);
        % % temp_load=load(reportname1);
        % % model_parameter=temp_load.model_parameters;
        for color=1:nmb_of_colors
            reportname1 = sprintf('../Model_Parameter/Trained_Parameter_patch_%d_module_%d_subset_%d_ch_%s.mat',...
                patch, module, subset, char(channels_names(color)));
            % str=struct('W', W, 'b', b);
            temp_load=load(reportname1);
            W=temp_load.W;
            b=temp_load.b;
            % Assign the trained parameters
            %
            W1=W.LayerName1;
            W2=W.LayerName2;
            b1=b.LayerName1;
            b2=b.LayerName2;
            % % W1=model_parameter.W1(:,:,color,subset,module);
            % % W2=model_parameter.W2(:,:,color,subset,module);
            % % b1=model_parameter.b1(:,:,color,subset,module);
            % % b2=model_parameter.b2(:,:,color,subset,module);
            a_0=data_load.data(:,:,color);
            true_label=data_load.labels;
            dtsz=length(true_label);
            nmb_of_labels=length(b2);
            z1=W1*a_0+b1;
            [a1,~]=fun_activation(z1);
            z2=W2*a1+b2;

            if nmb_of_hidden_layers==1
                [a2,~]=fun_softmax(z2);
                predicted_vector=a2;
            else
                W3=W.LayerName3;
                b3=b.LayerName3;
                nmb_of_labels=length(b3);
                [a2,~]=fun_activation(z2);

                z3=W3*a2+b3;
                [a3,~]=fun_softmax(z3);
                predicted_vector=a3;
            end
            prediction=fun_predicted_vector_2_label(predicted_vector,nmb_of_labels);
            %%
            % Compute the error and positive rates.
            %
            v=abs(true_label-prediction.label);
            errt=sum(1.*(v>0))/dtsz;
            pstvrt=(1-errt)*100;
            pstvrt_model(color,module,subset)=pstvrt;
            aa=(module-1)*nmb_of_labs_per_module; %data_load.label_table(2,1)-1;
            training_performance=[true_label+aa;prediction.label+aa;prediction.distance];
            out=fun_save_result(model,patch, module, subset,color,channels_names,training_performance);
        end
    end
    module
end
reportname1 = sprintf('%s_1_performance.mat',model);
save(reportname1, 'pstvrt_model');
out=[];
end
Model_S_h1_m2/Training_Evaluation/fun_transform_data_rgbfeatures.m
ADDED
@@ -0,0 +1,91 @@
function output=fun_transform_data_rgbfeatures(img_data,param)
%
%img_data=data_load.data;
output.transformed_image=[];
output.mnsv=[];
output.maxsv=[];
output.ipvsz=[];

image_size=param.image_size;
img_x_dim=image_size(1);
img_y_dim=image_size(1);

compute_decimal_place=param.compute_decimal_place;
feature_RGB=param.feature_RGB;
[nmb_of_colors,~]=size(feature_RGB);

[data_size,~]=size(img_data);
reshaped_images=zeros(data_size,img_x_dim,img_y_dim,3);
for m=1:data_size
    aa=double(img_data(m,:));
    reshaped_images(m,:,:,:)=reshape(aa,img_x_dim,img_y_dim,3);
end

if param.dwnsz_on==1
    x_trim=param.x_trim;
    y_trim=param.y_trim;
    downsizing=param.downsizing;
    x_dwnsz_dim=fix(img_x_dim/downsizing);
    y_dwnsz_dim=fix(img_y_dim/downsizing);

    ipvsz=(x_dwnsz_dim-2*x_trim)*(y_dwnsz_dim-2*y_trim);
    xsq=1:downsizing;
    ysq=1:downsizing;
    output.transformed_image=zeros(ipvsz,data_size,nmb_of_colors);
    output.mnsv=zeros(data_size,nmb_of_colors);
    output.maxsv=zeros(data_size,nmb_of_colors);
    output.ipvsz=ipvsz;

    temp_img=zeros(ipvsz,3);
    for m=1:data_size
        img=squeeze(reshaped_images(m,:,:,:));
        for color=1:3
            img1=squeeze(img(:,:,color));
            aa=zeros(ipvsz,1);
            for ii=(1+x_trim):(x_dwnsz_dim-x_trim)
                for jj=(1+y_trim):(y_dwnsz_dim-y_trim)
                    aa((jj-y_trim)+((ii-x_trim)-1)*(y_dwnsz_dim-2*y_trim),1)=mean(img1(xsq+2*(ii-1),ysq+2*(jj-1)),'all');
                end
            end
            temp_img(:,color)=aa;
        end
        for color=1:nmb_of_colors
            c1=feature_RGB(color,1);
            c2=feature_RGB(color,2);
            c3=feature_RGB(color,3);
            aa=c1*temp_img(:,1)+c2*temp_img(:,2)+c3*temp_img(:,3);
            mnsv=mean(aa);
            aa=aa-mnsv;
            maxsv=max(abs(aa));
            output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            output.mnsv(m,color)=mnsv;
            output.maxsv(m,color)=maxsv;
        end
    end
else
    temp_img=zeros(img_x_dim*img_y_dim,3);
    for m=1:data_size
        img=squeeze(reshaped_images(m,:,:,:));
        for color=1:3
            aa=img(:,:,color);
            aa=aa(:);
            temp_img(:,color)=aa;
        end
        for color=1:nmb_of_colors
            c1=feature_RGB(color,1);
            c2=feature_RGB(color,2);
            c3=feature_RGB(color,3);
            aa=c1*temp_img(:,1)+c2*temp_img(:,2)+c3*temp_img(:,3);
            mnsv=mean(aa);
            aa=aa-mnsv;
            maxsv=max(abs(aa));
            output.transformed_image(:,m,color)=round(aa/maxsv*10^compute_decimal_place)*10^(-compute_decimal_place);
            output.mnsv(m,color)=mnsv;
            output.maxsv(m,color)=maxsv;
        end
    end
end
if data_size==1
    output.transformed_image=squeeze(output.transformed_image);
end
end
Model_S_h1_m2/Training_Evaluation/read_me.m
ADDED
@@ -0,0 +1,12 @@
%
% 1. Manually create the folder: ../Evaluation_Data/Transformed_Data_for_SGD
% 2. Execute
%        Run_Modularize_Data
%    in the Data_Transformation folder
%    to rearrange the training data into modules of labels, and divide
%    each module into subsets.
%    (These steps only need to be run once.)
% 3. Execute
%    -- Run_Training_Evaluation_model
%    by choosing the model type, SGD or GDT, on the first line
%    of the file.