/* Multiply a 3x3 matrix with a 3x3 matrix, giving a 3x3 matrix */
void x3f_3x3_3x3_mul(double *a, double *b, double *c)
{
  M00(c) = M00(a)*M00(b) + M01(a)*M10(b) + M02(a)*M20(b);
  M01(c) = M00(a)*M01(b) + M01(a)*M11(b) + M02(a)*M21(b);
  M02(c) = M00(a)*M02(b) + M01(a)*M12(b) + M02(a)*M22(b);

  M10(c) = M10(a)*M00(b) + M11(a)*M10(b) + M12(a)*M20(b);
  M11(c) = M10(a)*M01(b) + M11(a)*M11(b) + M12(a)*M21(b);
  M12(c) = M10(a)*M02(b) + M11(a)*M12(b) + M12(a)*M22(b);

  M20(c) = M20(a)*M00(b) + M21(a)*M10(b) + M22(a)*M20(b);
  M21(c) = M20(a)*M01(b) + M21(a)*M11(b) + M22(a)*M21(b);
  M22(c) = M20(a)*M02(b) + M21(a)*M12(b) + M22(a)*M22(b);
}
/* Calculate the inverse of a 3x3 matrix
   http://en.wikipedia.org/wiki/Invertible_matrix */
void x3f_3x3_inverse(double *a, double *ainv)
{
  double A, B, C, D, E, F, G, H, I;
  double det;

  A = +(M11(a)*M22(a)-M12(a)*M21(a));
  B = -(M10(a)*M22(a)-M12(a)*M20(a));
  C = +(M10(a)*M21(a)-M11(a)*M20(a));
  D = -(M01(a)*M22(a)-M02(a)*M21(a));
  E = +(M00(a)*M22(a)-M02(a)*M20(a));
  F = -(M00(a)*M21(a)-M01(a)*M20(a));
  G = +(M01(a)*M12(a)-M02(a)*M11(a));
  H = -(M00(a)*M12(a)-M02(a)*M10(a));
  I = +(M00(a)*M11(a)-M01(a)*M10(a));

  det = M00(a)*A + M01(a)*B + M02(a)*C;

  M00(ainv) = A/det; M01(ainv) = D/det; M02(ainv) = G/det;
  M10(ainv) = B/det; M11(ainv) = E/det; M12(ainv) = H/det;
  M20(ainv) = C/det; M21(ainv) = F/det; M22(ainv) = I/det;
}
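/* Sanity-check sketch (not part of the original code): multiplying a matrix
   by the inverse computed above should give approximately the identity.
   Note that x3f_3x3_inverse divides by the determinant without testing it
   for zero, so a singular input produces inf/nan entries. The helper name
   below is hypothetical, and it assumes, as elsewhere in this file, that a
   3x3 matrix is a double[9] addressed through the M00..M22 macros. */
static void x3f_3x3_inverse_check(double *a, double *should_be_identity)
{
  double ainv[9];

  x3f_3x3_inverse(a, ainv);                     /* ainv = a^-1 */
  x3f_3x3_3x3_mul(a, ainv, should_be_identity); /* expect ~identity */
}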
/* Multiply a scalar with a 3x3 matrix, giving a 3x3 matrix */
void x3f_scalar_3x3_mul(double a, double *b, double *c)
{
  M00(c) = a*M00(b); M01(c) = a*M01(b); M02(c) = a*M02(b);
  M10(c) = a*M10(b); M11(c) = a*M11(b); M12(c) = a*M12(b);
  M20(c) = a*M20(b); M21(c) = a*M21(b); M22(c) = a*M22(b);
}
void x3f_Bradford_D65_to_D50(double *a)
{
  M00(a) = +1.0478112; M01(a) = +0.0228866; M02(a) = -0.0501270;
  M10(a) = +0.0295424; M11(a) = +0.9904844; M12(a) = -0.0170491;
  M20(a) = -0.0092345; M21(a) = +0.0150436; M22(a) = +0.7521316;
}
/* http://www.brucelindbloom.com/index.html?Eqn_ChromAdapt.html */
void x3f_Bradford_D50_to_D65(double *a)
{
  M00(a) = +0.9555766; M01(a) = -0.0230393; M02(a) = +0.0631636;
  M10(a) = -0.0282895; M11(a) = +1.0099416; M12(a) = +0.0210077;
  M20(a) = +0.0122982; M21(a) = -0.0204830; M22(a) = +1.3299098;
}
/* http://en.wikipedia.org/wiki/CIE_1931_color_space */
void x3f_CIERGB_to_XYZ(double *a)
{
  M00(a) = 0.49;    M01(a) = 0.31;    M02(a) = 0.20;
  M10(a) = 0.17697; M11(a) = 0.81240; M12(a) = 0.01063;
  M20(a) = 0.00;    M21(a) = 0.01;    M22(a) = 0.99;
}
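/* Sketch (not in the original source): the referenced Wikipedia article gives
   this transform with a leading factor of 1/b21 = 1/0.17697; the function
   above stores only the unscaled entries. If the fully scaled form is wanted,
   x3f_scalar_3x3_mul can apply the factor. The helper name is hypothetical;
   a 3x3 matrix is assumed to be a double[9] as elsewhere in this file. */
static void x3f_CIERGB_to_XYZ_normalized(double *a)
{
  double m[9];

  x3f_CIERGB_to_XYZ(m);
  x3f_scalar_3x3_mul(1.0/0.17697, m, a); /* apply the 1/b21 normalization */
}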
void x3f_sRGB_to_XYZ(double *a)
{
  M00(a) = 0.4124; M01(a) = 0.3576; M02(a) = 0.1805;
  M10(a) = 0.2126; M11(a) = 0.7152; M12(a) = 0.0722;
  M20(a) = 0.0193; M21(a) = 0.1192; M22(a) = 0.9505;
}
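/* Illustrative sketch (hypothetical helper, not in the original source):
   chromatic adaptation is just another 3x3 multiply, so an sRGB -> XYZ matrix
   referenced to D50 can be built by composing the Bradford adaptation above
   with the D65-referenced sRGB matrix. */
static void x3f_sRGB_to_XYZ_D50(double *a)
{
  double srgb_to_xyz[9], d65_to_d50[9];

  x3f_sRGB_to_XYZ(srgb_to_xyz);        /* sRGB primaries, D65 white point */
  x3f_Bradford_D65_to_D50(d65_to_d50); /* Bradford adaptation D65 -> D50 */

  /* XYZ_D50 = (D65->D50) * (sRGB->XYZ_D65) * rgb, so multiply in that order */
  x3f_3x3_3x3_mul(d65_to_d50, srgb_to_xyz, a);
}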
void x3f_3x3_identity(double *a)
{
  M00(a) = 1.0; M01(a) = 0.0; M02(a) = 0.0;
  M10(a) = 0.0; M11(a) = 1.0; M12(a) = 0.0;
  M20(a) = 0.0; M21(a) = 0.0; M22(a) = 1.0;
}
void x3f_AdobeRGB_to_XYZ(double *a)
{
  M00(a) = 0.57667; M01(a) = 0.18556; M02(a) = 0.18823;
  M10(a) = 0.29737; M11(a) = 0.62736; M12(a) = 0.07529;
  M20(a) = 0.02703; M21(a) = 0.07069; M22(a) = 0.99134;
}
/* http://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf */
void x3f_XYZ_to_AdobeRGB(double *a)
{
  M00(a) = +2.04159; M01(a) = -0.56501; M02(a) = -0.34473;
  M10(a) = -0.96924; M11(a) = +1.87597; M12(a) = +0.04156;
  M20(a) = +0.01344; M21(a) = -0.11836; M22(a) = +1.01517;
}
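/* Sketch (hypothetical helper, not in the original source): sRGB and Adobe RGB
   share the D65 white point, so a direct matrix between the two linear RGB
   spaces is the plain composition of the transforms through XYZ. This only
   covers the linear part; gamma encoding is not a 3x3 operation. */
static void x3f_sRGB_to_AdobeRGB(double *a)
{
  double srgb_to_xyz[9], xyz_to_adobergb[9];

  x3f_sRGB_to_XYZ(srgb_to_xyz);
  x3f_XYZ_to_AdobeRGB(xyz_to_adobergb);
  x3f_3x3_3x3_mul(xyz_to_adobergb, srgb_to_xyz, a); /* sRGB -> XYZ -> Adobe RGB */
}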
void x3f_ProPhotoRGB_to_XYZ(double *a)
{
  M00(a) = 0.7977; M01(a) = 0.1352; M02(a) = 0.0313;
  M10(a) = 0.2880; M11(a) = 0.7119; M12(a) = 0.0001;
  M20(a) = 0.0000; M21(a) = 0.0000; M22(a) = 0.8249;
}
/* http://en.wikipedia.org/wiki/ProPhoto_RGB_color_space */
void x3f_XYZ_to_ProPhotoRGB(double *a)
{
  M00(a) = +1.3460; M01(a) = -0.2556; M02(a) = -0.0511;
  M10(a) = -0.5446; M11(a) = +1.5082; M12(a) = +0.0205;
  M20(a) = +0.0000; M21(a) = +0.0000; M22(a) = +1.2123;
}
/* Print a 3x3 matrix */
void x3f_3x3_print(x3f_verbosity_t level, double *a)
{
  x3f_printf(level, "%10g %10g %10g\n", M00(a), M01(a), M02(a));
  x3f_printf(level, "%10g %10g %10g\n", M10(a), M11(a), M12(a));
  x3f_printf(level, "%10g %10g %10g\n", M20(a), M21(a), M22(a));
}
void x3f_3x3_ones(double *a)
{
  M00(a) = 1.0; M01(a) = 1.0; M02(a) = 1.0;
  M10(a) = 1.0; M11(a) = 1.0; M12(a) = 1.0;
  M20(a) = 1.0; M21(a) = 1.0; M22(a) = 1.0;
}
/* http://en.wikipedia.org/wiki/SRGB */
void x3f_XYZ_to_sRGB(double *a)
{
  M00(a) = +3.2406; M01(a) = -1.5372; M02(a) = -0.4986;
  M10(a) = -0.9689; M11(a) = +1.8758; M12(a) = +0.0415;
  M20(a) = +0.0557; M21(a) = -0.2040; M22(a) = +1.0570;
}
int main(int argc, char *argv[]){

  Params params;
  std::map<std::string, std::string> args;
  readArgs(argc, argv, args);

  if(args.find("algo")!=args.end()){
    params.algo = args["algo"];
  }else{
    params.algo = "qdMCNat";
  }

  if(args.find("inst_file")!=args.end())
    setParamsFromFile(args["inst_file"], args, params);
  else
    setParams(params.algo, args, params);

  createLogDir(params.dir_path);
  gen.seed(params.seed);

  // Load the dataset
  MyMatrix X_train, X_valid;
  VectorXd Y_train, Y_valid;
  loadMnist(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
  //loadCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);
  //loadLightCIFAR10(params.ratio_train, X_train, X_valid, Y_train, Y_valid);

  // ConvNet parameters
  std::vector<ConvLayerParams> conv_params;

  ConvLayerParams conv_params1;
  conv_params1.Hf = 5;
  conv_params1.stride = 1;
  conv_params1.n_filter = 20;
  conv_params1.padding = 0;
  conv_params.push_back(conv_params1);

  ConvLayerParams conv_params2;
  conv_params2.Hf = 5;
  conv_params2.stride = 1;
  conv_params2.n_filter = 50;
  conv_params2.padding = 0;
  conv_params.push_back(conv_params2);

  std::vector<PoolLayerParams> pool_params;

  PoolLayerParams pool_params1;
  pool_params1.Hf = 2;
  pool_params1.stride = 2;
  pool_params.push_back(pool_params1);

  PoolLayerParams pool_params2;
  pool_params2.Hf = 2;
  pool_params2.stride = 2;
  pool_params.push_back(pool_params2);

  const unsigned n_conv_layer = conv_params.size();

  for(unsigned l = 0; l < conv_params.size(); l++){
    if(l==0){
      conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * params.img_depth;
      conv_params[l].N = (params.img_width - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
    }
    else{
      conv_params[l].filter_size = conv_params[l].Hf * conv_params[l].Hf * conv_params[l-1].n_filter;
      conv_params[l].N = (pool_params[l-1].N - conv_params[l].Hf + 2*conv_params[l].padding)/conv_params[l].stride + 1;
    }
    pool_params[l].N = (conv_params[l].N - pool_params[l].Hf)/pool_params[l].stride + 1;
  }

  // Neural Network parameters
  const unsigned n_training = X_train.rows();
  const unsigned n_valid = X_valid.rows();
  const unsigned n_feature = X_train.cols();
  const unsigned n_label = Y_train.maxCoeff() + 1;

  params.nn_arch.insert(params.nn_arch.begin(),conv_params[n_conv_layer-1].n_filter * pool_params[n_conv_layer-1].N * pool_params[n_conv_layer-1].N);
  params.nn_arch.push_back(n_label);
  const unsigned n_layers = params.nn_arch.size();

  // Optimization parameter
  const int n_train_batch = ceil(n_training/(float)params.train_minibatch_size);
  const int n_valid_batch = ceil(n_valid/(float)params.valid_minibatch_size);
  double prev_loss = std::numeric_limits<double>::max();
  double eta = params.eta;

  // Create the convolutional layer
  std::vector<MyMatrix> conv_W(n_conv_layer);
  std::vector<MyMatrix> conv_W_T(n_conv_layer);
  std::vector<MyVector> conv_B(n_conv_layer);

  // Create the neural network
  MyMatrix W_out(params.nn_arch[n_layers-2],n_label);
  std::vector<MySpMatrix> W(n_layers-2);
  std::vector<MySpMatrix> Wt(n_layers-2);
  std::vector<MyVector> B(n_layers-1);

  double init_sigma = 0.;
  ActivationFunction act_func;
  ActivationFunction eval_act_func;
  if(params.act_func_name=="sigmoid"){
    init_sigma = 4.0;
    act_func = std::bind(logistic,true,_1,_2,_3);
    eval_act_func = std::bind(logistic,false,_1,_2,_3);
  }else if(params.act_func_name=="tanh"){
    init_sigma = 1.0;
    act_func = std::bind(my_tanh,true,_1,_2,_3);
    eval_act_func = std::bind(my_tanh,false,_1,_2,_3);
  }else if(params.act_func_name=="relu"){
    init_sigma = 1.0; // TODO: Find the good value
    act_func = std::bind(relu,true,_1,_2,_3);
    eval_act_func = std::bind(relu,false,_1,_2,_3);
  }else{
    std::cout << "Not implemented yet!" << std::endl;
    assert(false);
  }

  std::cout << "Initializing the network... ";
  params.n_params = initNetwork(params.nn_arch, params.act_func_name, params.sparsity, conv_params, pool_params, W_out, W, Wt, B, conv_W, conv_W_T, conv_B);
  // TODO: Init the conv bias

  // Deep copy of parameters for the adaptive rule
  std::vector<MyMatrix> mu_dW(n_layers-1);
  std::vector<MyVector> mu_dB(n_layers-1);

  MyMatrix pW_out = W_out;
  std::vector<MySpMatrix> pW = W;
  std::vector<MySpMatrix> pWt = Wt;
  std::vector<MyVector> pB = B;

  MyMatrix ppMii_out, ppM0i_out;
  MyVector ppM00_out;
  std::vector<MySpMatrix> ppMii,ppM0i;
  std::vector<MyVector> ppM00;

  MyMatrix pMii_out,pM0i_out;
  MyVector pM00_out;
  std::vector<MySpMatrix> pMii,pM0i;
  std::vector<MyVector> pM00;

  std::vector<MyMatrix> conv_ppMii, conv_ppM0i;
  std::vector<MyVector> conv_ppM00;
  std::vector<MyMatrix> conv_pMii, conv_pM0i;
  std::vector<MyVector> conv_pM00;

  // Convert the labels to one-hot vector
  MyMatrix one_hot = MyMatrix::Zero(n_training, n_label);
  labels2oneHot(Y_train,one_hot);

  // Configure the logger
  std::ostream* logger;
  if(args.find("verbose")!=args.end()){
    getOutput("",logger);
  }else{
    getOutput(params.file_path,logger);
  }

  double cumul_time = 0.;

  printDesc(params, logger);
  printConvDesc(params, conv_params, pool_params, logger);

  std::cout << "Starting the learning phase... " << std::endl;
  *logger << "Epoch Time(s) train_loss train_accuracy valid_loss valid_accuracy eta" << std::endl;

  for(unsigned i = 0; i < params.n_epoch; i++){
    for(unsigned j = 0; j < n_train_batch; j++){

      // Mini-batch creation
      unsigned curr_batch_size = 0;
      MyMatrix X_batch, one_hot_batch;
      getMiniBatch(j, params.train_minibatch_size, X_train, one_hot, params, conv_params[0], curr_batch_size, X_batch, one_hot_batch);

      double prev_time = gettime();

      // Forward propagation for conv layer
      std::vector<std::vector<unsigned>> poolIdxX1(n_conv_layer);
      std::vector<std::vector<unsigned>> poolIdxY1(n_conv_layer);

      MyMatrix z0;
      std::vector<MyMatrix> conv_A(conv_W.size());
      std::vector<MyMatrix> conv_Ap(conv_W.size());
      convFprop(curr_batch_size, conv_params, pool_params, act_func, conv_W, conv_B, X_batch, conv_A, conv_Ap, z0, poolIdxX1, poolIdxY1);

      // Forward propagation
      std::vector<MyMatrix> Z(n_layers-1);
      std::vector<MyMatrix> A(n_layers-2);
      std::vector<MyMatrix> Ap(n_layers-2);
      fprop(params.dropout_flag, act_func, W, W_out, B, z0, Z, A, Ap);

      // Compute the output and the error
      MyMatrix out;
      softmax(Z[n_layers-2], out);
      std::vector<MyMatrix> gradB(n_layers-1);
      gradB[n_layers-2] = out - one_hot_batch;

      // Backpropagation
      bprop(Wt, W_out, Ap, gradB);

      // Backpropagation for conv layer
      std::vector<MyMatrix> conv_gradB(conv_W.size());
      MyMatrix layer_gradB = (gradB[0] * W[0].transpose());
      MyMatrix pool_gradB;
      layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, layer_gradB, pool_gradB);
      convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, pool_gradB, conv_gradB, poolIdxX1, poolIdxY1);

      if(params.algo == "bprop"){
        update(eta, gradB, A, z0, params.regularizer, params.lambda, W_out, W, Wt, B);
        convUpdate(curr_batch_size, eta, conv_params, conv_gradB, conv_A, X_batch, "", 0., conv_W, conv_W_T, conv_B);
      }else{

        // Compute the metric
        std::vector<MyMatrix> metric_gradB(n_layers-1);
        std::vector<MyMatrix> metric_conv_gradB(conv_params.size());

        if(params.algo=="qdMCNat"){

          // Monte-Carlo Approximation of the metric
          std::vector<MyMatrix> mc_gradB(n_layers-1);
          computeMcError(out, mc_gradB[n_layers-2]);

          // Backpropagation
          bprop(Wt, W_out, Ap, mc_gradB);

          for(unsigned k = 0; k < gradB.size(); k++){
            metric_gradB[k] = mc_gradB[k].array().square();
          }

          // Backpropagation for conv layer
          std::vector<MyMatrix> mc_conv_gradB(conv_W.size());
          MyMatrix mc_layer_gradB = (mc_gradB[0] * W[0].transpose());
          MyMatrix mc_pool_gradB;
          layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, mc_layer_gradB, mc_pool_gradB);
          convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, mc_pool_gradB, mc_conv_gradB, poolIdxX1, poolIdxY1);

          for(unsigned k = 0; k < conv_params.size(); k++){
            metric_conv_gradB[k] = mc_conv_gradB[k].array().square();
          }
        }
        else if(params.algo=="qdop"){
          for(unsigned k = 0; k < conv_params.size(); k++){
            metric_conv_gradB[k] = conv_gradB[k].array().square();
          }
          for(unsigned k = 0; k < gradB.size(); k++){
            metric_gradB[k] = gradB[k].array().square();
          }
        }
        else if(params.algo=="qdNat"){
          for(unsigned k = 0; k < conv_params.size(); k++){
            metric_conv_gradB[k] = conv_gradB[k].array().square();
          }
          for(unsigned k = 0; k < metric_gradB.size(); k++){
            metric_gradB[k] = MyMatrix::Zero(gradB[k].rows(),gradB[k].cols());
          }

          for(unsigned l = 0; l < n_label; l++){
            MyMatrix fisher_ohbatch = MyMatrix::Zero(curr_batch_size, n_label);
            fisher_ohbatch.col(l).setOnes();

            std::vector<MyMatrix> fgradB(n_layers-1);
            fgradB[n_layers-2] = out - fisher_ohbatch;
            bprop(Wt, W_out, Ap, fgradB);

            // Backpropagation for conv layer
            std::vector<MyMatrix> fisher_conv_gradB(conv_W.size());
            MyMatrix fisher_layer_gradB = (fgradB[0] * W[0].transpose());
            MyMatrix fisher_pool_gradB;
            layer2pool(curr_batch_size, pool_params[conv_W.size()-1].N, conv_params[conv_W.size()-1].n_filter, fisher_layer_gradB, fisher_pool_gradB);
            convBprop(curr_batch_size, conv_params, pool_params, conv_W_T, conv_Ap, fisher_pool_gradB, fisher_conv_gradB, poolIdxX1, poolIdxY1);

            for(unsigned k = 0; k < conv_params.size(); k++){
              MyMatrix fisher_conv_gradB_sq = fisher_conv_gradB[k].array().square();
              for(unsigned m = 0; m < out.rows(); m++){
                for(unsigned f = 0; f < conv_params[k].n_filter; f++){
                  for(unsigned n = 0; n < conv_params[k].N * conv_params[k].N; n++){
                    fisher_conv_gradB_sq(f,m*conv_params[k].N*conv_params[k].N+n) *= out(m,l);
                  }
                }
              }
              metric_conv_gradB[k] += fisher_conv_gradB_sq;
            }
            for(unsigned k = 0; k < W.size(); k++){
              const unsigned rev_k = n_layers - k - 2;
              metric_gradB[rev_k] += (fgradB[rev_k].array().square().array().colwise() * out.array().col(l)).matrix();
            }
          }
        }

        bool init_flag = false;
        if(i == 0 && j == 0 && !params.init_metric_id){
          init_flag = true;
        }

        std::vector<MyMatrix> conv_Mii(conv_params.size());
        std::vector<MyMatrix> conv_M0i(conv_params.size());
        std::vector<MyVector> conv_M00(conv_params.size());
        buildConvQDMetric(curr_batch_size, metric_conv_gradB, conv_A, X_batch, conv_W, params.matrix_reg, conv_Mii, conv_M0i, conv_M00);
        updateConvMetric(init_flag, params.metric_gamma, conv_pMii, conv_pM0i, conv_pM00, conv_Mii, conv_M0i, conv_M00);

        MyMatrix Mii_out, M0i_out;
        MyVector M00_out;
        std::vector<MySpMatrix> Mii(W.size());
        std::vector<MySpMatrix> M0i(W.size());
        std::vector<MyVector> M00(W.size());
        buildQDMetric(metric_gradB, A, z0, W_out, W, params.matrix_reg, Mii_out, M0i_out, M00_out, Mii, M0i, M00);
        updateMetric(init_flag, params.metric_gamma, Mii_out, M0i_out, M00_out, Mii, M0i, M00, pMii_out, pM0i_out, pM00_out, pMii, pM0i, pM00);

        update(eta, gradB, A, z0, params.regularizer, params.lambda, W_out, W, Wt, B, Mii_out, M0i_out, M00_out, Mii, M0i, M00);
      }

      double curr_time = gettime();
      cumul_time += curr_time - prev_time;

      if(params.minilog_flag){
        double train_loss = 0.;
        double train_accuracy = 0.;
        double valid_loss = 0.;
        double valid_accuracy = 0.;
        evalModel(eval_act_func, params, n_train_batch, n_training, X_train, Y_train, conv_params, pool_params, conv_W, conv_B, W_out, W, B, train_loss, train_accuracy);
        evalModel(eval_act_func, params, n_valid_batch, n_valid, X_valid, Y_valid, conv_params, pool_params, conv_W, conv_B, W_out, W, B, valid_loss, valid_accuracy);

        // Logging
        *logger << i + float(j)/n_train_batch << " " << cumul_time << " " << train_loss << " " << train_accuracy << " " << valid_loss << " " << valid_accuracy << " " << eta << std::endl;
      }
    }
    if(!params.minilog_flag || params.adaptive_flag){
      double train_loss = 0.;
      double train_accuracy = 0.;
      double valid_loss = 0.;
      double valid_accuracy = 0.;
      evalModel(eval_act_func, params, n_train_batch, n_training, X_train, Y_train, conv_params, pool_params, conv_W, conv_B, W_out, W, B, train_loss, train_accuracy);
      evalModel(eval_act_func, params, n_valid_batch, n_valid, X_valid, Y_valid, conv_params, pool_params, conv_W, conv_B, W_out, W, B, valid_loss, valid_accuracy);

      // if(params.adaptive_flag)
      //   adaptiveRule(train_loss, prev_loss, eta, W, B, pMii, pM0i, pM00, pW, pB, ppMii, ppM0i, ppM00);

      // Logging
      if(!params.minilog_flag){
        *logger << i << " " << cumul_time << " " << train_loss << " " << train_accuracy << " " << valid_loss << " " << valid_accuracy << " " << eta << std::endl;
      }
    }
  }
}
/* Convert a 3x1 matrix to a 3x3 diagonal matrix */
void x3f_3x3_diag(double *a, double *b)
{
  M00(b) = M0(a); M01(b) = 0.0;   M02(b) = 0.0;
  M10(b) = 0.0;   M11(b) = M1(a); M12(b) = 0.0;
  M20(b) = 0.0;   M21(b) = 0.0;   M22(b) = M2(a);
}
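/* Usage sketch (hypothetical helper, not in the original source): per-channel
   gains stored as a 3x1 vector (accessed with the M0..M2 macros, as above)
   can be folded into a 3x3 color matrix by expanding them to a diagonal
   matrix and multiplying. */
static void x3f_gain_3x3_mul(double *gain, double *m, double *result)
{
  double d[9];

  x3f_3x3_diag(gain, d);         /* gains on the diagonal */
  x3f_3x3_3x3_mul(d, m, result); /* scales row i of m by gain i */
}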