template <typename Tmatlab, typename T>
void matlab::read_cast_matrix(mxArray *var, idx<T> &m) {
#ifdef __MATLAB__
  // allocate a temporary matrix with the same element type as the original
  // MATLAB matrix
  idx<Tmatlab> tmp(m.get_idxdim());
  // load data
  void *data = mxGetData(var);
  // copy the raw mxArray buffer into the temporary idx (same element type)
  memcpy(tmp.idx_ptr(), (Tmatlab*) data, tmp.nelements() * sizeof (Tmatlab));
  // copy-cast from Tmatlab to T
  idx_copy(tmp, m);
#endif
}
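// A minimal usage sketch (hypothetical, not part of the library code above):
// the Tmatlab template argument must match the MATLAB class of 'var', since
// the memcpy above interprets the raw mxArray buffer with Tmatlab's element
// size before idx_copy casts it to the idx's element type T. The dispatch
// below uses the standard MEX call mxGetClassID; the helper name load_into_idx
// and the assumption that read_cast_matrix is callable from here are
// illustrative only.
#ifdef __MATLAB__
template <typename T>
void load_into_idx(matlab &mat, mxArray *var, idx<T> &m) {
  switch (mxGetClassID(var)) {
    case mxDOUBLE_CLASS: mat.read_cast_matrix<double>(var, m);        break;
    case mxSINGLE_CLASS: mat.read_cast_matrix<float>(var, m);         break;
    case mxINT32_CLASS:  mat.read_cast_matrix<int>(var, m);           break;
    case mxUINT8_CLASS:  mat.read_cast_matrix<unsigned char>(var, m); break;
    default: /* unsupported MATLAB class: leave m untouched */        break;
  }
}
#endif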
generic_conv_net(parameter &trainableParam, intg output_size)
  : layers_n<state_idx>(true) { // owns modules
  cout << "Initializing ConvNet..." << endl;

  //! Define the number of feature maps per layer (C0, C1, C2)
  intg featureMaps0 = 6;
  intg featureMaps1 = 12;
  intg featureMaps2 = 40;

  //! Define tables of connections between layers.
  //! These two are fully connected, i.e. each feature map in a layer
  //! is connected to every feature map in the previous layer...
  table0 = full_table(1, featureMaps0);            //! from input to C0
  table2 = full_table(featureMaps1, featureMaps2); //! from S1 to C2
  //! ... whereas the connections here are sparse (S0 to C1):
  table1 = idx<intg>(44, 2);                       //! from S0 to C1
  intg tbl[44][2] = {{0, 0}, {1, 0}, {2, 0}, //! 0,1,2 in S0 connected to 0 in C1
                     {1, 1}, {2, 1}, {3, 1}, //! and so on...
                     {2, 2}, {3, 2}, {4, 2},
                     {3, 3}, {4, 3}, {5, 3},
                     {4, 4}, {5, 4}, {0, 4},
                     {5, 5}, {0, 5}, {1, 5},
                     {0, 6}, {1, 6}, {2, 6}, {3, 6},
                     {1, 7}, {2, 7}, {3, 7}, {4, 7},
                     {2, 8}, {3, 8}, {4, 8}, {5, 8},
                     {3, 9}, {4, 9}, {5, 9}, {0, 9},
                     {4, 10}, {5, 10}, {0, 10}, {1, 10},
                     {0, 11}, {1, 11}, {2, 11}, {3, 11}, {4, 11}, {5, 11}};
  memcpy(table1.idx_ptr(), tbl, table1.nelements() * sizeof (intg));

  //! Finally we initialize the architecture of the ConvNet.
  //! In this case we create a CSCSCF network.
  //! It is easy to change the architecture by simply removing/adding a call
  //! to add_module(...)

  //! C0 Layer
  add_module(new nn_layer_convolution(trainableParam, //! shared weights
                                      7, 7,           //! dim of kernel
                                      1, 1,           //! size of subsampling
                                      table0),        //! connections between input and C0
             //! state_idx holds the feature maps of C0
             new state_idx(featureMaps0, 1, 1));
  //! S0 Layer
  add_module(new nn_layer_subsampling(trainableParam,
                                      2, 2,           //! dim of stride
                                      2, 2,           //! dim of subsampling mask
                                      featureMaps0),
             new state_idx(featureMaps0, 1, 1));
  //! C1 Layer
  add_module(new nn_layer_convolution(trainableParam, 7, 7, 1, 1, table1),
             new state_idx(featureMaps1, 1, 1));
  //! S1 Layer
  add_module(new nn_layer_subsampling(trainableParam, 2, 2, 2, 2, featureMaps1),
             new state_idx(featureMaps1, 1, 1));
  //! C2 Layer
  add_module(new nn_layer_convolution(trainableParam, 7, 7, 1, 1, table2),
             new state_idx(featureMaps2, 1, 1));
  //! F Layer
  add_last_module(new nn_layer_full(trainableParam, featureMaps2, output_size));
}
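// A minimal usage sketch, assuming the usual eblearn pattern of a parameter
// object sized up front, state_idx buffers for input/output, and fprop() on
// the layers_n container; the function name, variable names and the 60000-slot
// parameter size are illustrative only, not taken from the code above.
// With 7x7 convolutions and 2x2 subsamplings, a 46x46 single-channel input
// shrinks to 1x1 at C2 (46 -> 40 -> 20 -> 14 -> 7 -> 1), so the F layer sees
// featureMaps2 maps of size 1x1.
void run_generic_conv_net_example() {
  parameter trainableParam(60000);          // pool of trainable weights
  generic_conv_net net(trainableParam, 10); // e.g. 10 output classes
  state_idx input(1, 46, 46);               // 1 input map of 46x46 pixels
  state_idx output(10, 1, 1);               // one score per class
  net.fprop(input, output);                 // forward pass through CSCSCF
  // (weights would still need to be initialized and trained before the
  //  outputs are meaningful)
}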