// Neural Network ------------------------------------------------------------- // Load the snapshot of the CNN we are going to run. Network* construct_gtsrb_net() { fprintf(stderr, "Constructing GTSRB Network \n"); Network* net = make_network(12); network_add(net, make_conv_layer(48, 48, 3, 3, 100, 1, 0)); network_add(net, make_relu_layer(net->layers[0]->out_sx, net->layers[0]->out_sy, net->layers[0]->out_depth)); network_add(net, make_max_pool_layer(net->layers[1]->out_sx, net->layers[1]->out_sy, net->layers[1]->out_depth, 2, 2)); network_add(net, make_conv_layer(net->layers[2]->out_sx, net->layers[2]->out_sy, net->layers[2]->out_depth, 4, 150, 1, 0)); network_add(net, make_relu_layer(net->layers[3]->out_sx, net->layers[3]->out_sy, net->layers[3]->out_depth)); network_add(net, make_max_pool_layer(net->layers[4]->out_sx, net->layers[4]->out_sy, net->layers[4]->out_depth, 2, 2)); network_add(net, make_conv_layer(net->layers[5]->out_sx, net->layers[5]->out_sy, net->layers[5]->out_depth, 3, 250, 1, 0)); network_add(net, make_relu_layer(net->layers[6]->out_sx, net->layers[6]->out_sy, net->layers[6]->out_depth)); network_add(net, make_max_pool_layer(net->layers[7]->out_sx, net->layers[7]->out_sy, net->layers[7]->out_depth, 2, 2)); network_add(net, make_fc_layer(net->layers[8]->out_sx, net->layers[8]->out_sy, net->layers[8]->out_depth, 200)); network_add(net, make_fc_layer(net->layers[9]->out_sx, net->layers[9]->out_sy, net->layers[9]->out_depth, 43)); network_add(net, make_softmax_layer(net->layers[10]->out_sx, net->layers[10]->out_sy, net->layers[10]->out_depth)); // load pre-trained weights conv_load(net->layers[0], conv1_params, conv1_data); conv_load(net->layers[3], conv2_params, conv2_data); conv_load(net->layers[6], conv3_params, conv3_data); fc_load(net->layers[9], ip1_params, ip1_data); fc_load(net->layers[10], ip2_params, ip2_data); return net; }
// Load the snapshot of the CNN we are going to run. network_t* load_cnn_snapshot() { network_t* net = make_network(); conv_load(net->l0, "../data/snapshot/layer1_conv.txt"); conv_load(net->l3, "../data/snapshot/layer4_conv.txt"); conv_load(net->l6, "../data/snapshot/layer7_conv.txt"); fc_load(net->l9, "../data/snapshot/layer10_fc.txt"); return net; }
/* Round-trip test for conv_save/conv_load: build a reference network,
   save it, deliberately clobber every field of a second instance, load
   the file into it, and verify the two networks match field by field. */
static void test_conv_save_load()
{
    printf("test_conv_save_load...");

    /* input image geometry */
    unsigned int img_width = 512;
    unsigned int img_height = 512;
    unsigned int bitsperpixel = 24;

    /* network configuration for the reference instance */
    int i, no_of_layers = 3;
    int max_features = 20;
    int reduction_factor = 4;
    int pooling_factor = 2;
    float error_threshold[] = {0.1, 0.2, 0.3};
    /* alternative thresholds used only to clobber conv2 before loading */
    float error_threshold2[] = {0.6, 0.7, 0.8};
    unsigned int random_seed = 648326;
    deeplearn_conv conv1;
    deeplearn_conv conv2;
    FILE * fp;
    char filename[256];

    /* build the reference network (bitsperpixel/8 converts bits to bytes) */
    assert(conv_init(no_of_layers,
                     img_width, img_height,
                     bitsperpixel/8, max_features,
                     reduction_factor, pooling_factor,
                     &conv1,
                     error_threshold,
                     &random_seed) == 0);

    sprintf(filename, "/tmp/%s", "libdeep_conv.dat");

    /* save */
    fp = fopen(filename,"w");
    assert(fp);
    assert(conv_save(fp, &conv1) == 0);
    fclose(fp);

    /* set some different values, so a later match proves conv_load
       really overwrote them from the file rather than leaving stale data */
    conv2.reduction_factor = 45;
    conv2.pooling_factor = 8;
    conv2.inputs_across = 100;
    conv2.inputs_down = 200;
    conv2.inputs_depth = 16;
    conv2.no_of_layers = 2;
    conv2.max_features = 15;
    memcpy((void*)conv2.error_threshold,
           error_threshold2,
           conv2.no_of_layers*sizeof(float));
    conv2.random_seed = 20313;
    conv2.enable_learning = 0;
    conv2.current_layer = 4577;
    conv2.training_complete = 3;
    conv2.itterations = 642;
    /* NULL so the load path must allocate the autocoder itself */
    conv2.layer[0].autocoder=NULL;

    /* load */
    fp = fopen(filename,"r");
    assert(fp);
    assert(conv_load(fp, &conv2) == 0);
    fclose(fp);

    /* compare the results: the load must have allocated layer 0's
       autocoder and its buffers */
    assert(conv2.layer[0].autocoder != NULL);
    assert(conv2.layer[0].autocoder->inputs != NULL);
    assert(conv2.layer[0].autocoder->hiddens != NULL);
    assert(conv1.layer[0].autocoder->NoOfInputs ==
           conv2.layer[0].autocoder->NoOfInputs);
    assert(conv1.layer[0].autocoder->NoOfHiddens ==
           conv2.layer[0].autocoder->NoOfHiddens);
    assert(conv1.reduction_factor == conv2.reduction_factor);
    assert(conv1.pooling_factor == conv2.pooling_factor);
    /* NOTE(review): this asserts the seeds DIFFER — presumably the seed
       advances between init and save, so the clobbered value stays;
       confirm against conv_save/conv_load semantics */
    assert(conv1.random_seed != conv2.random_seed);
    assert(conv1.inputs_across == conv2.inputs_across);
    assert(conv1.inputs_down == conv2.inputs_down);
    assert(conv1.inputs_depth == conv2.inputs_depth);
    assert(conv1.max_features == conv2.max_features);
    assert(conv1.no_of_layers == conv2.no_of_layers);
    assert(conv1.enable_learning == conv2.enable_learning);
    assert(conv1.current_layer == conv2.current_layer);
    assert(conv1.training_complete == conv2.training_complete);
    assert(conv1.itterations == conv2.itterations);

    /* per-layer comparison */
    for (i = 0; i < conv1.no_of_layers; i++) {
        /* weights in both instances stay inside the (-0.3, 0.3)
           initialisation range */
        for (int j = 0; j < conv1.layer[i].autocoder->NoOfInputs*
                 conv1.layer[i].autocoder->NoOfHiddens; j++) {
            assert(conv1.layer[i].autocoder->weights[j] > -0.3f);
            assert(conv1.layer[i].autocoder->weights[j] < 0.3f);
            assert(conv2.layer[i].autocoder->weights[j] > -0.3f);
            assert(conv2.layer[i].autocoder->weights[j] < 0.3f);
        }
        assert(conv1.error_threshold[i] == conv2.error_threshold[i]);
        if ((conv1.layer[i].autocoder != NULL) &&
            (conv2.layer[i].autocoder != NULL)) {
            assert(autocoder_compare(conv1.layer[i].autocoder,
                                     conv2.layer[i].autocoder) == 0);
        }
        assert(conv1.layer[i].units_across == conv2.layer[i].units_across);
        assert(conv1.layer[i].units_down == conv2.layer[i].units_down);
        assert(conv1.layer[i].pooling_factor == conv2.layer[i].pooling_factor);
    }

    conv_free(&conv1);
    conv_free(&conv2);

    printf("Ok\n");
}