deconvolutional_layer parse_deconvolutional(list *options, size_params params)
{
    /* Pull hyperparameters from the config section, with defaults. */
    int n = option_find_int(options, "filters", 1);
    int size = option_find_int(options, "size", 1);
    int stride = option_find_int(options, "stride", 1);
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);

    /* Input geometry comes from the previous layer's output. */
    int batch = params.batch;
    int h = params.h;
    int w = params.w;
    int c = params.c;
    /* A deconvolutional layer requires a spatial (h x w x c) input volume. */
    if(!(h && w && c)) error("Layer before deconvolutional layer must output image.");

    deconvolutional_layer layer = make_deconvolutional_layer(batch, h, w, c, n, size, stride, activation);

    /* Optional inline weight/bias data embedded in the config file. */
    char *weights = option_find_str(options, "weights", 0);
    char *biases = option_find_str(options, "biases", 0);
    parse_data(weights, layer.filters, c*n*size*size);
    parse_data(biases, layer.biases, n);
#ifdef GPU
    /* Only push to the GPU if inline data actually overwrote the init. */
    if(weights || biases) push_deconvolutional_layer(layer);
#endif
    return layer;
}
void load_weights_upto(network *net, char *filename, int cutoff)
{
    /*
     * Load serialized weights from `filename` into `net`, stopping before
     * layer index `cutoff`. File layout: three legacy header floats
     * (formerly learning_rate/momentum/decay, now discarded), the "seen"
     * image counter, then per-layer blobs in network order.
     */
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stderr);  /* was fflush(stdout): the message was printed to stderr */
    /* "rb": weights are binary; text mode would corrupt reads on Windows. */
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);

    /* Skip the three legacy header floats. */
    float garbage;
    fread(&garbage, sizeof(float), 1, fp);
    fread(&garbage, sizeof(float), 1, fp);
    fread(&garbage, sizeof(float), 1, fp);
    fread(net->seen, sizeof(int), 1, fp);

    int i;
    for(i = 0; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if (l.dontload) continue;  /* layer opted out of deserialization */
        if(l.type == CONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            /* Batch-norm stats are stored between biases and filters. */
            if (l.batch_normalize && (!l.dontloadscales)){
                fread(l.scales, sizeof(float), l.n, fp);
                fread(l.rolling_mean, sizeof(float), l.n, fp);
                fread(l.rolling_variance, sizeof(float), l.n, fp);
            }
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_convolutional_layer(l);
            }
#endif
        }
        if(l.type == DECONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_deconvolutional_layer(l);
            }
#endif
        }
        if(l.type == CONNECTED){
            fread(l.biases, sizeof(float), l.outputs, fp);
            fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_connected_layer(l);
            }
#endif
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
void load_weights_upto(network *net, char *filename, int cutoff)
{
    /*
     * Load serialized weights from `filename` into `net`, stopping before
     * layer index `cutoff`. File layout: learning_rate/momentum/decay
     * header floats (restored into `net`), the "seen" counter, then
     * per-layer blobs in network order.
     */
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stderr);  /* was fflush(stdout): the message was printed to stderr */
    /* "rb": weights are binary; text mode would corrupt reads on Windows. */
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);

    fread(&net->learning_rate, sizeof(float), 1, fp);
    fread(&net->momentum, sizeof(float), 1, fp);
    fread(&net->decay, sizeof(float), 1, fp);
    fread(&net->seen, sizeof(int), 1, fp);

    int i;
    for(i = 0; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_convolutional_layer(l);
            }
#endif
        }
        if(l.type == DECONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_deconvolutional_layer(l);
            }
#endif
        }
        if(l.type == CONNECTED){
            fread(l.biases, sizeof(float), l.outputs, fp);
            fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_connected_layer(l);
            }
#endif
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
void load_weights_upto(network *net, char *filename, int cutoff)
{
    /*
     * Load serialized weights from `filename` into `net`, stopping before
     * layer index `cutoff`. File layout: a major/minor/revision version
     * header, the "seen" counter, then per-layer blobs in network order.
     * Versions with major or minor > 1000 store connected-layer weight
     * matrices transposed (legacy format); `transpose` flags that case.
     */
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stderr);  /* was fflush(stdout): the message was printed to stderr */
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);

    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    fread(net->seen, sizeof(int), 1, fp);
    int transpose = (major > 1000) || (minor > 1000);

    int i;
    for(i = 0; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if (l.dontload) continue;  /* layer opted out of deserialization */
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
        if(l.type == DECONVOLUTIONAL){
            int num = l.n*l.c*l.size*l.size;
            fread(l.biases, sizeof(float), l.n, fp);
            fread(l.filters, sizeof(float), num, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_deconvolutional_layer(l);
            }
#endif
        }
        if(l.type == CONNECTED){
            load_connected_weights(l, fp, transpose);
        }
        if(l.type == BATCHNORM){
            load_batchnorm_weights(l, fp);
        }
        /* Recurrent layers are composites; load each sub-layer in order. */
        if(l.type == CRNN){
            load_convolutional_weights(*(l.input_layer), fp);
            load_convolutional_weights(*(l.self_layer), fp);
            load_convolutional_weights(*(l.output_layer), fp);
        }
        if(l.type == RNN){
            load_connected_weights(*(l.input_layer), fp, transpose);
            load_connected_weights(*(l.self_layer), fp, transpose);
            load_connected_weights(*(l.output_layer), fp, transpose);
        }
        if(l.type == GRU){
            load_connected_weights(*(l.input_z_layer), fp, transpose);
            load_connected_weights(*(l.input_r_layer), fp, transpose);
            load_connected_weights(*(l.input_h_layer), fp, transpose);
            load_connected_weights(*(l.state_z_layer), fp, transpose);
            load_connected_weights(*(l.state_r_layer), fp, transpose);
            load_connected_weights(*(l.state_h_layer), fp, transpose);
        }
        if(l.type == LOCAL){
            /* Locally-connected: an independent filter set per output location. */
            int locations = l.out_w*l.out_h;
            int size = l.size*l.size*l.c*l.n*locations;
            fread(l.biases, sizeof(float), l.outputs, fp);
            fread(l.filters, sizeof(float), size, fp);
#ifdef GPU
            if(gpu_index >= 0){
                push_local_layer(l);
            }
#endif
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}