/*
 * Build a network from a Darknet cfg file.  The first section must be
 * [net] or [network] and sets the global options; every later section is
 * parsed into one layer.
 */
network *parse_network_cfg(char *filename)
{
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    network *net = make_network(sections->size - 1);
    net->gpu_index = gpu_index;
    size_params params;

    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);

    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;

    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer     filters    size              input                output\n");
    /* One layer per remaining section. */
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        layer l = {0};
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == DECONVOLUTIONAL){
            l = parse_deconvolutional(options, params);
        }else if(lt == LOCAL){
            l = parse_local(options, params);
        }else if(lt == ACTIVE){
            l = parse_activation(options, params);
        }else if(lt == LOGXENT){
            l = parse_logistic(options, params);
        }else if(lt == L2NORM){
            l = parse_l2norm(options, params);
        }else if(lt == RNN){
            l = parse_rnn(options, params);
        }else if(lt == GRU){
            l = parse_gru(options, params);
        }else if(lt == LSTM){
            l = parse_lstm(options, params);
        }else if(lt == CRNN){
            l = parse_crnn(options, params);
        }else if(lt == CONNECTED){
            l = parse_connected(options, params);
        }else if(lt == CROP){
            l = parse_crop(options, params);
        }else if(lt == COST){
            l = parse_cost(options, params);
        }else if(lt == REGION){
            l = parse_region(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ISEG){
            l = parse_iseg(options, params);
        }else if(lt == DETECTION){
            l = parse_detection(options, params);
        }else if(lt == SOFTMAX){
            l = parse_softmax(options, params);
            net->hierarchy = l.softmax_tree;
        }else if(lt == NORMALIZATION){
            l = parse_normalization(options, params);
        }else if(lt == BATCHNORM){
            l = parse_batchnorm(options, params);
        }else if(lt == MAXPOOL){
            l = parse_maxpool(options, params);
        }else if(lt == REORG){
            l = parse_reorg(options, params);
        }else if(lt == AVGPOOL){
            l = parse_avgpool(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
        }else if(lt == UPSAMPLE){
            l = parse_upsample(options, params, net);
        }else if(lt == SHORTCUT){
            l = parse_shortcut(options, params, net);
        }else if(lt == DROPOUT){
            l = parse_dropout(options, params);
            /* Dropout shares the previous layer's output and delta buffers. */
            l.output = net->layers[count-1].output;
            l.delta = net->layers[count-1].delta;
#ifdef GPU
            l.output_gpu = net->layers[count-1].output_gpu;
            l.delta_gpu = net->layers[count-1].delta_gpu;
#endif
        }else{
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
        /* Options honored by every layer type. */
        l.clip = net->clip;
        l.truth = option_find_int_quiet(options, "truth", 0);
        l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
        l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
        l.dontsave = option_find_int_quiet(options, "dontsave", 0);
        l.dontload = option_find_int_quiet(options, "dontload", 0);
        l.numload = option_find_int_quiet(options, "numload", 0);
        l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
        l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
        l.smooth = option_find_float_quiet(options, "smooth", 0);
        option_unused(options);
        net->layers[count] = l;
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;
        if(n){
            /* The next layer's input shape is this layer's output shape. */
            params.h = l.out_h;
            params.w = l.out_w;
            params.c = l.out_c;
            params.inputs = l.outputs;
        }
    }
    free_list(sections);
    layer out = get_network_output_layer(net);
    net->outputs = out.outputs;
    net->truths = out.outputs;
    if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
    net->output = out.output;
    net->input = calloc(net->inputs*net->batch, sizeof(float));
    net->truth = calloc(net->truths*net->batch, sizeof(float));
#ifdef GPU
    net->output_gpu = out.output_gpu;
    net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
    net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
#endif
    /* Single shared scratch workspace, sized for the largest per-layer request. */
    if(workspace_size){
        //printf("%ld\n", workspace_size);
#ifdef GPU
        if(gpu_index >= 0){
            net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
        }else{
            net->workspace = calloc(1, workspace_size);
        }
#else
        net->workspace = calloc(1, workspace_size);
#endif
    }
    return net;
}
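/*
 * Illustrative cfg layout only (not taken from this file): parse_network_cfg()
 * requires the first section to be [net] or [network], which feeds
 * parse_net_options(); every later section becomes one layer.  The exact keys
 * depend on the per-layer parsers, e.g.:
 *
 *   [net]
 *   batch=1
 *   width=416
 *   height=416
 *   channels=3
 *
 *   [convolutional]
 *   filters=16
 *   size=3
 *   stride=1
 *   pad=1
 *   activation=leaky
 *
 *   [maxpool]
 *   size=2
 *   stride=2
 */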
/*
 * Resize an already-built network to a new input resolution, propagating the
 * new spatial dimensions layer by layer and reallocating the shared buffers.
 */
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    cuda_free(net->workspace);
#endif
    int i;
    //if(w == net->w && h == net->h) return 0;

    net->w = w;
    net->h = h;
    int inputs = 0;
    size_t workspace_size = 0;
    //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
    //fflush(stderr);
    for(i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == CROP){
            resize_crop_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            error("Cannot resize this type of layer");
        }
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        inputs = l.outputs;
        net->layers[i] = l;
        /* The resized output shape becomes the next layer's input shape. */
        w = l.out_w;
        h = l.out_h;
        if(l.type == AVGPOOL) break;
    }
    layer out = get_network_output_layer(net);
    net->inputs = net->layers[0].inputs;
    net->outputs = out.outputs;
    net->truths = out.outputs;
    if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
    net->output = out.output;
    free(net->input);
    free(net->truth);
    net->input = calloc(net->inputs*net->batch, sizeof(float));
    net->truth = calloc(net->truths*net->batch, sizeof(float));
#ifdef GPU
    if(gpu_index >= 0){
        cuda_free(net->input_gpu);
        cuda_free(net->truth_gpu);
        net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
        net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
        net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
    }else{
        free(net->workspace);
        net->workspace = calloc(1, workspace_size);
    }
#else
    free(net->workspace);
    net->workspace = calloc(1, workspace_size);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}
/*
 * Copy the output layer's activations from GPU memory back to the host-side
 * l.output buffer (requires a GPU build).
 */
void pull_network_output(network net)
{
    layer l = get_network_output_layer(&net);
    cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
}
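/*
 * Minimal usage sketch, for illustration only: it drives the two entry points
 * defined above.  The cfg path is a placeholder, it assumes the usual
 * darknet.h declarations (network, free_network), and it is compiled out
 * unless PARSER_USAGE_EXAMPLE is defined.
 */
#ifdef PARSER_USAGE_EXAMPLE
int main(int argc, char **argv)
{
    char *cfgfile = argc > 1 ? argv[1] : "cfg/example.cfg";   /* placeholder path */

    /* Build the layer graph from the cfg file. */
    network *net = parse_network_cfg(cfgfile);
    fprintf(stderr, "parsed %d layers, input %d x %d x %d\n",
            net->n, net->w, net->h, net->c);

    /* Re-shape every resizable layer, e.g. for multi-scale inference. */
    resize_network(net, 608, 608);

    free_network(net);
    return 0;
}
#endif /* PARSER_USAGE_EXAMPLE */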