/*
 * Resize a network to a new input width/height.
 *
 * Propagates the new spatial dimensions through every layer (each layer's
 * out_w/out_h become the next layer's input size), then reallocates the
 * network's input, truth, and workspace buffers to match the new sizes.
 *
 * net : network to resize (modified in place)
 * w,h : new input width and height in pixels
 *
 * Returns 0 on success. Calls error() (fatal) on a layer type that
 * cannot be resized.
 */
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    /* FIX: only cuda_free the workspace when a GPU is actually in use.
     * In CPU mode net->workspace was allocated with calloc and is released
     * with free() below; the unguarded cuda_free here operated on a host
     * pointer.  (Matches the guarded free in the other resize_network
     * variant in this file.) */
    if(gpu_index >= 0) cuda_free(net->workspace);
#endif
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    int inputs = 0;
    size_t workspace_size = 0;
    //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }else if(l.type == CROP){
            resize_crop_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            error("Cannot resize this type of layer");
        }
        /* Track the largest per-layer scratch requirement. */
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        inputs = l.outputs;
        net->layers[i] = l;
        /* This layer's output size is the next layer's input size. */
        w = l.out_w;
        h = l.out_h;
        /* Layers after an AVGPOOL keep fixed sizes, so resizing stops here. */
        if(l.type == AVGPOOL) break;
    }
    layer out = get_network_output_layer(*net);
    net->inputs = net->layers[0].inputs;
    net->outputs = out.outputs;
    net->truths = out.outputs;
    if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
    net->output = out.output;
    /* Reallocate host-side input/truth buffers at the new sizes. */
    free(net->input);
    free(net->truth);
    net->input = calloc(net->inputs*net->batch, sizeof(float));
    net->truth = calloc(net->truths*net->batch, sizeof(float));
#ifdef GPU
    if(gpu_index >= 0){
        cuda_free(net->input_gpu);
        cuda_free(net->truth_gpu);
        net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
        net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
        /* FIX: the old expression (workspace_size-1)/sizeof(float)+1
         * underflows size_t when workspace_size == 0 and requests a huge
         * allocation.  workspace_size/sizeof(float)+1 rounds up safely and
         * matches the other resize_network variant in this file. */
        net->workspace = cuda_make_array(0, workspace_size/sizeof(float) + 1);
    }else {
        free(net->workspace);
        net->workspace = calloc(1, workspace_size);
    }
#else
    free(net->workspace);
    net->workspace = calloc(1, workspace_size);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}
/*
 * Resize a network to a new input width/height.
 *
 * Frees any GPU-side input/truth/workspace buffers first, then walks the
 * layer list dispatching on layer type: each layer is resized for the
 * current (w, h), and its out_w/out_h feed the next layer.  Finally a new
 * workspace sized to the largest per-layer requirement is allocated
 * (device memory when a GPU is in use, host calloc otherwise).
 *
 * net : network to resize (modified in place)
 * w,h : new input width and height in pixels
 *
 * Returns 0 on success. Calls error() (fatal) on an unsupported layer type.
 */
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    if(gpu_index >= 0){
        cuda_free(net->workspace);
        if (net->input_gpu) {
            cuda_free(*net->input_gpu);
            *net->input_gpu = 0;
            cuda_free(*net->truth_gpu);
            *net->truth_gpu = 0;
        }
    }
#endif
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    int prev_outputs = 0;
    size_t max_workspace = 0;
    //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
    //fflush(stderr);
    int idx;
    for (idx = 0; idx < net->n; ++idx){
        layer l = net->layers[idx];
        //printf(" %d: layer = %d,", idx, l.type);
        switch (l.type) {
        case CONVOLUTIONAL:  resize_convolutional_layer(&l, w, h);   break;
        case CROP:           resize_crop_layer(&l, w, h);            break;
        case MAXPOOL:        resize_maxpool_layer(&l, w, h);         break;
        case REGION:         resize_region_layer(&l, w, h);          break;
        case YOLO:           resize_yolo_layer(&l, w, h);            break;
        case ROUTE:          resize_route_layer(&l, net);            break;
        case SHORTCUT:       resize_shortcut_layer(&l, w, h);        break;
        case UPSAMPLE:       resize_upsample_layer(&l, w, h);        break;
        case REORG:          resize_reorg_layer(&l, w, h);           break;
        case AVGPOOL:        resize_avgpool_layer(&l, w, h);         break;
        case NORMALIZATION:  resize_normalization_layer(&l, w, h);   break;
        case COST:           resize_cost_layer(&l, prev_outputs);    break;
        default:
            fprintf(stderr, "Resizing type %d \n", (int)l.type);
            error("Cannot resize this type of layer");
        }
        /* Track the largest per-layer scratch requirement. */
        if (l.workspace_size > max_workspace) max_workspace = l.workspace_size;
        prev_outputs = l.outputs;
        net->layers[idx] = l;
        /* This layer's output size becomes the next layer's input size. */
        w = l.out_w;
        h = l.out_h;
        /* Layers after an AVGPOOL keep fixed sizes, so resizing stops here. */
        if (l.type == AVGPOOL) break;
    }
#ifdef GPU
    if(gpu_index >= 0){
        printf(" try to allocate workspace = %zu * sizeof(float), ", max_workspace / sizeof(float) + 1);
        net->workspace = cuda_make_array(0, max_workspace/sizeof(float) + 1);
        printf(" CUDA allocate done! \n");
    }else {
        free(net->workspace);
        net->workspace = calloc(1, max_workspace);
    }
#else
    free(net->workspace);
    net->workspace = calloc(1, max_workspace);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}