Example #1
void forward_batchnorm_layer_gpu(layer l, network_state state)
{
    if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    if(l.type == CONNECTED){
        l.out_c = l.outputs;
        l.out_h = l.out_w = 1;
    }
    if (state.train) {
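        // Compute per-channel batch mean/variance, fold them into the rolling
        // statistics (an exponential moving average: rolling = .99*rolling + .01*batch),
        // and keep x_gpu / x_norm_gpu around for the backward pass.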
        fast_mean_gpu(l.output_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.mean_gpu);
        fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.variance_gpu);

        scal_ongpu(l.out_c, .99, l.rolling_mean_gpu, 1);
        axpy_ongpu(l.out_c, .01, l.mean_gpu, 1, l.rolling_mean_gpu, 1);
        scal_ongpu(l.out_c, .99, l.rolling_variance_gpu, 1);
        axpy_ongpu(l.out_c, .01, l.variance_gpu, 1, l.rolling_variance_gpu, 1);

        copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
        normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
        copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_norm_gpu, 1);
    } else {
        normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
    }

    scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
}
Example #2
void update_connected_layer_gpu(connected_layer l, int batch, float learning_rate, float momentum, float decay)
{
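    // Plain SGD with momentum and L2 weight decay: the decay term is folded
    // into the accumulated gradient, the parameters are stepped by
    // learning_rate/batch, and the gradient buffers are scaled by momentum
    // so they carry over into the next batch.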
    axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
    scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1);

    axpy_ongpu(l.inputs*l.outputs, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
    axpy_ongpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
    scal_ongpu(l.inputs*l.outputs, momentum, l.weight_updates_gpu, 1);
}
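All of the snippets on this page lean on the same BLAS-style helpers. For reference, here is a minimal CPU sketch of what axpy_ongpu(N, ALPHA, X, INCX, Y, INCY) and scal_ongpu(N, ALPHA, X, INCX) compute, assuming the usual darknet signatures (the _ongpu variants run the same loops as CUDA kernels over device pointers):

// Reference semantics only; mirrors darknet's axpy_cpu / scal_cpu.
void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];   // y = a*x + y
}

void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;              // x = a*x
}

copy_ongpu and fill_ongpu, used in the other examples, are the corresponding element-wise copy and constant-fill kernels.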
Example #3
void update_local_layer_gpu(local_layer l, int batch, float learning_rate, float momentum, float decay, cudaStream_t stream)
{
	int locations = l.out_w*l.out_h;
	int size = l.size*l.size*l.c*l.n*locations;
	axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1, stream);
	scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1, stream);

	axpy_ongpu(size, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1, stream);
	axpy_ongpu(size, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1, stream);
	scal_ongpu(size, momentum, l.weight_updates_gpu, 1, stream);
}
Example #4
void update_local_layer_gpu(local_layer l, int batch, float learning_rate, float momentum, float decay)
{
    int locations = l.out_w*l.out_h;
    int size = l.size*l.size*l.c*l.n*locations;
    axpy_ongpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
    scal_ongpu(l.outputs, momentum, l.bias_updates_gpu, 1);

    axpy_ongpu(size, -decay*batch, l.filters_gpu, 1, l.filter_updates_gpu, 1);
    axpy_ongpu(size, learning_rate/batch, l.filter_updates_gpu, 1, l.filters_gpu, 1);
    scal_ongpu(size, momentum, l.filter_updates_gpu, 1);
}
Example #5
void forward_batchnorm_layer_gpu(layer l, network_state state)
{
    if(l.type == BATCHNORM) copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    if(l.type == CONNECTED){
        l.out_c = l.outputs;
        l.out_h = l.out_w = 1;
    }
    if (state.train) {
#ifdef CUDNN
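        // cuDNN fuses normalization, scale, and bias into a single call;
        // .01 is the exponential average factor for the running statistics
        // and .00001 is the epsilon added to the variance.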
        copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
        float one = 1;
        float zero = 0;
        cudnnBatchNormalizationForwardTraining(cudnn_handle(),
                    CUDNN_BATCHNORM_SPATIAL,
                    &one,
                    &zero,
                    l.dstTensorDesc,
                    l.x_gpu,
                    l.dstTensorDesc,
                    l.output_gpu,
                    l.normTensorDesc,
                    l.scales_gpu,
                    l.biases_gpu,
                    .01,
                    l.rolling_mean_gpu,
                    l.rolling_variance_gpu,
                    .00001,
                    l.mean_gpu,
                    l.variance_gpu);
#else
        fast_mean_gpu(l.output_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.mean_gpu);
        fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.variance_gpu);

        scal_ongpu(l.out_c, .99, l.rolling_mean_gpu, 1);
        axpy_ongpu(l.out_c, .01, l.mean_gpu, 1, l.rolling_mean_gpu, 1);
        scal_ongpu(l.out_c, .99, l.rolling_variance_gpu, 1);
        axpy_ongpu(l.out_c, .01, l.variance_gpu, 1, l.rolling_variance_gpu, 1);

        copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
        normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
        copy_ongpu(l.outputs*l.batch, l.output_gpu, 1, l.x_norm_gpu, 1);

        scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
#endif
    } else {
        normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
        scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
        add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
    }

}
Example #6
void backward_connected_layer_gpu(connected_layer l, network_state state)
{
    int i;
    constrain_ongpu(l.outputs*l.batch, 5, l.delta_gpu, 1);
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    for(i = 0; i < l.batch; ++i){
        axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
    }

    if(l.batch_normalize){
        backward_batchnorm_layer_gpu(l, state);
    }
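    // First GEMM accumulates the weight gradient:
    //   weight_updates += delta^T * input   (outputs x inputs)
    // Second GEMM backpropagates to the previous layer:
    //   state.delta    += delta * weights   (batch x inputs)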

    int m = l.outputs;
    int k = l.batch;
    int n = l.inputs;
    float * a = l.delta_gpu;
    float * b = state.input;
    float * c = l.weight_updates_gpu;
    gemm_ongpu(1,0,m,n,k,1,a,m,b,n,1,c,n);

    m = l.batch;
    k = l.outputs;
    n = l.inputs;

    a = l.delta_gpu;
    b = l.weights_gpu;
    c = state.delta;

    if(c) gemm_ongpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
Example #7
void forward_compact_layer_gpu(const layer l, network_state state)
{
    int i, b;
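    // Each batch item's input is treated as l.index sections of l.outputs
    // floats; l.method selects how they are combined (add, subtract, max,
    // or the pixel-wise variants below).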
    for (b=0;b<l.batch;b++)
    {
        if (l.method==0) // add
        {
            // copy first section
            copy_ongpu(l.outputs, state.input+b*l.inputs, 1, l.output_gpu+b*l.outputs, 1);
            // add other splits
            for (i=1;i<l.index;i++)
            {
                axpy_ongpu(l.outputs, 1, state.input+b*l.inputs+i*l.outputs, 1, l.output_gpu+b*l.outputs, 1);
            }
        } else if (l.method==1) // sub
        {
            // copy first section
            copy_ongpu(l.outputs, state.input+b*l.inputs, 1, l.output_gpu+b*l.outputs, 1);
            // sub other splits
            for (i=1;i<l.index;i++)
            {
                axpy_ongpu(l.outputs, -1, state.input+b*l.inputs+i*l.outputs, 1, l.output_gpu+b*l.outputs, 1);
            }
        } else if (l.method==2) // max
        {
            compact_forward_max_gpu(l.w, l.h, l.c, l.index,
                                    state.input+b*l.inputs,
                                    l.output_gpu+b*l.outputs,
                                    l.indexes_gpu);
        } else if (l.method==10)
        {
            compact_forward_padd_gpu(l.w, l.h, l.c,
                                     state.input+b*l.inputs,
                                     l.output_gpu+b*l.outputs);
        } else if (l.method==12)
        {
            compact_forward_pmax_gpu(l.w, l.h, l.c,
                                     state.input+b*l.inputs,
                                     l.output_gpu+b*l.outputs,
                                     l.indexes_gpu);
        }
    }
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
Example #8
void forward_rnn_layer_gpu(layer l, network_state state)
{
    network_state s = {0};
    s.train = state.train;
    int i;
    layer input_layer = *(l.input_layer);
    layer self_layer = *(l.self_layer);
    layer output_layer = *(l.output_layer);

    fill_ongpu(l.outputs * l.batch * l.steps, 0, output_layer.delta_gpu, 1);
    fill_ongpu(l.hidden * l.batch * l.steps, 0, self_layer.delta_gpu, 1);
    fill_ongpu(l.hidden * l.batch * l.steps, 0, input_layer.delta_gpu, 1);
    if(state.train) fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);

    for (i = 0; i < l.steps; ++i) {
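        // new state = input_layer(x_t) + self_layer(previous state), added on
        // top of the old state when l.shortcut is set; the output layer then
        // reads the freshly built state.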
        s.input = state.input;
        forward_connected_layer_gpu(input_layer, s);

        s.input = l.state_gpu;
        forward_connected_layer_gpu(self_layer, s);

        float *old_state = l.state_gpu;
        if(state.train) l.state_gpu += l.hidden*l.batch;
        if(l.shortcut){
            copy_ongpu(l.hidden * l.batch, old_state, 1, l.state_gpu, 1);
        }else{
            fill_ongpu(l.hidden * l.batch, 0, l.state_gpu, 1);
        }
        axpy_ongpu(l.hidden * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
        axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);

        s.input = l.state_gpu;
        forward_connected_layer_gpu(output_layer, s);

        state.input += l.inputs*l.batch;
        increment_layer(&input_layer, 1);
        increment_layer(&self_layer, 1);
        increment_layer(&output_layer, 1);
    }
}
Example #9
void forward_cost_layer_gpu(cost_layer l, network_state state)
{
    if (!state.truth) return;
    if (l.cost_type == MASKED) {
        mask_ongpu(l.batch*l.inputs, state.input, state.truth);
    }
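    // delta = truth - input; the scalar cost is the squared error ||delta||^2,
    // computed on the CPU after pulling delta back from the GPU.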
    
    copy_ongpu(l.batch*l.inputs, state.truth, 1, l.delta_gpu, 1);
    axpy_ongpu(l.batch*l.inputs, -1, state.input, 1, l.delta_gpu, 1);

    cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
    *(l.output) = dot_cpu(l.batch*l.inputs, l.delta, 1, l.delta, 1);
}
Example #10
void backward_crnn_layer_gpu(layer_t l, network_state state)
{
    NETWORK_STATE(s);
    s.train = state.train;
    int i;
    layer_t input_layer = *(l.input_layer);
    layer_t self_layer = *(l.self_layer);
    layer_t output_layer = *(l.output_layer);
    increment_layer(&input_layer,  l.steps - 1);
    increment_layer(&self_layer,   l.steps - 1);
    increment_layer(&output_layer, l.steps - 1);
    l.state_gpu += l.hidden*l.batch*l.steps;
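    // Walk the timesteps in reverse, reconstructing the hidden state of each
    // step from the stored sub-layer outputs before backpropagating through it.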
    for (i = l.steps-1; i >= 0; --i) {
        copy_ongpu(l.hidden * l.batch, input_layer.output_gpu, 1, l.state_gpu, 1);
        axpy_ongpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);

        s.input = l.state_gpu;
        s.delta = self_layer.delta_gpu;
        backward_convolutional_layer_gpu(output_layer, s);

        l.state_gpu -= l.hidden*l.batch;

        s.input = l.state_gpu;
        s.delta = self_layer.delta_gpu - l.hidden*l.batch;
        if (i == 0) s.delta = NULL;
        backward_convolutional_layer_gpu(self_layer, s);

        copy_ongpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
        if (i > 0 && l.shortcut) axpy_ongpu(l.hidden*l.batch, 1, self_layer.delta_gpu, 1, self_layer.delta_gpu - l.hidden*l.batch, 1);
        s.input = state.input + i*l.inputs*l.batch;
        if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
        else s.delta = NULL;
        backward_convolutional_layer_gpu(input_layer, s);

        increment_layer(&input_layer,  -1);
        increment_layer(&self_layer,   -1);
        increment_layer(&output_layer, -1);
    }
}
Example #11
void backward_compact_layer_gpu(const layer l, network_state state)
{
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    int i, b;
    for (b=0;b<l.batch;b++)
    {
        if (l.method==0) // add
        {
            for (i=0;i<l.index;i++)
            {
                axpy_ongpu(l.outputs, 1, l.delta_gpu+b*l.outputs, 1, state.delta+b*l.inputs+i*l.outputs, 1);
            }
        } else if (l.method==1) // sub
        {
            axpy_ongpu(l.outputs, 1, l.delta_gpu+b*l.outputs, 1, state.delta+b*l.inputs, 1);
            for (i=1;i<l.index;i++)
            {
                axpy_ongpu(l.outputs, -1, l.delta_gpu+b*l.outputs, 1, state.delta+b*l.inputs+i*l.outputs, 1);
            }
        } else if (l.method==2) // max
        {
            compact_backward_max_gpu(l.w, l.h, l.c, l.index,
                                     l.delta_gpu+b*l.outputs,
                                     state.delta+b*l.inputs,
                                     l.indexes_gpu);
        } else if (l.method==10)
        {
            compact_backward_padd_gpu(l.w, l.h, l.c,
                                      l.delta_gpu+b*l.outputs,
                                      state.delta+b*l.inputs);
        } else if (l.method==12)
        {
            compact_backward_pmax_gpu(l.w, l.h, l.c,
                                      l.delta_gpu+b*l.outputs,
                                      state.delta+b*l.inputs,
                                      l.indexes_gpu);
        }
    }
}
Example #12
void backward_route_layer_gpu(const route_layer l, network_state state)
{
    int i, j;
    int offset = 0;
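    // Accumulate this layer's delta back into every source layer listed in
    // l.input_layers, one slice of input_size floats per batch item.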
    for(i = 0; i < l.n; ++i){
        int index = l.input_layers[i];
        float *delta = state.net.layers[index].delta_gpu;
        int input_size = l.input_sizes[i];
        for(j = 0; j < l.batch; ++j){
            axpy_ongpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);
        }
        offset += input_size;
    }
}
Example #13
void forward_cost_layer_gpu(cost_layer l, network_state state)
{
    if (!state.truth) return;
    if (l.cost_type == MASKED) {
        mask_ongpu(l.batch*l.inputs, state.input, SECRET_NUM, state.truth);
    }

    if(l.cost_type == SMOOTH){
        smooth_l1_gpu(l.batch*l.inputs, state.input, state.truth, l.delta_gpu);
    } else {
        copy_ongpu(l.batch*l.inputs, state.truth, 1, l.delta_gpu, 1);
        axpy_ongpu(l.batch*l.inputs, -1, state.input, 1, l.delta_gpu, 1);
    }

    cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
    *(l.output) = dot_cpu(l.batch*l.inputs, l.delta, 1, l.delta, 1);
}
Example #14
void backward_local_layer_gpu(local_layer l, network_state state)
{
	int i, j;
	int locations = l.out_w*l.out_h;

	gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu, state.st_handle.stream);
	for(i = 0; i < l.batch; ++i) {
		axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1, state.st_handle.stream);
	}
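	// A locally connected layer has an independent filter for every output
	// location, so the weight gradient and the input delta are built from one
	// small GEMM per location over the im2col buffer.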

	for(i = 0; i < l.batch; ++i) {
		float *input = state.input + i*l.w*l.h*l.c;
		im2col_ongpu(input, l.c, l.h, l.w,
				l.size, l.stride, l.pad, l.col_image_gpu, state.st_handle.stream);

		for(j = 0; j < locations; ++j) {
			float *a = l.delta_gpu + i*l.outputs + j;
			float *b = l.col_image_gpu + j;
			float *c = l.weight_updates_gpu + j*l.size*l.size*l.c*l.n;
			int m = l.n;
			int n = l.size*l.size*l.c;
			int k = 1;
			//printf("passou no backward_local_layer_gpu first call\n");
			gemm_ongpu(0,1,m,n,k,1,a,locations,b,locations,1,c,n, state.st_handle);
		}

		if(state.delta) {
			for(j = 0; j < locations; ++j) {
				float *a = l.weights_gpu + j*l.size*l.size*l.c*l.n;
				float *b = l.delta_gpu + i*l.outputs + j;
				float *c = l.col_image_gpu + j;

				int m = l.size*l.size*l.c;
				int n = 1;
				int k = l.n;
				//printf("passou no backward_local_layer_gpu second call\n");
				gemm_ongpu(1,0,m,n,k,1,a,m,b,locations,0,c,locations, state.st_handle);
			}

			col2im_ongpu(l.col_image_gpu, l.c, l.h, l.w, l.size, l.stride, l.pad, state.delta+i*l.c*l.h*l.w, state.st_handle.stream);
		}
	}
}
Example #15
void backward_local_layer_gpu(local_layer l, network_state state)
{
    int i, j;
    int locations = l.out_w*l.out_h;

    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    for(i = 0; i < l.batch; ++i){
        axpy_ongpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
    }

    for(i = 0; i < l.batch; ++i){
        float *input = state.input + i*l.w*l.h*l.c;
        im2col_ongpu(input, l.c, l.h, l.w, 
                l.size, l.stride, l.pad, l.col_image_gpu);

        for(j = 0; j < locations; ++j){ 
            float *a = l.delta_gpu + i*l.outputs + j;
            float *b = l.col_image_gpu + j;
            float *c = l.filter_updates_gpu + j*l.size*l.size*l.c*l.n;
            int m = l.n;
            int n = l.size*l.size*l.c;
            int k = 1;

            gemm_ongpu(0,1,m,n,k,1,a,locations,b,locations,1,c,n);
        }

        if(state.delta){
            for(j = 0; j < locations; ++j){ 
                float *a = l.filters_gpu + j*l.size*l.size*l.c*l.n;
                float *b = l.delta_gpu + i*l.outputs + j;
                float *c = l.col_image_gpu + j;

                int m = l.size*l.size*l.c;
                int n = 1;
                int k = l.n;

                gemm_ongpu(1,0,m,n,k,1,a,m,b,locations,0,c,locations);
            }

            col2im_ongpu(l.col_image_gpu, l.c,  l.h,  l.w,  l.size,  l.stride, l.pad, state.delta+i*l.c*l.h*l.w);
        }
    }
}
Example #16
void forward_connected_layer_gpu(connected_layer l, network_state state)
{
    int i;
    fill_ongpu(l.outputs*l.batch, 0, l.output_gpu, 1);
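    // output = input * weights^T for the whole batch, then optional batch
    // normalization, a per-output bias, and the activation function.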

    int m = l.batch;
    int k = l.inputs;
    int n = l.outputs;
    float * a = state.input;
    float * b = l.weights_gpu;
    float * c = l.output_gpu;
    gemm_ongpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
    if(l.batch_normalize){
        forward_batchnorm_layer_gpu(l, state);
    }
    for(i = 0; i < l.batch; ++i){
        axpy_ongpu(l.outputs, 1, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
    }
    activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
Example #17
void backward_rnn_layer_gpu(layer l, network_state state)
{
    network_state s = {0};
    s.train = state.train;
    int i;
    layer input_layer = *(l.input_layer);
    layer self_layer = *(l.self_layer);
    layer output_layer = *(l.output_layer);
    increment_layer(&input_layer,  l.steps - 1);
    increment_layer(&self_layer,   l.steps - 1);
    increment_layer(&output_layer, l.steps - 1);
    l.state_gpu += l.hidden*l.batch*l.steps;
    for (i = l.steps-1; i >= 0; --i) {

        s.input = l.state_gpu;
        s.delta = self_layer.delta_gpu;
        backward_connected_layer_gpu(output_layer, s);

        l.state_gpu -= l.hidden*l.batch;

        copy_ongpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);

        s.input = l.state_gpu;
        s.delta = self_layer.delta_gpu - l.hidden*l.batch;
        if (i == 0) s.delta = 0;
        backward_connected_layer_gpu(self_layer, s);

        //copy_ongpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
        if (i > 0 && l.shortcut) axpy_ongpu(l.hidden*l.batch, 1, self_layer.delta_gpu, 1, self_layer.delta_gpu - l.hidden*l.batch, 1);
        s.input = state.input + i*l.inputs*l.batch;
        if(state.delta) s.delta = state.delta + i*l.inputs*l.batch;
        else s.delta = 0;
        backward_connected_layer_gpu(input_layer, s);

        increment_layer(&input_layer,  -1);
        increment_layer(&self_layer,   -1);
        increment_layer(&output_layer, -1);
    }
}
Example #18
void backward_softmax_layer_gpu(const softmax_layer layer, network_state state)
{
    axpy_ongpu(layer.batch*layer.inputs, 1, layer.delta_gpu, 1, state.delta, 1);
}
Example #19
File: lsd.c Project: vaiv/OpenANPR
void train_lsd3(char *fcfg, char *fweight, char *gcfg, char *gweight, char *acfg, char *aweight, int clear)
{
#ifdef GPU
    //char *train_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
    char *train_images = "/home/pjreddie/data/imagenet/imagenet1k.train.list";
    //char *style_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
    char *style_images = "/home/pjreddie/zelda.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    network fnet = load_network(fcfg, fweight, clear);
    network gnet = load_network(gcfg, gweight, clear);
    network anet = load_network(acfg, aweight, clear);
    char *gbase = basecfg(gcfg);
    char *abase = basecfg(acfg);

    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", gnet.learning_rate, gnet.momentum, gnet.decay);
    int imgs = gnet.batch*gnet.subdivisions;
    int i = *gnet.seen/imgs;
    data train, tbuffer;
    data style, sbuffer;


    list *slist = get_paths(style_images);
    char **spaths = (char **)list_to_array(slist);

    list *tlist = get_paths(train_images);
    char **tpaths = (char **)list_to_array(tlist);

    load_args targs= get_base_args(gnet);
    targs.paths = tpaths;
    targs.n = imgs;
    targs.m = tlist->size;
    targs.d = &tbuffer;
    targs.type = CLASSIFICATION_DATA;
    targs.classes = 1;
    char *ls[1] = {"zelda"};
    targs.labels = ls;

    load_args sargs = get_base_args(gnet);
    sargs.paths = spaths;
    sargs.n = imgs;
    sargs.m = slist->size;
    sargs.d = &sbuffer;
    sargs.type = CLASSIFICATION_DATA;
    sargs.classes = 1;
    sargs.labels = ls;

    pthread_t tload_thread = load_data_in_thread(targs);
    pthread_t sload_thread = load_data_in_thread(sargs);
    clock_t time;

    float aloss_avg = -1;
    float floss_avg = -1;

    network_state fstate = {};
    fstate.index = 0;
    fstate.net = fnet;
    int x_size = get_network_input_size(fnet)*fnet.batch;
    int y_size = get_network_output_size(fnet)*fnet.batch;
    fstate.input = cuda_make_array(0, x_size);
    fstate.truth = cuda_make_array(0, y_size);
    fstate.delta = cuda_make_array(0, x_size);
    fstate.train = 1;
    float *X = (float*)calloc(x_size, sizeof(float));
    float *y = (float*)calloc(y_size, sizeof(float));

    float *ones = cuda_make_array(0, anet.batch);
    float *zeros = cuda_make_array(0, anet.batch);
    fill_ongpu(anet.batch, .99, ones, 1);
    fill_ongpu(anet.batch, .01, zeros, 1);

    network_state astate = {};
    astate.index = 0;
    astate.net = anet;
    int ax_size = get_network_input_size(anet)*anet.batch;
    int ay_size = get_network_output_size(anet)*anet.batch;
    astate.input = 0;
    astate.truth = ones;
    astate.delta = cuda_make_array(0, ax_size);
    astate.train = 1;

    network_state gstate = {};
    gstate.index = 0;
    gstate.net = gnet;
    int gx_size = get_network_input_size(gnet)*gnet.batch;
    int gy_size = get_network_output_size(gnet)*gnet.batch;
    gstate.input = cuda_make_array(0, gx_size);
    gstate.truth = 0;
    gstate.delta = 0;
    gstate.train = 1;
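    // Main loop: run the generator (gnet), score its output with the feature
    // network (fnet) and the adversarial network (anet), blend both gradients
    // into the generator's last layer, then train anet on a mix of real style
    // images and freshly generated ones.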

    while (get_current_batch(gnet) < gnet.max_batches) {
        i += 1;
        time=clock();
        pthread_join(tload_thread, 0);
        pthread_join(sload_thread, 0);
        train = tbuffer;
        style = sbuffer;
        tload_thread = load_data_in_thread(targs);
        sload_thread = load_data_in_thread(sargs);

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        data generated = copy_data(train);
        time=clock();

        int j, k;
        float floss = 0;
        for(j = 0; j < fnet.subdivisions; ++j){
            layer imlayer = gnet.layers[gnet.n - 1];
            get_next_batch(train, fnet.batch, j*fnet.batch, X, y);

            cuda_push_array(fstate.input, X, x_size);
            cuda_push_array(gstate.input, X, gx_size);
            *gnet.seen += gnet.batch;

            forward_network_gpu(fnet, fstate);
            float *feats = fnet.layers[fnet.n - 2].output_gpu;
            copy_ongpu(y_size, feats, 1, fstate.truth, 1);

            forward_network_gpu(gnet, gstate);
            float *gen = gnet.layers[gnet.n-1].output_gpu;
            copy_ongpu(x_size, gen, 1, fstate.input, 1);

            fill_ongpu(x_size, 0, fstate.delta, 1);
            forward_network_gpu(fnet, fstate);
            backward_network_gpu(fnet, fstate);
            //HERE

            astate.input = gen;
            fill_ongpu(ax_size, 0, astate.delta, 1);
            forward_network_gpu(anet, astate);
            backward_network_gpu(anet, astate);

            float *delta = imlayer.delta_gpu;
            fill_ongpu(x_size, 0, delta, 1);
            scal_ongpu(x_size, 100, astate.delta, 1);
            scal_ongpu(x_size, .00001, fstate.delta, 1);
            axpy_ongpu(x_size, 1, fstate.delta, 1, delta, 1);
            axpy_ongpu(x_size, 1, astate.delta, 1, delta, 1);

            //fill_ongpu(x_size, 0, delta, 1);
            //cuda_push_array(delta, X, x_size);
            //axpy_ongpu(x_size, -1, imlayer.output_gpu, 1, delta, 1);
            //printf("pix error: %f\n", cuda_mag_array(delta, x_size));
            printf("fea error: %f\n", cuda_mag_array(fstate.delta, x_size));
            printf("adv error: %f\n", cuda_mag_array(astate.delta, x_size));
            //axpy_ongpu(x_size, 1, astate.delta, 1, delta, 1);

            backward_network_gpu(gnet, gstate);

            floss += get_network_cost(fnet) /(fnet.subdivisions*fnet.batch);

            cuda_pull_array(imlayer.output_gpu, imlayer.output, x_size);
            for(k = 0; k < gnet.batch; ++k){
                int index = j*gnet.batch + k;
                copy_cpu(imlayer.outputs, imlayer.output + k*imlayer.outputs, 1, generated.X.vals[index], 1);
                generated.y.vals[index][0] = .01;
            }
        }

/*
        image sim = float_to_image(anet.w, anet.h, anet.c, style.X.vals[j]);
        show_image(sim, "style");
        cvWaitKey(0);
        */

        harmless_update_network_gpu(anet);

        data merge = concat_data(style, generated);
        randomize_data(merge);
        float aloss = train_network(anet, merge);

        update_network_gpu(gnet);

        free_data(merge);
        free_data(train);
        free_data(generated);
        free_data(style);
        if (aloss_avg < 0) aloss_avg = aloss;
        if (floss_avg < 0) floss_avg = floss;
        aloss_avg = aloss_avg*.9 + aloss*.1;
        floss_avg = floss_avg*.9 + floss*.1;

        printf("%d: gen: %f, adv: %f | gen_avg: %f, adv_avg: %f, %f rate, %lf seconds, %d images\n", i, floss, aloss, floss_avg, aloss_avg, get_current_rate(gnet), sec(clock()-time), i*imgs);
        if(i%1000==0){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, gbase, i);
            save_weights(gnet, buff);
            sprintf(buff, "%s/%s_%d.weights", backup_directory, abase, i);
            save_weights(anet, buff);
        }
        if(i%100==0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, gbase);
            save_weights(gnet, buff);
            sprintf(buff, "%s/%s.backup", backup_directory, abase);
            save_weights(anet, buff);
        }
    }
#endif
}
Example #20
File: lsd.c Project: vaiv/OpenANPR
void train_lsd2(char *cfgfile, char *weightfile, char *acfgfile, char *aweightfile, int clear)
{
#ifdef GPU
    char *train_images = "/home/pjreddie/data/coco/trainvalno5k.txt";
    char *backup_directory = "/home/pjreddie/backup/";
    srand(time(0));
    char *base = basecfg(cfgfile);
    printf("%s\n", base);
    network net = parse_network_cfg(cfgfile);
    if(weightfile){
        load_weights(&net, weightfile);
    }
    if(clear) *net.seen = 0;

    char *abase = basecfg(acfgfile);
    network anet = parse_network_cfg(acfgfile);
    if(aweightfile){
        load_weights(&anet, aweightfile);
    }
    if(clear) *anet.seen = 0;

    int i, j, k;
    layer imlayer = {};
    for (i = 0; i < net.n; ++i) {
        if (net.layers[i].out_c == 3) {
            imlayer = net.layers[i];
            break;
        }
    }

    printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net.learning_rate, net.momentum, net.decay);
    int imgs = net.batch*net.subdivisions;
    i = *net.seen/imgs;
    data train, buffer;


    list *plist = get_paths(train_images);
    //int N = plist->size;
    char **paths = (char **)list_to_array(plist);

    load_args args = {};
    args.w = net.w;
    args.h = net.h;
    args.paths = paths;
    args.n = imgs;
    args.m = plist->size;
    args.d = &buffer;

    args.min = net.min_crop;
    args.max = net.max_crop;
    args.angle = net.angle;
    args.aspect = net.aspect;
    args.exposure = net.exposure;
    args.saturation = net.saturation;
    args.hue = net.hue;
    args.size = net.w;
    args.type = CLASSIFICATION_DATA;
    args.classes = 1;
    char *ls[1] = {"coco"};
    args.labels = ls;

    pthread_t load_thread = load_data_in_thread(args);
    clock_t time;

    network_state gstate = {};
    gstate.index = 0;
    gstate.net = net;
    int x_size = get_network_input_size(net)*net.batch;
    int y_size = 1*net.batch;
    gstate.input = cuda_make_array(0, x_size);
    gstate.truth = 0;
    gstate.delta = 0;
    gstate.train = 1;
    float *X = (float*)calloc(x_size, sizeof(float));
    float *y = (float*)calloc(y_size, sizeof(float));

    network_state astate = {};
    astate.index = 0;
    astate.net = anet;
    int ay_size = get_network_output_size(anet)*anet.batch;
    astate.input = 0;
    astate.truth = 0;
    astate.delta = 0;
    astate.train = 1;

    float *imerror = cuda_make_array(0, imlayer.outputs);
    float *ones_gpu = cuda_make_array(0, ay_size);
    fill_ongpu(ay_size, 1, ones_gpu, 1);

    float aloss_avg = -1;
    float gloss_avg = -1;

    //data generated = copy_data(train);
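    // Main loop: forward the generator, let the adversarial network score the
    // image layer's output against an all-ones target, add that gradient
    // (imerror) to the image layer's delta, backpropagate the generator, then
    // train the adversary on real vs. generated batches.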

    while (get_current_batch(net) < net.max_batches) {
        i += 1;
        time=clock();
        pthread_join(load_thread, 0);
        train = buffer;
        load_thread = load_data_in_thread(args);

        printf("Loaded: %lf seconds\n", sec(clock()-time));

        data generated = copy_data(train);
        time=clock();
        float gloss = 0;

        for(j = 0; j < net.subdivisions; ++j){
            get_next_batch(train, net.batch, j*net.batch, X, y);
            cuda_push_array(gstate.input, X, x_size);
            *net.seen += net.batch;
            forward_network_gpu(net, gstate);

            fill_ongpu(imlayer.outputs, 0, imerror, 1);
            astate.input = imlayer.output_gpu;
            astate.delta = imerror;
            astate.truth = ones_gpu;
            forward_network_gpu(anet, astate);
            backward_network_gpu(anet, astate);

            scal_ongpu(imlayer.outputs, 1, imerror, 1);
            axpy_ongpu(imlayer.outputs, 1, imerror, 1, imlayer.delta_gpu, 1);

            backward_network_gpu(net, gstate);

            printf("features %f\n", cuda_mag_array(imlayer.delta_gpu, imlayer.outputs));
            printf("realness %f\n", cuda_mag_array(imerror, imlayer.outputs));

            gloss += get_network_cost(net) /(net.subdivisions*net.batch);

            cuda_pull_array(imlayer.output_gpu, imlayer.output, x_size);
            for(k = 0; k < net.batch; ++k){
                int index = j*net.batch + k;
                copy_cpu(imlayer.outputs, imlayer.output + k*imlayer.outputs, 1, generated.X.vals[index], 1);
                generated.y.vals[index][0] = 0;
            }
        }
        harmless_update_network_gpu(anet);

        data merge = concat_data(train, generated);
        randomize_data(merge);
        float aloss = train_network(anet, merge);

        update_network_gpu(net);
        update_network_gpu(anet);
        free_data(merge);
        free_data(train);
        free_data(generated);
        if (aloss_avg < 0) aloss_avg = aloss;
        aloss_avg = aloss_avg*.9 + aloss*.1;
        gloss_avg = gloss_avg*.9 + gloss*.1;

        printf("%d: gen: %f, adv: %f | gen_avg: %f, adv_avg: %f, %f rate, %lf seconds, %d images\n", i, gloss, aloss, gloss_avg, aloss_avg, get_current_rate(net), sec(clock()-time), i*imgs);
        if(i%1000==0){
            char buff[256];
            sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
            save_weights(net, buff);
            sprintf(buff, "%s/%s_%d.weights", backup_directory, abase, i);
            save_weights(anet, buff);
        }
        if(i%100==0){
            char buff[256];
            sprintf(buff, "%s/%s.backup", backup_directory, base);
            save_weights(net, buff);
            sprintf(buff, "%s/%s.backup", backup_directory, abase);
            save_weights(anet, buff);
        }
    }
    char buff[256];
    sprintf(buff, "%s/%s_final.weights", backup_directory, base);
    save_weights(net, buff);
#endif
}
Example #21
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
    axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
    shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
Example #22
void backward_detection_layer_gpu(detection_layer l, network_state state)
{
    axpy_ongpu(l.batch*l.inputs, 1, l.delta_gpu, 1, state.delta, 1);
    //copy_ongpu(l.batch*l.inputs, l.delta_gpu, 1, state.delta, 1);
}
Example #23
void backward_cost_layer_gpu(const cost_layer l, network_state state)
{
    axpy_ongpu(l.batch*l.inputs, l.scale, l.delta_gpu, 1, state.delta, 1);
}