void backward_connected_layer(connected_layer l, network_state state)
{
    int i;
    /* Gradient of the activation function, applied in place to l.delta. */
    gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
    /* Bias gradient: accumulate delta over the batch. */
    for(i = 0; i < l.batch; ++i){
        axpy_cpu(l.outputs, 1, l.delta + i*l.outputs, 1, l.bias_updates, 1);
    }
    if(l.batch_normalize){
        backward_scale_cpu(l.x_norm, l.delta, l.batch, l.outputs, 1, l.scale_updates);
        scale_bias(l.delta, l.scales, l.batch, l.outputs, 1);
        mean_delta_cpu(l.delta, l.variance, l.batch, l.outputs, 1, l.mean_delta);
        variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.outputs, 1, l.variance_delta);
        normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.outputs, 1, l.delta);
    }

    /* Weight gradient: weight_updates += delta^T * input. */
    int m = l.outputs;
    int k = l.batch;
    int n = l.inputs;
    float *a = l.delta;
    float *b = state.input;
    float *c = l.weight_updates;
    gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);

    /* Input gradient (only if the previous layer wants it): state.delta += delta * weights. */
    m = l.batch;
    k = l.outputs;
    n = l.inputs;
    a = l.delta;
    b = l.weights;
    c = state.delta;

    if(c) gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
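/* For reference, the first gemm call above reduces to the triple loop
 * below: weight_updates[o][i] += sum over the batch of
 * delta[b][o] * input[b][i]. A minimal sketch assuming the row-major
 * layouts used above, for illustration only; weight_updates_naive is a
 * hypothetical name, not a Darknet function. */
static void weight_updates_naive(int batch, int outputs, int inputs,
                                 const float *delta, const float *input,
                                 float *weight_updates)
{
    for(int o = 0; o < outputs; ++o){
        for(int i = 0; i < inputs; ++i){
            float sum = 0;
            /* Reduce over the batch dimension (the K of the gemm call). */
            for(int b = 0; b < batch; ++b){
                sum += delta[b*outputs + o] * input[b*inputs + i];
            }
            weight_updates[o*inputs + i] += sum;
        }
    }
}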
void backward_batchnorm_layer(const layer l, network_state state)
{
    /* Gradient of the learned scale gamma: scale_updates += sum(delta * x_norm). */
    backward_scale_cpu(l.x_norm, l.delta, l.batch, l.out_c, l.out_w*l.out_h, l.scale_updates);

    /* Backprop through y = gamma * x_norm: delta *= gamma. */
    scale_bias(l.delta, l.scales, l.batch, l.out_c, l.out_h*l.out_w);

    /* Gradients w.r.t. the batch mean and variance, then w.r.t. the input. */
    mean_delta_cpu(l.delta, l.variance, l.batch, l.out_c, l.out_w*l.out_h, l.mean_delta);
    variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.out_c, l.out_w*l.out_h, l.variance_delta);
    normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.out_c, l.out_w*l.out_h, l.delta);

    /* A standalone BATCHNORM layer passes its delta back to the previous layer. */
    if(l.type == BATCHNORM) copy_cpu(l.outputs*l.batch, l.delta, 1, state.delta, 1);
}
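/* The helper calls above implement the standard batch-norm backward pass
 * (Ioffe & Szegedy, 2015). As a rough sketch of the final step,
 * normalize_delta_cpu presumably combines the three partial gradients per
 * element. The function below is an illustrative re-derivation for spatial
 * size 1 (the fully connected case), not the library's code; EPS is an
 * assumed stabilizing constant and normalize_delta_sketch is a
 * hypothetical name. */
#include <math.h>

#define EPS 0.00001f

static void normalize_delta_sketch(const float *x, const float *mean,
                                   const float *variance,
                                   const float *mean_delta,
                                   const float *variance_delta,
                                   int batch, int filters, float *delta)
{
    for(int b = 0; b < batch; ++b){
        for(int f = 0; f < filters; ++f){
            int i = b*filters + f;
            /* dL/dx = dL/dxhat / sqrt(var + eps)
             *       + dL/dvar * 2*(x - mean)/m
             *       + dL/dmean / m,   with m = batch (spatial == 1). */
            delta[i] = delta[i] / sqrtf(variance[f] + EPS)
                     + variance_delta[f] * 2.f * (x[i] - mean[f]) / batch
                     + mean_delta[f] / batch;
        }
    }
}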