/* Forward pass of the instance-segmentation (iseg) layer on the GPU:
 * activate the per-class probability maps, then defer the loss to the CPU. */
void forward_iseg_layer_gpu(const layer l, network net) {
	copy_gpu(l.batch * l.inputs, net.input_gpu, 1, l.output_gpu, 1, net.st);
	int b;
	for (b = 0; b < l.batch; ++b) {
		// Logistic activation over the l.classes class maps of each batch item.
		activate_array_gpu(l.output_gpu + b * l.outputs, l.classes * l.w * l.h,
				LOGISTIC, net.st);
		//if(l.extra) activate_array_gpu(l.output_gpu + b*l.outputs + l.classes*l.w*l.h, l.extra*l.w*l.h, LOGISTIC);
	}

	// The loss is computed on the CPU: pull the activated output back to the
	// host, run the CPU forward pass, then push the resulting deltas to the GPU.
	cuda_pull_array(l.output_gpu, net.input, l.batch * l.inputs);
	forward_iseg_layer(l, net);
	cuda_push_array(l.delta_gpu, l.delta, l.batch * l.outputs);
}
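/* The deltas were computed on the host and pushed to l.delta_gpu above, so the
 * matching backward pass only has to hand them to the previous layer. It is
 * not part of this listing; a minimal sketch, assuming the same stream-aware
 * axpy_gpu used elsewhere in this section: */
void backward_iseg_layer_gpu(const layer l, network net) {
	// Accumulate this layer's deltas into the previous layer's delta buffer.
	axpy_gpu(l.batch * l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1, net.st);
}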
void forward_shortcut_layer_gpu(const layer l, network net)
{
	// Copy the incoming activations, add the skip connection from layer
	// l.index (resampled inside shortcut_gpu if the shapes differ), then
	// activate. Note this example uses the default-stream helper variants,
	// without the trailing stream argument seen in the other functions here.
	copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
	shortcut_gpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
	activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
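/* The matching backward pass is not in this listing. A minimal sketch, modeled
 * on darknet's backward_shortcut_layer_gpu (an assumption for this codebase):
 * apply the activation gradient, send the delta straight back, and also route
 * it through the skip connection, with the in/out dimensions swapped relative
 * to the forward call. */
void backward_shortcut_layer_gpu(const layer l, network net)
{
	gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
	axpy_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1, net.delta_gpu, 1);
	shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, net.layers[l.index].delta_gpu);
}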
/* Backward pass of the LSTM layer on the GPU: walk the unrolled time steps in
 * reverse (backpropagation through time), recompute the gate activations for
 * each step, and accumulate gradients into the eight gate sub-layers. */
void backward_lstm_layer_gpu(layer l, network state) {
	network s = { 0 };
	s.train = state.train;
	int i;
	layer wf = *(l.wf);
	layer wi = *(l.wi);
	layer wg = *(l.wg);
	layer wo = *(l.wo);

	layer uf = *(l.uf);
	layer ui = *(l.ui);
	layer ug = *(l.ug);
	layer uo = *(l.uo);

	// Advance the gate sub-layers to the last time step.
	increment_layer(&wf, l.steps - 1);
	increment_layer(&wi, l.steps - 1);
	increment_layer(&wg, l.steps - 1);
	increment_layer(&wo, l.steps - 1);

	increment_layer(&uf, l.steps - 1);
	increment_layer(&ui, l.steps - 1);
	increment_layer(&ug, l.steps - 1);
	increment_layer(&uo, l.steps - 1);

	// Point the layer's own input/output/cell/delta buffers at the last step too.
	state.input_gpu += l.inputs * l.batch * (l.steps - 1);
	if (state.delta_gpu)
		state.delta_gpu += l.inputs * l.batch * (l.steps - 1);

	l.output_gpu += l.outputs * l.batch * (l.steps - 1);
	l.cell_gpu += l.outputs * l.batch * (l.steps - 1);
	l.delta_gpu += l.outputs * l.batch * (l.steps - 1);

	for (i = l.steps - 1; i >= 0; --i) {
		// Recover c_{t-1}, c_t, h_{t-1} and h_t for this time step.
		if (i != 0)
			copy_gpu(l.outputs * l.batch, l.cell_gpu - l.outputs * l.batch, 1,
					l.prev_cell_gpu, 1, state.st);
		copy_gpu(l.outputs * l.batch, l.cell_gpu, 1, l.c_gpu, 1, state.st);
		if (i != 0)
			copy_gpu(l.outputs * l.batch, l.output_gpu - l.outputs * l.batch, 1,
					l.prev_state_gpu, 1, state.st);
		copy_gpu(l.outputs * l.batch, l.output_gpu, 1, l.h_gpu, 1, state.st);

		// dh accumulates into the previous step's delta (there is none at step 0).
		l.dh_gpu = (i == 0) ? 0 : l.delta_gpu - l.outputs * l.batch;

		// Recompute the gate pre-activations: W*h_{t-1} + U*x_t for each gate.
		copy_gpu(l.outputs * l.batch, wf.output_gpu, 1, l.f_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wi.output_gpu, 1, l.i_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wg.output_gpu, 1, l.g_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wo.output_gpu, 1, l.o_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1, state.st);

		activate_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, state.st);
		activate_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, state.st);
		activate_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, state.st);
		activate_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, state.st);

		// temp3 <- dL/dh_t for this step.
		copy_gpu(l.outputs * l.batch, l.delta_gpu, 1, l.temp3_gpu, 1, state.st);

		// temp2 <- dL/dc_t = dL/dh_t * o * (1 - tanh^2(c_t)) + dc from step t+1.
		copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.temp_gpu, 1, state.st);
		activate_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, state.st);

		copy_gpu(l.outputs * l.batch, l.temp3_gpu, 1, l.temp2_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.o_gpu, 1, l.temp2_gpu, 1, state.st);

		gradient_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, l.temp2_gpu, state.st);
		axpy_gpu(l.outputs * l.batch, 1, l.dc_gpu, 1, l.temp2_gpu, 1, state.st);

		// Output gate: delta_o = dL/dh_t * tanh(c_t) * o * (1 - o).
		copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.temp_gpu, 1, state.st);
		activate_array_gpu(l.temp_gpu, l.outputs * l.batch, TANH, state.st);
		mul_gpu(l.outputs * l.batch, l.temp3_gpu, 1, l.temp_gpu, 1, state.st);
		gradient_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wo.delta_gpu, 1, state.st);
		s.input_gpu = l.prev_state_gpu;
		s.delta_gpu = l.dh_gpu;
		backward_connected_layer_gpu(wo, s);

		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, uo.delta_gpu, 1, state.st);
		s.input_gpu = state.input_gpu;
		s.delta_gpu = state.delta_gpu;
		backward_connected_layer_gpu(uo, s);

		// Candidate gate: delta_g = dL/dc_t * i * (1 - g^2).
		copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.i_gpu, 1, l.temp_gpu, 1, state.st);
		gradient_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, l.temp_gpu, state.st);
		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wg.delta_gpu, 1, state.st);
		s.input_gpu = l.prev_state_gpu;
		s.delta_gpu = l.dh_gpu;
		backward_connected_layer_gpu(wg, s);

		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, ug.delta_gpu, 1, state.st);
		s.input_gpu = state.input_gpu;
		s.delta_gpu = state.delta_gpu;
		backward_connected_layer_gpu(ug, s);

		// Input gate: delta_i = dL/dc_t * g * i * (1 - i).
		copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.g_gpu, 1, l.temp_gpu, 1, state.st);
		gradient_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wi.delta_gpu, 1, state.st);
		s.input_gpu = l.prev_state_gpu;
		s.delta_gpu = l.dh_gpu;
		backward_connected_layer_gpu(wi, s);

		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, ui.delta_gpu, 1, state.st);
		s.input_gpu = state.input_gpu;
		s.delta_gpu = state.delta_gpu;
		backward_connected_layer_gpu(ui, s);

		// Forget gate: delta_f = dL/dc_t * c_{t-1} * f * (1 - f).
		copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.prev_cell_gpu, 1, l.temp_gpu, 1, state.st);
		gradient_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, l.temp_gpu, state.st);
		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, wf.delta_gpu, 1, state.st);
		s.input_gpu = l.prev_state_gpu;
		s.delta_gpu = l.dh_gpu;
		backward_connected_layer_gpu(wf, s);

		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, uf.delta_gpu, 1, state.st);
		s.input_gpu = state.input_gpu;
		s.delta_gpu = state.delta_gpu;
		backward_connected_layer_gpu(uf, s);

		// Cell gradient for the previous step: dc_{t-1} = dL/dc_t * f.
		copy_gpu(l.outputs * l.batch, l.temp2_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.f_gpu, 1, l.temp_gpu, 1, state.st);
		copy_gpu(l.outputs * l.batch, l.temp_gpu, 1, l.dc_gpu, 1, state.st);

		// Step every pointer back one time step.
		state.input_gpu -= l.inputs * l.batch;
		if (state.delta_gpu)
			state.delta_gpu -= l.inputs * l.batch;
		l.output_gpu -= l.outputs * l.batch;
		l.cell_gpu -= l.outputs * l.batch;
		l.delta_gpu -= l.outputs * l.batch;

		increment_layer(&wf, -1);
		increment_layer(&wi, -1);
		increment_layer(&wg, -1);
		increment_layer(&wo, -1);

		increment_layer(&uf, -1);
		increment_layer(&ui, -1);
		increment_layer(&ug, -1);
		increment_layer(&uo, -1);
	}
}
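/* Both LSTM passes rely on an increment_layer() helper that is not shown in
 * this listing. A minimal sketch, matching how it is used above and the buffer
 * layout darknet gives its connected sub-layers (the x/x_norm fields are part
 * of that layout); negative steps walk backwards through time: */
static void increment_layer(layer *l, int steps) {
	int num = l->outputs * l->batch * steps;
	l->output += num;
	l->delta += num;
	l->x += num;
	l->x_norm += num;
#ifdef GPU
	l->output_gpu += num;
	l->delta_gpu += num;
	l->x_gpu += num;
	l->x_norm_gpu += num;
#endif
}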
/* Forward pass of the LSTM layer on the GPU: unroll over l.steps time steps,
 * computing the four gates with their connected sub-layers and updating the
 * cell and hidden state at each step. */
void forward_lstm_layer_gpu(layer l, network state) {
	network s = { 0 };
	s.train = state.train;
	int i;
	layer wf = *(l.wf);
	layer wi = *(l.wi);
	layer wg = *(l.wg);
	layer wo = *(l.wo);

	layer uf = *(l.uf);
	layer ui = *(l.ui);
	layer ug = *(l.ug);
	layer uo = *(l.uo);

	// Zero the per-step delta buffers of the gate sub-layers.
	fill_gpu(l.outputs * l.batch * l.steps, 0, wf.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, wi.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, wg.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, wo.delta_gpu, 1, state.st);

	fill_gpu(l.outputs * l.batch * l.steps, 0, uf.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, ui.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, ug.delta_gpu, 1, state.st);
	fill_gpu(l.outputs * l.batch * l.steps, 0, uo.delta_gpu, 1, state.st);
	if (state.train) {
		fill_gpu(l.outputs * l.batch * l.steps, 0, l.delta_gpu, 1, state.st);
	}

	for (i = 0; i < l.steps; ++i) {
		// Gate contributions from the previous hidden state: W{f,i,g,o} * h_{t-1}.
		s.input_gpu = l.h_gpu;
		forward_connected_layer_gpu(wf, s);
		forward_connected_layer_gpu(wi, s);
		forward_connected_layer_gpu(wg, s);
		forward_connected_layer_gpu(wo, s);

		// Gate contributions from the current input: U{f,i,g,o} * x_t.
		s.input_gpu = state.input_gpu;
		forward_connected_layer_gpu(uf, s);
		forward_connected_layer_gpu(ui, s);
		forward_connected_layer_gpu(ug, s);
		forward_connected_layer_gpu(uo, s);

		// Sum the two contributions into the gate pre-activations.
		copy_gpu(l.outputs * l.batch, wf.output_gpu, 1, l.f_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wi.output_gpu, 1, l.i_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wg.output_gpu, 1, l.g_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1, state.st);

		copy_gpu(l.outputs * l.batch, wo.output_gpu, 1, l.o_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1, state.st);

		activate_array_gpu(l.f_gpu, l.outputs * l.batch, LOGISTIC, state.st);
		activate_array_gpu(l.i_gpu, l.outputs * l.batch, LOGISTIC, state.st);
		activate_array_gpu(l.g_gpu, l.outputs * l.batch, TANH, state.st);
		activate_array_gpu(l.o_gpu, l.outputs * l.batch, LOGISTIC, state.st);

		// Cell update: c_t = f * c_{t-1} + i * g.
		copy_gpu(l.outputs * l.batch, l.i_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.g_gpu, 1, l.temp_gpu, 1, state.st);
		mul_gpu(l.outputs * l.batch, l.f_gpu, 1, l.c_gpu, 1, state.st);
		axpy_gpu(l.outputs * l.batch, 1, l.temp_gpu, 1, l.c_gpu, 1, state.st);

		// Hidden state: h_t = o * tanh(c_t).
		copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.h_gpu, 1, state.st);
		activate_array_gpu(l.h_gpu, l.outputs * l.batch, TANH, state.st);
		mul_gpu(l.outputs * l.batch, l.o_gpu, 1, l.h_gpu, 1, state.st);

		// Record this step's cell and output, then advance to the next step.
		copy_gpu(l.outputs * l.batch, l.c_gpu, 1, l.cell_gpu, 1, state.st);
		copy_gpu(l.outputs * l.batch, l.h_gpu, 1, l.output_gpu, 1, state.st);

		state.input_gpu += l.inputs * l.batch;
		l.output_gpu += l.outputs * l.batch;
		l.cell_gpu += l.outputs * l.batch;

		increment_layer(&wf, 1);
		increment_layer(&wi, 1);
		increment_layer(&wg, 1);
		increment_layer(&wo, 1);

		increment_layer(&uf, 1);
		increment_layer(&ui, 1);
		increment_layer(&ug, 1);
		increment_layer(&uo, 1);
	}
}
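/* Taken together, the loop above implements the standard LSTM recurrences
 * (biases live inside the connected sub-layers):
 *
 *   f_t = sigmoid(Wf h_{t-1} + Uf x_t)
 *   i_t = sigmoid(Wi h_{t-1} + Ui x_t)
 *   g_t = tanh   (Wg h_{t-1} + Ug x_t)
 *   o_t = sigmoid(Wo h_{t-1} + Uo x_t)
 *   c_t = f_t * c_{t-1} + i_t * g_t
 *   h_t = o_t * tanh(c_t)
 */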
/* Forward pass of the activation layer on the GPU: copy the input through and
 * apply the layer's activation in place. */
void forward_activation_layer_gpu(layer l, network net) {
	copy_gpu(l.outputs * l.batch, net.input_gpu, 1, l.output_gpu, 1, net.st);
	activate_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation, net.st);
}
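/* The matching backward pass is not in this listing. A minimal sketch,
 * assuming the same stream-aware helpers used above: chain the activation
 * gradient into l.delta_gpu, then copy it to the previous layer's delta. */
void backward_activation_layer_gpu(layer l, network net) {
	gradient_array_gpu(l.output_gpu, l.outputs * l.batch, l.activation,
			l.delta_gpu, net.st);
	copy_gpu(l.outputs * l.batch, l.delta_gpu, 1, net.delta_gpu, 1, net.st);
}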