Code example #1: maxout_layer::get_layer_configuration_specific
	layer_configuration_specific maxout_layer::get_layer_configuration_specific(const layer_configuration_specific& input_configuration_specific) const
	{
		if ((input_configuration_specific.feature_map_count % feature_map_subsampling_size) != 0)
			throw neural_network_exception((boost::format("Feature map count in layer (%1%) is not evenly divisible by feature map subsampling count (%2%)") % input_configuration_specific.feature_map_count % feature_map_subsampling_size).str());

		return layer_configuration_specific(input_configuration_specific.feature_map_count / feature_map_subsampling_size, input_configuration_specific.dimension_sizes);
	}
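Below is a minimal standalone sketch of the same shape arithmetic. The shape struct, the maxout_output_shape function and the concrete numbers are illustrative stand-ins, not part of nnForge: an input with 12 feature maps and a feature map subsampling size of 3 yields 4 output feature maps with unchanged spatial dimensions.

	#include <cstdio>
	#include <stdexcept>
	#include <vector>

	// Simplified stand-in for nnForge's layer_configuration_specific (illustration only).
	struct shape
	{
		unsigned int feature_map_count;
		std::vector<unsigned int> dimension_sizes;
	};

	// Same rule as maxout_layer::get_layer_configuration_specific above:
	// divide the feature map count, keep the spatial dimensions as-is.
	shape maxout_output_shape(const shape& input, unsigned int feature_map_subsampling_size)
	{
		if ((input.feature_map_count % feature_map_subsampling_size) != 0)
			throw std::runtime_error("feature map count is not evenly divisible by the subsampling size");
		return shape{ input.feature_map_count / feature_map_subsampling_size, input.dimension_sizes };
	}

	int main()
	{
		shape input = { 12, std::vector<unsigned int>(2, 32) }; // 12 feature maps, 32x32
		shape output = maxout_output_shape(input, 3);
		std::printf("%u feature maps, %ux%u\n", output.feature_map_count, output.dimension_sizes[0], output.dimension_sizes[1]);
		return 0;
	}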
Code example #2: cudnn_util::set_tensor_bias_descriptor
File: cudnn_util.cpp  Project: anshumang/nnForge
		void cudnn_util::set_tensor_bias_descriptor(
			cudnnTensorDescriptor_t tensor_desc,
			unsigned int feature_map_count,
			unsigned int dimension_count)
		{
			cudnn_util::set_tensor_descriptor(
				tensor_desc,
				layer_configuration_specific(feature_map_count, std::vector<unsigned int>(dimension_count, 1)),
				1);
		}
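For comparison, here is a sketch of the equivalent descriptor set up directly through the cuDNN API, limited to the common 2D case (the make_bias_descriptor helper is hypothetical and error checking is omitted). A bias tensor broadcasts over the batch and spatial dimensions, so every extent except the feature map (channel) count is 1.

	#include <cudnn.h>

	// Hypothetical helper: a 1 x C x 1 x 1 bias descriptor set via raw cuDNN calls.
	cudnnTensorDescriptor_t make_bias_descriptor(int feature_map_count)
	{
		cudnnTensorDescriptor_t desc;
		cudnnCreateTensorDescriptor(&desc);
		cudnnSetTensor4dDescriptor(
			desc,
			CUDNN_TENSOR_NCHW,
			CUDNN_DATA_FLOAT,
			1,                  // n: broadcast over the batch
			feature_map_count,  // c: one bias value per feature map
			1,                  // h
			1);                 // w
		return desc;
	}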
Code example #3: rgb_to_yuv_convert_layer::get_layer_configuration_specific
	layer_configuration_specific rgb_to_yuv_convert_layer::get_layer_configuration_specific(const layer_configuration_specific& input_configuration_specific) const
	{
		for(std::vector<color_feature_map_config>::const_iterator it = color_feature_map_config_list.begin(); it != color_feature_map_config_list.end(); ++it)
		{
			if (it->red_and_y_feature_map_id >= input_configuration_specific.feature_map_count)
				throw neural_network_exception((boost::format("ID of feature map layer for red and Y (%1%) is greater than or equal to the feature map count of the input configuration (%2%)") % it->red_and_y_feature_map_id % input_configuration_specific.feature_map_count).str());
			if (it->green_and_u_feature_map_id >= input_configuration_specific.feature_map_count)
				throw neural_network_exception((boost::format("ID of feature map layer for green and U (%1%) is greater than or equal to the feature map count of the input configuration (%2%)") % it->green_and_u_feature_map_id % input_configuration_specific.feature_map_count).str());
			if (it->blue_and_v_feature_map_id >= input_configuration_specific.feature_map_count)
				throw neural_network_exception((boost::format("ID of feature map layer for blue and V (%1%) is greater than or equal to the feature map count of the input configuration (%2%)") % it->blue_and_v_feature_map_id % input_configuration_specific.feature_map_count).str());
		}

		return layer_configuration_specific(input_configuration_specific);
	}
Code example #4: concat_layer::get_output_layer_configuration_specific
File: concat_layer.cpp  Project: milakov/nnForge
	layer_configuration_specific concat_layer::get_output_layer_configuration_specific(const std::vector<layer_configuration_specific>& input_configuration_specific_list) const
	{
		unsigned int feature_map_count = input_configuration_specific_list[0].feature_map_count;
		unsigned int neuron_count_per_feature_map = input_configuration_specific_list[0].get_neuron_count_per_feature_map();
		for(std::vector<layer_configuration_specific>::const_iterator it = input_configuration_specific_list.begin() + 1; it != input_configuration_specific_list.end(); ++it)
		{
			feature_map_count += it->feature_map_count;
			unsigned int new_neuron_count_per_feature_map = it->get_neuron_count_per_feature_map();
			if (neuron_count_per_feature_map != new_neuron_count_per_feature_map)
				throw neural_network_exception("Neuron count per feature maps mismatch in 2 input layers for concat_layer");
		}

		return layer_configuration_specific(feature_map_count, input_configuration_specific_list[0].dimension_sizes);
	}
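A small self-contained sketch of the same rule with illustrative numbers (the concat_feature_map_count helper and the 16/32/16-over-28x28 inputs are made up for the example): feature map counts are summed, while the per-feature-map neuron count must agree across all inputs.

	#include <cstdio>
	#include <stdexcept>
	#include <utility>
	#include <vector>

	// Each pair is (feature_map_count, neuron_count_per_feature_map) for one input layer.
	unsigned int concat_feature_map_count(const std::vector<std::pair<unsigned int, unsigned int> >& inputs)
	{
		unsigned int total = 0;
		for (std::size_t i = 0; i < inputs.size(); ++i)
		{
			if (inputs[i].second != inputs[0].second)
				throw std::runtime_error("neuron count per feature map mismatch");
			total += inputs[i].first;
		}
		return total;
	}

	int main()
	{
		std::vector<std::pair<unsigned int, unsigned int> > inputs;
		inputs.push_back(std::make_pair(16u, 28u * 28u));
		inputs.push_back(std::make_pair(32u, 28u * 28u));
		inputs.push_back(std::make_pair(16u, 28u * 28u));
		std::printf("%u feature maps over the shared 28x28 grid\n", concat_feature_map_count(inputs)); // prints 64
		return 0;
	}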
Code example #5: linear_sampler_layer::get_output_layer_configuration_specific
	layer_configuration_specific linear_sampler_layer::get_output_layer_configuration_specific(const std::vector<layer_configuration_specific>& input_configuration_specific_list) const
	{
		if (input_configuration_specific_list.size() != 2)
			throw neural_network_exception((boost::format("linear_sampler_layer: %1% input layers specified, while 2 are expected") % input_configuration_specific_list.size()).str());

		if (input_configuration_specific_list[0].dimension_sizes.size() != 2)
			throw neural_network_exception((boost::format("linear_sampler_layer is able to run in 2D only, while the grid has %1% dimensions") % input_configuration_specific_list[0].dimension_sizes.size()).str());

		if (input_configuration_specific_list[0].dimension_sizes.size() != input_configuration_specific_list[0].feature_map_count)
			throw neural_network_exception((boost::format("linear_sampler_layer: dimensions count mismatch for the grid: %1% and %2%") % input_configuration_specific_list[0].dimension_sizes.size() % input_configuration_specific_list[0].feature_map_count).str());

		if (input_configuration_specific_list[1].dimension_sizes.size() != input_configuration_specific_list[0].dimension_sizes.size())
			throw neural_network_exception((boost::format("linear_sampler_layer: sampled image has %1% dimensions, while grid has %2%") % input_configuration_specific_list[1].dimension_sizes.size() % input_configuration_specific_list[0].dimension_sizes.size()).str());

		return layer_configuration_specific(input_configuration_specific_list[1].feature_map_count, input_configuration_specific_list[0].dimension_sizes);
	}
Code example #6: negative_log_likelihood_layer::get_output_layer_configuration_specific
	layer_configuration_specific negative_log_likelihood_layer::get_output_layer_configuration_specific(const std::vector<layer_configuration_specific>& input_configuration_specific_list) const
	{
		if (input_configuration_specific_list[0].feature_map_count != input_configuration_specific_list[1].feature_map_count)
			throw neural_network_exception((boost::format("Feature map counts in 2 input layers for negative_log_likelihood_layer don't match: %1% and %2%") % input_configuration_specific_list[0].feature_map_count % input_configuration_specific_list[1].feature_map_count).str());

		if (input_configuration_specific_list[0].get_neuron_count_per_feature_map() != input_configuration_specific_list[1].get_neuron_count_per_feature_map())
			throw neural_network_exception("Neuron count per feature maps mismatch in 2 input layers for negative_log_likelihood_layer");

		if (input_configuration_specific_list.size() > 2)
		{
			if (input_configuration_specific_list[2].feature_map_count != 1)
				throw neural_network_exception((boost::format("Feature map count for negative_log_likelihood_layer scaling should be equal to 1, while it is %1%") % input_configuration_specific_list[2].feature_map_count).str());

			if (input_configuration_specific_list[2].get_neuron_count_per_feature_map() != input_configuration_specific_list[0].get_neuron_count_per_feature_map())
				throw neural_network_exception((boost::format("Neuron count per feature map negative_log_likelihood_layer for scaling equals %1%, expected %2%") % input_configuration_specific_list[2].get_neuron_count_per_feature_map() % input_configuration_specific_list[0].get_neuron_count_per_feature_map()).str());
		}

		return layer_configuration_specific(1, input_configuration_specific_list[0].dimension_sizes);
	}
Code example #7: upsampling_layer::get_input_layer_configuration_specific
	bool upsampling_layer::get_input_layer_configuration_specific(
		layer_configuration_specific& input_configuration_specific,
		const layer_configuration_specific& output_configuration_specific,
		unsigned int input_layer_id) const
	{
		if (output_configuration_specific.get_dimension_count() != upsampling_sizes.size())
			throw neural_network_exception((boost::format("Dimension count in layer (%1%) and output configuration (%2%) don't match") % upsampling_sizes.size() % output_configuration_specific.get_dimension_count()).str());

		if (output_configuration_specific.feature_map_count % feature_map_upsampling_size != 0)
			throw neural_network_exception((boost::format("Feature map count in output config (%1%) is not evenly divisible by feature map upsampling size (%2%)") % output_configuration_specific.feature_map_count % feature_map_upsampling_size).str());

		input_configuration_specific = layer_configuration_specific(output_configuration_specific.feature_map_count / feature_map_upsampling_size);

		for(unsigned int i = 0; i < upsampling_sizes.size(); ++i)
		{
			if ((output_configuration_specific.dimension_sizes[i] % upsampling_sizes[i]) != 0)
				throw neural_network_exception((boost::format("Dimension size in output config (%1%) is not evenly divisible by upsampling size (%2%)") % output_configuration_specific.dimension_sizes[i] % upsampling_sizes[i]).str());

			input_configuration_specific.dimension_sizes.push_back(output_configuration_specific.dimension_sizes[i] / upsampling_sizes[i]);
		}

		return true;
	}
Code example #8: max_subsampling_layer::get_input_layer_configuration_specific
	bool max_subsampling_layer::get_input_layer_configuration_specific(
		layer_configuration_specific& input_configuration_specific,
		const layer_configuration_specific& output_configuration_specific,
		unsigned int input_layer_id) const
	{
		if (output_configuration_specific.get_dimension_count() != subsampling_sizes.size())
			throw neural_network_exception((boost::format("Dimension count in layer (%1%) and output configuration (%2%) don't match") % subsampling_sizes.size() % output_configuration_specific.get_dimension_count()).str());

		input_configuration_specific = layer_configuration_specific(output_configuration_specific.feature_map_count * feature_map_subsampling_size);

		if (tiling)
		{
			for(unsigned int i = 0; i < subsampling_sizes.size(); ++i)
				input_configuration_specific.dimension_sizes.push_back(output_configuration_specific.dimension_sizes[i] * subsampling_sizes[i] + (subsampling_sizes[i] - 1));
		}
		else
		{
			for(unsigned int i = 0; i < subsampling_sizes.size(); ++i)
				input_configuration_specific.dimension_sizes.push_back((output_configuration_specific.dimension_sizes[i] - 1) * strides[i] + subsampling_sizes[i]);
		}

		return true;
	}
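The two branches above invert the usual pooling size formula. The following sketch (placeholder function names and example numbers, not library code) shows the arithmetic: without tiling an input dimension is reconstructed as (output - 1) * stride + window, with tiling as output * window + (window - 1).

	#include <cstdio>

	// Inverse of the pooling output-size computation: recover the input extent
	// that produces a given output extent (illustrative sketch only).
	unsigned int input_size_plain(unsigned int output_size, unsigned int window, unsigned int stride)
	{
		return (output_size - 1) * stride + window;
	}

	unsigned int input_size_tiling(unsigned int output_size, unsigned int window)
	{
		return output_size * window + (window - 1);
	}

	int main()
	{
		// 2-wide window with stride 2: an output extent of 14 maps back to an input of 28
		std::printf("plain:  %u\n", input_size_plain(14, 2, 2));
		// tiling with a 2-wide window: an output extent of 13 maps back to 27
		std::printf("tiling: %u\n", input_size_tiling(13, 2));
		return 0;
	}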
Code example #9: soft_rectified_linear_layer::get_layer_configuration_specific
	layer_configuration_specific soft_rectified_linear_layer::get_layer_configuration_specific(const layer_configuration_specific& input_configuration_specific) const
	{
		return layer_configuration_specific(input_configuration_specific);
	}
Code example #10: layer::get_output_layer_configuration_specific
File: layer.cpp  Project: bluelzx/nnForge
	layer_configuration_specific layer::get_output_layer_configuration_specific(const layer_configuration_specific& input_configuration_specific) const
	{
		return layer_configuration_specific(input_configuration_specific);
	}
Code example #11: absolute_layer::get_layer_configuration_specific
	layer_configuration_specific absolute_layer::get_layer_configuration_specific(const layer_configuration_specific& input_configuration_specific) const
	{
		return layer_configuration_specific(input_configuration_specific);
	}