std::vector<const_cuda_linear_buffer_device_smart_ptr> local_contrast_subtractive_layer_updater_schema::get_schema_buffers() const
{
    std::vector<const_cuda_linear_buffer_device_smart_ptr> res;

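    // Down-cast the generic layer schema to access the layer-specific parameters.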
    std::tr1::shared_ptr<const local_contrast_subtractive_layer> layer_derived = std::tr1::dynamic_pointer_cast<const local_contrast_subtractive_layer>(layer_schema);

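    // Copy the list of affected feature map indices into a device buffer.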
    res.push_back(
        cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
                &(*layer_derived->feature_maps_affected.begin()),
                layer_derived->feature_maps_affected.size() * sizeof(unsigned int)))
    );

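    // Each vector of smoothing window weights gets its own device buffer.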
    for(std::vector<std::vector<float> >::const_iterator it = layer_derived->window_weights_list.begin(); it != layer_derived->window_weights_list.end(); ++it)
    {
        const std::vector<float>& current_weights = *it;
        res.push_back(
            cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
                    &(*current_weights.begin()),
                    current_weights.size() * sizeof(float)))
        );
    }

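    // The list of unaffected feature maps is optional; only upload it when non-empty.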
    if (!layer_derived->feature_maps_unaffected.empty())
    {
        res.push_back(
            cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
                    &(*layer_derived->feature_maps_unaffected.begin()),
                    layer_derived->feature_maps_unaffected.size() * sizeof(unsigned int)))
        );
    }

    return res;
}
		layer_hessian_cuda::buffer_set layer_hessian_cuda::allocate_all_buffers(unsigned int max_entry_count) const
		{
			buffer_set res;

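			// Per-entry buffers are scaled by the maximum number of entries processed at once.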
			std::vector<size_t> sizes = get_sizes_of_additional_buffers_per_entry();
			for(std::vector<size_t>::const_iterator it = sizes.begin(); it != sizes.end(); ++it)
			{
				size_t sz = *it * max_entry_count;
				res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz)));
			}

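			// Fixed-size buffers do not depend on the entry count.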
			std::vector<size_t> fixed_sizes = get_sizes_of_additional_buffers_fixed();
			for(std::vector<size_t>::const_iterator it = fixed_sizes.begin(); it != fixed_sizes.end(); ++it)
			{
				res.additional_buffers.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it)));
			}

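			// Buffer holding the output neurons for all entries.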
			{
				size_t sz = output_elem_count_per_entry * sizeof(float) * max_entry_count;
				res.output_neurons_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
			}

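			// Input errors need a separate buffer only when backpropagation is required
			// and the layer cannot compute the errors in place.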
			if (backprop_required && !is_in_place_backprop())
			{
				size_t sz = input_elem_count_per_entry * sizeof(float) * max_entry_count;
				res.input_errors_buffer = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(sz));
			}

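			// Let the layer populate the additional buffers it requested.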
			fill_additional_buffers(res.additional_buffers);

			return res;
		}
std::vector<cuda_linear_buffer_device_smart_ptr> weight_vector_bound_cuda::allocate_additional_buffers(unsigned int max_entry_count)
{
    std::vector<cuda_linear_buffer_device_smart_ptr> res;

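    // One device buffer per reported per-entry size, scaled by the maximum entry count.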
    std::vector<size_t> per_entry_sizes = get_sizes_of_additional_buffers_per_entry();
    for(std::vector<size_t>::const_iterator it = per_entry_sizes.begin(); it != per_entry_sizes.end(); ++it)
        res.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(*it * max_entry_count)));

    return res;
}
		std::vector<const_cuda_linear_buffer_device_smart_ptr> sigmoid_layer_updater_schema::get_schema_buffers() const
		{
			std::vector<const_cuda_linear_buffer_device_smart_ptr> res;

			nnforge_shared_ptr<const sigmoid_layer> layer_derived = nnforge_dynamic_pointer_cast<const sigmoid_layer>(layer_schema);
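			// Upload the list of affected feature map IDs only when it is non-empty.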
			if (!layer_derived->affected_feature_map_id_list.empty())
			{
				res.push_back(
					cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(
						&(layer_derived->affected_feature_map_id_list.front()),
						layer_derived->affected_feature_map_id_list.size() * sizeof(unsigned int)))
					);
			}

			return res;
		}