// Back-propagates output errors through this convolution layer into the
// input-error buffer using the (pre-v3) cuDNN backward-data call.
// Tensor descriptors are refreshed on every call because entry_count
// (the batch size) may change between invocations.
// Throws neural_network_exception if the layer was not configured for backprop.
void convolution_layer_updater_cuda::enqueue_backprop(
	cudaStream_t stream_id,
	const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
	const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
	const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
	const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
	cuda_linear_buffer_device_smart_ptr output_errors_buffer,
	cuda_linear_buffer_device_smart_ptr input_errors_buffer,
	const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
	std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
	unsigned int entry_count,
	bool force_deterministic)
{
	if (!backprop_required)
		throw neural_network_exception("convolution_layer_updater_cuda is not configured to do backprop but requested to");

	cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));

	// Tensors are described as NCHW; a 1D layer is treated as a height-1 2D tensor.
	const int input_height = (input_configuration_specific.dimension_sizes.size() > 1)
		? input_configuration_specific.dimension_sizes[1]
		: 1;
	cudnn_safe_call(cudnnSetTensor4dDescriptor(
		input_data_desc,
		CUDNN_TENSOR_NCHW,
		CUDNN_DATA_FLOAT,
		entry_count,
		input_configuration_specific.feature_map_count,
		input_height,
		input_configuration_specific.dimension_sizes[0]));

	const int output_height = (output_configuration_specific.dimension_sizes.size() > 1)
		? output_configuration_specific.dimension_sizes[1]
		: 1;
	cudnn_safe_call(cudnnSetTensor4dDescriptor(
		output_data_desc,
		CUDNN_TENSOR_NCHW,
		CUDNN_DATA_FLOAT,
		entry_count,
		output_configuration_specific.feature_map_count,
		output_height,
		output_configuration_specific.dimension_sizes[0]));

	// beta = 0: overwrite the input-error buffer rather than accumulate into it.
	const float blend_src = 1.0F;
	const float blend_dst = 0.0F;
	cudnn_safe_call(cudnnConvolutionBackwardData(
		cuda_config->get_cudnn_handle(),
		&blend_src,
		weights_desc,
		*data[0],
		output_data_desc,
		*output_errors_buffer,
		convolution_desc,
		&blend_dst,
		input_data_desc,
		*input_errors_buffer));
}
// Backward pass of the convolution: computes the gradient with respect to
// the filters and/or the images from the gradient of the convolution output.
// Either destination pointer may be NULL to skip that gradient.
void ConvBC01CuDNN<T>::bprop(const T* imgs, const T* filters,
                             const T *convout_d, T *imgs_d, T *filters_d) {
  const bool want_filters_grad = (filters_d != NULL);
  const bool want_imgs_grad = (imgs_d != NULL);

  if (want_filters_grad) {
    // dL/dW from the images and the output gradient.
    CUDNN_CHECK(cudnnConvolutionBackwardFilter(
        CUDNN::handle(), &CUDNN::one,
        imgs_desc, imgs,
        convout_desc, convout_d,
        conv_desc, &CUDNN::zero,
        filters_desc, filters_d));
  }

  if (want_imgs_grad) {
    // dL/dX from the filters and the output gradient.
    CUDNN_CHECK(cudnnConvolutionBackwardData(
        CUDNN::handle(), &CUDNN::one,
        filters_desc, filters,
        convout_desc, convout_d,
        conv_desc, &CUDNN::zero,
        imgs_desc, imgs_d));
  }
}
// Backward pass of the convolution using explicit cuDNN algorithms and a
// scratch workspace.  Computes filter gradients and/or image gradients from
// the output gradient; a NULL destination pointer skips that gradient.
// workspace_size and the bwd_* algorithms are assumed to have been
// determined earlier (presumably during setup — confirm against the class).
void ConvBC01CuDNN<T>::bprop(const T* imgs, const T* filters,
                             const T *convout_d, T *imgs_d, T *filters_d) {
  // Scratch buffer shared by both backward calls.
  void *scratch = NULL;
  if (workspace_size > 0)
    scratch = CUDA::buffer(workspace_size);

  if (filters_d != NULL) {
    CUDNN_CHECK(cudnnConvolutionBackwardFilter(
        CUDNN::handle(), &CUDNN::one,
        imgs_desc, imgs,
        convout_desc, convout_d,
        conv_desc, bwd_filters_algo,
        scratch, workspace_size,
        &CUDNN::zero,
        filters_desc, filters_d));
  }

  if (imgs_d != NULL) {
    CUDNN_CHECK(cudnnConvolutionBackwardData(
        CUDNN::handle(), &CUDNN::one,
        filters_desc, filters,
        convout_desc, convout_d,
        conv_desc, bwd_imgs_algo,
        scratch, workspace_size,
        &CUDNN::zero,
        imgs_desc, imgs_d));
  }
}
// Gradient of a cuDNN convolution with respect to its inputs (GpuDnnConvGradI).
// `kerns` are the filters, `output` is the gradient flowing in from above,
// and on success `*input` receives alpha * dConv/dInput + beta * im.
// Returns 0 on success, 1 on error (with a Python exception set).
//
// Fixes relative to the previous revision:
//  - the FFT-eligibility check read the spatial sizes at dims indices [3]/[4],
//    which is out of bounds for a 4-d (NCHW) array; the spatial dims are [2]/[3];
//  - APPLY_SPECIFIC(previous_algo_set) was never set, so CHOOSE_ALGO_ONCE
//    re-selected an algorithm on every call.
int APPLY_SPECIFIC(conv_gi)(CudaNdarray *kerns, CudaNdarray *output,
                            CudaNdarray *im,
                            cudnnConvolutionDescriptor_t desc,
                            float alpha, float beta,
                            CudaNdarray **input) {
  cudnnStatus_t err = CUDNN_STATUS_SUCCESS;

  // The channel count of the images must match the filters' input channels.
  if (CudaNdarray_HOST_DIMS(im)[1] != CudaNdarray_HOST_DIMS(kerns)[1]) {
    PyErr_SetString(PyExc_ValueError,
                    "GpuDnnConv images and kernel must have the same stack size\n");
    return 1;
  }

  if (c_set_tensorNd(output, APPLY_SPECIFIC(output)) == -1)
    return 1;
  if (c_set_filterNd(kerns, APPLY_SPECIFIC(kerns)) == -1)
    return 1;

  int nb_dim = CudaNdarray_NDIM(output);

#ifdef CONV_INPLACE
  // In-place variant: the result is written directly over `im`.
  Py_XDECREF(*input);
  *input = im;
  Py_INCREF(*input);
#else
  if (CudaNdarray_prep_output(input, nb_dim, CudaNdarray_HOST_DIMS(im)) != 0)
    return 1;
  // With beta != 0 the prior contents of `im` contribute to the result, so
  // they must be copied into the freshly prepared output first.
  if (beta != 0.0 && CudaNdarray_CopyFromCudaNdarray(*input, im))
    return 1;
#endif

  if (c_set_tensorNd(*input, APPLY_SPECIFIC(input)) == -1)
    return 1;

#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 3000
  {
    size_t worksize;
    void *workspace;
    cudnnConvolutionBwdDataAlgo_t chosen_algo;

    if (CHOOSE_ALGO)
    {
      // A new convolution implementation should be selected, based either on
      // timing or heuristics, if in one of the two following cases :
      // - The implementation should only be chosen during the first execution
      //   of an apply node and this is the first execution of the apply node.
      // - The implementation should be chosen as often as necessary and the
      //   shapes of the inputs differ from the last time an implementation
      //   was chosen.
      bool reuse_previous_algo;
      if (CHOOSE_ALGO_ONCE)
      {
        // Only choose a new implementation if none has been chosen before.
        reuse_previous_algo = APPLY_SPECIFIC(previous_algo_set);
      }
      else
      {
        // Reuse the previous implementation if the kernels and the outputs
        // have the same shapes as they had when the previous implementation
        // was selected.
        bool same_shapes = true;
        for (int i = 0; (i < nb_dim) && same_shapes; i++)
        {
          same_shapes &= (CudaNdarray_HOST_DIMS(kerns)[i] ==
                          APPLY_SPECIFIC(previous_kerns_shape)[i]);
          same_shapes &= (CudaNdarray_HOST_DIMS(output)[i] ==
                          APPLY_SPECIFIC(previous_output_shape)[i]);
        }
        reuse_previous_algo = same_shapes;
      }

      // If the previously chosen implementation can't be reused, select a
      // new one based on the shapes of the current inputs.
      if (!reuse_previous_algo)
      {
        if (CHOOSE_ALGO_TIME)
        {
          // Time the different implementations to choose the best one.
          int requestedCount = 1;
          int count;
          cudnnConvolutionBwdDataAlgoPerf_t choosen_algo_perf;
          err = cudnnFindConvolutionBackwardDataAlgorithm(_handle,
                                                          APPLY_SPECIFIC(kerns),
                                                          APPLY_SPECIFIC(output),
                                                          desc,
                                                          APPLY_SPECIFIC(input),
                                                          requestedCount,
                                                          &count,
                                                          &choosen_algo_perf);
          if (err != CUDNN_STATUS_SUCCESS) {
            PyErr_Format(PyExc_RuntimeError,
                         "GpuDnnConvGradI: error selecting convolution algo: "
                         "%s", cudnnGetErrorString(err));
            return 1;
          }
          chosen_algo = choosen_algo_perf.algo;
        }
        else
        {
          // Choose the implementation with heuristics, bounded by the amount
          // of memory currently available on the GPU.
          size_t free = 0, total = 0;
          cudaError_t err2 = cudaMemGetInfo(&free, &total);
          if (err2 != cudaSuccess) {
            cudaGetLastError();
            fprintf(stderr,
                    "Error when trying to find the memory information"
                    " on the GPU: %s\n", cudaGetErrorString(err2));
            return 1;
          }

          err = cudnnGetConvolutionBackwardDataAlgorithm(_handle,
                                                         APPLY_SPECIFIC(kerns),
                                                         APPLY_SPECIFIC(output),
                                                         desc,
                                                         APPLY_SPECIFIC(input),
                                                         CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
                                                         free,
                                                         &chosen_algo);
          if (err != CUDNN_STATUS_SUCCESS) {
            PyErr_Format(PyExc_RuntimeError,
                         "GpuDnnConvGradI: error selecting convolution algo: %s",
                         cudnnGetErrorString(err));
            return 1;
          }
        }

        // Store the shapes of the kernels and output as well as the chosen
        // algorithm for future use.
        APPLY_SPECIFIC(previous_bwd_d_algo) = chosen_algo;
        // FIX: record that a selection has been made; without this,
        // CHOOSE_ALGO_ONCE re-ran the selection on every call.
        APPLY_SPECIFIC(previous_algo_set) = true;
        for (int i = 0; i < nb_dim; i++)
        {
          APPLY_SPECIFIC(previous_kerns_shape)[i] = CudaNdarray_HOST_DIMS(kerns)[i];
          APPLY_SPECIFIC(previous_output_shape)[i] = CudaNdarray_HOST_DIMS(output)[i];
        }
      }
      else
      {
        // Reuse the previously chosen convolution implementation.
        chosen_algo = APPLY_SPECIFIC(previous_bwd_d_algo);
      }
    }
    else
    {
      chosen_algo = CONV_ALGO;
    }

    // The FFT implementation (only in v3 and onward) does not support strides,
    // 1x1 filters or inputs with a spatial dimension larger than 1024.
    // If the chosen implementation is FFT, validate that it can be used on the
    // current data and default on a safe implementation if it can't.
    if (chosen_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT && nb_dim == 4)
    {
      // Extract the properties of the convolution descriptor.
      int pad_h, pad_w, stride_v, stride_h, upscale_x, upscale_y;
      cudnnConvolutionMode_t mode;
      err = cudnnGetConvolution2dDescriptor(desc, &pad_h, &pad_w,
                                            &stride_v, &stride_h,
                                            &upscale_x, &upscale_y,
                                            &mode);
      if (err != CUDNN_STATUS_SUCCESS) {
        PyErr_Format(PyExc_RuntimeError,
                     "GpuDnnConvGradI: error getting convolution properties: %s",
                     cudnnGetErrorString(err));
        return 1;
      }

      // FIX: for a 4-d NCHW array the spatial dimensions are at indices 2
      // and 3; the previous indices [3] and [4] read past the end of the
      // dims array.
      int filter_h = CudaNdarray_HOST_DIMS(kerns)[2];
      int filter_w = CudaNdarray_HOST_DIMS(kerns)[3];
      int input_h = CudaNdarray_HOST_DIMS(*input)[2];
      int input_w = CudaNdarray_HOST_DIMS(*input)[3];

      // Ensure that the selected implementation supports the requested
      // convolution. Fall back to a safe implementation otherwise.
      if (stride_v != 1 || stride_h != 1 || input_h > 1024 ||
          input_w > 1024 || (filter_h == 1 && filter_w == 1))
      {
        chosen_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0;
      }
    }

    // Infer required workspace size from the chosen implementation.
    err = cudnnGetConvolutionBackwardDataWorkspaceSize(_handle,
                                                       APPLY_SPECIFIC(kerns),
                                                       APPLY_SPECIFIC(output),
                                                       desc,
                                                       APPLY_SPECIFIC(input),
                                                       chosen_algo,
                                                       &worksize);
    if (err != CUDNN_STATUS_SUCCESS) {
      PyErr_Format(PyExc_RuntimeError,
                   "GpuDnnConvGradI: error getting worksize: %s",
                   cudnnGetErrorString(err));
      return 1;
    }

    // Allocate workspace for the convolution.
    workspace = get_work_mem(worksize);
    if (workspace == NULL && worksize != 0)
      return 1;

    // Perform the convolution.
    err = cudnnConvolutionBackwardData_v3(
      _handle,
      (void *)&alpha,
      APPLY_SPECIFIC(kerns), CudaNdarray_DEV_DATA(kerns),
      APPLY_SPECIFIC(output), CudaNdarray_DEV_DATA(output),
      desc, chosen_algo, workspace, worksize,
      (void *)&beta,
      APPLY_SPECIFIC(input), CudaNdarray_DEV_DATA(*input));
  }
#else
  // Pre-v3 cuDNN: no algorithm selection or workspace.
  err = cudnnConvolutionBackwardData(
    _handle,
    (void *)&alpha,
    APPLY_SPECIFIC(kerns), CudaNdarray_DEV_DATA(kerns),
    APPLY_SPECIFIC(output), CudaNdarray_DEV_DATA(output),
    desc,
    (void *)&beta,
    APPLY_SPECIFIC(input), CudaNdarray_DEV_DATA(*input));
#endif

  if (err != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError,
                 "GpuDnnConvGradI: error doing operation: %s",
                 cudnnGetErrorString(err));
    return 1;
  }
  return 0;
}
// Gradient of a cuDNN convolution with respect to its inputs (GpuDnnConvGradI).
// `kerns` are the filters, `output` is the gradient flowing in from above,
// and on success `*input` receives alpha * dConv/dInput + beta * im.
// Returns 0 on success, 1 on error (with a Python exception set).
//
// Fix relative to the original: in the (disabled) debug switch below, `a` was
// a non-const char* assigned from string literals (ill-formed in ISO C++11+)
// and left uninitialized when no case matched, making the printf undefined
// behavior if the block were ever enabled. It is now a const char* with a
// default value.
int APPLY_SPECIFIC(conv_gi)(CudaNdarray *kerns, CudaNdarray *output,
                            CudaNdarray *im,
                            cudnnConvolutionDescriptor_t desc,
                            float alpha, float beta,
                            CudaNdarray **input) {
  cudnnStatus_t err = CUDNN_STATUS_SUCCESS;

  // The channel count of the images must match the filters' input channels.
  if (CudaNdarray_HOST_DIMS(im)[1] != CudaNdarray_HOST_DIMS(kerns)[1]) {
    PyErr_SetString(PyExc_ValueError,
                    "GpuDnnConv images and kernel must have the same stack size\n");
    return 1;
  }

  int nb_dim = CudaNdarray_NDIM(output);

#ifdef CONV_INPLACE
  // In-place variant: the result is written directly over `im`.
  Py_XDECREF(*input);
  *input = im;
  Py_INCREF(*input);
#else
  if (CudaNdarray_prep_output(input, nb_dim, CudaNdarray_HOST_DIMS(im)) != 0)
    return 1;
  // With beta != 0 the prior contents of `im` contribute to the result, so
  // they must be copied into the freshly prepared output first.
  if (beta != 0.0 && CudaNdarray_CopyFromCudaNdarray(*input, im))
    return 1;
#endif

  // Degenerate case: an empty batch or empty filters produce a zero gradient;
  // cuDNN is not called at all.
  if (CudaNdarray_DIMS(im)[0] == 0 ||
      CudaNdarray_DIMS(kerns)[0] == 0 ||
      CudaNdarray_DIMS(kerns)[1] == 0)
  {
    cudaError_t err2 = cudaMemset((*input)->devdata, 0,
                                  CudaNdarray_SIZE(*input) * sizeof(real));
    if (err2 != cudaSuccess)
    {
      PyErr_Format(PyExc_RuntimeError,
                   "GpuDnnConv grad wrt. inputs could not fill the output with zeros: %s",
                   cudaGetErrorString(err2));
      return 1;
    }
    return 0;
  }

  if (c_set_tensorNd(output, APPLY_SPECIFIC(output)) == -1)
    return 1;
  if (c_set_filterNd(kerns, APPLY_SPECIFIC(kerns)) == -1)
    return 1;
  if (c_set_tensorNd(*input, APPLY_SPECIFIC(input)) == -1)
    return 1;

  // Validate that the received gradient has the shape the forward convolution
  // would have produced for this input/filter pair.
  int expected_output_dims[5] = {0};
  err = cudnnGetConvolutionNdForwardOutputDim(desc, APPLY_SPECIFIC(input),
                                              APPLY_SPECIFIC(kerns),
                                              nb_dim, expected_output_dims);
  if (err != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError,
                 "error computing convolution output dim: %s",
                 cudnnGetErrorString(err));
    return 1;
  }
  if (nb_dim == 4)
  {
    if ((CudaNdarray_HOST_DIMS(output)[0] != expected_output_dims[0]) ||
        (CudaNdarray_HOST_DIMS(output)[1] != expected_output_dims[1]) ||
        (CudaNdarray_HOST_DIMS(output)[2] != expected_output_dims[2]) ||
        (CudaNdarray_HOST_DIMS(output)[3] != expected_output_dims[3]))
    {
      PyErr_Format(PyExc_ValueError,
                   "impossible convolution output dim: expected %ldx%ldx%ldx%ld"
                   " but received gradient with shape %ldx%ldx%ldx%ld",
                   (long int)expected_output_dims[0],
                   (long int)expected_output_dims[1],
                   (long int)expected_output_dims[2],
                   (long int)expected_output_dims[3],
                   (long int)CudaNdarray_HOST_DIMS(output)[0],
                   (long int)CudaNdarray_HOST_DIMS(output)[1],
                   (long int)CudaNdarray_HOST_DIMS(output)[2],
                   (long int)CudaNdarray_HOST_DIMS(output)[3]);
      return 1;
    }
  }
  else if (nb_dim == 5)
  {
    if ((CudaNdarray_HOST_DIMS(output)[0] != expected_output_dims[0]) ||
        (CudaNdarray_HOST_DIMS(output)[1] != expected_output_dims[1]) ||
        (CudaNdarray_HOST_DIMS(output)[2] != expected_output_dims[2]) ||
        (CudaNdarray_HOST_DIMS(output)[3] != expected_output_dims[3]) ||
        (CudaNdarray_HOST_DIMS(output)[4] != expected_output_dims[4]))
    {
      PyErr_Format(PyExc_ValueError,
                   "impossible convolution output dim: expected %ldx%ldx%ldx%ldx%ld"
                   " but received gradient with shape %ldx%ldx%ldx%ldx%ld",
                   (long int)expected_output_dims[0],
                   (long int)expected_output_dims[1],
                   (long int)expected_output_dims[2],
                   (long int)expected_output_dims[3],
                   (long int)expected_output_dims[4],
                   (long int)CudaNdarray_HOST_DIMS(output)[0],
                   (long int)CudaNdarray_HOST_DIMS(output)[1],
                   (long int)CudaNdarray_HOST_DIMS(output)[2],
                   (long int)CudaNdarray_HOST_DIMS(output)[3],
                   (long int)CudaNdarray_HOST_DIMS(output)[4]);
      return 1;
    }
  }

  {
    size_t worksize;
    void *workspace;
    cudnnConvolutionBwdDataAlgo_t chosen_algo;

    if (CHOOSE_ALGO)
    {
      // A new convolution implementation should be selected, based either on
      // timing or heuristics, if in one of the two following cases :
      // - The implementation should only be chosen during the first execution
      //   of an apply node and this is the first execution of the apply node.
      // - The implementation should be chosen as often as necessary and the
      //   shapes of the inputs differ from the last time an implementation
      //   was chosen.
      bool reuse_previous_algo;
      if (CHOOSE_ALGO_ONCE)
      {
        // Only choose a new implementation if none has been chosen before.
        reuse_previous_algo = APPLY_SPECIFIC(previous_algo_set);
      }
      else
      {
        // Reuse the previous implementation if the kernels and the outputs
        // have the same shapes as they had when the previous implementation
        // was selected.
        bool same_shapes = true;
        for (int i = 0; (i < nb_dim) && same_shapes; i++)
        {
          same_shapes &= (CudaNdarray_HOST_DIMS(kerns)[i] ==
                          APPLY_SPECIFIC(previous_kerns_shape)[i]);
          same_shapes &= (CudaNdarray_HOST_DIMS(output)[i] ==
                          APPLY_SPECIFIC(previous_output_shape)[i]);
        }
        reuse_previous_algo = same_shapes;
      }

      // If the previously chosen implementation can't be reused, select a
      // new one based on the shapes of the current inputs.
      if (!reuse_previous_algo)
      {
        if (CHOOSE_ALGO_TIME)
        {
          // Time the different implementations to choose the best one.
          int requestedCount = 1;
          int count;
          cudnnConvolutionBwdDataAlgoPerf_t choosen_algo_perf;
          err = cudnnFindConvolutionBackwardDataAlgorithm(_handle,
                                                          APPLY_SPECIFIC(kerns),
                                                          APPLY_SPECIFIC(output),
                                                          desc,
                                                          APPLY_SPECIFIC(input),
                                                          requestedCount,
                                                          &count,
                                                          &choosen_algo_perf);
          if (err != CUDNN_STATUS_SUCCESS) {
            PyErr_Format(PyExc_RuntimeError,
                         "GpuDnnConvGradI: error selecting convolution algo: "
                         "%s", cudnnGetErrorString(err));
            return 1;
          }
          chosen_algo = choosen_algo_perf.algo;
        }
        else
        {
          // Choose the implementation with heuristics, bounded by the amount
          // of memory currently available on the GPU.
          size_t free = 0, total = 0;
          cudaError_t err2 = cudaMemGetInfo(&free, &total);
          if (err2 != cudaSuccess) {
            cudaGetLastError();
            fprintf(stderr,
                    "Error when trying to find the memory information"
                    " on the GPU: %s\n", cudaGetErrorString(err2));
            return 1;
          }

          err = cudnnGetConvolutionBackwardDataAlgorithm(_handle,
                                                         APPLY_SPECIFIC(kerns),
                                                         APPLY_SPECIFIC(output),
                                                         desc,
                                                         APPLY_SPECIFIC(input),
                                                         CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
                                                         free,
                                                         &chosen_algo);
          if (err != CUDNN_STATUS_SUCCESS) {
            PyErr_Format(PyExc_RuntimeError,
                         "GpuDnnConvGradI: error selecting convolution algo: %s",
                         cudnnGetErrorString(err));
            return 1;
          }
        }

        // Store the shapes of the kernels and output as well as the chosen
        // algorithm for future use.
        APPLY_SPECIFIC(previous_bwd_d_algo) = chosen_algo;
        APPLY_SPECIFIC(previous_algo_set) = true;
        for (int i = 0; i < nb_dim; i++)
        {
          APPLY_SPECIFIC(previous_kerns_shape)[i] = CudaNdarray_HOST_DIMS(kerns)[i];
          APPLY_SPECIFIC(previous_output_shape)[i] = CudaNdarray_HOST_DIMS(output)[i];
        }
      }
      else
      {
        // Reuse the previously chosen convolution implementation.
        chosen_algo = APPLY_SPECIFIC(previous_bwd_d_algo);
      }
    }
    else
    {
      chosen_algo = CONV_ALGO;
    }

    // Disabled debug trace of the chosen algorithm; flip the condition to
    // enable. FIX: `a` is const (string literals) and has a default so the
    // printf is always defined.
    if (0)
    {
      const char * a = "unknown";
      switch (chosen_algo)
      {
      case CUDNN_CONVOLUTION_BWD_DATA_ALGO_0:
        a = "implicit gemm (0)";
        break;
      case CUDNN_CONVOLUTION_BWD_DATA_ALGO_1:
        a = "precomp gemm (1)";
        break;
      case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT:
        a = "fft (2)";
        break;
      case CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING:
        a = "fft tiling (3)";
        break;
#if CUDNN_VERSION > 5000
      case CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD:
        a = "winograd (4)";
        break;
#endif
      }
      printf("GpuDNNConvGI: algo %s\n", a);
    }

    // The FFT implementation (only in V3 and onward) does not support strides,
    // 1x1 filters or inputs with a spatial dimension larger than 1024.
    // The tiled-FFT implementation (only in V4 onward) does not support
    // strides.
    // If the chosen implementation is FFT or tiled-FFT, validate that it can
    // be used on the current data and default on a safe implementation if it
    // can't.
    // Following code is 2d-specific, but it is fine as FFT and tiled-FFT are
    // defined only for 2d-filters.
    if ((chosen_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING ||
         chosen_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT) && nb_dim == 4)
    {
      // Extract the properties of the convolution descriptor.
      int nd;
      int pad[2];
      int stride[2];
      int upscale[2];
      cudnnConvolutionMode_t mode;
      cudnnDataType_t data_type;
      err = cudnnGetConvolutionNdDescriptor(desc, 2, &nd, pad, stride,
                                            upscale, &mode, &data_type);
      if (err != CUDNN_STATUS_SUCCESS) {
        PyErr_Format(PyExc_RuntimeError,
                     "GpuDnnConvGradI: error getting convolution properties: %s",
                     cudnnGetErrorString(err));
        return 1;
      }

      // Spatial sizes of the filters and the input (NCHW layout).
      int filter_h = CudaNdarray_HOST_DIMS(kerns)[2];
      int filter_w = CudaNdarray_HOST_DIMS(kerns)[3];
      int input_h = CudaNdarray_HOST_DIMS(*input)[2];
      int input_w = CudaNdarray_HOST_DIMS(*input)[3];

      // Ensure that the selected implementation supports the requested
      // convolution. Fall back to a safe implementation otherwise.
      if (chosen_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT)
      {
        if (stride[0] != 1 || stride[1] != 1 || input_h > 1024 ||
            input_w > 1024 || (filter_h == 1 && filter_w == 1))
        {
          chosen_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0;
        }
      }
      else
      {
        // chosen_algo == CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING
        if (stride[0] != 1 || stride[1] != 1)
        {
          chosen_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_0;
        }
      }
    }

    // Infer required workspace size from the chosen implementation.
    err = cudnnGetConvolutionBackwardDataWorkspaceSize(_handle,
                                                       APPLY_SPECIFIC(kerns),
                                                       APPLY_SPECIFIC(output),
                                                       desc,
                                                       APPLY_SPECIFIC(input),
                                                       chosen_algo,
                                                       &worksize);
    if (err != CUDNN_STATUS_SUCCESS) {
      PyErr_Format(PyExc_RuntimeError,
                   "GpuDnnConvGradI: error getting worksize: %s",
                   cudnnGetErrorString(err));
      return 1;
    }

    // Allocate workspace for the convolution.
    workspace = get_work_mem(worksize);
    if (workspace == NULL && worksize != 0)
      return 1;

    // Perform the convolution.
    err = cudnnConvolutionBackwardData(
      _handle,
      (void *)&alpha,
      APPLY_SPECIFIC(kerns), CudaNdarray_DEV_DATA(kerns),
      APPLY_SPECIFIC(output), CudaNdarray_DEV_DATA(output),
      desc, chosen_algo, workspace, worksize,
      (void *)&beta,
      APPLY_SPECIFIC(input), CudaNdarray_DEV_DATA(*input));
  }

  if (err != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError,
                 "GpuDnnConvGradI: error doing operation: %s",
                 cudnnGetErrorString(err));
    return 1;
  }
  return 0;
}
// Propagates output errors back to input errors for this convolution layer.
// A cuDNN backward-data algorithm is selected to fit the workspace supplied
// in temporary_working_fixed_buffer (if any).  When add_update_to_destination
// is set, the result is accumulated into the existing input-error buffer
// (beta = 1) instead of overwriting it (beta = 0).
void convolution_layer_updater_cuda::enqueue_backward_data_propagation(
	cudaStream_t stream_id,
	unsigned int input_index,
	cuda_linear_buffer_device::ptr input_errors_buffer,
	cuda_linear_buffer_device::const_ptr output_errors_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data,
	const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
	const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
	cuda_linear_buffer_device::const_ptr output_neurons_buffer,
	const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
	cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
	cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
	cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
	cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
	bool add_update_to_destination,
	unsigned int entry_count)
{
	cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));

	// Descriptors depend on the batch size, so refresh them every call.
	cudnn_util::set_tensor_descriptor(
		input_data_desc,
		input_configuration_specific_list[0],
		entry_count);
	cudnn_util::set_tensor_descriptor(
		output_data_desc,
		output_configuration_specific,
		entry_count);

	// Optional scratch space for algorithms that need a workspace.
	void * ws = 0;
	size_t ws_size = 0;
	if (temporary_working_fixed_buffer)
	{
		ws = *temporary_working_fixed_buffer;
		ws_size = temporary_working_fixed_buffer->get_size();
	}

	cudnnConvolutionBwdDataAlgo_t algo = cuda_config->cudnn_find_convolution_backward_data_algo(
		input_data_desc,
		weights_desc,
		convolution_desc,
		output_data_desc,
		*output_errors_buffer,
		*data[0],
		*temporary_working_per_entry_buffer,
		ws,
		ws_size);

	const float blend_src = 1.0F;
	const float blend_dst = add_update_to_destination ? 1.0F : 0.0F;
	cudnn_safe_call(cudnnConvolutionBackwardData(
		cuda_config->get_cudnn_handle(),
		&blend_src,
		weights_desc,
		*data[0],
		output_data_desc,
		*output_errors_buffer,
		convolution_desc,
		algo,
		ws,
		ws_size,
		&blend_dst,
		input_data_desc,
		*input_errors_buffer));
}