inline cudaArray* MallocArray3D< uchar4 >( VolumeDescription volumeDescription )
{
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc( 8, 8, 8, 8, cudaChannelFormatKindUnsigned );
    cudaExtent volumeExtent = make_cudaExtent( volumeDescription.numVoxels.x,
                                               volumeDescription.numVoxels.y,
                                               volumeDescription.numVoxels.z );
    cudaArray* cuArray;
    MOJO_CUDA_SAFE( cudaMalloc3DArray( &cuArray, &channelDesc, volumeExtent ) );
    return cuArray;
}
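// Hedged usage sketch (not from the original source): it shows how the array returned by the
// helper above could be filled from a host buffer and released again. The wrapper name
// UploadVolume3D and the host pointer hostVoxels are hypothetical; VolumeDescription,
// numVoxels and MOJO_CUDA_SAFE are taken from the snippet above.
inline void UploadVolume3D( VolumeDescription volumeDescription, const uchar4* hostVoxels )
{
    cudaArray* cuArray = MallocArray3D< uchar4 >( volumeDescription );

    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr   = make_cudaPitchedPtr( (void*)hostVoxels,
                                               volumeDescription.numVoxels.x * sizeof(uchar4),
                                               volumeDescription.numVoxels.x,
                                               volumeDescription.numVoxels.y );
    copyParams.dstArray = cuArray;
    copyParams.extent   = make_cudaExtent( volumeDescription.numVoxels.x,
                                           volumeDescription.numVoxels.y,
                                           volumeDescription.numVoxels.z );
    copyParams.kind     = cudaMemcpyHostToDevice;
    MOJO_CUDA_SAFE( cudaMemcpy3D( &copyParams ) );

    // ... sample the array through a texture object or surface here ...

    MOJO_CUDA_SAFE( cudaFreeArray( cuArray ) );
}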
void SingleParticle2dx::Methods::CUDAProjectionMethod::prepareForProjections(SingleParticle2dx::DataStructures::ParticleContainer& cont)
{
    cudaSetDevice(getMyGPU());
    cudaStreamCreate(&m_stream);

    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaExtent VS = make_cudaExtent(m_size, m_size, m_size);

    if( m_alloc_done == false )
    {
        cudaMalloc3DArray(&m_cuArray, &channelDesc, VS);
    }

    SingleParticle2dx::real_array3d_type real_data( boost::extents[m_size][m_size][m_size] );
    m_context->getRealSpaceData(real_data);
    unsigned int size = m_size*m_size*m_size*sizeof(float);

    if( m_alloc_done == false )
    {
        res_data_h = (float*)malloc(m_size*m_size*sizeof(float));
        cudaMalloc((void**)&res_data_d, m_size*m_size*sizeof(float));
        m_alloc_done = true;
    }

    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_cudaPitchedPtr((void*)real_data.origin(), VS.width*sizeof(float), VS.width, VS.height);
    copyParams.dstArray = m_cuArray;
    copyParams.extent = VS;
    copyParams.kind = cudaMemcpyHostToDevice;

    // cudaMemcpy3D(&copyParams);
    cudaMemcpy3DAsync(&copyParams, m_stream);

    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = m_cuArray;

    struct cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.addressMode[2] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    if(m_alloc_done == true)
    {
        cudaDestroyTextureObject(m_texObj);
    }

    m_texObj = 0;
    cudaCreateTextureObject(&m_texObj, &resDesc, &texDesc, NULL);
}
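// Hedged companion sketch (not in the original source): a teardown that releases everything
// prepareForProjections allocates. The method name releaseProjectionResources is hypothetical;
// the members (m_texObj, m_cuArray, m_stream, res_data_h, res_data_d, m_alloc_done) come from
// the snippet above.
void SingleParticle2dx::Methods::CUDAProjectionMethod::releaseProjectionResources()
{
    if ( m_alloc_done )
    {
        cudaDestroyTextureObject(m_texObj);
        cudaFreeArray(m_cuArray);
        cudaFree(res_data_d);
        free(res_data_h);
        m_alloc_done = false;
    }
    cudaStreamDestroy(m_stream);
}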
TEST(Malloc3DArray, NegativeChannels) {
    struct cudaArray * ary;
    struct cudaChannelFormatDesc dsc;
    dsc.x = dsc.y = dsc.z = 8;
    dsc.w = -8;
    dsc.f = cudaChannelFormatKindSigned;

    cudaError_t ret;
    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, 1, 1), 0);
    EXPECT_EQ(cudaErrorInvalidChannelDescriptor, ret);
}
TEST(Malloc3DArray, Attributes) {
    struct cudaArray * ary;
    struct cudaChannelFormatDesc dsc;
    dsc.x = dsc.y = dsc.z = dsc.w = 8;
    dsc.f = cudaChannelFormatKindSigned;

    cudaError_t ret;
    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, 1, 1), 0);
    ASSERT_EQ(cudaSuccess, ret);

    struct cudaPointerAttributes attr;
    ret = cudaPointerGetAttributes(&attr, ary);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
}
CTfactory( const VolumeGPU<T>& src,
           U& texRef,
           const cudaTextureFilterMode fm = cudaFilterModePoint,
           const cudaTextureAddressMode am = cudaAddressModeClamp,
           const int norm = false ) : dca_data(NULL)
{
    // Check for valid input
    if( src.d_data.ptr == NULL )
    {
        std::cerr << __FUNCTION__ << ": Source has no data" << std::endl;
        abort();
    }

    // Allocate memory
    cudaChannelFormatDesc cd = cudaCreateChannelDesc<T>();
    cudaExtent tmpExtent = ExtentFromDims( src.dims );
    CUDA_SAFE_CALL( cudaMalloc3DArray( &(this->dca_data), &cd, tmpExtent ) );

    // Do the copy
    cudaMemcpy3DParms cp = {0};
    cp.srcPtr = src.d_data;
    cp.dstArray = this->dca_data;
    cp.extent = tmpExtent;
    cp.kind = cudaMemcpyDeviceToDevice;
    CUDA_SAFE_CALL( cudaMemcpy3D( &cp ) );

    // Bind the texture
    texRef.normalized = norm;
    texRef.addressMode[0] = am;
    texRef.addressMode[1] = am;
    texRef.addressMode[2] = am;
    texRef.filterMode = fm;
    CUDA_SAFE_CALL( cudaBindTextureToArray( texRef, this->dca_data ) );
}
void VolSkin::init( int width, int height, TetMesh *tm )
{
    this->width = width;
    this->height = height;
    tetMesh = tm;

    // TEMP initialize volume data
    cudaExtent volumeSize = make_cudaExtent(128, 128, 128);
    //cudaExtent volumeSize = make_cudaExtent(256, 256, 256);

    // generate raw volume data
    float *h_densityData = (float*)malloc( sizeof(float)*volumeSize.width*volumeSize.height*volumeSize.depth );
    math::PerlinNoise pn;
    pn.setDepth( 4 );
    pn.setFrequency(3.0f);
    //pn.setInflection(true);

    for( int k=0;k<volumeSize.depth;++k )
        for( int j=0;j<volumeSize.height;++j )
            for( int i=0;i<volumeSize.width;++i )
            {
                int index = k*volumeSize.width*volumeSize.height + j*volumeSize.width + i;
                math::Vec3f uvw( (float)(i)/(float)(volumeSize.width),
                                 (float)(j)/(float)(volumeSize.height),
                                 (float)(k)/(float)(volumeSize.depth) );
                float t = (float)(j)/(float)(volumeSize.height);
                //h_densityData[index] = 0.5f;
                //h_densityData[index] = (1.0f-t)*1.0f;
                h_densityData[index] = std::max( 0.0f, pn.perlinNoise_3D( uvw.x, uvw.y*2.0, uvw.z ) )*1.0f;
                // cylinder
                //h_densityData[index] = std::max( 0.0f, pn.perlinNoise_3D( uvw.x*2.0f, uvw.y*2.0f, uvw.z*2.0f ))*1.0f;
                // tetraeder
                //h_densityData[index] = (uvw.getLength() < 0.2f ? 1.0f : 0.0f)*2.0f;
            }

    // create 3D array
    d_densityArray = 0;
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
    cudaMalloc3DArray(&d_densityArray, &channelDesc, volumeSize);

    // copy data to 3D array
    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_cudaPitchedPtr((void*)h_densityData, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
    copyParams.dstArray = d_densityArray;
    copyParams.extent = volumeSize;
    copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copyParams);

    // host staging buffer is no longer needed once the copy has completed
    free(h_densityData);

    // TMP
    /*
    h_debugVec.resize( 1000.0f );
    d_debugVec = h_debugVec;
    h_debugInfo.samples = convertToKernel(d_debugVec);
    h_debugInfo.numSamples = 0;
    cudaMemcpyToSymbol( d_debugInfo, &h_debugInfo, sizeof(DebugInfo), 0, cudaMemcpyHostToDevice );
    */

    // setup lighting
    m_light0.cam = base::CameraPtr( new base::Camera() );
    m_light0.cam->m_aspectRatio = 1.0;
    //m_light0.cam->m_transform = math::createLookAtMatrix( math::Vec3f( -2.0f, -2.0f, 2.0f ), math::Vec3f( 0.0f, 0.0f, 0.0f ), math::Vec3f( 0.0f, 1.0f, 0.0f ), false );
    //m_light0.cam->m_transform = math::Matrix44f::TranslationMatrix( 0.3f, 0.15f, 2.0f );
    //m_light0.cam->m_transform = math::Matrix44f::TranslationMatrix( -3.0f, 0.0f, 0.0f );
    m_light0.cam->m_transform = math::createLookAtMatrix( math::Vec3f( 4.0f, 0.0f, 0.0f ), math::Vec3f( 0.0f, 0.0f, 0.0f ), math::Vec3f( 0.0f, 1.0f, 0.0f ), false );
    m_light0.cam->update();
    cudaMalloc( &m_light0.d_dctCoefficients, width*height*sizeof(float)*8 ); // 8 floats / 6 coefficients

    // set defaults
    setTotalCrossSection( 10.0f );
    setAlbedo( 1.0f );
    setAbsorptionColor( math::Vec3f(0.5f, 0.5f, 0.5f) );
    setScatteringColor( math::Vec3f(0.5f, 0.5f, 0.5f) );
    setLight( 0, math::Vec3f(1.0f, 1.0f, 1.0f), 0.0f );
    setTime( 0.0f );
    setStepSize( 0.01f );

    // get tetmesh onto gpu
    gpuUploadTetMesh();
}
TEST(Malloc3DArray, NullArguments) {
    struct cudaArray * ary;
    struct cudaChannelFormatDesc dsc;
    dsc.x = dsc.y = dsc.z = dsc.w = 8;
    dsc.f = cudaChannelFormatKindSigned;

    // Commented out cases segfault.
    cudaError_t ret;

    ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(0, 0, 0), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(0, 0, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(0, 8, 0), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(0, 8, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(8, 0, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(8, 0, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(8, 8, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, NULL, make_cudaExtent(8, 8, 8), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(0, 0, 0), 0);
    EXPECT_EQ(cudaSuccess, ret);
    ret = cudaFreeArray(ary);
    EXPECT_EQ(cudaSuccess, ret);

    ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(0, 0, 8), 0);
    EXPECT_EQ(cudaSuccess, ret);
    ret = cudaFreeArray(ary);
    EXPECT_EQ(cudaSuccess, ret);

    ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(0, 8, 0), 0);
    EXPECT_EQ(cudaSuccess, ret);
    ret = cudaFreeArray(ary);
    EXPECT_EQ(cudaSuccess, ret);

    ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(0, 8, 8), 0);
    EXPECT_EQ(cudaSuccess, ret);
    ret = cudaFreeArray(ary);
    EXPECT_EQ(cudaSuccess, ret);

    // ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(8, 0, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    /**
     * There's no reason why this should pass...
    ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(8, 0, 8), 0);
    EXPECT_EQ(cudaSuccess, ret);
    ret = cudaFreeArray(ary);
    EXPECT_EQ(cudaSuccess, ret);
     */

    // ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(8, 8, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(&ary, NULL, make_cudaExtent(8, 8, 8), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(0, 0, 0), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(0, 0, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(0, 8, 0), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(0, 8, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(8, 0, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(8, 0, 8), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(8, 8, 0), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);

    // ret = cudaMalloc3DArray(NULL, &dsc, make_cudaExtent(8, 8, 8), 0);
    // EXPECT_EQ(cudaErrorInvalidValue, ret);
}
TEST(Malloc3DArray, Limits) {
    struct cudaArray * ary;
    struct cudaChannelFormatDesc dsc;
    dsc.x = dsc.y = dsc.z = dsc.w = 8;
    dsc.f = cudaChannelFormatKindSigned;

    cudaError_t ret;
    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(0, 0, 0), 0);
    EXPECT_EQ(cudaSuccess, ret);
    if (ret == cudaSuccess) {
        EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
    }

    int device;
    ret = cudaGetDevice(&device);
    ASSERT_EQ(cudaSuccess, ret);

    struct cudaDeviceProp prop;
    ret = cudaGetDeviceProperties(&prop, device);
    ASSERT_EQ(cudaSuccess, ret);

    /* Adapt to what's available by a safe margin */
    size_t targetable = prop.totalGlobalMem / 8;

    if ((size_t) prop.maxTexture1D < targetable) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture1D, 0, 0), 0);
        EXPECT_EQ(cudaSuccess, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture1D + 1, 0, 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }
    }

    if ((size_t) prop.maxTexture2D[0] < targetable) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0], 1, 0), 0);
        EXPECT_EQ(cudaSuccess, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0] + 1, 1, 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }
    }

    if ((size_t) prop.maxTexture2D[1] < targetable) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, prop.maxTexture2D[1], 0), 0);
        EXPECT_EQ(cudaSuccess, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, prop.maxTexture2D[1] + 1, 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }
    }

    if ((size_t) prop.maxTexture2D[0] * prop.maxTexture2D[1] < targetable) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0], prop.maxTexture2D[1], 0), 0);
        EXPECT_EQ(cudaSuccess, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0], prop.maxTexture2D[1] + 1, 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0] + 1, prop.maxTexture2D[1], 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0] + 1, prop.maxTexture2D[1] + 1, 0), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }
    } else if ((size_t) prop.maxTexture2D[0] * prop.maxTexture2D[1] > prop.totalGlobalMem) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture2D[0], prop.maxTexture2D[1], 0), 0);
        EXPECT_EQ(cudaErrorMemoryAllocation, ret);
    }

    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, 1, 1), 0);
    EXPECT_EQ(cudaSuccess, ret);
    if (ret == cudaSuccess) {
        EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
    }

    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(64, 64, 64), 0);
    EXPECT_EQ(cudaSuccess, ret);
    if (ret == cudaSuccess) {
        EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
    }

    /* TODO: More 3D tests. */
}
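/*
 * Hedged sketch toward the "More 3D tests" TODO above (not part of the original suite): it
 * probes the reported 3D texture limits the same way the 1D/2D cases do, expecting success at
 * the maximum and cudaErrorInvalidValue just past it, and guards on available memory with the
 * same targetable margin. The test name Limits3D is hypothetical.
 */
TEST(Malloc3DArray, Limits3D) {
    struct cudaArray * ary;
    struct cudaChannelFormatDesc dsc;
    dsc.x = dsc.y = dsc.z = dsc.w = 8;
    dsc.f = cudaChannelFormatKindSigned;

    int device;
    ASSERT_EQ(cudaSuccess, cudaGetDevice(&device));
    struct cudaDeviceProp prop;
    ASSERT_EQ(cudaSuccess, cudaGetDeviceProperties(&prop, device));

    size_t targetable = prop.totalGlobalMem / 8;
    cudaError_t ret;

    /* Maximum width with minimal height and depth (4 bytes per element). */
    if ((size_t) prop.maxTexture3D[0] * 4 < targetable) {
        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture3D[0], 1, 1), 0);
        EXPECT_EQ(cudaSuccess, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }

        ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(prop.maxTexture3D[0] + 1, 1, 1), 0);
        EXPECT_EQ(cudaErrorInvalidValue, ret);
        if (ret == cudaSuccess) {
            EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
        }
    }

    /* Exceeding the depth limit alone should also be rejected. */
    ret = cudaMalloc3DArray(&ary, &dsc, make_cudaExtent(1, 1, prop.maxTexture3D[2] + 1), 0);
    EXPECT_EQ(cudaErrorInvalidValue, ret);
    if (ret == cudaSuccess) {
        EXPECT_EQ(cudaSuccess, cudaFreeArray(ary));
    }
}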
void CudaImagePyramidHost::initialize(int width, int height, cudaTextureFilterMode filter_mode, int depth)
{
    qDebug() << "pyramid host initializing with params: " << width << height << filter_mode << depth;

    if (isInitialized() && width == _baseWidth && height == _baseHeight && filter_mode == _filterMode) {
        return;
    }

    clear();
    qDebug() << "Clear done.";

    _baseWidth = width;
    _baseHeight = height;
    _filterMode = filter_mode;
    _numLayers = depth;

    // Get the texture and its channel descriptor to allocate the storage.
    const textureReference* constTexRefPtr = NULL;
    cudaGetTextureReference(&constTexRefPtr, _texture_name);
    qDebug() << "Texture Ref got:" << _name;
    if (constTexRefPtr == 0) {
        qDebug() << "constTexRefPtr == 0";
    }
    checkCUDAError("Can't get tex ref for init TEXTURE_PYRAMID", _name);
    cudaChannelFormatDesc formatDesc = constTexRefPtr->channelDesc;

    if (_textureType == cudaTextureType2DLayered) {
        cudaDeviceProp prop;
        qDebug() << "to get CUDA device prop";
        cudaGetDeviceProperties(&prop, 0);
        qDebug() << "CUDA Device Prop got";
        if (prop.maxTexture2DLayered[0] < _baseWidth ||
            prop.maxTexture2DLayered[1] < _baseHeight ||
            prop.maxTexture2DLayered[2] < _numLayers) {
            qDebug() << "Max layered texture size:" << prop.maxTexture2DLayered[0] << " x "
                     << prop.maxTexture2DLayered[1] << " x " << prop.maxTexture2DLayered[2];
            assert(0);
        }
        cudaExtent extent = make_cudaExtent(_baseWidth, _baseHeight, _numLayers);
        cudaMalloc3DArray(&_storage, &formatDesc, extent, cudaArrayLayered);
    } else {
        cudaMallocArray(&_storage, &formatDesc, _baseWidth, _baseHeight);
    }
    checkCUDAError("Failure to allocate", _name);
    qDebug() << "allocate done";

    // Set texture parameters.
    // Evil hack to get around an apparent bug in the cuda api:
    // cudaGetTextureReference only returns a const reference, and
    // there is no way to set the parameters with a reference other
    // than cast it to non-const.
    textureReference* texRefPtr = NULL;
    texRefPtr = const_cast<textureReference*>( constTexRefPtr );
    texRefPtr->addressMode[0] = cudaAddressModeClamp;
    texRefPtr->addressMode[1] = cudaAddressModeClamp;
    texRefPtr->filterMode = filter_mode;
    texRefPtr->normalized = false; // Use unnormalized (pixel) coordinates for addressing. This forbids texture mode wrap.

    bindTexture();
    qDebug() << "texture bound";

    bool found = false;
    for (size_t i = 0; i < _instances.size(); i++) {
        if (_instances[i] == this) found = true;
    }
    if (!found) {
        qDebug() << "Not found";
        _instances.push_back(this);
    }

    qDebug() << "pyramid host initialized.";
}
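// Hedged alternative sketch (not part of the original class): on CUDA 5.0 and later the
// const_cast workaround in initialize() can be avoided by sampling _storage through a texture
// object instead of a named texture reference. The method name createTextureObject and the
// member _texObject are hypothetical; _storage, _filterMode, checkCUDAError and _name come
// from the snippet above.
void CudaImagePyramidHost::createTextureObject()
{
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = _storage;

    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.addressMode[1] = cudaAddressModeClamp;
    texDesc.filterMode = _filterMode;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 0; // unnormalized (pixel) coordinates, as in initialize()

    cudaCreateTextureObject(&_texObject, &resDesc, &texDesc, NULL);
    checkCUDAError("Can't create texture object", _name);
}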
cudaError_t WINAPI wine_cudaMalloc3DArray(cudaArray_t *array, const struct cudaChannelFormatDesc *desc,
                                          struct cudaExtent extent, unsigned int flags)
{
    WINE_TRACE("\n");
    return cudaMalloc3DArray(array, desc, extent, flags);
}
cudaError_t WINAPI wine_cudaMalloc3DArray(struct cudaArray **arrayPtr, const struct cudaChannelFormatDesc *desc,
                                          struct cudaExtent extent)
{
    WINE_TRACE("\n");
    return cudaMalloc3DArray(arrayPtr, desc, extent);
}