void b3Solver::convertToConstraints( const b3OpenCLArray<b3RigidBodyCL>* bodyBuf, const b3OpenCLArray<b3InertiaCL>* shapeBuf, b3OpenCLArray<b3Contact4>* contactsIn, b3OpenCLArray<b3GpuConstraint4>* contactCOut, void* additionalData, int nContacts, const ConstraintCfg& cfg ) { b3OpenCLArray<b3GpuConstraint4>* constraintNative =0; struct CB { int m_nContacts; float m_dt; float m_positionDrift; float m_positionConstraintCoeff; }; { B3_PROFILE("m_contactToConstraintKernel"); CB cdata; cdata.m_nContacts = nContacts; cdata.m_dt = cfg.m_dt; cdata.m_positionDrift = cfg.m_positionDrift; cdata.m_positionConstraintCoeff = cfg.m_positionConstraintCoeff; b3BufferInfoCL bInfo[] = { b3BufferInfoCL( contactsIn->getBufferCL() ), b3BufferInfoCL( bodyBuf->getBufferCL() ), b3BufferInfoCL( shapeBuf->getBufferCL()), b3BufferInfoCL( contactCOut->getBufferCL() ) }; b3LauncherCL launcher( m_queue, m_contactToConstraintKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); //launcher.setConst( cdata ); launcher.setConst(cdata.m_nContacts); launcher.setConst(cdata.m_dt); launcher.setConst(cdata.m_positionDrift); launcher.setConst(cdata.m_positionConstraintCoeff); launcher.launch1D( nContacts, 64 ); clFinish(m_queue); } contactCOut->resize(nContacts); }
void b3BoundSearchCL::execute(b3OpenCLArray<b3SortData>& src, int nSrc, b3OpenCLArray<unsigned int>& dst, int nDst, Option option ) { b3Int4 constBuffer; constBuffer.x = nSrc; constBuffer.y = nDst; if( option == BOUND_LOWER ) { b3BufferInfoCL bInfo[] = { b3BufferInfoCL( src.getBufferCL(), true ), b3BufferInfoCL( dst.getBufferCL()) }; b3LauncherCL launcher( m_queue, m_lowerSortDataKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( nSrc ); launcher.setConst( nDst ); launcher.launch1D( nSrc, 64 ); } else if( option == BOUND_UPPER ) { b3BufferInfoCL bInfo[] = { b3BufferInfoCL( src.getBufferCL(), true ), b3BufferInfoCL( dst.getBufferCL() ) }; b3LauncherCL launcher(m_queue, m_upperSortDataKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( nSrc ); launcher.setConst( nDst ); launcher.launch1D( nSrc, 64 ); } else if( option == COUNT ) { b3Assert( m_lower ); b3Assert( m_upper ); b3Assert( m_lower->capacity() <= (int)nDst ); b3Assert( m_upper->capacity() <= (int)nDst ); int zero = 0; m_filler->execute( *m_lower, zero, nDst ); m_filler->execute( *m_upper, zero, nDst ); execute( src, nSrc, *m_lower, nDst, BOUND_LOWER ); execute( src, nSrc, *m_upper, nDst, BOUND_UPPER ); { b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_upper->getBufferCL(), true ), b3BufferInfoCL( m_lower->getBufferCL(), true ), b3BufferInfoCL( dst.getBufferCL() ) }; b3LauncherCL launcher( m_queue, m_subtractKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( nSrc ); launcher.setConst( nDst ); launcher.launch1D( nDst, 64 ); } } else { b3Assert( 0 ); } }
///Iteratively solve contact (then friction) constraints on the GPU.
///Runs m_nIterations Gauss-Seidel-style sweeps; each sweep launches the solve
///kernel once per batch (N_BATCHES) so that constraints inside a batch can be
///processed in parallel without write conflicts.
///@param bodyBuf        rigid body state, updated in place by the kernels
///@param shapeBuf       per-body inertia data
///@param constraint     batched contact constraints to solve
///@param additionalData unused
///@param n              number of constraints
///@param maxNumBatches  largest batch count produced by the batching stage
void b3Solver::solveContactConstraint( const b3OpenCLArray<b3RigidBodyCL>* bodyBuf, const b3OpenCLArray<b3InertiaCL>* shapeBuf,
	b3OpenCLArray<b3GpuConstraint4>* constraint, void* additionalData, int n ,int maxNumBatches)
{
	//cdata packs kernel parameters: x = solve phase (0=contact,1=friction),
	//y = maxNumBatches, z = current batch index, w = split count
	b3Int4 cdata = b3MakeInt4( n, 0, 0, 0 );
	{
		const int nn = N_SPLIT*N_SPLIT;

		cdata.x = 0;
		cdata.y = maxNumBatches;//250;

		int numWorkItems = 64*nn/N_BATCHES;
#ifdef DEBUG_ME
		SolverDebugInfo* debugInfo = new  SolverDebugInfo[numWorkItems];
		adl::b3OpenCLArray<SolverDebugInfo> gpuDebugInfo(data->m_device,numWorkItems);
#endif

		{
			B3_PROFILE("m_batchSolveKernel iterations");
			for(int iter=0; iter<m_nIterations; iter++)
			{
				for(int ib=0; ib<N_BATCHES; ib++)
				{
					//optional CPU-side validation of the batch layout
					if (verify)
					{
						checkConstraintBatch(bodyBuf,shapeBuf,constraint,m_numConstraints,m_offsets,ib);
					}

#ifdef DEBUG_ME
					memset(debugInfo,0,sizeof(SolverDebugInfo)*numWorkItems);
					gpuDebugInfo.write(debugInfo,numWorkItems);
#endif

					cdata.z = ib;
					cdata.w = N_SPLIT;

					b3LauncherCL launcher( m_queue, m_solveContactKernel );
#if 1
					b3BufferInfoCL bInfo[] = {
						b3BufferInfoCL( bodyBuf->getBufferCL() ),
						b3BufferInfoCL( shapeBuf->getBufferCL() ),
						b3BufferInfoCL( constraint->getBufferCL() ),
						b3BufferInfoCL( m_numConstraints->getBufferCL() ),
						b3BufferInfoCL( m_offsets->getBufferCL() )
#ifdef DEBUG_ME
						,	b3BufferInfoCL(&gpuDebugInfo)
#endif
					};

					launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
					//launcher.setConst(  cdata.x );
					launcher.setConst(  cdata.y );
					launcher.setConst(  cdata.z );
					launcher.setConst(  cdata.w );
					launcher.launch1D( numWorkItems, 64 );
#else
					//debug path: replay a previously serialized kernel launch from disk
					const char* fileName = "m_batchSolveKernel.bin";
					FILE* f = fopen(fileName,"rb");
					if (f)
					{
						int sizeInBytes=0;
						if (fseek(f, 0, SEEK_END) || (sizeInBytes = ftell(f)) == EOF || fseek(f, 0, SEEK_SET))
						{
							printf("error, cannot get file size\n");
							exit(0);
						}
						unsigned char* buf = (unsigned char*) malloc(sizeInBytes);
						fread(buf,sizeInBytes,1,f);
						int serializedBytes = launcher.deserializeArgs(buf, sizeInBytes,m_context);
						int num = *(int*)&buf[serializedBytes];
						launcher.launch1D( num);

						//this clFinish is for testing on errors
						clFinish(m_queue);
					}
#endif

#ifdef DEBUG_ME
					clFinish(m_queue);
					gpuDebugInfo.read(debugInfo,numWorkItems);
					clFinish(m_queue);
					for (int i=0; i<numWorkItems; i++)
					{
						if (debugInfo[i].m_valInt2>0)
						{
							//NOTE(review): format string has one %d but two variadic args (i and the value) -- confirm intended output
							printf("debugInfo[i].m_valInt2 = %d\n",i,debugInfo[i].m_valInt2);
						}
						if (debugInfo[i].m_valInt3>0)
						{
							printf("debugInfo[i].m_valInt3 = %d\n",i,debugInfo[i].m_valInt3);
						}
					}
#endif //DEBUG_ME
				}
			}

			clFinish(m_queue);
		}

		//second pass: friction constraints (phase flag would be cdata.x = 1,
		//but only y/z/w are actually passed to the kernel)
		cdata.x = 1;
		bool applyFriction=true;
		if (applyFriction)
		{
			B3_PROFILE("m_batchSolveKernel iterations2");
			for(int iter=0; iter<m_nIterations; iter++)
			{
				for(int ib=0; ib<N_BATCHES; ib++)
				{
					cdata.z = ib;
					cdata.w = N_SPLIT;

					b3BufferInfoCL bInfo[] = {
						b3BufferInfoCL( bodyBuf->getBufferCL() ),
						b3BufferInfoCL( shapeBuf->getBufferCL() ),
						b3BufferInfoCL( constraint->getBufferCL() ),
						b3BufferInfoCL( m_numConstraints->getBufferCL() ),
						b3BufferInfoCL( m_offsets->getBufferCL() )
#ifdef DEBUG_ME
						,b3BufferInfoCL(&gpuDebugInfo)
#endif //DEBUG_ME
					};
					b3LauncherCL launcher( m_queue, m_solveFrictionKernel );
					launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
					//launcher.setConst(  cdata.x );
					launcher.setConst(  cdata.y );
					launcher.setConst(  cdata.z );
					launcher.setConst(  cdata.w );
					launcher.launch1D( 64*nn/N_BATCHES, 64 );
				}
			}
			clFinish(m_queue);
		}
#ifdef DEBUG_ME
		delete[] debugInfo;
#endif //DEBUG_ME
	}
}
///todo: add some acceleration structure (AABBs, tree etc)
///Cast a batch of rays against all rigid bodies on the GPU.
///Default path: build a parallel LBVH from the broadphase AABBs, collect
///candidate (ray, rigid) pairs, sort them by ray index, compute per-ray
///pair ranges, then run exact ray-vs-shape intersection per pair.
///hitResults is both input (its size defines the ray count) and output.
void b3GpuRaycast::castRays(const b3AlignedObjectArray<b3RayInfo>& rays, b3AlignedObjectArray<b3RayHit>& hitResults,
	int numBodies,const struct b3RigidBodyData* bodies, int numCollidables, const struct b3Collidable* collidables,
	const struct b3GpuNarrowPhaseInternalData* narrowphaseData, class b3GpuBroadphaseInterface* broadphase)
{
	//castRaysHost(rays,hitResults,numBodies,bodies,numCollidables,collidables,narrowphaseData);

	B3_PROFILE("castRaysGPU");

	{
		B3_PROFILE("raycast copyFromHost");
		m_data->m_gpuRays->copyFromHost(rays);
		m_data->m_gpuHitResults->copyFromHost(hitResults);
	}

	int numRays = hitResults.size();
	{
		//pair buffer capacity is a heuristic: up to 16 candidate rigids per ray
		m_data->m_firstRayRigidPairIndexPerRay->resize(numRays);
		m_data->m_numRayRigidPairsPerRay->resize(numRays);

		m_data->m_gpuNumRayRigidPairs->resize(1);
		m_data->m_gpuRayRigidPairs->resize(numRays * 16);
	}

	//run kernel
	const bool USE_BRUTE_FORCE_RAYCAST = false;
	if(USE_BRUTE_FORCE_RAYCAST)
	{
		B3_PROFILE("raycast launch1D");

		b3LauncherCL launcher(m_data->m_q,m_data->m_raytraceKernel,"m_raytraceKernel");
		//NOTE(review): shadows the outer numRays and uses rays.size() instead of
		//hitResults.size() -- confirm both are always equal (branch is currently dead)
		int numRays = rays.size();
		launcher.setConst(numRays);

		launcher.setBuffer(m_data->m_gpuRays->getBufferCL());
		launcher.setBuffer(m_data->m_gpuHitResults->getBufferCL());

		launcher.setConst(numBodies);
		launcher.setBuffer(narrowphaseData->m_bodyBufferGPU->getBufferCL());
		launcher.setBuffer(narrowphaseData->m_collidablesGPU->getBufferCL());
		launcher.setBuffer(narrowphaseData->m_convexFacesGPU->getBufferCL());
		launcher.setBuffer(narrowphaseData->m_convexPolyhedraGPU->getBufferCL());

		launcher.launch1D(numRays);
		clFinish(m_data->m_q);
	}
	else
	{
		//broadphase: BVH built from current AABBs, then rays vs BVH AABBs
		m_data->m_plbvh->build( broadphase->getAllAabbsGPU(), broadphase->getSmallAabbIndicesGPU(), broadphase->getLargeAabbIndicesGPU() );

		m_data->m_plbvh->testRaysAgainstBvhAabbs(*m_data->m_gpuRays, *m_data->m_gpuNumRayRigidPairs, *m_data->m_gpuRayRigidPairs);

		int numRayRigidPairs = -1;
		m_data->m_gpuNumRayRigidPairs->copyToHostPointer(&numRayRigidPairs, 1);
		//clamp overflow: the kernel may report more pairs than the buffer can hold
		if( numRayRigidPairs > m_data->m_gpuRayRigidPairs->size() )
		{
			numRayRigidPairs = m_data->m_gpuRayRigidPairs->size();
			m_data->m_gpuNumRayRigidPairs->copyFromHostPointer(&numRayRigidPairs, 1);
		}

		m_data->m_gpuRayRigidPairs->resize(numRayRigidPairs);	//Radix sort needs b3OpenCLArray::size() to be correct

		//Sort ray-rigid pairs by ray index
		{
			B3_PROFILE("sort ray-rigid pairs");
			//pairs are reinterpreted as (key,value) sort data: key = ray index
			m_data->m_radixSorter->execute( *reinterpret_cast< b3OpenCLArray<b3SortData>* >(m_data->m_gpuRayRigidPairs) );
		}

		//detect start,count of each ray pair
		{
			B3_PROFILE("detect ray-rigid pair index ranges");

			{
				B3_PROFILE("reset ray-rigid pair index ranges");

				m_data->m_fill->execute(*m_data->m_firstRayRigidPairIndexPerRay, numRayRigidPairs, numRays);	//atomic_min used to find first index
				m_data->m_fill->execute(*m_data->m_numRayRigidPairsPerRay, 0, numRays);
				clFinish(m_data->m_q);
			}

			b3BufferInfoCL bufferInfo[] =
			{
				b3BufferInfoCL( m_data->m_gpuRayRigidPairs->getBufferCL() ),

				b3BufferInfoCL( m_data->m_firstRayRigidPairIndexPerRay->getBufferCL() ),
				b3BufferInfoCL( m_data->m_numRayRigidPairsPerRay->getBufferCL() )
			};

			b3LauncherCL launcher(m_data->m_q, m_data->m_findRayRigidPairIndexRanges, "m_findRayRigidPairIndexRanges");
			launcher.setBuffers( bufferInfo, sizeof(bufferInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst(numRayRigidPairs);

			launcher.launch1D(numRayRigidPairs);
			clFinish(m_data->m_q);
		}

		//narrowphase: exact intersection per candidate pair, one work item per ray
		{
			B3_PROFILE("ray-rigid intersection");

			b3BufferInfoCL bufferInfo[] =
			{
				b3BufferInfoCL( m_data->m_gpuRays->getBufferCL() ),
				b3BufferInfoCL( m_data->m_gpuHitResults->getBufferCL() ),
				b3BufferInfoCL( m_data->m_firstRayRigidPairIndexPerRay->getBufferCL() ),
				b3BufferInfoCL( m_data->m_numRayRigidPairsPerRay->getBufferCL() ),

				b3BufferInfoCL( narrowphaseData->m_bodyBufferGPU->getBufferCL() ),
				b3BufferInfoCL( narrowphaseData->m_collidablesGPU->getBufferCL() ),
				b3BufferInfoCL( narrowphaseData->m_convexFacesGPU->getBufferCL() ),
				b3BufferInfoCL( narrowphaseData->m_convexPolyhedraGPU->getBufferCL() ),

				b3BufferInfoCL( m_data->m_gpuRayRigidPairs->getBufferCL() )
			};

			b3LauncherCL launcher(m_data->m_q, m_data->m_raytracePairsKernel, "m_raytracePairsKernel");
			launcher.setBuffers( bufferInfo, sizeof(bufferInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst(numRays);

			launcher.launch1D(numRays);
			clFinish(m_data->m_q);
		}
	}

	//copy results
	{
		B3_PROFILE("raycast copyToHost");
		m_data->m_gpuHitResults->copyToHost(hitResults);
	}
}
///Full GPU PGS contact solving pipeline:
///  (optional) deterministic sort of contacts -> spatial batching
///  (cell assignment, radix sort, bound search/scan, reorder) ->
///  contact-to-constraint conversion -> iterative constraint solve.
///Several g* globals switch individual stages between GPU kernels and CPU
///reference implementations (for debugging/validation).
///@param numBodies    number of rigid bodies in bodyBuf/inertiaBuf
///@param bodyBuf      OpenCL buffer of rigid body state (updated in place)
///@param inertiaBuf   OpenCL buffer of per-body inertia
///@param numContacts  number of contacts in contactBuf
///@param contactBuf   OpenCL buffer of b3Contact4
///@param static0Index index used to mark the static/fixed body
void b3GpuPgsContactSolver::solveContacts(int numBodies, cl_mem bodyBuf, cl_mem inertiaBuf, int numContacts, cl_mem contactBuf, const b3Config& config, int static0Index)
{
	B3_PROFILE("solveContacts");
	//wrap the raw cl_mem handles in b3OpenCLArray views (no copies)
	m_data->m_bodyBufferGPU->setFromOpenCLBuffer(bodyBuf,numBodies);
	m_data->m_inertiaBufferGPU->setFromOpenCLBuffer(inertiaBuf,numBodies);
	m_data->m_pBufContactOutGPU->setFromOpenCLBuffer(contactBuf,numContacts);

	if (optionalSortContactsDeterminism)
	{
		if (!gCpuSortContactsDeterminism)
		{
			//sort contacts by (childShapeB, childShapeA, bodyB, bodyA) via four
			//stable key-extraction + radix-sort passes, then reorder the contacts
			B3_PROFILE("GPU Sort contact constraints (determinism)");

			m_data->m_pBufContactOutGPUCopy->resize(numContacts);
			m_data->m_contactKeyValues->resize(numContacts);

			m_data->m_pBufContactOutGPU->copyToCL(m_data->m_pBufContactOutGPUCopy->getBufferCL(),numContacts,0,0);

			{
				b3LauncherCL launcher(m_data->m_queue, m_data->m_setDeterminismSortDataChildShapeBKernel,"m_setDeterminismSortDataChildShapeBKernel");
				launcher.setBuffer(m_data->m_pBufContactOutGPUCopy->getBufferCL());
				launcher.setBuffer(m_data->m_contactKeyValues->getBufferCL());
				launcher.setConst(numContacts);
				launcher.launch1D( numContacts, 64 );
			}
			m_data->m_solverGPU->m_sort32->execute(*m_data->m_contactKeyValues);
			{
				b3LauncherCL launcher(m_data->m_queue, m_data->m_setDeterminismSortDataChildShapeAKernel,"m_setDeterminismSortDataChildShapeAKernel");
				launcher.setBuffer(m_data->m_pBufContactOutGPUCopy->getBufferCL());
				launcher.setBuffer(m_data->m_contactKeyValues->getBufferCL());
				launcher.setConst(numContacts);
				launcher.launch1D( numContacts, 64 );
			}
			m_data->m_solverGPU->m_sort32->execute(*m_data->m_contactKeyValues);
			{
				b3LauncherCL launcher(m_data->m_queue, m_data->m_setDeterminismSortDataBodyBKernel,"m_setDeterminismSortDataBodyBKernel");
				launcher.setBuffer(m_data->m_pBufContactOutGPUCopy->getBufferCL());
				launcher.setBuffer(m_data->m_contactKeyValues->getBufferCL());
				launcher.setConst(numContacts);
				launcher.launch1D( numContacts, 64 );
			}
			m_data->m_solverGPU->m_sort32->execute(*m_data->m_contactKeyValues);
			{
				b3LauncherCL launcher(m_data->m_queue, m_data->m_setDeterminismSortDataBodyAKernel,"m_setDeterminismSortDataBodyAKernel");
				launcher.setBuffer(m_data->m_pBufContactOutGPUCopy->getBufferCL());
				launcher.setBuffer(m_data->m_contactKeyValues->getBufferCL());
				launcher.setConst(numContacts);
				launcher.launch1D( numContacts, 64 );
			}
			m_data->m_solverGPU->m_sort32->execute(*m_data->m_contactKeyValues);

			{
				//scatter the copied contacts back into contactBuf in sorted order
				B3_PROFILE("gpu reorderContactKernel (determinism)");

				b3Int4 cdata;
				cdata.x = numContacts;

				//b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_data->m_pBufContactOutGPU->getBufferCL() ), b3BufferInfoCL( m_data->m_solverGPU->m_contactBuffer2->getBufferCL())
				//	, b3BufferInfoCL( m_data->m_solverGPU->m_sortDataBuffer->getBufferCL()) };
				b3LauncherCL launcher(m_data->m_queue,m_data->m_solverGPU->m_reorderContactKernel,"m_reorderContactKernel");
				launcher.setBuffer(m_data->m_pBufContactOutGPUCopy->getBufferCL());
				launcher.setBuffer(m_data->m_pBufContactOutGPU->getBufferCL());
				launcher.setBuffer(m_data->m_contactKeyValues->getBufferCL());
				launcher.setConst( cdata );
				launcher.launch1D( numContacts, 64 );
			}
		}
		else
		{
			//CPU reference implementation of the deterministic sort
			B3_PROFILE("CPU Sort contact constraints (determinism)");

			b3AlignedObjectArray<b3Contact4> cpuConstraints;
			m_data->m_pBufContactOutGPU->copyToHost(cpuConstraints);
			bool sort = true;
			if (sort)
			{
				cpuConstraints.quickSort(b3ContactCmp);

				for (int i=0;i<cpuConstraints.size();i++)
				{
					cpuConstraints[i].m_batchIdx = i;
				}
			}
			m_data->m_pBufContactOutGPU->copyFromHost(cpuConstraints);
			if (m_debugOutput==100)
			{
				for (int i=0;i<cpuConstraints.size();i++)
				{
					printf("c[%d].m_bodyA = %d, m_bodyB = %d, batchId = %d\n",i,cpuConstraints[i].m_bodyAPtrAndSignBit,cpuConstraints[i].m_bodyBPtrAndSignBit, cpuConstraints[i].m_batchIdx);
				}
			}
			m_debugOutput++;
		}
	}

	int nContactOut = m_data->m_pBufContactOutGPU->size();

	bool useSolver = true;

	if (useSolver)
	{
		float dt=1./60.;
		b3ConstraintCfg csCfg( dt );
		csCfg.m_enableParallelSolve = true;
		csCfg.m_batchCellSize = 6;
		csCfg.m_staticIdx = static0Index;

		b3OpenCLArray<b3RigidBodyData>* bodyBuf = m_data->m_bodyBufferGPU;

		void* additionalData = 0;//m_data->m_frictionCGPU;
		const b3OpenCLArray<b3InertiaData>* shapeBuf = m_data->m_inertiaBufferGPU;
		b3OpenCLArray<b3GpuConstraint4>* contactConstraintOut = m_data->m_contactCGPU;
		int nContacts = nContactOut;

		int maxNumBatches = 0;

		if (!gUseLargeBatches)
		{
			//lazily (re)size the secondary contact buffer used for reordering
			if( m_data->m_solverGPU->m_contactBuffer2)
			{
				m_data->m_solverGPU->m_contactBuffer2->resize(nContacts);
			}

			if( m_data->m_solverGPU->m_contactBuffer2 == 0 )
			{
				m_data->m_solverGPU->m_contactBuffer2 = new b3OpenCLArray<b3Contact4>(m_data->m_context,m_data->m_queue, nContacts );
				m_data->m_solverGPU->m_contactBuffer2->resize(nContacts);
			}

			//clFinish(m_data->m_queue);

			{
				B3_PROFILE("batching");
				//@todo: just reserve it, without copy of original contact (unless we use warmstarting)

				const b3OpenCLArray<b3RigidBodyData>* bodyNative = bodyBuf;

				{
					//b3OpenCLArray<b3RigidBodyData>* bodyNative = b3OpenCLArrayUtils::map<adl::TYPE_CL, true>( data->m_device, bodyBuf );
					//b3OpenCLArray<b3Contact4>* contactNative = b3OpenCLArrayUtils::map<adl::TYPE_CL, true>( data->m_device, contactsIn );

					const int sortAlignment = 512; // todo. get this out of sort
					if( csCfg.m_enableParallelSolve )
					{
						int sortSize = B3NEXTMULTIPLEOF( nContacts, sortAlignment );

						b3OpenCLArray<unsigned int>* countsNative = m_data->m_solverGPU->m_numConstraints;
						b3OpenCLArray<unsigned int>* offsetsNative = m_data->m_solverGPU->m_offsets;

						if (!gCpuSetSortData)
						{
							// 2. set cell idx
							//assign each contact to a spatial grid cell, producing (cellIdx, contactIdx) sort data
							B3_PROFILE("GPU set cell idx");
							struct CB
							{
								int m_nContacts;
								int m_staticIdx;
								float m_scale;
								b3Int4 m_nSplit;
							};

							b3Assert( sortSize%64 == 0 );
							CB cdata;
							cdata.m_nContacts = nContacts;
							cdata.m_staticIdx = csCfg.m_staticIdx;
							cdata.m_scale = 1.f/csCfg.m_batchCellSize;
							cdata.m_nSplit.x = B3_SOLVER_N_SPLIT_X;
							cdata.m_nSplit.y = B3_SOLVER_N_SPLIT_Y;
							cdata.m_nSplit.z = B3_SOLVER_N_SPLIT_Z;

							m_data->m_solverGPU->m_sortDataBuffer->resize(nContacts);

							b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_data->m_pBufContactOutGPU->getBufferCL() ), b3BufferInfoCL( bodyBuf->getBufferCL()), b3BufferInfoCL( m_data->m_solverGPU->m_sortDataBuffer->getBufferCL()) };
							b3LauncherCL launcher(m_data->m_queue, m_data->m_solverGPU->m_setSortDataKernel,"m_setSortDataKernel" );
							launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
							launcher.setConst( cdata.m_nContacts );
							launcher.setConst( cdata.m_scale );
							launcher.setConst(cdata.m_nSplit);
							launcher.setConst(cdata.m_staticIdx);

							launcher.launch1D( sortSize, 64 );
						}
						else
						{
							//CPU reference implementation of cell assignment
							m_data->m_solverGPU->m_sortDataBuffer->resize(nContacts);
							b3AlignedObjectArray<b3SortData> sortDataCPU;
							m_data->m_solverGPU->m_sortDataBuffer->copyToHost(sortDataCPU);

							b3AlignedObjectArray<b3Contact4> contactCPU;
							m_data->m_pBufContactOutGPU->copyToHost(contactCPU);
							b3AlignedObjectArray<b3RigidBodyData> bodiesCPU;
							bodyBuf->copyToHost(bodiesCPU);
							float scale = 1.f/csCfg.m_batchCellSize;
							b3Int4 nSplit;
							nSplit.x = B3_SOLVER_N_SPLIT_X;
							nSplit.y = B3_SOLVER_N_SPLIT_Y;
							nSplit.z = B3_SOLVER_N_SPLIT_Z;

							SetSortDataCPU(&contactCPU[0], &bodiesCPU[0], &sortDataCPU[0], nContacts,scale,nSplit,csCfg.m_staticIdx);

							m_data->m_solverGPU->m_sortDataBuffer->copyFromHost(sortDataCPU);
						}

						if (!gCpuRadixSort)
						{
							// 3. sort by cell idx
							B3_PROFILE("gpuRadixSort");
							//int n = B3_SOLVER_N_SPLIT*B3_SOLVER_N_SPLIT;
							//int sortBit = 32;
							//if( n <= 0xffff ) sortBit = 16;
							//if( n <= 0xff ) sortBit = 8;
							//adl::RadixSort<adl::TYPE_CL>::execute( data->m_sort, *data->m_sortDataBuffer, sortSize );
							//adl::RadixSort32<adl::TYPE_CL>::execute( data->m_sort32, *data->m_sortDataBuffer, sortSize );
							b3OpenCLArray<b3SortData>& keyValuesInOut = *(m_data->m_solverGPU->m_sortDataBuffer);
							this->m_data->m_solverGPU->m_sort32->execute(keyValuesInOut);
						}
						else
						{
							//CPU fallback sort
							b3OpenCLArray<b3SortData>& keyValuesInOut = *(m_data->m_solverGPU->m_sortDataBuffer);
							b3AlignedObjectArray<b3SortData> hostValues;
							keyValuesInOut.copyToHost(hostValues);
							hostValues.quickSort(sortfnc);
							keyValuesInOut.copyFromHost(hostValues);
						}

						if (gUseScanHost)
						{
							// 4. find entries
							//per-cell contact counts (bound search) + prefix sum for offsets, on the host
							B3_PROFILE("cpuBoundSearch");

							b3AlignedObjectArray<unsigned int> countsHost;
							countsNative->copyToHost(countsHost);

							b3AlignedObjectArray<b3SortData> sortDataHost;
							m_data->m_solverGPU->m_sortDataBuffer->copyToHost(sortDataHost);

							//m_data->m_solverGPU->m_search->executeHost(*m_data->m_solverGPU->m_sortDataBuffer,nContacts,*countsNative,B3_SOLVER_N_CELLS,b3BoundSearchCL::COUNT);
							m_data->m_solverGPU->m_search->executeHost(sortDataHost,nContacts,countsHost,B3_SOLVER_N_CELLS,b3BoundSearchCL::COUNT);

							countsNative->copyFromHost(countsHost);

							//adl::BoundSearch<adl::TYPE_CL>::execute( data->m_search, *data->m_sortDataBuffer, nContacts, *countsNative,
							//	B3_SOLVER_N_SPLIT*B3_SOLVER_N_SPLIT, adl::BoundSearchBase::COUNT );

							//unsigned int sum;
							//m_data->m_solverGPU->m_scan->execute(*countsNative,*offsetsNative, B3_SOLVER_N_CELLS);//,&sum );
							b3AlignedObjectArray<unsigned int> offsetsHost;
							offsetsHost.resize(offsetsNative->size());

							m_data->m_solverGPU->m_scan->executeHost(countsHost,offsetsHost, B3_SOLVER_N_CELLS);//,&sum );
							offsetsNative->copyFromHost(offsetsHost);

							//printf("sum = %d\n",sum);
						}
						else
						{
							// 4. find entries
							//same as above, but bound search and scan stay on the GPU
							B3_PROFILE("gpuBoundSearch");
							m_data->m_solverGPU->m_search->execute(*m_data->m_solverGPU->m_sortDataBuffer,nContacts,*countsNative,B3_SOLVER_N_CELLS,b3BoundSearchCL::COUNT);
							m_data->m_solverGPU->m_scan->execute(*countsNative,*offsetsNative, B3_SOLVER_N_CELLS);//,&sum );
						}

						if (nContacts)
						{
							// 5. sort constraints by cellIdx
							//gather contacts into contactBuffer2 following the sorted order
							if (gReorderContactsOnCpu)
							{
								B3_PROFILE("cpu m_reorderContactKernel");
								b3AlignedObjectArray<b3SortData> sortDataHost;
								m_data->m_solverGPU->m_sortDataBuffer->copyToHost(sortDataHost);
								b3AlignedObjectArray<b3Contact4> inContacts;
								b3AlignedObjectArray<b3Contact4> outContacts;
								m_data->m_pBufContactOutGPU->copyToHost(inContacts);
								outContacts.resize(inContacts.size());
								for (int i=0;i<nContacts;i++)
								{
									int srcIdx = sortDataHost[i].y;
									outContacts[i] = inContacts[srcIdx];
								}
								m_data->m_solverGPU->m_contactBuffer2->copyFromHost(outContacts);

								/*	"void ReorderContactKernel(__global struct b3Contact4Data* in, __global struct b3Contact4Data* out, __global int2* sortData, int4 cb )\n"
								"{\n"
								"	int nContacts = cb.x;\n"
								"	int gIdx = GET_GLOBAL_IDX;\n"
								"	if( gIdx < nContacts )\n"
								"	{\n"
								"		int srcIdx = sortData[gIdx].y;\n"
								"		out[gIdx] = in[srcIdx];\n"
								"	}\n"
								"}\n"
								*/
							}
							else
							{
								B3_PROFILE("gpu m_reorderContactKernel");

								b3Int4 cdata;
								cdata.x = nContacts;

								b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_data->m_pBufContactOutGPU->getBufferCL() ), b3BufferInfoCL( m_data->m_solverGPU->m_contactBuffer2->getBufferCL())
									, b3BufferInfoCL( m_data->m_solverGPU->m_sortDataBuffer->getBufferCL()) };
								b3LauncherCL launcher(m_data->m_queue,m_data->m_solverGPU->m_reorderContactKernel,"m_reorderContactKernel");
								launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
								launcher.setConst( cdata );
								launcher.launch1D( nContacts, 64 );
							}
						}
					}
				}

				//clFinish(m_data->m_queue);

				//			{
				//				b3AlignedObjectArray<unsigned int> histogram;
				//				m_data->m_solverGPU->m_numConstraints->copyToHost(histogram);
				//				printf(",,,\n");
				//			}

				if (nContacts)
				{
					//copy the reordered contacts from contactBuffer2 back into the primary buffer
					if (gUseCpuCopyConstraints)
					{
						for (int i=0;i<nContacts;i++)
						{
							m_data->m_pBufContactOutGPU->copyFromOpenCLArray(*m_data->m_solverGPU->m_contactBuffer2);
							//						m_data->m_solverGPU->m_contactBuffer2->getBufferCL();
							//						m_data->m_pBufContactOutGPU->getBufferCL()
						}
					}
					else
					{
						B3_PROFILE("gpu m_copyConstraintKernel");
						b3Int4 cdata;
						cdata.x = nContacts;
						b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_data->m_solverGPU->m_contactBuffer2->getBufferCL() ), b3BufferInfoCL( m_data->m_pBufContactOutGPU->getBufferCL() ) };

						b3LauncherCL launcher(m_data->m_queue, m_data->m_solverGPU->m_copyConstraintKernel,"m_copyConstraintKernel" );
						launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
						launcher.setConst( cdata );
						launcher.launch1D( nContacts, 64 );
						//we use the clFinish for proper benchmark/profile
						clFinish(m_data->m_queue);
					}
				}

				bool compareGPU = false;
				if (nContacts)
				{
					if (!gCpuBatchContacts)
					{
						B3_PROFILE("gpu batchContacts");
						maxNumBatches = 150;//250;
						m_data->m_solverGPU->batchContacts( m_data->m_pBufContactOutGPU, nContacts, m_data->m_solverGPU->m_numConstraints, m_data->m_solverGPU->m_offsets, csCfg.m_staticIdx );
						clFinish(m_data->m_queue);
					}
					else
					{
						//CPU batching: group contacts per cell so no two contacts in a batch share a body
						B3_PROFILE("cpu batchContacts");
						static b3AlignedObjectArray<b3Contact4> cpuContacts;
						b3OpenCLArray<b3Contact4>* contactsIn = m_data->m_solverGPU->m_contactBuffer2;
						{
							B3_PROFILE("copyToHost");
							contactsIn->copyToHost(cpuContacts);
						}
						b3OpenCLArray<unsigned int>* countsNative = m_data->m_solverGPU->m_numConstraints;
						b3OpenCLArray<unsigned int>* offsetsNative = m_data->m_solverGPU->m_offsets;

						b3AlignedObjectArray<unsigned int> nNativeHost;
						b3AlignedObjectArray<unsigned int> offsetsNativeHost;

						{
							B3_PROFILE("countsNative/offsetsNative copyToHost");
							countsNative->copyToHost(nNativeHost);
							offsetsNative->copyToHost(offsetsNativeHost);
						}

						int numNonzeroGrid=0;

						if (gUseLargeBatches)
						{
							m_data->m_batchSizes.resize(B3_MAX_NUM_BATCHES);
							int totalNumConstraints = cpuContacts.size();
							int simdWidth =numBodies+1;//-1;//64;//-1;//32;
							int numBatches = sortConstraintByBatch3( &cpuContacts[0], totalNumConstraints, totalNumConstraints+1,csCfg.m_staticIdx ,numBodies,&m_data->m_batchSizes[0]);	//	on GPU
							maxNumBatches = b3Max(numBatches,maxNumBatches);
							static int globalMaxBatch = 0;
							if (maxNumBatches>globalMaxBatch )
							{
								globalMaxBatch  = maxNumBatches;
								b3Printf("maxNumBatches = %d\n",maxNumBatches);
							}
						}
						else
						{
							m_data->m_batchSizes.resize(B3_SOLVER_N_CELLS*B3_MAX_NUM_BATCHES);
							B3_PROFILE("cpu batch grid");
							for(int i=0; i<B3_SOLVER_N_CELLS; i++)
							{
								int n = (nNativeHost)[i];
								int offset = (offsetsNativeHost)[i];
								if( n )
								{
									numNonzeroGrid++;
									int simdWidth =numBodies+1;//-1;//64;//-1;//32;
									int numBatches = sortConstraintByBatch3( &cpuContacts[0]+offset, n, simdWidth,csCfg.m_staticIdx ,numBodies,&m_data->m_batchSizes[i*B3_MAX_NUM_BATCHES]);	//	on GPU
									maxNumBatches = b3Max(numBatches,maxNumBatches);
									static int globalMaxBatch = 0;
									if (maxNumBatches>globalMaxBatch )
									{
										globalMaxBatch  = maxNumBatches;
										b3Printf("maxNumBatches = %d\n",maxNumBatches);
									}
									//we use the clFinish for proper benchmark/profile
								}
							}
							//clFinish(m_data->m_queue);
						}
						{
							B3_PROFILE("m_contactBuffer->copyFromHost");
							m_data->m_solverGPU->m_contactBuffer2->copyFromHost((b3AlignedObjectArray<b3Contact4>&)cpuContacts);
						}
					}
				}
			}
		}

		//printf("maxNumBatches = %d\n", maxNumBatches);

		if (gUseLargeBatches)
		{
			//large-batch mode skips the grid: batch all contacts as one big set on the CPU
			if (nContacts)
			{
				B3_PROFILE("cpu batchContacts");
				static b3AlignedObjectArray<b3Contact4> cpuContacts;
				//				b3OpenCLArray<b3Contact4>* contactsIn = m_data->m_solverGPU->m_contactBuffer2;
				{
					B3_PROFILE("copyToHost");
					m_data->m_pBufContactOutGPU->copyToHost(cpuContacts);
				}
				b3OpenCLArray<unsigned int>* countsNative = m_data->m_solverGPU->m_numConstraints;
				b3OpenCLArray<unsigned int>* offsetsNative = m_data->m_solverGPU->m_offsets;

				int numNonzeroGrid=0;

				{
					m_data->m_batchSizes.resize(B3_MAX_NUM_BATCHES);
					int totalNumConstraints = cpuContacts.size();
					int simdWidth =numBodies+1;//-1;//64;//-1;//32;
					int numBatches = sortConstraintByBatch3( &cpuContacts[0], totalNumConstraints, totalNumConstraints+1,csCfg.m_staticIdx ,numBodies,&m_data->m_batchSizes[0]);	//	on GPU
					maxNumBatches = b3Max(numBatches,maxNumBatches);
					static int globalMaxBatch = 0;
					if (maxNumBatches>globalMaxBatch )
					{
						globalMaxBatch  = maxNumBatches;
						b3Printf("maxNumBatches = %d\n",maxNumBatches);
					}
				}
				{
					B3_PROFILE("m_contactBuffer->copyFromHost");
					m_data->m_solverGPU->m_contactBuffer2->copyFromHost((b3AlignedObjectArray<b3Contact4>&)cpuContacts);
				}
			}
		}

		if (nContacts)
		{
			//convert batched contacts into solver constraints on the GPU
			B3_PROFILE("gpu convertToConstraints");
			m_data->m_solverGPU->convertToConstraints( bodyBuf,
				shapeBuf, m_data->m_solverGPU->m_contactBuffer2,
				contactConstraintOut,
				additionalData, nContacts,
				(b3SolverBase::ConstraintCfg&) csCfg );
			clFinish(m_data->m_queue);
		}

		if (1)
		{
			int numIter = 4;

			m_data->m_solverGPU->m_nIterations = numIter;//10
			if (!gCpuSolveConstraint)
			{
				B3_PROFILE("GPU solveContactConstraint");

				/*m_data->m_solverGPU->solveContactConstraint(
				m_data->m_bodyBufferGPU,
				m_data->m_inertiaBufferGPU,
				m_data->m_contactCGPU,0,
				nContactOut ,
				maxNumBatches);
				*/

				//m_data->m_batchSizesGpu->copyFromHost(m_data->m_batchSizes);

				if (gUseLargeBatches)
				{
					solveContactConstraintBatchSizes(m_data->m_bodyBufferGPU,
						m_data->m_inertiaBufferGPU,
						m_data->m_contactCGPU,0,
						nContactOut ,
						maxNumBatches,numIter,&m_data->m_batchSizes);
				}
				else
				{
					solveContactConstraint(
						m_data->m_bodyBufferGPU,
						m_data->m_inertiaBufferGPU,
						m_data->m_contactCGPU,0,
						nContactOut ,
						maxNumBatches,numIter,&m_data->m_batchSizes);//m_data->m_batchSizesGpu);
				}
			}
			else
			{
				B3_PROFILE("Host solveContactConstraint");

				m_data->m_solverGPU->solveContactConstraintHost(m_data->m_bodyBufferGPU, m_data->m_inertiaBufferGPU, m_data->m_contactCGPU,0, nContactOut ,maxNumBatches,&m_data->m_batchSizes);
			}
		}

#if 0
		if (0)
		{
			B3_PROFILE("read body velocities back to CPU");
			//read body updated linear/angular velocities back to CPU
			m_data->m_bodyBufferGPU->read(
				m_data->m_bodyBufferCPU->m_ptr,numOfConvexRBodies);
			adl::DeviceUtils::waitForCompletion( m_data->m_deviceCL );
		}
#endif
	}
}
///Compute AABB overlap pairs on the GPU.
///Large-vs-small pairs come from a brute-force 2D sweep (sap2Kernel);
///small-vs-small pairs come from a uniform hash grid (hash, sort, cell
///ranges, then per-AABB neighborhood query). Results go into m_gpuPairs and
///the shared atomic pair counter, capped at maxPairs.
void b3GpuGridBroadphase::calculateOverlappingPairs(int maxPairs)
{
	B3_PROFILE("b3GpuGridBroadphase::calculateOverlappingPairs");

	//disabled debug path: CPU reference implementation
	if (0)
	{
		calculateOverlappingPairsHost(maxPairs);

		/*
		b3AlignedObjectArray<b3Int4> cpuPairs;
		m_gpuPairs.copyToHost(cpuPairs);
		printf("host m_gpuPairs.size()=%d\n",m_gpuPairs.size());
		for (int i=0;i<m_gpuPairs.size();i++)
		{
			printf("host pair %d = %d,%d\n",i,cpuPairs[i].x,cpuPairs[i].y);
		}
		*/
		return;
	}

	int numSmallAabbs = m_smallAabbsMappingGPU.size();

	//single-element atomic counter shared by both kernels below
	b3OpenCLArray<int> pairCount(m_context,m_queue);
	pairCount.push_back(0);
	m_gpuPairs.resize(maxPairs);//numSmallAabbs*maxPairsPerBody);

	{
		//large-vs-small AABB pairs (brute force, one work item per pair)
		int numLargeAabbs = m_largeAabbsMappingGPU.size();
		if (numLargeAabbs && numSmallAabbs)
		{
			B3_PROFILE("sap2Kernel");
			b3BufferInfoCL bInfo[] = { b3BufferInfoCL( m_allAabbsGPU1.getBufferCL() ), b3BufferInfoCL( m_largeAabbsMappingGPU.getBufferCL() ), b3BufferInfoCL( m_smallAabbsMappingGPU.getBufferCL() ), b3BufferInfoCL( m_gpuPairs.getBufferCL() ), b3BufferInfoCL(pairCount.getBufferCL())};
			b3LauncherCL launcher(m_queue, m_sap2Kernel,"m_sap2Kernel");
			launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst(   numLargeAabbs  );
			launcher.setConst( numSmallAabbs);
			launcher.setConst( 0 );//axis is not used
			launcher.setConst( maxPairs );
			//@todo: use actual maximum work item sizes of the device instead of hardcoded values
			launcher.launch2D( numLargeAabbs, numSmallAabbs,4,64);

			//NOTE(review): this clamp only adjusts the local variable; the
			//result is not written back or used here -- the final resize below
			//uses the (possibly re-clamped) grid-kernel count instead.
			int numPairs = pairCount.at(0);
			if (numPairs >maxPairs)
			{
				b3Error("Error running out of pairs: numPairs = %d, maxPairs = %d.\n", numPairs, maxPairs);
				numPairs =maxPairs;
			}
		}
	}

	if (numSmallAabbs)
	{
		B3_PROFILE("gridKernel");
		m_hashGpu.resize(numSmallAabbs);
		{
			//compute (cellHash, aabbIndex) per small AABB
			B3_PROFILE("kCalcHashAABB");
			b3LauncherCL launch(m_queue,kCalcHashAABB,"kCalcHashAABB");
			launch.setConst(numSmallAabbs);
			launch.setBuffer(m_allAabbsGPU1.getBufferCL());
			launch.setBuffer(m_smallAabbsMappingGPU.getBufferCL());
			launch.setBuffer(m_hashGpu.getBufferCL());
			launch.setBuffer(this->m_paramsGPU.getBufferCL());
			launch.launch1D(numSmallAabbs);
		}

		//sort entries by cell hash so each cell's AABBs are contiguous
		m_sorter->execute(m_hashGpu);

		int numCells = this->m_paramsCPU.m_gridSize[0]*this->m_paramsCPU.m_gridSize[1]*this->m_paramsCPU.m_gridSize[2];
		m_cellStartGpu.resize(numCells);
		//b3AlignedObjectArray<int >  cellStartCpu;

		{
			B3_PROFILE("kClearCellStart");
			b3LauncherCL launch(m_queue,kClearCellStart,"kClearCellStart");
			launch.setConst(numCells);
			launch.setBuffer(m_cellStartGpu.getBufferCL());
			launch.launch1D(numCells);
			//m_cellStartGpu.copyToHost(cellStartCpu);
			//printf("??\n");
		}

		{
			//record, for each cell, the index of its first sorted hash entry
			B3_PROFILE("kFindCellStart");
			b3LauncherCL launch(m_queue,kFindCellStart,"kFindCellStart");
			launch.setConst(numSmallAabbs);
			launch.setBuffer(m_hashGpu.getBufferCL());
			launch.setBuffer(m_cellStartGpu.getBufferCL());
			launch.launch1D(numSmallAabbs);
			//m_cellStartGpu.copyToHost(cellStartCpu);
			//printf("??\n");
		}

		{
			//test each small AABB against the AABBs in its neighboring cells
			B3_PROFILE("kFindOverlappingPairs");
			b3LauncherCL launch(m_queue,kFindOverlappingPairs,"kFindOverlappingPairs");
			launch.setConst(numSmallAabbs);
			launch.setBuffer(m_allAabbsGPU1.getBufferCL());
			launch.setBuffer(m_smallAabbsMappingGPU.getBufferCL());
			launch.setBuffer(m_hashGpu.getBufferCL());
			launch.setBuffer(m_cellStartGpu.getBufferCL());
			launch.setBuffer(m_paramsGPU.getBufferCL());
			//launch.setBuffer(0);
			launch.setBuffer(pairCount.getBufferCL());
			launch.setBuffer(m_gpuPairs.getBufferCL());
			launch.setConst(maxPairs);
			launch.launch1D(numSmallAabbs);

			//shrink the pair array to the actual (clamped) number of pairs found
			int numPairs = pairCount.at(0);
			if (numPairs >maxPairs)
			{
				b3Error("Error running out of pairs: numPairs = %d, maxPairs = %d.\n", numPairs, maxPairs);
				numPairs =maxPairs;
			}

			m_gpuPairs.resize(numPairs);

			if (0)
			{
				b3AlignedObjectArray<b3Int4> pairsCpu;
				m_gpuPairs.copyToHost(pairsCpu);

				printf("m_gpuPairs.size()=%d\n",m_gpuPairs.size());
				for (int i=0;i<m_gpuPairs.size();i++)
				{
					printf("pair %d = %d,%d\n",i,pairsCpu[i].x,pairsCpu[i].y);
				}

				printf("?!?\n");
			}
		}
	}

	//calculateOverlappingPairsHost(maxPairs);
}
// Advances the particle simulation one step directly inside the renderer's VBO:
// maps the position region of the instancing VBO, mirrors it into an OpenCL
// buffer (created lazily on first call), runs the GPU pipeline
// (integrate -> update AABBs -> broadphase -> collide), reads the positions
// back into the mapped memory, and unmaps.
// NOTE(review): positions appear to live at byte offset getMaxShapeCapacity()
// in the VBO, one float4 per particle -- confirm against the renderer layout.
void ParticleDemo::clientMoveAndDisplay()
{
	int numParticles = NUM_PARTICLES_X*NUM_PARTICLES_Y*NUM_PARTICLES_Z;
	GLuint vbo = m_instancingRenderer->getInternalData()->m_vbo;

	glBindBuffer(GL_ARRAY_BUFFER, vbo);
	glFlush();

	// One float4 position per particle.
	int posArraySize = numParticles*sizeof(float)*4;

	cl_bool blocking= CL_TRUE;
	// Map only the position section of the VBO, both readable and writable.
	char* hostPtr= (char*)glMapBufferRange( GL_ARRAY_BUFFER,m_instancingRenderer->getMaxShapeCapacity(),posArraySize, GL_MAP_WRITE_BIT|GL_MAP_READ_BIT );//GL_READ_WRITE);//GL_WRITE_ONLY
	GLint err = glGetError();
	assert(err==GL_NO_ERROR);
	// Make sure GL has finished with the buffer before CPU/CL touches the mapping.
	glFinish();

#if 1
	//do some stuff using the OpenCL buffer
	bool useCpu = false;
	if (useCpu)
	{
		// CPU reference path: drift every particle upward by a fixed amount.
		float* posBuffer = (float*)hostPtr;
		for (int i=0;i<numParticles;i++)
		{
			posBuffer[i*4+1] += 0.1;
		}
	}
	else
	{
		cl_int ciErrNum;
		if (!m_data->m_clPositionBuffer)
		{
			// Lazily create the CL-side position buffer and seed it once from
			// the currently-mapped VBO contents (blocking write).
			m_data->m_clPositionBuffer = clCreateBuffer(m_clData->m_clContext, CL_MEM_READ_WRITE, posArraySize, 0, &ciErrNum);
			clFinish(m_clData->m_clQueue);
			oclCHECKERROR(ciErrNum, CL_SUCCESS);
			ciErrNum = clEnqueueWriteBuffer ( m_clData->m_clQueue,m_data->m_clPositionBuffer, blocking,0,posArraySize,hostPtr,0,0,0 );
			clFinish(m_clData->m_clQueue);
		}

		if (0)
		{
			// Disabled alternative position-update kernel.
			b3BufferInfoCL bInfo[] = {
				b3BufferInfoCL( m_data->m_velocitiesGPU->getBufferCL(), true ),
				b3BufferInfoCL( m_data->m_clPositionBuffer)
			};
			b3LauncherCL launcher(m_clData->m_clQueue, m_data->m_updatePositionsKernel );
			launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst( numParticles);
			launcher.launch1D( numParticles);
			clFinish(m_clData->m_clQueue);
		}

		if (1)
		{
			// Integrate positions/velocities with a fixed 1/60s timestep.
			b3BufferInfoCL bInfo[] = {
				b3BufferInfoCL( m_data->m_clPositionBuffer),
				b3BufferInfoCL( m_data->m_velocitiesGPU->getBufferCL() ),
				b3BufferInfoCL( m_data->m_simParamGPU->getBufferCL(),true)
			};
			b3LauncherCL launcher(m_clData->m_clQueue, m_data->m_updatePositionsKernel2 );
			launcher.setConst( numParticles);
			launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
			float timeStep = 1.f/60.f;
			launcher.setConst( timeStep);
			launcher.launch1D( numParticles);
			clFinish(m_clData->m_clQueue);
		}

		{
			// Rebuild world-space AABBs from the new positions for the broadphase.
			b3BufferInfoCL bInfo[] = {
				b3BufferInfoCL( m_data->m_clPositionBuffer),
				b3BufferInfoCL( m_data->m_broadphaseGPU->getAabbBufferWS()),
			};
			b3LauncherCL launcher(m_clData->m_clQueue, m_data->m_updateAabbsKernel );
			launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst( m_data->m_simParamCPU[0].m_particleRad);
			launcher.setConst( numParticles);
			launcher.launch1D( numParticles);
			clFinish(m_clData->m_clQueue);
		}

		//broadphase
		int numPairsGPU=0;
		cl_mem pairsGPU = 0;
		{
			// Pair budget of 64 per particle -- TODO confirm this bound is sufficient.
			m_data->m_broadphaseGPU->calculateOverlappingPairs(64*numParticles);
			pairsGPU = m_data->m_broadphaseGPU->getOverlappingPairBuffer();
			numPairsGPU = m_data->m_broadphaseGPU->getNumOverlap();
		}

		if (numPairsGPU)
		{
			// Resolve particle-particle collisions for every overlapping pair.
			b3BufferInfoCL bInfo[] = {
				b3BufferInfoCL( m_data->m_clPositionBuffer),
				b3BufferInfoCL( m_data->m_velocitiesGPU->getBufferCL() ),
				b3BufferInfoCL( m_data->m_broadphaseGPU->getOverlappingPairBuffer(),true),
			};
			b3LauncherCL launcher(m_clData->m_clQueue, m_data->m_collideParticlesKernel);
			launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) );
			launcher.setConst( numPairsGPU);
			launcher.launch1D( numPairsGPU);
			clFinish(m_clData->m_clQueue);
			//__kernel void collideParticlesKernel( __global float4* pPos, __global float4* pVel, __global int2* pairs, const int numPairs)
		}

		if (1)
		{
			// Copy the simulated positions back into the mapped VBO memory (blocking read).
			ciErrNum = clEnqueueReadBuffer ( m_clData->m_clQueue, m_data->m_clPositionBuffer, blocking, 0, posArraySize, hostPtr,0,0,0);
			//clReleaseMemObject(clBuffer);
			clFinish(m_clData->m_clQueue);
		}
	}
#endif

	glUnmapBuffer( GL_ARRAY_BUFFER);
	glFlush();

	/*
	int numParticles = NUM_PARTICLES_X*NUM_PARTICLES_Y*NUM_PARTICLES_Z;
	for (int objectIndex=0;objectIndex<numParticles;objectIndex++)
	{
		float pos[4]={0,0,0,0};
		float orn[4]={0,0,0,1};
		// m_instancingRenderer->writeSingleInstanceTransformToGPU(pos,orn,i);
		{
			glBindBuffer(GL_ARRAY_BUFFER, m_instancingRenderer->getInternalData()->m_vbo);
			glFlush();
			char* orgBase = (char*)glMapBuffer( GL_ARRAY_BUFFER,GL_READ_WRITE);
			//b3GraphicsInstance* gfxObj = m_graphicsInstances[k];
			int totalNumInstances= numParticles;
			int POSITION_BUFFER_SIZE = (totalNumInstances*sizeof(float)*4);
			char* base = orgBase;
			int capInBytes = m_instancingRenderer->getMaxShapeCapacity();
			float* positions = (float*)(base+capInBytes);
			float* orientations = (float*)(base+capInBytes+ POSITION_BUFFER_SIZE);
			positions[objectIndex*4+1] += 0.1f;
			glUnmapBuffer( GL_ARRAY_BUFFER);
			glFlush();
		}
	}
	*/
}
void b3RadixSort32CL::execute(b3OpenCLArray<b3SortData>& keyValuesInOut, int sortBits /* = 32 */) { int originalSize = keyValuesInOut.size(); int workingSize = originalSize; int dataAlignment = DATA_ALIGNMENT; #ifdef DEBUG_RADIXSORT2 b3AlignedObjectArray<b3SortData> test2; keyValuesInOut.copyToHost(test2); printf("numElem = %d\n",test2.size()); for (int i=0;i<test2.size();i++) { printf("test2[%d].m_key=%d\n",i,test2[i].m_key); printf("test2[%d].m_value=%d\n",i,test2[i].m_value); } #endif //DEBUG_RADIXSORT2 b3OpenCLArray<b3SortData>* src = 0; if (workingSize%dataAlignment) { workingSize += dataAlignment-(workingSize%dataAlignment); m_workBuffer4->copyFromOpenCLArray(keyValuesInOut); m_workBuffer4->resize(workingSize); b3SortData fillValue; fillValue.m_key = 0xffffffff; fillValue.m_value = 0xffffffff; #define USE_BTFILL #ifdef USE_BTFILL m_fill->execute((b3OpenCLArray<b3Int2>&)*m_workBuffer4,(b3Int2&)fillValue,workingSize-originalSize,originalSize); #else //fill the remaining bits (very slow way, todo: fill on GPU/OpenCL side) for (int i=originalSize; i<workingSize;i++) { m_workBuffer4->copyFromHostPointer(&fillValue,1,i); } #endif//USE_BTFILL src = m_workBuffer4; } else { src = &keyValuesInOut; m_workBuffer4->resize(0); } b3Assert( workingSize%DATA_ALIGNMENT == 0 ); int minCap = NUM_BUCKET*NUM_WGS; int n = workingSize; m_workBuffer1->resize(minCap); m_workBuffer3->resize(workingSize); // ADLASSERT( ELEMENTS_PER_WORK_ITEM == 4 ); b3Assert( BITS_PER_PASS == 4 ); b3Assert( WG_SIZE == 64 ); b3Assert( (sortBits&0x3) == 0 ); b3OpenCLArray<b3SortData>* dst = m_workBuffer3; b3OpenCLArray<unsigned int>* srcHisto = m_workBuffer1; b3OpenCLArray<unsigned int>* destHisto = m_workBuffer2; int nWGs = NUM_WGS; b3ConstData cdata; { int blockSize = ELEMENTS_PER_WORK_ITEM*WG_SIZE;//set at 256 int nBlocks = (n+blockSize-1)/(blockSize); cdata.m_n = n; cdata.m_nWGs = NUM_WGS; cdata.m_startBit = 0; cdata.m_nBlocksPerWG = (nBlocks + cdata.m_nWGs - 1)/cdata.m_nWGs; if( nBlocks < NUM_WGS ) { 
cdata.m_nBlocksPerWG = 1; nWGs = nBlocks; } } int count=0; for(int ib=0; ib<sortBits; ib+=4) { #ifdef DEBUG_RADIXSORT2 keyValuesInOut.copyToHost(test2); printf("numElem = %d\n",test2.size()); for (int i=0;i<test2.size();i++) { if (test2[i].m_key != test2[i].m_value) { printf("test2[%d].m_key=%d\n",i,test2[i].m_key); printf("test2[%d].m_value=%d\n",i,test2[i].m_value); } } #endif //DEBUG_RADIXSORT2 cdata.m_startBit = ib; if (src->size()) { b3BufferInfoCL bInfo[] = { b3BufferInfoCL( src->getBufferCL(), true ), b3BufferInfoCL( srcHisto->getBufferCL() ) }; b3LauncherCL launcher(m_commandQueue, m_streamCountSortDataKernel); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( cdata ); int num = NUM_WGS*WG_SIZE; launcher.launch1D( num, WG_SIZE ); } #ifdef DEBUG_RADIXSORT b3AlignedObjectArray<unsigned int> testHist; srcHisto->copyToHost(testHist); printf("ib = %d, testHist size = %d, non zero elements:\n",ib, testHist.size()); for (int i=0;i<testHist.size();i++) { if (testHist[i]!=0) printf("testHist[%d]=%d\n",i,testHist[i]); } #endif //DEBUG_RADIXSORT //fast prefix scan is not working properly on Mac OSX yet #ifdef _WIN32 bool fastScan=!m_deviceCPU;//only use fast scan on GPU #else bool fastScan=false; #endif if (fastScan) {// prefix scan group histogram b3BufferInfoCL bInfo[] = { b3BufferInfoCL( srcHisto->getBufferCL() ) }; b3LauncherCL launcher( m_commandQueue, m_prefixScanKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( cdata ); launcher.launch1D( 128, 128 ); destHisto = srcHisto; }else { //unsigned int sum; //for debugging m_scan->execute(*srcHisto,*destHisto,1920,0);//,&sum); } #ifdef DEBUG_RADIXSORT destHisto->copyToHost(testHist); printf("ib = %d, testHist size = %d, non zero elements:\n",ib, testHist.size()); for (int i=0;i<testHist.size();i++) { if (testHist[i]!=0) printf("testHist[%d]=%d\n",i,testHist[i]); } for (int i=0;i<testHist.size();i+=NUM_WGS) { 
printf("testHist[%d]=%d\n",i/NUM_WGS,testHist[i]); } #endif //DEBUG_RADIXSORT #define USE_GPU #ifdef USE_GPU if (src->size()) {// local sort and distribute b3BufferInfoCL bInfo[] = { b3BufferInfoCL( src->getBufferCL(), true ), b3BufferInfoCL( destHisto->getBufferCL(), true ), b3BufferInfoCL( dst->getBufferCL() )}; b3LauncherCL launcher( m_commandQueue, m_sortAndScatterSortDataKernel ); launcher.setBuffers( bInfo, sizeof(bInfo)/sizeof(b3BufferInfoCL) ); launcher.setConst( cdata ); launcher.launch1D( nWGs*WG_SIZE, WG_SIZE ); } #else { #define NUM_TABLES 16 //#define SEQUENTIAL #ifdef SEQUENTIAL int counter2[NUM_TABLES]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; int tables[NUM_TABLES]; int startBit = ib; destHisto->copyToHost(testHist); b3AlignedObjectArray<b3SortData> srcHost; b3AlignedObjectArray<b3SortData> dstHost; dstHost.resize(src->size()); src->copyToHost(srcHost); for (int i=0;i<NUM_TABLES;i++) { tables[i] = testHist[i*NUM_WGS]; } // distribute for(int i=0; i<n; i++) { int tableIdx = (srcHost[i].m_key >> startBit) & (NUM_TABLES-1); dstHost[tables[tableIdx] + counter2[tableIdx]] = srcHost[i]; counter2[tableIdx] ++; } #else int counter2[NUM_TABLES]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; int tables[NUM_TABLES]; b3AlignedObjectArray<b3SortData> dstHostOK; dstHostOK.resize(src->size()); destHisto->copyToHost(testHist); b3AlignedObjectArray<b3SortData> srcHost; src->copyToHost(srcHost); int blockSize = 256; int nBlocksPerWG = cdata.m_nBlocksPerWG; int startBit = ib; { for (int i=0;i<NUM_TABLES;i++) { tables[i] = testHist[i*NUM_WGS]; } // distribute for(int i=0; i<n; i++) { int tableIdx = (srcHost[i].m_key >> startBit) & (NUM_TABLES-1); dstHostOK[tables[tableIdx] + counter2[tableIdx]] = srcHost[i]; counter2[tableIdx] ++; } } b3AlignedObjectArray<b3SortData> dstHost; dstHost.resize(src->size()); int counter[NUM_TABLES]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; for (int wgIdx=0;wgIdx<NUM_WGS;wgIdx++) { int counter[NUM_TABLES]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; int nBlocks = 
(n)/blockSize - nBlocksPerWG*wgIdx; for(int iblock=0; iblock<b3Min(cdata.m_nBlocksPerWG, nBlocks); iblock++) { for (int lIdx = 0;lIdx < 64;lIdx++) { int addr = iblock*blockSize + blockSize*cdata.m_nBlocksPerWG*wgIdx + ELEMENTS_PER_WORK_ITEM*lIdx; // MY_HISTOGRAM( localKeys.x ) ++ is much expensive than atomic add as it requires read and write while atomics can just add on AMD // Using registers didn't perform well. It seems like use localKeys to address requires a lot of alu ops // AMD: AtomInc performs better while NV prefers ++ for(int j=0; j<ELEMENTS_PER_WORK_ITEM; j++) { if( addr+j < n ) { // printf ("addr+j=%d\n", addr+j); int i = addr+j; int tableIdx = (srcHost[i].m_key >> startBit) & (NUM_TABLES-1); int destIndex = testHist[tableIdx*NUM_WGS+wgIdx] + counter[tableIdx]; b3SortData ok = dstHostOK[destIndex]; if (ok.m_key != srcHost[i].m_key) { printf("ok.m_key = %d, srcHost[i].m_key = %d\n", ok.m_key,srcHost[i].m_key ); printf("(ok.m_value = %d, srcHost[i].m_value = %d)\n", ok.m_value,srcHost[i].m_value ); } if (ok.m_value != srcHost[i].m_value) { printf("ok.m_value = %d, srcHost[i].m_value = %d\n", ok.m_value,srcHost[i].m_value ); printf("(ok.m_key = %d, srcHost[i].m_key = %d)\n", ok.m_key,srcHost[i].m_key ); } dstHost[destIndex] = srcHost[i]; counter[tableIdx] ++; } } } } } #endif //SEQUENTIAL dst->copyFromHost(dstHost); } #endif//USE_GPU #ifdef DEBUG_RADIXSORT destHisto->copyToHost(testHist); printf("ib = %d, testHist size = %d, non zero elements:\n",ib, testHist.size()); for (int i=0;i<testHist.size();i++) { if (testHist[i]!=0) printf("testHist[%d]=%d\n",i,testHist[i]); } #endif //DEBUG_RADIXSORT b3Swap(src, dst ); b3Swap(srcHisto,destHisto); #ifdef DEBUG_RADIXSORT2 keyValuesInOut.copyToHost(test2); printf("numElem = %d\n",test2.size()); for (int i=0;i<test2.size();i++) { if (test2[i].m_key != test2[i].m_value) { printf("test2[%d].m_key=%d\n",i,test2[i].m_key); printf("test2[%d].m_value=%d\n",i,test2[i].m_value); } } #endif //DEBUG_RADIXSORT2 count++; }