void PxsFluidDynamics::updatePacket(PxsSphUpdateType updateType, PxVec3* forceBuf, PxsFluidParticle* particles, const PxsParticleCell& packet, const PxsFluidPacketSections& packetSections, const PxsFluidPacketHaloRegions& haloRegions, PxsFluidDynamicsTempBuffers& tempBuffers) { PX_COMPILE_TIME_ASSERT(PXS_FLUID_BRUTE_FORCE_PARTICLE_THRESHOLD <= PXS_FLUID_SUBPACKET_PARTICLE_LIMIT_FORCE_DENSITY); updateParticlesPrePass(updateType, forceBuf + packet.firstParticle, particles + packet.firstParticle, packet.numParticles, mParams); bool bruteForceApproach = ((packet.numParticles <= PXS_FLUID_BRUTE_FORCE_PARTICLE_THRESHOLD) && (haloRegions.maxNumParticles <= PXS_FLUID_BRUTE_FORCE_PARTICLE_THRESHOLD)); if (bruteForceApproach) { // There are not enough particles in the packet and its neighbors to make it worth building the local cell hash. // So, we do a brute force approach testing each particle against each particle. // sschirm: TODO check whether one way is faster (fewer function calls... more math) PxsFluidParticle* packetParticles = particles + packet.firstParticle; PxVec3* packetForceBuf = forceBuf + packet.firstParticle; for(PxU32 p=1; p < packet.numParticles; p++) { updateParticleGroupPair(packetForceBuf, packetForceBuf, packetParticles, packetParticles, tempBuffers.orderedIndicesSubpacket + p - 1, 1, tempBuffers.orderedIndicesSubpacket + p, packet.numParticles - p, true, updateType == PXS_SPH_DENSITY, mParams, tempBuffers.simdPositionsSubpacket, tempBuffers.indexStream); } // Compute particle interactions between particles of the current packet and particles of neighboring packets. updateParticlesBruteForceHalo(updateType, forceBuf, particles, packetSections, haloRegions, tempBuffers); } else { updatePacketLocalHash(updateType, forceBuf, particles, packet, packetSections, haloRegions, tempBuffers); } updateParticlesPostPass(updateType, forceBuf + packet.firstParticle, particles + packet.firstParticle, packet.numParticles, mParams); }
void FileRecorder::writeVertexBuffer(unsigned int id, const physx::apex::NxApexRenderVertexBufferData& data, unsigned int firstVertex, unsigned int numVertices) { fprintf(mOutputFile, "VertexBuffer[%d]::write: ", id); WRITE_ITEM(firstVertex); WRITE_ITEM(numVertices); WRITE_ITEM(data.moduleId); WRITE_ITEM(data.numModuleSpecificSemantics); fprintf(mOutputFile, "\n"); #ifdef PX_X86 PX_COMPILE_TIME_ASSERT(sizeof(physx::apex::NxApexRenderSemanticData) == (sizeof(void*) + 4 + sizeof(void*) + 4 + 4 + 1 + 3/*padding*/)); #endif for (unsigned int i = 0; i < physx::apex::NxRenderVertexSemantic::NUM_SEMANTICS; i++) { const physx::apex::NxApexRenderSemanticData& semanticData = data.getSemanticData(physx::apex::NxRenderVertexSemantic::Enum(i)); if (semanticData.format != physx::apex::NxRenderDataFormat::UNSPECIFIED) { fprintf(mOutputFile, " [%d]: ", i); WRITE_DATA_ITEM(stride); WRITE_DATA_ITEM(format); WRITE_DATA_ITEM(srcFormat); fprintf(mOutputFile, "\n"); //writeBufferData(semanticData.data, semanticData.stride, numVertices, semanticData.format); } } PX_COMPILE_TIME_ASSERT(physx::apex::NxRenderVertexSemantic::NUM_SEMANTICS == 13); #ifdef PX_X86 PX_COMPILE_TIME_ASSERT(sizeof(data) == sizeof(physx::apex::NxApexRenderSemanticData) * physx::apex::NxRenderVertexSemantic::NUM_SEMANTICS + 4 + sizeof(void*) + 4 + sizeof(void*) + 4); #endif }
ThreadPriority::Enum Thread::getPriority( Id threadId )
{
	// The threshold comparisons below rely on the OS priority constants being ordered.
	PX_COMPILE_TIME_ASSERT( THREAD_PRIORITY_HIGHEST > THREAD_PRIORITY_ABOVE_NORMAL );

	const int osPriority = GetThreadPriority( (HANDLE) threadId );

	// Map the OS priority onto our enum, highest band first.
	if ( osPriority >= THREAD_PRIORITY_HIGHEST )
		return ThreadPriority::eHIGH;
	if ( osPriority >= THREAD_PRIORITY_ABOVE_NORMAL )
		return ThreadPriority::eABOVE_NORMAL;
	if ( osPriority >= THREAD_PRIORITY_NORMAL )
		return ThreadPriority::eNORMAL;
	if ( osPriority >= THREAD_PRIORITY_BELOW_NORMAL )
		return ThreadPriority::eBELOW_NORMAL;
	return ThreadPriority::eLOW;
}
// Constructs a triangle mesh by adopting the buffers owned by the cooked data
// object 'd'. Ownership transfer: every pointer copied into a member below is
// then cleared on 'd', so the data object's destructor will not free it.
Gu::TriangleMesh::TriangleMesh(GuMeshFactory& factory, TriangleMeshData& d)
:	PxTriangleMesh(PxType(gTable[d.mType]), PxBaseFlag::eOWNS_MEMORY | PxBaseFlag::eIS_RELEASABLE)
,	mNbVertices				(d.mNbVertices)
,	mNbTriangles			(d.mNbTriangles)
,	mVertices				(d.mVertices)
,	mTriangles				(d.mTriangles)
,	mAABB					(d.mAABB)
,	mExtraTrigData			(d.mExtraTrigData)
,	mGeomEpsilon			(d.mGeomEpsilon)
,	mFlags					(d.mFlags)
,	mMaterialIndices		(d.mMaterialIndices)
,	mFaceRemap				(d.mFaceRemap)
,	mAdjacencies			(d.mAdjacencies)
,	mMeshFactory			(&factory)
	// GPU rigid body (GRB) mirror data, also adopted from 'd'.
,	mGRB_triIndices			(d.mGRB_triIndices)
,	mGRB_triAdjacencies		(d.mGRB_triAdjacencies)
,	mGRB_vertValency		(d.mGRB_vertValency)
,	mGRB_adjVertStart		(d.mGRB_adjVertStart)
,	mGRB_adjVertices		(d.mGRB_adjVertices)
,	mGRB_meshAdjVerticiesTotal	(d.mGRB_meshAdjVerticiesTotal)
,	mGRB_faceRemap			(d.mGRB_faceRemap)
,	mGRB_BV32Tree			(d.mGRB_BV32Tree)
{
	// this constructor takes ownership of memory from the data object
	d.mVertices = 0;
	d.mTriangles = 0;
	d.mExtraTrigData = 0;
	d.mFaceRemap = 0;
	d.mAdjacencies = 0;
	d.mMaterialIndices = 0;
	d.mGRB_triIndices = 0;
	d.mGRB_triAdjacencies = 0;
	d.mGRB_vertValency = 0;
	d.mGRB_adjVertStart = 0;
	d.mGRB_adjVertices = 0;
	d.mGRB_faceRemap = 0;
	d.mGRB_BV32Tree = 0;

	// PT: 'getPaddedBounds()' is only safe if we make sure the bounds member is followed by at least 32bits of data
	PX_COMPILE_TIME_ASSERT(PX_OFFSET_OF(Gu::TriangleMesh, mExtraTrigData)>=PX_OFFSET_OF(Gu::TriangleMesh, mAABB)+4);
}
void FileRecorder::createIndexBuffer(unsigned int id, const physx::apex::NxUserRenderIndexBufferDesc& desc)
{
	// Log the creation of an index buffer: id followed by each descriptor field.
	fprintf(mOutputFile, "IndexBuffer[%d]::create: ", id);
	WRITE_DESC_ELEM(maxIndices);
	WRITE_DESC_ELEM(hint);
	WRITE_DESC_ELEM(format);
	WRITE_DESC_ELEM(primitives);
	WRITE_DESC_ELEM(registerInCUDA);
	//WRITE_DESC_ELEM(interopContext);
	fprintf(mOutputFile, "\n");

#ifdef PX_X86
	// If the descriptor layout changes on 32-bit builds this fails,
	// reminding us to revisit the field list above.
	PX_COMPILE_TIME_ASSERT(sizeof(desc) == 4 + 4 + 4 + 4 + 1 + 3/*padding*/ + sizeof(void*));
#endif
}
const char* Writer::semanticToString(physx::apex::NxRenderBoneSemantic::Enum semantic)
{
	// If this assert is hit, add/remove cases below to match NxRenderBoneSemantic.
	PX_COMPILE_TIME_ASSERT(physx::apex::NxRenderBoneSemantic::NUM_SEMANTICS == 2);

	const char* name = NULL;
	switch (semantic)
	{
	case physx::apex::NxRenderBoneSemantic::POSE:
		name = "POSE";
		break;
	case physx::apex::NxRenderBoneSemantic::PREVIOUS_POSE:
		name = "PREVIOUS_POSE";
		break;
	default:
		PX_ALWAYS_ASSERT();
	}
	return name;
}
void FileRecorder::createVertexBuffer(unsigned int id, const physx::apex::NxUserRenderVertexBufferDesc& desc)
{
	// Log the creation of a vertex buffer: id, sizes, every semantic request, then the remaining fields.
	fprintf(mOutputFile, "VertexBuffer[%d]::create: ", id);
	WRITE_DESC_ELEM(maxVerts);
	WRITE_DESC_ELEM(hint);

	// One entry per vertex semantic the buffer may be asked to hold.
	WRITE_REQUEST(POSITION);
	WRITE_REQUEST(NORMAL);
	WRITE_REQUEST(TANGENT);
	WRITE_REQUEST(BINORMAL);
	WRITE_REQUEST(COLOR);
	WRITE_REQUEST(TEXCOORD0);
	WRITE_REQUEST(TEXCOORD1);
	WRITE_REQUEST(TEXCOORD2);
	WRITE_REQUEST(TEXCOORD3);
	WRITE_REQUEST(BONE_INDEX);
	WRITE_REQUEST(BONE_WEIGHT);
	WRITE_REQUEST(DISPLACEMENT_TEXCOORD);
	WRITE_REQUEST(DISPLACEMENT_FLAGS);

	WRITE_DESC_ELEM(numCustomBuffers);
	// PH: not done on purpose (yet)
	//void** customBuffersIdents;
	//NxRenderDataFormat::Enum* customBuffersRequest;
	WRITE_DESC_ELEM(moduleIdentifier);
	WRITE_DESC_ELEM(uvOrigin);
	WRITE_DESC_ELEM(canBeShared);
	fprintf(mOutputFile, "\n");

#ifdef PX_X86
	// PH: Make sure that if the size of the descriptor changes, we get a compile error here and adapt the WRITE_REQUESTs from above accordingly
	PX_COMPILE_TIME_ASSERT(sizeof(desc) == 4 + 4 + (13 * 4) + 4 + sizeof(void*) + sizeof(void*) + 4 + 4 + 1 + 1 + 2/*padding*/ + sizeof(void*) );
#endif
}
// Recomputes the bounds of every node in a static RTree, bottom-up.
// cb supplies fresh leaf bounds; if retBounds is non-NULL it receives the
// merged bounds of all root pages.
void RTree::refitAllStaticTree(CallbackRefit& cb, PxBounds3* retBounds)
{
	// Base address used to decode child page offsets stored in page.ptrs.
	// NOTE(review): on non-x64 static trees this is CAST_U8(mPages); for dynamic
	// trees it is NULL — presumably dynamic trees store absolute offsets. TODO confirm.
	PxU8* treeNodes8 = PX_IS_X64 ? CAST_U8(get64BitBasePage()) : CAST_U8((mFlags & IS_DYNAMIC) ? NULL : mPages);

	// since pages are ordered we can scan back to front and the hierarchy will be updated
	for (PxI32 iPage = PxI32(mTotalPages)-1; iPage>=0; iPage--)
	{
		RTreePage& page = mPages[iPage];
		for (PxU32 j = 0; j < RTREE_PAGE_SIZE; j++)
		{
			if (page.isEmpty(j))
				continue;
			if (page.isLeaf(j))
			{
				// Leaf node: ask the callback for new bounds
				// (page.ptrs[j]-1 strips the leaf tag bit from the stored pointer).
				Vec3V childMn, childMx;
				cb.recomputeBounds(page.ptrs[j]-1, childMn, childMx); // compute the bound around triangles
				PxVec3 mn3, mx3;
				V3StoreU(childMn, mn3);
				V3StoreU(childMx, mx3);
				page.minx[j] = mn3.x; page.miny[j] = mn3.y; page.minz[j] = mn3.z;
				page.maxx[j] = mx3.x; page.maxy[j] = mx3.y; page.maxz[j] = mx3.z;
			} else {
				// Internal node: merge the (already refitted) bounds of the child page.
				const RTreePage* child = (const RTreePage*)(treeNodes8 + page.ptrs[j]);
				PX_COMPILE_TIME_ASSERT(RTREE_PAGE_SIZE == 4);
				bool first = true;
				for (PxU32 k = 0; k < RTREE_PAGE_SIZE; k++)
				{
					if (child->isEmpty(k))
						continue;
					if (first)
					{
						// First occupied child slot seeds the merged bounds.
						page.minx[j] = child->minx[k]; page.miny[j] = child->miny[k]; page.minz[j] = child->minz[k];
						page.maxx[j] = child->maxx[k]; page.maxy[j] = child->maxy[k]; page.maxz[j] = child->maxz[k];
						first = false;
					} else {
						page.minx[j] = PxMin(page.minx[j], child->minx[k]);
						page.miny[j] = PxMin(page.miny[j], child->miny[k]);
						page.minz[j] = PxMin(page.minz[j], child->minz[k]);
						page.maxx[j] = PxMax(page.maxx[j], child->maxx[k]);
						page.maxy[j] = PxMax(page.maxy[j], child->maxy[k]);
						page.maxz[j] = PxMax(page.maxz[j], child->maxz[k]);
					}
				}
			}
		}
	}

	if (retBounds)
	{
		// Union of all root-page bounds for the caller.
		RTreeNodeQ bound1;
		for (PxU32 ii = 0; ii<mNumRootPages; ii++)
		{
			mPages[ii].computeBounds(bound1);
			if (ii == 0)
			{
				retBounds->minimum = PxVec3(bound1.minx, bound1.miny, bound1.minz);
				retBounds->maximum = PxVec3(bound1.maxx, bound1.maxy, bound1.maxz);
			}
			else
			{
				retBounds->minimum = retBounds->minimum.minimum(PxVec3(bound1.minx, bound1.miny, bound1.minz));
				retBounds->maximum = retBounds->maximum.maximum(PxVec3(bound1.maxx, bound1.maxy, bound1.maxz));
			}
		}
	}
#ifdef PX_CHECKED
	// Checked builds: verify the tree invariants after the refit.
	validate(&cb);
#endif
}