static PxU32 addToStringTable(physx::shdfnd::Array<char>& stringTable, const char* str)
	{
		if(!str)
			return 0xffffffff;

		PxI32 length = PxI32(stringTable.size());
		const char* table = stringTable.begin();
		const char* start = table;
		while(length)
		{
			if(strcmp(table, str)==0)
				return PxU32(table - start);

			const char* saved = table;
			while(*table++);	// skip past the current string, including its terminator
			length -= PxU32(table - saved);
			PX_ASSERT(length>=0);
		}

		const PxU32 offset = stringTable.size();

		while(*str)
			stringTable.pushBack(*str++);
		stringTable.pushBack(0);
		return offset;
	}
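// A minimal usage sketch (hypothetical, not part of the SDK) for addToStringTable():
// strings are stored back to back with their terminating zeros, duplicates return the
// offset of the existing entry, and an offset stays valid as long as entries are only
// appended, so it can be serialized in place of a pointer.
static void stringTableUsageSketch()
{
	physx::shdfnd::Array<char> table;
	const PxU32 offA = addToStringTable(table, "wheel");	// first entry -> offset 0
	const PxU32 offB = addToStringTable(table, "chassis");	// appended after "wheel\0"
	const PxU32 offC = addToStringTable(table, "wheel");	// duplicate -> same offset as offA
	PX_ASSERT(offA == offC);
	const char* resolved = table.begin() + offB;			// points at "chassis"
	PX_UNUSED(resolved);
	PX_UNUSED(offA);
	PX_UNUSED(offC);
}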
void* PxSampleAllocator::allocate(size_t size, const char* typeName, const char* filename, int line)
{
	if(!size)
		return NULL;

#if defined(PX_DEBUG) || defined(PX_PROFILE)
	Ps::MutexT<Ps::RawAllocator>::ScopedLock lock(mMutex);

	// Allocate one debug block in front of each real allocation
	const size_t neededSize = size + sizeof(DebugBlock);
	void* ptr = platformAlignedAlloc(neededSize);

	if (NULL != ptr)
	{
		// Fill debug block
		DebugBlock* DB = (DebugBlock*)ptr;
		DB->mCheckValue	= DEBUG_IDENTIFIER;
		DB->mSize		= PxU32(size);
		DB->mLine		= line;
		DB->mSlotIndex	= INVALID_ID;
		DB->mFilename	= filename;
		DB->mHandle		= typeName ? typeName : "";

		// Update global stats
		mTotalNbAllocs++;
		mNbAllocs++;
		mNbAllocatedBytes += PxU32(size);
		if(mNbAllocatedBytes>mHighWaterMark)
			mHighWaterMark = mNbAllocatedBytes;

		// Insert the allocated block in the debug memory block list
		if(mMemBlockList)
		{
			if(mFirstFree!=INVALID_ID)
			{
				// Recycle old location
				PxU32 NextFree = (PxU32)(size_t)(mMemBlockList[mFirstFree]);
				if(NextFree!=INVALID_ID)
					NextFree>>=1;

				mMemBlockList[mFirstFree] = ptr;
				DB->mSlotIndex = mFirstFree;

				mFirstFree = NextFree;
			}
			else
			{
				if(mMemBlockUsed==mMemBlockListSize)
	static void setMissingPropertiesToDefault( RepXCollection& collection, RepXReaderWriter& editor, const RepXDefaultEntry* defaults, PxU32 numDefaults )
	{
		FoundationWrapper wrapper( collection.getAllocator() );
		//Release all strings at once, instead of piece by piece
		RepXMemoryAllocatorImpl alloc( collection.getAllocator() );
		//build a hashtable of the initial default value strings.
		TNameOffsetMap nameOffsets( wrapper );
		for ( PxU32 idx = 0; idx < numDefaults; ++idx )
		{
			const RepXDefaultEntry& item( defaults[idx] );
			size_t nameLen = 0;
			const char* periodPtr = nextPeriod (item.name);
			for ( ; periodPtr && *periodPtr; ++periodPtr ) if( *periodPtr == '.' )	break;
			if ( periodPtr == NULL || *periodPtr != '.' ) continue;
			nameLen = periodPtr - item.name;
			char* newMem = (char*)alloc.allocate( PxU32(nameLen + 1) );
			memcpy( newMem, item.name, nameLen );
			newMem[nameLen] = 0;
		
			if ( nameOffsets.find( newMem ) )
				alloc.deallocate( (PxU8*)newMem );
			else
				nameOffsets.insert( newMem, idx );
		}
		//Run through each collection item, recursively visiting it and its children.
		//If an object's name is in the hash map, check for and add any properties that don't exist;
		//otherwise return.
		for ( const RepXCollectionItem* item = collection.begin(), *end = collection.end(); item != end; ++ item )
		{
			RepXCollectionItem theItem( *item );
			setMissingPropertiesToDefault( theItem.mDescriptor, editor, defaults, numDefaults, nameOffsets );
		}
	}
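// Illustrative sketch (not SDK code) of the name handling in the loop above: each default
// entry name is expected to look like "TypeName.propertyName", and only the "TypeName"
// prefix is copied out and used as the key of the name/offset hash map.
static bool extractTypePrefix(const char* entryName, char* buffer, size_t bufferSize)
{
	const char* period = entryName ? strchr(entryName, '.') : NULL;
	if(!period)
		return false;	// entries without a '.' are skipped, as in the loop above
	const size_t nameLen = size_t(period - entryName);
	if(nameLen + 1 > bufferSize)
		return false;
	memcpy(buffer, entryName, nameLen);
	buffer[nameLen] = 0;
	return true;
}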
				//Allowing for small jitter, the next value in the stream should be higher
				//than the last value. The values won't be strictly incrementing, only
				//*roughly* incrementing, because of jitter in the multiprocessor timing
				//stream. We detect possible overflow by comparing the high bit of this
				//value against the high bit of the last value, and we distinguish overflow
				//from jitter by requiring the difference between the two values to exceed
				//a certain amount.
				PxU64 NextValue(PxU32 value)
				{
					const PxU32 largestJitterValue = PX_MAX_U32 / 4;

					//Detect overflow by checking the high bit of this value against the high
					//bit of the last value
					bool highBitRaised = IsHighBitHight( value );
					if (mLastValue != PxU32(-1))
					{

						bool lastHighBitRaised = IsHighBitHight( mLastValue );
						if (highBitRaised != lastHighBitRaised)
						{

							if (highBitRaised)
							{
								PxU32 diff = value - mLastValue;
								if ( diff < largestJitterValue )
									mHighBitHighOffset = mHighBitLowOffset;
							}
							else 
							{
								PxU32 diff = mLastValue - value;
								if ( diff > largestJitterValue && mHighBitLowOffset == mHighBitHighOffset)
									mHighBitLowOffset += PX_MAX_U32;
							}
						}
					}
					mLastValue = value;
					PxU64 offset = highBitRaised ? mHighBitHighOffset : mHighBitLowOffset;
					return value + offset;
				}
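// Hedged usage sketch (the OverflowRecord name is taken from the constructor shown further
// below): every 32-bit timestamp read from the stream is widened to 64 bits, and the
// internal offsets advance by 2^32 whenever a wrap-around is detected, so the returned
// values are monotonic apart from the bounded jitter described above.
static void widenTimestamps(OverflowRecord& record, const PxU32* samples, PxU32 count, PxU64* out)
{
	for(PxU32 i = 0; i < count; i++)
		out[i] = record.NextValue(samples[i]);
}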
static void visualizeActiveEdges(Cm::RenderOutput& out, const Gu::TriangleMesh& mesh, PxU32 nbTriangles, const PxU32* results, const Cm::Matrix34& absPose, const PxMat44& midt)
{
	const PxU8* extraTrigData = mesh.getExtraTrigData();
	PX_ASSERT(extraTrigData);

	const PxVec3* vertices = mesh.getVerticesFast();
	const void* indices = mesh.getTrianglesFast();

	const PxU32 ecolor = PxU32(PxDebugColor::eARGB_YELLOW);
	const bool has16Bit = mesh.has16BitIndices();
	for(PxU32 i=0; i<nbTriangles; i++)
	{
		const PxU32 index = results ? results[i] : i;

		PxVec3 wp[3];
		getTriangle(mesh, index, wp, vertices, indices, absPose, has16Bit);

		const PxU32 flags = extraTrigData[index];

		if(flags & Gu::ETD_CONVEX_EDGE_01)
		{
			out << midt << ecolor << Cm::RenderOutput::LINES << wp[0] << wp[1];
		}
		if(flags & Gu::ETD_CONVEX_EDGE_12)
		{
			out << midt << ecolor << Cm::RenderOutput::LINES << wp[1] << wp[2];
		}
		if(flags & Gu::ETD_CONVEX_EDGE_20)
		{
			out << midt << ecolor << Cm::RenderOutput::LINES << wp[0] << wp[2];
		}
	}
}
Example #6
void NpArticulation::getSolverIterationCounts(PxU32 & positionIters, PxU32 & velocityIters) const
{
	NP_READ_CHECK(getOwnerScene());
	PxU16 x = getArticulation().getSolverIterationCounts();
	velocityIters = PxU32(x >> 8);
	positionIters = PxU32(x & 0xff);
}
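// Sketch of the packing that the getter above reverses (an assumption based on the shifts
// in the getter: velocity iterations live in the high byte and position iterations in the
// low byte of a single PxU16).
static PxU16 packSolverIterationCounts(PxU32 positionIters, PxU32 velocityIters)
{
	PX_ASSERT(positionIters <= 255 && velocityIters <= 255);
	return PxU16((velocityIters << 8) | (positionIters & 0xff));
}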
void VisualDebugger::setVisualDebuggerFlag(PxVisualDebuggerFlags::Enum flag, bool value)
{
	if(value)
		mFlags |= PxU32(flag);
	else
		mFlags &= ~PxU32(flag);
}
Example #8
void ParticleEmitterRate::initSparseSiteHash(PxU32 numEmit, PxU32 sparseMax)
{
	PX_ASSERT(PxU32(PARTICLE_EMITTER_SPARSE_FACTOR*numEmit) <= sparseMax);
	PX_ASSERT(mSites.size() == sparseMax);
	for(PxU32 i = 0; i < sparseMax; i++)
		mSites[i] = 0xffffffff;
}
	BatchStreamHeader(
		PxHitFlags aHitFlags, const PxQueryCache* aCache, const PxQueryFilterData& aFd,
		void* aUserData, PxU16 aMaxTouchHits, QTypeROS::Enum aHitTypeId) :
			hitFlags(aHitFlags), fd(aFd), userData(aUserData), cache(aCache),
			maxTouchHits(aMaxTouchHits), hitTypeId(char(aHitTypeId))
	{
		nextQueryOffset = PxU32(NpBatchQuery::eTERMINAL);
	}
Example #10
void physx::shdfnd::enableFPExceptions()
{
	// clear any pending exceptions
	_clearfp();

	// enable all fp exceptions except inexact and underflow (common, benign)
	_controlfp_s(NULL, PxU32(~_MCW_EM) | _EM_INEXACT | _EM_UNDERFLOW, _MCW_EM);
}
Example #11
void getSimplePose( PxActor* actor, float* data ) //TODO rework
{
    PxShape* shp[1];
    PxRigidDynamic* rigid = (PxRigidDynamic*)actor;
    rigid->getShapes( shp, PxU32(1) );
    PxMat44 shape_pose = rigid->getGlobalPose(); //(PxShapeExt::getGlobalPose(*shp[0], *rigid));
    // copy into a row-major float[16], transposing PxMat44's column-major storage
    for( int i = 0; i < 4; i++ )
        for( int j = 0; j < 4; j++ )
            data[i*4 + j] = shape_pose[j][i];
}
Example #12
void VisualDebugger::setVisualDebuggerFlag(PxVisualDebuggerFlag::Enum flag, bool value)
{
	if(value)
		mFlags |= PxU32(flag);
	else
		mFlags &= ~PxU32(flag);
	//This has been a long-standing bug against the debugger:
	//changing this flag doesn't always change the sending-contact-reports behavior.
	if ( flag == PxVisualDebuggerFlag::eTRANSMIT_CONTACTS )
	{
		setCreateContactReports( value );
	}
}
Example #13
void VisualDebugger::setVisualDebuggerFlags(PxVisualDebuggerFlags flags)
{
	bool oldContactFlag = mFlags & PxVisualDebuggerFlag::eTRANSMIT_CONTACTS;
	bool newContactFlag = flags & PxVisualDebuggerFlag::eTRANSMIT_CONTACTS;

	mFlags = PxU32(flags);
	

	//This has been a long-standing bug against the debugger:
	//changing this flag doesn't always change the sending-contact-reports behavior.
	if ( oldContactFlag != newContactFlag )
	{
		setCreateContactReports( newContactFlag );
	}
}
ReadCheck::ReadCheck(const ApexRWLockable* scene, const char* functionName)
	: mLockable(scene), mName(functionName), mErrorCount(0)
{
	if (NxGetApexSDK()->isConcurrencyCheckEnabled() && mLockable && !mLockable->isEnabled())
	{
		if (!mLockable->startRead() && !mLockable->isEnabled())
		{
			APEX_INTERNAL_ERROR("An API read call (%s) was made from thread %d but acquireReadLock() was not called first, note that "
				"when NxApexSDKDesc::enableConcurrencyCheck is enabled all API reads and writes must be "
				"wrapped in the appropriate locks.", mName, PxU32(physx::shdfnd::Thread::getId()));
		}

		// Record the NpScene read/write error counter which is
		// incremented any time a NpScene::startWrite/startRead fails
		// (see destructor for additional error checking based on this count)
		mErrorCount = mLockable->getReadWriteErrorCount();
	}
}
Example #15
void FixedStepper::substepStrategy(const PxReal stepSize, PxU32& substepCount, PxReal& substepSize)
{
	if(mAccumulator > mFixedSubStepSize)
		mAccumulator = 0.0f;

	// don't step less than the step size, just accumulate
	mAccumulator  += stepSize;
	if(mAccumulator < mFixedSubStepSize)
	{
		substepCount = 0;
		return;
	}

	substepSize = mFixedSubStepSize;
	substepCount = PxMin(PxU32(mAccumulator/mFixedSubStepSize), mMaxSubSteps);

	mAccumulator -= PxReal(substepCount)*substepSize;
}
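// Hedged usage sketch: how a stepper's substepStrategy() is typically driven from a render
// loop (the advanceSimulation() wrapper is illustrative, not sample code; the PxScene calls
// are standard PhysX API).
static void advanceSimulation(FixedStepper& stepper, PxScene& scene, PxReal frameDt)
{
	PxU32 substepCount = 0;
	PxReal substepSize = 0.0f;
	stepper.substepStrategy(frameDt, substepCount, substepSize);
	for(PxU32 i = 0; i < substepCount; i++)
	{
		scene.simulate(substepSize);
		scene.fetchResults(true);	// block until this substep completes
	}
}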
Example #16
void VariableStepper::substepStrategy(const PxReal stepSize, PxU32& substepCount, PxReal& substepSize)
{
	if(mAccumulator > mMaxSubStepSize)
		mAccumulator = 0.0f;

	// don't step less than the min step size, just accumulate
	mAccumulator  += stepSize;
	if(mAccumulator < mMinSubStepSize)
	{
		substepCount = 0;
		return;
	}

	substepCount = PxMin(PxU32(PxCeil(mAccumulator/mMaxSubStepSize)), mMaxSubSteps);
	substepSize = PxMin(mAccumulator/substepCount, mMaxSubStepSize);

	mAccumulator -= PxReal(substepCount)*substepSize;
}
PxU32 RTree::computeBottomLevelCount(PxU32 multiplier) const
{
	PX_ASSERT((mFlags & IS_DYNAMIC) == 0);
	PxU32 topCount = 0, curCount = mNumRootPages;
	const RTreePage* rightMostPage = &mPages[mNumRootPages-1];
	PX_ASSERT(rightMostPage);
	for (PxU32 level = 0; level < mNumLevels-1; level++)
	{
		topCount += curCount;
		PxU32 nc = rightMostPage->nodeCount();
		PX_ASSERT(nc > 0 && nc <= RTreePage::SIZE);
		// old version pointer, up to PX_MESH_VERSION 8
		PxU32 ptr = (rightMostPage->ptrs[nc-1]) * multiplier;
		PX_ASSERT(ptr % sizeof(RTreePage) == 0);
		const RTreePage* rightMostPageNext = mPages + (ptr / sizeof(RTreePage));
		curCount = PxU32(rightMostPageNext - rightMostPage);
		rightMostPage = rightMostPageNext;
	}

	return mTotalPages - topCount;
}
Example #18
void VisualDebugger::setVisualDebuggerFlag(PxVisualDebuggerFlags::Enum flag, bool value)
{
	if(value)
		mFlags |= PxU32(flag);
	else
		mFlags &= ~PxU32(flag);
	//This has been a long-standing bug against the debugger:
	//changing this flag doesn't always change the sending-contact-reports behavior.
	if ( flag == PxVisualDebuggerFlags::eTRANSMIT_CONTACTS )
	{
		if ( isConnected() )
		{
			NpPhysics& npPhysics = NpPhysics::getInstance();
			PxU32 numScenes = npPhysics.getNbScenes();
			for(PxU32 i = 0; i < numScenes; i++)
			{
				NpScene* npScene = npPhysics.getScene(i);
				Scb::Scene& scbScene = npScene->getScene();
				scbScene.getSceneVisualDebugger().setCreateContactReports(value);
			}
		}
	}
}
bool SampleNorthPole::isDetachable(PxFilterData& filterData)
{
	return filterData.word3 & PxU32(DETACHABLE_FLAG) ? true : false;
}
void SampleNorthPole::setDetachable(PxShape& shape)
{
	PxFilterData fd = shape.getSimulationFilterData();
	fd.word3 |= PxU32(DETACHABLE_FLAG);
	shape.setSimulationFilterData(fd);
}
bool SampleNorthPole::needsContactReport(const PxFilterData& filterData0, const PxFilterData& filterData1)
{
	const PxU32 needsReport = PxU32(DETACHABLE_FLAG | SNOWBALL_FLAG);
	PxU32 flags = (filterData0.word3 | filterData1.word3);
	return (flags & needsReport) == needsReport;
}
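// Hedged sketch of how these word3 flags would typically be consulted from a simulation
// filter shader (this shader is illustrative and not the sample's actual shader; the
// needsContactReport() logic is inlined because the helper is a member function).
static PxFilterFlags northPoleFilterShaderSketch(
	PxFilterObjectAttributes /*attributes0*/, PxFilterData filterData0,
	PxFilterObjectAttributes /*attributes1*/, PxFilterData filterData1,
	PxPairFlags& pairFlags, const void* /*constantBlock*/, PxU32 /*constantBlockSize*/)
{
	pairFlags = PxPairFlag::eCONTACT_DEFAULT;
	const PxU32 needsReport = PxU32(DETACHABLE_FLAG | SNOWBALL_FLAG);
	if(((filterData0.word3 | filterData1.word3) & needsReport) == needsReport)
		pairFlags |= PxPairFlag::eNOTIFY_TOUCH_FOUND;	// request a contact report for this pair
	return PxFilterFlag::eDEFAULT;
}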
void PxsFluidDynamics::updateSph(PxBaseTask& continuation)
{
	PxsFluidParticle* particles = mParticleSystem.mParticleState->getParticleBuffer();
	PxU32 numParticles = mParticleSystem.mNumPacketParticlesIndices;
	const PxU32* particleIndices = mParticleSystem.mPacketParticlesIndices;
	const PxsParticleCell* packets = mParticleSystem.mSpatialHash->getPackets();
	const PxsFluidPacketSections* packetSections = mParticleSystem.mSpatialHash->getPacketSections();
	PX_ASSERT(packets);
	PX_ASSERT(packetSections);
	PX_ASSERT(numParticles > 0);
	PX_UNUSED(packetSections);

#ifdef PX_PS3
	const Cm::BitMap& particleMap = mParticleSystem.mParticleState->getParticleMap();
	PxF32 timeStep = mParticleSystem.mSimulationTimeStep;
	startTimerMarker(ePARTICLEUPDATESPH);

	mDynamicSPU.mSPHSPUs = mParticleSystem.getContext().getSceneParamInt(PxPS3ConfigParam::eSPU_FLUID_SPH);

	if(mDynamicSPU.mSPHSPUs > 0)
	{
		mDynamicSPU.updateSphSPU(particles, mParticleSystem.mTransientBuffer, particleMap, numParticles, particleIndices, packets, packetSections, mParams, timeStep, mParticleSystem.mContext.getTaskPool(), continuation);
	}
	else
#endif
	{		
		//sschirm: for now we reorder particles for sph exclusively, and scatter again after sph.
		if (!mTempReorderedParticles)
		{
			PxU32 maxParticles = mParticleSystem.mParticleState->getMaxParticles();
			mTempReorderedParticles = (PxsFluidParticle*)mParticleSystem.mAlign16.allocate(maxParticles*sizeof(PxsFluidParticle), __FILE__, __LINE__);
		}

		if (!mTempParticleForceBuf)
		{
			PxU32 maxParticles = mParticleSystem.mParticleState->getMaxParticles();
			//sschirm: Add an extra float, since we access this buffer later with Vec4V_From_F32Array;
			//the last 4-element load would otherwise touch unallocated memory.
			//Also initialize the buffer with 0, since it may only be used partially and
			//non-contiguously, to avoid SIMD operations picking up bad values.
			PxU32 byteSize = maxParticles*sizeof(PxVec3) + sizeof(PxF32);
			mTempParticleForceBuf = (PxVec3*)mParticleSystem.mAlign16.allocate(byteSize, __FILE__, __LINE__);
			memset(mTempParticleForceBuf, 0, byteSize);
		}		
		
		for (PxU32 i = 0; i < numParticles; ++i)
		{
			PxU32 particleIndex = particleIndices[i];
			mTempReorderedParticles[i] = particles[particleIndex];			
		}

		//would be nice to get available thread count to decide on task decomposition
		//mParticleSystem.getContext().getTaskManager().getCpuDispatcher();

		// use number of particles for task decomposition
		PxU32 targetParticleCountPerTask = PxMax(PxU32(numParticles / PXS_FLUID_MAX_PARALLEL_TASKS_SPH), PxU32(PXS_FLUID_SUBPACKET_PARTICLE_LIMIT_FORCE_DENSITY));
		PxU16 packetIndex = 0;
		PxU16 lastPacketIndex = 0;
		PxU32 numTasks = 0;
		for (PxU32 i = 0; i < PXS_FLUID_MAX_PARALLEL_TASKS_SPH; ++i)
		{
			// if this is the last iteration, we need to gather all remaining packets
			if (i == PXS_FLUID_MAX_PARALLEL_TASKS_SPH - 1)
				targetParticleCountPerTask = 0xffffffff;

			lastPacketIndex = packetIndex;
			PxU32 currentParticleCount = 0;
			while (currentParticleCount < targetParticleCountPerTask && packetIndex < PXS_PARTICLE_SYSTEM_PACKET_HASH_SIZE)
			{
				const PxsParticleCell& packet = packets[packetIndex];
				currentParticleCount += (packet.numParticles != PX_INVALID_U32) ? packet.numParticles : 0;
				packetIndex++;
			}

			if (currentParticleCount > 0)
			{
				PX_ASSERT(lastPacketIndex != packetIndex);
				mTaskData[i].beginPacketIndex = lastPacketIndex;
				mTaskData[i].endPacketIndex = packetIndex;
				numTasks++;
			}
			else
			{
				mTaskData[i].beginPacketIndex = PX_INVALID_U16;
				mTaskData[i].endPacketIndex = PX_INVALID_U16;
			}
		}
		PX_ASSERT(packetIndex == PXS_PARTICLE_SYSTEM_PACKET_HASH_SIZE);

		mNumTasks = numTasks;
		adjustTempBuffers(PxMax(numTasks, mNumTempBuffers));

		mMergeForceTask.setContinuation(&continuation);
		mMergeDensityTask.setContinuation(&mMergeForceTask);

		schedulePackets(PXS_SPH_DENSITY, mMergeDensityTask);
		mMergeDensityTask.removeReference();		
	}

#ifdef PX_PS3
	stopTimerMarker(ePARTICLEUPDATESPH);
#endif
}
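// Standalone sketch of the task-decomposition idea used in updateSph(): walk the packet
// array and close a task range whenever roughly targetPerTask particles have been gathered,
// letting the last range absorb whatever is left (simplified; the empty-packet handling via
// PX_INVALID_U32 is omitted).
struct SphTaskRange { PxU32 beginPacket, endPacket; };

static PxU32 partitionPacketsByParticleCount(const PxU32* packetParticleCounts, PxU32 numPackets,
											 PxU32 targetPerTask, SphTaskRange* ranges, PxU32 maxTasks)
{
	PxU32 numTasks = 0;
	PxU32 packet = 0;
	while(packet < numPackets && numTasks < maxTasks)
	{
		// the last task range absorbs all remaining packets
		const PxU32 target = (numTasks == maxTasks - 1) ? 0xffffffff : targetPerTask;
		const PxU32 begin = packet;
		PxU32 gathered = 0;
		while(packet < numPackets && gathered < target)
			gathered += packetParticleCounts[packet++];
		if(gathered > 0)
		{
			ranges[numTasks].beginPacket = begin;
			ranges[numTasks].endPacket = packet;
			numTasks++;
		}
	}
	return numTasks;
}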
void VisualDebugger::updatePvdProperties(const PxConvexMesh* convexMesh)
{
	PVD::PvdCommLayerError error;

	PxReal mass;
	PxMat33Legacy localInertia;
	PxVec3 localCom;
	convexMesh->getMassInformation(mass, reinterpret_cast<PxMat33 &>(localInertia), localCom);
	
	PxU64 theInstance(PX_PROFILE_POINTER_TO_U64(convexMesh));
	mPvdConnectionHelper.addPropertyGroupProperty(ConvexMeshProp::Mass,					mass);
	mPvdConnectionHelper.addPropertyGroupProperty(ConvexMeshProp::LocalInertia,			toPvdType(localInertia.toQuat()));
	mPvdConnectionHelper.addPropertyGroupProperty(ConvexMeshProp::LocalCenterOfMass,	toPvdType(localCom));

	mPvdConnectionHelper.sendSinglePropertyGroup(mPvdConnection, theInstance, PvdClassKeys::ConvexMesh);
	
	// update arrays:
	// vertex Array:
	{
		const PxU8* vertexPtr = reinterpret_cast<const PxU8*>(convexMesh->getVertices());
		const PxU32 vertexStride = sizeof(PxVec3);
		const PxU32 numVertices = convexMesh->getNbVertices();
		
		error = PvdConnectionHelper::sendSingleElementArrayProperty(mPvdConnection, theInstance, ConvexMeshProp::VertexArray
																 , VectorArrayProp::Element, PVD::PvdCommLayerDatatype::Float3
																 , vertexPtr, vertexStride, numVertices);
		PX_ASSERT(error == PVD::PvdCommLayerError::None);
	}

	// HullPolyArray:
	PxU16 maxIndices = 0;
	{

		PxU32 properties[HullPolygonArrayProp::NUM_ELEMENTS];
		PVD::PvdCommLayerDatatype dataTypes[HullPolygonArrayProp::NUM_ELEMENTS] = {	PVD::PvdCommLayerDatatype::Plane,
																					PVD::PvdCommLayerDatatype::U16,
																					PVD::PvdCommLayerDatatype::U16};
		for(PxU32 i = 0; i < HullPolygonArrayProp::NUM_ELEMENTS; i++)
			properties[i] = i+1;

		error = mPvdConnection->beginArrayPropertyBlock(theInstance, ConvexMeshProp::HullPolygonArray+1, properties, dataTypes, HullPolygonArrayProp::NUM_ELEMENTS);

		static const PxU32 NUM_STACK_ELT = 32;
		PX_ALLOCA(stack, PxHullPolygon, NUM_STACK_ELT);
		PxHullPolygon* pxHullPolygons = stack;
		PxHullPolygon* pxHullPolygonsEnd = pxHullPolygons+NUM_STACK_ELT;
		PxHullPolygon* curOut = pxHullPolygons;

		PxU32 numPolygons = convexMesh->getNbPolygons();
		for(PxU32 index = 0; index < numPolygons; index++)
		{
			convexMesh->getPolygonData(index, *curOut);
			maxIndices = PxMax(maxIndices, PxU16(curOut->mIndexBase + curOut->mNbVerts));
			curOut++;

			if(curOut == pxHullPolygonsEnd)
			{
				error = mPvdConnection->sendArrayObjects((PxU8*)(pxHullPolygons), sizeof(PxHullPolygon), NUM_STACK_ELT);
				curOut = pxHullPolygons;
			}
		}

		if(curOut != pxHullPolygons)
			error = mPvdConnection->sendArrayObjects((PxU8*)(pxHullPolygons), sizeof(PxHullPolygon), PxU32(curOut-pxHullPolygons));

		error = mPvdConnection->endArrayPropertyBlock();
	}
	

	// poly index Array:
	{
		const PxU8* indices = convexMesh->getIndexBuffer();
		PxU32 indexCount = maxIndices;

		error = PvdConnectionHelper::sendSingleElementArrayProperty(mPvdConnection, theInstance, ConvexMeshProp::IndexArray
																 , U8ArrayProp::Element, PVD::PvdCommLayerDatatype::U8
																 , indices, sizeof(PxU8), indexCount);
		PX_ASSERT(error == PVD::PvdCommLayerError::None);
	}
}
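// Generic sketch of the chunked-send pattern used for the hull polygon array above: fill a
// small stack buffer, flush it whenever it becomes full, then flush the remainder once at
// the end ('Sink' and its send() method are placeholders, not PVD API).
template <typename T, typename Sink>
static void sendInChunks(const T* items, PxU32 count, Sink& sink)
{
	static const PxU32 CHUNK = 32;
	T buffer[CHUNK];
	PxU32 used = 0;
	for(PxU32 i = 0; i < count; i++)
	{
		buffer[used++] = items[i];
		if(used == CHUNK)
		{
			sink.send(buffer, used);	// flush a full chunk
			used = 0;
		}
	}
	if(used)
		sink.send(buffer, used);		// flush the tail
}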
PxU32 ThreadImpl::setAffinityMask(PxU32 mask)
{
	PX_UNUSED(mask);
	return PxU32(0);
}
				OverflowRecord()
					: mLastValue(PxU32(-1)), mHighBitHighOffset(0), mHighBitLowOffset(0)
				{
				}
void Gu::TriangleMesh::debugVisualize(
	Cm::RenderOutput& out, const PxTransform& pose, const PxMeshScale& scaling, const PxBounds3& cullbox,
	const PxU64 mask, const PxReal fscale, const PxU32 numMaterials) const 
{
	PX_UNUSED(numMaterials);

	//bool cscale = !!(mask & ((PxU64)1 << PxVisualizationParameter::eCULL_BOX));
	const PxU64 cullBoxMask = PxU64(1) << PxVisualizationParameter::eCULL_BOX;
	bool cscale = ((mask & cullBoxMask) == cullBoxMask);

	const PxMat44 midt(PxIdentity);
	const Cm::Matrix34 absPose(PxMat33(pose.q) * scaling.toMat33(), pose.p);

	PxU32 nbTriangles = getNbTrianglesFast();
	const PxU32 nbVertices = getNbVerticesFast();
	const PxVec3* vertices = getVerticesFast();
	const void* indices = getTrianglesFast();

	const PxDebugColor::Enum colors[] = 
	{
		PxDebugColor::eARGB_BLACK,		
		PxDebugColor::eARGB_RED,		
		PxDebugColor::eARGB_GREEN,		
		PxDebugColor::eARGB_BLUE,		
		PxDebugColor::eARGB_YELLOW,	
		PxDebugColor::eARGB_MAGENTA,	
		PxDebugColor::eARGB_CYAN,		
		PxDebugColor::eARGB_WHITE,		
		PxDebugColor::eARGB_GREY,		
		PxDebugColor::eARGB_DARKRED,	
		PxDebugColor::eARGB_DARKGREEN,	
		PxDebugColor::eARGB_DARKBLUE,	
	};

	const PxU32 colorCount = sizeof(colors)/sizeof(PxDebugColor::Enum);

	if(cscale)
	{
		const Gu::Box worldBox(
			(cullbox.maximum + cullbox.minimum)*0.5f,
			(cullbox.maximum - cullbox.minimum)*0.5f,
			PxMat33(PxIdentity));
		
		// PT: TODO: use the callback version here to avoid allocating this huge array
		PxU32* results = reinterpret_cast<PxU32*>(PX_ALLOC_TEMP(sizeof(PxU32)*nbTriangles, "tmp triangle indices"));
		LimitedResults limitedResults(results, nbTriangles, 0);
		Midphase::intersectBoxVsMesh(worldBox, *this, pose, scaling, &limitedResults);
		nbTriangles = limitedResults.mNbResults;

		if (fscale)
		{
			const PxU32 fcolor = PxU32(PxDebugColor::eARGB_DARKRED);

			for (PxU32 i=0; i<nbTriangles; i++)
			{
				const PxU32 index = results[i];
				PxVec3 wp[3];
				getTriangle(*this, index, wp, vertices, indices, absPose, has16BitIndices());

				const PxVec3 center = (wp[0] + wp[1] + wp[2]) / 3.0f;
				PxVec3 normal = (wp[0] - wp[1]).cross(wp[0] - wp[2]);
				PX_ASSERT(!normal.isZero());
				normal = normal.getNormalized();

				out << midt << fcolor <<
						Cm::DebugArrow(center, normal * fscale);
			}
		}

		if (mask & (PxU64(1) << PxVisualizationParameter::eCOLLISION_SHAPES))
		{
			const PxU32 scolor = PxU32(PxDebugColor::eARGB_MAGENTA);

			out << midt << scolor;	// PT: no need to output this for each segment!

			PxDebugLine* segments = out.reserveSegments(nbTriangles*3);
			for(PxU32 i=0; i<nbTriangles; i++)
			{
				const PxU32 index = results[i];
				PxVec3 wp[3];
				getTriangle(*this, index, wp, vertices, indices, absPose, has16BitIndices());
				segments[0] = PxDebugLine(wp[0], wp[1], scolor);
				segments[1] = PxDebugLine(wp[1], wp[2], scolor);
				segments[2] = PxDebugLine(wp[2], wp[0], scolor);
				segments+=3;
			}
		}

		if ((mask & (PxU64(1) << PxVisualizationParameter::eCOLLISION_EDGES)) && mExtraTrigData)
			visualizeActiveEdges(out, *this, nbTriangles, results, absPose, midt);

		PX_FREE(results);
	}
	else
	{
		if (fscale)
		{
			const PxU32 fcolor = PxU32(PxDebugColor::eARGB_DARKRED);

			for (PxU32 i=0; i<nbTriangles; i++)
			{
				PxVec3 wp[3];
				getTriangle(*this, i, wp, vertices, indices, absPose, has16BitIndices());

				const PxVec3 center = (wp[0] + wp[1] + wp[2]) / 3.0f;
				PxVec3 normal = (wp[0] - wp[1]).cross(wp[0] - wp[2]);
				PX_ASSERT(!normal.isZero());
				normal = normal.getNormalized();

				out << midt << fcolor <<
						Cm::DebugArrow(center, normal * fscale);
			}
		}

		if (mask & (PxU64(1) << PxVisualizationParameter::eCOLLISION_SHAPES))
		{
			PxU32 scolor = PxU32(PxDebugColor::eARGB_MAGENTA);

			out << midt << scolor;	// PT: no need to output this for each segment!

			PxVec3* transformed = reinterpret_cast<PxVec3*>(PX_ALLOC(sizeof(PxVec3)*nbVertices, "PxVec3"));
			for(PxU32 i=0;i<nbVertices;i++)
				transformed[i] = absPose.transform(vertices[i]);

			PxDebugLine* segments = out.reserveSegments(nbTriangles*3);
			for (PxU32 i=0; i<nbTriangles; i++)
			{
				PxVec3 wp[3];
				getTriangle(*this, i, wp, transformed, indices, has16BitIndices());
				const PxU32 localMaterialIndex = getTriangleMaterialIndex(i);
				scolor = colors[localMaterialIndex % colorCount];
				
				segments[0] = PxDebugLine(wp[0], wp[1], scolor);
				segments[1] = PxDebugLine(wp[1], wp[2], scolor);
				segments[2] = PxDebugLine(wp[2], wp[0], scolor);
				segments+=3;
			}

			PX_FREE(transformed);
		}

		if ((mask & (PxU64(1) << PxVisualizationParameter::eCOLLISION_EDGES)) && mExtraTrigData)
			visualizeActiveEdges(out, *this, nbTriangles, NULL, absPose, midt);
	}
}
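// Hedged sketch: the 'mask' parameter of debugVisualize() is a bitfield with one bit per
// PxVisualizationParameter entry, so a caller interested in shapes and active edges would
// build it roughly like this (illustrative only).
static PxU64 makeVisualizationMaskSketch()
{
	PxU64 mask = 0;
	mask |= PxU64(1) << PxVisualizationParameter::eCOLLISION_SHAPES;
	mask |= PxU64(1) << PxVisualizationParameter::eCOLLISION_EDGES;
	return mask;
}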
	PxU32 RevoluteJointSolverPrep(Px1DConstraint* constraints,
		PxVec3& body0WorldOffset,
		PxU32 /*maxConstraints*/,
		PxConstraintInvMassScale &invMassScale,
		const void* constantBlock,
		const PxTransform& bA2w,
		const PxTransform& bB2w)
	{
		const RevoluteJointData& data = *reinterpret_cast<const RevoluteJointData*>(constantBlock);
		invMassScale = data.invMassScale;

		const PxJointAngularLimitPair& limit = data.limit;

		bool limitEnabled = data.jointFlags & PxRevoluteJointFlag::eLIMIT_ENABLED;
		bool limitIsLocked = limitEnabled && limit.lower >= limit.upper;

		PxTransform cA2w = bA2w * data.c2b[0];
		PxTransform cB2w = bB2w * data.c2b[1];

		if(cB2w.q.dot(cA2w.q)<0.f)
			cB2w.q = -cB2w.q;

		body0WorldOffset = cB2w.p-bA2w.p;
		Ext::joint::ConstraintHelper ch(constraints, cB2w.p - bA2w.p, cB2w.p - bB2w.p);

		ch.prepareLockedAxes(cA2w.q, cB2w.q, cA2w.transformInv(cB2w.p), 7, PxU32(limitIsLocked ? 7 : 6));

		if(limitIsLocked)
			return ch.getCount();

		PxVec3 axis = cA2w.rotate(PxVec3(1.f,0,0));

		if(data.jointFlags & PxRevoluteJointFlag::eDRIVE_ENABLED)
		{
			Px1DConstraint *c = ch.getConstraintRow();

			c->solveHint = PxConstraintSolveHint::eNONE;

			c->linear0			= PxVec3(0);
			c->angular0			= -axis;
			c->linear1			= PxVec3(0);
			c->angular1			= -axis * data.driveGearRatio;

			c->velocityTarget	= data.driveVelocity;

			c->minImpulse = -data.driveForceLimit;
			c->maxImpulse = data.driveForceLimit;
			if(data.jointFlags & PxRevoluteJointFlag::eDRIVE_FREESPIN)
			{
				if(data.driveVelocity > 0)
					c->minImpulse = 0;
				if(data.driveVelocity < 0)
					c->maxImpulse = 0;
			}
			c->flags |= Px1DConstraintFlag::eHAS_DRIVE_LIMIT;
		}


		if(limitEnabled)
		{
			PxQuat cB2cAq = cA2w.q.getConjugate() * cB2w.q;
			PxQuat twist(cB2cAq.x,0,0,cB2cAq.w);

			PxReal magnitude = twist.normalize();
			PxReal tqPhi = physx::intrinsics::fsel(magnitude - 1e-6f, twist.x / (1.0f + twist.w), 0.f);

			ch.quarterAnglePair(tqPhi, data.tqLow, data.tqHigh, data.tqPad, axis, limit);
		}

		return ch.getCount();
	}
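// Side note on the limit math above (a sketch, not SDK code): the relative rotation cB2cAq
// is projected onto the joint axis by keeping only its x and w components; after
// normalization twist = (sin(a/2), 0, 0, cos(a/2)) for twist angle a, and the half-angle
// identity tan(a/4) = sin(a/2) / (1 + cos(a/2)) = twist.x / (1 + twist.w) gives the
// tan-quarter-angle value fed to quarterAnglePair().
static PxReal tanQuarterTwistAngle(const PxQuat& cB2cAq)
{
	PxQuat twist(cB2cAq.x, 0.0f, 0.0f, cB2cAq.w);
	const PxReal magnitude = twist.normalize();
	return magnitude > 1e-6f ? twist.x / (1.0f + twist.w) : 0.0f;
}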
ReadCheck::~ReadCheck()
{
	if (NxGetApexSDK()->isConcurrencyCheckEnabled() && mLockable)
	{
		// By checking if the NpScene::mConcurrentErrorCount has been incremented
		// we can detect if an erroneous read/write was performed during 
		// this objects lifetime. In this case we also print this function's
		// details so that the user can see which two API calls overlapped
		if (mLockable->getReadWriteErrorCount() != mErrorCount && !mLockable->isEnabled())
		{
			APEX_INTERNAL_ERROR("Leaving %s on thread %d, an API overlapping write on another thread was detected.", mName, PxU32(physx::shdfnd::Thread::getId()));
		}

		mLockable->stopRead();
	}
}
Example #29
void ParticleEmitterRate::stepInternal(ParticleData& particles, PxReal dt, const PxVec3& externalAcceleration, PxReal maxParticleVelocity)
{
	PX_ASSERT(mNumX > 0 && mNumY > 0);
	PxU32 numEmittedParticles = 0;

	//figure out how many particles have to be emitted with the given rate.
	mParticlesToEmit += mRate*dt;
	PxU32 numEmit = (PxU32)(mParticlesToEmit);
	if(numEmit == 0)
		return;
	
	PxU32 numLayers = (PxU32)(numEmit / (mNumX * mNumY)) + 1;
	PxReal layerDistance = dt * mVelocity / numLayers;

	PxU32 sparseMax = 0;

	//either shuffle or draw without repeat (approximation)
	bool denseEmission = (PxU32(PARTICLE_EMITTER_SPARSE_FACTOR*numEmit) > mNumSites);
	if(denseEmission)
	{
		initDenseSites();
	}
	else
	{
		sparseMax = PARTICLE_EMITTER_SPARSE_FACTOR*numEmit;
		mSites.resize(sparseMax);
	}

	// generate particles
	PxU32 l = 0;
	while(numEmit > 0)
	{
		PxVec3 layerVec = mAxisZ * (layerDistance * (PxReal)l);
		l++;

		if(denseEmission)
			shuffleDenseSites();
		else
			initSparseSiteHash(numEmit, sparseMax);

		for (PxU32 i = 0; i < mNumSites && numEmit > 0; i++)
		{
			PxU32 emissionSite;
			if (denseEmission)
				emissionSite = mSites[i];
			else
				emissionSite = pickSparseEmissionSite(sparseMax);

			PxU32 x = emissionSite / mNumY;
			PxU32 y = emissionSite % mNumY;

			PxReal offset = 0.0f;
			if (y%2) offset = mSpacingX * 0.5f;
				
			if (isOutsideShape(x,y,offset)) 
				continue;

			//position noise
			PxVec3 posNoise;
			posNoise.x = randInRange(-mRandomPos.x, mRandomPos.x);
			posNoise.y = randInRange(-mRandomPos.y, mRandomPos.y);	
			posNoise.z = randInRange(-mRandomPos.z, mRandomPos.z);	

			PxVec3 emissionPoint = mBasePos + layerVec + 
				mAxisX*(posNoise.x+offset+mSpacingX*x) + mAxisY*(posNoise.y+mSpacingY*y) + mAxisZ*posNoise.z;

			PxVec3 particleVelocity;
			computeSiteVelocity(particleVelocity, emissionPoint);

			bool isSpawned = spawnParticle(particles, numEmittedParticles, particles.maxParticles - particles.numParticles, emissionPoint, particleVelocity);
			if(isSpawned)
			{
				numEmit--;
				mParticlesToEmit -= 1.0f;
			}
			else
				return;
		}
	}
}
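// Minimal sketch of the rate accumulation at the top of stepInternal(): fractional particles
// carry over to the next step, so an emitter with rate 25/s and dt = 1/60 still emits a
// particle every few steps instead of rounding down to zero forever (simplified; the real
// code decrements the accumulator per successfully spawned particle).
struct EmissionRateAccumulator
{
	PxReal carry;
	EmissionRateAccumulator() : carry(0.0f) {}

	PxU32 step(PxReal rate, PxReal dt)
	{
		carry += rate * dt;
		const PxU32 emit = PxU32(carry);
		carry -= PxReal(emit);
		return emit;
	}
};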
Example #30
bool UDestructibleComponent::DoCustomNavigableGeometryExport(FNavigableGeometryExport& GeomExport) const
{
#if WITH_APEX
	if (ApexDestructibleActor == NULL)
	{
		return false;
	}

	NxDestructibleActor* DestrActor = const_cast<NxDestructibleActor*>(ApexDestructibleActor);

	const FTransform ComponentToWorldNoScale(ComponentToWorld.GetRotation(), ComponentToWorld.GetTranslation(), FVector(1.f));
	TArray<PxShape*> Shapes;
	Shapes.AddUninitialized(8);
	PxRigidDynamic** PActorBuffer = NULL;
	PxU32 PActorCount = 0;
	if (DestrActor->acquirePhysXActorBuffer(PActorBuffer, PActorCount
		, NxDestructiblePhysXActorQueryFlags::Static 
		| NxDestructiblePhysXActorQueryFlags::Dormant
		| NxDestructiblePhysXActorQueryFlags::Dynamic))
	{
		uint32 ShapesExportedCount = 0;

		while (PActorCount--)
		{
			const PxRigidDynamic* PActor = *PActorBuffer++;
			if (PActor != NULL)
			{
				const FTransform PActorGlobalPose = P2UTransform(PActor->getGlobalPose());

				const PxU32 ShapesCount = PActor->getNbShapes();
				if (ShapesCount > PxU32(Shapes.Num()))
				{
					Shapes.AddUninitialized(ShapesCount - Shapes.Num());
				}
				const PxU32 RetrievedShapesCount = PActor->getShapes(Shapes.GetData(), Shapes.Num());
				PxShape* const* ShapePtr = Shapes.GetData();
				for (PxU32 ShapeIndex = 0; ShapeIndex < RetrievedShapesCount; ++ShapeIndex, ++ShapePtr)
				{
					if (*ShapePtr != NULL)
					{
						const PxTransform LocalPose = (*ShapePtr)->getLocalPose();
						FTransform LocalToWorld = P2UTransform(LocalPose);
						LocalToWorld.Accumulate(PActorGlobalPose);

						switch((*ShapePtr)->getGeometryType())
						{
						case PxGeometryType::eCONVEXMESH:
							{
								PxConvexMeshGeometry Geometry;
								if ((*ShapePtr)->getConvexMeshGeometry(Geometry))
								{
									++ShapesExportedCount;

									// @todo address Geometry.scale not being used here
									GeomExport.ExportPxConvexMesh(Geometry.convexMesh, LocalToWorld);
								}
							}
							break;
						case PxGeometryType::eTRIANGLEMESH:
							{
								// @todo address Geometry.scale not being used here
								PxTriangleMeshGeometry Geometry;
								if ((*ShapePtr)->getTriangleMeshGeometry(Geometry))
								{
									++ShapesExportedCount;

									if ((Geometry.triangleMesh->getTriangleMeshFlags()) & PxTriangleMeshFlag::eHAS_16BIT_TRIANGLE_INDICES)
									{
										GeomExport.ExportPxTriMesh16Bit(Geometry.triangleMesh, LocalToWorld);
									}
									else
									{
										GeomExport.ExportPxTriMesh32Bit(Geometry.triangleMesh, LocalToWorld);
									}
								}
							}
							break;
						default:
							{
								UE_LOG(LogPhysics, Log, TEXT("UDestructibleComponent::DoCustomNavigableGeometryExport(): unhandled PxGeometryType, %d.")
									, int32((*ShapePtr)->getGeometryType()));
							}
							break;
						}
					}
				}
			}
		}
		ApexDestructibleActor->releasePhysXActorBuffer();

		INC_DWORD_STAT_BY(STAT_Navigation_DestructiblesShapesExported, ShapesExportedCount);
	}
#endif // WITH_APEX

	// we don't want a regular geometry export
	return false;
}