ReadWriteLock::ReadWriteLock()
{
	mImpl = reinterpret_cast<ReadWriteLockImpl*>(PX_ALLOC(sizeof(ReadWriteLockImpl), PX_DEBUG_EXP("ReadWriteLockImpl")));
	PX_PLACEMENT_NEW(mImpl, ReadWriteLockImpl);

	mImpl->readerCounter = 0;
}
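// Memory obtained with PX_ALLOC and constructed with PX_PLACEMENT_NEW must be torn down
// manually. A minimal sketch of the matching destructor, assuming ReadWriteLockImpl needs
// no cleanup beyond its own destructor:
//
//     ReadWriteLock::~ReadWriteLock()
//     {
//         mImpl->~ReadWriteLockImpl(); // explicit destructor call for the placement-new'd object
//         PX_FREE(mImpl);              // return the raw block to the allocator
//     }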
static bool fullContactsGenerationCapsuleConvex(const CapsuleV& capsule, const ConvexHullV& convexHull, const PsMatTransformV& aToB, const PsTransformV& transf0, const PsTransformV& transf1,
								PersistentContact* manifoldContacts, ContactBuffer& contactBuffer, const bool idtScale, PersistentContactManifold& manifold, Vec3VArg normal,
								const Vec3VArg closest, const FloatVArg tolerance, const FloatVArg contactDist, const bool doOverlapTest, Cm::RenderOutput* renderOutput, const FloatVArg toleranceScale)
{
	PX_UNUSED(renderOutput); // only referenced when PCM_LOW_LEVEL_DEBUG is enabled

	Gu::PolygonalData polyData;
	getPCMConvexData(convexHull, idtScale, polyData);

	// Build the support map in a stack buffer; the no-scale specialization skips redundant
	// scale transforms when the convex has an identity scale.
	PxU8 buff[sizeof(SupportLocalImpl<ConvexHullV>)];
	SupportLocal* map = idtScale
		? static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullNoScaleV>)(
			static_cast<const ConvexHullNoScaleV&>(convexHull), transf1, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScale))
		: static_cast<SupportLocal*>(PX_PLACEMENT_NEW(buff, SupportLocalImpl<ConvexHullV>)(
			convexHull, transf1, convexHull.vertex2Shape, convexHull.shape2Vertex, idtScale));

	PxU32 numContacts = 0;
	if (generateFullContactManifold(capsule, polyData, map, aToB, manifoldContacts, numContacts, contactDist, normal, closest, tolerance, doOverlapTest, toleranceScale))
	{
		if (numContacts > 0)
		{
			manifold.addBatchManifoldContacts2(manifoldContacts, numContacts);
			// transform the contact normal into world space
			normal = transf1.rotate(normal);
			manifold.addManifoldContactsToContactBuffer(contactBuffer, normal, transf0, capsule.radius, contactDist);
		}
		else if (!doOverlapTest)
		{
			// no new contacts were generated this pass: flush the contacts already cached in the manifold
			normal = transf1.rotate(normal);
			manifold.addManifoldContactsToContactBuffer(contactBuffer, normal, transf0, capsule.radius, contactDist);
		}

#if PCM_LOW_LEVEL_DEBUG
		if (renderOutput)
			manifold.drawManifold(*renderOutput, transf0, transf1);
#endif
		return true;
	}
	return false;
}
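// The SupportLocal map above uses placement new into a stack byte buffer to avoid a heap
// allocation on this hot path. A minimal sketch of the idiom with hypothetical Base/Derived
// names, assuming Derived is trivially destructible:
//
//     PxU8 storage[sizeof(Derived)];
//     Base* obj = PX_PLACEMENT_NEW(storage, Derived)(args);
//     // use obj through the Base interface; no delete needed, storage unwinds with the stack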
void PxsFluidDynamics::schedulePackets(PxsSphUpdateType updateType, PxBaseTask& continuation)
{
	mCurrentUpdateType = updateType;
	for (PxU32 i = 0; i < mNumTasks; ++i)
	{
		PX_ASSERT(mTaskData[i].beginPacketIndex != PX_INVALID_U16 && mTaskData[i].endPacketIndex != PX_INVALID_U16);

		// allocate each SPH task from the context's task pool and submit it against the continuation
		void* ptr = mParticleSystem.getContext().getTaskPool().allocate(sizeof(PxsFluidDynamicsSphTask));
		PxsFluidDynamicsSphTask* task = PX_PLACEMENT_NEW(ptr, PxsFluidDynamicsSphTask)(*this, i);
		task->setContinuation(&continuation);
		task->removeReference();
	}
}
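// Scheduling idiom used above: setContinuation() registers the follow-up task (adding a
// reference to it), and removeReference() drops the submission reference so the task becomes
// runnable once its dependencies resolve. A minimal sketch for a custom task, assuming the
// same PxLightCpuTask-style interface and a hypothetical MyTask type:
//
//     void* mem = taskPool.allocate(sizeof(MyTask));
//     MyTask* task = PX_PLACEMENT_NEW(mem, MyTask)(args);
//     task->setContinuation(&continuation); // continuation runs after task completes
//     task->removeReference();              // submit: task may now be dispatched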
Serializer *internalCreateSerializer(Serializer::SerializeType type, Traits *traits)
{
	switch ( type )
	{
		case Serializer::NST_XML:
			{
				void *buf = serializerMemAlloc(sizeof(XmlSerializer), traits);
				return buf ? PX_PLACEMENT_NEW(buf, XmlSerializer)(traits) : 0;
			}
		case Serializer::NST_BINARY:
			{
				void *buf = serializerMemAlloc(sizeof(BinSerializer), traits);
				return buf ? PX_PLACEMENT_NEW(buf, BinSerializer)(traits) : 0;
			}
		default:
			NX_PARAM_TRAITS_WARNING(
				traits,
				"Unknown serializer type: %d",
				(int)type );
			break;
	}

	return 0;
}
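// A minimal usage sketch, assuming the caller owns the returned serializer and releases it
// through the Serializer interface, returning memory via the same traits allocator:
//
//     Serializer* ser = internalCreateSerializer(Serializer::NST_XML, traits);
//     if (ser)
//     {
//         // ... serialize or deserialize ...
//         ser->release();
//     }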
DefaultCpuDispatcher::DefaultCpuDispatcher(PxU32 numThreads, PxU32* affinityMasks)
	: mQueueEntryPool(TASK_QUEUE_ENTRY_POOL_SIZE), mNumThreads(numThreads), mShuttingDown(false)
{
	PxU32 defaultAffinityMask = 0;

	if (!affinityMasks)
	{
		defaultAffinityMask = getAffinityMask(numThreads);
	}

	// initialize threads first, then start

	mWorkerThreads = reinterpret_cast<CpuWorkerThread*>(PX_ALLOC(numThreads * sizeof(CpuWorkerThread), PX_DEBUG_EXP("CpuWorkerThread")));
	if (mWorkerThreads)
	{
		for (PxU32 i = 0; i < numThreads; ++i)
		{
			PX_PLACEMENT_NEW(mWorkerThreads + i, CpuWorkerThread)();
			mWorkerThreads[i].initialize(this);
		}

		for (PxU32 i = 0; i < numThreads; ++i)
		{
			mWorkerThreads[i].start(shdfnd::Thread::getDefaultStackSize());
			if (affinityMasks)
			{
				mWorkerThreads[i].setAffinityMask(affinityMasks[i]);
			}
			else
			{
				mWorkerThreads[i].setAffinityMask(defaultAffinityMask);
#ifdef PX_X360
				defaultAffinityMask &= defaultAffinityMask - 1; // clear lowest bit
#endif
			}

			char threadName[32];
			string::sprintf_s(threadName, sizeof(threadName), "PxWorker%02u", i); // %u: thread index is unsigned
			mWorkerThreads[i].setName(threadName);
		}
	}
	else
	{
		mNumThreads = 0;
	}
}
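// A minimal construction sketch, assuming the PxDefaultCpuDispatcherCreate() factory from the
// extensions library, which wraps this constructor:
//
//     PxDefaultCpuDispatcher* dispatcher = PxDefaultCpuDispatcherCreate(4); // four worker threads
//     sceneDesc.cpuDispatcher = dispatcher; // hand to the scene descriptor before scene creation
//     // ... after the scene is released:
//     dispatcher->release();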
Foundation* Foundation::createInstance(PxU32 version, PxErrorCallback& errc, PxAllocatorCallback& alloc)
{
	if (version != PX_PHYSICS_VERSION)
	{
		// use a stack buffer here: the raw new[] previously used was never freed
		char buffer[256];
		physx::string::sprintf_s(buffer, sizeof(buffer), "Wrong version: foundation version is 0x%08x, tried to create 0x%08x", PX_PHYSICS_VERSION, version);
		errc.reportError(PxErrorCode::eINVALID_PARAMETER, buffer, __FILE__, __LINE__);
		return 0;
	}

	if (!mInstance)
	{
		// if we don't assign this here, the Foundation object can't create member
		// subobjects which require the allocator

		mInstance = reinterpret_cast<Foundation*>(
			alloc.allocate(sizeof(Foundation), "Foundation", __FILE__, __LINE__));

		if (mInstance)
		{
			PX_PLACEMENT_NEW(mInstance, Foundation)(errc, alloc);

			PX_ASSERT(mRefCount == 0);
			mRefCount = 1;

			// skip 0, which marks uninitialized timestamps in PX_WARN_ONCE
			mWarnOnceTimestap = (mWarnOnceTimestap == PX_MAX_U32) ? 1 : mWarnOnceTimestap + 1;

			return mInstance;
		}
		else
		{
			errc.reportError(PxErrorCode::eINTERNAL_ERROR, "Memory allocation for foundation object failed.", __FILE__, __LINE__);
		}
	}
	else
	{
		errc.reportError(PxErrorCode::eINVALID_OPERATION, "Foundation object exists already. Only one instance per process can be created.", __FILE__, __LINE__);
	}

	return 0;
}
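// A minimal usage sketch, assuming the public PxCreateFoundation() entry point (which
// forwards here) and the default allocator/error callbacks from the extensions library:
//
//     static PxDefaultAllocator     gAllocator;
//     static PxDefaultErrorCallback gErrorCallback;
//
//     PxFoundation* foundation = PxCreateFoundation(PX_PHYSICS_VERSION, gAllocator, gErrorCallback);
//     // ... create PxPhysics, scenes, etc. ...
//     foundation->release();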