Example #1
bool BigConvexData::Load(PxInputStream& stream)
{
	// Import header
	PxU32 Version;
	bool Mismatch;
	if(!ReadHeader('S', 'U', 'P', 'M', Version, Mismatch, stream))
		return false;

	// Load base gaussmap
//	if(!GaussMap::Load(stream))	return false;

		// Import header
		if(!ReadHeader('G', 'A', 'U', 'S', Version, Mismatch, stream))
			return false;

		// Import basic info
		mData.mSubdiv		= Ps::to16(readDword(Mismatch, stream));
		mData.mNbSamples	= Ps::to16(readDword(Mismatch, stream));

	// Load map data
	mData.mSamples = (PxU8*)PX_ALLOC(sizeof(PxU8)*mData.mNbSamples*2, PX_DEBUG_EXP("BigConvex Samples Data "));

	// These byte buffers shouldn't need converting
	stream.read(mData.mSamples, sizeof(PxU8)*mData.mNbSamples*2);

	// Load the valencies
	return VLoad(stream);
}
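
Both Load() and VLoad() begin by validating a four-character chunk tag and reading a version, with Mismatch recording whether the file's byte order differs from the host's so that readDword/readWordBuffer can swap values as they go. ReadHeader itself is not part of these examples; the following is a minimal sketch of that pattern under an assumed on-stream layout (tag, one-byte endianness marker, version). The ByteStream wrapper and readChunkHeader are illustrative names, not the actual PhysX helpers.

#include <cstdint>
#include <istream>

// Hypothetical minimal stream wrapper standing in for PxInputStream.
struct ByteStream
{
	std::istream& in;
	uint32_t read(void* dst, uint32_t size)
	{
		in.read(static_cast<char*>(dst), std::streamsize(size));
		return uint32_t(in.gcount());
	}
};

// Sketch of a chunk-header reader in the spirit of ReadHeader('S','U','P','M', ...):
// verify a 4-character tag, detect an endianness mismatch between file and host,
// and return the (possibly byte-swapped) chunk version.
static bool readChunkHeader(char a, char b, char c, char d,
                            uint32_t& version, bool& mismatch, ByteStream& stream)
{
	char tag[4];
	if(stream.read(tag, 4) != 4 || tag[0]!=a || tag[1]!=b || tag[2]!=c || tag[3]!=d)
		return false;                                  // wrong or truncated chunk

	uint8_t fileLittleEndian;                          // assumed 1-byte endianness marker
	if(stream.read(&fileLittleEndian, 1) != 1)
		return false;
	const uint16_t probe = 1;
	const bool hostLittleEndian = *reinterpret_cast<const uint8_t*>(&probe) == 1;
	mismatch = (fileLittleEndian != 0) != hostLittleEndian;

	if(stream.read(&version, 4) != 4)
		return false;
	if(mismatch)                                       // swap bytes, as readDword would
		version = (version >> 24) | ((version >> 8) & 0xFF00u) |
		          ((version << 8) & 0xFF0000u) | (version << 24);
	return true;
}
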
Example #2
bool BigConvexData::VLoad(PxInputStream& stream)
{
	// Import header
	PxU32 Version;
	bool Mismatch;
	if(!ReadHeader('V', 'A', 'L', 'E', Version, Mismatch, stream))
		return false;

	mData.mNbVerts		= readDword(Mismatch, stream);
	mData.mNbAdjVerts	= readDword(Mismatch, stream);

	PX_FREE(mVBuffer);

	// PT: align Gu::Valency?
	const PxU32 numVerts = (mData.mNbVerts+3)&~3;
	const PxU32 TotalSize = sizeof(Gu::Valency)*numVerts + sizeof(PxU8)*mData.mNbAdjVerts;
	mVBuffer = PX_ALLOC(TotalSize, PX_DEBUG_EXP("BigConvexData data"));
	mData.mValencies		= (Gu::Valency*)mVBuffer;
	mData.mAdjacentVerts	= ((PxU8*)mVBuffer) + sizeof(Gu::Valency)*numVerts;

	PX_ASSERT(0 == (size_t(mData.mAdjacentVerts) & 0xf));
	PX_ASSERT(Version==2);

	{
		PxU16* temp = (PxU16*)mData.mValencies;

		PxU32 MaxIndex = readDword(Mismatch, stream);
		ReadIndices(Ps::to16(MaxIndex), mData.mNbVerts, temp, stream, Mismatch);

		// We transform from:
		//
		// |5555|4444|3333|2222|1111|----|----|----|----|----|
		//
		// to:
		//
		// |5555|----|4444|----|3333|----|2222|----|1111|----|
		//
		for(PxU32 i=0;i<mData.mNbVerts;i++)
			mData.mValencies[mData.mNbVerts-i-1].mCount = temp[mData.mNbVerts-i-1];
	}
	stream.read(mData.mAdjacentVerts, mData.mNbAdjVerts);

	// Recreate offsets
	CreateOffsets();

	return true;
}
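
The count-spreading loop at the end of VLoad() works because Gu::Valency is twice as wide as the 16-bit counts that ReadIndices() packs into the front of the buffer, and because the loop walks the array from the back so no unread count is overwritten. A self-contained sketch of that idea follows; the Valency stand-in (16-bit mCount plus 16-bit mOffset) is an assumption based on how the loader uses it.

#include <cstdint>

// Stand-in for Gu::Valency as the loader above appears to use it:
// a 16-bit neighbour count plus a 16-bit offset filled in later by CreateOffsets().
struct Valency
{
	uint16_t mCount;
	uint16_t mOffset;
};

// The stream stores one 16-bit count per vertex; they arrive packed at the start of the
// Valency buffer. Spreading them in place must run from the back: writing
// valencies[j].mCount touches temp[2*j], which never clobbers a temp[k] with k < j that
// is still waiting to be read.
static void spreadCounts(Valency* valencies, uint32_t nbVerts)
{
	uint16_t* temp = reinterpret_cast<uint16_t*>(valencies);
	for(uint32_t i = 0; i < nbVerts; i++)
	{
		const uint32_t j = nbVerts - i - 1;
		valencies[j].mCount = temp[j];
	}
}
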
Example #3
static bool convexHullLoad(Gu::ConvexHullData& data, PxInputStream& stream, PxBitAndDword& bufferSize)
{
	PxU32 version;
	bool Mismatch;
	if(!ReadHeader('C', 'L', 'H', 'L', version, Mismatch, stream))
		return false;

	if(!ReadHeader('C', 'V', 'H', 'L', version, Mismatch, stream))
		return false;

	PxU32 Nb;

	// Import figures
	{
		PxU32 tmp[4];
		ReadDwordBuffer(tmp, 4, Mismatch, stream);
		data.mNbHullVertices	= Ps::to8(tmp[0]);
		data.mNbEdges			= Ps::to16(tmp[1]);
		data.mNbPolygons		= Ps::to8(tmp[2]);
		Nb						= tmp[3];
	}

	//AM: In practice the old aligner approach wastes 20 bytes, and there is no reason to 20-byte-align this data.
	//I changed the code to plain 4-byte alignment for the time being.
	//On consoles, if anything, we would need 16-byte alignment for vector loads to make sense, which would have to be done by padding the data structures.
	PX_ASSERT(sizeof(Gu::HullPolygonData) % sizeof(PxReal) == 0);	//otherwise please pad it.
	PX_ASSERT(sizeof(PxVec3) % sizeof(PxReal) == 0);

	PxU32 bytesNeeded = computeBufferSize(data, Nb);

	// Load() can be called for an existing convex mesh. In that case we need to free the memory first.
	PX_FREE(data.mPolygons);

	bufferSize = Nb;
	void* mDataMemory = PX_ALLOC(bytesNeeded, "ConvexHullData data");

	PxU8* address = reinterpret_cast<PxU8*>(mDataMemory);

	data.mPolygons				= reinterpret_cast<Gu::HullPolygonData*>(address);	address += sizeof(Gu::HullPolygonData) * data.mNbPolygons;
	PxVec3* mDataHullVertices	= reinterpret_cast<PxVec3*>(address);				address += sizeof(PxVec3) * data.mNbHullVertices;
	PxU8* mDataFacesByEdges8	= address;											address += sizeof(PxU8) * data.mNbEdges * 2;
	PxU8* mDataFacesByVertices8 = address;											address += sizeof(PxU8) * data.mNbHullVertices * 3;
	PxU16* mEdges				= reinterpret_cast<PxU16*>(address);				address += data.mNbEdges.isBitSet() ? (sizeof(PxU16) * data.mNbEdges * 2) : 0;
	PxU8* mDataVertexData8		= address;											address += sizeof(PxU8) * Nb;	// PT: leave that one last, so that we don't need to serialize "Nb"

	PX_ASSERT(!(size_t(mDataHullVertices) % sizeof(PxReal)));
	PX_ASSERT(!(size_t(data.mPolygons) % sizeof(PxReal)));
	PX_ASSERT(size_t(address)<=size_t(mDataMemory)+bytesNeeded);

	// Import vertices
	readFloatBuffer(&mDataHullVertices->x, PxU32(3*data.mNbHullVertices), Mismatch, stream);

	if(version<=6)
	{
		PxU16 useUnquantizedNormals = readWord(Mismatch, stream);
		PX_UNUSED(useUnquantizedNormals);
	}

	// Import polygons
	stream.read(data.mPolygons, data.mNbPolygons*sizeof(Gu::HullPolygonData));

	if(Mismatch)
	{
		for(PxU32 i=0;i<data.mNbPolygons;i++)
			flipData(data.mPolygons[i]);
	}

	stream.read(mDataVertexData8, Nb);
	stream.read(mDataFacesByEdges8, PxU32(data.mNbEdges*2));
	if(version <= 5)
	{
		//KS - we need to compute faces-by-vertices here

		bool noPlaneShift = false;
		for(PxU32 i=0; i< data.mNbHullVertices; ++i)
		{
			PxU32 count = 0;
			PxU8 inds[3];
			for(PxU32 j=0; j<data.mNbPolygons; ++j)
			{
				Gu::HullPolygonData& polygon = data.mPolygons[j];
				for(PxU32 k=0; k< polygon.mNbVerts; ++k)
				{
					PxU8 index = mDataVertexData8[polygon.mVRef8 + k];
					if(i == index)
					{
						//Found a polygon
						inds[count++] = Ps::to8(j);
						break;
					}
				}
				if(count == 3)
					break;
			}
			// Each vertex is expected to reference exactly 3 polygons; fall back below if it does not
			//PX_ASSERT(count == 3);
			if(count == 3)
			{
				mDataFacesByVertices8[i*3+0] = inds[0];
				mDataFacesByVertices8[i*3+1] = inds[1];
				mDataFacesByVertices8[i*3+2] = inds[2];
			}
			else
			{
				noPlaneShift = true;
				break;
			}
		}

		if(noPlaneShift)
		{
			for(PxU32 a = 0; a < data.mNbHullVertices; ++a)
			{
				mDataFacesByVertices8[a*3] = 0xFF;
				mDataFacesByVertices8[a*3+1] = 0xFF;
				mDataFacesByVertices8[a*3+2] = 0xFF;
			}
		}

	}
	else
		stream.read(mDataFacesByVertices8, PxU32(data.mNbHullVertices * 3)); 

	if (data.mNbEdges.isBitSet())
	{
		if (version <= 7)
		{
			for (PxU32 a = 0; a < PxU32(data.mNbEdges * 2); ++a)
			{
				mEdges[a] = 0xFFFF;
			}
		}
		else
		{
			readWordBuffer(mEdges, PxU32(data.mNbEdges * 2), Mismatch, stream);
		}
	}
	return true;
}
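
computeBufferSize() is not shown, but the pointer carving above fixes what it must return: the combined size of the six sub-buffers taken out of the single allocation. Below is a hedged sketch of such a computation; the struct sizes are passed in so nothing from the Gu namespace needs redeclaring, and edgesPresent stands in for data.mNbEdges.isBitSet().

#include <cstdint>

// Hypothetical sketch of a size computation mirroring the pointer arithmetic in
// convexHullLoad(). polygonDataSize/vec3Size correspond to sizeof(Gu::HullPolygonData)
// and sizeof(PxVec3); nbVertexData is the "Nb" value the loader reads from the stream.
static uint32_t computeBufferSizeSketch(uint32_t polygonDataSize, uint32_t vec3Size,
                                        uint32_t nbPolygons, uint32_t nbHullVertices,
                                        uint32_t nbEdges, bool edgesPresent,
                                        uint32_t nbVertexData)
{
	uint32_t bytes = 0;
	bytes += polygonDataSize * nbPolygons;                            // polygon records
	bytes += vec3Size * nbHullVertices;                               // hull vertices
	bytes += uint32_t(sizeof(uint8_t)) * nbEdges * 2;                 // faces-by-edges table
	bytes += uint32_t(sizeof(uint8_t)) * nbHullVertices * 3;          // faces-by-vertices table
	bytes += edgesPresent ? uint32_t(sizeof(uint16_t)) * nbEdges * 2 : 0; // optional edge vertex pairs
	bytes += nbVertexData;                                            // polygon vertex index data ("Nb")
	return bytes;
}
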