Example #1
	// Merge these input hulls.
	virtual hacd::HaU32 mergeHulls(const MergeHullVector &inputHulls,
		MergeHullVector &outputHulls,
		hacd::HaU32 mergeHullCount,
		hacd::HaF32 smallClusterThreshold,
		hacd::HaU32 maxHullVertices)
	{
		mGuid = 0;

		HaU32 count = (HaU32)inputHulls.size();
		// Pair-wise memo, sized for every (hullA,hullB) combination, so a merge
		// test is never repeated.
		mHasBeenTested = HACD_NEW(TestedMap)(count*count);
		mSmallClusterThreshold = smallClusterThreshold;
		mMaxHullVertices = maxHullVertices;
		mMergeNumHulls = mergeHullCount;

		mTotalVolume = 0;
		for (HaU32 i=0; i<inputHulls.size(); i++)
		{
			const MergeHull &h = inputHulls[i];
			CHull *ch = HACD_NEW(CHull)(h.mVertexCount,h.mVertices,h.mTriangleCount,h.mIndices,mGuid++);
			mChulls.push_back(ch);
			mTotalVolume+=ch->mVolume;
		}

		for(;;)
		{
			bool combined = combineHulls(); // merge the smallest hulls first, up to the maximum merge count.
			if ( !combined ) break;
		} 

		// Return the results.
		for (HaU32 i=0; i<mChulls.size(); i++)
		{
			CHull *ch = mChulls[i];
			MergeHull mh;
			mh.mVertexCount = ch->mVertexCount;
			mh.mTriangleCount = ch->mTriangleCount;
			mh.mIndices = ch->mIndices;
			mh.mVertices = ch->mVertices;
			outputHulls.push_back(mh);
		}
		delete mHasBeenTested;
		return (HaU32)outputHulls.size();
	}
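
A minimal caller sketch for mergeHulls() above. The factory createMergeHullsInterface() and the release() cleanup call are assumptions in the spirit of the HACD interfaces; verify both against your copy of MergeHulls.h.

	#include "MergeHulls.h" // assumed header name

	hacd::HaU32 mergeDown(const MergeHullVector &rawHulls, MergeHullVector &mergedHulls)
	{
		MergeHullsInterface *mhi = createMergeHullsInterface(); // assumed factory
		hacd::HaU32 finalCount = mhi->mergeHulls(rawHulls,
			mergedHulls,
			32,     // stop merging once at or below 32 hulls
			0.1f,   // small-cluster threshold: tiny hulls get absorbed regardless
			64);    // cap each output hull at 64 vertices
		mhi->release(); // assumed cleanup method
		return finalCount;
	}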
Example #2
unsigned int ConvexBuilder::process(const DecompDesc &desc)
{

	unsigned int ret = 0;

	MAXDEPTH        = desc.mDepth;
	CONCAVE_PERCENT = desc.mCpercent;
	MERGE_PERCENT   = desc.mPpercent;


	// Recursively split the mesh; 'this' is the decomposition callback that
	// receives each candidate hull.
	calcConvexDecomposition(desc.mVcount, desc.mVertices, desc.mTcount, desc.mIndices, this, 0, 0);


	while ( combineHulls() ); // keep combining hulls until I can't combine any more...

	for (unsigned int i=0; i<mChulls.size(); i++)
	{
		CHull *cr = mChulls[i];

		// before we hand it back to the application, we need to regenerate the hull based on the
		// limits given by the user.

		const ConvexResult &c = *cr->mResult; // the high resolution hull...

		HullResult result;
		HullLibrary hl;
		HullDesc   hdesc;

		hdesc.SetHullFlag(QF_TRIANGLES);

		hdesc.mVcount       = c.mHullVcount;
		hdesc.mVertices     = c.mHullVertices;
		hdesc.mVertexStride = sizeof(float)*3;
		hdesc.mMaxVertices  = desc.mMaxVertices; // maximum number of vertices allowed in the output

		if ( desc.mSkinWidth  )
		{
			hdesc.mSkinWidth = desc.mSkinWidth;
			hdesc.SetHullFlag(QF_SKIN_WIDTH); // do skin width computation.
		}

		HullError hret = hl.CreateConvexHull(hdesc,result);

		if ( hret == QE_OK )
		{
			ConvexResult r(result.mNumOutputVertices, result.mOutputVertices, result.mNumFaces, result.mIndices);

			r.mHullVolume = computeMeshVolume( result.mOutputVertices, result.mNumFaces, result.mIndices ); // the volume of the hull.

			// compute the best fit OBB
			computeBestFitOBB( result.mNumOutputVertices, result.mOutputVertices, sizeof(float)*3, r.mOBBSides, r.mOBBTransform );

			r.mOBBVolume = r.mOBBSides[0] * r.mOBBSides[1] *r.mOBBSides[2]; // compute the OBB volume.

			fm_getTranslation( r.mOBBTransform, r.mOBBCenter );      // get the translation component of the 4x4 matrix.

			fm_matrixToQuat( r.mOBBTransform, r.mOBBOrientation );   // extract the orientation as a quaternion.

			r.mSphereRadius = computeBoundingSphere( result.mNumOutputVertices, result.mOutputVertices, r.mSphereCenter );
			r.mSphereVolume = fm_sphereVolume( r.mSphereRadius );


			mCallback->ConvexDecompResult(r);
		}

		hl.ReleaseResult (result);


		delete cr;
	}

	ret = (unsigned int)mChulls.size();

	mChulls.clear();

	return ret;
}
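
A minimal driver sketch for process(). It assumes ConvexDecompInterface declares the ConvexDecompResult() hook used above and that ConvexBuilder takes the callback in its constructor; check both against your ConvexDecomposition headers.

#include <stdio.h>

class HullCollector : public ConvexDecompInterface
{
public:
	HullCollector(void) : mHullCount(0) { }

	virtual void ConvexDecompResult(ConvexResult &r)
	{
		// Each finished hull arrives with its volume, best-fit OBB and
		// bounding sphere already computed by process().
		printf("hull %u: volume %0.4f\n", mHullCount, r.mHullVolume);
		mHullCount++;
	}

	unsigned int mHullCount;
};

unsigned int decompose(unsigned int vcount, const float *vertices,
					   unsigned int tcount, const unsigned int *indices)
{
	HullCollector collector;

	DecompDesc desc;
	desc.mVcount      = vcount;
	desc.mVertices    = vertices;
	desc.mTcount      = tcount;
	desc.mIndices     = indices;
	desc.mDepth       = 5;    // recursion depth of the splitting phase
	desc.mCpercent    = 5.0f; // concavity threshold, in percent
	desc.mPpercent    = 5.0f; // merge threshold, in percent
	desc.mMaxVertices = 32;   // vertex cap per output hull
	desc.mSkinWidth   = 0.0f; // no skin inflation

	ConvexBuilder cb(&collector); // assumed constructor signature
	return cb.process(desc);
}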
	// Merge these input hulls.
	virtual hacd::HaU32 mergeHulls(const MergeHullVector &inputHulls,
		MergeHullVector &outputHulls,
		hacd::HaU32 mergeHullCount,
		hacd::HaF32 smallClusterThreshold,
		hacd::HaU32 maxHullVertices,
		hacd::ICallback *callback,
		JOB_SWARM_STANDALONE::JobSwarmContext *jobSwarmContext)
	{
		mGuid = 0;

		HaU32 count = (HaU32)inputHulls.size();
		mHasBeenTested = HACD_NEW(TestedMap)(count*count);
		mSmallClusterThreshold = smallClusterThreshold;
		mMaxHullVertices = maxHullVertices;
		mMergeNumHulls = mergeHullCount;

		mTotalVolume = 0;
		for (HaU32 i=0; i<inputHulls.size(); i++)
		{
			const MergeHull &h = inputHulls[i];
			CHull *ch = HACD_NEW(CHull)(h.mVertexCount,h.mVertices,h.mTriangleCount,h.mIndices,mGuid++);
			mChulls.push_back(ch);
			mTotalVolume+=ch->mVolume;
			if ( callback )
			{
				HaF32 fraction = (HaF32)i / (HaF32)inputHulls.size();
				callback->ReportProgress("Gathering Hulls To Merge", fraction );
			}
		}

		// Expected number of merge passes, used only for progress reporting;
		// guarded so it never underflows when count <= mergeHullCount.
		hacd::HaU32 mergeCount = ( count > mergeHullCount ) ? count - mergeHullCount : 1;
		hacd::HaU32 mergeIndex = 0;

		for(;;)
		{
			if ( callback )
			{
				hacd::HaF32 fraction = (hacd::HaF32)mergeIndex / (hacd::HaF32)mergeCount;
				callback->ReportProgress("Merging", fraction );
			}
			bool combined = combineHulls(jobSwarmContext); // merge the smallest hulls first, up to the maximum merge count.
			if ( !combined ) break;
			mergeIndex++;
		} 

		// Return the results.
		for (HaU32 i=0; i<mChulls.size(); i++)
		{
			CHull *ch = mChulls[i];
			MergeHull mh;
			mh.mVertexCount = ch->mVertexCount;
			mh.mTriangleCount = ch->mTriangleCount;
			mh.mIndices = ch->mIndices;
			mh.mVertices = ch->mVertices;
			outputHulls.push_back(mh);
			if ( callback )
			{
				HaF32 fraction = (HaF32)i / (HaF32)mChulls.size();
				callback->ReportProgress("Gathering Merged Hulls Output", fraction );
			}

		}
		delete mHasBeenTested;
		return (HaU32)outputHulls.size();
	}
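
The callback-aware overload above only needs ReportProgress() from hacd::ICallback. Below is a minimal console implementation; the exact base-class signature (return type, any cancellation hook) is an assumption to verify against the real ICallback declaration.

	#include <stdio.h>

	class ConsoleProgress : public hacd::ICallback
	{
	public:
		// Matches the two-argument calls made above; 'fraction' runs 0..1
		// within each reported phase.
		virtual void ReportProgress(const char *message, hacd::HaF32 fraction)
		{
			printf("%s: %0.1f%%\n", message, fraction * 100.0f);
		}
	};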