Example #1
	ChUll * doMerge(ChUll *a,ChUll *b)
	{
		ChUll *ret = 0;
		// Gather the vertices of both hulls into a single combined point cloud.
		HaU32 combinedVertexCount = a->mVertexCount + b->mVertexCount;
		HaF32 *combinedVertices = (HaF32 *)HACD_ALLOC(combinedVertexCount*sizeof(HaF32)*3);
		HaF32 *dest = combinedVertices;
		memcpy(dest,a->mVertices, sizeof(HaF32)*3*a->mVertexCount);
		dest+=a->mVertexCount*3;
		memcpy(dest,b->mVertices,sizeof(HaF32)*3*b->mVertexCount);
		// Build a single convex hull around the combined point cloud.
		HullResult hresult;
		HullLibrary hl;
		HullDesc   desc;
		desc.mVcount       = combinedVertexCount;
		desc.mVertices     = combinedVertices;
		desc.mVertexStride = sizeof(hacd::HaF32)*3;
		desc.mMaxVertices = mMaxHullVertices;
		desc.mUseWuQuantizer = true;
		HullError hret = hl.CreateConvexHull(desc,hresult);
		HACD_ASSERT( hret == QE_OK );
		if ( hret == QE_OK )
		{
			ret = HACD_NEW(ChUll)(hresult.mNumOutputVertices, hresult.mOutputVertices, hresult.mNumTriangles, hresult.mIndices,mGuid++);
		}
		HACD_FREE(combinedVertices);
		hl.ReleaseResult(hresult);
		return ret;
	}
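A minimal caller-side sketch of how doMerge might be used, assuming two ChUll objects built with the constructor shown in Example #2; the input variable names are hypothetical placeholders and this is illustrative only, not part of the library source.

	// Hypothetical inputs: vertex/index buffers for two hulls (names are placeholders).
	ChUll *hullA = HACD_NEW(ChUll)(vcountA, verticesA, tcountA, indicesA, mGuid++);
	ChUll *hullB = HACD_NEW(ChUll)(vcountB, verticesB, tcountB, indicesB, mGuid++);

	ChUll *merged = doMerge(hullA, hullB); // returns NULL if the combined hull could not be built
	if ( merged )
	{
		// ... use merged->mVertices / merged->mIndices ...
		delete merged;
	}
	delete hullA;
	delete hullB;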
Example #2
	// Merge these input hulls.
	virtual hacd::HaU32 mergeHulls(const MergeHullVector &inputHulls,
		MergeHullVector &outputHulls,
		hacd::HaU32 mergeHullCount,
		hacd::HaF32 smallClusterThreshold,
		hacd::HaU32 maxHullVertices)
	{
		mGuid = 0;

		HaU32 count = (HaU32)inputHulls.size();
		mHasBeenTested = HACD_NEW(TestedMap)(count*count);
		mSmallClusterThreshold = smallClusterThreshold;
		mMaxHullVertices = maxHullVertices;
		mMergeNumHulls = mergeHullCount;

		mTotalVolume = 0;
		for (HaU32 i=0; i<inputHulls.size(); i++)
		{
			const MergeHull &h = inputHulls[i];
			ChUll *ch = HACD_NEW(ChUll)(h.mVertexCount,h.mVertices,h.mTriangleCount,h.mIndices,mGuid++);
			mChulls.push_back(ch);
			mTotalVolume+=ch->mVolume;
		}

		for(;;)
		{
			bool combined = combineHulls(); // merge the smallest hulls first, up to the max merge count.
			if ( !combined ) break;
		} 

		// Return the merged hulls to the caller.
		for (HaU32 i=0; i<mChulls.size(); i++)
		{
			ChUll *ch = mChulls[i];
			MergeHull mh;
			mh.mVertexCount = ch->mVertexCount;
			mh.mTriangleCount = ch->mTriangleCount;
			mh.mIndices = ch->mIndices;
			mh.mVertices = ch->mVertices;
			outputHulls.push_back(mh);
		}
		delete mHasBeenTested;
		return (HaU32)outputHulls.size();
	}
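A minimal usage sketch for this interface, assuming it is obtained through the createMergeHullsInterface factory shown in Example #4 and released with release() as in Example #8; the source hull buffers are hypothetical placeholders.

	MergeHullsInterface *mhi = createMergeHullsInterface();
	if ( mhi )
	{
		MergeHullVector inputHulls;
		MergeHullVector outputHulls;

		MergeHull mh;                          // one entry per source hull
		mh.mVertexCount   = hullVertexCount;   // hypothetical caller-side buffers
		mh.mVertices      = hullVertices;
		mh.mTriangleCount = hullTriangleCount;
		mh.mIndices       = hullIndices;
		inputHulls.push_back(mh);

		// Merge down to at most 8 hulls, no small-cluster culling, 64 vertices per hull.
		hacd::HaU32 mergedCount = mhi->mergeHulls(inputHulls, outputHulls, 8, 0.0f, 64);

		// ... copy the outputHulls data before releasing the interface ...
		mhi->release();
	}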
Example #3
	virtual void ConvexDecompResult(hacd::HaU32 hvcount,const hacd::HaF32 *hvertices,hacd::HaU32 htcount,const hacd::HaU32 *hindices)
	{
		ChUll *ch = HACD_NEW(ChUll)(hvcount,hvertices,htcount,hindices,mGuid++);
		if ( ch->mVolume > 0.00001f ) // keep only hulls with meaningful volume
		{
			mChulls.push_back(ch);
		}
		else
		{
			delete ch;
		}
	}
Example #4
MergeHullsInterface * createMergeHullsInterface(void)
{
	MyMergeHullsInterface *m = HACD_NEW(MyMergeHullsInterface);
	return static_cast< MergeHullsInterface *>(m);
}
Example #5
	HACD::AutoGeometry * createAutoGeometry()
	{
		HACD::MyAutoGeometry *g = HACD_NEW(HACD::MyAutoGeometry);
		return static_cast< HACD::AutoGeometry *>(g);
	}
Example #6
	// Merge these input hulls.
	virtual hacd::HaU32 mergeHulls(const MergeHullVector &inputHulls,
		MergeHullVector &outputHulls,
		hacd::HaU32 mergeHullCount,
		hacd::HaF32 smallClusterThreshold,
		hacd::HaU32 maxHullVertices,
		hacd::ICallback *callback,
		JOB_SWARM_STANDALONE::JobSwarmContext *jobSwarmContext)
	{
		mGuid = 0;

		HaU32 count = (HaU32)inputHulls.size();
		mHasBeenTested = HACD_NEW(TestedMap)(count*count);
		mSmallClusterThreshold = smallClusterThreshold;
		mMaxHullVertices = maxHullVertices;
		mMergeNumHulls = mergeHullCount;

		mTotalVolume = 0;
		for (HaU32 i=0; i<inputHulls.size(); i++)
		{
			const MergeHull &h = inputHulls[i];
			CHull *ch = HACD_NEW(CHull)(h.mVertexCount,h.mVertices,h.mTriangleCount,h.mIndices,mGuid++);
			mChulls.push_back(ch);
			mTotalVolume+=ch->mVolume;
			if ( callback )
			{
				HaF32 fraction = (HaF32)i / (HaF32)inputHulls.size();
				callback->ReportProgress("Gathering Hulls To Merge", fraction );
			}
		}

		// Estimate the number of merge passes, for progress reporting.
		hacd::HaU32 mergeCount = count - mergeHullCount;
		hacd::HaU32 mergeIndex = 0;

		for(;;)
		{
			if ( callback )
			{
				hacd::HaF32 fraction = (hacd::HaF32)mergeIndex / (hacd::HaF32)mergeCount;
				callback->ReportProgress("Merging", fraction );
			}
			bool combined = combineHulls(jobSwarmContext); // merge the smallest hulls first, up to the max merge count.
			if ( !combined ) break;
			mergeIndex++;
		} 

		// Return the merged hulls to the caller.
		for (HaU32 i=0; i<mChulls.size(); i++)
		{
			CHull *ch = mChulls[i];
			MergeHull mh;
			mh.mVertexCount = ch->mVertexCount;
			mh.mTriangleCount = ch->mTriangleCount;
			mh.mIndices = ch->mIndices;
			mh.mVertices = ch->mVertices;
			outputHulls.push_back(mh);
			if ( callback )
			{
				HaF32 fraction = (HaF32)i / (HaF32)mChulls.size();
				callback->ReportProgress("Gathering Merged Hulls Output", fraction );
			}

		}
		delete mHasBeenTested;
		return (HaU32)outputHulls.size();
	}
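A minimal sketch of a progress callback that could be passed as the callback parameter above; the ReportProgress signature is inferred from the calls in this example, and any other pure-virtual members hacd::ICallback may declare would also need to be overridden.

	class PrintfProgressCallback : public hacd::ICallback
	{
	public:
		// Signature inferred from the ReportProgress calls above.
		virtual void ReportProgress(const char *message, hacd::HaF32 fraction)
		{
			printf("%s : %.1f%%\n", message, fraction * 100.0f); // requires <stdio.h>
		}
	};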
Example #7
ThreadSafeQueue * createLockFreeQ(void)
{
	MyLockFreeQ *m = HACD_NEW(MyLockFreeQ);
	return static_cast< ThreadSafeQueue *>(m);
}
Example #8
    virtual hacd::HaU32	performHACD(const Desc &desc)
    {
        hacd::HaU32 ret = 0;
        releaseHACD();

        if ( desc.mVertexCount )
        {
            {
                dgMeshEffect mesh(true);

                float normal[3] = { 0,1,0 };
                float uv[2] = { 0,0 };

                hacd::HaI32 *faceIndexCount = (hacd::HaI32 *)HACD_ALLOC(sizeof(hacd::HaI32)*desc.mTriangleCount);
                hacd::HaI32 *dummyIndex = (hacd::HaI32 *)HACD_ALLOC(sizeof(hacd::HaI32)*desc.mTriangleCount*3);

                for (hacd::HaU32 i=0; i<desc.mTriangleCount; i++)
                {
                    faceIndexCount[i] = 3;
                    dummyIndex[i*3+0] = 0;
                    dummyIndex[i*3+1] = 0;
                    dummyIndex[i*3+2] = 0;
                }

                mesh.BuildFromVertexListIndexList(desc.mTriangleCount,faceIndexCount,dummyIndex,
                                                  desc.mVertices,sizeof(hacd::HaF32)*3,(const hacd::HaI32 *const)desc.mIndices,
                                                  normal,sizeof(hacd::HaF32)*3,dummyIndex,
                                                  uv,sizeof(hacd::HaF32)*2,dummyIndex,
                                                  uv,sizeof(hacd::HaF32)*2,dummyIndex);

                dgMeshEffect *result = mesh.CreateConvexApproximation(desc.mConcavity,desc.mMaxHullCount, desc.mCallback);

                if ( result )
                {
                    // now we build hulls for each connected surface...
                    dgPolyhedra segment;
                    result->BeginConectedSurface();
                    if ( result->GetConectedSurface(segment))
                    {
                        dgMeshEffect *solid = HACD_NEW(dgMeshEffect)(segment,*result);
                        while ( solid )
                        {
                            dgConvexHull3d *hull = solid->CreateConvexHull(0.00001,desc.mMaxHullVertices);
                            if ( hull )
                            {
                                Hull h;
                                h.mVertexCount = hull->GetVertexCount();
                                h.mVertices = (hacd::HaF32 *)HACD_ALLOC( sizeof(hacd::HaF32)*3*h.mVertexCount);
                                for (hacd::HaU32 i=0; i<h.mVertexCount; i++)
                                {
                                    hacd::HaF32 *dest = (hacd::HaF32 *)&h.mVertices[i*3];
                                    const dgBigVector &source = hull->GetVertex(i);
                                    dest[0] = (hacd::HaF32)source.m_x;
                                    dest[1] = (hacd::HaF32)source.m_y;
                                    dest[2] = (hacd::HaF32)source.m_z;
                                }

                                h.mTriangleCount = hull->GetCount();
                                hacd::HaU32 *destIndices = (hacd::HaU32 *)HACD_ALLOC(sizeof(hacd::HaU32)*3*h.mTriangleCount);
                                h.mIndices = destIndices;

                                dgList<dgConvexHull3DFace>::Iterator iter(*hull);
                                for (iter.Begin(); iter; iter++)
                                {
                                    dgConvexHull3DFace &face = (*iter);
                                    destIndices[0] = face.m_index[0];
                                    destIndices[1] = face.m_index[1];
                                    destIndices[2] = face.m_index[2];
                                    destIndices+=3;
                                }

                                mHulls.push_back(h);

                                // the hull data has been copied into 'h'; release the dg hull
                                delete hull;
                            }

                            delete solid;
                            solid = NULL;
                            dgPolyhedra nextSegment;
                            hacd::HaI32 moreSegments = result->GetConectedSurface(nextSegment);
                            if ( moreSegments )
                            {
                                solid = HACD_NEW(dgMeshEffect)(nextSegment,*result);
                            }
                            else
                            {
                                result->EndConectedSurface();
                            }
                        }
                    }

                    delete result;
                }
                ret = (HaU32)mHulls.size();
            }
        }

        //if (desc.mCallback)
        //	desc.mCallback->ReportProgress("+ Merging Hulls\n", 99.0f);

        if ( ret && ((ret > desc.mMaxMergeHullCount) ||
                     (desc.mSmallClusterThreshold != 0.0f)) )
        {
            MergeHullsInterface *mhi = createMergeHullsInterface();
            if ( mhi )
            {
                MergeHullVector inputHulls;
                MergeHullVector outputHulls;
                for (hacd::HaU32 i=0; i<ret; i++)
                {
                    Hull &h = mHulls[i];
                    MergeHull mh;
                    mh.mTriangleCount = h.mTriangleCount;
                    mh.mVertexCount = h.mVertexCount;
                    mh.mVertices = h.mVertices;
                    mh.mIndices = h.mIndices;
                    inputHulls.push_back(mh);
                }

                ret = mhi->mergeHulls(inputHulls,outputHulls,desc.mMaxMergeHullCount, desc.mSmallClusterThreshold + FLT_EPSILON, desc.mMaxHullVertices);

                for (HaU32 i=0; i<ret; i++)
                {
                    Hull &h = mHulls[i];
                    releaseHull(h);
                }
                mHulls.clear();

                for (hacd::HaU32 i=0; i<outputHulls.size(); i++)
                {
                    Hull h;
                    const MergeHull &mh = outputHulls[i];
                    h.mTriangleCount =  mh.mTriangleCount;
                    h.mVertexCount = mh.mVertexCount;
                    h.mIndices = (HaU32 *)HACD_ALLOC(sizeof(HaU32)*3*h.mTriangleCount);
                    h.mVertices = (HaF32 *)HACD_ALLOC(sizeof(HaF32)*3*h.mVertexCount);
                    memcpy((HaU32 *)h.mIndices,mh.mIndices,sizeof(HaU32)*3*h.mTriangleCount);
                    memcpy((HaF32 *)h.mVertices,mh.mVertices,sizeof(HaF32)*3*h.mVertexCount);
                    mHulls.push_back(h);
                }

                ret = (HaU32)mHulls.size();

                mhi->release();
            }
        }

        return ret;
    }
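A hypothetical driver sketch for this entry point; the Desc fields set below are only those referenced in the example above, createHACD_API() comes from Example #9, the tuning values are placeholders, and the accessors for reading back the resulting hulls are omitted because they do not appear in this excerpt.

    HACD_API *api = createHACD_API();           // factory from Example #9
    if ( api )
    {
        HACD_API::Desc desc;                    // assumed to be the descriptor type used above
        desc.mVertexCount           = vertexCount;   // hypothetical input mesh (x,y,z triples)
        desc.mVertices              = vertices;
        desc.mTriangleCount         = triangleCount;
        desc.mIndices               = indices;
        desc.mConcavity             = 0.2f;          // placeholder tuning values
        desc.mMaxHullCount          = 256;
        desc.mMaxHullVertices       = 64;
        desc.mMaxMergeHullCount     = 24;
        desc.mSmallClusterThreshold = 0.0f;
        desc.mCallback              = NULL;

        hacd::HaU32 hullCount = api->performHACD(desc);
        // ... read the resulting hulls through whatever accessors HACD_API exposes ...
    }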
Example #9
HACD_API * createHACD_API(void)
{
    MyHACD_API *m = HACD_NEW(MyHACD_API);
    return static_cast<HACD_API *>(m);
}