dgVector dgMatrix::CalcPitchYawRoll () const
{
	// Decompose this rotation matrix into Euler angles (pitch, yaw, roll),
	// matching the composition dgPitchMatrix(pitch) * dgYawMatrix(yaw) * dgRollMatrix(roll).
	const hacd::HaF32 minSin = hacd::HaF32(0.99995f);

	const dgMatrix& matrix = *this;

	hacd::HaF32 roll = hacd::HaF32(0.0f);
	hacd::HaF32 pitch = hacd::HaF32(0.0f);
	// the clamp keeps dgAsin's argument strictly inside [-1, 1] despite rounding noise
	hacd::HaF32 yaw = dgAsin (-ClampValue (matrix[0][2], hacd::HaF32(-0.999999f), hacd::HaF32(0.999999f)));

	HACD_ASSERT (dgCheckFloat (yaw));
	if (matrix[0][2] < minSin) {
		if (matrix[0][2] > (-minSin)) {
			// regular case: well away from gimbal lock
			roll = dgAtan2 (matrix[0][1], matrix[0][0]);
			pitch = dgAtan2 (matrix[1][2], matrix[2][2]);
		} else {
			// yaw near +90 degrees (gimbal lock): roll stays zero, pitch absorbs the remainder
			pitch = dgAtan2 (matrix[1][0], matrix[1][1]);
		}
	} else {
		// yaw near -90 degrees (gimbal lock)
		pitch = -dgAtan2 (matrix[1][0], matrix[1][1]);
	}

#ifdef _DEBUG
	// sanity check: rebuilding the matrix from the angles must reproduce the original within tolerance
	dgMatrix m (dgPitchMatrix (pitch) * dgYawMatrix(yaw) * dgRollMatrix(roll));
	for (hacd::HaI32 i = 0; i < 3; i ++) {
		for (hacd::HaI32 j = 0; j < 3; j ++) {
			hacd::HaF32 error = dgAbsf (m[i][j] - matrix[i][j]);
			HACD_ASSERT (error < 5.0e-2f);
		}
	}
#endif
	return dgVector (pitch, yaw, roll, hacd::HaF32(0.0f));
}
hacd::HaI32 AddFilterFace (hacd::HaU32 count, hacd::HaI32* const pool)
{
	// Try to add a polygon to the mesh being built; when AddFace rejects it,
	// repeatedly strip duplicated vertex indices from 'pool' and retry until
	// the face is accepted or no further reduction is possible.
	BeginFace();

	HACD_ASSERT (count);
	bool reduction = true;
	while (reduction && !AddFace (hacd::HaI32 (count), pool)) {
		reduction = false;
		if (count >3) {
			// look for a duplicated index pair (i, j)
			for (hacd::HaU32 i = 0; i < count; i ++) {
				for (hacd::HaU32 j = i + 1; j < count; j ++) {
					if (pool[j] == pool[i]) {
						// remove the duplicate at j by shifting the tail down one slot.
						// NB: the outer index 'i' is deliberately reused as the shift
						// cursor, then forced to 'count' so both scan loops terminate
						// and AddFace is retried by the enclosing while loop.
						for (i = j; i < count - 1; i ++) {
							pool[i] = pool[i + 1];
						}
						count --;
						i = count;
						reduction = true;
						break;
					}
				}
			}
		}
	}
	EndFace();

	HACD_ASSERT (reduction);
	// 0 signals the face could not be added even after removing duplicates
	return reduction ? hacd::HaI32 (count) : 0;
}
ChUll * doMerge(ChUll *a,ChUll *b)
{
	// Merge two hulls by pooling their vertex clouds and computing one convex
	// hull over the combination.  Returns a freshly allocated hull carrying a
	// new guid, or NULL if hull generation failed.
	ChUll *merged = 0;

	const HaU32 totalCount = a->mVertexCount + b->mVertexCount;
	HaF32 *pool = (HaF32 *)HACD_ALLOC(totalCount*sizeof(HaF32)*3);

	// pack A's vertices first, then B's, into one contiguous array
	memcpy(pool, a->mVertices, sizeof(HaF32)*3*a->mVertexCount);
	memcpy(pool + a->mVertexCount*3, b->mVertices, sizeof(HaF32)*3*b->mVertexCount);

	HullDesc desc;
	desc.mVcount        = totalCount;
	desc.mVertices      = pool;
	desc.mVertexStride  = sizeof(hacd::HaF32)*3;
	desc.mMaxVertices   = mMaxHullVertices;
	desc.mUseWuQuantizer = true;

	HullResult hresult;
	HullLibrary hl;
	HullError hret = hl.CreateConvexHull(desc,hresult);
	HACD_ASSERT( hret == QE_OK );
	if ( hret == QE_OK )
	{
		merged = HACD_NEW(ChUll)(hresult.mNumOutputVertices, hresult.mOutputVertices, hresult.mNumTriangles, hresult.mIndices, mGuid++);
	}

	HACD_FREE(pool);
	hl.ReleaseResult(hresult);

	return merged;
}
dgMatrix::dgMatrix (const dgQuaternion &rotation, const dgVector &position)
{
	// Standard quaternion-to-rotation-matrix conversion; the rows become the
	// basis vectors (m_front, m_up, m_right) and m_posit holds the translation.
	hacd::HaF32 x2 = hacd::HaF32 (2.0f) * rotation.m_q1 * rotation.m_q1;
	hacd::HaF32 y2 = hacd::HaF32 (2.0f) * rotation.m_q2 * rotation.m_q2;
	hacd::HaF32 z2 = hacd::HaF32 (2.0f) * rotation.m_q3 * rotation.m_q3;

#ifdef _DEBUG
	// a rotation quaternion must be (nearly) unit length: 2*(w^2+x^2+y^2+z^2) == 2
	hacd::HaF32 w2 = hacd::HaF32 (2.0f) * rotation.m_q0 * rotation.m_q0;
	HACD_ASSERT (dgAbsf (w2 + x2 + y2 + z2 - hacd::HaF32(2.0f)) <hacd::HaF32 (1.0e-3f));
#endif

	// cross terms of the quaternion components, each pre-scaled by 2
	hacd::HaF32 xy = hacd::HaF32 (2.0f) * rotation.m_q1 * rotation.m_q2;
	hacd::HaF32 xz = hacd::HaF32 (2.0f) * rotation.m_q1 * rotation.m_q3;
	hacd::HaF32 xw = hacd::HaF32 (2.0f) * rotation.m_q1 * rotation.m_q0;
	hacd::HaF32 yz = hacd::HaF32 (2.0f) * rotation.m_q2 * rotation.m_q3;
	hacd::HaF32 yw = hacd::HaF32 (2.0f) * rotation.m_q2 * rotation.m_q0;
	hacd::HaF32 zw = hacd::HaF32 (2.0f) * rotation.m_q3 * rotation.m_q0;

	m_front = dgVector (hacd::HaF32(1.0f) - y2 - z2, xy + zw, xz - yw , hacd::HaF32(0.0f));
	m_up    = dgVector (xy - zw, hacd::HaF32(1.0f) - x2 - z2, yz + xw , hacd::HaF32(0.0f));
	m_right = dgVector (xz + yw, yz - xw, hacd::HaF32(1.0f) - x2 - y2 , hacd::HaF32(0.0f));

	m_posit.m_x = position.m_x;
	m_posit.m_y = position.m_y;
	m_posit.m_z = position.m_z;
	m_posit.m_w = hacd::HaF32(1.0f);
}
dgSphere::dgSphere (const dgQuaternion &quat, const dgVector &position, const dgVector& dim)
	:dgMatrix(quat, position)
{
	// Oriented volume: the pose comes from quat + position (via the dgMatrix
	// base), the extents from 'dim'.
	SetDimensions (dim.m_x, dim.m_y, dim.m_z);
	// NOTE(review): unconditional assert suggests this constructor is believed
	// unused/unfinished -- any debug-build caller will trap here; confirm intent.
	HACD_ASSERT (0);
	//	planeTest = FrontTest;
}
void dgPolygonSoupDatabaseBuilder::OptimizeByIndividualFaces()
{
	// Compact the face/index arrays in place, dropping faces that FilterFace
	// rejects.  Note that faceArray/indexArray deliberately alias
	// oldFaceArray/oldIndexArray: the write cursors (newFaceCount /
	// newIndexCount) never overtake the read cursor, so the in-place
	// compaction is safe.
	hacd::HaI32* const faceArray = &m_faceVertexCount[0];
	hacd::HaI32* const indexArray = &m_vertexIndex[0];

	hacd::HaI32* const oldFaceArray = &m_faceVertexCount[0];
	hacd::HaI32* const oldIndexArray = &m_vertexIndex[0];

	hacd::HaI32 polygonIndex = 0;
	hacd::HaI32 newFaceCount = 0;
	hacd::HaI32 newIndexCount = 0;
	for (hacd::HaI32 i = 0; i < m_faceCount; i ++) {
		// each face is stored as [attribute, v0, v1, ...]; FilterFace only
		// cleans the vertex list (hence the +1 / -1 offsets)
		hacd::HaI32 oldCount = oldFaceArray[i];
		hacd::HaI32 count = FilterFace (oldCount - 1, &oldIndexArray[polygonIndex + 1]);
		if (count) {
			faceArray[newFaceCount] = count + 1;
			for (hacd::HaI32 j = 0; j < count + 1; j ++) {
				indexArray[newIndexCount + j] = oldIndexArray[polygonIndex + j];
			}
			newFaceCount ++;
			newIndexCount += (count + 1);
		}
		polygonIndex += oldCount;
	}
	HACD_ASSERT (polygonIndex == m_indexCount);

	m_faceCount = newFaceCount;
	m_indexCount = newIndexCount;
}
HaF32 canMerge(ChUll *a,ChUll *b)
{
	// Cost metric for merging two hulls: the volume of the convex hull of
	// their combined vertex clouds.  Returns 0 when the hulls cannot (or
	// should not) be merged.
	if ( !a->overlap(*b) ) return 0; // if their AABB's (with a little slop) don't overlap, then return.

	// ok..we are going to combine both meshes into a single mesh
	// and then we are going to compute the concavity...
	HaF32 ret = 0;

	HaU32 combinedVertexCount = a->mVertexCount + b->mVertexCount;
	HaF32 *combinedVertices = (HaF32 *)HACD_ALLOC(combinedVertexCount*sizeof(HaF32)*3);
	HaF32 *dest = combinedVertices;
	// concatenate both vertex clouds into one contiguous array
	memcpy(dest,a->mVertices, sizeof(HaF32)*3*a->mVertexCount);
	dest+=a->mVertexCount*3;
	memcpy(dest,b->mVertices,sizeof(HaF32)*3*b->mVertexCount);

	HullResult hresult;
	HullLibrary hl;
	HullDesc desc;
	desc.mVcount       = combinedVertexCount;
	desc.mVertices     = combinedVertices;
	desc.mVertexStride = sizeof(hacd::HaF32)*3;
	desc.mUseWuQuantizer = true;
	HullError hret = hl.CreateConvexHull(desc,hresult);
	HACD_ASSERT( hret == QE_OK );
	if ( hret == QE_OK )
	{
		// the combined convex hull's volume is the merge cost
		ret = fm_computeMeshVolume( hresult.mOutputVertices, hresult.mNumTriangles, hresult.mIndices );
	}

	HACD_FREE(combinedVertices);
	hl.ReleaseResult(hresult);
	return ret;
}
void dgPolygonSoupDatabaseBuilder::OptimizeByGroupID()
{
	// Re-optimize the whole soup one attribute (group id) at a time, then
	// replace this builder's arrays with the optimized result.
	dgTree<int, int> attribFilter;
	dgPolygonSoupDatabaseBuilder builder;
	dgPolygonSoupDatabaseBuilder builderAux;
	dgPolygonSoupDatabaseBuilder builderLeftOver;

	builder.Begin();
	hacd::HaI32 polygonIndex = 0;
	for (hacd::HaI32 i = 0; i < m_faceCount; i ++) {
		// the first index of each face is its attribute/group id
		hacd::HaI32 attribute = m_vertexIndex[polygonIndex];
		if (!attribFilter.Find(attribute)) {
			// first time this group is seen: optimize every face that shares it
			attribFilter.Insert (attribute, attribute);
			builder.OptimizeByGroupID (*this, i, polygonIndex, builderLeftOver);
			// faces the optimizer could not absorb are retried, up to 64 passes
			for (hacd::HaI32 j = 0; builderLeftOver.m_faceCount && (j < 64); j ++) {
				// writing one slot past the used range presumably forces the
				// array storage to grow before the raw memcpy below -- TODO confirm
				builderAux.m_faceVertexCount[builderLeftOver.m_faceCount] = 0;
				builderAux.m_vertexIndex[builderLeftOver.m_indexCount] = 0;
				builderAux.m_vertexPoints[builderLeftOver.m_vertexCount].m_x = hacd::HaF32 (0.0f);
				memcpy (&builderAux.m_faceVertexCount[0], &builderLeftOver.m_faceVertexCount[0], sizeof (hacd::HaI32) * builderLeftOver.m_faceCount);
				memcpy (&builderAux.m_vertexIndex[0], &builderLeftOver.m_vertexIndex[0], sizeof (hacd::HaI32) * builderLeftOver.m_indexCount);
				memcpy (&builderAux.m_vertexPoints[0], &builderLeftOver.m_vertexPoints[0], sizeof (dgBigVector) * builderLeftOver.m_vertexCount);
				builderAux.m_faceCount = builderLeftOver.m_faceCount;
				builderAux.m_indexCount = builderLeftOver.m_indexCount;
				builderAux.m_vertexCount = builderLeftOver.m_vertexCount;

				hacd::HaI32 prevFaceCount = builderLeftOver.m_faceCount;
				builderLeftOver.m_faceCount = 0;
				builderLeftOver.m_indexCount = 0;
				builderLeftOver.m_vertexCount = 0;
				builder.OptimizeByGroupID (builderAux, 0, 0, builderLeftOver);
				if (prevFaceCount == builderLeftOver.m_faceCount) {
					// no progress this pass; stop to avoid an infinite loop
					break;
				}
			}
			HACD_ASSERT (builderLeftOver.m_faceCount == 0);
		}
		polygonIndex += m_faceVertexCount[i];
	}
	//	builder.End();
	builder.Optimize(false);

	// copy the optimized soup back into this builder (same grow-then-memcpy pattern)
	m_faceVertexCount[builder.m_faceCount] = 0;
	m_vertexIndex[builder.m_indexCount] = 0;
	m_vertexPoints[builder.m_vertexCount].m_x = hacd::HaF32 (0.0f);
	memcpy (&m_faceVertexCount[0], &builder.m_faceVertexCount[0], sizeof (hacd::HaI32) * builder.m_faceCount);
	memcpy (&m_vertexIndex[0], &builder.m_vertexIndex[0], sizeof (hacd::HaI32) * builder.m_indexCount);
	memcpy (&m_vertexPoints[0], &builder.m_vertexPoints[0], sizeof (dgBigVector) * builder.m_vertexCount);

	m_faceCount = builder.m_faceCount;
	m_indexCount = builder.m_indexCount;
	m_vertexCount = builder.m_vertexCount;
	m_normalCount = builder.m_normalCount;
}
virtual const char * stristr(const char *str,const char *key) // case insensitive ststr { HACD_ASSERT( strlen(str) < 2048 ); HACD_ASSERT( strlen(key) < 2048 ); char istr[2048]; char ikey[2048]; strncpy(istr,str,2048); strncpy(ikey,key,2048); mystrlwr(istr); mystrlwr(ikey); char *foo = strstr(istr,ikey); if ( foo ) { uint32_t loc = (uint32_t)(foo - istr); foo = (char *)str+loc; } return foo; }
dgMatrix dgMatrix::Symetric3by3Inverse () const
{
	// Invert the upper-left 3x3 block, which is assumed to be symmetric; only
	// the six unique entries are computed and the result is symmetric too.
	const dgMatrix& mat = *this;
	// determinant of a symmetric 3x3 (the off-diagonal triple product appears twice)
	hacd::HaF64 det = mat[0][0] * mat[1][1] * mat[2][2] + mat[0][1] * mat[1][2] * mat[0][2] * hacd::HaF32 (2.0f) - mat[0][2] * mat[1][1] * mat[0][2] - mat[0][1] * mat[0][1] * mat[2][2] - mat[0][0] * mat[1][2] * mat[1][2];
	det = hacd::HaF32 (1.0f) / det;

	// cofactors scaled by 1/det: xIJ is element [I-1][J-1] of the inverse
	hacd::HaF32 x11 = (hacd::HaF32)(det * (mat[1][1] * mat[2][2] - mat[1][2] * mat[1][2]));
	hacd::HaF32 x22 = (hacd::HaF32)(det * (mat[0][0] * mat[2][2] - mat[0][2] * mat[0][2]));
	hacd::HaF32 x33 = (hacd::HaF32)(det * (mat[0][0] * mat[1][1] - mat[0][1] * mat[0][1]));

	hacd::HaF32 x12 = (hacd::HaF32)(det * (mat[1][2] * mat[2][0] - mat[1][0] * mat[2][2]));
	hacd::HaF32 x13 = (hacd::HaF32)(det * (mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]));
	hacd::HaF32 x23 = (hacd::HaF32)(det * (mat[0][1] * mat[2][0] - mat[0][0] * mat[2][1]));

#ifdef _DEBUG
	// verify matInv * mat is close to the identity (diagonal only)
	dgMatrix matInv (dgVector (x11, x12, x13, hacd::HaF32(0.0f)),
					 dgVector (x12, x22, x23, hacd::HaF32(0.0f)),
					 dgVector (x13, x23, x33, hacd::HaF32(0.0f)),
					 dgVector (hacd::HaF32(0.0f), hacd::HaF32(0.0f), hacd::HaF32(0.0f), hacd::HaF32(1.0f)));

	dgMatrix test (matInv * mat);
	HACD_ASSERT (dgAbsf (test[0][0] - hacd::HaF32(1.0f)) < hacd::HaF32(0.01f));
	HACD_ASSERT (dgAbsf (test[1][1] - hacd::HaF32(1.0f)) < hacd::HaF32(0.01f));
	HACD_ASSERT (dgAbsf (test[2][2] - hacd::HaF32(1.0f)) < hacd::HaF32(0.01f));
#endif

	return dgMatrix (dgVector (x11, x12, x13, hacd::HaF32(0.0f)),
					 dgVector (x12, x22, x23, hacd::HaF32(0.0f)),
					 dgVector (x13, x23, x33, hacd::HaF32(0.0f)),
					 dgVector (hacd::HaF32(0.0f), hacd::HaF32(0.0f), hacd::HaF32(0.0f), hacd::HaF32(1.0f)));
}
void HullLibrary::BringOutYourDead(const float *verts,uint32_t vcount, float *overts,uint32_t &ocount,uint32_t *indices,uint32_t indexcount) { uint32_t *used = (uint32_t *)HACD_ALLOC(sizeof(uint32_t)*vcount); memset(used,0,sizeof(uint32_t)*vcount); ocount = 0; for (uint32_t i=0; i<indexcount; i++) { uint32_t v = indices[i]; // original array index HACD_ASSERT( v < vcount ); if ( used[v] ) // if already remapped { indices[i] = used[v]-1; // index to new array } else { indices[i] = ocount; // new index mapping overts[ocount*3+0] = verts[v*3+0]; // copy old vert to new vert array overts[ocount*3+1] = verts[v*3+1]; overts[ocount*3+2] = verts[v*3+2]; ocount++; // increment output vert count HACD_ASSERT( ocount <= vcount ); used[v] = ocount; // assign new index remapping } } HACD_FREE(used); }
dgBigVector LineTriangleIntersection (const dgBigVector& p0, const dgBigVector& p1, const dgBigVector& A, const dgBigVector& B, const dgBigVector& C)
{
	// Segment/triangle intersection computed with extended-precision
	// (dgGoogol) arithmetic.  Returns the normalized barycentric weights of
	// the intersection point in x,y,z (w = 0), or w = -1 when the segment
	// passes outside the triangle.
	dgHugeVector ph0 (p0);
	dgHugeVector ph1 (p1);
	dgHugeVector Ah (A);
	dgHugeVector Bh (B);
	dgHugeVector Ch (C);

	dgHugeVector p1p0 (ph1 - ph0);
	dgHugeVector Ap0 (Ah - ph0);
	dgHugeVector Bp0 (Bh - ph0);
	dgHugeVector Cp0 (Ch - ph0);

	// signed volume of the tetrahedron formed by the segment direction and
	// each pair of triangle vertices; a negative value means the segment
	// passes on the outside of that edge
	dgGoogol t0 ((Bp0 * Cp0) % p1p0);
	//hacd::HaF64 val0 = t0.GetAproximateValue();
	//if (val0 < hacd::HaF64 (0.0f)) {
	if (hacd::HaF64(t0) < hacd::HaF64(0.0f)) {
		return dgBigVector (hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (-1.0f));
	}

	dgGoogol t1 ((Cp0 * Ap0) % p1p0);
	// hacd::HaF64 val1 = t1.GetAproximateValue();
	// if (val1 < hacd::HaF64 (0.0f)) {
	if (hacd::HaF64 (t1) < hacd::HaF64 (0.0f)) {
		return dgBigVector (hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (-1.0f));
	}

	dgGoogol t2 ((Ap0 * Bp0) % p1p0);
	//hacd::HaF64 val2 = t2.GetAproximateValue();
	//if (val2 < hacd::HaF64 (0.0f)) {
	if (hacd::HaF64 (t2) < hacd::HaF64 (0.0f)) {
		return dgBigVector (hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (0.0f), hacd::HaF32 (-1.0f));
	}

	dgGoogol sum = t0 + t1 + t2;
	//hacd::HaF64 den = sum.GetAproximateValue();

#ifdef _DEBUG
	// the reconstructed point must lie (nearly) on the triangle's plane
	//dgBigVector testpoint (A.Scale (val0 / den) + B.Scale (val1 / den) + C.Scale(val2 / den));
	dgBigVector testpoint (A.Scale (t0 / sum) + B.Scale (t1 / sum) + C.Scale(t2 / sum));
	hacd::HaF64 volume = ((B - A) * (C - A)) % (testpoint - A);
	HACD_ASSERT (fabs (volume) < hacd::HaF64 (1.0e-12f));
#endif

	//	return dgBigVector (val0 / den, val1 / den, val2 / den, hacd::HaF32 (0.0f));
	return dgBigVector (t0 / sum, t1 / sum, t2 / sum, hacd::HaF32 (0.0f));
}
void addBone(uint32_t bone,uint32_t *bones,uint32_t &bcount)
{
	// Append 'bone' to the fixed-size table unless it is already present.
	// Silently drops the bone when the table is full (asserts in debug).
	HACD_ASSERT(bcount < MAX_BONE_COUNT);
	if ( bcount >= MAX_BONE_COUNT )
	{
		return; // table full
	}

	// linear scan for a duplicate
	for (uint32_t k=0; k<bcount; k++)
	{
		if ( bones[k] == bone )
		{
			return; // already recorded
		}
	}

	bones[bcount] = bone;
	bcount++;
}
float dgFastRayTest::PolygonIntersect (const dgVector& normal, const float* const polygon, int32_t strideInBytes, const int32_t* const indexArray, int32_t indexCount) const
{
	// Ray vs. convex, single-sided polygon.  Returns the parametric distance
	// along the ray in [0, 1] on a hit, or 1.2f on a miss.
	HACD_ASSERT (m_p0.m_w == m_p1.m_w);

#ifndef __USE_DOUBLE_PRECISION__
	// sentinel: stays huge unless a borderline edge test forces a double recheck
	float unrealible = float (1.0e10f);
#endif

	float dist = normal % m_diff;
	if (dist < m_dirError) {

		int32_t stride = int32_t (strideInBytes / sizeof (float));

		dgVector v0 (&polygon[indexArray[indexCount - 1] * stride]);
		dgVector p0v0 (v0 - m_p0);
		float tOut = normal % p0v0;
		// this only work for convex polygons and for single side faces
		// walk the polygon around the edges and calculate the volume
		if ((tOut < float (0.0f)) && (tOut > dist)) {
			for (int32_t i = 0; i < indexCount; i ++) {
				int32_t i2 = indexArray[i] * stride;
				dgVector v1 (&polygon[i2]);
				dgVector p0v1 (v1 - m_p0);
				// calculate the volume formed by the line and the edge of the polygon
				float alpha = (m_diff * p0v1) % p0v0;
				// if a least one volume is negative it mean the line cross the polygon outside this edge and do not hit the face
				if (alpha < DG_RAY_TOL_ERROR) {
#ifdef __USE_DOUBLE_PRECISION__
					return 1.2f;
#else
					unrealible = alpha;
					break;
#endif
				}
				p0v0 = p0v1;
			}

#ifndef __USE_DOUBLE_PRECISION__
			// NOTE(review): a decisively negative alpha (< 10 * DG_RAY_TOL_ERROR)
			// skips this recheck AND falls through to the hit path below --
			// confirm whether a clear single-precision miss should return 1.2f.
			if ((unrealible < float (0.0f)) && (unrealible > (DG_RAY_TOL_ERROR * float (10.0f)))) {
				// the edge is too close to an edge float is not reliable, do the calculation with double
				dgBigVector v0_ (v0);
				dgBigVector m_p0_ (m_p0);
				//dgBigVector m_p1_ (m_p1);
				dgBigVector p0v0_ (v0_ - m_p0_);
				dgBigVector normal_ (normal);
				dgBigVector diff_ (m_diff);
				double tOut_ = normal_ % p0v0_;
				//double dist_ = normal_ % diff_;
				// NOTE(review): this guard reuses the single-precision tOut/dist
				// rather than tOut_/dist_ -- looks intentional (dist_ is commented
				// out) but verify against the double-precision build.
				if ((tOut < double (0.0f)) && (tOut > dist)) {
					for (int32_t i = 0; i < indexCount; i ++) {
						int32_t i2 = indexArray[i] * stride;
						dgBigVector v1 (&polygon[i2]);
						dgBigVector p0v1_ (v1 - m_p0_);
						// calculate the volume formed by the line and the edge of the polygon
						double alpha = (diff_ * p0v1_) % p0v0_;
						// if a least one volume is negative it mean the line cross the polygon outside this edge and do not hit the face
						if (alpha < DG_RAY_TOL_ERROR) {
							return 1.2f;
						}
						p0v0_ = p0v1_;
					}

					tOut = float (tOut_);
				}
			}
#endif

			// the line is to the left of all the polygon edges, so the
			// intersection is where the line meets the plane of the polygon
			tOut = tOut / dist;
			HACD_ASSERT (tOut >= float (0.0f));
			HACD_ASSERT (tOut <= float (1.0f));
			return tOut;
		}
	}
	return float (1.2f);
}
dgBigVector dgPointToTriangleDistance (const dgBigVector& point, const dgBigVector& p0, const dgBigVector& p1, const dgBigVector& p2)
{
	// Closest point on triangle (p0, p1, p2) to 'point', found by classifying
	// the point against the triangle's Voronoi regions: three vertex regions,
	// three edge regions, then the interior.
	//	const dgBigVector p (double (0.0f), double (0.0f), double (0.0f));
	const dgBigVector p10 (p1 - p0);
	const dgBigVector p20 (p2 - p0);
	const dgBigVector p_p0 (point - p0);

	double alpha1 = p10 % p_p0;
	double alpha2 = p20 % p_p0;
	if ((alpha1 <= double (0.0f)) && (alpha2 <= double (0.0f))) {
		// vertex region of p0
		return p0;
	}

	dgBigVector p_p1 (point - p1);
	double alpha3 = p10 % p_p1;
	double alpha4 = p20 % p_p1;
	if ((alpha3 >= double (0.0f)) && (alpha4 <= alpha3)) {
		// vertex region of p1
		return p1;
	}

	double vc = alpha1 * alpha4 - alpha3 * alpha2;
	if ((vc <= double (0.0f)) && (alpha1 >= double (0.0f)) && (alpha3 <= double (0.0f))) {
		// edge region p0-p1: project onto the edge
		double t = alpha1 / (alpha1 - alpha3);
		HACD_ASSERT (t >= double (0.0f));
		HACD_ASSERT (t <= double (1.0f));
		return p0 + p10.Scale (t);
	}

	dgBigVector p_p2 (point - p2);
	double alpha5 = p10 % p_p2;
	double alpha6 = p20 % p_p2;
	if ((alpha6 >= double (0.0f)) && (alpha5 <= alpha6)) {
		// vertex region of p2
		return p2;
	}

	double vb = alpha5 * alpha2 - alpha1 * alpha6;
	if ((vb <= double (0.0f)) && (alpha2 >= double (0.0f)) && (alpha6 <= double (0.0f))) {
		// edge region p0-p2
		double t = alpha2 / (alpha2 - alpha6);
		HACD_ASSERT (t >= double (0.0f));
		HACD_ASSERT (t <= double (1.0f));
		return p0 + p20.Scale (t);
	}

	double va = alpha3 * alpha6 - alpha5 * alpha4;
	if ((va <= double (0.0f)) && ((alpha4 - alpha3) >= double (0.0f)) && ((alpha5 - alpha6) >= double (0.0f))) {
		// edge region p1-p2
		double t = (alpha4 - alpha3) / ((alpha4 - alpha3) + (alpha5 - alpha6));
		HACD_ASSERT (t >= double (0.0f));
		HACD_ASSERT (t <= double (1.0f));
		return p1 + (p2 - p1).Scale (t);
	}

	// interior region: barycentric combination of the two edge directions
	double den = float(double (1.0f)) / (va + vb + vc);
	double t = vb * den;
	double s = vc * den;

	HACD_ASSERT (t >= double (0.0f));
	HACD_ASSERT (s >= double (0.0f));
	HACD_ASSERT (t <= double (1.0f));
	HACD_ASSERT (s <= double (1.0f));
	return p0 + p10.Scale (t) + p20.Scale (s);
}
HullError HullLibrary::CreateConvexHull(const HullDesc &desc, // describes the input request
										HullResult &result)   // contains the results
{
	// Full convex hull pipeline: normalize and de-duplicate the input cloud,
	// run dgConvexHull3d in double precision, denormalize the hull vertices,
	// then compact the vertex table to only the referenced vertices.
	HullError ret = QE_FAIL;

	uint32_t vcount = desc.mVcount;
	if ( vcount < 8 ) vcount = 8; // always allocate room for at least 8 points

	float *vsource = (float *) HACD_ALLOC( sizeof(float)*vcount*3 );

	float scale[3];
	float center[3];

	uint32_t ovcount;

	bool ok = NormalizeAndCleanupVertices(desc.mVcount,desc.mVertices, desc.mVertexStride, ovcount, vsource, desc.mNormalEpsilon, scale, center, desc.mMaxVertices*2, desc.mUseWuQuantizer ); // normalize point cloud, remove duplicates!

	if ( ok )
	{
		// the hull builder works in double precision
		double *bigVertices = (double *)HACD_ALLOC(sizeof(double)*3*ovcount);
		for (uint32_t i=0; i<3*ovcount; i++)
		{
			bigVertices[i] = vsource[i];
		}

		dgConvexHull3d convexHull(bigVertices,sizeof(double)*3,(int32_t)ovcount,0.0001f,(int32_t)desc.mMaxVertices);

		if ( convexHull.GetCount() )
		{
			float *hullVertices = (float *)HACD_ALLOC( sizeof(float)*3*convexHull.GetVertexCount() );

			// undo the normalization: scale back up and re-center
			float *dest = hullVertices;
			for (int32_t i=0; i<convexHull.GetVertexCount(); i++)
			{
				const dgBigVector &v = convexHull.GetVertex(i);
				dest[0] = (float)v.m_x*scale[0]+center[0];
				dest[1] = (float)v.m_y*scale[1]+center[1];
				dest[2] = (float)v.m_z*scale[2]+center[2];
				dest+=3;
			}

			// flatten the face list into a plain triangle index array
			uint32_t triangleCount = (uint32_t)convexHull.GetCount();
			uint32_t *indices = (uint32_t*)HACD_ALLOC(triangleCount*sizeof(uint32_t)*3);
			uint32_t *destIndices = indices;
			dgList<dgConvexHull3DFace>::Iterator iter(convexHull);
			uint32_t outCount = 0;
			for (iter.Begin(); iter; iter++)
			{
				dgConvexHull3DFace &face = (*iter);
				destIndices[0] = (uint32_t)face.m_index[0];
				destIndices[1] = (uint32_t)face.m_index[1];
				destIndices[2] = (uint32_t)face.m_index[2];
				destIndices+=3;
				outCount++;
			}
			HACD_ASSERT( outCount == triangleCount );

			// re-index triangle mesh so it refers to only used vertices, rebuild a new vertex table.
			float *vscratch = (float *) HACD_ALLOC( sizeof(float)*convexHull.GetVertexCount()*3 );
			BringOutYourDead(hullVertices,(uint32_t)convexHull.GetVertexCount(),vscratch, ovcount, indices, triangleCount*3 );

			ret = QE_OK;

			// the caller owns (and must free) these two result arrays
			result.mNumOutputVertices = ovcount;
			result.mOutputVertices = (float *)HACD_ALLOC( sizeof(float)*ovcount*3);
			result.mNumTriangles = triangleCount;
			result.mIndices = (uint32_t *) HACD_ALLOC( sizeof(uint32_t)*triangleCount*3);
			memcpy(result.mOutputVertices, vscratch, sizeof(float)*3*ovcount );
			memcpy(result.mIndices, indices, sizeof(uint32_t)*triangleCount*3);

			HACD_FREE(indices);
			HACD_FREE(vscratch);
			HACD_FREE(hullVertices);
		}

		HACD_FREE(bigVertices);
	}

	HACD_FREE(vsource);

	return ret;
}
void dgPolygonSoupDatabaseBuilder::OptimizeByGroupID (dgPolygonSoupDatabaseBuilder& source, hacd::HaI32 faceNumber, hacd::HaI32 faceIndexNumber, dgPolygonSoupDatabaseBuilder& leftOver)
{
	// Gather every face of 'source' (from faceNumber on) that carries the same
	// attribute as the face at faceIndexNumber, merge them into one polyhedra,
	// convex-partition the result, and add the pieces to this builder.  Faces
	// that cannot be joined into the polyhedra are appended to 'leftOver'.
	hacd::HaI32 indexPool[1024 * 1];
	hacd::HaI32 atributeData[1024 * 1];
	dgVector vertexPool[1024 * 1];
	dgPolyhedra polyhedra;

	// the first index of a face is its attribute (group id)
	hacd::HaI32 attribute = source.m_vertexIndex[faceIndexNumber];
	for (hacd::HaI32 i = 0; i < hacd::HaI32 (sizeof(atributeData) / sizeof (hacd::HaI32)); i ++) {
		indexPool[i] = i;
		atributeData[i] = attribute;
	}

	leftOver.Begin();
	polyhedra.BeginFace ();
	for (hacd::HaI32 i = faceNumber; i < source.m_faceCount; i ++) {
		hacd::HaI32 indexCount;
		indexCount = source.m_faceVertexCount[i];
		HACD_ASSERT (indexCount < 1024);

		if (source.m_vertexIndex[faceIndexNumber] == attribute) {
			dgEdge* const face = polyhedra.AddFace(indexCount - 1, &source.m_vertexIndex[faceIndexNumber + 1]);
			if (!face) {
				// topologically incompatible with the current polyhedra: push it to leftOver
				hacd::HaI32 faceArray;
				for (hacd::HaI32 j = 0; j < indexCount - 1; j ++) {
					hacd::HaI32 index;
					index = source.m_vertexIndex[faceIndexNumber + j + 1];
					vertexPool[j] = source.m_vertexPoints[index];
				}
				faceArray = indexCount - 1;
				leftOver.AddMesh (&vertexPool[0].m_x, indexCount - 1, sizeof (dgVector), 1, &faceArray, indexPool, atributeData, dgGetIdentityMatrix());
			} else {
				// set the attribute on every edge of the accepted face
				dgEdge* ptr = face;
				do {
					ptr->m_userData = hacd::HaU64 (attribute);
					ptr = ptr->m_next;
				} while (ptr != face);
			}
		}
		faceIndexNumber += indexCount;
	}

	leftOver.Optimize(false);
	polyhedra.EndFace();

	// split the merged surface into convex patches
	dgPolyhedra facesLeft;
	facesLeft.BeginFace();
	polyhedra.ConvexPartition (&source.m_vertexPoints[0].m_x, sizeof (dgBigVector), &facesLeft);
	facesLeft.EndFace();

	// emit each face of the partitioned polyhedra into this builder,
	// using the LRU mark to visit every face exactly once
	hacd::HaI32 mark = polyhedra.IncLRU();
	dgPolyhedra::Iterator iter (polyhedra);
	for (iter.Begin(); iter; iter ++) {
		dgEdge* const edge = &(*iter);
		if (edge->m_incidentFace < 0) {
			continue;
		}
		if (edge->m_mark == mark) {
			continue;
		}

		dgEdge* ptr = edge;
		hacd::HaI32 indexCount = 0;
		do {
			ptr->m_mark = mark;
			vertexPool[indexCount] = source.m_vertexPoints[ptr->m_incidentVertex];
			indexCount ++;
			ptr = ptr->m_next;
		} while (ptr != edge);

		if (indexCount >= 3) {
			AddMesh (&vertexPool[0].m_x, indexCount, sizeof (dgVector), 1, &indexCount, indexPool, atributeData, dgGetIdentityMatrix());
		}
	}

	// and likewise for the faces the partition step produced separately
	mark = facesLeft.IncLRU();
	dgPolyhedra::Iterator iter1 (facesLeft);
	for (iter1.Begin(); iter1; iter1 ++) {
		dgEdge* const edge = &(*iter1);
		if (edge->m_incidentFace < 0) {
			continue;
		}
		if (edge->m_mark == mark) {
			continue;
		}

		dgEdge* ptr = edge;
		hacd::HaI32 indexCount = 0;
		do {
			ptr->m_mark = mark;
			vertexPool[indexCount] = source.m_vertexPoints[ptr->m_incidentVertex];
			indexCount ++;
			ptr = ptr->m_next;
		} while (ptr != edge);
		if (indexCount >= 3) {
			AddMesh (&vertexPool[0].m_x, indexCount, sizeof (dgVector), 1, &indexCount, indexPool, atributeData, dgGetIdentityMatrix());
		}
	}
}
void dgMatrix::EigenVectors (dgVector &eigenValues, const dgMatrix& initialGuess)
{
	// Jacobi eigenvalue iteration on the (symmetric) 3x3 part of this matrix,
	// seeded by 'initialGuess'.  On success *this becomes the eigenvector
	// basis and 'eigenValues' the corresponding eigenvalues.
	hacd::HaF32 b[3];
	hacd::HaF32 z[3];
	hacd::HaF32 d[3];

	dgMatrix& mat = *this;
	dgMatrix eigenVectors (initialGuess.Transpose4X4());
	mat = initialGuess * mat * eigenVectors;

	b[0] = mat[0][0];
	b[1] = mat[1][1];
	b[2] = mat[2][2];
	d[0] = mat[0][0];
	d[1] = mat[1][1];
	d[2] = mat[2][2];
	z[0] = hacd::HaF32 (0.0f);
	z[1] = hacd::HaF32 (0.0f);
	z[2] = hacd::HaF32 (0.0f);

	for (hacd::HaI32 i = 0; i < 50; i++) {
		// sum of off-diagonal magnitudes; convergence when numerically zero
		hacd::HaF32 sm = dgAbsf(mat[0][1]) + dgAbsf(mat[0][2]) + dgAbsf(mat[1][2]);

		if (sm < hacd::HaF32 (1.0e-6f)) {
			HACD_ASSERT (dgAbsf((eigenVectors.m_front % eigenVectors.m_front) - hacd::HaF32(1.0f)) < dgEPSILON);
			HACD_ASSERT (dgAbsf((eigenVectors.m_up % eigenVectors.m_up) - hacd::HaF32(1.0f)) < dgEPSILON);
			HACD_ASSERT (dgAbsf((eigenVectors.m_right % eigenVectors.m_right) - hacd::HaF32(1.0f)) < dgEPSILON);

			// order the eigenvalue vectors: enforce a right-handed basis
			dgVector tmp (eigenVectors.m_front * eigenVectors.m_up);
			if (tmp % eigenVectors.m_right < hacd::HaF32(0.0f)) {
				eigenVectors.m_right = eigenVectors.m_right.Scale (-hacd::HaF32(1.0f));
			}

			eigenValues = dgVector (d[0], d[1], d[2], hacd::HaF32 (0.0f));
			*this = eigenVectors.Inverse();
			return;
		}

		hacd::HaF32 thresh = hacd::HaF32 (0.0f);
		if (i < 3) {
			// larger rotation threshold during the first sweeps (standard Jacobi heuristic)
			thresh = (hacd::HaF32)(0.2f / 9.0f) * sm;
		}

		// sweep every off-diagonal element (ip, iq)
		for (hacd::HaI32 ip = 0; ip < 2; ip ++) {
			for (hacd::HaI32 iq = ip + 1; iq < 3; iq ++) {
				hacd::HaF32 g = hacd::HaF32 (100.0f) * dgAbsf(mat[ip][iq]);
				// exact FP comparison is intentional: it tests whether g is
				// negligible at the precision of the diagonal entries
				//if ((i > 3) && (dgAbsf(d[0]) + g == dgAbsf(d[ip])) && (dgAbsf(d[1]) + g == dgAbsf(d[1]))) {
				if ((i > 3) && ((dgAbsf(d[ip]) + g) == dgAbsf(d[ip])) && ((dgAbsf(d[iq]) + g) == dgAbsf(d[iq]))) {
					mat[ip][iq] = hacd::HaF32 (0.0f);
				} else if (dgAbsf(mat[ip][iq]) > thresh) {
					// compute the rotation (c, s) that annihilates mat[ip][iq]
					hacd::HaF32 t;
					hacd::HaF32 h = d[iq] - d[ip];
					if (dgAbsf(h) + g == dgAbsf(h)) {
						t = mat[ip][iq] / h;
					} else {
						hacd::HaF32 theta = hacd::HaF32 (0.5f) * h / mat[ip][iq];
						t = hacd::HaF32(1.0f) / (dgAbsf(theta) + dgSqrt(hacd::HaF32(1.0f) + theta * theta));
						if (theta < hacd::HaF32 (0.0f)) {
							t = -t;
						}
					}

					hacd::HaF32 c = hacd::HaF32(1.0f) / dgSqrt (hacd::HaF32 (1.0f) + t * t);
					hacd::HaF32 s = t * c;
					hacd::HaF32 tau = s / (hacd::HaF32(1.0f) + c);
					h = t * mat[ip][iq];
					z[ip] -= h;
					z[iq] += h;
					d[ip] -= h;
					d[iq] += h;
					mat[ip][iq] = hacd::HaF32(0.0f);

					// apply the rotation to the remaining matrix entries (three strips)
					for (hacd::HaI32 j = 0; j <= ip - 1; j ++) {
						//ROT (mat, j, ip, j, iq, s, tau);
						hacd::HaF32 g = mat[j][ip];
						hacd::HaF32 h = mat[j][iq];
						mat[j][ip] = g - s * (h + g * tau);
						mat[j][iq] = h + s * (g - h * tau);
					}
					for (hacd::HaI32 j = ip + 1; j <= iq - 1; j ++) {
						//ROT (mat, ip, j, j, iq, s, tau);
						hacd::HaF32 g = mat[ip][j];
						hacd::HaF32 h = mat[j][iq];
						mat[ip][j] = g - s * (h + g * tau);
						mat[j][iq] = h + s * (g - h * tau);
					}
					for (hacd::HaI32 j = iq + 1; j < 3; j ++) {
						//ROT (mat, ip, j, iq, j, s, tau);
						hacd::HaF32 g = mat[ip][j];
						hacd::HaF32 h = mat[iq][j];
						mat[ip][j] = g - s * (h + g * tau);
						mat[iq][j] = h + s * (g - h * tau);
					}

					// accumulate the rotation into the eigenvector matrix
					for (hacd::HaI32 j = 0; j < 3; j ++) {
						//ROT (eigenVectors, j, ip, j, iq, s, tau);
						hacd::HaF32 g = eigenVectors[j][ip];
						hacd::HaF32 h = eigenVectors[j][iq];
						eigenVectors[j][ip] = g - s * (h + g * tau);
						eigenVectors[j][iq] = h + s * (g - h * tau);
					}
				}
			}
		}

		// fold the accumulated corrections back into the diagonal
		b[0] += z[0]; d[0] = b[0]; z[0] = hacd::HaF32 (0.0f);
		b[1] += z[1]; d[1] = b[1]; z[1] = hacd::HaF32 (0.0f);
		b[2] += z[2]; d[2] = b[2]; z[2] = hacd::HaF32 (0.0f);
	}

	// failed to converge after 50 sweeps: return best-effort eigenvalues and identity
	eigenValues = dgVector (d[0], d[1], d[2], hacd::HaF32 (0.0f));
	*this = dgGetIdentityMatrix();
}
bool combineHulls(JOB_SWARM_STANDALONE::JobSwarmContext *jobSwarmContext)
{
	// One merge step: evaluate (in parallel) the combined volume of every
	// untested hull pair, then merge the pair with the smallest combined
	// volume.  Returns true if a merge happened.
	bool combine = false;
	// each new convex hull is given a unique guid.
	// A hash map is used to make sure that no hulls are tested twice.
	CHullVector output;
	HaU32 count = (HaU32)mChulls.size();

	// Early out to save walking all the hulls. Hulls are combined based on
	// a target number or on a number of generated hulls.
	bool mergeTargetMet = (HaU32)mChulls.size() <= mMergeNumHulls;
	if (mergeTargetMet && (mSmallClusterThreshold == 0.0f)) return false;

	hacd::vector< CombineVolumeJob > jobs;

	// First, see if there are any pairs of hulls who's combined volume we have not yet calculated.
	// If there are, then we add them to the jobs list
	{
		for (HaU32 i=0; i<count; i++)
		{
			CHull *cr = mChulls[i];
			for (HaU32 j=i+1; j<count; j++)
			{
				CHull *match = mChulls[j];
				// pack the two guids into one 32-bit key, smaller guid in the
				// high half so (a,b) and (b,a) map to the same entry.
				// NOTE(review): this packing assumes guids stay below 2^16 -- confirm.
				HaU32 hashIndex;
				if ( match->mGuid < cr->mGuid )
				{
					hashIndex = (match->mGuid << 16) | cr->mGuid;
				}
				else
				{
					hashIndex = (cr->mGuid << 16 ) | match->mGuid;
				}

				HaF32 *v = mHasBeenTested->find(hashIndex);

				if ( v == NULL )
				{
					CombineVolumeJob job(cr,match,hashIndex);
					jobs.push_back(job);
					(*mHasBeenTested)[hashIndex] = 0.0f; // assign it to some value so we don't try to create more than one job for it.
				}
			}
		}
	}

	// ok..we have posted all of the jobs, let's let's solve them in parallel
	for (hacd::HaU32 i=0; i<jobs.size(); i++)
	{
		jobs[i].startJob(jobSwarmContext);
	}

	// solve all of them in parallel...
	while ( gCombineCount != 0 )
	{
		jobSwarmContext->processSwarmJobs(); // solve merged hulls in parallel
	}

	// once we have the answers, now put the results into the hash table.
	for (hacd::HaU32 i=0; i<jobs.size(); i++)
	{
		CombineVolumeJob &job = jobs[i];
		(*mHasBeenTested)[job.mHashIndex] = job.mCombinedVolume;
	}

	HaF32 bestVolume = 1e9;
	CHull *mergeA = NULL;
	CHull *mergeB = NULL;
	// now find the two hulls which merged produce the smallest combined volume.
	{
		for (HaU32 i=0; i<count; i++)
		{
			CHull *cr = mChulls[i];
			for (HaU32 j=i+1; j<count; j++)
			{
				CHull *match = mChulls[j];
				HaU32 hashIndex;
				if ( match->mGuid < cr->mGuid )
				{
					hashIndex = (match->mGuid << 16) | cr->mGuid;
				}
				else
				{
					hashIndex = (cr->mGuid << 16 ) | match->mGuid;
				}
				HaF32 *v = mHasBeenTested->find(hashIndex);
				HACD_ASSERT(v);
				// volume 0 means the pair never overlapped; skip it
				if ( v && *v != 0 && *v < bestVolume )
				{
					bestVolume = *v;
					mergeA = cr;
					mergeB = match;
				}
			}
		}
	}

	// If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
	// do the merge.
	bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
	if ( mergeA && (thresholdBelow || !mergeTargetMet))
	{
		CHull *merge = doMerge(mergeA,mergeB);

		HaF32 volumeA = mergeA->mVolume;
		HaF32 volumeB = mergeB->mVolume;

		if ( merge )
		{
			combine = true;
			output.push_back(merge);
			// keep every hull except the two that were just merged
			for (CHullVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
			{
				CHull *h = (*j);
				if ( h !=mergeA && h != mergeB )
				{
					output.push_back(h);
				}
			}

			delete mergeA;
			delete mergeB;

			// Remove the old volumes and add the new one.
			mTotalVolume -= (volumeA + volumeB);
			mTotalVolume += merge->mVolume;
		}
		// NOTE(review): mChulls is replaced even when doMerge returned NULL,
		// in which case 'output' is empty and all hulls are dropped -- confirm.
		mChulls = output;
	}

	return combine;
}
hacd::HaI32 dgPolygonSoupDatabaseBuilder::FilterFace (hacd::HaI32 count, hacd::HaI32* const pool)
{
	// Validate and clean one polygon (vertex indices in 'pool').  Degenerate
	// edges, collinear vertices and corners that turn against the face normal
	// are removed; the surviving loop is written back to 'pool'.  Returns the
	// cleaned vertex count, or 0 when fewer than 3 usable vertices remain.
	if (count == 3) {
		// fast path for triangles: reject zero-length edges ...
		dgBigVector p0 (m_vertexPoints[pool[2]]);
		for (hacd::HaI32 i = 0; i < 3; i ++) {
			dgBigVector p1 (m_vertexPoints[pool[i]]);
			dgBigVector edge (p1 - p0);
			hacd::HaF64 mag2 = edge % edge;
			if (mag2 < hacd::HaF32 (1.0e-6f)) {
				count = 0;
			}
			p0 = p1;
		}

		if (count == 3) {
			// ... and near-zero-area triangles
			dgBigVector edge0 (m_vertexPoints[pool[2]] - m_vertexPoints[pool[0]]);
			dgBigVector edge1 (m_vertexPoints[pool[1]] - m_vertexPoints[pool[0]]);
			dgBigVector normal (edge0 * edge1);
			hacd::HaF64 mag2 = normal % normal;
			if (mag2 < hacd::HaF32 (1.0e-8f)) {
				count = 0;
			}
		}
	} else {
		// general polygon: build it as a half-edge face first (this also
		// strips duplicated indices, see AddFilterFace)
		dgPolySoupFilterAllocator polyhedra;

		count = polyhedra.AddFilterFace (hacd::HaU32 (count), pool);

		if (!count) {
			return 0;
		}

		dgEdge* edge = &polyhedra.GetRoot()->GetInfo();
		if (edge->m_incidentFace < 0) {
			edge = edge->m_twin;
		}

		// pass 1: remove zero-length edges, restarting the scan after each removal
		bool flag = true;
		while (flag) {
			flag = false;
			if (count >= 3) {
				dgEdge* ptr = edge;

				dgBigVector p0 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
				do {
					dgBigVector p1 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
					dgBigVector e0 (p1 - p0);
					hacd::HaF64 mag2 = e0 % e0;
					if (mag2 < hacd::HaF32 (1.0e-6f)) {
						// unlink ptr and its twin from both loops, then rescan
						count --;
						flag = true;
						edge = ptr->m_next;
						ptr->m_prev->m_next = ptr->m_next;
						ptr->m_next->m_prev = ptr->m_prev;
						ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
						ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
						break;
					}
					p0 = p1;
					ptr = ptr->m_next;
				} while (ptr != edge);
			}
		}
		if (count >= 3) {
			// pass 2: remove collinear vertices and corners that turn against
			// the face normal
			flag = true;
			dgBigVector normal (polyhedra.FaceNormal (edge, &m_vertexPoints[0].m_x, sizeof (dgBigVector)));
			HACD_ASSERT ((normal % normal) > hacd::HaF32 (1.0e-10f));
			normal = normal.Scale (dgRsqrt (normal % normal + hacd::HaF32 (1.0e-20f)));

			while (flag) {
				flag = false;
				if (count >= 3) {
					dgEdge* ptr = edge;

					dgBigVector p0 (&m_vertexPoints[ptr->m_prev->m_incidentVertex].m_x);
					dgBigVector p1 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
					dgBigVector e0 (p1 - p0);
					e0 = e0.Scale (dgRsqrt (e0 % e0 + hacd::HaF32(1.0e-10f)));
					do {
						dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
						dgBigVector e1 (p2 - p1);
						e1 = e1.Scale (dgRsqrt (e1 % e1 + hacd::HaF32(1.0e-10f)));
						hacd::HaF64 mag2 = e1 % e0;
						if (mag2 > hacd::HaF32 (0.9999f)) {
							// consecutive edges nearly parallel: drop the middle vertex
							count --;
							flag = true;
							edge = ptr->m_next;
							ptr->m_prev->m_next = ptr->m_next;
							ptr->m_next->m_prev = ptr->m_prev;
							ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
							ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
							break;
						}

						dgBigVector n (e0 * e1);
						mag2 = n % normal;
						if (mag2 < hacd::HaF32 (1.0e-5f)) {
							// corner turns against the face normal (concave or degenerate): drop it
							count --;
							flag = true;
							edge = ptr->m_next;
							ptr->m_prev->m_next = ptr->m_next;
							ptr->m_next->m_prev = ptr->m_prev;
							ptr->m_twin->m_next->m_prev = ptr->m_twin->m_prev;
							ptr->m_twin->m_prev->m_next = ptr->m_twin->m_next;
							break;
						}

						e0 = e1;
						p1 = p2;
						ptr = ptr->m_next;
					} while (ptr != edge);
				}
			}
		}

		dgEdge* first = edge;
		if (count >= 3) {
			// pick the sharpest corner (smallest |cos| between its edges) as the
			// starting vertex, then copy the cleaned loop back into 'pool'
			hacd::HaF64 best = hacd::HaF32 (2.0f);
			dgEdge* ptr = edge;

			dgBigVector p0 (&m_vertexPoints[ptr->m_incidentVertex].m_x);
			dgBigVector p1 (&m_vertexPoints[ptr->m_next->m_incidentVertex].m_x);
			dgBigVector e0 (p1 - p0);
			e0 = e0.Scale (dgRsqrt (e0 % e0 + hacd::HaF32(1.0e-10f)));
			do {
				dgBigVector p2 (&m_vertexPoints[ptr->m_next->m_next->m_incidentVertex].m_x);
				dgBigVector e1 (p2 - p1);
				e1 = e1.Scale (dgRsqrt (e1 % e1 + hacd::HaF32(1.0e-10f)));
				hacd::HaF64 mag2 = fabs (e1 % e0);
				if (mag2 < best) {
					best = mag2;
					first = ptr;
				}

				e0 = e1;
				p1 = p2;
				ptr = ptr->m_next;
			} while (ptr != edge);

			count = 0;
			ptr = first;
			do {
				pool[count] = ptr->m_incidentVertex;
				count ++;
				ptr = ptr->m_next;
			} while (ptr != first);
		}

#ifdef _DEBUG
		// debug check: the final loop must be convex with respect to the face normal
		if (count >= 3) {
			hacd::HaI32 j0 = count - 2;
			hacd::HaI32 j1 = count - 1;
			dgBigVector normal (polyhedra.FaceNormal (edge, &m_vertexPoints[0].m_x, sizeof (dgBigVector)));
			for (hacd::HaI32 j2 = 0; j2 < count; j2 ++) {
				dgBigVector p0 (&m_vertexPoints[pool[j0]].m_x);
				dgBigVector p1 (&m_vertexPoints[pool[j1]].m_x);
				dgBigVector p2 (&m_vertexPoints[pool[j2]].m_x);

				dgBigVector e0 ((p0 - p1));
				dgBigVector e1 ((p2 - p1));

				dgBigVector n (e1 * e0);
				HACD_ASSERT ((n % normal) > hacd::HaF32 (0.0f));

				j0 = j1;
				j1 = j2;
			}
		}
#endif
	}

	return (count >= 3) ? count : 0;
}