/// Copies the seed entries (x, y, spanIndex triples) from srcStack onto
/// dstStack, skipping entries whose span index is negative or whose span
/// already has a region assigned in srcReg.
static void appendStacks(rcIntArray& srcStack, rcIntArray& dstStack,
						 unsigned short* srcReg)
{
	for (int k = 0; k < srcStack.size(); k += 3)
	{
		const int spanIdx = srcStack[k+2];
		const bool skip = (spanIdx < 0) || (srcReg[spanIdx] != 0);
		if (skip)
			continue;
		dstStack.push(srcStack[k]);
		dstStack.push(srcStack[k+1]);
		dstStack.push(spanIdx);
	}
}
/// Appends v to the array only when it is not already present.
static void addUnique(rcIntArray& a, int v)
{
	const bool alreadyPresent = a.contains(v);
	if (!alreadyPresent)
		a.push(v);
}
/// Gathers height values for the cells of a height patch (hp) covering one
/// polygon, region-seeded variant.
/// - First pass: for every patch cell, copies the height of the span that
///   belongs to 'region' and, when any 4-neighbour lies in a different
///   region, pushes the location onto 'stack' as a flood-fill seed.
///   Reads into chf are offset by the border size 'bs' because the
///   polymesh vertices already have the border removed.
/// - If the polygon contained no span of 'region' at all (empty == true),
///   seeds are taken from the polygon vertices instead via
///   getHeightDataSeedsFromVertices.
/// - Second pass: flood fills outward from the seeds, copying neighbour
///   span heights into every patch cell still holding RC_UNSET_HEIGHT.
///   The seed queue is consumed with a 'head' index and compacted with
///   memmove every RETRACT_SIZE entries to bound its growth.
static void getHeightData(const rcCompactHeightfield& chf, const unsigned short* poly, const int npoly, const unsigned short* verts, const int bs, rcHeightPatch& hp, rcIntArray& stack, int region) { // Note: Reads to the compact heightfield are offset by border size (bs) // since border size offset is already removed from the polymesh vertices. stack.resize(0); memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height); bool empty = true; // Copy the height from the same region, and mark region borders // as seed points to fill the rest. for (int hy = 0; hy < hp.height; hy++) { int y = hp.ymin + hy + bs; for (int hx = 0; hx < hp.width; hx++) { int x = hp.xmin + hx + bs; const rcCompactCell& c = chf.cells[x+y*chf.width]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; if (s.reg == region) { // Store height hp.data[hx + hy*hp.width] = s.y; empty = false; // If any of the neighbours is not in same region, // add the current location as flood fill start bool border = false; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(s, dir) != RC_NOT_CONNECTED) { const int ax = x + rcGetDirOffsetX(dir); const int ay = y + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); const rcCompactSpan& as = chf.spans[ai]; if (as.reg != region) { border = true; break; } } } if (border) { stack.push(x); stack.push(y); stack.push(i); } break; } } } } // if the polygon does not contian any points from the current region (rare, but happens) // then use the cells closest to the polygon vertices as seeds to fill the height field if (empty) getHeightDataSeedsFromVertices(chf, poly, npoly, verts, bs, hp, stack); static const int RETRACT_SIZE = 256; int head = 0; while (head*3 < stack.size()) { int cx = stack[head*3+0]; int cy = stack[head*3+1]; int ci = stack[head*3+2]; head++; if (head >= RETRACT_SIZE) { head = 0; if (stack.size() > RETRACT_SIZE*3) memmove(&stack[0], 
&stack[RETRACT_SIZE*3], sizeof(int)*(stack.size()-RETRACT_SIZE*3)); stack.resize(stack.size()-RETRACT_SIZE*3); } const rcCompactSpan& cs = chf.spans[ci]; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); const int hx = ax - hp.xmin - bs; const int hy = ay - hp.ymin - bs; if (hx < 0 || hx >= hp.width || hy < 0 || hy >= hp.height) continue; if (hp.data[hx + hy*hp.width] != RC_UNSET_HEIGHT) continue; const int ai = (int)chf.cells[ax + ay*chf.width].index + rcGetCon(cs, dir); const rcCompactSpan& as = chf.spans[ai]; hp.data[hx + hy*hp.width] = as.y; stack.push(ax); stack.push(ay); stack.push(ai); } } }
/// Fallback seed generation for getHeightData, used when the polygon
/// contains no spans from its own region.
/// - For each polygon vertex, searches the 3x3 cell neighbourhood (offset
///   table) for the span whose height is closest to the vertex height and
///   pushes it as a seed. Cell reads are offset by border size 'bs'.
/// - Flood fills from those seeds toward the polygon center (average of
///   the vertices), using hp.data as a 0/1 visited mask; as soon as a
///   location within one cell of the center is reached, the stack is
///   reset to that single location.
/// - Finally resets hp.data to RC_UNSET_HEIGHT (0xff bytes), writes the
///   seed span heights in, and offsets the remaining seed coordinates by
///   'bs' so the caller's flood fill (which subtracts bs) can use them.
static void getHeightDataSeedsFromVertices(const rcCompactHeightfield& chf, const unsigned short* poly, const int npoly, const unsigned short* verts, const int bs, rcHeightPatch& hp, rcIntArray& stack) { // Floodfill the heightfield to get 2D height data, // starting at vertex locations as seeds. // Note: Reads to the compact heightfield are offset by border size (bs) // since border size offset is already removed from the polymesh vertices. memset(hp.data, 0, sizeof(unsigned short)*hp.width*hp.height); stack.resize(0); static const int offset[9*2] = { 0,0, -1,-1, 0,-1, 1,-1, 1,0, 1,1, 0,1, -1,1, -1,0, }; // Use poly vertices as seed points for the flood fill. for (int j = 0; j < npoly; ++j) { int cx = 0, cz = 0, ci =-1; int dmin = RC_UNSET_HEIGHT; for (int k = 0; k < 9; ++k) { const int ax = (int)verts[poly[j]*3+0] + offset[k*2+0]; const int ay = (int)verts[poly[j]*3+1]; const int az = (int)verts[poly[j]*3+2] + offset[k*2+1]; if (ax < hp.xmin || ax >= hp.xmin+hp.width || az < hp.ymin || az >= hp.ymin+hp.height) continue; const rcCompactCell& c = chf.cells[(ax+bs)+(az+bs)*chf.width]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; int d = rcAbs(ay - (int)s.y); if (d < dmin) { cx = ax; cz = az; ci = i; dmin = d; } } } if (ci != -1) { stack.push(cx); stack.push(cz); stack.push(ci); } } // Find center of the polygon using flood fill. int pcx = 0, pcz = 0; for (int j = 0; j < npoly; ++j) { pcx += (int)verts[poly[j]*3+0]; pcz += (int)verts[poly[j]*3+2]; } pcx /= npoly; pcz /= npoly; for (int i = 0; i < stack.size(); i += 3) { int cx = stack[i+0]; int cy = stack[i+1]; int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; hp.data[idx] = 1; } while (stack.size() > 0) { int ci = stack.pop(); int cy = stack.pop(); int cx = stack.pop(); // Check if close to center of the polygon. 
if (rcAbs(cx-pcx) <= 1 && rcAbs(cy-pcz) <= 1) { stack.resize(0); stack.push(cx); stack.push(cy); stack.push(ci); break; } const rcCompactSpan& cs = chf.spans[ci]; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || ay < hp.ymin || ay >= (hp.ymin+hp.height)) continue; if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != 0) continue; const int ai = (int)chf.cells[(ax+bs)+(ay+bs)*chf.width].index + rcGetCon(cs, dir); int idx = ax-hp.xmin+(ay-hp.ymin)*hp.width; hp.data[idx] = 1; stack.push(ax); stack.push(ay); stack.push(ai); } } memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height); // Mark start locations. for (int i = 0; i < stack.size(); i += 3) { int cx = stack[i+0]; int cy = stack[i+1]; int ci = stack[i+2]; int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; const rcCompactSpan& cs = chf.spans[ci]; hp.data[idx] = cs.y; // getHeightData seeds are given in coordinates with borders stack[i+0] += bs; stack[i+1] += bs; } }
/// Builds the detail mesh for a single polygon.
/// - Copies the input polygon vertices (in/nin) into verts.
/// - If sampleDist > 0, tessellates each outline edge: endpoints are
///   ordered lexicographically (swapping if needed) so shared edges
///   produce identical samples on both sides, sample heights are read
///   from the height patch hp, and the sampled edge is simplified
///   against sampleMaxError before surviving points go into verts/hull.
/// - Triangulates the hull with triangulateHull; slivers/small polygons
///   (minExtent < sampleDist*2) return immediately after that.
/// - Otherwise samples interior points on a sampleDist grid (skipping
///   points too close to the polygon edge), then greedily inserts the
///   sample with the largest height error and re-triangulates via
///   delaunayHull until the error is within sampleMaxError or MAX_VERTS
///   is reached. Sample positions are jittered slightly to avoid
///   degenerate symmetric triangulations.
/// - Returns true; if more than MAX_TRIS triangles result, the triangle
///   list is truncated and an error is logged.
static bool buildPolyDetail(rcContext* ctx, const float* in, const int nin, const float sampleDist, const float sampleMaxError, const rcCompactHeightfield& chf, const rcHeightPatch& hp, float* verts, int& nverts, rcIntArray& tris, rcIntArray& edges, rcIntArray& samples) { static const int MAX_VERTS = 127; static const int MAX_TRIS = 255; // Max tris for delaunay is 2n-2-k (n=num verts, k=num hull verts). static const int MAX_VERTS_PER_EDGE = 32; float edge[(MAX_VERTS_PER_EDGE+1)*3]; int hull[MAX_VERTS]; int nhull = 0; nverts = 0; for (int i = 0; i < nin; ++i) rcVcopy(&verts[i*3], &in[i*3]); nverts = nin; edges.resize(0); tris.resize(0); const float cs = chf.cs; const float ics = 1.0f/cs; // Calculate minimum extents of the polygon based on input data. float minExtent = polyMinExtent(verts, nverts); // Tessellate outlines. // This is done in separate pass in order to ensure // seamless height values across the ply boundaries. if (sampleDist > 0) { for (int i = 0, j = nin-1; i < nin; j=i++) { const float* vj = &in[j*3]; const float* vi = &in[i*3]; bool swapped = false; // Make sure the segments are always handled in same order // using lexological sort or else there will be seams. if (fabsf(vj[0]-vi[0]) < 1e-6f) { if (vj[2] > vi[2]) { rcSwap(vj,vi); swapped = true; } } else { if (vj[0] > vi[0]) { rcSwap(vj,vi); swapped = true; } } // Create samples along the edge. float dx = vi[0] - vj[0]; float dy = vi[1] - vj[1]; float dz = vi[2] - vj[2]; float d = sqrtf(dx*dx + dz*dz); int nn = 1 + (int)floorf(d/sampleDist); if (nn >= MAX_VERTS_PER_EDGE) nn = MAX_VERTS_PER_EDGE-1; if (nverts+nn >= MAX_VERTS) nn = MAX_VERTS-1-nverts; for (int k = 0; k <= nn; ++k) { float u = (float)k/(float)nn; float* pos = &edge[k*3]; pos[0] = vj[0] + dx*u; pos[1] = vj[1] + dy*u; pos[2] = vj[2] + dz*u; pos[1] = getHeight(pos[0],pos[1],pos[2], cs, ics, chf.ch, hp)*chf.ch; } // Simplify samples. 
int idx[MAX_VERTS_PER_EDGE] = {0,nn}; int nidx = 2; for (int k = 0; k < nidx-1; ) { const int a = idx[k]; const int b = idx[k+1]; const float* va = &edge[a*3]; const float* vb = &edge[b*3]; // Find maximum deviation along the segment. float maxd = 0; int maxi = -1; for (int m = a+1; m < b; ++m) { float dev = distancePtSeg(&edge[m*3],va,vb); if (dev > maxd) { maxd = dev; maxi = m; } } // If the max deviation is larger than accepted error, // add new point, else continue to next segment. if (maxi != -1 && maxd > rcSqr(sampleMaxError)) { for (int m = nidx; m > k; --m) idx[m] = idx[m-1]; idx[k+1] = maxi; nidx++; } else { ++k; } } hull[nhull++] = j; // Add new vertices. if (swapped) { for (int k = nidx-2; k > 0; --k) { rcVcopy(&verts[nverts*3], &edge[idx[k]*3]); hull[nhull++] = nverts; nverts++; } } else { for (int k = 1; k < nidx-1; ++k) { rcVcopy(&verts[nverts*3], &edge[idx[k]*3]); hull[nhull++] = nverts; nverts++; } } } } // If the polygon minimum extent is small (sliver or small triangle), do not try to add internal points. if (minExtent < sampleDist*2) { triangulateHull(nverts, verts, nhull, hull, tris); return true; } // Tessellate the base mesh. // We're using the triangulateHull instead of delaunayHull as it tends to // create a bit better triangulation for long thing triangles when there // are no internal points. triangulateHull(nverts, verts, nhull, hull, tris); if (tris.size() == 0) { // Could not triangulate the poly, make sure there is some valid data there. ctx->log(RC_LOG_WARNING, "buildPolyDetail: Could not triangulate polygon (%d verts).", nverts); return true; } if (sampleDist > 0) { // Create sample locations in a grid. 
float bmin[3], bmax[3]; rcVcopy(bmin, in); rcVcopy(bmax, in); for (int i = 1; i < nin; ++i) { rcVmin(bmin, &in[i*3]); rcVmax(bmax, &in[i*3]); } int x0 = (int)floorf(bmin[0]/sampleDist); int x1 = (int)ceilf(bmax[0]/sampleDist); int z0 = (int)floorf(bmin[2]/sampleDist); int z1 = (int)ceilf(bmax[2]/sampleDist); samples.resize(0); for (int z = z0; z < z1; ++z) { for (int x = x0; x < x1; ++x) { float pt[3]; pt[0] = x*sampleDist; pt[1] = (bmax[1]+bmin[1])*0.5f; pt[2] = z*sampleDist; // Make sure the samples are not too close to the edges. if (distToPoly(nin,in,pt) > -sampleDist/2) continue; samples.push(x); samples.push(getHeight(pt[0], pt[1], pt[2], cs, ics, chf.ch, hp)); samples.push(z); samples.push(0); // Not added } } // Add the samples starting from the one that has the most // error. The procedure stops when all samples are added // or when the max error is within treshold. const int nsamples = samples.size()/4; for (int iter = 0; iter < nsamples; ++iter) { if (nverts >= MAX_VERTS) break; // Find sample with most error. float bestpt[3] = {0,0,0}; float bestd = 0; int besti = -1; for (int i = 0; i < nsamples; ++i) { const int* s = &samples[i*4]; if (s[3]) continue; // skip added. float pt[3]; // The sample location is jittered to get rid of some bad triangulations // which are cause by symmetrical data from the grid structure. pt[0] = s[0]*sampleDist + getJitterX(i)*cs*0.1f; pt[1] = s[1]*chf.ch; pt[2] = s[2]*sampleDist + getJitterY(i)*cs*0.1f; float d = distToTriMesh(pt, verts, nverts, &tris[0], tris.size()/4); if (d < 0) continue; // did not hit the mesh. if (d > bestd) { bestd = d; besti = i; rcVcopy(bestpt,pt); } } // If the max error is within accepted threshold, stop tesselating. if (bestd <= sampleMaxError || besti == -1) break; // Mark sample as added. samples[besti*4+3] = 1; // Add the new sample point. rcVcopy(&verts[nverts*3],bestpt); nverts++; // Create new triangulation. // TODO: Incremental add instead of full rebuild. 
edges.resize(0); tris.resize(0); delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges); } } const int ntris = tris.size()/4; if (ntris > MAX_TRIS) { tris.resize(MAX_TRIS*4); ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Shrinking triangle count from %d to max %d.", ntris, MAX_TRIS); } return true; }
static void triangulateHull(const int nverts, const float* verts, const int nhull, const int* hull, rcIntArray& tris) { int start = 0, left = 1, right = nhull-1; // Start from an ear with shortest perimeter. // This tends to favor well formed triangles as starting point. float dmin = 0; for (int i = 0; i < nhull; i++) { int pi = prev(i, nhull); int ni = next(i, nhull); const float* pv = &verts[hull[pi]*3]; const float* cv = &verts[hull[i]*3]; const float* nv = &verts[hull[ni]*3]; const float d = vdist2(pv,cv) + vdist2(cv,nv) + vdist2(nv,pv); if (d < dmin) { start = i; left = ni; right = pi; dmin = d; } } // Add first triangle tris.push(hull[start]); tris.push(hull[left]); tris.push(hull[right]); tris.push(0); // Triangulate the polygon by moving left or right, // depending on which triangle has shorter perimeter. // This heuristic was chose emprically, since it seems // handle tesselated straight edges well. while (next(left, nhull) != right) { // Check to see if se should advance left or right. int nleft = next(left, nhull); int nright = prev(right, nhull); const float* cvleft = &verts[hull[left]*3]; const float* nvleft = &verts[hull[nleft]*3]; const float* cvright = &verts[hull[right]*3]; const float* nvright = &verts[hull[nright]*3]; const float dleft = vdist2(cvleft, nvleft) + vdist2(nvleft, cvright); const float dright = vdist2(cvright, nvright) + vdist2(cvleft, nvright); if (dleft < dright) { tris.push(hull[left]); tris.push(hull[nleft]); tris.push(hull[right]); tris.push(0); left = nleft; } else { tris.push(hull[left]); tris.push(hull[nright]); tris.push(hull[right]); tris.push(0); right = nright; } } }
/// Height patch construction, variant without a border-size offset.
/// - Seeds a flood fill with the span nearest in height to each polygon
///   vertex (searching the 3x3 cell neighbourhood per vertex).
/// - Walks from the seeds toward the polygon center (average of the
///   vertices), using hp.data as a 0/1 visited mask; the first location
///   within one cell of the center becomes the single start seed.
/// - Resets hp.data to RC_UNSET_HEIGHT, writes the seed heights, then
///   flood fills span heights into every still-unset patch cell. The
///   final fill consumes the stack with a 'head' index, compacting it
///   via memmove every RETRACT_SIZE entries to bound its growth.
static void getHeightData(const rcCompactHeightfield& chf, const unsigned short* poly, const int npoly, const unsigned short* verts, rcHeightPatch& hp, rcIntArray& stack) { // Floodfill the heightfield to get 2D height data, // starting at vertex locations as seeds. memset(hp.data, 0, sizeof(unsigned short)*hp.width*hp.height); stack.resize(0); static const int offset[9*2] = { 0,0, -1,-1, 0,-1, 1,-1, 1,0, 1,1, 0,1, -1,1, -1,0, }; // Use poly vertices as seed points for the flood fill. for (int j = 0; j < npoly; ++j) { int cx = 0, cz = 0, ci =-1; int dmin = RC_UNSET_HEIGHT; for (int k = 0; k < 9; ++k) { const int ax = (int)verts[poly[j]*3+0] + offset[k*2+0]; const int ay = (int)verts[poly[j]*3+1]; const int az = (int)verts[poly[j]*3+2] + offset[k*2+1]; if (ax < hp.xmin || ax >= hp.xmin+hp.width || az < hp.ymin || az >= hp.ymin+hp.height) continue; const rcCompactCell& c = chf.cells[ax+az*chf.width]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; int d = rcAbs(ay - (int)s.y); if (d < dmin) { cx = ax; cz = az; ci = i; dmin = d; } } } if (ci != -1) { stack.push(cx); stack.push(cz); stack.push(ci); } } // Find center of the polygon using flood fill. int pcx = 0, pcz = 0; for (int j = 0; j < npoly; ++j) { pcx += (int)verts[poly[j]*3+0]; pcz += (int)verts[poly[j]*3+2]; } pcx /= npoly; pcz /= npoly; for (int i = 0; i < stack.size(); i += 3) { int cx = stack[i+0]; int cy = stack[i+1]; int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; hp.data[idx] = 1; } while (stack.size() > 0) { int ci = stack.pop(); int cy = stack.pop(); int cx = stack.pop(); // Check if close to center of the polygon. 
if (rcAbs(cx-pcx) <= 1 && rcAbs(cy-pcz) <= 1) { stack.resize(0); stack.push(cx); stack.push(cy); stack.push(ci); break; } const rcCompactSpan& cs = chf.spans[ci]; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || ay < hp.ymin || ay >= (hp.ymin+hp.height)) continue; if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != 0) continue; const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(cs, dir); int idx = ax-hp.xmin+(ay-hp.ymin)*hp.width; hp.data[idx] = 1; stack.push(ax); stack.push(ay); stack.push(ai); } } memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height); // Mark start locations. for (int i = 0; i < stack.size(); i += 3) { int cx = stack[i+0]; int cy = stack[i+1]; int ci = stack[i+2]; int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; const rcCompactSpan& cs = chf.spans[ci]; hp.data[idx] = cs.y; } static const int RETRACT_SIZE = 256; int head = 0; while (head*3 < stack.size()) { int cx = stack[head*3+0]; int cy = stack[head*3+1]; int ci = stack[head*3+2]; head++; if (head >= RETRACT_SIZE) { head = 0; if (stack.size() > RETRACT_SIZE*3) memmove(&stack[0], &stack[RETRACT_SIZE*3], sizeof(int)*(stack.size()-RETRACT_SIZE*3)); stack.resize(stack.size()-RETRACT_SIZE*3); } const rcCompactSpan& cs = chf.spans[ci]; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || ay < hp.ymin || ay >= (hp.ymin+hp.height)) continue; if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != RC_UNSET_HEIGHT) continue; const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(cs, dir); const rcCompactSpan& as = chf.spans[ai]; int idx = ax-hp.xmin+(ay-hp.ymin)*hp.width; hp.data[idx] = as.y; stack.push(ax); stack.push(ay); stack.push(ai); } } }
/// Detail-mesh construction for one polygon (dtCoordinates variant).
/// Same algorithm as the float* version: copy input vertices; when
/// sampleDist > 0 tessellate each outline edge in lexicographical order
/// (heights sampled from hp) and simplify against sampleMaxError;
/// triangulate with delaunayHull; then greedily insert the interior grid
/// sample with the largest height error (jittered positions) and
/// re-triangulate until within tolerance or MAX_VERTS is reached.
/// On triangulation failure a trivial fan (0, i-1, i) is emitted as
/// fallback data. Truncates the result to MAX_TRIS with an error log.
/// The MODIFY_VOXEL_FLAG build adds an (unused) area parameter and
/// guards the sampling passes with the same sampleDist test.
static bool buildPolyDetail(rcContext* ctx, const dtCoordinates* in, const int nin, const float sampleDist, const float sampleMaxError, const rcCompactHeightfield& chf, const rcHeightPatch& hp, dtCoordinates* verts, int& nverts, rcIntArray& tris, rcIntArray& edges, rcIntArray& samples #ifdef MODIFY_VOXEL_FLAG , const char /*area*/ #endif // MODIFY_VOXEL_FLAG ) { static const int MAX_VERTS = 127; static const int MAX_TRIS = 255; // Max tris for delaunay is 2n-2-k (n=num verts, k=num hull verts). static const int MAX_VERTS_PER_EDGE = 32; dtCoordinates edge[(MAX_VERTS_PER_EDGE+1)]; int hull[MAX_VERTS]; int nhull = 0; nverts = 0; for (int i = 0; i < nin; ++i) rcVcopy(verts[i], in[i]); nverts = nin; const float cs = chf.cs; const float ics = 1.0f/cs; // Tessellate outlines. // This is done in separate pass in order to ensure // seamless height values across the ply boundaries. #ifdef MODIFY_VOXEL_FLAG if( 0 < sampleDist /*&& rcIsTerrainArea( area )*/ ) #else // MODIFY_VOXEL_FLAG if (sampleDist > 0) #endif // MODIFY_VOXEL_FLAG { for (int i = 0, j = nin-1; i < nin; j=i++) { const dtCoordinates* vj = &in[j]; const dtCoordinates* vi = &in[i]; bool swapped = false; // Make sure the segments are always handled in same order // using lexological sort or else there will be seams. if (fabsf(vj->X()-vi->X()) < 1e-6f) { if (vj->Z() > vi->Z()) { rcSwap(vj,vi); swapped = true; } } else { if (vj->X() > vi->X()) { rcSwap(vj,vi); swapped = true; } } // Create samples along the edge. 
float dx = vi->X() - vj->X(); float dy = vi->Y() - vj->Y(); float dz = vi->Z() - vj->Z(); float d = sqrtf(dx*dx + dz*dz); int nn = 1 + (int)floorf(d/sampleDist); if (nn >= MAX_VERTS_PER_EDGE) nn = MAX_VERTS_PER_EDGE-1; if (nverts+nn >= MAX_VERTS) nn = MAX_VERTS-1-nverts; for (int k = 0; k <= nn; ++k) { float u = (float)k/(float)nn; dtCoordinates* pos = &edge[k]; pos->SetX( vj->X() + dx*u ); pos->SetY( vj->Y() + dy*u ); pos->SetZ( vj->Z() + dz*u ); pos->SetY( getHeight(pos->X(),pos->Y(),pos->Z(), cs, ics, chf.ch, hp)*chf.ch ); } // Simplify samples. int idx[MAX_VERTS_PER_EDGE] = {0,nn}; int nidx = 2; for (int k = 0; k < nidx-1; ) { const int a = idx[k]; const int b = idx[k+1]; const dtCoordinates va( edge[a] ); const dtCoordinates vb( edge[b] ); // Find maximum deviation along the segment. float maxd = 0; int maxi = -1; for (int m = a+1; m < b; ++m) { float dev = distancePtSeg(edge[m],va,vb); if (dev > maxd) { maxd = dev; maxi = m; } } // If the max deviation is larger than accepted error, // add new point, else continue to next segment. if (maxi != -1 && maxd > rcSqr(sampleMaxError)) { for (int m = nidx; m > k; --m) idx[m] = idx[m-1]; idx[k+1] = maxi; nidx++; } else { ++k; } } hull[nhull++] = j; // Add new vertices. if (swapped) { for (int k = nidx-2; k > 0; --k) { rcVcopy(verts[nverts], edge[idx[k]]); hull[nhull++] = nverts; nverts++; } } else { for (int k = 1; k < nidx-1; ++k) { rcVcopy(verts[nverts], edge[idx[k]]); hull[nhull++] = nverts; nverts++; } } } } // Tessellate the base mesh. edges.resize(0); tris.resize(0); delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges); if (tris.size() == 0) { // Could not triangulate the poly, make sure there is some valid data there. 
ctx->log(RC_LOG_WARNING, "buildPolyDetail: Could not triangulate polygon, adding default data."); for (int i = 2; i < nverts; ++i) { tris.push(0); tris.push(i-1); tris.push(i); tris.push(0); } return true; } #ifdef MODIFY_VOXEL_FLAG if( 0 < sampleDist /*&& rcIsTerrainArea( area )*/ ) #else // MODIFY_VOXEL_FLAG if (sampleDist > 0) #endif // MODIFY_VOXEL_FLAG { // Create sample locations in a grid. dtCoordinates bmin, bmax; rcVcopy(bmin, in[0]); rcVcopy(bmax, in[0]); for (int i = 1; i < nin; ++i) { rcVmin(bmin, in[i]); rcVmax(bmax, in[i]); } int x0 = (int)floorf(bmin.X()/sampleDist); int x1 = (int)ceilf(bmax.X()/sampleDist); int z0 = (int)floorf(bmin.Z()/sampleDist); int z1 = (int)ceilf(bmax.Z()/sampleDist); samples.resize(0); for (int z = z0; z < z1; ++z) { for (int x = x0; x < x1; ++x) { const dtCoordinates pt( x*sampleDist, (bmax.Y()+bmin.Y())*0.5f, z*sampleDist ); // Make sure the samples are not too close to the edges. if (distToPoly(nin,in,pt) > -sampleDist/2) continue; samples.push(x); samples.push(getHeight(pt.X(), pt.Y(), pt.Z(), cs, ics, chf.ch, hp)); samples.push(z); samples.push(0); // Not added } } // Add the samples starting from the one that has the most // error. The procedure stops when all samples are added // or when the max error is within treshold. const int nsamples = samples.size()/4; for (int iter = 0; iter < nsamples; ++iter) { if (nverts >= MAX_VERTS) break; // Find sample with most error. dtCoordinates bestpt; float bestd = 0; int besti = -1; for (int i = 0; i < nsamples; ++i) { const int* s = &samples[i*4]; if (s[3]) continue; // skip added. const dtCoordinates pt( s[0]*sampleDist + getJitterX(i)*cs*0.1f, s[1]*chf.ch, s[2]*sampleDist + getJitterY(i)*cs*0.1f ); // The sample location is jittered to get rid of some bad triangulations // which are cause by symmetrical data from the grid structure. float d = distToTriMesh(pt, verts, nverts, &tris[0], tris.size()/4); if (d < 0) continue; // did not hit the mesh. 
if (d > bestd) { bestd = d; besti = i; rcVcopy(bestpt,pt); } } // If the max error is within accepted threshold, stop tesselating. if (bestd <= sampleMaxError || besti == -1) break; // Mark sample as added. samples[besti*4+3] = 1; // Add the new sample point. rcVcopy(verts[nverts],bestpt); nverts++; // Create new triangulation. // TODO: Incremental add instead of full rebuild. edges.resize(0); tris.resize(0); delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges); } } const int ntris = tris.size()/4; if (ntris > MAX_TRIS) { tris.resize(MAX_TRIS*4); ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Shrinking triangle count from %d to max %d.", ntris, MAX_TRIS); } return true; }
/// Simplifies a raw contour (points: x,y,z,flags per vertex) into
/// 'simplified' (x,y,z,rawIndex per vertex).
/// - Initial vertices: every point where the neighbour region or area
///   border changes; if the contour has no connections at all, the
///   lower-left and upper-right vertices are used instead.
/// - Refinement: repeatedly inserts the raw point with the maximum
///   deviation from its simplified segment (segments are walked in
///   lexicographical order so opposite traversal directions agree)
///   until every deviation is within maxError. Only outer edges and
///   area borders are tessellated.
/// - Optional pass (maxEdgeLen > 0 and the corresponding build flags):
///   splits simplified edges longer than maxEdgeLen, rounding the split
///   index consistently regardless of traversal direction.
/// - Final pass: rewrites each vertex's fourth component to combine the
///   next raw point's region/area-border bits with the current raw
///   point's border-vertex bit.
static void simplifyContour(rcIntArray& points, rcIntArray& simplified, const float maxError, const int maxEdgeLen, const int buildFlags) { // Add initial points. bool hasConnections = false; for (int i = 0; i < points.size(); i += 4) { if ((points[i+3] & RC_CONTOUR_REG_MASK) != 0) { hasConnections = true; break; } } if (hasConnections) { // The contour has some portals to other regions. // Add a new point to every location where the region changes. for (int i = 0, ni = points.size()/4; i < ni; ++i) { int ii = (i+1) % ni; const bool differentRegs = (points[i*4+3] & RC_CONTOUR_REG_MASK) != (points[ii*4+3] & RC_CONTOUR_REG_MASK); const bool areaBorders = (points[i*4+3] & RC_AREA_BORDER) != (points[ii*4+3] & RC_AREA_BORDER); if (differentRegs || areaBorders) { simplified.push(points[i*4+0]); simplified.push(points[i*4+1]); simplified.push(points[i*4+2]); simplified.push(i); } } } if (simplified.size() == 0) { // If there is no connections at all, // create some initial points for the simplification process. // Find lower-left and upper-right vertices of the contour. int llx = points[0]; int lly = points[1]; int llz = points[2]; int lli = 0; int urx = points[0]; int ury = points[1]; int urz = points[2]; int uri = 0; for (int i = 0; i < points.size(); i += 4) { int x = points[i+0]; int y = points[i+1]; int z = points[i+2]; if (x < llx || (x == llx && z < llz)) { llx = x; lly = y; llz = z; lli = i/4; } if (x > urx || (x == urx && z > urz)) { urx = x; ury = y; urz = z; uri = i/4; } } simplified.push(llx); simplified.push(lly); simplified.push(llz); simplified.push(lli); simplified.push(urx); simplified.push(ury); simplified.push(urz); simplified.push(uri); } // Add points until all raw points are within // error tolerance to the simplified shape. 
const int pn = points.size()/4; for (int i = 0; i < simplified.size()/4; ) { int ii = (i+1) % (simplified.size()/4); int ax = simplified[i*4+0]; int az = simplified[i*4+2]; int ai = simplified[i*4+3]; int bx = simplified[ii*4+0]; int bz = simplified[ii*4+2]; int bi = simplified[ii*4+3]; // Find maximum deviation from the segment. float maxd = 0; int maxi = -1; int ci, cinc, endi; // Traverse the segment in lexilogical order so that the // max deviation is calculated similarly when traversing // opposite segments. if (bx > ax || (bx == ax && bz > az)) { cinc = 1; ci = (ai+cinc) % pn; endi = bi; } else { cinc = pn-1; ci = (bi+cinc) % pn; endi = ai; rcSwap(ax, bx); rcSwap(az, bz); } // Tessellate only outer edges or edges between areas. if ((points[ci*4+3] & RC_CONTOUR_REG_MASK) == 0 || (points[ci*4+3] & RC_AREA_BORDER)) { while (ci != endi) { float d = distancePtSeg(points[ci*4+0], points[ci*4+2], ax, az, bx, bz); if (d > maxd) { maxd = d; maxi = ci; } ci = (ci+cinc) % pn; } } // If the max deviation is larger than accepted error, // add new point, else continue to next segment. if (maxi != -1 && maxd > (maxError*maxError)) { // Add space for the new point. simplified.resize(simplified.size()+4); const int n = simplified.size()/4; for (int j = n-1; j > i; --j) { simplified[j*4+0] = simplified[(j-1)*4+0]; simplified[j*4+1] = simplified[(j-1)*4+1]; simplified[j*4+2] = simplified[(j-1)*4+2]; simplified[j*4+3] = simplified[(j-1)*4+3]; } // Add the point. simplified[(i+1)*4+0] = points[maxi*4+0]; simplified[(i+1)*4+1] = points[maxi*4+1]; simplified[(i+1)*4+2] = points[maxi*4+2]; simplified[(i+1)*4+3] = maxi; } else { ++i; } } // Split too long edges. 
if (maxEdgeLen > 0 && (buildFlags & (RC_CONTOUR_TESS_WALL_EDGES|RC_CONTOUR_TESS_AREA_EDGES)) != 0) { for (int i = 0; i < simplified.size()/4; ) { const int ii = (i+1) % (simplified.size()/4); const int ax = simplified[i*4+0]; const int az = simplified[i*4+2]; const int ai = simplified[i*4+3]; const int bx = simplified[ii*4+0]; const int bz = simplified[ii*4+2]; const int bi = simplified[ii*4+3]; // Find maximum deviation from the segment. int maxi = -1; int ci = (ai+1) % pn; // Tessellate only outer edges or edges between areas. bool tess = false; // Wall edges. if ((buildFlags & RC_CONTOUR_TESS_WALL_EDGES) && (points[ci*4+3] & RC_CONTOUR_REG_MASK) == 0) tess = true; // Edges between areas. if ((buildFlags & RC_CONTOUR_TESS_AREA_EDGES) && (points[ci*4+3] & RC_AREA_BORDER)) tess = true; if (tess) { int dx = bx - ax; int dz = bz - az; if (dx*dx + dz*dz > maxEdgeLen*maxEdgeLen) { // Round based on the segments in lexilogical order so that the // max tesselation is consistent regardles in which direction // segments are traversed. const int n = bi < ai ? (bi+pn - ai) : (bi - ai); if (n > 1) { if (bx > ax || (bx == ax && bz > az)) maxi = (ai + n/2) % pn; else maxi = (ai + (n+1)/2) % pn; } } } // If the max deviation is larger than accepted error, // add new point, else continue to next segment. if (maxi != -1) { // Add space for the new point. simplified.resize(simplified.size()+4); const int n = simplified.size()/4; for (int j = n-1; j > i; --j) { simplified[j*4+0] = simplified[(j-1)*4+0]; simplified[j*4+1] = simplified[(j-1)*4+1]; simplified[j*4+2] = simplified[(j-1)*4+2]; simplified[j*4+3] = simplified[(j-1)*4+3]; } // Add the point. 
simplified[(i+1)*4+0] = points[maxi*4+0]; simplified[(i+1)*4+1] = points[maxi*4+1]; simplified[(i+1)*4+2] = points[maxi*4+2]; simplified[(i+1)*4+3] = maxi; } else { ++i; } } } for (int i = 0; i < simplified.size()/4; ++i) { // The edge vertex flag is take from the current raw point, // and the neighbour region is take from the next raw point. const int ai = (simplified[i*4+3]+1) % pn; const int bi = simplified[i*4+3]; simplified[i*4+3] = (points[ai*4+3] & (RC_CONTOUR_REG_MASK|RC_AREA_BORDER)) | (points[bi*4+3] & RC_BORDER_VERTEX); } }
/// Flood fills a new region id 'r' outward from span (x,y,i) across
/// connected spans whose distance-field value is at least level-2
/// (clamped to minLevel). 'src' holds per-span (region, distance) pairs;
/// the seed is tagged before the walk starts. A span adjacent — including
/// diagonally, via the dir+1 neighbour-of-neighbour probe — to a
/// different non-zero region is un-marked and abandoned so regions do not
/// touch each other. Returns true if at least one span was claimed.
/// NOTE(review): connections are tested against the literal 0xf rather
/// than the RC_NOT_CONNECTED constant used elsewhere in this file —
/// presumably an older connection encoding; confirm it matches the span
/// encoding in use before changing either.
/// NOTE(review): the inner 'unsigned short nr' shadows the outer one;
/// the logic is self-consistent but the shadowing is easy to misread.
static bool floodRegion(int x, int y, int i, unsigned short level, unsigned short minLevel, unsigned short r, rcCompactHeightfield& chf, unsigned short* src, rcIntArray& stack) { const int w = chf.width; // Flood fill mark region. stack.resize(0); stack.push((int)x); stack.push((int)y); stack.push((int)i); src[i*2] = r; src[i*2+1] = 0; unsigned short lev = level >= minLevel+2 ? level-2 : minLevel; int count = 0; while (stack.size() > 0) { int ci = stack.pop(); int cy = stack.pop(); int cx = stack.pop(); const rcCompactSpan& cs = chf.spans[ci]; // Check if any of the neighbours already have a valid region set. unsigned short ar = 0; for (int dir = 0; dir < 4; ++dir) { // 8 connected if (rcGetCon(cs, dir) != 0xf) { const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir); unsigned short nr = src[ai*2]; if (nr != 0 && nr != r) ar = nr; const rcCompactSpan& as = chf.spans[ai]; const int dir2 = (dir+1) & 0x3; if (rcGetCon(as, dir2) != 0xf) { const int ax2 = ax + rcGetDirOffsetX(dir2); const int ay2 = ay + rcGetDirOffsetY(dir2); const int ai2 = (int)chf.cells[ax2+ay2*w].index + rcGetCon(as, dir2); unsigned short nr = src[ai2*2]; if (nr != 0 && nr != r) ar = nr; } } } if (ar != 0) { src[ci*2] = 0; continue; } count++; // Expand neighbours. for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) != 0xf) { const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir); if (chf.spans[ai].dist >= lev) { if (src[ai*2] == 0) { src[ai*2] = r; src[ai*2+1] = 0; stack.push(ax); stack.push(ay); stack.push(ai); } } } } } return count > 0; }
/// Walks the border of a region starting at span (x,y,i) and appends
/// contour vertices to 'points' as (px,py,pz,r), where r packs the
/// neighbour region id plus optional RC_BORDER_VERTEX / RC_AREA_BORDER
/// flags. 'flags' holds, per span, one bit per direction that still has
/// an unvisited border edge; bits are cleared as edges are consumed.
/// On a border edge the walk emits the corner (height from
/// getCornerHeight) and rotates clockwise; otherwise it steps into the
/// connected neighbour span and rotates counter-clockwise. The walk
/// stops when it returns to the starting span and direction, or after a
/// 40000-iteration safety cap.
static void walkContour(int x, int y, int i, rcCompactHeightfield& chf, unsigned char* flags, rcIntArray& points) { // Choose the first non-connected edge unsigned char dir = 0; while ((flags[i] & (1 << dir)) == 0) dir++; unsigned char startDir = dir; int starti = i; const navAreaMask area = chf.areaMasks[ i ]; int iter = 0; while (++iter < 40000) { if (flags[i] & (1 << dir)) { // Choose the edge corner bool isBorderVertex = false; bool isAreaBorder = false; int px = x; int py = getCornerHeight(x, y, i, dir, chf, isBorderVertex); int pz = y; switch(dir) { case 0: pz++; break; case 1: px++; pz++; break; case 2: px++; break; } int r = 0; const rcCompactSpan& s = chf.spans[i]; if (rcGetCon(s, dir) != RC_NOT_CONNECTED) { const int ax = x + rcGetDirOffsetX(dir); const int ay = y + rcGetDirOffsetY(dir); const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); r = (int)chf.spans[ai].regionID; if (area != chf.areaMasks[ai]) isAreaBorder = true; } if (isBorderVertex) r |= RC_BORDER_VERTEX; if (isAreaBorder) r |= RC_AREA_BORDER; points.push(px); points.push(py); points.push(pz); points.push(r); flags[i] &= ~(1 << dir); // Remove visited edges dir = (dir+1) & 0x3; // Rotate CW } else { int ni = -1; const int nx = x + rcGetDirOffsetX(dir); const int ny = y + rcGetDirOffsetY(dir); const rcCompactSpan& s = chf.spans[i]; if (rcGetCon(s, dir) != RC_NOT_CONNECTED) { const rcCompactCell& nc = chf.cells[nx+ny*chf.width]; ni = (int)nc.index + rcGetCon(s, dir); } if (ni == -1) { // Should not happen. return; } x = nx; y = ny; i = ni; dir = (dir+3) & 0x3; // Rotate CCW } if (starti == i && startDir == dir) { break; } } }
static void getHeightData(const rcCompactHeightfield& chf, const unsigned short* poly, const int npoly, const unsigned short* verts, rcHeightPatch& hp, rcIntArray& stack) { // Floodfill the heightfield to get 2D height data, // starting at vertex locations as seeds. memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height); stack.resize(0); // Use poly vertices as seed points for the flood fill. for (int j = 0; j < npoly; ++j) { const int ax = (int)verts[poly[j]*3+0]; const int ay = (int)verts[poly[j]*3+1]; const int az = (int)verts[poly[j]*3+2]; if (ax < hp.xmin || ax >= hp.xmin+hp.width || az < hp.ymin || az >= hp.ymin+hp.height) continue; const rcCompactCell& c = chf.cells[ax+az*chf.width]; int dmin = 0xffff; int ai = -1; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { const rcCompactSpan& s = chf.spans[i]; int d = rcAbs(ay - (int)s.y); if (d < dmin) { ai = i; dmin = d; } } if (ai != -1) { stack.push(ax); stack.push(az); stack.push(ai); } } while (stack.size() > 0) { int ci = stack.pop(); int cy = stack.pop(); int cx = stack.pop(); // Skip already visited locations. int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; if (hp.data[idx] != 0xffff) continue; const rcCompactSpan& cs = chf.spans[ci]; hp.data[idx] = cs.y; for (int dir = 0; dir < 4; ++dir) { if (rcGetCon(cs, dir) == 0xf) continue; const int ax = cx + rcGetDirOffsetX(dir); const int ay = cy + rcGetDirOffsetY(dir); if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || ay < hp.ymin || ay >= (hp.ymin+hp.height)) continue; if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != 0xffff) continue; const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(cs, dir); stack.push(ax); stack.push(ay); stack.push(ai); } } }
/// Builds the detail triangulation for one polygon: copies the polygon's
/// vertices, tessellates its outline edges against the height patch, then
/// Delaunay-triangulates and iteratively inserts interior samples until the
/// height error drops below sampleMaxError or MAX_VERTS is reached.
/// @param in/nin         Input polygon vertices (world units).
/// @param reg            Region id (unused in the body below).
/// @param verts/nverts   Output vertex buffer (caller-sized, >= MAX_VERTS*3).
/// @param tris           Output triangles, 4 ints each (v0,v1,v2,flag).
/// @param edges/idx/samples  Scratch arrays, cleared/reused internally.
/// @return Always true.
static bool buildPolyDetail(const float* in, const int nin, unsigned short reg,
							const float sampleDist, const float sampleMaxError,
							const rcCompactHeightfield& chf, const rcHeightPatch& hp,
							float* verts, int& nverts, rcIntArray& tris,
							rcIntArray& edges, rcIntArray& idx, rcIntArray& samples)
{
	static const int MAX_VERTS = 256;
	static const int MAX_EDGE = 64;
	float edge[(MAX_EDGE+1)*3];
	
	nverts = 0;
	
	for (int i = 0; i < nin; ++i)
		vcopy(&verts[i*3], &in[i*3]);
	nverts = nin;
	
	const float ics = 1.0f/chf.cs;
	
	// Tesselate outlines.
	// This is done in separate pass in order to ensure
	// seamless height values across the ply boundaries.
	if (sampleDist > 0)
	{
		for (int i = 0, j = nin-1; i < nin; j=i++)
		{
			const float* vj = &in[j*3];
			const float* vi = &in[i*3];
			// Make sure the segments are always handled in same order
			// using lexological sort or else there will be seams.
			// (Only the local pointers are swapped; the input is untouched.)
			if (fabsf(vj[0]-vi[0]) < 1e-6f)
			{
				if (vj[2] > vi[2])
					rcSwap(vj,vi);
			}
			else
			{
				if (vj[0] > vi[0])
					rcSwap(vj,vi);
			}
			// Create samples along the edge, clamped so the total vertex
			// count can never exceed MAX_VERTS.
			float dx = vi[0] - vj[0];
			float dy = vi[1] - vj[1];
			float dz = vi[2] - vj[2];
			float d = sqrtf(dx*dx + dz*dz);
			int nn = 1 + (int)floorf(d/sampleDist);
			if (nn > MAX_EDGE) nn = MAX_EDGE;
			if (nverts+nn >= MAX_VERTS)
				nn = MAX_VERTS-1-nverts;
			for (int k = 0; k <= nn; ++k)
			{
				float u = (float)k/(float)nn;
				float* pos = &edge[k*3];
				pos[0] = vj[0] + dx*u;
				pos[1] = vj[1] + dy*u;
				pos[2] = vj[2] + dz*u;
				// Snap the sample height to the height patch.
				pos[1] = chf.bmin[1] + getHeight(pos, chf.bmin, ics, hp)*chf.ch;
			}
			// Simplify samples (Douglas-Peucker style): keep only points that
			// deviate more than the allowed error from the simplified chain.
			// NOTE(review): this local 'idx' array shadows the rcIntArray
			// parameter of the same name; behavior is correct but confusing.
			int idx[MAX_EDGE] = {0,nn};
			int nidx = 2;
			for (int k = 0; k < nidx-1; )
			{
				const int a = idx[k];
				const int b = idx[k+1];
				const float* va = &edge[a*3];
				const float* vb = &edge[b*3];
				// Find maximum deviation along the segment.
				float maxd = 0;
				int maxi = -1;
				for (int m = a+1; m < b; ++m)
				{
					float d = distancePtSeg(&edge[m*3],va,vb);
					if (d > maxd)
					{
						maxd = d;
						maxi = m;
					}
				}
				// If the max deviation is larger than accepted error,
				// add new point, else continue to next segment.
				// NOTE(review): comparing against rcSqr(sampleMaxError) assumes
				// distancePtSeg returns a squared distance — confirm.
				if (maxi != -1 && maxd > rcSqr(sampleMaxError))
				{
					for (int m = nidx; m > k; --m)
						idx[m] = idx[m-1];
					idx[k+1] = maxi;
					nidx++;
				}
				else
				{
					++k;
				}
			}
			// Add new vertices (endpoints already exist in 'verts').
			for (int k = 1; k < nidx-1; ++k)
			{
				vcopy(&verts[nverts*3], &edge[idx[k]*3]);
				nverts++;
			}
		}
	}
	
	// Tesselate the base mesh.
	edges.resize(0);
	tris.resize(0);
	idx.resize(0);
	
	delaunay(nverts, verts, idx, tris, edges);
	
	if (sampleDist > 0)
	{
		// Create sample locations in a grid over the polygon's bounds.
		float bmin[3], bmax[3];
		vcopy(bmin, in);
		vcopy(bmax, in);
		for (int i = 1; i < nin; ++i)
		{
			vmin(bmin, &in[i*3]);
			vmax(bmax, &in[i*3]);
		}
		int x0 = (int)floorf(bmin[0]/sampleDist);
		int x1 = (int)ceilf(bmax[0]/sampleDist);
		int z0 = (int)floorf(bmin[2]/sampleDist);
		int z1 = (int)ceilf(bmax[2]/sampleDist);
		samples.resize(0);
		for (int z = z0; z < z1; ++z)
		{
			for (int x = x0; x < x1; ++x)
			{
				float pt[3];
				pt[0] = x*sampleDist;
				pt[2] = z*sampleDist;
				// Make sure the samples are not too close to the edges.
				if (distToPoly(nin,in,pt) > -sampleDist/2) continue;
				samples.push(x);
				samples.push(getHeight(pt, chf.bmin, ics, hp));
				samples.push(z);
			}
		}
		
		// Add the samples starting from the one that has the most
		// error. The procedure stops when all samples are added
		// or when the max error is within treshold.
		const int nsamples = samples.size()/3;
		for (int iter = 0; iter < nsamples; ++iter)
		{
			// Find sample with most error against the current triangulation.
			float bestpt[3];
			float bestd = 0;
			for (int i = 0; i < nsamples; ++i)
			{
				float pt[3];
				pt[0] = samples[i*3+0]*sampleDist;
				pt[1] = chf.bmin[1] + samples[i*3+1]*chf.ch;
				pt[2] = samples[i*3+2]*sampleDist;
				float d = distToTriMesh(pt, verts, nverts, &tris[0], tris.size()/4);
				if (d < 0) continue; // did not hit the mesh.
				if (d > bestd)
				{
					bestd = d;
					vcopy(bestpt,pt);
				}
			}
			// If the max error is within accepted threshold, stop tesselating.
			if (bestd <= sampleMaxError)
				break;
			
			// Add the new sample point.
			vcopy(&verts[nverts*3],bestpt);
			nverts++;
			
			// Create new triangulation.
			// TODO: Incremental add instead of full rebuild.
			edges.resize(0);
			tris.resize(0);
			idx.resize(0);
			delaunay(nverts, verts, idx, tris, edges);
			
			if (nverts >= MAX_VERTS)
				break;
		}
	}
	
	return true;
}
// Based on Paul Bourke's triangulate.c
// http://astronomy.swin.edu.au/~pbourke/terrain/triangulate/triangulate.c
/// Delaunay-triangulates 'nv' vertices (x/z plane) using the Bowyer-Watson
/// incremental insertion over a supertriangle.
/// @param idx    Scratch: filled with vertex indices sorted by x (then used for remap).
/// @param tris   Output: 4 ints per triangle (v0,v1,v2,flag). The 4th slot is a
///               work flag during construction; do not interpret it afterwards.
/// @param edges  Scratch edge buffer, 2 ints per edge.
static void delaunay(const int nv, float *verts, rcIntArray& idx, rcIntArray& tris, rcIntArray& edges)
{
	// Sort vertices by x so triangles can be marked "completed" once the
	// sweep has passed their circumcircle.
	idx.resize(nv);
	for (int i = 0; i < nv; ++i)
		idx[i] = i;
	// NOTE(review): qsort_r argument order differs between glibc and BSD;
	// the (base, n, size, context, cmp) order used here is the BSD form — confirm
	// against the target platforms.
#ifdef WIN32
	qsort_s(&idx[0], idx.size(), sizeof(int), ptcmp, verts);
#else
	qsort_r(&idx[0], idx.size(), sizeof(int), verts, ptcmp);
#endif
	
	// Find the maximum and minimum vertex bounds.
	// This is to allow calculation of the bounding triangle
	float xmin = verts[0];
	float ymin = verts[2];
	float xmax = xmin;
	float ymax = ymin;
	for (int i = 1; i < nv; ++i)
	{
		xmin = rcMin(xmin, verts[i*3+0]);
		xmax = rcMax(xmax, verts[i*3+0]);
		ymin = rcMin(ymin, verts[i*3+2]);
		ymax = rcMax(ymax, verts[i*3+2]);
	}
	float dx = xmax - xmin;
	float dy = ymax - ymin;
	float dmax = (dx > dy) ? dx : dy;
	float xmid = (xmax + xmin) / 2.0f;
	float ymid = (ymax + ymin) / 2.0f;
	
	// Set up the supertriangle
	// This is a triangle which encompasses all the sample points.
	// The supertriangle coordinates are added to the end of the
	// vertex list. The supertriangle is the first triangle in
	// the triangle list.
	// Supertriangle vertices are referenced with negative indices -3..-1
	// and resolved via &sv[(t+3)*3].
	float sv[3*3];
	sv[0] = xmid - 20 * dmax;
	sv[1] = 0;
	sv[2] = ymid - dmax;
	sv[3] = xmid;
	sv[4] = 0;
	sv[5] = ymid + 20 * dmax;
	sv[6] = xmid + 20 * dmax;
	sv[7] = 0;
	sv[8] = ymid - dmax;
	tris.push(-3);
	tris.push(-2);
	tris.push(-1);
	tris.push(0); // not completed
	
	for (int i = 0; i < nv; ++i)
	{
		const float xp = verts[idx[i]*3+0];
		const float yp = verts[idx[i]*3+2];
		
		edges.resize(0);
		
		// Set up the edge buffer.
		// If the point (xp,yp) lies inside the circumcircle then the
		// three edges of that triangle are added to the edge buffer
		// and that triangle is removed.
		for (int j = 0; j < tris.size()/4; ++j)
		{
			int* t = &tris[j*4];
			if (t[3]) // completed?
				continue;
			const float* v1 = t[0] < 0 ? &sv[(t[0]+3)*3] : &verts[idx[t[0]]*3];
			const float* v2 = t[1] < 0 ? &sv[(t[1]+3)*3] : &verts[idx[t[1]]*3];
			const float* v3 = t[2] < 0 ? &sv[(t[2]+3)*3] : &verts[idx[t[2]]*3];
			float xc,yc,rsqr;
			int inside = circumCircle(xp,yp, v1[0],v1[2], v2[0],v2[2], v3[0],v3[2], xc,yc,rsqr);
			// Since points are sorted by x, a circumcircle entirely to the
			// left of xp can never contain a later point: mark completed.
			if (xc < xp && rcSqr(xp-xc) > rsqr)
				t[3] = 1;
			if (inside)
			{
				// Collect triangle edges.
				edges.push(t[0]);
				edges.push(t[1]);
				edges.push(t[1]);
				edges.push(t[2]);
				edges.push(t[2]);
				edges.push(t[0]);
				// Remove triangle j by swapping in the last triangle.
				t[0] = tris[tris.size()-4];
				t[1] = tris[tris.size()-3];
				t[2] = tris[tris.size()-2];
				t[3] = tris[tris.size()-1];
				tris.resize(tris.size()-4);
				j--;
			}
		}
		
		// Remove duplicate edges (shared between two removed triangles —
		// these are interior to the cavity and must not spawn triangles).
		const int ne = edges.size()/2;
		for (int j = 0; j < ne-1; ++j)
		{
			for (int k = j+1; k < ne; ++k)
			{
				// Dupe?, make null.
				if ((edges[j*2+0] == edges[k*2+1]) && (edges[j*2+1] == edges[k*2+0]))
				{
					edges[j*2+0] = 0;
					edges[j*2+1] = 0;
					edges[k*2+0] = 0;
					edges[k*2+1] = 0;
				}
			}
		}
		
		// Form new triangles for the current point
		// Skipping over any null.
		// All edges are arranged in clockwise order.
		for (int j = 0; j < ne; ++j)
		{
			if (edges[j*2+0] == edges[j*2+1])
				continue;
			tris.push(edges[j*2+0]);
			tris.push(edges[j*2+1]);
			tris.push(i);
			tris.push(0); // not completed
		}
	}
	
	// Remove triangles with supertriangle vertices
	// These are triangles which have a vertex number greater than nv
	for (int i = 0; i < tris.size()/4; ++i)
	{
		int* t = &tris[i*4];
		if (t[0] < 0 || t[1] < 0 || t[2] < 0)
		{
			t[0] = tris[tris.size()-4];
			t[1] = tris[tris.size()-3];
			t[2] = tris[tris.size()-2];
			t[3] = tris[tris.size()-1];
			tris.resize(tris.size()-4);
			i--;
		}
	}
	
	// Triangle vertices are pointing to sorted vertices, remap indices.
	// NOTE(review): this loop also remaps every 4th slot (the work flag),
	// so t[3] holds meaningless values after this point.
	for (int i = 0; i < tris.size(); ++i)
		tris[i] = idx[tris[i]];
}
/// Flood-fills one watershed region starting at span (x,y,i), constrained to
/// spans with the same area type. Newer variant using separate srcReg/srcDist
/// arrays and the RC_NOT_CONNECTED constant.
/// @param level   Current watershed water level; fill expands down to level-2.
/// @param r       Region id to stamp on claimed spans.
/// @param srcReg  Per-span region ids; srcDist per-span distances.
/// @param stack   Scratch stack of (x,y,spanIndex) triplets; cleared on entry.
/// @return true if at least one span was kept in the region.
static bool floodRegion(int x, int y, int i, unsigned short level, unsigned short r, rcCompactHeightfield& chf, unsigned short* srcReg, unsigned short* srcDist, rcIntArray& stack)
{
	const int w = chf.width;
	
	// The fill never crosses into a different area type.
	const unsigned char area = chf.areas[i];
	
	// Flood fill mark region.
	stack.resize(0);
	stack.push((int)x);
	stack.push((int)y);
	stack.push((int)i);
	srcReg[i] = r;
	srcDist[i] = 0;
	
	unsigned short lev = level >= 2 ? level-2 : 0;
	int count = 0;
	
	while (stack.size() > 0)
	{
		// Triplets were pushed as (x,y,i); pop in reverse order.
		int ci = stack.pop();
		int cy = stack.pop();
		int cx = stack.pop();
		
		const rcCompactSpan& cs = chf.spans[ci];
		
		// Check if any of the neighbours already have a valid region set.
		unsigned short ar = 0;
		for (int dir = 0; dir < 4; ++dir)
		{
			// 8 connected: axis neighbour plus its clockwise neighbour for
			// the diagonal.
			if (rcGetCon(cs, dir) != RC_NOT_CONNECTED)
			{
				const int ax = cx + rcGetDirOffsetX(dir);
				const int ay = cy + rcGetDirOffsetY(dir);
				const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir);
				if (chf.areas[ai] != area)
					continue;
				unsigned short nr = srcReg[ai];
				if (nr & RC_BORDER_REG) // Do not take borders into account.
					continue;
				if (nr != 0 && nr != r)
					ar = nr;
				
				const rcCompactSpan& as = chf.spans[ai];
				
				const int dir2 = (dir+1) & 0x3;
				if (rcGetCon(as, dir2) != RC_NOT_CONNECTED)
				{
					const int ax2 = ax + rcGetDirOffsetX(dir2);
					const int ay2 = ay + rcGetDirOffsetY(dir2);
					const int ai2 = (int)chf.cells[ax2+ay2*w].index + rcGetCon(as, dir2);
					if (chf.areas[ai2] != area)
						continue;
					unsigned short nr2 = srcReg[ai2];
					if (nr2 != 0 && nr2 != r)
						ar = nr2;
				}
			}
		}
		// A different region already touches this span: un-claim it and do
		// not expand from here.
		if (ar != 0)
		{
			srcReg[ci] = 0;
			continue;
		}
		count++;
		
		// Expand neighbours that are deep enough (dist >= lev), unclaimed,
		// and of the same area type.
		for (int dir = 0; dir < 4; ++dir)
		{
			if (rcGetCon(cs, dir) != RC_NOT_CONNECTED)
			{
				const int ax = cx + rcGetDirOffsetX(dir);
				const int ay = cy + rcGetDirOffsetY(dir);
				const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir);
				if (chf.areas[ai] != area)
					continue;
				if (chf.dist[ai] >= lev && srcReg[ai] == 0)
				{
					srcReg[ai] = r;
					srcDist[ai] = 0;
					stack.push(ax);
					stack.push(ay);
					stack.push(ai);
				}
			}
		}
	}
	
	return count > 0;
}
static bool mergeAndFilterRegions(rcContext* ctx, int minRegionArea, int mergeRegionSize, unsigned short& maxRegionId, rcCompactHeightfield& chf, unsigned short* srcReg, rcIntArray& overlaps) { const int w = chf.width; const int h = chf.height; const int nreg = maxRegionId+1; rcRegion* regions = (rcRegion*)rcAlloc(sizeof(rcRegion)*nreg, RC_ALLOC_TEMP); if (!regions) { ctx->log(RC_LOG_ERROR, "mergeAndFilterRegions: Out of memory 'regions' (%d).", nreg); return false; } // Construct regions for (int i = 0; i < nreg; ++i) new(®ions[i]) rcRegion((unsigned short)i); // Find edge of a region and find connections around the contour. for (int y = 0; y < h; ++y) { for (int x = 0; x < w; ++x) { const rcCompactCell& c = chf.cells[x+y*w]; for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) { unsigned short r = srcReg[i]; if (r == 0 || r >= nreg) continue; rcRegion& reg = regions[r]; reg.spanCount++; // Update floors. for (int j = (int)c.index; j < ni; ++j) { if (i == j) continue; unsigned short floorId = srcReg[j]; if (floorId == 0 || floorId >= nreg) continue; if (floorId == r) reg.overlap = true; addUniqueFloorRegion(reg, floorId); } // Have found contour if (reg.connections.size() > 0) continue; reg.areaType = chf.areas[i]; // Check if this cell is next to a border. int ndir = -1; for (int dir = 0; dir < 4; ++dir) { if (isSolidEdge(chf, srcReg, x, y, i, dir)) { ndir = dir; break; } } if (ndir != -1) { // The cell is at border. // Walk around the contour to find all the neighbours. walkContour(x, y, i, ndir, chf, srcReg, reg.connections); } } } } // Remove too small regions. rcIntArray stack(32); rcIntArray trace(32); for (int i = 0; i < nreg; ++i) { rcRegion& reg = regions[i]; if (reg.id == 0 || (reg.id & RC_BORDER_REG)) continue; if (reg.spanCount == 0) continue; if (reg.visited) continue; // Count the total size of all the connected regions. // Also keep track of the regions connects to a tile border. 
bool connectsToBorder = false; int spanCount = 0; stack.resize(0); trace.resize(0); reg.visited = true; stack.push(i); while (stack.size()) { // Pop int ri = stack.pop(); rcRegion& creg = regions[ri]; spanCount += creg.spanCount; trace.push(ri); for (int j = 0; j < creg.connections.size(); ++j) { if (creg.connections[j] & RC_BORDER_REG) { connectsToBorder = true; continue; } rcRegion& neireg = regions[creg.connections[j]]; if (neireg.visited) continue; if (neireg.id == 0 || (neireg.id & RC_BORDER_REG)) continue; // Visit stack.push(neireg.id); neireg.visited = true; } } // If the accumulated regions size is too small, remove it. // Do not remove areas which connect to tile borders // as their size cannot be estimated correctly and removing them // can potentially remove necessary areas. if (spanCount < minRegionArea && !connectsToBorder) { // Kill all visited regions. for (int j = 0; j < trace.size(); ++j) { regions[trace[j]].spanCount = 0; regions[trace[j]].id = 0; } } } // Merge too small regions to neighbour regions. int mergeCount = 0 ; do { mergeCount = 0; for (int i = 0; i < nreg; ++i) { rcRegion& reg = regions[i]; if (reg.id == 0 || (reg.id & RC_BORDER_REG)) continue; if (reg.overlap) continue; if (reg.spanCount == 0) continue; // Check to see if the region should be merged. if (reg.spanCount > mergeRegionSize && isRegionConnectedToBorder(reg)) continue; // Small region with more than 1 connection. // Or region which is not connected to a border at all. // Find smallest neighbour region that connects to this one. int smallest = 0xfffffff; unsigned short mergeId = reg.id; for (int j = 0; j < reg.connections.size(); ++j) { if (reg.connections[j] & RC_BORDER_REG) continue; rcRegion& mreg = regions[reg.connections[j]]; if (mreg.id == 0 || (mreg.id & RC_BORDER_REG) || mreg.overlap) continue; if (mreg.spanCount < smallest && canMergeWithRegion(reg, mreg) && canMergeWithRegion(mreg, reg)) { smallest = mreg.spanCount; mergeId = mreg.id; } } // Found new id. 
if (mergeId != reg.id) { unsigned short oldId = reg.id; rcRegion& target = regions[mergeId]; // Merge neighbours. if (mergeRegions(target, reg)) { // Fixup regions pointing to current region. for (int j = 0; j < nreg; ++j) { if (regions[j].id == 0 || (regions[j].id & RC_BORDER_REG)) continue; // If another region was already merged into current region // change the nid of the previous region too. if (regions[j].id == oldId) regions[j].id = mergeId; // Replace the current region with the new one if the // current regions is neighbour. replaceNeighbour(regions[j], oldId, mergeId); } mergeCount++; } } } } while (mergeCount > 0); // Compress region Ids. for (int i = 0; i < nreg; ++i) { regions[i].remap = false; if (regions[i].id == 0) continue; // Skip nil regions. if (regions[i].id & RC_BORDER_REG) continue; // Skip external regions. regions[i].remap = true; } unsigned short regIdGen = 0; for (int i = 0; i < nreg; ++i) { if (!regions[i].remap) continue; unsigned short oldId = regions[i].id; unsigned short newId = ++regIdGen; for (int j = i; j < nreg; ++j) { if (regions[j].id == oldId) { regions[j].id = newId; regions[j].remap = false; } } } maxRegionId = regIdGen; // Remap regions. for (int i = 0; i < chf.spanCount; ++i) { if ((srcReg[i] & RC_BORDER_REG) == 0) srcReg[i] = regions[srcReg[i]].id; } // Return regions that we found to be overlapping. for (int i = 0; i < nreg; ++i) if (regions[i].overlap) overlaps.push(regions[i].id); for (int i = 0; i < nreg; ++i) regions[i].~rcRegion(); rcFree(regions); return true; }
/// Walks the boundary of a region starting at span (x,y,i) facing edge 'dir'
/// and records the sequence of neighbour region ids into 'cont'
/// (adjacent duplicates removed). Used to build region connectivity.
static void walkContour(int x, int y, int i, int dir, rcCompactHeightfield& chf, unsigned short* srcReg, rcIntArray& cont)
{
	int startDir = dir;
	int starti = i;
	
	// Record the region on the other side of the starting edge.
	const rcCompactSpan& ss = chf.spans[i];
	unsigned short curReg = 0;
	if (rcGetCon(ss, dir) != RC_NOT_CONNECTED)
	{
		const int ax = x + rcGetDirOffsetX(dir);
		const int ay = y + rcGetDirOffsetY(dir);
		const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(ss, dir);
		curReg = srcReg[ai];
	}
	cont.push(curReg);
	
	// Hard iteration cap guards against infinite loops on malformed data.
	int iter = 0;
	while (++iter < 40000)
	{
		const rcCompactSpan& s = chf.spans[i];
		
		if (isSolidEdge(chf, srcReg, x, y, i, dir))
		{
			// Region boundary edge: record the neighbour region when it changes.
			unsigned short r = 0;
			if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
			{
				const int ax = x + rcGetDirOffsetX(dir);
				const int ay = y + rcGetDirOffsetY(dir);
				const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir);
				r = srcReg[ai];
			}
			if (r != curReg)
			{
				curReg = r;
				cont.push(curReg);
			}
			
			dir = (dir+1) & 0x3; // Rotate CW
		}
		else
		{
			// Interior edge: step into the neighbour span and turn CCW to
			// keep hugging the boundary.
			int ni = -1;
			const int nx = x + rcGetDirOffsetX(dir);
			const int ny = y + rcGetDirOffsetY(dir);
			if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
			{
				const rcCompactCell& nc = chf.cells[nx+ny*chf.width];
				ni = (int)nc.index + rcGetCon(s, dir);
			}
			if (ni == -1)
			{
				// Should not happen.
				return;
			}
			x = nx;
			y = ny;
			i = ni;
			dir = (dir+3) & 0x3;	// Rotate CCW
		}
		
		// Stop once back at the starting span facing the starting edge.
		if (starti == i && startDir == dir)
		{
			break;
		}
	}
	
	// Remove adjacent duplicates (the list is circular, so the last entry
	// is also compared against the first).
	if (cont.size() > 1)
	{
		for (int j = 0; j < cont.size(); )
		{
			int nj = (j+1) % cont.size();
			if (cont[j] == cont[nj])
			{
				for (int k = j; k < cont.size()-1; ++k)
					cont[k] = cont[k+1];
				cont.pop();
			}
			else
				++j;
		}
	}
}
/// Seeds 'array' with a single (x,z,spanIndex) triplet at (or as close as
/// reachable to) the polygon's center, found by a DFS walk from the span
/// nearest a polygon vertex. hp.data is used as a visited mask during the
/// walk, then re-initialized to 0xff with only the final cell's height set.
static void seedArrayWithPolyCenter(rcContext* ctx, const rcCompactHeightfield& chf, const unsigned short* poly, const int npoly, const unsigned short* verts, const int bs, rcHeightPatch& hp, rcIntArray& array)
{
	// Note: Reads to the compact heightfield are offset by border size (bs)
	// since border size offset is already removed from the polymesh vertices.
	
	// 3x3 neighbourhood offsets (center first) searched around each vertex.
	static const int offset[9*2] =
	{
		0,0, -1,-1, 0,-1, 1,-1, 1,0, 1,1, 0,1, -1,1, -1,0,
	};
	
	// Find cell closest to a poly vertex (dmin > 0 short-circuits the
	// search as soon as an exact height match is found).
	int startCellX = 0, startCellY = 0, startSpanIndex = -1;
	int dmin = RC_UNSET_HEIGHT;
	for (int j = 0; j < npoly && dmin > 0; ++j)
	{
		for (int k = 0; k < 9 && dmin > 0; ++k)
		{
			const int ax = (int)verts[poly[j]*3+0] + offset[k*2+0];
			const int ay = (int)verts[poly[j]*3+1];
			const int az = (int)verts[poly[j]*3+2] + offset[k*2+1];
			if (ax < hp.xmin || ax >= hp.xmin+hp.width ||
				az < hp.ymin || az >= hp.ymin+hp.height)
				continue;
			
			const rcCompactCell& c = chf.cells[(ax+bs)+(az+bs)*chf.width];
			for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni && dmin > 0; ++i)
			{
				const rcCompactSpan& s = chf.spans[i];
				int d = rcAbs(ay - (int)s.y);
				if (d < dmin)
				{
					startCellX = ax;
					startCellY = az;
					startSpanIndex = i;
					dmin = d;
				}
			}
		}
	}
	
	rcAssert(startSpanIndex != -1);
	// Find center of the polygon
	int pcx = 0, pcy = 0;
	for (int j = 0; j < npoly; ++j)
	{
		pcx += (int)verts[poly[j]*3+0];
		pcy += (int)verts[poly[j]*3+2];
	}
	pcx /= npoly;
	pcy /= npoly;
	
	// Use seeds array as a stack for DFS
	array.resize(0);
	array.push(startCellX);
	array.push(startCellY);
	array.push(startSpanIndex);
	
	int dirs[] = { 0, 1, 2, 3 };
	// hp.data doubles as the visited mask (0 = unvisited) during the walk.
	memset(hp.data, 0, sizeof(unsigned short)*hp.width*hp.height);
	// DFS to move to the center. Note that we need a DFS here and can not just move
	// directly towards the center without recording intermediate nodes, even though the polygons
	// are convex. In very rare we can get stuck due to contour simplification if we do not
	// record nodes.
	int cx = -1, cy = -1, ci = -1;
	while (true)
	{
		if (array.size() < 3)
		{
			ctx->log(RC_LOG_WARNING, "Walk towards polygon center failed to reach center");
			break;
		}
		
		ci = array.pop();
		cy = array.pop();
		cx = array.pop();
		
		if (cx == pcx && cy == pcy)
			break;
		
		// If we are already at the correct X-position, prefer direction
		// directly towards the center in the Y-axis; otherwise prefer
		// direction in the X-axis
		int directDir;
		if (cx == pcx)
			directDir = rcGetDirForOffset(0, pcy > cy ? 1 : -1);
		else
			directDir = rcGetDirForOffset(pcx > cx ? 1 : -1, 0);
		
		// Push the direct dir last so we start with this on next iteration
		rcSwap(dirs[directDir], dirs[3]);
		
		const rcCompactSpan& cs = chf.spans[ci];
		for (int i = 0; i < 4; i++)
		{
			int dir = dirs[i];
			if (rcGetCon(cs, dir) == RC_NOT_CONNECTED)
				continue;
			
			int newX = cx + rcGetDirOffsetX(dir);
			int newY = cy + rcGetDirOffsetY(dir);
			
			int hpx = newX - hp.xmin;
			int hpy = newY - hp.ymin;
			if (hpx < 0 || hpx >= hp.width || hpy < 0 || hpy >= hp.height)
				continue;
			if (hp.data[hpx+hpy*hp.width] != 0)
				continue;
			
			hp.data[hpx+hpy*hp.width] = 1;
			array.push(newX);
			array.push(newY);
			array.push((int)chf.cells[(newX+bs)+(newY+bs)*chf.width].index + rcGetCon(cs, dir));
		}
		
		// Restore the direction order for the next iteration.
		rcSwap(dirs[directDir], dirs[3]);
	}
	
	array.resize(0);
	// getHeightData seeds are given in coordinates with borders
	array.push(cx+bs);
	array.push(cy+bs);
	array.push(ci);
	
	// Reset the patch to "unset" and record only the reached cell's height.
	memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height);
	const rcCompactSpan& cs = chf.spans[ci];
	hp.data[cx-hp.xmin+(cy-hp.ymin)*hp.width] = cs.y;
}