static EjsArray *makeUnion(Ejs *ejs, EjsArray *lhs, EjsArray *rhs)
{
    EjsArray    *result;
    EjsObj      **l, **r;
    int         i;

    result = ejsCreateArray(ejs, 0);
    l = lhs->data;
    r = rhs->data;

    for (i = 0; i < lhs->length; i++) {
        addUnique(ejs, result, l[i]);
    }
    for (i = 0; i < rhs->length; i++) {
        addUnique(ejs, result, r[i]);
    }
    return result;
}

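// NOTE: rcBuildHeightfieldLayers below relies on two small fixed-capacity helpers,
// contains() and addUnique(), that are not part of this excerpt. The following is
// only a sketch of their presumed shape, inferred from the call sites (contains()
// is a linear membership test; addUnique() returns false when the array is full):
static bool contains(const unsigned char* a, const unsigned char an, const unsigned char v)
{
    const int n = (int)an;
    for (int i = 0; i < n; ++i)
    {
        if (a[i] == v)
            return true;
    }
    return false;
}

static bool addUnique(unsigned char* a, unsigned char& an, int anMax, unsigned char v)
{
    // Already present: nothing to do.
    if (contains(a, an, v))
        return true;
    // Fail if the fixed-size array is full.
    if ((int)an >= anMax)
        return false;
    a[an] = v;
    an++;
    return true;
}
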
/// @par
///
/// See the #rcConfig documentation for more information on the configuration parameters.
///
/// @see rcAllocHeightfieldLayerSet, rcCompactHeightfield, rcHeightfieldLayerSet, rcConfig
bool rcBuildHeightfieldLayers(rcContext* ctx, rcCompactHeightfield& chf, const int borderSize, const int walkableHeight, rcHeightfieldLayerSet& lset)
{
    rcAssert(ctx);

    rcScopedTimer timer(ctx, RC_TIMER_BUILD_LAYERS);

    const int w = chf.width;
    const int h = chf.height;

    rcScopedDelete<unsigned char> srcReg((unsigned char*)rcAlloc(sizeof(unsigned char)*chf.spanCount, RC_ALLOC_TEMP));
    if (!srcReg)
    {
        ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'srcReg' (%d).", chf.spanCount);
        return false;
    }
    memset(srcReg, 0xff, sizeof(unsigned char)*chf.spanCount);

    const int nsweeps = chf.width;
    rcScopedDelete<rcLayerSweepSpan> sweeps((rcLayerSweepSpan*)rcAlloc(sizeof(rcLayerSweepSpan)*nsweeps, RC_ALLOC_TEMP));
    if (!sweeps)
    {
        ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'sweeps' (%d).", nsweeps);
        return false;
    }

    // Partition walkable area into monotone regions.
    int prevCount[256];
    unsigned char regId = 0;

    for (int y = borderSize; y < h-borderSize; ++y)
    {
        memset(prevCount, 0, sizeof(int)*regId);
        unsigned char sweepId = 0;

        for (int x = borderSize; x < w-borderSize; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];

            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                const rcCompactSpan& s = chf.spans[i];
                if (chf.areas[i] == RC_NULL_AREA) continue;

                unsigned char sid = 0xff;

                // -x
                if (rcGetCon(s, 0) != RC_NOT_CONNECTED)
                {
                    const int ax = x + rcGetDirOffsetX(0);
                    const int ay = y + rcGetDirOffsetY(0);
                    const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0);
                    if (chf.areas[ai] != RC_NULL_AREA && srcReg[ai] != 0xff)
                        sid = srcReg[ai];
                }

                if (sid == 0xff)
                {
                    sid = sweepId++;
                    sweeps[sid].nei = 0xff;
                    sweeps[sid].ns = 0;
                }

                // -y
                if (rcGetCon(s, 3) != RC_NOT_CONNECTED)
                {
                    const int ax = x + rcGetDirOffsetX(3);
                    const int ay = y + rcGetDirOffsetY(3);
                    const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3);
                    const unsigned char nr = srcReg[ai];
                    if (nr != 0xff)
                    {
                        // Set neighbour when first valid neighbour is encountered.
                        if (sweeps[sid].ns == 0)
                            sweeps[sid].nei = nr;

                        if (sweeps[sid].nei == nr)
                        {
                            // Update existing neighbour
                            sweeps[sid].ns++;
                            prevCount[nr]++;
                        }
                        else
                        {
                            // This is hit if there is more than one neighbour.
                            // Invalidate the neighbour.
                            sweeps[sid].nei = 0xff;
                        }
                    }
                }

                srcReg[i] = sid;
            }
        }

        // Create unique ID.
        for (int i = 0; i < sweepId; ++i)
        {
            // If the neighbour is set and there is only one continuous connection to it,
            // the sweep will be merged with the previous one, else a new region is created.
            if (sweeps[i].nei != 0xff && prevCount[sweeps[i].nei] == (int)sweeps[i].ns)
            {
                sweeps[i].id = sweeps[i].nei;
            }
            else
            {
                if (regId == 255)
                {
                    ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Region ID overflow.");
                    return false;
                }
                sweeps[i].id = regId++;
            }
        }

        // Remap local sweep ids to region ids.
        for (int x = borderSize; x < w-borderSize; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];
            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                if (srcReg[i] != 0xff)
                    srcReg[i] = sweeps[srcReg[i]].id;
            }
        }
    }

    // Allocate and init layer regions.
    const int nregs = (int)regId;
    rcScopedDelete<rcLayerRegion> regs((rcLayerRegion*)rcAlloc(sizeof(rcLayerRegion)*nregs, RC_ALLOC_TEMP));
    if (!regs)
    {
        ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'regs' (%d).", nregs);
        return false;
    }
    memset(regs, 0, sizeof(rcLayerRegion)*nregs);
    for (int i = 0; i < nregs; ++i)
    {
        regs[i].layerId = 0xff;
        regs[i].ymin = 0xffff;
        regs[i].ymax = 0;
    }

    // Find region neighbours and overlapping regions.
    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];

            unsigned char lregs[RC_MAX_LAYERS];
            int nlregs = 0;

            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                const rcCompactSpan& s = chf.spans[i];
                const unsigned char ri = srcReg[i];
                if (ri == 0xff) continue;

                regs[ri].ymin = rcMin(regs[ri].ymin, s.y);
                regs[ri].ymax = rcMax(regs[ri].ymax, s.y);

                // Collect all region layers.
                if (nlregs < RC_MAX_LAYERS)
                    lregs[nlregs++] = ri;

                // Update neighbours
                for (int dir = 0; dir < 4; ++dir)
                {
                    if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
                    {
                        const int ax = x + rcGetDirOffsetX(dir);
                        const int ay = y + rcGetDirOffsetY(dir);
                        const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir);
                        const unsigned char rai = srcReg[ai];
                        if (rai != 0xff && rai != ri)
                        {
                            // Don't check return value -- if we cannot add the neighbor
                            // it will just cause a few more regions to be created, which
                            // is fine.
                            addUnique(regs[ri].neis, regs[ri].nneis, RC_MAX_NEIS, rai);
                        }
                    }
                }
            }

            // Update overlapping regions.
            for (int i = 0; i < nlregs-1; ++i)
            {
                for (int j = i+1; j < nlregs; ++j)
                {
                    if (lregs[i] != lregs[j])
                    {
                        rcLayerRegion& ri = regs[lregs[i]];
                        rcLayerRegion& rj = regs[lregs[j]];

                        if (!addUnique(ri.layers, ri.nlayers, RC_MAX_LAYERS, lregs[j]) ||
                            !addUnique(rj.layers, rj.nlayers, RC_MAX_LAYERS, lregs[i]))
                        {
                            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS.");
                            return false;
                        }
                    }
                }
            }
        }
    }

    // Create 2D layers from regions.
    unsigned char layerId = 0;

    static const int MAX_STACK = 64;
    unsigned char stack[MAX_STACK];
    int nstack = 0;

    for (int i = 0; i < nregs; ++i)
    {
        rcLayerRegion& root = regs[i];
        // Skip already visited.
        if (root.layerId != 0xff)
            continue;

        // Start search.
        root.layerId = layerId;
        root.base = 1;

        nstack = 0;
        stack[nstack++] = (unsigned char)i;

        while (nstack)
        {
            // Pop front
            rcLayerRegion& reg = regs[stack[0]];
            nstack--;
            for (int j = 0; j < nstack; ++j)
                stack[j] = stack[j+1];

            const int nneis = (int)reg.nneis;
            for (int j = 0; j < nneis; ++j)
            {
                const unsigned char nei = reg.neis[j];
                rcLayerRegion& regn = regs[nei];
                // Skip already visited.
                if (regn.layerId != 0xff)
                    continue;
                // Skip if the neighbour is overlapping root region.
                if (contains(root.layers, root.nlayers, nei))
                    continue;
                // Skip if the height range would become too large.
                const int ymin = rcMin(root.ymin, regn.ymin);
                const int ymax = rcMax(root.ymax, regn.ymax);
                if ((ymax - ymin) >= 255)
                    continue;

                if (nstack < MAX_STACK)
                {
                    // Deepen
                    stack[nstack++] = (unsigned char)nei;

                    // Mark layer id
                    regn.layerId = layerId;
                    // Merge current layers to root.
                    for (int k = 0; k < regn.nlayers; ++k)
                    {
                        if (!addUnique(root.layers, root.nlayers, RC_MAX_LAYERS, regn.layers[k]))
                        {
                            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS.");
                            return false;
                        }
                    }
                    root.ymin = rcMin(root.ymin, regn.ymin);
                    root.ymax = rcMax(root.ymax, regn.ymax);
                }
            }
        }

        layerId++;
    }

    // Merge non-overlapping regions that are close in height.
    const unsigned short mergeHeight = (unsigned short)walkableHeight * 4;

    for (int i = 0; i < nregs; ++i)
    {
        rcLayerRegion& ri = regs[i];
        if (!ri.base) continue;

        unsigned char newId = ri.layerId;

        for (;;)
        {
            unsigned char oldId = 0xff;

            for (int j = 0; j < nregs; ++j)
            {
                if (i == j) continue;
                rcLayerRegion& rj = regs[j];
                if (!rj.base) continue;

                // Skip if the regions are not close to each other.
                if (!overlapRange(ri.ymin, ri.ymax+mergeHeight, rj.ymin, rj.ymax+mergeHeight))
                    continue;
                // Skip if the height range would become too large.
                const int ymin = rcMin(ri.ymin, rj.ymin);
                const int ymax = rcMax(ri.ymax, rj.ymax);
                if ((ymax - ymin) >= 255)
                    continue;

                // Make sure that there is no overlap when merging 'ri' and 'rj'.
                bool overlap = false;
                // Iterate over all regions which have the same layerId as 'rj'
                for (int k = 0; k < nregs; ++k)
                {
                    if (regs[k].layerId != rj.layerId)
                        continue;
                    // Check if region 'k' is overlapping region 'ri'
                    // Index to 'regs' is the same as region id.
                    if (contains(ri.layers, ri.nlayers, (unsigned char)k))
                    {
                        overlap = true;
                        break;
                    }
                }
                // Cannot merge if regions overlap.
                if (overlap)
                    continue;

                // Can merge i and j.
                oldId = rj.layerId;
                break;
            }

            // Could not find anything to merge with, stop.
            if (oldId == 0xff)
                break;

            // Merge
            for (int j = 0; j < nregs; ++j)
            {
                rcLayerRegion& rj = regs[j];
                if (rj.layerId == oldId)
                {
                    rj.base = 0;
                    // Remap layerIds.
                    rj.layerId = newId;
                    // Add overlaid layers from 'rj' to 'ri'.
                    for (int k = 0; k < rj.nlayers; ++k)
                    {
                        if (!addUnique(ri.layers, ri.nlayers, RC_MAX_LAYERS, rj.layers[k]))
                        {
                            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: layer overflow (too many overlapping walkable platforms). Try increasing RC_MAX_LAYERS.");
                            return false;
                        }
                    }
                    // Update height bounds.
                    ri.ymin = rcMin(ri.ymin, rj.ymin);
                    ri.ymax = rcMax(ri.ymax, rj.ymax);
                }
            }
        }
    }

    // Compact layerIds
    unsigned char remap[256];
    memset(remap, 0, 256);

    // Find number of unique layers.
    layerId = 0;
    for (int i = 0; i < nregs; ++i)
        remap[regs[i].layerId] = 1;
    for (int i = 0; i < 256; ++i)
    {
        if (remap[i])
            remap[i] = layerId++;
        else
            remap[i] = 0xff;
    }
    // Remap ids.
    for (int i = 0; i < nregs; ++i)
        regs[i].layerId = remap[regs[i].layerId];

    // No layers, return empty.
    if (layerId == 0)
        return true;

    // Create layers.
    rcAssert(lset.layers == 0);

    const int lw = w - borderSize*2;
    const int lh = h - borderSize*2;

    // Build contracted bbox for layers.
    float bmin[3], bmax[3];
    rcVcopy(bmin, chf.bmin);
    rcVcopy(bmax, chf.bmax);
    bmin[0] += borderSize*chf.cs;
    bmin[2] += borderSize*chf.cs;
    bmax[0] -= borderSize*chf.cs;
    bmax[2] -= borderSize*chf.cs;

    lset.nlayers = (int)layerId;

    lset.layers = (rcHeightfieldLayer*)rcAlloc(sizeof(rcHeightfieldLayer)*lset.nlayers, RC_ALLOC_PERM);
    if (!lset.layers)
    {
        ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'layers' (%d).", lset.nlayers);
        return false;
    }
    memset(lset.layers, 0, sizeof(rcHeightfieldLayer)*lset.nlayers);

    // Store layers.
    for (int i = 0; i < lset.nlayers; ++i)
    {
        unsigned char curId = (unsigned char)i;

        rcHeightfieldLayer* layer = &lset.layers[i];

        const int gridSize = sizeof(unsigned char)*lw*lh;

        layer->heights = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->heights)
        {
            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'heights' (%d).", gridSize);
            return false;
        }
        memset(layer->heights, 0xff, gridSize);

        layer->areas = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->areas)
        {
            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'areas' (%d).", gridSize);
            return false;
        }
        memset(layer->areas, 0, gridSize);

        layer->cons = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->cons)
        {
            ctx->log(RC_LOG_ERROR, "rcBuildHeightfieldLayers: Out of memory 'cons' (%d).", gridSize);
            return false;
        }
        memset(layer->cons, 0, gridSize);

        // Find layer height bounds.
        int hmin = 0, hmax = 0;
        for (int j = 0; j < nregs; ++j)
        {
            if (regs[j].base && regs[j].layerId == curId)
            {
                hmin = (int)regs[j].ymin;
                hmax = (int)regs[j].ymax;
            }
        }

        layer->width = lw;
        layer->height = lh;
        layer->cs = chf.cs;
        layer->ch = chf.ch;

        // Adjust the bbox to fit the heightfield.
        rcVcopy(layer->bmin, bmin);
        rcVcopy(layer->bmax, bmax);
        layer->bmin[1] = bmin[1] + hmin*chf.ch;
        layer->bmax[1] = bmin[1] + hmax*chf.ch;
        layer->hmin = hmin;
        layer->hmax = hmax;

        // Update usable data region.
        layer->minx = layer->width;
        layer->maxx = 0;
        layer->miny = layer->height;
        layer->maxy = 0;

        // Copy height and area from compact heightfield.
        for (int y = 0; y < lh; ++y)
        {
            for (int x = 0; x < lw; ++x)
            {
                const int cx = borderSize+x;
                const int cy = borderSize+y;
                const rcCompactCell& c = chf.cells[cx+cy*w];
                for (int j = (int)c.index, nj = (int)(c.index+c.count); j < nj; ++j)
                {
                    const rcCompactSpan& s = chf.spans[j];
                    // Skip unassigned regions.
                    if (srcReg[j] == 0xff)
                        continue;
                    // Skip if it does not belong to the current layer.
                    unsigned char lid = regs[srcReg[j]].layerId;
                    if (lid != curId)
                        continue;

                    // Update data bounds.
                    layer->minx = rcMin(layer->minx, x);
                    layer->maxx = rcMax(layer->maxx, x);
                    layer->miny = rcMin(layer->miny, y);
                    layer->maxy = rcMax(layer->maxy, y);

                    // Store height and area type.
                    const int idx = x+y*lw;
                    layer->heights[idx] = (unsigned char)(s.y - hmin);
                    layer->areas[idx] = chf.areas[j];

                    // Check connection.
                    unsigned char portal = 0;
                    unsigned char con = 0;
                    for (int dir = 0; dir < 4; ++dir)
                    {
                        if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
                        {
                            const int ax = cx + rcGetDirOffsetX(dir);
                            const int ay = cy + rcGetDirOffsetY(dir);
                            const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir);
                            unsigned char alid = srcReg[ai] != 0xff ? regs[srcReg[ai]].layerId : 0xff;

                            // Portal mask
                            if (chf.areas[ai] != RC_NULL_AREA && lid != alid)
                            {
                                portal |= (unsigned char)(1<<dir);

                                // Update height so that it matches on both sides of the portal.
                                const rcCompactSpan& as = chf.spans[ai];
                                if (as.y > hmin)
                                    layer->heights[idx] = rcMax(layer->heights[idx], (unsigned char)(as.y - hmin));
                            }
                            // Valid connection mask
                            if (chf.areas[ai] != RC_NULL_AREA && lid == alid)
                            {
                                const int nx = ax - borderSize;
                                const int ny = ay - borderSize;
                                if (nx >= 0 && ny >= 0 && nx < lw && ny < lh)
                                    con |= (unsigned char)(1<<dir);
                            }
                        }
                    }

                    layer->cons[idx] = (portal << 4) | con;
                }
            }
        }

        if (layer->minx > layer->maxx)
            layer->minx = layer->maxx = 0;
        if (layer->miny > layer->maxy)
            layer->miny = layer->maxy = 0;
    }

    return true;
}

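// For context, a minimal sketch of how rcBuildHeightfieldLayers is typically driven.
// The function name below is illustrative; 'ctx', 'chf' and 'cfg' are assumed to come
// from the earlier stages of the Recast build pipeline (rcConfig, rcBuildCompactHeightfield, etc.).
static bool buildLayersExample(rcContext* ctx, rcCompactHeightfield* chf, const rcConfig& cfg)
{
    rcHeightfieldLayerSet* lset = rcAllocHeightfieldLayerSet();
    if (!lset)
        return false;
    if (!rcBuildHeightfieldLayers(ctx, *chf, cfg.borderSize, cfg.walkableHeight, *lset))
    {
        rcFreeHeightfieldLayerSet(lset);
        return false;
    }
    // ... consume lset->layers[0 .. lset->nlayers-1], e.g. to build tile cache layer data ...
    rcFreeHeightfieldLayerSet(lset);
    return true;
}
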
static bool CollectLayerRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf, const int borderSize, unsigned short* srcReg, rcLayerRegionMonotone*& regs, int& nregs)
{
    const int w = chf.width;
    const int h = chf.height;

    const int nsweeps = chf.width;
    rcScopedDelete<rcLayerSweepSpan> sweeps = (rcLayerSweepSpan*)rcAlloc(sizeof(rcLayerSweepSpan)*nsweeps, RC_ALLOC_TEMP);
    if (!sweeps)
    {
        ctx->log(RC_LOG_ERROR, "CollectLayerRegionsMonotone: Out of memory 'sweeps' (%d).", nsweeps);
        return false;
    }

    // Partition walkable area into monotone regions.
    rcIntArray prev(256);
    unsigned short regId = 0;

    for (int y = borderSize; y < h-borderSize; ++y)
    {
        prev.resize(regId+1);
        memset(&prev[0], 0, sizeof(int)*regId);
        unsigned short sweepId = 0;

        for (int x = borderSize; x < w-borderSize; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];

            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                const rcCompactSpan& s = chf.spans[i];
                if (chf.areas[i] == RC_NULL_AREA) continue;

                unsigned short sid = 0xffff;

                // -x
                if (rcGetCon(s, 0) != RC_NOT_CONNECTED)
                {
                    const int ax = x + rcGetDirOffsetX(0);
                    const int ay = y + rcGetDirOffsetY(0);
                    const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0);
                    if (chf.areas[ai] != RC_NULL_AREA && srcReg[ai] != 0xffff)
                        sid = srcReg[ai];
                }

                if (sid == 0xffff)
                {
                    sid = sweepId++;
                    sweeps[sid].nei = 0xffff;
                    sweeps[sid].ns = 0;
                }

                // -y
                if (rcGetCon(s, 3) != RC_NOT_CONNECTED)
                {
                    const int ax = x + rcGetDirOffsetX(3);
                    const int ay = y + rcGetDirOffsetY(3);
                    const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3);
                    const unsigned short nr = srcReg[ai];
                    if (nr != 0xffff)
                    {
                        // Set neighbour when first valid neighbour is encountered.
                        if (sweeps[sid].ns == 0)
                            sweeps[sid].nei = nr;

                        if (sweeps[sid].nei == nr)
                        {
                            // Update existing neighbour
                            sweeps[sid].ns++;
                            prev[nr]++;
                        }
                        else
                        {
                            // This is hit if there is more than one neighbour.
                            // Invalidate the neighbour.
                            sweeps[sid].nei = 0xffff;
                        }
                    }
                }

                srcReg[i] = sid;
            }
        }

        // Create unique ID.
        for (int i = 0; i < sweepId; ++i)
        {
            // If the neighbour is set and there is only one continuous connection to it,
            // the sweep will be merged with the previous one, else a new region is created.
            if (sweeps[i].nei != 0xffff && prev[sweeps[i].nei] == sweeps[i].ns)
            {
                sweeps[i].id = sweeps[i].nei;
            }
            else
            {
                sweeps[i].id = regId++;
            }
        }

        // Remap local sweep ids to region ids.
        for (int x = borderSize; x < w-borderSize; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];
            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                if (srcReg[i] != 0xffff)
                    srcReg[i] = sweeps[srcReg[i]].id;
            }
        }
    }

    // Allocate and init layer regions.
    nregs = (int)regId;
    regs = (rcLayerRegionMonotone*)rcAlloc(sizeof(rcLayerRegionMonotone)*nregs, RC_ALLOC_TEMP);
    if (!regs)
    {
        ctx->log(RC_LOG_ERROR, "CollectLayerRegionsMonotone: Out of memory 'regs' (%d).", nregs);
        return false;
    }
    memset(regs, 0, sizeof(rcLayerRegionMonotone)*nregs);
    for (int i = 0; i < nregs; ++i)
    {
        regs[i].layerId = 0xffff;
        regs[i].ymin = 0xffff;
        regs[i].ymax = 0;
    }

    rcIntArray lregs(64);

    // Find region neighbours and overlapping regions.
    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            const rcCompactCell& c = chf.cells[x+y*w];
            lregs.resize(0);

            for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i)
            {
                const rcCompactSpan& s = chf.spans[i];
                const unsigned short ri = srcReg[i];
                if (ri == 0xffff) continue;

                regs[ri].ymin = rcMin(regs[ri].ymin, s.y);
                regs[ri].ymax = rcMax(regs[ri].ymax, s.y);

                // Collect all region layers.
                lregs.push(ri);

                // Update neighbours
                for (int dir = 0; dir < 4; ++dir)
                {
                    if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
                    {
                        const int ax = x + rcGetDirOffsetX(dir);
                        const int ay = y + rcGetDirOffsetY(dir);
                        const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir);
                        const unsigned short rai = srcReg[ai];
                        if (rai != 0xffff && rai != ri)
                            addUnique(regs[ri].neis, rai);
                    }
                }
            }

            // Update overlapping regions.
            const int nlregs = lregs.size();
            for (int i = 0; i < nlregs-1; ++i)
            {
                for (int j = i+1; j < nlregs; ++j)
                {
                    if (lregs[i] != lregs[j])
                    {
                        rcLayerRegionMonotone& ri = regs[lregs[i]];
                        rcLayerRegionMonotone& rj = regs[lregs[j]];
                        addUnique(ri.layers, lregs[j]);
                        addUnique(rj.layers, lregs[i]);
                    }
                }
            }
        }
    }

    return true;
}

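// NOTE: the rcIntArray overload of addUnique() used above is not shown in this excerpt.
// A minimal sketch of what it presumably does, assuming rcIntArray exposes the
// contains()/push() methods used elsewhere in this file:
static void addUnique(rcIntArray& a, int v)
{
    // Append 'v' only if it is not already present.
    if (!a.contains(v))
        a.push(v);
}
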
static bool SplitAndStoreLayerRegions(rcContext* ctx, rcCompactHeightfield& chf, const int borderSize, const int walkableHeight, unsigned short* srcReg, rcLayerRegionMonotone* regs, const int nregs, rcHeightfieldLayerSet& lset)
{
    // Create 2D layers from regions.
    unsigned short layerId = 0;

    rcIntArray stack(64);
    stack.resize(0);

    for (int i = 0; i < nregs; ++i)
    {
        rcLayerRegionMonotone& root = regs[i];
        // Skip already visited.
        if (root.layerId != 0xffff)
            continue;

        // Start search.
        root.layerId = layerId;
        root.base = 1;

        stack.push(i);

        while (stack.size())
        {
            // Pop front
            rcLayerRegionMonotone& reg = regs[stack[0]];
            for (int j = 1; j < stack.size(); ++j)
                stack[j - 1] = stack[j];
            stack.pop();

            const int nneis = (int)reg.neis.size();
            for (int j = 0; j < nneis; ++j)
            {
                const int nei = reg.neis[j];
                rcLayerRegionMonotone& regn = regs[nei];
                // Skip already visited.
                if (regn.layerId != 0xffff)
                    continue;
                // Skip if the neighbour is overlapping root region.
                if (root.layers.contains(nei))
                    continue;
                // Skip if the height range would become too large.
                const int ymin = rcMin(root.ymin, regn.ymin);
                const int ymax = rcMax(root.ymax, regn.ymax);
                if ((ymax - ymin) >= 255)
                    continue;

                // Deepen
                stack.push(nei);

                // Mark layer id
                regn.layerId = layerId;
                // Merge current layers to root.
                for (int k = 0; k < regn.layers.size(); ++k)
                    addUnique(root.layers, regn.layers[k]);
                root.ymin = rcMin(root.ymin, regn.ymin);
                root.ymax = rcMax(root.ymax, regn.ymax);
            }
        }

        layerId++;
    }

    // Merge non-overlapping regions that are close in height.
    const unsigned short mergeHeight = (unsigned short)walkableHeight * 4;

    for (int i = 0; i < nregs; ++i)
    {
        rcLayerRegionMonotone& ri = regs[i];
        if (!ri.base) continue;

        unsigned short newId = ri.layerId;

        for (;;)
        {
            unsigned short oldId = 0xffff;

            for (int j = 0; j < nregs; ++j)
            {
                if (i == j) continue;
                rcLayerRegionMonotone& rj = regs[j];
                if (!rj.base) continue;

                // Skip if the regions are not close to each other.
                if (!overlapRange(ri.ymin, ri.ymax+mergeHeight, rj.ymin, rj.ymax+mergeHeight))
                    continue;
                // Skip if the height range would become too large.
                const int ymin = rcMin(ri.ymin, rj.ymin);
                const int ymax = rcMax(ri.ymax, rj.ymax);
                if ((ymax - ymin) >= 255)
                    continue;

                // Make sure that there is no overlap when merging 'ri' and 'rj'.
                bool overlap = false;
                // Iterate over all regions which have the same layerId as 'rj'
                for (int k = 0; k < nregs; ++k)
                {
                    if (regs[k].layerId != rj.layerId)
                        continue;
                    // Check if region 'k' is overlapping region 'ri'
                    // Index to 'regs' is the same as region id.
                    if (ri.layers.contains(k))
                    {
                        overlap = true;
                        break;
                    }
                }
                // Cannot merge if regions overlap.
                if (overlap)
                    continue;

                // Can merge i and j.
                oldId = rj.layerId;
                break;
            }

            // Could not find anything to merge with, stop.
            if (oldId == 0xffff)
                break;

            // Merge
            for (int j = 0; j < nregs; ++j)
            {
                rcLayerRegionMonotone& rj = regs[j];
                if (rj.layerId == oldId)
                {
                    rj.base = 0;
                    // Remap layerIds.
                    rj.layerId = newId;
                    // Add overlaid layers from 'rj' to 'ri'.
                    for (int k = 0; k < rj.layers.size(); ++k)
                        addUnique(ri.layers, rj.layers[k]);
                    // Update height bounds.
                    ri.ymin = rcMin(ri.ymin, rj.ymin);
                    ri.ymax = rcMax(ri.ymax, rj.ymax);
                }
            }
        }
    }

    // Compact layerIds
    layerId = 0;
    if (nregs < 256)
    {
        // Compact ids.
        unsigned short remap[256];
        memset(remap, 0, sizeof(unsigned short)*256);
        // Find number of unique regions.
        for (int i = 0; i < nregs; ++i)
            remap[regs[i].layerId] = 1;
        for (int i = 0; i < 256; ++i)
            if (remap[i])
                remap[i] = layerId++;
        // Remap ids.
        for (int i = 0; i < nregs; ++i)
            regs[i].layerId = remap[regs[i].layerId];
    }
    else
    {
        for (int i = 0; i < nregs; ++i)
            regs[i].remap = true;

        for (int i = 0; i < nregs; ++i)
        {
            if (!regs[i].remap)
                continue;

            unsigned short oldId = regs[i].layerId;
            unsigned short newId = ++layerId;
            for (int j = i; j < nregs; ++j)
            {
                if (regs[j].layerId == oldId)
                {
                    regs[j].layerId = newId;
                    regs[j].remap = false;
                }
            }
        }
    }

    // No layers, return empty.
    if (layerId == 0)
    {
        ctx->stopTimer(RC_TIMER_BUILD_LAYERS);
        return true;
    }

    // Create layers.
    rcAssert(lset.layers == 0);

    const int w = chf.width;
    const int h = chf.height;
    const int lw = w - borderSize*2;
    const int lh = h - borderSize*2;

    // Build contracted bbox for layers.
    float bmin[3], bmax[3];
    rcVcopy(bmin, chf.bmin);
    rcVcopy(bmax, chf.bmax);
    bmin[0] += borderSize*chf.cs;
    bmin[2] += borderSize*chf.cs;
    bmax[0] -= borderSize*chf.cs;
    bmax[2] -= borderSize*chf.cs;

    lset.nlayers = (int)layerId;

    lset.layers = (rcHeightfieldLayer*)rcAlloc(sizeof(rcHeightfieldLayer)*lset.nlayers, RC_ALLOC_PERM);
    if (!lset.layers)
    {
        ctx->log(RC_LOG_ERROR, "SplitAndStoreLayerRegions: Out of memory 'layers' (%d).", lset.nlayers);
        return false;
    }
    memset(lset.layers, 0, sizeof(rcHeightfieldLayer)*lset.nlayers);

    // Store layers.
    for (int i = 0; i < lset.nlayers; ++i)
    {
        unsigned short curId = (unsigned short)i;

        // Allocate memory for the current layer.
        rcHeightfieldLayer* layer = &lset.layers[i];
        memset(layer, 0, sizeof(rcHeightfieldLayer));

        const int gridSize = sizeof(unsigned char)*lw*lh;

        layer->heights = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->heights)
        {
            ctx->log(RC_LOG_ERROR, "SplitAndStoreLayerRegions: Out of memory 'heights' (%d).", gridSize);
            return false;
        }
        memset(layer->heights, 0xff, gridSize);

        layer->areas = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->areas)
        {
            ctx->log(RC_LOG_ERROR, "SplitAndStoreLayerRegions: Out of memory 'areas' (%d).", gridSize);
            return false;
        }
        memset(layer->areas, 0, gridSize);

        layer->cons = (unsigned char*)rcAlloc(gridSize, RC_ALLOC_PERM);
        if (!layer->cons)
        {
            ctx->log(RC_LOG_ERROR, "SplitAndStoreLayerRegions: Out of memory 'cons' (%d).", gridSize);
            return false;
        }
        memset(layer->cons, 0, gridSize);

        // Find layer height bounds.
        int hmin = 0, hmax = 0;
        for (int j = 0; j < nregs; ++j)
        {
            if (regs[j].base && regs[j].layerId == curId)
            {
                hmin = (int)regs[j].ymin;
                hmax = (int)regs[j].ymax;
            }
        }

        layer->width = lw;
        layer->height = lh;
        layer->cs = chf.cs;
        layer->ch = chf.ch;

        // Adjust the bbox to fit the heightfield.
        rcVcopy(layer->bmin, bmin);
        rcVcopy(layer->bmax, bmax);
        layer->bmin[1] = bmin[1] + hmin*chf.ch;
        layer->bmax[1] = bmin[1] + hmax*chf.ch;
        layer->hmin = hmin;
        layer->hmax = hmax;

        // Update usable data region.
        layer->minx = layer->width;
        layer->maxx = 0;
        layer->miny = layer->height;
        layer->maxy = 0;

        // Copy height and area from compact heightfield.
        for (int y = 0; y < lh; ++y)
        {
            for (int x = 0; x < lw; ++x)
            {
                const int cx = borderSize+x;
                const int cy = borderSize+y;
                const rcCompactCell& c = chf.cells[cx+cy*w];
                for (int j = (int)c.index, nj = (int)(c.index+c.count); j < nj; ++j)
                {
                    const rcCompactSpan& s = chf.spans[j];
                    // Skip unassigned regions.
                    if (srcReg[j] == 0xffff)
                        continue;
                    // Skip if it does not belong to the current layer.
                    unsigned short lid = regs[srcReg[j]].layerId;
                    if (lid != curId)
                        continue;

                    // Update data bounds.
                    layer->minx = rcMin(layer->minx, x);
                    layer->maxx = rcMax(layer->maxx, x);
                    layer->miny = rcMin(layer->miny, y);
                    layer->maxy = rcMax(layer->maxy, y);

                    // Store height and area type.
                    const int idx = x+y*lw;
                    layer->heights[idx] = (unsigned char)(s.y - hmin);
                    layer->areas[idx] = chf.areas[j];

                    // Check connection.
                    unsigned char portal = 0;
                    unsigned char con = 0;
                    for (int dir = 0; dir < 4; ++dir)
                    {
                        if (rcGetCon(s, dir) != RC_NOT_CONNECTED)
                        {
                            const int ax = cx + rcGetDirOffsetX(dir);
                            const int ay = cy + rcGetDirOffsetY(dir);
                            const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir);
                            unsigned short alid = srcReg[ai] != 0xffff ? regs[srcReg[ai]].layerId : 0xffff;

                            // Portal mask
                            if (chf.areas[ai] != RC_NULL_AREA && lid != alid)
                            {
                                portal |= (unsigned char)(1<<dir);

                                // Update height so that it matches on both sides of the portal.
                                const rcCompactSpan& as = chf.spans[ai];
                                if (as.y > hmin)
                                    layer->heights[idx] = rcMax(layer->heights[idx], (unsigned char)(as.y - hmin));
                            }
                            // Valid connection mask
                            if (chf.areas[ai] != RC_NULL_AREA && lid == alid)
                            {
                                const int nx = ax - borderSize;
                                const int ny = ay - borderSize;
                                if (nx >= 0 && ny >= 0 && nx < lw && ny < lh)
                                {
                                    con |= (unsigned char)(1 << dir);

                                    // [UE4: make sure that connections are bidirectional,
                                    // otherwise contour tracing will get stuck in an infinite loop]
                                    const int nidx = nx + (ny * lw);
                                    layer->cons[nidx] |= (unsigned char)(1 << ((dir + 2) % 4));
                                }
                            }
                        }
                    }

                    layer->cons[idx] |= (portal << 4) | con;
                }
            }
        }

        if (layer->minx > layer->maxx)
            layer->minx = layer->maxx = 0;
        if (layer->miny > layer->maxy)
            layer->miny = layer->maxy = 0;
    }

    return true;
}

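// The 'cons' byte written above packs two 4-bit masks: the low nibble holds the valid
// same-layer connections per direction, the high nibble marks portal edges into other
// layers. A consumer-side sketch of decoding it (the function name and the 'x'/'y'
// arguments are illustrative, not part of the code above):
inline void decodeLayerConnections(const rcHeightfieldLayer& layer, int x, int y,
                                   bool walkable[4], bool portal[4])
{
    const unsigned char packed = layer.cons[x + y*layer.width];
    for (int dir = 0; dir < 4; ++dir)
    {
        walkable[dir] = (packed & (1 << dir)) != 0;        // low nibble: same-layer connection
        portal[dir]   = (packed & (1 << (dir + 4))) != 0;  // high nibble: portal to another layer
    }
}
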
// All roots returned in interval [0,1]. Assumed geometry followed a linear
// trajectory between x and xnew.
void getCoplanarityTimes( const Vec3& x0, const Vec3& x1, const Vec3& x2, const Vec3& x3,
                          const Vec3& xnew0, const Vec3& xnew1, const Vec3& xnew2, const Vec3& xnew3,
                          double* times, double* errors, unsigned& num_times )
{
    const double tol = 1e-8;
    num_times = 0;

    // cubic coefficients, A*t^3+B*t^2+C*t+D (for t in [0,1])
    const Vec3 x03 = x0 - x3;
    const Vec3 x13 = x1 - x3;
    const Vec3 x23 = x2 - x3;
    const Vec3 v03 = ( xnew0 - xnew3 ) - x03;
    const Vec3 v13 = ( xnew1 - xnew3 ) - x13;
    const Vec3 v23 = ( xnew2 - xnew3 ) - x23;

    double A = triple( v03, v13, v23 );
    double B = triple( x03, v13, v23 ) + triple( v03, x13, v23 ) + triple( v03, v13, x23 );
    double C = triple( x03, x13, v23 ) + triple( x03, v13, x23 ) + triple( v03, x13, x23 );
    double D = triple( x03, x13, x23 );

    const double convergence_tol = tol * ( std::fabs( A ) + std::fabs( B ) + std::fabs( C ) + std::fabs( D ) );

    // find intervals to check, or just solve it if it reduces to a quadratic =============================
    double interval_times[4];
    unsigned interval_times_size = 0;

    double discriminant = B * B - 3 * A * C;   // of derivative of cubic, 3*A*t^2+2*B*t+C, divided by 4 for convenience
    if ( discriminant <= 0 )
    {
        // monotone cubic: only one root in [0,1] possible,
        // so we just check the single interval [0,1]
        interval_times[0] = 0;
        interval_times[1] = 1;
        interval_times_size = 2;
    }
    else
    {
        // positive discriminant, B != 0
        if ( A == 0 )
        {
            // the cubic is just a quadratic, B*t^2+C*t+D ========================================
            discriminant = C * C - 4 * B * D;   // of the quadratic
            if ( discriminant <= 0 )
            {
                double t = -C / ( 2 * B );
                if ( t >= -tol && t <= 1 + tol )
                {
                    t = clamp( t, 0., 1. );
                    double val = std::fabs( signed_volume( ( 1 - t ) * x0 + t * xnew0,
                                                           ( 1 - t ) * x1 + t * xnew1,
                                                           ( 1 - t ) * x2 + t * xnew2,
                                                           ( 1 - t ) * x3 + t * xnew3 ) );
                    if ( val < convergence_tol )
                    {
                        times[num_times++] = t;
                    }
                }
            }
            else
            {
                // two separate real roots
                double t0, t1;
                if ( C > 0 )
                    t0 = ( -C - std::sqrt( discriminant ) ) / ( 2 * B );
                else
                    t0 = ( -C + std::sqrt( discriminant ) ) / ( 2 * B );
                t1 = D / ( B * t0 );
                if ( t1 < t0 )
                    std::swap( t0, t1 );
                if ( t0 >= -tol && t0 <= 1 + tol )
                {
                    times[num_times++] = clamp( t0, 0., 1. );
                }
                if ( t1 >= -tol && t1 <= 1 + tol )
                {
                    addUnique( times, num_times, clamp( t1, 0., 1. ) );
                }
            }
            if ( errors )
            {
                for ( unsigned i = 0; i < num_times; ++i )
                {
                    double ti = times[i];
                    double val = std::fabs( signed_volume( ( 1 - ti ) * x0 + ti * xnew0,
                                                           ( 1 - ti ) * x1 + ti * xnew1,
                                                           ( 1 - ti ) * x2 + ti * xnew2,
                                                           ( 1 - ti ) * x3 + ti * xnew3 ) );
                    errors[i] = val;
                }
            }
            return;
        }
        else
        {
            // cubic is not monotone: divide up [0,1] accordingly =====================================
            double t0, t1;
            if ( B > 0 )
                t0 = ( -B - std::sqrt( discriminant ) ) / ( 3 * A );
            else
                t0 = ( -B + std::sqrt( discriminant ) ) / ( 3 * A );
            t1 = C / ( 3 * A * t0 );
            if ( t1 < t0 )
                std::swap( t0, t1 );
            interval_times[interval_times_size++] = 0;
            if ( t0 > 0 && t0 < 1 )
                interval_times[interval_times_size++] = t0;
            if ( t1 > 0 && t1 < 1 )
                interval_times[interval_times_size++] = t1;
            interval_times[interval_times_size++] = 1;
        }
    }

    // look for roots in indicated intervals ==============================================================
    // evaluate coplanarity more accurately at each endpoint of the intervals
    double interval_values[interval_times_size];
    for ( unsigned int i = 0; i < interval_times_size; ++i )
    {
        double t = interval_times[i];
        interval_values[i] = signed_volume( ( 1 - t ) * x0 + t * xnew0,
                                            ( 1 - t ) * x1 + t * xnew1,
                                            ( 1 - t ) * x2 + t * xnew2,
                                            ( 1 - t ) * x3 + t * xnew3 );
    }

    // first look for interval endpoints that are close enough to zero, without a sign change
    for ( unsigned int i = 0; i < interval_times_size; ++i )
    {
        if ( interval_values[i] == 0 )
        {
            times[num_times++] = interval_times[i];
        }
        else if ( std::fabs( interval_values[i] ) < convergence_tol )
        {
            if ( ( i == 0
                   || ( interval_values[i - 1] >= 0 && interval_values[i] >= 0 )
                   || ( interval_values[i - 1] <= 0 && interval_values[i] <= 0 ) )
                 && ( i == interval_times_size - 1
                      || ( interval_values[i + 1] >= 0 && interval_values[i] >= 0 )
                      || ( interval_values[i + 1] <= 0 && interval_values[i] <= 0 ) ) )
            {
                times[num_times++] = interval_times[i];
            }
        }
    }

    // and then search in intervals with a sign change
    for ( unsigned int i = 1; i < interval_times_size; ++i )
    {
        double tlo = interval_times[i - 1], thi = interval_times[i], tmid;
        double vlo = interval_values[i - 1], vhi = interval_values[i], vmid;
        if ( ( vlo < 0 && vhi > 0 ) || ( vlo > 0 && vhi < 0 ) )
        {
            // start off with secant approximation (in case the cubic is actually linear)
            double alpha = vhi / ( vhi - vlo );
            tmid = alpha * tlo + ( 1 - alpha ) * thi;
            for ( int iteration = 0; iteration < 50; ++iteration )
            {
                vmid = signed_volume( ( 1 - tmid ) * x0 + tmid * xnew0,
                                      ( 1 - tmid ) * x1 + tmid * xnew1,
                                      ( 1 - tmid ) * x2 + tmid * xnew2,
                                      ( 1 - tmid ) * x3 + tmid * xnew3 );
                if ( std::fabs( vmid ) < 1e-2 * convergence_tol )
                    break;
                if ( ( vlo < 0 && vmid > 0 ) || ( vlo > 0 && vmid < 0 ) )
                {
                    // if sign change between lo and mid
                    thi = tmid;
                    vhi = vmid;
                }
                else
                {
                    // otherwise sign change between hi and mid
                    tlo = tmid;
                    vlo = vmid;
                }
                if ( iteration % 2 )
                    alpha = 0.5;                   // sometimes go with bisection to guarantee we make progress
                else
                    alpha = vhi / ( vhi - vlo );   // other times go with secant to hopefully get there fast
                tmid = alpha * tlo + ( 1 - alpha ) * thi;
            }
            times[num_times++] = tmid;
        }
    }
    sort( times, num_times );

    if ( errors )
    {
        for ( unsigned i = 0; i < num_times; ++i )
        {
            double ti = times[i];
            double val = std::fabs( signed_volume( ( 1 - ti ) * x0 + ti * xnew0,
                                                   ( 1 - ti ) * x1 + ti * xnew1,
                                                   ( 1 - ti ) * x2 + ti * xnew2,
                                                   ( 1 - ti ) * x3 + ti * xnew3 ) );
            errors[i] = val;
        }
    }
}

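// A typical continuous-collision query built on getCoplanarityTimes() might look like
// the sketch below: find the candidate coplanarity times of a moving vertex 'p' against
// a moving triangle (a, b, c) over one step, then examine each candidate in order. The
// function name, the oversized arrays, and the follow-up proximity test are illustrative
// placeholders, not part of the routine above.
void exampleVertexTriangleQuery( const Vec3& p, const Vec3& a, const Vec3& b, const Vec3& c,
                                 const Vec3& pnew, const Vec3& anew, const Vec3& bnew, const Vec3& cnew )
{
    double times[8];    // generously sized; the routine returns only a handful of roots
    double errors[8];
    unsigned num_times = 0;
    getCoplanarityTimes( p, a, b, c, pnew, anew, bnew, cnew, times, errors, num_times );
    for ( unsigned i = 0; i < num_times; ++i )
    {
        const double t = times[i];
        // Interpolate positions to time t and run a proximity/intersection test here
        // to confirm whether the coplanarity instant is an actual collision.
        (void)t;
    }
}
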
void setUnique(complex arr[], int i, int j, int stats[])
{
    stats[UNIQUES_GENERATED] += addUnique(arr, i, j, stats);
}

KService::List KServiceType::offers(const QString &_servicetype)
{
    QDict<KService> dict(53);
    KService::List lst;

    // Services associated directly with this servicetype (the normal case)
    KServiceType::Ptr serv = KServiceTypeFactory::self()->findServiceTypeByName(_servicetype);
    if (serv)
        addUnique(lst, dict, KServiceFactory::self()->offers(serv->offset()), false);
    else
        kdWarning(7009) << "KServiceType::offers : servicetype " << _servicetype << " not found" << endl;

    // Find services associated with any mimetype parents. e.g. text/x-java -> text/plain
    KMimeType::Ptr mime = dynamic_cast<KMimeType *>(static_cast<KServiceType *>(serv));
    bool isAMimeType = (mime != 0);
    if (mime)
    {
        while (true)
        {
            QString parent = mime->parentMimeType();
            if (parent.isEmpty())
                break;
            mime = dynamic_cast<KMimeType *>(KServiceTypeFactory::self()->findServiceTypeByName(parent));
            if (!mime)
                break;
            addUnique(lst, dict, KServiceFactory::self()->offers(mime->offset()), false);
        }
    }
    serv = mime = 0;

    // QValueListIterator<KService::Ptr> it = lst.begin();
    // for( ; it != lst.end(); ++it )
    //     kdDebug() << (*it).data() << " " << (*it)->name() << endl;

    // Support for all/* is deactivated by KServiceTypeProfile::configurationMode()
    // (and makes no sense when querying for an "all" servicetype itself
    // nor for non-mimetype service types)
    if (!KServiceTypeProfile::configurationMode() && isAMimeType && _servicetype.left(4) != "all/")
    {
        // Support for services associated with "all"
        KServiceType *servAll = KServiceTypeFactory::self()->findServiceTypeByName("all/all");
        if (servAll)
        {
            addUnique(lst, dict, KServiceFactory::self()->offers(servAll->offset()), true);
        }
        else
            kdWarning(7009) << "KServiceType::offers : servicetype all/all not found" << endl;
        delete servAll;

        // Support for services associated with "allfiles"
        if (_servicetype != "inode/directory" && _servicetype != "inode/directory-locked")
        {
            KServiceType *servAllFiles = KServiceTypeFactory::self()->findServiceTypeByName("all/allfiles");
            if (servAllFiles)
            {
                addUnique(lst, dict, KServiceFactory::self()->offers(servAllFiles->offset()), true);
            }
            else
                kdWarning(7009) << "KServiceType::offers : servicetype all/allfiles not found" << endl;
            delete servAllFiles;
        }
    }

    return lst;
}

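// NOTE: the addUnique(lst, dict, newLst, lowPrio) helper used above is a file-local
// function that is not part of this excerpt. The sketch below only captures the
// de-duplication it presumably performs, assuming the dictionary is keyed by each
// service's desktop entry path; how the real helper honours the 'lowPrio' flag for
// the catch-all "all/*" offers is not reproduced here.
static void addUnique(KService::List &lst, QDict<KService> &dict,
                      const KService::List &newLst, bool lowPrio)
{
    QValueListConstIterator<KService::Ptr> it = newLst.begin();
    for (; it != newLst.end(); ++it)
    {
        KService *service = (*it).data();
        if (dict.find(service->desktopEntryPath()))
            continue; // already collected, skip the duplicate
        dict.insert(service->desktopEntryPath(), service);
        lst.append(*it);
        Q_UNUSED(lowPrio);
    }
}
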