void WMemoryMap::updateRowGroupDataWidgets(RowGroup &rg, MemoryMap::NodeIterator mmNode) {
    // Refresh the data widgets of one table row so they mirror the map node.
    ASSERT_require(mmNode != memoryMap_.nodes().end());
    const AddressInterval &ival = mmNode->key();
    ASSERT_forbid(ival.isEmpty());
    ASSERT_require(rg.segmentVa == ival.least());

    // Split/merge buttons appear only when editing is allowed, that column is
    // not already in edit mode, and the operation is actually possible.
    const bool hideSplit = !isEditable_ || rg.editingColumn==SplitColumn || !canSplit(rg, mmNode);
    const bool hideMerge = !isEditable_ || rg.editingColumn==MergeColumn || !canMerge(rg, mmNode);
    rg.wSplit->setHidden(hideSplit);
    rg.wMerge->setHidden(hideMerge);

    // Address range and size. The whole-address-space interval has a size
    // that wraps back to zero, so it is displayed as the word "whole".
    rg.wLeastVa->setText(StringUtility::addrToString(ival.least()));
    rg.wGreatestVa->setText(StringUtility::addrToString(ival.greatest()));
    if (ival.isWhole()) {
        rg.wSize->setText("whole");                     // since size would overflow back to zero
    } else {
        rg.wSize->setText(StringUtility::addrToString(ival.size()));
    }

    // Access permissions and segment name.
    const MemoryMap::Segment &seg = mmNode->value();
    const auto access = seg.accessibility();
    rg.wReadable->setChecked(0 != (access & MemoryMap::READABLE));
    rg.wWritable->setChecked(0 != (access & MemoryMap::WRITABLE));
    rg.wExecutable->setChecked(0 != (access & MemoryMap::EXECUTABLE));
    rg.wName->setText(StringUtility::cEscape(seg.name()));
}
QSPatch QSPatch::merged(const QSPatch &other) const {
    // Non-mutating companion of merge(): returns the combined patch, or a
    // default-constructed (null) patch when the two cannot be merged.
    if (canMerge(other)) {
        QSPatch combined = *this;
        combined.merge(other);
        return combined;
    }
    return QSPatch();
}
void LargeMap::add(const LargeRange& range) { LargeRange merged = range; for (size_t i = 0; i < m_free.size(); ++i) { if (!canMerge(merged, m_free[i])) continue; merged = merge(merged, m_free.pop(i--)); } m_free.push(merged); }
void XLargeMap::addFree(const XLargeRange& range) { XLargeRange merged = range; for (size_t i = 0; i < m_free.size(); ++i) { auto& other = m_free[i]; if (!canMerge(merged, other)) continue; merged = merge(merged, m_free.pop(i--)); } m_free.push(merged); }
bool Interval::merge(const Interval& other) { // If cant merge, return false if(!canMerge(other)) return false; // Else, merge - e.g "2" into "3-5" to create "2-5": if(other.start() < m_start) m_start = other.start(); if(other.end() > m_end) m_end = other.end(); return true; }
void WMemoryMap::updateRowGroupEditWidgets(RowGroup &rg, MemoryMap::NodeIterator mmNode) {
    // Show or hide a row's editing widgets depending on whether the map is
    // editable and which column (if any) is currently being edited.
    if (!isEditable_) {
        // Read-only mode: no edit buttons, permission checkboxes are inert.
        rg.wDelete->hide();
        rg.wMove->hide();
        rg.wSplit->hide();
        rg.wMerge->hide();
        rg.wReadable->setEnabled(false);
        rg.wWritable->setEnabled(false);
        rg.wExecutable->setEnabled(false);
        return;
    }

    // Each button is hidden while its own column is being edited; the split
    // and merge buttons additionally require the operation to be possible.
    rg.wDelete->setHidden(rg.editingColumn==DeleteColumn);
    rg.wMove->setHidden(rg.editingColumn==MoveColumn);
    rg.wSplit->setHidden(rg.editingColumn==SplitColumn || !canSplit(rg, mmNode));
    rg.wMerge->setHidden(rg.editingColumn==MergeColumn || !canMerge(rg, mmNode));
    rg.wReadable->setEnabled(true);
    rg.wWritable->setEnabled(true);
    rg.wExecutable->setEnabled(true);

    // Select which editor the stacked edit widget presents.
    switch (rg.editingColumn) {
        case ZeroColumn:
            rg.wEditStack->hide();
            break;
        case DeleteColumn:
            rg.wEditStack->setCurrentWidget(rg.wDeleteConfirm);
            rg.wEditStack->show();
            break;
        case MoveColumn:
            rg.wEditStack->setCurrentWidget(rg.wMoveSegment);
            rg.wEditStack->show();
            break;
        case MergeColumn:
            rg.wEditStack->setCurrentWidget(rg.wMergeConfirm);
            rg.wEditStack->show();
            break;
        case SplitColumn:
        case LeastVaColumn:
        case GreatestVaColumn:
        case SizeColumn:
            // All address/size edits share the hex-value editor.
            rg.wEditStack->setCurrentWidget(rg.wHexValueEdit);
            rg.wEditStack->show();
            break;
        default:
            ASSERT_not_reachable("don't know how to edit column " + StringUtility::numberToString(rg.editingColumn));
    }
}
static void processBlockQuadratic(ConcurrentDSU &uf, const std::vector<size_t> &block,
                                  const KMerData &data, unsigned tau) {
    // All-pairs pass over one block: unite two k-mers when they live in
    // different sets, the merge is allowed, and they are within Hamming
    // distance tau of each other.
    const size_t sz = block.size();
    for (size_t a = 0; a < sz; ++a) {
        const unsigned idxA = (unsigned)block[a];
        hammer::KMer kmerA = data.kmer(idxA);
        for (size_t b = a + 1; b < sz; ++b) {
            const unsigned idxB = (unsigned)block[b];
            hammer::KMer kmerB = data.kmer(idxB);
            // Same conjunction order as before: cheap set check first, the
            // Hamming-distance computation last.
            if (uf.find_set(idxA) != uf.find_set(idxB) &&
                canMerge(uf, idxA, idxB) &&
                hamdistKMer(kmerA, kmerB, tau) <= tau) {
                uf.unite(idxA, idxB);
            }
        }
    }
}
QSPatch &QSPatch::merge(const QSPatch &other) {
    // Fold 'other' into this patch in place; a no-op when the two patches
    // are not mergeable. Returns *this for call chaining.
    if (!canMerge(other))
        return *this;

    switch (d->type) {
    case QSPatch::Remove:
        // Union of the two removed index ranges.
        d->from = qMin(d->from, other.from());
        d->to = qMax(d->to, other.to());
        d->count = d->to - d->from + 1;
        break;
    case QSPatch::Move:
        // Consecutive moves simply accumulate the element count.
        d->count = d->count + other.count();
        break;
    case QSPatch::Insert: {
        // Append the other patch's rows to this patch's payload.
        QVariantList rows = d->data.toList();
        rows.append(other.data());
        d->data = rows;
        d->to = d->from + rows.count() - 1;
        d->count = rows.count();
        break;
    }
    default:
        break;
    }
    return *this;
}
// shift() // Description: shift tiles in given direction // Arguments: // dir - shift direction // Return Val: TRUE if shift is successful, FALSE if not bool Grid::shift(dir_e dir){ m_nSlot = 0; bool isShifted; int& (Grid::* getDirEntry)(int, int) = NULL; if(dir == LEFT) getDirEntry = &Grid::getEntry; else if(dir == DOWN) getDirEntry = &Grid::getFlipTransEntry; else if(dir == RIGHT) getDirEntry = &Grid::getFlipEntry; else if(dir == UP) getDirEntry = &Grid::getTransEntry; assert(getDirEntry != NULL); for(int i = 0;i < GRID_LENGTH;i++){ isShifted = FALSE; for(int j = 1;j < GRID_LENGTH;j++){ if((this->*getDirEntry)(i, j) == EMPTY) continue; if( (this->*getDirEntry)(i, j-1) == EMPTY ){ (this->*getDirEntry)(i, j-1) = (this->*getDirEntry)(i, j); (this->*getDirEntry)(i, j) = EMPTY; isShifted = TRUE; } else if(canMerge((this->*getDirEntry)(i, j),(this->*getDirEntry)(i, j-1))){ (this->*getDirEntry)(i, j-1) += (this->*getDirEntry)(i, j); if( (this->*getDirEntry)(i, j-1) > m_maxTile ) m_maxTile = (this->*getDirEntry)(i, j-1); (this->*getDirEntry)(i, j) = EMPTY; isShifted = TRUE; m_nEmptyBlk++; } } if(isShifted == TRUE){ m_slot[m_nSlot] = &(this->*getDirEntry)(i, GRID_LENGTH-1); m_nSlot++; } } return (m_nSlot > 0); }
// Find the single best pair of hulls to merge (smallest combined volume),
// merge them, and update the total volume. Returns true when a merge was
// performed. Pair tests are memoized in mHasBeenTested.
bool combineHulls(void)
{
    bool combine = false;
    // each new convex hull is given a unique guid.
    // A hash map is used to make sure that no hulls are tested twice.
    ChUllVector output;
    HaU32 count = (HaU32)mChulls.size();
    ChUll *mergeA = NULL;
    ChUll *mergeB = NULL;
    // Early out to save walking all the hulls. Hulls are combined based on
    // a target number or on a number of generated hulls.
    bool mergeTargetMet = (HaU32)mChulls.size() <= mMergeNumHulls;
    if (mergeTargetMet && (mSmallClusterThreshold == 0.0f))
        return false;
    HaF32 bestVolume = mTotalVolume;
    {
        for (HaU32 i=0; i<count; i++)
        {
            ChUll *cr = mChulls[i];
            for (HaU32 j=i+1; j<count; j++)
            {
                ChUll *match = mChulls[j];
                HaU32 hashIndex;
                // Order-independent pair key: smaller guid in the high bits.
                // NOTE(review): this packs each guid into 16 bits — keys
                // collide once guids exceed 65535; confirm the guid range
                // before relying on the memo table.
                if ( match->mGuid < cr->mGuid )
                {
                    hashIndex = (match->mGuid << 16) | cr->mGuid;
                }
                else
                {
                    hashIndex = (cr->mGuid << 16 ) | match->mGuid;
                }
                HaF32 combinedVolume;
                // Memoized canMerge() result for this pair.
                HaF32 *v = mHasBeenTested->find(hashIndex);
                if ( v == NULL )
                {
                    combinedVolume = canMerge(cr,match);
                    (*mHasBeenTested)[hashIndex] = combinedVolume;
                }
                else
                {
                    combinedVolume = *v;
                }
                // Zero is treated as "cannot merge"; otherwise keep the pair
                // with the smallest combined volume seen so far.
                if ( combinedVolume != 0 )
                {
                    if ( combinedVolume < bestVolume )
                    {
                        bestVolume = combinedVolume;
                        mergeA = cr;
                        mergeB = match;
                    }
                }
            }
        }
    }
    // If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
    // do the merge.
    bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
    if ( mergeA && (thresholdBelow || !mergeTargetMet))
    {
        ChUll *merge = doMerge(mergeA,mergeB);
        HaF32 volumeA = mergeA->mVolume;
        HaF32 volumeB = mergeB->mVolume;
        if ( merge )
        {
            combine = true;
            output.push_back(merge);
            // Carry over every hull except the two that were just merged.
            for (ChUllVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
            {
                ChUll *h = (*j);
                if ( h !=mergeA && h != mergeB )
                {
                    output.push_back(h);
                }
            }
            delete mergeA;
            delete mergeB;
            // Remove the old volumes and add the new one.
            mTotalVolume -= (volumeA + volumeB);
            mTotalVolume += merge->mVolume;
        }
        // NOTE(review): if doMerge() returns NULL, mChulls is assigned the
        // still-empty 'output', discarding (and leaking) every hull —
        // confirm doMerge() cannot fail once canMerge() reported non-zero.
        mChulls = output;
    }
    return combine;
}
bool LocalBiconnectedMerger::canMerge( Graph &G, node parent, node mergePartner )
{
    // A merge is allowed only when both directional checks pass; evaluation
    // order (level 1 first, then level 0) matches the original conjunction.
    if ( !canMerge(G, parent, mergePartner, 1) )
        return false;
    return canMerge(G, parent, mergePartner, 0);
}
bool ConvexBuilder::combineHulls(void) { bool combine = false; sortChulls(mChulls); // sort the convex hulls, largest volume to least... CHullVector output; // the output hulls... int i; for (i=0;i<mChulls.size() && !combine; ++i) { CHull *cr = mChulls[i]; int j; for (j=0;j<mChulls.size();j++) { CHull *match = mChulls[j]; if ( cr != match ) // don't try to merge a hull with itself, that be stoopid { CHull *merge = canMerge(cr,match); // if we can merge these two.... if ( merge ) { output.push_back(merge); ++i; while ( i != mChulls.size() ) { CHull *cr = mChulls[i]; if ( cr != match ) { output.push_back(cr); } i++; } delete cr; delete match; combine = true; break; } } } if ( combine ) { break; } else { output.push_back(cr); } } if ( combine ) { mChulls.clear(); mChulls = output; output.clear(); } return combine; }
// Post-process monotone regions for one tile-cache layer: (1) cull connected
// clusters of regions smaller than minRegionArea (unless they touch a tile
// border), (2) merge each region into its largest compatible neighbour,
// (3) compact region ids to a contiguous 1..regCount range, and (4) rewrite
// the per-cell region ids in the layer. Frees 'regs' via 'alloc' on exit.
static void MergeAndCompressRegions(dtTileCacheAlloc* alloc, dtTileCacheLayer& layer, dtLayerMonotoneRegion* regs, int nregs, const int minRegionArea, const int mergeRegionArea)
{
    // Assign provisional 1-based ids (0 is reserved for removed regions).
    for (int i = 0; i < nregs; ++i)
        regs[i].regId = (unsigned short)(i + 1);

    // Remove too small regions.
    if (minRegionArea > 0)
    {
        dtIntArray stack(32);
        dtIntArray trace(32);
        for (int i = 0; i < nregs; ++i)
        {
            dtLayerMonotoneRegion& reg = regs[i];
            if (reg.visited || reg.area == 0)
                continue;
            // Count the total size of all the connected regions.
            // Also keep track of the regions connects to a tile border.
            bool connectsToBorder = false;
            int cellCount = 0;
            stack.resize(0);
            trace.resize(0);
            reg.visited = true;
            stack.push(i);
            // Flood-fill the connectivity graph starting from region i.
            while (stack.size())
            {
                // Pop
                int ri = stack.pop();
                dtLayerMonotoneRegion& creg = regs[ri];
                connectsToBorder |= creg.border;
                cellCount += creg.area;
                trace.push(ri);
                for (int j = 0; j < creg.neis.size(); ++j)
                {
                    dtLayerMonotoneRegion& neireg = regs[creg.neis[j]];
                    if (neireg.visited)
                        continue;
                    if (neireg.regId == 0)
                        continue;
                    // Visit: regId is still index+1 at this stage.
                    stack.push(neireg.regId - 1);
                    neireg.visited = true;
                }
            }
            // If the accumulated regions size is too small, remove it.
            // Do not remove areas which connect to tile borders
            // as their size cannot be estimated correctly and removing them
            // can potentially remove necessary areas.
            if (cellCount < minRegionArea && !connectsToBorder)
            {
                // Kill all visited regions.
                for (int j = 0; j < trace.size(); ++j)
                {
                    regs[trace[j]].area = 0;
                    regs[trace[j]].regId = 0;
                }
            }
        }
    }

    // Merge each surviving region into its largest mergeable neighbour with
    // the same area and chunk ids.
    for (int i = 0; i < nregs; ++i)
    {
        dtLayerMonotoneRegion& reg = regs[i];
        if (reg.regId == 0)
            continue;
        // don't use mergeRegionArea, it doesn't work well with monotone partitioning
        // (results in even more long thin polys)
        int merge = -1;
        int mergea = 0;
        for (int j = 0; j < reg.neis.size(); ++j)
        {
            const unsigned short nei = (unsigned short)reg.neis[j];
            dtLayerMonotoneRegion& regn = regs[nei];
            if (reg.regId == regn.regId)
                continue;
            if (reg.areaId != regn.areaId || reg.chunkId != regn.chunkId)
                continue;
            if (regn.area > mergea)
            {
                if (canMerge(reg.regId, regn.regId, regs, nregs))
                {
                    mergea = regn.area;
                    merge = (int)nei;
                }
            }
        }
        if (merge != -1)
        {
            // Rewrite every region carrying the old id to the merged id.
            const unsigned short oldId = reg.regId;
            const unsigned short newId = regs[merge].regId;
            for (int j = 0; j < nregs; ++j)
                if (regs[j].regId == oldId)
                    regs[j].regId = newId;
        }
    }

    // Compact the surviving ids into a dense 1..regId range.
    unsigned short regId = 0;
    if (nregs < 256)
    {
        // Compact ids.
        unsigned short remap[256];
        memset(remap, 0, sizeof(unsigned short)*256);
        // Find number of unique regions.
        for (int i = 0; i < nregs; ++i)
            remap[regs[i].regId] = 1;
        // skip region id 0, it's used for skipping minRegionArea
        remap[0] = 0;
        for (int i = 1; i < 256; ++i)
            if (remap[i])
                remap[i] = ++regId;
        // Remap ids.
        for (int i = 0; i < nregs; ++i)
            regs[i].regId = remap[regs[i].regId];
    }
    else
    {
        // Too many regions for the table-based remap; compact quadratically.
        for (int i = 0; i < nregs; ++i)
            regs[i].remap = true;
        for (int i = 0; i < nregs; ++i)
        {
            // skip region id 0, it's used for skipping minRegionArea
            if (!regs[i].remap || regs[i].regId == 0)
                continue;
            unsigned short oldId = regs[i].regId;
            unsigned short newId = ++regId;
            for (int j = i; j < nregs; ++j)
            {
                if (regs[j].regId == oldId)
                {
                    regs[j].regId = newId;
                    regs[j].remap = false;
                }
            }
        }
    }
    layer.regCount = regId;

    // Write the final region ids back into the layer's cells (0xffff marks
    // a cell with no region).
    const int maxi = (int)layer.header->width * (int)layer.header->height;
    for (int i = 0; i < maxi; ++i)
    {
        if (layer.regs[i] != 0xffff)
            layer.regs[i] = regs[layer.regs[i]].regId;
    }

    alloc->free(regs);
}
// One coalescing pass: try to merge 'mergeBlock' with an adjacent free block
// in the small, then medium, then large free-block sets. On success the two
// blocks are replaced by the merged block, which is returned; when nothing
// can be merged, the sentinel pair (NULL, -1) is returned.
memPtrSize CHeapManager::mergeIteration(memPtrSize mergeBlock) {
    if (smallMemSet.size() > 0) {
        auto smallLowerBound = smallMemSet.lower_bound(mergeBlock);
        // If lower_bound landed on mergeBlock itself, step back to the
        // predecessor candidate.
        // NOTE(review): when mergeBlock is the set's first element this
        // decrements begin(), which is undefined behavior — confirm callers
        // make that case impossible.
        if (smallLowerBound != smallMemSet.end() && *smallLowerBound == mergeBlock) {
            smallLowerBound--;
        }
        auto smallUpperBound = smallMemSet.upper_bound(mergeBlock);
        // Clamp the successor candidate to the last element.
        if (smallUpperBound == smallMemSet.end()) {
            smallUpperBound--;
        }
        // Try predecessor + mergeBlock first.
        if (smallLowerBound != smallMemSet.end() && canMerge(*smallLowerBound, mergeBlock) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(*smallLowerBound, mergeBlock);
            smallMemSet.erase(smallLowerBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
        // Then mergeBlock + successor.
        if (canMerge(mergeBlock, *smallUpperBound) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(mergeBlock, *smallUpperBound);
            smallMemSet.erase(smallUpperBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
    }
    // Same neighbour search in the medium-block set.
    if (mediumMemSet.size() > 0) {
        auto mediumLowerBound = mediumMemSet.lower_bound(mergeBlock);
        // NOTE(review): same potential --begin() UB as above.
        if (mediumLowerBound != mediumMemSet.end() && *mediumLowerBound == mergeBlock) {
            mediumLowerBound--;
        }
        auto mediumUpperBound = mediumMemSet.upper_bound(mergeBlock);
        if (mediumUpperBound == mediumMemSet.end()) {
            mediumUpperBound--;
        }
        if (mediumLowerBound != mediumMemSet.end() && canMerge(*mediumLowerBound, mergeBlock) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(*mediumLowerBound, mergeBlock);
            mediumMemSet.erase(mediumLowerBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
        if (canMerge(mergeBlock, *mediumUpperBound) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(mergeBlock, *mediumUpperBound);
            mediumMemSet.erase(mediumUpperBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
    }
    // Same neighbour search in the large-block set.
    if (largeMemSet.size() > 0) {
        auto largeLowerBound = largeMemSet.lower_bound(mergeBlock);
        // NOTE(review): same potential --begin() UB as above.
        if (largeLowerBound != largeMemSet.end() && *largeLowerBound == mergeBlock) {
            largeLowerBound--;
        }
        auto largeUpperBound = largeMemSet.upper_bound(mergeBlock);
        if (largeUpperBound == largeMemSet.end()) {
            largeUpperBound--;
        }
        if (largeLowerBound != largeMemSet.end() && canMerge(*largeLowerBound, mergeBlock) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(*largeLowerBound, mergeBlock);
            largeMemSet.erase(largeLowerBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
        if (canMerge(mergeBlock, *largeUpperBound) == LMERGE) {
            memPtrSize newBlock = mergeBlocks(mergeBlock, *largeUpperBound);
            largeMemSet.erase(largeUpperBound);
            delFromMemSet(mergeBlock);
            addToMemSet(newBlock);
            return newBlock;
        }
    }
    // Nothing mergeable in any set: return the sentinel.
    return (std::make_pair((void *)NULL, -1));
}
// Variant without small-region culling: merge each monotone region into its
// largest compatible neighbour, compact region ids to 1..regCount, rewrite
// the per-cell region ids in the layer, and free 'regs' via 'alloc'.
static void MergeAndCompressRegions(dtTileCacheAlloc* alloc, dtTileCacheLayer& layer, dtLayerMonotoneRegion* regs, int nregs)
{
    // Assign provisional 1-based ids.
    for (int i = 0; i < nregs; ++i)
        regs[i].regId = (unsigned short)(i + 1);

    // Merge each region into its largest mergeable neighbour with matching
    // area and chunk ids.
    for (int i = 0; i < nregs; ++i)
    {
        dtLayerMonotoneRegion& reg = regs[i];
        int merge = -1;
        int mergea = 0;
        for (int j = 0; j < reg.neis.size(); ++j)
        {
            const unsigned short nei = (unsigned short)reg.neis[j];
            dtLayerMonotoneRegion& regn = regs[nei];
            if (reg.regId == regn.regId)
                continue;
            if (reg.areaId != regn.areaId || reg.chunkId != regn.chunkId)
                continue;
            if (regn.area > mergea)
            {
                if (canMerge(reg.regId, regn.regId, regs, nregs))
                {
                    mergea = regn.area;
                    merge = (int)nei;
                }
            }
        }
        if (merge != -1)
        {
            // Rewrite every region carrying the old id to the merged id.
            const unsigned short oldId = reg.regId;
            const unsigned short newId = regs[merge].regId;
            for (int j = 0; j < nregs; ++j)
                if (regs[j].regId == oldId)
                    regs[j].regId = newId;
        }
    }

    // Compact the surviving ids into a dense 1..regId range.
    unsigned short regId = 0;
    if (nregs < 256)
    {
        // Compact ids.
        unsigned short remap[256];
        memset(remap, 0, sizeof(unsigned short)*256);
        // Find number of unique regions.
        for (int i = 0; i < nregs; ++i)
            remap[regs[i].regId] = 1;
        // skip region id 0, it's used for skipping minRegionArea
        for (int i = 1; i < 256; ++i)
            if (remap[i])
                remap[i] = ++regId;
        // Remap ids.
        for (int i = 0; i < nregs; ++i)
            regs[i].regId = remap[regs[i].regId];
    }
    else
    {
        // Too many regions for the table-based remap; compact quadratically.
        for (int i = 0; i < nregs; ++i)
            regs[i].remap = true;
        for (int i = 0; i < nregs; ++i)
        {
            // skip region id 0, it's used for skipping minRegionArea
            if (!regs[i].remap || regs[i].regId == 0)
                continue;
            unsigned short oldId = regs[i].regId;
            unsigned short newId = ++regId;
            for (int j = i; j < nregs; ++j)
            {
                if (regs[j].regId == oldId)
                {
                    regs[j].regId = newId;
                    regs[j].remap = false;
                }
            }
        }
    }
    layer.regCount = regId;

    // Write the final region ids back into the layer's cells (0xffff marks
    // a cell with no region).
    const int maxi = (int)layer.header->width * (int)layer.header->height;
    for (int i = 0; i < maxi; ++i)
    {
        if (layer.regs[i] != 0xffff)
            layer.regs[i] = regs[layer.regs[i]].regId;
    }

    alloc->free(regs);
}
// Worker-thread entry point: compute and cache the combined volume for the
// hull pair (mHullA, mHullB). Presumably a result of zero means the pair
// cannot be merged (cf. the combinedVolume != 0 check in combineHulls) —
// confirm against canMerge's contract.
virtual void job_process(void *userData,hacd::HaI32 userId) // RUNS IN ANOTHER THREAD!! MUST BE THREAD SAFE!
{
    mCombinedVolume = canMerge(mHullA,mHullB);
}