/*!
 * Fold an expose event into the rectangle that the background repainter
 * will redraw.  If a pending expose rectangle already exists (or a merge
 * was requested), the event's rectangle is unioned into it; otherwise the
 * event's rectangle becomes the new pending rectangle.  Access to the
 * pending rectangle is serialized against the repainter via spin-waits on
 * the expose-area flags.
 * \param rClip the rectangle of the expose event.
 */
void GR_Graphics::doRepaint(UT_Rect * rClip)
{
    xxx_UT_DEBUGMSG(("SEVIOR: Starting doRepaint \n"));

    // Wait for any in-flight redraw to finish, then park the repainter.
    while (isSpawnedRedraw())
        UT_usleep(100);
    setDontRedraw(true);

    // Spin until we own the pending expose rectangle.
    while (isExposedAreaAccessed())
        UT_usleep(10); // 10 microseconds
    setExposedAreaAccessed(true);

    if (!(isExposePending() || doMerge()))
    {
        // Nothing queued: this event's rectangle becomes the pending area.
        xxx_UT_DEBUGMSG(("Setting Exposed Area in expose handler \n"));
        setPendingRect(rClip->left, rClip->top, rClip->width, rClip->height);
        setRecentRect(rClip);
    }
    else
    {
        // Grow the queued rectangle so it also covers this event.
        xxx_UT_DEBUGMSG(("Doing a union in expose handler\n"));
        unionPendingRect(rClip);
        setRecentRect(rClip);
        setDoMerge(false);
    }

    // Release the rectangle lock, flag work for the repainter, and let
    // the repainter run again.
    setExposedAreaAccessed(false);
    setExposePending(true);
    setDontRedraw(false);

    xxx_UT_DEBUGMSG(("SEVIOR: Finished doRepaint \n"));
    // This event is now handled.
}
/*!
 * Construct the main window: build the Designer-generated UI, wire the
 * three push buttons to their file-selection and merge slots, and show a
 * ready message.
 * \param parent optional parent widget, forwarded to QMainWindow.
 */
MainWindow::MainWindow(QWidget *parent)
    : QMainWindow(parent)
    , ui(new Ui::MainWindow)
{
    ui->setupUi(this);

    // Button 1/2 pick the input files; button 3 runs the merge.
    connect(ui->pushButton,   SIGNAL(clicked()), this, SLOT(selectFirstFile()));
    connect(ui->pushButton_2, SIGNAL(clicked()), this, SLOT(selectSecondFile()));
    connect(ui->pushButton_3, SIGNAL(clicked()), this, SLOT(doMerge()));

    printMessage("Ready.");
}
// Recursive Delaunay Triangulation Procedure // Contains modifications for axis-switching division. void CDelaunay::build(int lo, int hi, EdgePointer *le, EdgePointer *re, int rows) { EdgePointer a, b, c, ldo, rdi, ldi, rdo, maxx, minx; int split, lowrows; int low, high; SitePointer s1, s2, s3; low = lo; high = hi; if ( low < (high-2) ) { // more than three elements; do recursion minx = sp[low]; maxx = sp[high]; if (rows == 1) { // time to switch axis of division spsorty( sp, low, high); rows = 65536; } lowrows = rows/2; split = low - 1 + (int) (0.5 + ((double)(high-low+1) * ((double)lowrows / (double)rows))); build( low, split, &ldo, &ldi, lowrows ); build( split+1, high, &rdi, &rdo, (rows-lowrows) ); doMerge(&ldo, ldi, rdi, &rdo); while (orig(ldo) != minx) { ldo = rprev(ldo); } while (orig(rdo) != maxx) { rdo = (SitePointer) lprev(rdo); } *le = ldo; *re = rdo; } else if (low >= (high - 1)) { // two or one points a = makeEdge(sp[low], sp[high]); *le = a; *re = (EdgePointer) sym(a); } else { // three points // 3 cases: triangles of 2 orientations, and 3 points on a line a = makeEdge((s1 = sp[low]), (s2 = sp[low+1])); b = makeEdge(s2, (s3 = sp[high])); splice((EdgePointer) sym(a), b); if (ccw(s1, s3, s2)) { c = connectLeft(b, a); *le = (EdgePointer) sym(c); *re = c; } else { *le = a; *re = (EdgePointer) sym(b); if (ccw(s1, s2, s3)) { // not colinear c = connectLeft(b, a); } } } }
void Sorts::mergeSort(int * arr, int left, int right) { if(left < right) { // Get mid point int p = (left + right) / 2; // Merge sort left side mergeSort(arr, left, p); // Merge sort right side mergeSort(arr, p + 1, right); // Perform the merging doMerge(arr, left, right); } }
// Registers a freshly produced sparse gamma gap file at level 0 of the
// merge hierarchy and then performs any cascading pair-merges the level
// structure now requires (logarithmic merge scheme).
// \param fn name of the gap file; also registered for temp-file removal.
void addFile(std::string const & fn)
{
	libmaus::util::TempFileRemovalContainer::addTempFile(fn);
	// New entry enters the hierarchy at level 0.
	SparseGammaGapFile S(fn,0);
	{
		// Guard addcnt and the level table L during insertion.
		libmaus::parallel::ScopeLock slock(lock);
		addcnt += 1;
		L[0].push_back(S);
	}
	uint64_t l = 0;
	std::pair<libmaus::gamma::SparseGammaGapFile,libmaus::gamma::SparseGammaGapFile> P;
	// Merge pairs upward while some level holds a mergeable pair.
	// NOTE(review): presumably needMerge() sets l to the level it found a
	// pair at and fills P with that pair — verify, since l is not advanced
	// anywhere else in this loop.
	while ( needMerge(l,P) )
	{
		// The (expensive) merge itself runs without holding the lock;
		// only the insertion of the merged result is guarded.
		libmaus::gamma::SparseGammaGapFile const N = doMerge(l,P,tmpgen.getFileName());
		libmaus::parallel::ScopeLock slock(lock);
		L[l+1].push_back(N);
	}
}
bool merge(std::string const & outputfilename) { libmaus::parallel::ScopeLock slock(lock); // set up merge queue Q std::priority_queue<libmaus::gamma::SparseGammaGapFile> Q; for ( std::map< uint64_t,std::deque<libmaus::gamma::SparseGammaGapFile> >::iterator ita = L.begin(); ita != L.end(); ++ita ) for ( uint64_t i = 0; i < ita->second.size(); ++i ) Q.push(ita->second[i]); // erase level data structure L.clear(); // do merging while ( Q.size() > 1 ) { std::pair<libmaus::gamma::SparseGammaGapFile,libmaus::gamma::SparseGammaGapFile> P; P.first = Q.top(); Q.pop(); P.second = Q.top(); Q.pop(); libmaus::gamma::SparseGammaGapFile N = doMerge(P.second.level,P,tmpgen.getFileName()); Q.push(N); } if ( !Q.empty() ) { rename(Q.top().fn.c_str(),outputfilename.c_str()); return true; } else { return false; } }
// Attempts a single merge step: finds the pair of hulls whose combined
// hull has the smallest volume (memoising pair tests in mHasBeenTested)
// and, if the pair beats the small-cluster threshold or the hull-count
// target has not yet been met, replaces the pair with the merged hull.
// Returns true if a merge was performed (callers typically loop on this).
bool combineHulls(void)
{
  bool combine = false;
  // each new convex hull is given a unique guid.
  // A hash map is used to make sure that no hulls are tested twice.
  ChUllVector output;
  HaU32 count = (HaU32)mChulls.size();
  ChUll *mergeA = NULL;
  ChUll *mergeB = NULL;
  // Early out to save walking all the hulls. Hulls are combined based on
  // a target number or on a number of generated hulls.
  bool mergeTargetMet = (HaU32)mChulls.size() <= mMergeNumHulls;
  if (mergeTargetMet && (mSmallClusterThreshold == 0.0f))
    return false;
  // Best (smallest) combined volume seen so far; seeded with the total
  // volume so only pairs that improve on it are considered.
  HaF32 bestVolume = mTotalVolume;
  {
    // Examine every unordered pair (i, j) of hulls exactly once.
    for (HaU32 i=0; i<count; i++)
    {
      ChUll *cr = mChulls[i];
      for (HaU32 j=i+1; j<count; j++)
      {
        ChUll *match = mChulls[j];
        HaU32 hashIndex;
        // Order-independent pair key: smaller guid goes in the high bits.
        // NOTE(review): assumes guids fit in 16 bits — TODO confirm.
        if ( match->mGuid < cr->mGuid )
        {
          hashIndex = (match->mGuid << 16) | cr->mGuid;
        }
        else
        {
          hashIndex = (cr->mGuid << 16 ) | match->mGuid;
        }
        HaF32 combinedVolume;
        // Consult the memo table before running the expensive merge test.
        HaF32 *v = mHasBeenTested->find(hashIndex);
        if ( v == NULL )
        {
          combinedVolume = canMerge(cr,match);
          (*mHasBeenTested)[hashIndex] = combinedVolume;
        }
        else
        {
          combinedVolume = *v;
        }
        // A combined volume of 0 marks a pair that cannot be merged.
        if ( combinedVolume != 0 )
        {
          if ( combinedVolume < bestVolume )
          {
            bestVolume = combinedVolume;
            mergeA = cr;
            mergeB = match;
          }
        }
      }
    }
  }
  // If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
  // do the merge.
  bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
  if ( mergeA && (thresholdBelow || !mergeTargetMet))
  {
    ChUll *merge = doMerge(mergeA,mergeB);
    HaF32 volumeA = mergeA->mVolume;
    HaF32 volumeB = mergeB->mVolume;
    if ( merge )
    {
      combine = true;
      output.push_back(merge);
      // Keep every hull except the two that were just merged.
      for (ChUllVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
      {
        ChUll *h = (*j);
        if ( h !=mergeA && h != mergeB )
        {
          output.push_back(h);
        }
      }
      delete mergeA;
      delete mergeB;
      // Remove the old volumes and add the new one.
      mTotalVolume -= (volumeA + volumeB);
      mTotalVolume += merge->mVolume;
    }
    // NOTE(review): if doMerge returned NULL, output is still empty here
    // and this assignment clears mChulls entirely — confirm that is the
    // intended failure behavior.
    mChulls = output;
  }
  return combine;
}
// Parallel variant of the hull-merge step: combined volumes of not-yet
// tested hull pairs are computed as jobs on the supplied job swarm, the
// results are memoised in mHasBeenTested, and then the pair with the
// smallest combined volume is merged (subject to the small-cluster
// threshold / hull-count target).  Returns true if a merge was performed.
bool combineHulls(JOB_SWARM_STANDALONE::JobSwarmContext *jobSwarmContext)
{
  bool combine = false;
  // each new convex hull is given a unique guid.
  // A hash map is used to make sure that no hulls are tested twice.
  CHullVector output;
  HaU32 count = (HaU32)mChulls.size();
  // Early out to save walking all the hulls. Hulls are combined based on
  // a target number or on a number of generated hulls.
  bool mergeTargetMet = (HaU32)mChulls.size() <= mMergeNumHulls;
  if (mergeTargetMet && (mSmallClusterThreshold == 0.0f))
    return false;
  hacd::vector< CombineVolumeJob > jobs;
  // First, see if there are any pairs of hulls whose combined volume we
  // have not yet calculated.  If there are, then we add them to the jobs list.
  {
    for (HaU32 i=0; i<count; i++)
    {
      CHull *cr = mChulls[i];
      for (HaU32 j=i+1; j<count; j++)
      {
        CHull *match = mChulls[j];
        HaU32 hashIndex;
        // Order-independent pair key: smaller guid goes in the high bits.
        // NOTE(review): assumes guids fit in 16 bits — TODO confirm.
        if ( match->mGuid < cr->mGuid )
        {
          hashIndex = (match->mGuid << 16) | cr->mGuid;
        }
        else
        {
          hashIndex = (cr->mGuid << 16 ) | match->mGuid;
        }
        HaF32 *v = mHasBeenTested->find(hashIndex);
        if ( v == NULL )
        {
          CombineVolumeJob job(cr,match,hashIndex);
          jobs.push_back(job);
          (*mHasBeenTested)[hashIndex] = 0.0f; // assign it to some value so we don't try to create more than one job for it.
        }
      }
    }
  }
  // ok..we have posted all of the jobs, let's solve them in parallel
  for (hacd::HaU32 i=0; i<jobs.size(); i++)
  {
    jobs[i].startJob(jobSwarmContext);
  }
  // Pump the swarm until every posted job has completed.
  // NOTE(review): gCombineCount is presumably incremented per job and
  // decremented as each finishes — confirm; this loop busy-waits on it.
  while ( gCombineCount != 0 )
  {
    jobSwarmContext->processSwarmJobs(); // solve merged hulls in parallel
  }
  // once we have the answers, now put the results into the hash table.
  for (hacd::HaU32 i=0; i<jobs.size(); i++)
  {
    CombineVolumeJob &job = jobs[i];
    (*mHasBeenTested)[job.mHashIndex] = job.mCombinedVolume;
  }
  HaF32 bestVolume = 1e9;
  CHull *mergeA = NULL;
  CHull *mergeB = NULL;
  // now find the two hulls which merged produce the smallest combined volume.
  {
    for (HaU32 i=0; i<count; i++)
    {
      CHull *cr = mChulls[i];
      for (HaU32 j=i+1; j<count; j++)
      {
        CHull *match = mChulls[j];
        HaU32 hashIndex;
        // Same order-independent pair key as above.
        if ( match->mGuid < cr->mGuid )
        {
          hashIndex = (match->mGuid << 16) | cr->mGuid;
        }
        else
        {
          hashIndex = (cr->mGuid << 16 ) | match->mGuid;
        }
        // Every pair must have an entry by now (0 marks an unmergeable pair).
        HaF32 *v = mHasBeenTested->find(hashIndex);
        HACD_ASSERT(v);
        if ( v && *v != 0 && *v < bestVolume )
        {
          bestVolume = *v;
          mergeA = cr;
          mergeB = match;
        }
      }
    }
  }
  // If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
  // do the merge.
  bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
  if ( mergeA && (thresholdBelow || !mergeTargetMet))
  {
    CHull *merge = doMerge(mergeA,mergeB);
    HaF32 volumeA = mergeA->mVolume;
    HaF32 volumeB = mergeB->mVolume;
    if ( merge )
    {
      combine = true;
      output.push_back(merge);
      // Keep every hull except the two that were just merged.
      for (CHullVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
      {
        CHull *h = (*j);
        if ( h !=mergeA && h != mergeB )
        {
          output.push_back(h);
        }
      }
      delete mergeA;
      delete mergeB;
      // Remove the old volumes and add the new one.
      mTotalVolume -= (volumeA + volumeB);
      mTotalVolume += merge->mVolume;
    }
    // NOTE(review): if doMerge returned NULL, output is still empty here
    // and this assignment clears mChulls entirely — confirm that is the
    // intended failure behavior.
    mChulls = output;
  }
  return combine;
}