void BtreeBasedBuilder::addKeysToPhaseOne(NamespaceDetails* d, const char* ns, const IndexDetails& idx, const BSONObj& order, SortPhaseOne* phaseOne, int64_t nrecords, ProgressMeter* progressMeter, bool mayInterrupt, int idxNo) { shared_ptr<Cursor> cursor = theDataFileMgr.findAll( ns ); phaseOne->sortCmp.reset(getComparison(idx.version(), idx.keyPattern())); phaseOne->sorter.reset(new BSONObjExternalSorter(phaseOne->sortCmp.get())); phaseOne->sorter->hintNumObjects( nrecords ); auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(d, idxNo)); auto_ptr<BtreeBasedAccessMethod> iam(CatalogHack::getBtreeBasedIndex(desc.get())); while ( cursor->ok() ) { RARELY killCurrentOp.checkForInterrupt( !mayInterrupt ); BSONObj o = cursor->current(); DiskLoc loc = cursor->currLoc(); BSONObjSet keys; iam->getKeys(o, &keys); phaseOne->addKeys(keys, loc, mayInterrupt); cursor->advance(); progressMeter->hit(); if ( logLevel > 1 && phaseOne->n % 10000 == 0 ) { printMemInfo( "\t iterating objects" ); } } }
void BtreeBasedBuilder::addKeysToPhaseOne(Collection* collection, IndexDescriptor* idx, const BSONObj& order, SortPhaseOne* phaseOne, ProgressMeter* progressMeter, bool mayInterrupt ) { phaseOne->sortCmp.reset(getComparison(idx->version(), idx->keyPattern())); phaseOne->sorter.reset(new BSONObjExternalSorter(phaseOne->sortCmp.get())); phaseOne->sorter->hintNumObjects( collection->numRecords() ); BtreeBasedAccessMethod* iam =collection->getIndexCatalog()->getBtreeBasedIndex( idx ); auto_ptr<Runner> runner(InternalPlanner::collectionScan(collection->ns().ns())); BSONObj o; DiskLoc loc; Runner::RunnerState state; while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&o, &loc))) { RARELY killCurrentOp.checkForInterrupt( !mayInterrupt ); BSONObjSet keys; iam->getKeys(o, &keys); phaseOne->addKeys(keys, loc, mayInterrupt); progressMeter->hit(); if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2)) && phaseOne->n % 10000 == 0 ) { printMemInfo( "\t iterating objects" ); } } uassert(17050, "Internal error reading docs from collection", Runner::RUNNER_EOF == state); }
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) { // don't unmap, just mmap over the old region void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED , fd , 0 ); if( x == MAP_FAILED ) { int err = errno; error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl; log() << "aborting" << endl; printMemInfo(); abort(); } verify( x == oldPrivateAddr ); return x; }
void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) { #if defined(__sunos__) // SERVER-8795 verify( Lock::isW() ); LockMongoFilesExclusive lockMongoFiles; #endif // don't unmap, just mmap over the old region void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED , fd , 0 ); if( x == MAP_FAILED ) { int err = errno; error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl; log() << "aborting" << endl; printMemInfo(); abort(); } verify( x == oldPrivateAddr ); return x; }
// Bottom-up ("fast") index build: phase 1 scans the collection and externally
// sorts all keys; phases 2 and 3 bulk-build the btree from the sorted stream.
// Returns the number of keys fed to the sorter (phase1.n).
uint64_t BtreeBasedBuilder::fastBuildIndex(const char* ns, NamespaceDetails* d, IndexDetails& idx,
                                           bool mayInterrupt, int idxNo) {
    CurOp * op = cc().curop();
    Timer t;
    tlog(1) << "fastBuildIndex " << ns << ' ' << idx.info.obj().toString() << endl;
    // Unique constraint is enforced unless the index isn't unique or the
    // server is configured to ignore unique indexes.
    bool dupsAllowed = !idx.unique() || ignoreUniqueIndex(idx);
    // dropDups also kicks in during repair so a rebuild can't fail on dups.
    bool dropDups = idx.dropDups() || inDBRepair;
    BSONObj order = idx.keyPattern();
    // Journal-declare and null out the btree head before rebuilding it.
    getDur().writingDiskLoc(idx.head).Null();
    if ( logLevel > 1 ) printMemInfo( "before index start" );
    /* get and sort all the keys ----- */
    ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
                                          "Index: (1/3) External Sort Progress",
                                          d->stats.nrecords,
                                          10));
    SortPhaseOne phase1;
    addKeysToPhaseOne(d, ns, idx, order, &phase1, d->stats.nrecords, pm.get(), mayInterrupt, idxNo );
    pm.finished();
    BSONObjExternalSorter& sorter = *(phase1.sorter);
    // If any document produced more than one key, mark the index multikey.
    if( phase1.multi ) { d->setIndexIsMultikey(ns, idxNo); }
    if ( logLevel > 1 ) printMemInfo( "before final sort" );
    phase1.sorter->sort( mayInterrupt );
    if ( logLevel > 1 ) printMemInfo( "after final sort" );
    // Log at level 0 if the sort was slow (>5s), otherwise at level 1.
    LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles()
                                 << " files " << " in " << t.seconds() << " secs" << endl;
    set<DiskLoc> dupsToDrop;
    /* build index --- */
    // Dispatch on the on-disk btree format version; phases 2+3 collect any
    // duplicate records into dupsToDrop when dropDups is set.
    if( idx.version() == 0 )
        buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, &phase1, pm, t, mayInterrupt);
    else if( idx.version() == 1 )
        buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, &phase1, pm, t, mayInterrupt);
    else
        verify(false);
    if( dropDups )
        log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
    // Physically delete the duplicate records that were collected above.
    BtreeBasedBuilder::doDropDups(ns, d, dupsToDrop, mayInterrupt);
    return phase1.n;
}
// Bottom-up ("fast") index build, Collection/IndexDescriptor flavor: phase 1
// scans the collection and externally sorts all keys; phases 2 and 3
// bulk-build the btree from the sorted stream.  Returns the number of keys
// fed to the sorter (phase1.n).
uint64_t BtreeBasedBuilder::fastBuildIndex( Collection* collection,
                                            IndexDescriptor* idx,
                                            bool mayInterrupt ) {
    CurOp * op = cc().curop();
    Timer t;
    MONGO_TLOG(1) << "fastBuildIndex " << collection->ns() << ' ' << idx->toString() << endl;
    // Unique constraint is enforced unless the index isn't unique or the
    // server is configured to ignore unique indexes.
    bool dupsAllowed = !idx->unique() || ignoreUniqueIndex(idx->getOnDisk());
    // dropDups also kicks in during repair so a rebuild can't fail on dups.
    bool dropDups = idx->dropDups() || inDBRepair;
    BSONObj order = idx->keyPattern();
    // Journal-declare and null out the btree head before rebuilding it.
    getDur().writingDiskLoc(idx->getOnDisk().head).Null();
    if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
        printMemInfo( "before index start" );
    /* get and sort all the keys ----- */
    ProgressMeterHolder pm(op->setMessage("index: (1/3) external sort",
                                          "Index: (1/3) External Sort Progress",
                                          collection->numRecords(),
                                          10));
    SortPhaseOne phase1;
    addKeysToPhaseOne(collection, idx, order, &phase1, pm.get(), mayInterrupt );
    pm.finished();
    BSONObjExternalSorter& sorter = *(phase1.sorter);
    // If any document produced more than one key, mark the index multikey.
    if( phase1.multi ) {
        collection->getIndexCatalog()->markMultikey( idx );
    }
    if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
        printMemInfo( "before final sort" );
    phase1.sorter->sort( mayInterrupt );
    if ( logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(2) ) )
        printMemInfo( "after final sort" );
    // Log at level 0 if the sort was slow (>5s), otherwise at level 1.
    LOG(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles()
                                 << " files " << " in " << t.seconds() << " secs" << endl;
    set<DiskLoc> dupsToDrop;
    /* build index --- */
    // Dispatch on the on-disk btree format version; phases 2+3 collect any
    // duplicate records into dupsToDrop when dropDups is set.
    if( idx->version() == 0 )
        buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, &phase1, pm, t, mayInterrupt);
    else if( idx->version() == 1 )
        buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, &phase1, pm, t, mayInterrupt);
    else
        verify(false);
    if( dropDups )
        log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
    // Physically delete the duplicate records that were collected above.
    doDropDups(collection, dupsToDrop, mayInterrupt);
    return phase1.n;
}
// throws DBException
// Bottom-up ("fast") index build: phase 1 scans the collection and externally
// sorts all keys (unless a precomputed SortPhaseOne was supplied via the
// file-level 'precalced' pointer); phases 2 and 3 bulk-build the btree from
// the sorted stream.  Returns the number of keys fed to the sorter.
unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
    CurOp * op = cc().curop();
    Timer t;
    tlog(1) << "fastBuildIndex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
    bool dupsAllowed = !idx.unique();
    // dropDups also kicks in during repair so a rebuild can't fail on dups.
    bool dropDups = idx.dropDups() || inDBRepair;
    BSONObj order = idx.keyPattern();
    // Journal-declare and null out the btree head before rebuilding it.
    getDur().writingDiskLoc(idx.head).Null();
    if ( logLevel > 1 ) printMemInfo( "before index start" );
    /* get and sort all the keys ----- */
    ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
    SortPhaseOne _ours;
    // Use the precomputed phase-one result if one was provided; otherwise run
    // the collection scan ourselves into the stack-local '_ours'.
    SortPhaseOne *phase1 = precalced;
    if( phase1 == 0 ) {
        phase1 = &_ours;
        SortPhaseOne& p1 = *phase1;
        shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
        p1.sorter.reset( new BSONObjExternalSorter(idx.idxInterface(), order) );
        // Size hint so the sorter can plan its spill files.
        p1.sorter->hintNumObjects( d->stats.nrecords );
        const IndexSpec& spec = idx.getSpec();
        while ( c->ok() ) {
            BSONObj o = c->current();
            DiskLoc loc = c->currLoc();
            p1.addKeys(spec, o, loc);
            c->advance();
            pm.hit();
            // At higher verbosity, dump memory stats every 10k keys.
            if ( logLevel > 1 && p1.n % 10000 == 0 ) {
                printMemInfo( "\t iterating objects" );
            }
        };
    }
    pm.finished();
    BSONObjExternalSorter& sorter = *(phase1->sorter);
    // If any document produced more than one key, mark the index multikey.
    if( phase1->multi )
        d->setIndexIsMultikey(ns, idxNo);
    if ( logLevel > 1 ) printMemInfo( "before final sort" );
    phase1->sorter->sort();
    if ( logLevel > 1 ) printMemInfo( "after final sort" );
    // Log at level 0 if the sort was slow (>5s), otherwise at level 1.
    log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles()
                                 << " files " << " in " << t.seconds() << " secs" << endl;
    set<DiskLoc> dupsToDrop;
    /* build index --- */
    // Dispatch on the on-disk btree format version; phases 2+3 collect any
    // duplicate records into dupsToDrop when dropDups is set.
    if( idx.version() == 0 )
        buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, phase1, pm, t);
    else if( idx.version() == 1 )
        buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop,
                                     op, phase1, pm, t);
    else
        verify(false);
    if( dropDups )
        log() << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
    // Physically delete the duplicate records collected above, replicating
    // each delete when this node is master, and committing periodically.
    for( set<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ ){
        theDataFileMgr.deleteRecord( ns,
                                     i->rec(),
                                     *i,
                                     false /* cappedOk */ ,
                                     true /* noWarn */ ,
                                     isMaster( ns ) /* logOp */ );
        getDur().commitIfNeeded();
    }
    return phase1->n;
}