void run() {
    dblock lk;
    const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeReverse";
    // Populate the collection with a:0..9 and an index on {a:1}.
    {
        DBDirectClient client;
        for( int value = 0; value < 10; ++value )
            client.insert( ns, BSON( "a" << value ) );
        ASSERT( client.ensureIndex( ns, BSON( "a" << 1 ) ) );
    }
    // Two ranges, [1,2] and [4,6], traversed in reverse (direction -1).
    int bounds[] = { 1, 2, 4, 6 };
    boost::shared_ptr< FieldRangeVector > frv( vec( bounds, 4, -1 ) );
    Client::Context ctx( ns );
    scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1,
                                                   nsdetails( ns )->idx(1), frv, -1 ) );
    BtreeCursor& c = *_c.get();
    ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
    // The second range yields 6,5,4 and then the first yields 2,1.
    double expected[] = { 6, 5, 4, 2, 1 };
    for( int i = 0; i < 5; ++i ) {
        ASSERT( c.ok() );
        ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
        c.advance();
    }
    ASSERT( !c.ok() );
}
/* Verify that a btree cursor built from 'spec' returns exactly the documents
   in _objs that match 'spec' — every produced document matches, and the
   produced count equals the matching count. */
void check( const BSONObj &spec ) {
    _c.ensureIndex( ns(), idx() );
    Client::Context ctx( ns() );
    FieldRangeSet frs( ns(), spec ); // orphan spec for this test.
    // Stack-allocate the IndexSpec: the original heap allocation was never
    // freed (leak).  It outlives 'frv' and the cursor below, which is all
    // the lifetime that is needed.
    IndexSpec idxSpec( idx() );
    boost::shared_ptr< FieldRangeVector > frv( new FieldRangeVector( frs, idxSpec, direction() ) );
    BtreeCursor c( nsdetails( ns() ), 1, nsdetails( ns() )->idx( 1 ), frv, direction() );
    Matcher m( spec );
    // Every document the cursor produces must match the spec.
    int count = 0;
    while( c.ok() ) {
        ASSERT( m.matches( c.current() ) );
        c.advance();
        ++count;
    }
    // And the cursor must produce exactly the documents that match.
    int expectedCount = 0;
    for( vector< BSONObj >::const_iterator i = _objs.begin(); i != _objs.end(); ++i ) {
        if ( m.matches( *i ) ) {
            ++expectedCount;
        }
    }
    ASSERT_EQUALS( expectedCount, count );
}
/* Return true when a document with _id == gtid exists in the replica-set oplog. */
bool gtidExistsInOplog(GTID gtid) {
    Client::ReadContext ctx(rsoplog);
    // TODO: Should this be using rsOplogDetails, verifying non-null?
    NamespaceDetails *oplogDetails = nsdetails(rsoplog);
    if (oplogDetails == NULL) {
        return false;
    }
    BSONObjBuilder query;
    addGTIDToBSON("_id", gtid, query);
    BSONObj result;
    return oplogDetails->findOne( query.done(), result );
}
void run() {
    // Insert some documents; each value of 'a' appears four times.
    int32_t nDocs = 1000;
    for( int32_t i = 0; i < nDocs; ++i ) {
        _client.insert( _ns, BSON( "a" << ( i / 4 ) ) );
    }
    // Collect the locations of the duplicates (all but the first occurrence
    // of each 'a' value, relying on insertion order).
    set<DiskLoc> dups;
    int32_t previous = -1;
    for( boost::shared_ptr<Cursor> cursor = theDataFileMgr.findAll( _ns );
         cursor->ok();
         cursor->advance() ) {
        int32_t current = cursor->current()[ "a" ].Int();
        if ( current == previous ) {
            dups.insert( cursor->currLoc() );
        }
        previous = current;
    }
    // Check the expected number of dups.  There must be enough to trigger a RARELY
    // condition when deleting them.
    ASSERT_EQUALS( static_cast<uint32_t>( nDocs / 4 * 3 ), dups.size() );
    // Kill the current operation.
    cc().curop()->kill();
    if ( _mayInterrupt ) {
        // doDropDups() aborts.
        ASSERT_THROWS( BtreeBasedBuilder::doDropDups( _ns, nsdetails( _ns ), dups, _mayInterrupt ),
                       UserException );
        // Not all dups are dropped.
        ASSERT( static_cast<uint32_t>( nDocs / 4 ) < _client.count( _ns ) );
    }
    else {
        // doDropDups() succeeds.
        BtreeBasedBuilder::doDropDups( _ns, nsdetails( _ns ), dups, _mayInterrupt );
        // The expected number of documents were dropped.
        ASSERT_EQUALS( static_cast<uint32_t>( nDocs / 4 ), _client.count( _ns ) );
    }
}
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string ns = dbname + "." + cmdObj.firstElement().valuestr(); NamespaceDetails *nsd = nsdetails(ns); if (NULL == nsd) { errmsg = "can't find ns"; return false; } vector<int> idxs; nsd->findIndexByType(GEOSEARCHNAME, idxs); if (idxs.size() == 0) { errmsg = "no geoSearch index"; return false; } if (idxs.size() > 1) { errmsg = "more than 1 geosearch index"; return false; } BSONElement nearElt = cmdObj["near"]; BSONElement maxDistance = cmdObj["maxDistance"]; BSONElement search = cmdObj["search"]; uassert(13318, "near needs to be an array", nearElt.isABSONObj()); uassert(13319, "maxDistance needs a number", maxDistance.isNumber()); uassert(13320, "search needs to be an object", search.type() == Object); unsigned limit = 50; if (cmdObj["limit"].isNumber()) limit = static_cast<unsigned>(cmdObj["limit"].numberInt()); int idxNum = idxs[0]; IndexDetails& id = nsd->idx(idxNum); if (CatalogHack::testIndexMigration()) { auto_ptr<IndexDescriptor> desc(CatalogHack::getDescriptor(nsd, idxNum)); auto_ptr<HaystackAccessMethod> ham(new HaystackAccessMethod(desc.get())); ham->searchCommand(nearElt.Obj(), maxDistance.numberDouble(), search.Obj(), &result, limit); } else { GeoHaystackSearchIndex *si = static_cast<GeoHaystackSearchIndex*>(id.getSpec().getType()); verify(&id == si->getDetails()); si->searchCommand(nsd, nearElt.Obj(), maxDistance.numberDouble(), search.Obj(), result, limit); } return 1; }
DbSet::~DbSet() {
    // A default-constructed set never created a collection; nothing to clean up.
    if ( name_.empty() )
        return;
    try {
        Client::Context c( name_.c_str() );
        if ( nsdetails( name_.c_str() ) ) {
            string errmsg;
            BSONObjBuilder result;
            dropCollection( name_, errmsg, result );
        }
    }
    catch ( ... ) {
        // Destructors must never throw; log and continue.
        problem() << "exception cleaning up DbSet" << endl;
    }
}
void DbSet::reset( const string &name, const BSONObj &key ) { if ( !name.empty() ) name_ = name; if ( !key.isEmpty() ) key_ = key.getOwned(); Client::Context c( name_.c_str() ); if ( nsdetails( name_.c_str() ) ) { Helpers::emptyCollection( name_.c_str() ); } else { string err; massert( 10303 , err, userCreateNS( name_.c_str(), fromjson( "{autoIndexId:false}" ), err, false ) ); } Helpers::ensureIndex( name_.c_str(), key_, true, "setIdx" ); }
void run() {
    dblock lk;
    const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
    // Populate a:0..9 and a:100..109, indexed on {a:1}.
    {
        DBDirectClient client;
        for( int value = 0; value < 10; ++value )
            client.insert( ns, BSON( "a" << value ) );
        for( int value = 100; value < 110; ++value )
            client.insert( ns, BSON( "a" << value ) );
        ASSERT( client.ensureIndex( ns, BSON( "a" << 1 ) ) );
    }
    // Ranges [-50,2], [40,60], [109,200]; only 0,1,2 and 109 exist in them.
    int bounds[] = { -50, 2, 40, 60, 109, 200 };
    boost::shared_ptr< FieldRangeVector > frv( vec( bounds, 6 ) );
    Client::Context ctx( ns );
    BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 );
    ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
    double expected[] = { 0, 1, 2, 109 };
    for( int i = 0; i < 4; ++i ) {
        ASSERT( c.ok() );
        ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
        c.advance();
    }
    ASSERT( !c.ok() );
}
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { string ns = dbname + "." + cmdObj.firstElement().valuestrsafe(); NamespaceDetails * d = nsdetails( ns.c_str() ); if ( !cmdLine.quiet ) tlog() << "CMD: validate " << ns << endl; if ( ! d ) { errmsg = "ns not found"; return 0; } result.append( "ns", ns ); result.append( "result" , validateNS( ns.c_str() , d, &cmdObj ) ); return 1; }
/* Delete every document in 'ns' whose key for the index matching min/max lies
   in [min, max] (max included only when maxInclusive).  Each deletion is
   logged to the oplog.  If 'yield' is set the lock is yielded periodically;
   when a yield fails (e.g. the collection was dropped) the scan stops early.
   Returns the number of documents removed. */
long long Helpers::removeRange( const string& ns , const BSONObj& min , const BSONObj& max , bool yield , bool maxInclusive , RemoveCallback * callback ) {
    BSONObj keya , keyb;
    BSONObj minClean = toKeyFormat( min , keya );
    BSONObj maxClean = toKeyFormat( max , keyb );
    // min and max must describe the same index key pattern.
    assert( keya == keyb );

    Client::Context ctx(ns);

    NamespaceDetails* nsd = nsdetails( ns.c_str() );
    if ( ! nsd )
        return 0;

    // Locate the index for the key pattern; it must exist.
    int ii = nsd->findIndexByKeyPattern( keya );
    assert( ii >= 0 );

    long long num = 0;

    IndexDetails& i = nsd->idx( ii );

    // Forward btree scan over [minClean, maxClean].
    shared_ptr<Cursor> c( new BtreeCursor( nsd , ii , i , minClean , maxClean , maxInclusive, 1 ) );
    auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
    cc->setDoingDeletes( true );

    while ( c->ok() ) {
        DiskLoc rloc = c->currLoc();

        if ( callback )
            callback->goingToDelete( c->current() );

        // Advance past the record before deleting it so the cursor never
        // points at a freed location.
        c->advance();
        c->noteLocation();

        logOp( "d" , ns.c_str() , rloc.obj()["_id"].wrap() );
        theDataFileMgr.deleteRecord(ns.c_str() , rloc.rec(), rloc);
        num++;

        c->checkLocation();

        getDur().commitIfNeeded();

        if ( yield && ! cc->yieldSometimes() ) {
            // cursor got finished by someone else, so we're done
            cc.release(); // if the collection/db is dropped, cc may be deleted
            break;
        }
    }

    return num;
}
void run() { // RARELY shoud be once/128x for (int i=0; i<150; i++) { insert(); updateSucceed(); } DBDirectClient client; int count = (int) client.count(cappedNs(), BSONObj()); verify(count > 1); // Just to be sure, no _id index, right? Client::Context ctx(cappedNs()); NamespaceDetails *nsd = nsdetails(cappedNs().c_str()); verify(nsd->findIdIndex() == -1); }
/* Reset a capped collection to its initial empty state while keeping its
   extents allocated.  Requires no indexes and no background index build. */
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
    DEV verify( this == nsdetails(ns) );
    massert( 13424, "collection must be capped", capped );
    massert( 13425, "background index build in progress", !indexBuildInProgress );
    massert( 13426, "indexes present", nIndexes == 0 );

    // Clear all references to this namespace.
    ClientCursor::invalidate( ns );
    NamespaceDetailsTransient::clearForPrefix( ns );

    // Get a writeable reference to 'this' and reset all pertinent
    // attributes.
    NamespaceDetails *t = writingWithoutExtra();

    t->cappedLastDelRecLastExtent() = DiskLoc();
    t->cappedListOfAllDeletedRecords() = DiskLoc();

    // preserve firstExtent/lastExtent
    t->capExtent = firstExtent;
    t->stats.datasize = stats.nrecords = 0;
    // lastExtentSize preserve
    // nIndexes preserve 0
    // capped preserve true
    // max preserve
    t->paddingFactor = 1.0;
    t->flags = 0;
    t->capFirstNewRecord = DiskLoc();
    t->capFirstNewRecord.setInvalid();
    t->cappedLastDelRecLastExtent().setInvalid();
    // dataFileVersion preserve
    // indexFileVersion preserve
    t->multiKeyIndexBits = 0;
    t->reservedA = 0;
    t->extraOffset = 0;
    // indexBuildInProgress preserve 0
    memset(t->reserved, 0, sizeof(t->reserved));

    // Reset all existing extents and recreate the deleted list.
    for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
        DiskLoc prev = ext.ext()->xprev;
        DiskLoc next = ext.ext()->xnext;
        // reuse() wipes the extent; restore its chain links afterwards
        // (journalled via writing()).
        DiskLoc empty = ext.ext()->reuse( ns, true );
        ext.ext()->xprev.writing() = prev;
        ext.ext()->xnext.writing() = next;
        addDeletedRec( empty.drec(), empty );
    }
}
/* Return a cursor over every record in 'ns', starting from 'startLoc' when
   one is given.  Missing namespace or empty first extent yields an empty
   BasicCursor; capped collections get a ForwardCappedCursor. */
shared_ptr<Cursor> DataFileMgr::findAll(const StringData& ns, const DiskLoc &startLoc) {
    NamespaceDetails * d = nsdetails( ns );
    if ( ! d )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
    DiskLoc loc = d->firstExtent();
    if ( loc.isNull() )
        return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
    Extent *e = getExtent(loc);

    DEBUGGING {
        // Dump the extent chain and deleted-record lists for debugging builds.
        out() << "listing extents for " << ns << endl;
        DiskLoc tmp = loc;
        set<DiskLoc> extents;
        while ( 1 ) {
            Extent *f = getExtent(tmp);
            out() << "extent: " << tmp.toString() << endl;
            extents.insert(tmp);
            tmp = f->xnext;
            if ( tmp.isNull() )
                break;
            f = f->getNextExtent();
        }
        out() << endl;
        d->dumpDeleted(&extents);
    }

    if ( d->isCapped() )
        return shared_ptr<Cursor>( ForwardCappedCursor::make( d , startLoc ) );

    if ( !startLoc.isNull() )
        return shared_ptr<Cursor>(new BasicCursor( startLoc ));

    // Skip over leading empty extents to find the first record.
    while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
        /* todo: if extent is empty, free it for reuse elsewhere.
           that is a bit complicated have to clean up the freelists.
        */
        RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead. ns:" << ns << endl;
        // find a nonempty extent
        // it might be nice to free the whole extent here!  but have to clean up free recs then.
        e = e->getNextExtent();
    }

    return shared_ptr<Cursor>(new BasicCursor( e->firstRecord ));
}
bool touch( std::string& ns, std::string& errmsg, bool touch_data, bool touch_indexes, BSONObjBuilder& result ) { if (touch_data) { log() << "touching namespace " << ns << endl; Timer t; int numRanges = touchNs( ns ); result.append( "data", BSON( "numRanges" << numRanges << "millis" << t.millis() ) ); log() << "touching namespace " << ns << " complete" << endl; } if (touch_indexes) { Timer t; // enumerate indexes std::vector< std::string > indexes; { Client::ReadContext ctx(ns); NamespaceDetails *nsd = nsdetails(ns); massert( 16153, "namespace does not exist", nsd ); NamespaceDetails::IndexIterator ii = nsd->ii(); while ( ii.more() ) { IndexDetails& idx = ii.next(); indexes.push_back( idx.indexNamespace() ); } } int numRanges = 0; for ( std::vector<std::string>::const_iterator it = indexes.begin(); it != indexes.end(); it++ ) { numRanges += touchNs( *it ); } result.append( "indexes", BSON( "num" << static_cast<int>(indexes.size()) << "numRanges" << numRanges << "millis" << t.millis() ) ); } return true; }
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string ns = dbname + "." + cmdObj.firstElement().valuestrsafe(); const NamespaceDetails* nsd = nsdetails(ns.c_str()); if (!cmdLine.quiet) { tlog() << "CMD: indexStats " << ns << endl; } if (!nsd) { errmsg = "ns not found"; return false; } IndexStatsParams params; // { index: _index_name } BSONElement indexName = cmdObj["index"]; if (!indexName.ok() || indexName.type() != String) { errmsg = "an index name is required, use {index: \"indexname\"}"; return false; } params.indexName = indexName.String(); BSONElement expandNodes = cmdObj["expandNodes"]; if (expandNodes.ok()) { if (expandNodes.type() != mongo::Array) { errmsg = "expandNodes must be an array of numbers"; return false; } vector<BSONElement> arr = expandNodes.Array(); for (vector<BSONElement>::const_iterator it = arr.begin(); it != arr.end(); ++it) { if (!it->isNumber()) { errmsg = "expandNodes must be an array of numbers"; return false; } params.expandNodes.push_back(int(it->Number())); } } BSONObjBuilder resultBuilder; if (!runInternal(nsd, params, errmsg, resultBuilder)) return false; result.appendElements(resultBuilder.obj()); return true; }
/* Scan dbName's system.namespaces for collections with interrupted index
   builds.  Depending on --noIndexBuildRetry, either discard the partial
   indexes or retry building them. */
void IndexRebuilder::checkDB(const std::string& dbName, bool* firstTime) {
    const std::string systemNS = dbName + ".system.namespaces";
    DBDirectClient cli;
    scoped_ptr<DBClientCursor> cursor(cli.query(systemNS, Query()));

    // This depends on system.namespaces not changing while we iterate
    while (cursor->more()) {
        BSONObj nsDoc = cursor->next();
        const char* ns = nsDoc["name"].valuestrsafe();

        Client::WriteContext ctx(ns);
        NamespaceDetails* nsd = nsdetails(ns);
        if (!nsd || !nsd->indexBuildsInProgress) {
            continue;
        }

        log() << "Found interrupted index build on " << ns << endl;
        if (*firstTime) {
            // Print the hint only once per process start.
            log() << "Restart the server with --noIndexBuildRetry to skip index rebuilds" << endl;
            *firstTime = false;
        }

        // If the indexBuildRetry flag isn't set, just clear the inProg flag
        if (!cmdLine.indexBuildRetry) {
            // If we crash between unsetting the inProg flag and cleaning up the index, the
            // index space will be lost.
            int inProg = nsd->indexBuildsInProgress;
            getDur().writingInt(nsd->indexBuildsInProgress) = 0;
            // Partial indexes live after the completed ones, at positions
            // nIndexes .. nIndexes+inProg-1.
            for (int i = 0; i < inProg; i++) {
                nsd->idx(nsd->nIndexes+i).kill_idx();
            }
            continue;
        }

        // We go from right to left building these indexes, so that indexBuildInProgress-- has
        // the correct effect of "popping" an index off the list.
        while (nsd->indexBuildsInProgress > 0) {
            retryIndexBuild(dbName, nsd, nsd->nIndexes+nsd->indexBuildsInProgress-1);
        }
    }
}
bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string ns = dbname + "." + cmdObj.firstElement().valuestr(); NamespaceDetails * d = nsdetails( ns.c_str() ); if ( ! d ) { errmsg = "can't find ns"; return false; } vector<int> idxs; d->findIndexByType( GEOSEARCHNAME , idxs ); if ( idxs.size() == 0 ) { errmsg = "no geoSearch index"; return false; } if ( idxs.size() > 1 ) { errmsg = "more than 1 geosearch index"; return false; } int idxNum = idxs[0]; IndexDetails& id = d->idx( idxNum ); GeoHaystackSearchIndex * si = (GeoHaystackSearchIndex*)id.getSpec().getType(); verify( &id == si->getDetails() ); BSONElement n = cmdObj["near"]; BSONElement maxDistance = cmdObj["maxDistance"]; BSONElement search = cmdObj["search"]; uassert( 13318 , "near needs to be an array" , n.isABSONObj() ); uassert( 13319 , "maxDistance needs a number" , maxDistance.isNumber() ); uassert( 13320 , "search needs to be an object" , search.type() == Object ); unsigned limit = 50; if ( cmdObj["limit"].isNumber() ) limit = (unsigned)cmdObj["limit"].numberInt(); si->searchCommand( d , idxNum , n.Obj() , maxDistance.numberDouble() , search.Obj() , result , limit ); return 1; }
void QueryPlan::checkTableScanAllowed() const { if ( likely( !cmdLine.noTableScan ) ) return; // TODO - is this desirable? See SERVER-2222. if ( _frs.numNonUniversalRanges() == 0 ) return; if ( strstr( ns(), ".system." ) ) return; if( str::startsWith( ns(), "local." ) ) return; if ( !nsdetails( ns() ) ) return; uassert( 10111, (string)"table scans not allowed:" + ns(), !cmdLine.noTableScan ); }
virtual bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string coll = cmdObj.firstElement().valuestr(); if( coll.empty() || db.empty() ) { errmsg = "no collection name specified"; return false; } if( isCurrentlyAReplSetPrimary() && !cmdObj["force"].trueValue() ) { errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force"; return false; } string ns = db + '.' + coll; if ( ! NamespaceString::normal(ns.c_str()) ) { errmsg = "bad namespace name"; return false; } // parameter validation to avoid triggering assertions in compact() if ( str::contains(ns, ".system.") ) { errmsg = "can't compact a system namespace"; return false; } { writelock lk; Client::Context ctx(ns); NamespaceDetails *d = nsdetails(ns.c_str()); if( ! d ) { errmsg = "namespace does not exist"; return false; } if ( d->capped ) { errmsg = "cannot compact a capped collection"; return false; } } bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment bool ok = compact(ns, errmsg, validate, result); return ok; }
/* Write an index-metadata document for a new index on 'key' directly into
   unittests.system.indexes and return a build block for it (caller owns the
   returned pointer).  The build is intentionally left incomplete ("half
   added") so tests can exercise recovery paths. */
IndexCatalog::IndexBuildBlock* halfAddIndex(const std::string& key) {
    string name = key + "_1";
    BSONObj indexInfo = BSON( "v" << 1 <<
                              "key" << BSON( key << 1 ) <<
                              "ns" << _ns <<
                              "name" << name );
    int32_t lenWHdr = indexInfo.objsize() + Record::HeaderSize;
    const char* systemIndexes = "unittests.system.indexes";
    // Allocate a journalled record in system.indexes, copy the metadata in,
    // and link the record into the extent's record list.
    DiskLoc infoLoc = allocateSpaceForANewRecord( systemIndexes,
                                                  nsdetails( systemIndexes ),
                                                  lenWHdr,
                                                  false );
    Record* infoRecord = reinterpret_cast<Record*>( getDur().writingPtr( infoLoc.rec(),
                                                                         lenWHdr ) );
    memcpy( infoRecord->data(), indexInfo.objdata(), indexInfo.objsize() );
    addRecordToRecListInExtent( infoRecord, infoLoc );

    return new IndexCatalog::IndexBuildBlock(
            _ctx.ctx().db()->getCollection( _ns )->getIndexCatalog(), name, infoLoc );
}
bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) { string dropns = dbname + "." + cmdObj.firstElement().valuestrsafe(); if ( !cmdLine.quiet ) tlog() << "CMD: clean " << dropns << endl; NamespaceDetails *d = nsdetails(dropns.c_str()); if ( ! d ) { errmsg = "ns not found"; return 0; } for ( int i = 0; i < Buckets; i++ ) d->deletedList[i].Null(); result.append("ns", dropns.c_str()); return 1; }
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string ns = dbname + "." + cmdObj.firstElement().valuestr(); NamespaceDetails *d = nsdetails(ns); if (NULL == d) { errmsg = "can't find ns"; return false; } GeoNearArguments commonArgs(cmdObj); if (commonArgs.numWanted < 0) { errmsg = "numWanted must be >= 0"; return false; } vector<int> idxs; d->findIndexByType("2d", idxs); if (idxs.size() > 1) { errmsg = "more than one 2d index, not sure which to run geoNear on"; return false; } if (1 == idxs.size()) { result.append("ns", ns); return run2DGeoNear(d->idx(idxs[0]), cmdObj, commonArgs, errmsg, result); } d->findIndexByType("2dsphere", idxs); if (idxs.size() > 1) { errmsg = "more than one 2dsphere index, not sure which to run geoNear on"; return false; } if (1 == idxs.size()) { result.append("ns", ns); return run2DSphereGeoNear(d->idx(idxs[0]), cmdObj, commonArgs, errmsg, result); } errmsg = "no geo indices for geoNear"; return false; }
/* Reset a capped collection to its initial empty state while keeping its
   extents allocated.  Requires no indexes and no background index build.
   (Older variant: writes fields directly rather than via a journalled
   writingWithoutExtra() reference.) */
void NamespaceDetails::emptyCappedCollection( const char *ns ) {
    DEV assert( this == nsdetails(ns) );
    massert( 13424, "collection must be capped", capped );
    massert( 13425, "background index build in progress", !backgroundIndexBuildInProgress );
    massert( 13426, "indexes present", nIndexes == 0 );

    // Drop cached state that refers to this namespace.
    ClientCursor::invalidate( ns );
    NamespaceDetailsTransient::clearForPrefix( ns );

    // Reset the capped bookkeeping fields.
    cappedLastDelRecLastExtent() = DiskLoc();
    cappedListOfAllDeletedRecords() = DiskLoc();

    // preserve firstExtent/lastExtent
    capExtent = firstExtent;
    stats.datasize = stats.nrecords = 0;
    // lastExtentSize preserve
    // nIndexes preserve 0
    // capped preserve true
    // max preserve
    paddingFactor = 1.0;
    flags = 0;
    capFirstNewRecord = DiskLoc();
    capFirstNewRecord.setInvalid();
    cappedLastDelRecLastExtent().setInvalid();
    // dataFileVersion preserve
    // indexFileVersion preserve
    multiKeyIndexBits = 0;
    reservedA = 0;
    extraOffset = 0;
    // backgroundIndexBuildInProgress preserve 0
    memset(reserved, 0, sizeof(reserved));

    // Wipe each extent and rebuild the deleted-record list, restoring the
    // extent chain links that reuse() clears.
    for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
        DiskLoc prev = ext.ext()->xprev;
        DiskLoc next = ext.ext()->xnext;
        DiskLoc empty = ext.ext()->reuse( ns );
        ext.ext()->xprev = prev;
        ext.ext()->xnext = next;
        addDeletedRec( empty.drec(), empty );
    }
}
/** @return IndexDetails for a new index on a:1, with the info field populated. */
IndexDescriptor* addIndexWithInfo() {
    // Build the index metadata document for {a:1}.
    BSONObj indexInfo = BSON( "v" << 1 <<
                              "key" << BSON( "a" << 1 ) <<
                              "ns" << _ns <<
                              "name" << "a_1" );
    int32_t lenWHdr = indexInfo.objsize() + Record::HeaderSize;
    const char* systemIndexes = "unittests.system.indexes";
    // Allocate a journalled record in system.indexes, copy the metadata in,
    // and link the record into the extent's record list.
    DiskLoc infoLoc = allocateSpaceForANewRecord( systemIndexes,
                                                  nsdetails( systemIndexes ),
                                                  lenWHdr,
                                                  false );
    Record* infoRecord = reinterpret_cast<Record*>( getDur().writingPtr( infoLoc.rec(),
                                                                         lenWHdr ) );
    memcpy( infoRecord->data(), indexInfo.objdata(), indexInfo.objsize() );
    addRecordToRecListInExtent( infoRecord, infoLoc );
    // Complete the catalog entry, then look the index back up by name.
    IndexCatalog::IndexBuildBlock blk( collection()->getIndexCatalog(), "a_1", infoLoc );
    blk.success();
    return collection()->getIndexCatalog()->findIndexByName( "a_1" );
}
/** write an op to the oplog that is already built.
    todo : make _logOpRS() call this so we don't repeat ourself?
    */
void _logOpObjRS(const BSONObj& op) {
    Lock::DBWrite lk("local");

    const OpTime ts = op["ts"]._opTime();
    long long h = op["h"].numberLong();

    {
        const char *logns = rsoplog;
        // Lazily resolve and cache the local.oplog.rs NamespaceDetails and db.
        if ( rsOplogDetails == 0 ) {
            Client::Context ctx(logns , dbpath);
            localDB = ctx.db();
            verify( localDB );
            rsOplogDetails = nsdetails(logns);
            massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
        }
        Client::Context ctx(logns , localDB);
        {
            // Append the raw op document via the fast oplog insert path.
            int len = op.objsize();
            Record *r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
            memcpy(getDur().writingPtr(r->data(), len), op.objdata(), len);
        }
        /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
           this code (or code in now() maybe) should be improved.
        */
        if( theReplSet ) {
            // A non-increasing optime suggests failover clock skew; log it.
            if( !(theReplSet->lastOpTimeWritten<ts) ) {
                log() << "replSet error possible failover clock skew issue? " << theReplSet->lastOpTimeWritten.toString() << ' ' << endl;
            }
            theReplSet->lastOpTimeWritten = ts;
            theReplSet->lastH = h;
            ctx.getClient()->setLastOp( ts );
            replset::BackgroundSync::notify();
        }
    }

    OpTime::setLast( ts );
}
void Client::dropTempCollectionsInDB( const string db ) { list<string>::iterator i = _tempCollections.begin(); while ( i!=_tempCollections.end() ) { string ns = *i; dblock l; Client::Context ctx( ns ); if ( nsdetails( ns.c_str() ) && ns.compare( 0, db.length(), db ) == 0 ) { try { string err; BSONObjBuilder b; dropCollection( ns, err, b ); i = _tempCollections.erase(i); ++i; } catch ( ... ){ log() << "error dropping temp collection: " << ns << endl; } } else { ++i; } } }
void CursorGenerator::setArgumentsHint() { if ( useHints && _parsedQuery ) { _argumentsHint = _parsedQuery->getHint(); } if ( snapshot() ) { NamespaceDetails *d = nsdetails( _ns ); if ( d ) { int i = d->findIdIndex(); if( i < 0 ) { if ( _ns.find( ".system." ) == string::npos ) log() << "warning: no _id index on $snapshot query, ns:" << _ns << endl; } else { /* [dm] the name of an _id index tends to vary, so we build the hint the hard way here. probably need a better way to specify "use the _id index" as a hint. if someone is in the query optimizer please fix this then! */ _argumentsHint = BSON( "$hint" << d->idx(i).indexName() ); } } } }
/* Return a cursor positioned at the oplog start point for 'query'/'order',
   with a covered-index matcher attached.  Missing namespace yields an empty
   cursor. */
shared_ptr<Cursor> FindingStartCursor::getCursor( const char *ns, const BSONObj &query,
                                                  const BSONObj &order ) {
    NamespaceDetails *d = nsdetails(ns);
    if ( !d ) {
        return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
    }
    FieldRangeSetPair frsp( ns, query );
    QueryPlan oplogPlan( d, -1, frsp, 0, query, order );
    FindingStartCursor finder( oplogPlan );

    // Scan for the start position, yielding the lock periodically.
    ElapsedTracker yieldCondition( 256, 20 );
    while( !finder.done() ) {
        if ( yieldCondition.intervalHasElapsed() && finder.prepareToYield() ) {
            ClientCursor::staticYield( -1, ns, 0 );
            finder.recoverFromYield();
        }
        finder.next();
    }

    shared_ptr<Cursor> ret = finder.cursor();
    shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, BSONObj() ) );
    ret->setMatcher( matcher );
    return ret;
}
/* Build index 'idx' (index number idxNo, expected to be the next index slot
   on 'd') from every existing document in 'ns'.  Returns the number of keys
   added.  On exception, cleans up the build state via done() when the
   namespace still maps to 'd', then rethrows. */
unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
    unsigned long long n = 0;

    prep(ns.c_str(), d);
    verify( idxNo == d->nIndexes );
    try {
        // Create the btree root bucket, then bulk-load existing documents.
        idx.head.writing() = idx.idxInterface().addBucket(idx);
        n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
    }
    catch(...) {
        // Only roll back when the namespace still maps to this details
        // object; otherwise the db was dropped out from under us.
        if( cc().database() && nsdetails(ns.c_str()) == d ) {
            verify( idxNo == d->nIndexes );
            done(ns.c_str(), d);
        }
        else {
            log() << "ERROR: db gone during bg index?" << endl;
        }
        throw;
    }
    verify( idxNo == d->nIndexes );
    done(ns.c_str(), d);
    return n;
}
/* Lazily create the underlying btree cursor on the first call (advancing to
   the first matching entry), then delegate to it.  Returns false when the
   field-range object cannot be built. */
bool S2Cursor::ok() {
    if (NULL == _btreeCursor.get()) {
        // FieldRangeVector needs an IndexSpec so we make it one.
        BSONObjBuilder specBuilder;
        BSONObjIterator i(_keyPattern);
        while (i.more()) {
            BSONElement e = i.next();
            specBuilder.append(e.fieldName(), 1);
        }
        BSONObj spec = specBuilder.obj();
        IndexSpec specForFRV(spec);
        // All the magic is in makeUnifiedFRS.  See below.
        // A lot of these arguments are opaque.
        BSONObj frsObj;
        if (!makeFRSObject(&frsObj)) { return false; }
        FieldRangeSet frs(_details->parentNS().c_str(), frsObj, false, false);
        shared_ptr<FieldRangeVector> frv(new FieldRangeVector(frs, specForFRV, 1));
        _btreeCursor.reset(BtreeCursor::make(nsdetails(_details->parentNS()),
                                             *_details, frv, 0, 1));
        return advance();
    }
    return _btreeCursor->ok();
}