string OpDebug::report(const CurOp& curop, const SingleThreadedLockStats& lockStats) const {
    StringBuilder s;
    if ( iscommand )
        s << "command ";
    else
        s << opToString( op ) << ' ';
    s << ns;

    if ( ! query.isEmpty() ) {
        if ( iscommand ) {
            s << " command: ";

            Command* curCommand = curop.getCommand();
            if (curCommand) {
                mutablebson::Document cmdToLog(query, mutablebson::Document::kInPlaceDisabled);
                curCommand->redactForLogging(&cmdToLog);
                s << curCommand->name << " ";
                s << cmdToLog.toString();
            }
            else {
                // Should not happen but we need to handle curCommand == NULL gracefully
                s << query.toString();
            }
        }
        else {
            s << " query: ";
            s << query.toString();
        }
    }

    if (!planSummary.empty()) {
        s << " planSummary: " << planSummary.toString();
    }

    if ( ! updateobj.isEmpty() ) {
        s << " update: ";
        updateobj.toString( s );
    }

    OPDEBUG_TOSTRING_HELP( cursorid );
    OPDEBUG_TOSTRING_HELP( ntoreturn );
    OPDEBUG_TOSTRING_HELP( ntoskip );
    OPDEBUG_TOSTRING_HELP_BOOL( exhaust );

    OPDEBUG_TOSTRING_HELP( nscanned );
    OPDEBUG_TOSTRING_HELP( nscannedObjects );
    OPDEBUG_TOSTRING_HELP_BOOL( idhack );
    OPDEBUG_TOSTRING_HELP_BOOL( scanAndOrder );
    OPDEBUG_TOSTRING_HELP( nmoved );
    OPDEBUG_TOSTRING_HELP( nMatched );
    OPDEBUG_TOSTRING_HELP( nModified );
    OPDEBUG_TOSTRING_HELP( ninserted );
    OPDEBUG_TOSTRING_HELP( ndeleted );
    OPDEBUG_TOSTRING_HELP_BOOL( fastmod );
    OPDEBUG_TOSTRING_HELP_BOOL( fastmodinsert );
    OPDEBUG_TOSTRING_HELP_BOOL( upsert );
    OPDEBUG_TOSTRING_HELP_BOOL( cursorExhausted );
    OPDEBUG_TOSTRING_HELP( keyUpdates );
    OPDEBUG_TOSTRING_HELP( writeConflicts );

    if ( extra.len() )
        s << " " << extra.str();

    if ( ! exceptionInfo.empty() ) {
        s << " exception: " << exceptionInfo.msg;
        if ( exceptionInfo.code )
            s << " code:" << exceptionInfo.code;
    }

    s << " numYields:" << curop.numYields();

    OPDEBUG_TOSTRING_HELP( nreturned );
    if (responseLength > 0) {
        s << " reslen:" << responseLength;
    }

    {
        BSONObjBuilder locks;
        lockStats.report(&locks);
        s << " locks:" << locks.obj().toString();
    }

    s << " " << executionTime << "ms";

    return s.str();
}
// static
void IndexBoundsBuilder::translate(const MatchExpression* expr,
                                   int direction,
                                   OrderedIntervalList* oilOut,
                                   bool* exactOut) {
    Interval interval;
    bool exact = false;

    if (expr->isLeaf()) {
        if (MatchExpression::EQ == expr->matchType()) {
            const EqualityMatchExpression* node =
                static_cast<const EqualityMatchExpression*>(expr);

            // We have to copy the data out of the parse tree and stuff it into the index
            // bounds.  BSONValue will be useful here.
            BSONObj dataObj = objFromElement(node->getData());

            if (dataObj.couldBeArray()) {
                // XXX: build better bounds
                warning() << "building lazy bounds for " << expr->toString() << endl;
                interval = allValues();
                exact = false;
            }
            else {
                verify(dataObj.isOwned());
                interval = makePointInterval(dataObj);
                exact = true;
            }
        }
        else if (MatchExpression::LTE == expr->matchType()) {
            const LTEMatchExpression* node = static_cast<const LTEMatchExpression*>(expr);
            BSONObjBuilder bob;
            bob.appendMinKey("");
            bob.append(node->getData());
            BSONObj dataObj = bob.obj();
            verify(dataObj.isOwned());
            interval = makeRangeInterval(dataObj, true, true);
            exact = true;
        }
        else if (MatchExpression::LT == expr->matchType()) {
            const LTMatchExpression* node = static_cast<const LTMatchExpression*>(expr);
            BSONObjBuilder bob;
            bob.appendMinKey("");
            bob.append(node->getData());
            BSONObj dataObj = bob.obj();
            verify(dataObj.isOwned());
            interval = makeRangeInterval(dataObj, true, false);
            exact = true;
        }
        else if (MatchExpression::GT == expr->matchType()) {
            const GTMatchExpression* node = static_cast<const GTMatchExpression*>(expr);
            BSONObjBuilder bob;
            bob.append(node->getData());
            bob.appendMaxKey("");
            BSONObj dataObj = bob.obj();
            verify(dataObj.isOwned());
            interval = makeRangeInterval(dataObj, false, true);
            exact = true;
        }
        else if (MatchExpression::GTE == expr->matchType()) {
            const GTEMatchExpression* node = static_cast<const GTEMatchExpression*>(expr);
            BSONObjBuilder bob;
            bob.append(node->getData());
            bob.appendMaxKey("");
            BSONObj dataObj = bob.obj();
            verify(dataObj.isOwned());
            interval = makeRangeInterval(dataObj, true, true);
            exact = true;
        }
        else {
            // XXX: build better bounds
            warning() << "building lazy bounds for " << expr->toString() << endl;
            interval = allValues();
            exact = false;
        }
    }
    else {
        // XXX: build better bounds
        verify(expr->isArray());
        warning() << "building lazy bounds for " << expr->toString() << endl;
        interval = allValues();
        exact = false;
    }

    if (-1 == direction) {
        reverseInterval(&interval);
    }

    oilOut->intervals.push_back(interval);
    *exactOut = exact;
}
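// A minimal usage sketch (illustrative only; constructing `expr` is assumed and not shown
// here). translate() fills one OrderedIntervalList for a single predicate and reports
// through `exact` whether the bounds alone imply the predicate:
//
//     OrderedIntervalList oil;
//     bool exact = false;
//     IndexBoundsBuilder::translate(expr, /*direction=*/1, &oil, &exact);
//     if (!exact) {
//         // Bounds may be as loose as [MinKey, MaxKey] (the "lazy bounds" above), so the
//         // caller must keep the original MatchExpression as a filter on fetched documents.
//     }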
// liberally cribbed from user_prio.cpp
void plumage::stats::processAccountantStats(ClassAd* ad, ODSMongodbOps* ops, Date_t& ts)
{
    // attr%d holders...sadly reverting back to MyString for convenience of formatstr
    MyString attrName, attrPrio, attrResUsed, attrWtResUsed, attrFactor, attrBeginUsage, attrAccUsage;
    MyString attrLastUsage, attrAcctGroup, attrIsAcctGroup;
    MyString attrConfigQuota, attrEffectiveQuota, attrSubtreeQuota, attrSurplusPolicy;

    // values
    string name, acctGroup, surplusPolicy;
    float priority, factor, wtResUsed, configQuota, effectiveQuota, subtreeQuota, accUsage = -1;
    int resUsed, beginUsage, lastUsage;
    resUsed = beginUsage = lastUsage = 0;
    bool isAcctGroup;

    DBClientConnection* conn = ops->m_db_conn;

    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "ts" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "lu" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "n" << 1 ));

    unsigned long long acct_count = conn->count(DB_STATS_SAMPLES_ACCOUNTANT);

    // even though the Accountant doesn't forget,
    // we don't care about stale submitters (default: last 24 hours)
    int cfg_last_usage = param_integer("ODS_ACCOUNTANT_LAST_USAGE", 60*60*24);
    int minLastUsageTime = time(0)-cfg_last_usage;

    int numElem = -1;
    ad->LookupInteger( "NumSubmittors", numElem );

    for( int i=1; i<=numElem; i++) {
        priority=0;
        isAcctGroup = false;

        // skip stale records unless we have none
        attrLastUsage.formatstr("LastUsageTime%d", i );
        ad->LookupInteger ( attrLastUsage.Value(), lastUsage );
        if (lastUsage < minLastUsageTime && acct_count > 0)
            continue;

        // parse the horrid classad
        attrName.formatstr("Name%d", i );
        attrPrio.formatstr("Priority%d", i );
        attrResUsed.formatstr("ResourcesUsed%d", i );
        attrWtResUsed.formatstr("WeightedResourcesUsed%d", i );
        attrFactor.formatstr("PriorityFactor%d", i );
        attrBeginUsage.formatstr("BeginUsageTime%d", i );
        attrAccUsage.formatstr("WeightedAccumulatedUsage%d", i );
        attrAcctGroup.formatstr("AccountingGroup%d", i);
        attrIsAcctGroup.formatstr("IsAccountingGroup%d", i);
        attrConfigQuota.formatstr("ConfigQuota%d", i);
        attrEffectiveQuota.formatstr("EffectiveQuota%d", i);
        attrSubtreeQuota.formatstr("SubtreeQuota%d", i);
        attrSurplusPolicy.formatstr("SurplusPolicy%d", i);

        ad->LookupString ( attrName.Value(), name );
        ad->LookupFloat ( attrPrio.Value(), priority );
        ad->LookupFloat ( attrFactor.Value(), factor );
        ad->LookupFloat ( attrAccUsage.Value(), accUsage );
        ad->LookupInteger ( attrBeginUsage.Value(), beginUsage );
        ad->LookupInteger ( attrResUsed.Value(), resUsed );
        ad->LookupBool ( attrIsAcctGroup.Value(), isAcctGroup);
        ad->LookupFloat ( attrConfigQuota.Value(), configQuota );
        ad->LookupFloat ( attrEffectiveQuota.Value(), effectiveQuota );
        ad->LookupFloat ( attrSubtreeQuota.Value(), subtreeQuota );
        ad->LookupString ( attrSurplusPolicy.Value(), surplusPolicy );

        if( !ad->LookupFloat( attrWtResUsed.Value(), wtResUsed ) ) {
            wtResUsed = resUsed;
        }
        if (!ad->LookupString(attrAcctGroup.Value(), acctGroup)) {
            acctGroup = "<none>";
        }

        BSONObjBuilder bob;
        bob.appendDate("ts",ts);
        bob.append("n",name);
        bob.append("ag",acctGroup);
        bob.appendAsNumber("prio",formatReal(priority));
        bob.appendAsNumber("fac",formatReal(factor));
        bob.append("ru",resUsed);
        bob.append("wru",wtResUsed);
        // condor timestamps need massaging when going in the db
        bob.appendDate("bu",static_cast<unsigned long long>(beginUsage)*1000);
        bob.appendDate("lu",static_cast<unsigned long long>(lastUsage)*1000);
        bob.appendAsNumber("au",formatReal(accUsage));
        bob.appendAsNumber("cq",formatReal(configQuota));
        bob.appendAsNumber("eq",formatReal(effectiveQuota));
        bob.appendAsNumber("sq",formatReal(subtreeQuota));
        if (!surplusPolicy.empty()) bob.append("sp",surplusPolicy);

        conn->insert(DB_STATS_SAMPLES_ACCOUNTANT,bob.obj());
    }
}
bool OpDebug::append(const CurOp& curop, BSONObjBuilder& b, size_t maxSize) const {
    b.append( "op" , iscommand ? "command" : opToString( op ) );
    b.append( "ns" , ns.toString() );

    int queryUpdateObjSize = 0;
    if (!query.isEmpty()) {
        queryUpdateObjSize += query.objsize();
    }
    else if (!iscommand && curop.haveQuery()) {
        queryUpdateObjSize += curop.query()["query"].size();
    }

    if (!updateobj.isEmpty()) {
        queryUpdateObjSize += updateobj.objsize();
    }

    if (static_cast<size_t>(queryUpdateObjSize) > maxSize) {
        if (!query.isEmpty()) {
            // Use 60 since BSONObj::toString can truncate strings into 150 chars
            // and we want to have enough room for both query and updateobj when
            // the entire document is going to be serialized into a string
            const string abbreviated(query.toString(false, false), 0, 60);
            b.append(iscommand ? "command" : "query", abbreviated + "...");
        }
        else if (!iscommand && curop.haveQuery()) {
            const string abbreviated(curop.query()["query"].toString(false, false), 0, 60);
            b.append("query", abbreviated + "...");
        }

        if (!updateobj.isEmpty()) {
            const string abbreviated(updateobj.toString(false, false), 0, 60);
            b.append("updateobj", abbreviated + "...");
        }

        return false;
    }

    if (!query.isEmpty()) {
        b.append(iscommand ? "command" : "query", query);
    }
    else if (!iscommand && curop.haveQuery()) {
        curop.appendQuery(b, "query");
    }

    if (!updateobj.isEmpty()) {
        b.append("updateobj", updateobj);
    }

    const bool moved = (nmoved >= 1);

    OPDEBUG_APPEND_NUMBER( cursorid );
    OPDEBUG_APPEND_NUMBER( ntoreturn );
    OPDEBUG_APPEND_NUMBER( ntoskip );
    OPDEBUG_APPEND_BOOL( exhaust );
    OPDEBUG_APPEND_NUMBER( nscanned );
    OPDEBUG_APPEND_BOOL( idhack );
    OPDEBUG_APPEND_BOOL( scanAndOrder );
    OPDEBUG_APPEND_BOOL( moved );
    OPDEBUG_APPEND_NUMBER( nmoved );
    OPDEBUG_APPEND_NUMBER( nupdated );
    OPDEBUG_APPEND_BOOL( fastmod );
    OPDEBUG_APPEND_BOOL( fastmodinsert );
    OPDEBUG_APPEND_BOOL( upsert );
    OPDEBUG_APPEND_NUMBER( keyUpdates );

    b.appendNumber( "numYield" , curop.numYields() );
    b.append( "lockStats" , curop.lockStat().report() );

    if ( ! exceptionInfo.empty() )
        exceptionInfo.append( b , "exception" , "exceptionCode" );

    OPDEBUG_APPEND_NUMBER( nreturned );
    OPDEBUG_APPEND_NUMBER( responseLength );
    b.append( "millis" , executionTime );

    return true;
}
// static
BSONObj IndexBoundsBuilder::objFromElement(const BSONElement& elt) {
    BSONObjBuilder bob;
    bob.append(elt);
    return bob.obj();
}
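// Illustrative sketch (not from the original source): objFromElement() copies a single
// element, field name included, into an owned one-field BSONObj:
//
//     BSONObj src = BSON( "a" << 5 );
//     BSONObj wrapped = IndexBoundsBuilder::objFromElement( src["a"] );  // { a: 5 }, owned
//
// The owned copy matters because index bounds must not reference the parse tree's buffer;
// translate() above verify()s dataObj.isOwned() for exactly this reason.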
Status MMAPV1DatabaseCatalogEntry::_renameSingleNamespace( OperationContext* txn,
                                                           const StringData& fromNS,
                                                           const StringData& toNS,
                                                           bool stayTemp ) {
    // some sanity checking
    NamespaceDetails* fromDetails = _namespaceIndex.details( fromNS );
    if ( !fromDetails )
        return Status( ErrorCodes::BadValue, "from namespace doesn't exist" );

    if ( _namespaceIndex.details( toNS ) )
        return Status( ErrorCodes::BadValue, "to namespace already exists" );

    // at this point, we haven't done anything destructive yet

    // ----
    // actually start moving
    // ----

    // this could throw, but if it does we're ok
    _namespaceIndex.add_ns( txn, toNS, fromDetails );
    NamespaceDetails* toDetails = _namespaceIndex.details( toNS );

    try {
        toDetails->copyingFrom(txn, toNS, _namespaceIndex, fromDetails); // fixes extraOffset
    }
    catch( DBException& ) {
        // could end up here if .ns is full - if so try to clean up / roll back a little
        _namespaceIndex.kill_ns( txn, toNS );
        throw;
    }

    // at this point, the .ns metadata has been moved
    _namespaceIndex.kill_ns( txn, fromNS );
    fromDetails = NULL;

    // fix system.namespaces
    BSONObj newSpec;
    DiskLoc oldSpecLocation;
    {
        BSONObj oldSpec;
        {
            RecordStoreV1Base* rs = _getNamespaceRecordStore( txn, fromNS );
            scoped_ptr<RecordIterator> it( rs->getIterator() );
            while ( !it->isEOF() ) {
                DiskLoc loc = it->getNext();
                const Record* rec = it->recordFor( loc );
                BSONObj entry( rec->data() );
                if ( fromNS == entry["name"].String() ) {
                    oldSpecLocation = loc;
                    oldSpec = entry.getOwned();
                    break;
                }
            }
        }
        invariant( !oldSpec.isEmpty() );
        invariant( !oldSpecLocation.isNull() );

        BSONObjBuilder b;
        BSONObjIterator i( oldSpec.getObjectField( "options" ) );
        while( i.more() ) {
            BSONElement e = i.next();
            if ( strcmp( e.fieldName(), "create" ) != 0 ) {
                if (stayTemp || (strcmp(e.fieldName(), "temp") != 0))
                    b.append( e );
            }
            else {
                b << "create" << toNS;
            }
        }
        newSpec = b.obj();
    }

    _addNamespaceToNamespaceCollection( txn, toNS, newSpec.isEmpty() ? 0 : &newSpec );

    _getNamespaceRecordStore( txn, fromNS )->deleteRecord( txn, oldSpecLocation );

    return Status::OK();
}
Status ChunkMoveOperationState::commitMigration(const MigrationSessionId& sessionId) {
    invariant(_distLockStatus.is_initialized());
    invariant(_distLockStatus->isOK());

    log() << "About to enter migrate critical section";

    // We're under the collection distributed lock here, so no other migrate can change
    // maxVersion or CollectionMetadata state.
    ShardingState* const shardingState = ShardingState::get(_txn);

    Status startStatus = ShardingStateRecovery::startMetadataOp(_txn);
    if (!startStatus.isOK()) {
        warning() << "Failed to write sharding state recovery document" << causedBy(startStatus);
        return startStatus;
    }

    shardingState->migrationSourceManager()->setInCriticalSection(true);

    const ChunkVersion originalCollVersion = getCollMetadata()->getCollVersion();

    ChunkVersion myVersion = originalCollVersion;
    myVersion.incMajor();

    {
        ScopedTransaction transaction(_txn, MODE_IX);
        Lock::DBLock lk(_txn->lockState(), _nss.db(), MODE_IX);
        Lock::CollectionLock collLock(_txn->lockState(), _nss.ns(), MODE_X);

        invariant(myVersion > shardingState->getVersion(_nss.ns()));

        // Bump the metadata's version up and "forget" about the chunk being moved. This is
        // not the commit point, but in practice the state in this shard won't change until
        // the commit is done.
        shardingState->donateChunk(_txn, _nss.ns(), _minKey, _maxKey, myVersion);
    }

    log() << "moveChunk setting version to: " << myVersion << migrateLog;

    // We're under the collection lock here, too, so we can undo the chunk donation because
    // no other state change could be ongoing
    BSONObj res;
    Status recvChunkCommitStatus{ErrorCodes::InternalError, "status not set"};

    try {
        ScopedDbConnection connTo(_toShardCS, 35.0);
        connTo->runCommand("admin", createRecvChunkCommitRequest(sessionId), res);
        connTo.done();
        recvChunkCommitStatus = getStatusFromCommandResult(res);
    }
    catch (const DBException& e) {
        const string msg = stream() << "moveChunk could not contact to-shard " << _toShard
                                    << " to commit transfer" << causedBy(e);
        warning() << msg;
        recvChunkCommitStatus = Status(e.toStatus().code(), msg);
    }

    if (MONGO_FAIL_POINT(failMigrationCommit) && recvChunkCommitStatus.isOK()) {
        recvChunkCommitStatus =
            Status(ErrorCodes::InternalError, "Failing _recvChunkCommit due to failpoint.");
    }

    if (!recvChunkCommitStatus.isOK()) {
        log() << "moveChunk migrate commit not accepted by TO-shard: " << res
              << " resetting shard version to: " << getShardVersion() << migrateLog;

        {
            ScopedTransaction transaction(_txn, MODE_IX);
            Lock::DBLock dbLock(_txn->lockState(), _nss.db(), MODE_IX);
            Lock::CollectionLock collLock(_txn->lockState(), _nss.ns(), MODE_X);

            log() << "moveChunk collection lock acquired to reset shard version from "
                     "failed migration";

            // Revert the chunk manager back to the state before "forgetting" about the chunk
            shardingState->undoDonateChunk(_txn, _nss.ns(), getCollMetadata());
        }

        log() << "Shard version successfully reset to clean up failed migration";

        return Status(recvChunkCommitStatus.code(),
                      stream() << "_recvChunkCommit failed: " << causedBy(recvChunkCommitStatus));
    }

    log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog;

    BSONArrayBuilder updates;

    {
        // Update for the chunk being moved
        BSONObjBuilder op;
        op.append("op", "u");
        op.appendBool("b", false);  // No upserting
        op.append("ns", ChunkType::ConfigNS);

        BSONObjBuilder n(op.subobjStart("o"));
        n.append(ChunkType::name(), Chunk::genID(_nss.ns(), _minKey));
        myVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
        n.append(ChunkType::ns(), _nss.ns());
        n.append(ChunkType::min(), _minKey);
        n.append(ChunkType::max(), _maxKey);
        n.append(ChunkType::shard(), _toShard);
        n.done();

        BSONObjBuilder q(op.subobjStart("o2"));
        q.append(ChunkType::name(), Chunk::genID(_nss.ns(), _minKey));
        q.done();

        updates.append(op.obj());
    }

    // Version at which the next highest lastmod will be set. If the chunk being moved is the
    // last in the shard, nextVersion is that chunk's lastmod; otherwise the highest version
    // is from the chunk being bumped on the FROM-shard.
    ChunkVersion nextVersion = myVersion;

    // If we have chunks left on the FROM shard, update the version of one of them as well.
    // We can figure that out by grabbing the metadata as it has been changed.
    const std::shared_ptr<CollectionMetadata> bumpedCollMetadata(
        shardingState->getCollectionMetadata(_nss.ns()));
    if (bumpedCollMetadata->getNumChunks() > 0) {
        // get another chunk on that shard
        ChunkType bumpChunk;
        invariant(bumpedCollMetadata->getNextChunk(bumpedCollMetadata->getMinKey(), &bumpChunk));

        BSONObj bumpMin = bumpChunk.getMin();
        BSONObj bumpMax = bumpChunk.getMax();

        dassert(bumpMin.woCompare(_minKey) != 0);

        BSONObjBuilder op;
        op.append("op", "u");
        op.appendBool("b", false);
        op.append("ns", ChunkType::ConfigNS);

        nextVersion.incMinor();  // same as used on donateChunk

        BSONObjBuilder n(op.subobjStart("o"));
        n.append(ChunkType::name(), Chunk::genID(_nss.ns(), bumpMin));
        nextVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod());
        n.append(ChunkType::ns(), _nss.ns());
        n.append(ChunkType::min(), bumpMin);
        n.append(ChunkType::max(), bumpMax);
        n.append(ChunkType::shard(), _fromShard);
        n.done();

        BSONObjBuilder q(op.subobjStart("o2"));
        q.append(ChunkType::name(), Chunk::genID(_nss.ns(), bumpMin));
        q.done();

        updates.append(op.obj());

        log() << "moveChunk updating self version to: " << nextVersion << " through " << bumpMin
              << " -> " << bumpMax << " for collection '" << _nss.ns() << "'" << migrateLog;
    }
    else {
        log() << "moveChunk moved last chunk out for collection '" << _nss.ns() << "'"
              << migrateLog;
    }

    BSONArrayBuilder preCond;
    {
        BSONObjBuilder b;
        b.append("ns", ChunkType::ConfigNS);
        b.append("q",
                 BSON("query" << BSON(ChunkType::ns(_nss.ns())) << "orderby"
                              << BSON(ChunkType::DEPRECATED_lastmod() << -1)));
        {
            BSONObjBuilder bb(b.subobjStart("res"));

            // TODO: For backwards compatibility, we can't yet require an epoch here
            bb.appendTimestamp(ChunkType::DEPRECATED_lastmod(), originalCollVersion.toLong());
            bb.done();
        }

        preCond.append(b.obj());
    }

    Status applyOpsStatus{Status::OK()};
    try {
        // For testing migration failures
        if (MONGO_FAIL_POINT(failMigrationConfigWritePrepare)) {
            throw DBException("mock migration failure before config write",
                              ErrorCodes::PrepareConfigsFailed);
        }

        applyOpsStatus =
            grid.catalogManager(_txn)->applyChunkOpsDeprecated(_txn, updates.arr(), preCond.arr());

        if (MONGO_FAIL_POINT(failMigrationApplyOps)) {
            throw SocketException(SocketException::RECV_ERROR,
                                  shardingState->getConfigServer(_txn).toString());
        }
    }
    catch (const DBException& ex) {
        applyOpsStatus = ex.toStatus();
    }

    if (applyOpsStatus == ErrorCodes::PrepareConfigsFailed) {
        // In the process of issuing the migrate commit, the SyncClusterConnection checks that
        // the config servers are reachable. If they are not, we are sure that the applyOps
        // command was not sent to any of the configs, so we can safely back out of the
        // migration here, by resetting the shard version that we bumped up to in the
        // donateChunk() call above.
        log() << "About to acquire moveChunk coll lock to reset shard version from "
              << "failed migration";

        {
            ScopedTransaction transaction(_txn, MODE_IX);
            Lock::DBLock dbLock(_txn->lockState(), _nss.db(), MODE_IX);
            Lock::CollectionLock collLock(_txn->lockState(), _nss.ns(), MODE_X);

            // Revert the metadata back to the state before "forgetting" about the chunk
            shardingState->undoDonateChunk(_txn, _nss.ns(), getCollMetadata());
        }

        log() << "Shard version successfully reset to clean up failed migration";

        const string msg = stream() << "Failed to send migrate commit to configs "
                                    << causedBy(applyOpsStatus);
        return Status(applyOpsStatus.code(), msg);
    }
    else if (!applyOpsStatus.isOK()) {
        // This could be a blip in the connectivity. Wait out a few seconds and check if the
        // commit request made it.
        //
        // If the commit made it to the config, we'll see the chunk in the new shard and
        // there's no further action to be done.
        //
        // If the commit did not make it, currently the only way to fix this state is to
        // bounce the mongod so that the old state (before migrating) is brought in.
        warning() << "moveChunk commit failed and metadata will be revalidated"
                  << causedBy(applyOpsStatus) << migrateLog;
        sleepsecs(10);

        // Look for the chunk in this shard whose version got bumped. We assume that if that
        // mod made it to the config server, then applyOps was successful.
        try {
            std::vector<ChunkType> newestChunk;
            Status status =
                grid.catalogManager(_txn)->getChunks(_txn,
                                                     BSON(ChunkType::ns(_nss.ns())),
                                                     BSON(ChunkType::DEPRECATED_lastmod() << -1),
                                                     1,
                                                     &newestChunk,
                                                     nullptr);
            uassertStatusOK(status);

            ChunkVersion checkVersion;
            if (!newestChunk.empty()) {
                invariant(newestChunk.size() == 1);
                checkVersion = newestChunk[0].getVersion();
            }

            if (checkVersion.equals(nextVersion)) {
                log() << "moveChunk commit confirmed" << migrateLog;
            }
            else {
                error() << "moveChunk commit failed: version is at " << checkVersion
                        << " instead of " << nextVersion << migrateLog;
                error() << "TERMINATING" << migrateLog;
                dbexit(EXIT_SHARDING_ERROR);
            }
        }
        catch (...) {
            error() << "moveChunk failed to get confirmation of commit" << migrateLog;
            error() << "TERMINATING" << migrateLog;
            dbexit(EXIT_SHARDING_ERROR);
        }
    }

    MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection);

    shardingState->migrationSourceManager()->setInCriticalSection(false);
    ShardingStateRecovery::endMetadataOp(_txn);

    // Migration is done, just log some diagnostics information
    BSONObj chunkInfo =
        BSON("min" << _minKey << "max" << _maxKey << "from" << _fromShard << "to" << _toShard);

    BSONObjBuilder commitInfo;
    commitInfo.appendElements(chunkInfo);
    if (res["counts"].type() == Object) {
        commitInfo.appendElements(res["counts"].Obj());
    }

    grid.catalogManager(_txn)->logChange(_txn, "moveChunk.commit", _nss.ns(), commitInfo.obj());

    shardingState->migrationSourceManager()->done(_txn);
    _isRunning = false;

    return Status::OK();
}
void run() {
    Scope * s = globalScriptEngine->newScope();

    // -- A --

    BSONObj o;
    {
        BSONObjBuilder b;
        b.append( "a" , (int)5 );
        b.append( "b" , 5.6 );
        o = b.obj();
    }
    ASSERT_EQUALS( NumberInt , o["a"].type() );
    ASSERT_EQUALS( NumberDouble , o["b"].type() );

    s->setObject( "z" , o );
    s->invoke( "return z" , BSONObj() );
    BSONObj out = s->getObject( "return" );
    ASSERT_EQUALS( 5 , out["a"].number() );
    ASSERT_EQUALS( 5.6 , out["b"].number() );

    ASSERT_EQUALS( NumberDouble , out["b"].type() );
    ASSERT_EQUALS( NumberInt , out["a"].type() );

    // -- B --

    {
        BSONObjBuilder b;
        b.append( "a" , (int)5 );
        b.append( "b" , 5.6 );
        o = b.obj();
    }

    s->setObject( "z" , o , false );
    s->invoke( "return z" , BSONObj() );
    out = s->getObject( "return" );
    ASSERT_EQUALS( 5 , out["a"].number() );
    ASSERT_EQUALS( 5.6 , out["b"].number() );

    ASSERT_EQUALS( NumberDouble , out["b"].type() );
    ASSERT_EQUALS( NumberInt , out["a"].type() );

    // -- C --

    {
        BSONObjBuilder b;
        {
            BSONObjBuilder c;
            c.append( "0" , 5.5 );
            c.append( "1" , 6 );
            b.appendArray( "a" , c.obj() );
        }
        o = b.obj();
    }

    ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
    ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );

    s->setObject( "z" , o , false );
    out = s->getObject( "z" );

    ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
    ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );

    s->invokeSafe( "z.z = 5;" , BSONObj() );
    out = s->getObject( "z" );
    ASSERT_EQUALS( 5 , out["z"].number() );
    ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );

    // Commenting so that v8 tests will work
    // ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() );
    // TODO: this is technically bad, but here to make sure that I understand the behavior

    // Eliot says I don't have to worry about this case

    // -- D --
    //
    // o = fromjson( "{a:3.0,b:4.5}" );
    // ASSERT_EQUALS( NumberDouble , o["a"].type() );
    // ASSERT_EQUALS( NumberDouble , o["b"].type() );
    //
    // s->setObject( "z" , o , false );
    // s->invoke( "return z" , BSONObj() );
    // out = s->getObject( "return" );
    // ASSERT_EQUALS( 3 , out["a"].number() );
    // ASSERT_EQUALS( 4.5 , out["b"].number() );
    //
    // ASSERT_EQUALS( NumberDouble , out["b"].type() );
    // ASSERT_EQUALS( NumberDouble , out["a"].type() );

    delete s;
}
virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    if( !check(errmsg, result) )
        return false;
    result.append("rbid", rbid);
    return true;
}
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& cmdObj,
         int,
         string& errmsg,
         BSONObjBuilder& result) {
    _runCalled = true;

    const auto service = txn->getServiceContext();
    const auto clock = service->getFastClockSource();
    const auto runStart = clock->now();
    BSONObjBuilder timeBuilder(256);

    const auto authSession = AuthorizationSession::get(Client::getCurrent());

    // --- basic fields that are global

    result.append("host", prettyHostName());
    result.append("version", VersionInfoInterface::instance().version());
    result.append("process", serverGlobalParams.binaryName);
    result.append("pid", ProcessId::getCurrent().asLongLong());
    result.append("uptime", (double)(time(0) - serverGlobalParams.started));
    auto uptime = clock->now() - _started;
    result.append("uptimeMillis", durationCount<Milliseconds>(uptime));
    result.append("uptimeEstimate", durationCount<Seconds>(uptime));
    result.appendDate("localTime", jsTime());

    timeBuilder.appendNumber("after basic", durationCount<Milliseconds>(clock->now() - runStart));

    // --- all sections

    for (SectionMap::const_iterator i = _sections->begin(); i != _sections->end(); ++i) {
        ServerStatusSection* section = i->second;

        std::vector<Privilege> requiredPrivileges;
        section->addRequiredPrivileges(&requiredPrivileges);
        if (!authSession->isAuthorizedForPrivileges(requiredPrivileges))
            continue;

        bool include = section->includeByDefault();
        const auto& elem = cmdObj[section->getSectionName()];
        if (elem.type()) {
            include = elem.trueValue();
        }

        if (!include) {
            continue;
        }

        section->appendSection(txn, elem, &result);
        timeBuilder.appendNumber(
            static_cast<string>(str::stream() << "after " << section->getSectionName()),
            durationCount<Milliseconds>(clock->now() - runStart));
    }

    // --- counters

    bool includeMetricTree = MetricTree::theMetricTree != NULL;
    if (cmdObj["metrics"].type() && !cmdObj["metrics"].trueValue())
        includeMetricTree = false;

    if (includeMetricTree) {
        MetricTree::theMetricTree->appendTo(result);
    }

    // --- some hard coded global things hard to pull out

    {
        RamLog::LineIterator rl(RamLog::get("warnings"));
        if (rl.lastWrite() >= time(0) - (10 * 60)) {  // only show warnings from last 10 minutes
            BSONArrayBuilder arr(result.subarrayStart("warnings"));
            while (rl.more()) {
                arr.append(rl.next());
            }
            arr.done();
        }
    }

    auto runElapsed = clock->now() - runStart;
    timeBuilder.appendNumber("at end", durationCount<Milliseconds>(runElapsed));
    if (runElapsed > Milliseconds(1000)) {
        BSONObj t = timeBuilder.obj();
        log() << "serverStatus was very slow: " << t;
        result.append("timing", t);
    }

    return true;
}
void run() {
    Scope * s = globalScriptEngine->newScope();

    { // date
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendDate( "d" , 123456789 );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "return x.d.getTime() != 12;" , BSONObj() );
        ASSERT_EQUALS( true, s->getBoolean( "return" ) );

        s->invoke( "z = x.d.getTime();" , BSONObj() );
        ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );

        s->invoke( "z = { z : x.d }" , BSONObj() );
        BSONObj out = s->getObject( "z" );
        ASSERT( out["z"].type() == Date );
    }

    { // regex
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendRegex( "r" , "^a" , "i" );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "z = x.r.test( 'b' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'a' );" , BSONObj() );
        ASSERT_EQUALS( true , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'ba' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = { a : x.r };" , BSONObj() );
        BSONObj out = s->getObject("z");
        ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
        ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
    }

    // array
    {
        BSONObj o = fromjson( "{r:[1,2,3]}" );
        s->setObject( "x", o, false );
        BSONObj out = s->getObject( "x" );
        ASSERT_EQUALS( Array, out.firstElement().type() );

        s->setObject( "x", o, true );
        out = s->getObject( "x" );
        ASSERT_EQUALS( Array, out.firstElement().type() );
    }

    delete s;
}
/** @param fromRepl false if from ApplyOpsCmd
    @return true if an update should have happened but the document did not exist (DNE);
            see replset initial sync code.
*/
bool applyOperation_inlock(const BSONObj& op, bool fromRepl, bool convertUpdateToUpsert) {
    LOG(3) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();

    Lock::assertWriteLocked(ns);

    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            if (o["background"].trueValue()) {
                IndexBuilder* builder = new IndexBuilder(ns, o);
                // This spawns a new thread and returns immediately.
                builder->go();
            }
            else {
                IndexBuilder builder(ns, o);
                // Finish the foreground build before returning
                builder.build();
            }
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id.  This will be very slow. */
                Timer t;
                updateObjectsForReplication(ns, o, o, true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                // probably don't need this since all replicated colls have _id indexes now
                // but keep it just in case
                RARELY if ( nsd && !nsd->isCapped() ) { ensureHaveIdIndex(ns, false); }

                /* todo : it may be better to do an insert here, and then catch the dup key
                   exception and do update then.  very few upserts will not be inserts... */
                BSONObjBuilder b;
                b.append(_id);
                updateObjectsForReplication(ns, o, b.done(), true, false, false, debug, false,
                                            QueryPlanSelectionPolicy::idElseNatural() );
            }
        }
    }
void createOplog() {
    Lock::GlobalWrite lk;

    const char * ns = "local.oplog.$main";

    bool rs = !cmdLine._replSet.empty();
    if( rs )
        ns = rsoplog;

    Client::Context ctx(ns);

    NamespaceDetails * nsd = nsdetails( ns );

    if ( nsd ) {
        if ( cmdLine.oplogSize != 0 ) {
            int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
            int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
            if ( n != o ) {
                stringstream ss;
                ss << "cmdline oplogsize (" << n << ") different than existing (" << o
                   << ") see: http://dochub.mongodb.org/core/increase-oplog";
                log() << ss.str() << endl;
                throw UserException( 13257 , ss.str() );
            }
        }

        if( rs )
            return;

        DBDirectClient c;
        BSONObj lastOp = c.findOne( ns, Query().sort(reverseNaturalObj) );
        if ( !lastOp.isEmpty() ) {
            OpTime::setLast( lastOp[ "ts" ].date() );
        }
        return;
    }

    /* create an oplog collection, if it doesn't yet exist. */
    BSONObjBuilder b;
    double sz;
    if ( cmdLine.oplogSize != 0 )
        sz = (double)cmdLine.oplogSize;
    else {
        /* not specified. pick a default size */
        sz = 50.0 * 1024 * 1024;
        if ( sizeof(int *) >= 8 ) {
#if defined(__APPLE__)
            // typically these are desktops (dev machines), so keep it smallish
            sz = (256-64) * 1024 * 1024;
#else
            sz = 990.0 * 1024 * 1024;
            boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
            double fivePct = free * 0.05;
            if ( fivePct > sz )
                sz = fivePct;
#endif
        }
    }

    log() << "******" << endl;
    log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB..." << endl;

    b.append("size", sz);
    b.appendBool("capped", 1);
    b.appendBool("autoIndexId", false);

    string err;
    BSONObj o = b.done();
    userCreateNS(ns, o, err, false);
    if( !rs )
        logOp( "n", "", BSONObj() );

    /* sync here so we don't get any surprising lag later when we try to sync */
    MemoryMappedFile::flushAll(true);
    log() << "******" << endl;
}
void handleRESTQuery( string ns , string action , BSONObj & params , int & responseCode , stringstream & out ) {
    Timer t;

    int skip = _getOption( params["skip"] , 0 );
    int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new

    int one = 0;
    if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
        num = 1;
        one = 1;
    }

    BSONObjBuilder queryBuilder;

    BSONObjIterator i(params);
    while ( i.more() ) {
        BSONElement e = i.next();
        string name = e.fieldName();
        if ( name.find( "filter_" ) != 0 )
            continue;

        // Keep the suffix in a named string so its buffer outlives this statement;
        // calling c_str() on the temporary returned by substr() would leave a dangling
        // pointer.
        const string field = name.substr( 7 );
        const char * val = e.valuestr();

        char * temp;

        // TODO: this is how I guess if something is a number. pretty lame right now
        double number = strtod( val , &temp );
        if ( temp != val )
            queryBuilder.append( field.c_str() , number );
        else
            queryBuilder.append( field.c_str() , val );
    }

    BSONObj query = queryBuilder.obj();

    auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );

    if ( one ) {
        if ( cursor->more() ) {
            BSONObj obj = cursor->next();
            out << obj.jsonString() << "\n";
        }
        else {
            responseCode = 404;
        }
        return;
    }

    out << "{\n";
    out << "  \"offset\" : " << skip << ",\n";
    out << "  \"rows\": [\n";

    int howMany = 0;
    while ( cursor->more() ) {
        if ( howMany++ )
            out << " ,\n";
        BSONObj obj = cursor->next();
        out << "    " << obj.jsonString();
    }
    out << "\n  ],\n\n";

    out << "  \"total_rows\" : " << howMany << " ,\n";
    out << "  \"query\" : " << query.jsonString() << " ,\n";
    out << "  \"millis\" : " << t.millis() << "\n";
    out << "}\n";
}
/* ****************************************************************************
*
* mongoSubscribeContext -
*/
HttpStatusCode mongoSubscribeContext(SubscribeContextRequest* requestP, SubscribeContextResponse* responseP, Format inFormat)
{
    /* Take semaphore. The LM_S* family of macros combines semaphore release with return */
    semTake();

    LM_T(LmtMongo, ("Subscribe Context Request"));

    DBClientConnection* connection = getMongoConnection();

    /* If expiration is not present, then use a default one */
    if (requestP->duration.isEmpty()) {
        requestP->duration.set(DEFAULT_DURATION);
    }

    /* Calculate expiration (using the current time and the duration field in the request) */
    long long expiration = getCurrentTime() + requestP->duration.parse();
    LM_T(LmtMongo, ("Subscription expiration: %lu", expiration));

    /* Create the mongoDB subscription document */
    BSONObjBuilder sub;
    OID oid;
    oid.init();
    sub.append("_id", oid);
    sub.append(CSUB_EXPIRATION, expiration);
    sub.append(CSUB_REFERENCE, requestP->reference.get());

    /* Throttling */
    if (!requestP->throttling.isEmpty()) {
        sub.append(CSUB_THROTTLING, requestP->throttling.parse());
    }

    /* Build entities array */
    BSONArrayBuilder entities;
    for (unsigned int ix = 0; ix < requestP->entityIdVector.size(); ++ix) {
        EntityId* en = requestP->entityIdVector.get(ix);
        entities.append(BSON(CSUB_ENTITY_ID << en->id <<
                             CSUB_ENTITY_TYPE << en->type <<
                             CSUB_ENTITY_ISPATTERN << en->isPattern));
    }
    sub.append(CSUB_ENTITIES, entities.arr());

    /* Build attributes array */
    BSONArrayBuilder attrs;
    for (unsigned int ix = 0; ix < requestP->attributeList.size(); ++ix) {
        attrs.append(requestP->attributeList.get(ix));
    }
    sub.append(CSUB_ATTRS, attrs.arr());

    /* Build conditions array (including side-effect notifications and threads creation) */
    bool notificationDone = false;
    BSONArray conds = processConditionVector(&requestP->notifyConditionVector,
                                             requestP->entityIdVector,
                                             requestP->attributeList,
                                             oid.str(),
                                             requestP->reference.get(),
                                             &notificationDone,
                                             inFormat);
    sub.append(CSUB_CONDITIONS, conds);
    if (notificationDone) {
        sub.append(CSUB_LASTNOTIFICATION, getCurrentTime());
        sub.append(CSUB_COUNT, 1);
    }

    /* Adding format to use in notifications */
    sub.append(CSUB_FORMAT, std::string(formatToString(inFormat)));

    /* Insert document in database */
    BSONObj subDoc = sub.obj();
    try {
        LM_T(LmtMongo, ("insert() in '%s' collection: '%s'", getSubscribeContextCollectionName(), subDoc.toString().c_str()));
        connection->insert(getSubscribeContextCollectionName(), subDoc);
    }
    catch( const DBException &e ) {
        responseP->subscribeError.errorCode.fill(
            SccReceiverInternalError,
            std::string("collection: ") + getSubscribeContextCollectionName() +
            " - insert(): " + subDoc.toString() +
            " - exception: " + e.what());

        LM_SRE(SccOk,("Database error '%s'", responseP->subscribeError.errorCode.reasonPhrase.c_str()));
    }

    /* Fill the response element */
    responseP->subscribeResponse.duration = requestP->duration;
    responseP->subscribeResponse.subscriptionId.set(oid.str());
    responseP->subscribeResponse.throttling = requestP->throttling;

    LM_SR(SccOk);
}
bool _run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    if( cmdObj["replSetReconfig"].type() != Object ) {
        errmsg = "no configuration specified";
        return false;
    }

    bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
    if( force && !theReplSet ) {
        replSettings.reconfig = cmdObj["replSetReconfig"].Obj().getOwned();
        result.append("msg", "will try this config momentarily, try running rs.conf() again in a few seconds");
        return true;
    }

    if ( !check(errmsg, result) ) {
        return false;
    }

    if( !force && !theReplSet->box.getState().primary() ) {
        errmsg = "replSetReconfig command must be sent to the current replica set primary.";
        return false;
    }

    {
        // just make sure we can get a write lock before doing anything else. we'll
        // reacquire one later. of course it could be stuck then, but this check lowers the
        // risk if weird things are up - we probably don't want a change to apply 30 minutes
        // after the initial attempt.
        time_t t = time(0);
        Lock::GlobalWrite lk;
        if( time(0)-t > 20 ) {
            errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
            return false;
        }
    }

    try {
        scoped_ptr<ReplSetConfig> newConfig(
            ReplSetConfig::make(cmdObj["replSetReconfig"].Obj(), force));

        log() << "replSet replSetReconfig config object parses ok, "
              << newConfig->members.size() << " members specified" << rsLog;

        if( !ReplSetConfig::legalChange(theReplSet->getConfig(), *newConfig, errmsg) ) {
            return false;
        }

        checkMembersUpForConfigChange(*newConfig, result, false);

        log() << "replSet replSetReconfig [2]" << rsLog;

        theReplSet->haveNewConfig(*newConfig, true);
        ReplSet::startupStatusMsg.set("replSetReconfig'd");
    }
    catch( DBException& e ) {
        log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
        throw;
    }
    catch( string& se ) {
        log() << "replSet reconfig exception: " << se << rsLog;
        errmsg = se;
        return false;
    }

    resetSlaveCache();
    return true;
}
Status MMAPV1DatabaseCatalogEntry::renameCollection( OperationContext* txn,
                                                     const StringData& fromNS,
                                                     const StringData& toNS,
                                                     bool stayTemp ) {
    Status s = _renameSingleNamespace( txn, fromNS, toNS, stayTemp );
    if ( !s.isOK() )
        return s;

    NamespaceDetails* details = _namespaceIndex.details( toNS );
    invariant( details );

    RecordStoreV1Base* systemIndexRecordStore = _getIndexRecordStore( txn );
    scoped_ptr<RecordIterator> it( systemIndexRecordStore->getIterator() );

    while ( !it->isEOF() ) {
        DiskLoc loc = it->getNext();
        const Record* rec = it->recordFor( loc );
        BSONObj oldIndexSpec( rec->data() );
        if ( fromNS != oldIndexSpec["ns"].valuestrsafe() )
            continue;

        BSONObj newIndexSpec;
        {
            BSONObjBuilder b;
            BSONObjIterator i( oldIndexSpec );
            while( i.more() ) {
                BSONElement e = i.next();
                if ( strcmp( e.fieldName(), "ns" ) != 0 )
                    b.append( e );
                else
                    b << "ns" << toNS;
            }
            newIndexSpec = b.obj();
        }

        StatusWith<DiskLoc> newIndexSpecLoc =
            systemIndexRecordStore->insertRecord( txn,
                                                  newIndexSpec.objdata(),
                                                  newIndexSpec.objsize(),
                                                  -1 );
        if ( !newIndexSpecLoc.isOK() )
            return newIndexSpecLoc.getStatus();

        const string& indexName = oldIndexSpec.getStringField( "name" );

        {
            // fix IndexDetails pointer
            NamespaceDetailsCollectionCatalogEntry ce( toNS,
                                                       details,
                                                       _getIndexRecordStore( txn ),
                                                       this );
            int indexI = ce._findIndexNumber( indexName );

            IndexDetails& indexDetails = details->idx(indexI);
            *txn->recoveryUnit()->writing(&indexDetails.info) = newIndexSpecLoc.getValue(); // XXX: dur
        }

        {
            // move underlying namespace
            string oldIndexNs = IndexDescriptor::makeIndexNamespace( fromNS, indexName );
            string newIndexNs = IndexDescriptor::makeIndexNamespace( toNS, indexName );

            Status s = _renameSingleNamespace( txn, oldIndexNs, newIndexNs, false );
            if ( !s.isOK() )
                return s;
        }

        systemIndexRecordStore->deleteRecord( txn, loc );
    }

    return Status::OK();
}
bool RunOnAllShardsCommand::run(OperationContext* txn,
                                const std::string& dbName,
                                BSONObj& cmdObj,
                                int options,
                                std::string& errmsg,
                                BSONObjBuilder& output) {
    LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj;

    if (_implicitCreateDb) {
        uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
    }

    std::vector<ShardId> shardIds;
    getShardIds(txn, dbName, cmdObj, shardIds);

    std::list<std::shared_ptr<Future::CommandResult>> futures;
    for (const ShardId& shardId : shardIds) {
        const auto shard = grid.shardRegistry()->getShard(txn, shardId);
        if (!shard) {
            continue;
        }

        futures.push_back(Future::spawnCommand(
            shard->getConnString().toString(), dbName, cmdObj, 0, NULL, _useShardConn));
    }

    std::vector<ShardAndReply> results;
    BSONObjBuilder subobj(output.subobjStart("raw"));
    BSONObjBuilder errors;
    int commonErrCode = -1;

    std::list<std::shared_ptr<Future::CommandResult>>::iterator futuresit;
    std::vector<ShardId>::const_iterator shardIdsIt;

    BSONElement wcErrorElem;
    ShardId wcErrorShardId;
    bool hasWCError = false;

    // We iterate over the set of shard ids and their corresponding futures in parallel.
    // TODO: replace with zip iterator if we ever decide to use one from Boost or elsewhere
    for (futuresit = futures.begin(), shardIdsIt = shardIds.cbegin();
         futuresit != futures.end() && shardIdsIt != shardIds.end();
         ++futuresit, ++shardIdsIt) {
        std::shared_ptr<Future::CommandResult> res = *futuresit;

        if (res->join(txn)) {
            // success :)
            BSONObj result = res->result();
            results.emplace_back(shardIdsIt->toString(), result);
            subobj.append(res->getServer(), result);

            if (!hasWCError) {
                if ((wcErrorElem = result["writeConcernError"])) {
                    wcErrorShardId = *shardIdsIt;
                    hasWCError = true;
                }
            }
            continue;
        }

        BSONObj result = res->result();

        if (!hasWCError) {
            if ((wcErrorElem = result["writeConcernError"])) {
                wcErrorShardId = *shardIdsIt;
                hasWCError = true;
            }
        }

        if (result["errmsg"].type() || result["code"].numberInt() != 0) {
            result = specialErrorHandler(res->getServer(), dbName, cmdObj, result);

            BSONElement errmsgObj = result["errmsg"];
            if (errmsgObj.eoo() || errmsgObj.String().empty()) {
                // it was fixed!
                results.emplace_back(shardIdsIt->toString(), result);
                subobj.append(res->getServer(), result);
                continue;
            }
        }

        // Handle "errmsg".
        if (!result["errmsg"].eoo()) {
            errors.appendAs(result["errmsg"], res->getServer());
        }
        else {
            // Can happen if message is empty, for some reason
            errors.append(res->getServer(),
                          str::stream() << "result without error message returned : " << result);
        }

        // Handle "code".
        int errCode = result["code"].numberInt();
        if (commonErrCode == -1) {
            commonErrCode = errCode;
        }
        else if (commonErrCode != errCode) {
            commonErrCode = 0;
        }
        results.emplace_back(shardIdsIt->toString(), result);
        subobj.append(res->getServer(), result);
    }

    subobj.done();

    if (hasWCError) {
        appendWriteConcernErrorToCmdResponse(wcErrorShardId, wcErrorElem, output);
    }

    BSONObj errobj = errors.done();

    if (!errobj.isEmpty()) {
        errmsg = errobj.toString();

        // If every error has a code, and the code for all errors is the same, then add
        // a top-level field "code" with this value to the output object.
        if (commonErrCode > 0) {
            output.append("code", commonErrCode);
        }

        return false;
    }

    aggregateResults(results, output);
    return true;
}
virtual void append(OperationContext* txn, BSONObjBuilder& b, const string& name) {
    b.append( name, _value() );
}
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
    Timer t;
    string ns = dbname + '.' + cmdObj.firstElement().valuestr();

    string key = cmdObj["key"].valuestrsafe();
    BSONObj keyPattern = BSON( key << 1 );

    BSONObj query = getQuery( cmdObj );

    int bufSize = BSONObjMaxUserSize - 4096;
    BufBuilder bb( bufSize );
    char * start = bb.buf();

    BSONArrayBuilder arr( bb );
    BSONElementSet values;

    long long nscanned = 0;        // locations looked at
    long long nscannedObjects = 0; // full objects looked at
    long long n = 0;               // matches
    MatchDetails md;

    NamespaceDetails * d = nsdetails( ns.c_str() );

    if ( ! d ) {
        result.appendArray( "values" , BSONObj() );
        result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
        return true;
    }

    shared_ptr<Cursor> cursor;
    if ( ! query.isEmpty() ) {
        cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
    }
    else {
        // query is empty, so lets see if we can find an index
        // with the key so we don't have to hit the raw data
        NamespaceDetails::IndexIterator ii = d->ii();
        while ( ii.more() ) {
            IndexDetails& idx = ii.next();

            if ( d->isMultikey( ii.pos() - 1 ) )
                continue;

            if ( idx.inKeyPattern( key ) ) {
                cursor = NamespaceDetailsTransient::bestGuessCursor( ns.c_str() ,
                                                                     BSONObj() ,
                                                                     idx.keyPattern() );
                if( cursor.get() )
                    break;
            }
        }

        if ( ! cursor.get() )
            cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
    }

    verify( cursor );
    string cursorName = cursor->toString();

    auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));

    while ( cursor->ok() ) {
        nscanned++;
        bool loadedRecord = false;

        if ( cursor->currentMatches( &md ) && !cursor->getsetdup( cursor->currLoc() ) ) {
            n++;

            BSONObj holder;
            BSONElementSet temp;
            loadedRecord = ! cc->getFieldsDotted( key , temp, holder );

            for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
                BSONElement e = *i;
                if ( values.count( e ) )
                    continue;

                int now = bb.len();

                uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );

                arr.append( e );
                BSONElement x( start + now );

                values.insert( x );
            }
        }

        if ( loadedRecord || md.hasLoadedRecord() )
            nscannedObjects++;

        cursor->advance();

        if (!cc->yieldSometimes( ClientCursor::MaybeCovered )) {
            cc.release();
            break;
        }

        RARELY killCurrentOp.checkForInterrupt();
    }

    verify( start == bb.buf() );

    result.appendArray( "values" , arr.done() );

    {
        BSONObjBuilder b;
        b.appendNumber( "n" , n );
        b.appendNumber( "nscanned" , nscanned );
        b.appendNumber( "nscannedObjects" , nscannedObjects );
        b.appendNumber( "timems" , t.millis() );
        b.append( "cursor" , cursorName );
        result.append( "stats" , b.obj() );
    }

    return true;
}
void Client::appendLastOp( BSONObjBuilder& b ) const {
    // _lastOp is never set if replication is off
    if( theReplSet || ! _lastOp.isNull() ) {
        b.appendTimestamp( "lastOp" , _lastOp.asDate() );
    }
}
int run() {
    string ns;
    const bool csv = hasParam( "csv" );
    const bool jsonArray = hasParam( "jsonArray" );
    ostream *outPtr = &cout;
    string outfile = getParam( "out" );
    auto_ptr<ofstream> fileStream;

    if ( hasParam( "out" ) ) {
        size_t idx = outfile.rfind( "/" );
        if ( idx != string::npos ) {
            string dir = outfile.substr( 0 , idx + 1 );
            boost::filesystem::create_directories( dir );
        }
        ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
        fileStream.reset( s );
        outPtr = s;
        if ( ! s->good() ) {
            cerr << "couldn't open [" << outfile << "]" << endl;
            return -1;
        }
    }
    ostream &out = *outPtr;

    BSONObj * fieldsToReturn = 0;
    BSONObj realFieldsToReturn;

    try {
        ns = getNS();
    }
    catch (...) {
        printHelp(cerr);
        return 1;
    }

    auth();

    if ( hasParam( "fields" ) || csv ) {
        needFields();

        // we can't use just _fieldsObj since we support everything getFieldDotted does
        set<string> seen;
        BSONObjBuilder b;
        BSONObjIterator i( _fieldsObj );
        while ( i.more() ) {
            BSONElement e = i.next();
            string f = str::before( e.fieldName() , '.' );
            if ( seen.insert( f ).second )
                b.append( f , 1 );
        }

        realFieldsToReturn = b.obj();
        fieldsToReturn = &realFieldsToReturn;
    }

    if ( csv && _fields.size() == 0 ) {
        cerr << "csv mode requires a field list" << endl;
        return -1;
    }

    Query q( getParam( "query" , "" ) );
    if ( q.getFilter().isEmpty() && !hasParam("dbpath") )
        q.snapshot();

    bool slaveOk = _params["slaveOk"].as<bool>();

    auto_ptr<DBClientCursor> cursor =
        conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn ,
                      ( slaveOk ? QueryOption_SlaveOk : 0 ) | QueryOption_NoCursorTimeout );

    if ( csv ) {
        for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
            if ( i != _fields.begin() )
                out << ",";
            out << *i;
        }
        out << endl;
    }

    if (jsonArray)
        out << '[';

    long long num = 0;
    while ( cursor->more() ) {
        num++;
        BSONObj obj = cursor->next();

        if ( csv ) {
            for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
                if ( i != _fields.begin() )
                    out << ",";
                const BSONElement & e = obj.getFieldDotted(i->c_str());
                if ( ! e.eoo() ) {
                    out << csvString(e);
                }
            }
            out << endl;
        }
        else {
            if (jsonArray && num != 1)
                out << ',';

            out << obj.jsonString();

            if (!jsonArray)
                out << endl;
        }
    }

    if (jsonArray)
        out << ']' << endl;

    cerr << "exported " << num << " records" << endl;

    return 0;
}
BSONObj getBuildInfo(const BSONObj& a, void* data) {
    uassert( 16822, "getBuildInfo accepts no arguments", a.nFields() == 0 );
    BSONObjBuilder b;
    appendBuildInfo(b);
    return BSON( "" << b.done() );
}
bool run(const string& dbname , BSONObj& cmd, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
    Timer t;
    Client::GodScope cg;
    Client& client = cc();
    CurOp * op = client.curop();

    MRSetup mr( dbname , cmd );

    log(1) << "mr ns: " << mr.ns << endl;

    if ( ! db.exists( mr.ns ) ) {
        errmsg = "ns doesn't exist";
        return false;
    }

    bool shouldHaveData = false;

    long long num = 0;
    long long inReduce = 0;

    BSONObjBuilder countsBuilder;
    BSONObjBuilder timingBuilder;
    try {
        MRState state( mr );
        state.scope->injectNative( "emit" , fast_emit );

        MRTL * mrtl = new MRTL( state );
        _tlmr.reset( mrtl );

        ProgressMeterHolder pm( op->setMessage( "m/r: (1/3) emit phase" ,
                                                db.count( mr.ns , mr.filter ) ) );
        long long mapTime = 0;
        {
            readlock lock( mr.ns );
            Client::Context ctx( mr.ns );

            shared_ptr<Cursor> temp = bestGuessCursor( mr.ns.c_str(), mr.filter, mr.sort );
            auto_ptr<ClientCursor> cursor(
                new ClientCursor( QueryOption_NoCursorTimeout , temp , mr.ns.c_str() ) );

            Timer mt;
            while ( cursor->ok() ) {
                if ( ! cursor->currentMatches() ) {
                    cursor->advance();
                    continue;
                }

                BSONObj o = cursor->current();
                cursor->advance();

                if ( mr.verbose ) mt.reset();

                state.scope->setThis( &o );
                if ( state.scope->invoke( state.map , state.setup.mapparams , 0 , true ) )
                    throw UserException( 9014, (string)"map invoke failed: " + state.scope->getError() );

                if ( mr.verbose ) mapTime += mt.micros();

                num++;
                if ( num % 100 == 0 ) {
                    ClientCursor::YieldLock yield (cursor.get());
                    Timer t;
                    mrtl->checkSize();
                    inReduce += t.micros();

                    if ( ! yield.stillOk() ) {
                        cursor.release();
                        break;
                    }

                    killCurrentOp.checkForInterrupt();
                }
                pm.hit();

                if ( mr.limit && num >= mr.limit )
                    break;
            }
        }
        pm.finished();

        killCurrentOp.checkForInterrupt();

        countsBuilder.appendNumber( "input" , num );
        countsBuilder.appendNumber( "emit" , mrtl->numEmits );
        if ( mrtl->numEmits )
            shouldHaveData = true;

        timingBuilder.append( "mapTime" , mapTime / 1000 );
        timingBuilder.append( "emitLoop" , t.millis() );

        // final reduce
        op->setMessage( "m/r: (2/3) final reduce in memory" );
        mrtl->reduceInMemory();
        mrtl->dump();

        BSONObj sortKey = BSON( "0" << 1 );
        db.ensureIndex( mr.incLong , sortKey );

        {
            writelock lock( mr.tempLong.c_str() );
            Client::Context ctx( mr.tempLong.c_str() );
            assert( userCreateNS( mr.tempLong.c_str() , BSONObj() , errmsg , mr.replicate ) );
        }

        {
            readlock rl(mr.incLong.c_str());
            Client::Context ctx( mr.incLong );

            BSONObj prev;
            BSONList all;

            assert( pm == op->setMessage( "m/r: (3/3) final reduce to collection" ,
                                          db.count( mr.incLong ) ) );

            shared_ptr<Cursor> temp = bestGuessCursor( mr.incLong.c_str() , BSONObj() , sortKey );
            auto_ptr<ClientCursor> cursor(
                new ClientCursor( QueryOption_NoCursorTimeout , temp , mr.incLong.c_str() ) );

            while ( cursor->ok() ) {
                BSONObj o = cursor->current().getOwned();
                cursor->advance();

                pm.hit();

                if ( o.woSortOrder( prev , sortKey ) == 0 ) {
                    all.push_back( o );
                    if ( pm->hits() % 1000 == 0 ) {
                        if ( ! cursor->yield() ) {
                            cursor.release();
                            break;
                        }
                        killCurrentOp.checkForInterrupt();
                    }
                    continue;
                }

                ClientCursor::YieldLock yield (cursor.get());
                state.finalReduce( all );

                all.clear();
                prev = o;
                all.push_back( o );

                if ( ! yield.stillOk() ) {
                    cursor.release();
                    break;
                }

                killCurrentOp.checkForInterrupt();
            }

            {
                dbtempreleasecond tl;
                if ( ! tl.unlocked() )
                    log( LL_WARNING ) << "map/reduce can't temp release" << endl;
                state.finalReduce( all );
            }

            pm.finished();
        }

        _tlmr.reset( 0 );
    }
    catch ( ... ) {
        log() << "mr failed, removing collection" << endl;
        db.dropCollection( mr.tempLong );
        db.dropCollection( mr.incLong );
        throw;
    }

    long long finalCount = 0;
    {
        dblock lock;
        db.dropCollection( mr.incLong );
        finalCount = mr.renameIfNeeded( db );
    }

    timingBuilder.append( "total" , t.millis() );

    result.append( "result" , mr.finalShort );
    result.append( "timeMillis" , t.millis() );
    countsBuilder.appendNumber( "output" , finalCount );
    if ( mr.verbose ) result.append( "timing" , timingBuilder.obj() );
    result.append( "counts" , countsBuilder.obj() );

    if ( finalCount == 0 && shouldHaveData ) {
        result.append( "cmd" , cmd );
        errmsg = "there were emits but no data!";
        return false;
    }

    return true;
}
Interval IndexBoundsBuilder::allValues() {
    BSONObjBuilder bob;
    bob.appendMinKey("");
    bob.appendMaxKey("");
    return makeRangeInterval(bob.obj(), true, true);
}
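// Hedged note (an inference from the builder above, not documented behavior elsewhere):
// allValues() yields the interval [MinKey, MaxKey] with both endpoints inclusive, i.e.
// it matches every BSON value. This is the interval translate() falls back to when it
// logs "building lazy bounds".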
bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();

    MRSetup mr( dbname , cmdObj.firstElement().embeddedObjectUserCheck() , false );

    set<ServerAndQuery> servers;

    BSONObjBuilder shardCounts;
    map<string,long long> counts;

    BSONObj shards = cmdObj["shards"].embeddedObjectUserCheck();
    vector< auto_ptr<DBClientCursor> > shardCursors;

    { // parse per shard results
        BSONObjIterator i( shards );
        while ( i.more() ) {
            BSONElement e = i.next();
            string shard = e.fieldName();

            BSONObj res = e.embeddedObjectUserCheck();

            uassert( 10078 , "something bad happened" ,
                     shardedOutputCollection == res["result"].valuestrsafe() );
            servers.insert( shard );
            shardCounts.appendAs( res["counts"] , shard );

            BSONObjIterator j( res["counts"].embeddedObjectUserCheck() );
            while ( j.more() ) {
                BSONElement temp = j.next();
                counts[temp.fieldName()] += temp.numberLong();
            }
        }
    }

    DBDirectClient db;

    { // reduce from each stream
        BSONObj sortKey = BSON( "_id" << 1 );

        ParallelSortClusteredCursor cursor( servers , dbname + "." + shardedOutputCollection ,
                                            Query().sort( sortKey ) );
        cursor.init();

        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbname );
        s->localConnect( dbname.c_str() );
        ScriptingFunction reduceFunction = s->createFunction( mr.reduceCode.c_str() );
        ScriptingFunction finalizeFunction = 0;
        if ( mr.finalizeCode.size() )
            finalizeFunction = s->createFunction( mr.finalizeCode.c_str() );

        BSONList values;

        result.append( "result" , mr.finalShort );

        while ( cursor.more() ) {
            BSONObj t = cursor.next().getOwned();

            if ( values.size() == 0 ) {
                values.push_back( t );
                continue;
            }

            if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
                values.push_back( t );
                continue;
            }

            db.insert( mr.tempLong ,
                       reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
            values.clear();
            values.push_back( t );
        }

        if ( values.size() )
            db.insert( mr.tempLong ,
                       reduceValues( values , s.get() , reduceFunction , 1 , finalizeFunction ) );
    }

    long long finalCount = mr.renameIfNeeded( db );
    log(0) << " mapreducefinishcommand " << mr.finalLong << " " << finalCount << endl;

    for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
        ScopedDbConnection conn( i->_server );
        conn->dropCollection( dbname + "." + shardedOutputCollection );
        conn.done();
    }

    result.append( "shardCounts" , shardCounts.obj() );

    {
        BSONObjBuilder c;
        for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); i++ ) {
            c.append( i->first , i->second );
        }
        result.append( "counts" , c.obj() );
    }

    return 1;
}
bool FTSCommand::_run(const string& dbName,
                      BSONObj& cmdObj,
                      int cmdOptions,
                      const string& ns,
                      const string& searchString,
                      string language, // "" for not-set
                      int limit,
                      BSONObj& filter,
                      BSONObj& projection,
                      string& errmsg,
                      BSONObjBuilder& result ) {
    Timer timer;

    map<Shard, BSONObj> results;
    SHARDED->commandOp( dbName, cmdObj, cmdOptions, ns, filter, results );

    vector<Scored> all;
    long long nscanned = 0;
    long long nscannedObjects = 0;

    BSONObjBuilder shardStats;

    for ( map<Shard,BSONObj>::const_iterator i = results.begin(); i != results.end(); ++i ) {
        BSONObj r = i->second;

        LOG(2) << "fts result for shard: " << i->first << "\n" << r << endl;

        if ( !r["ok"].trueValue() ) {
            errmsg = str::stream() << "failure on shard: " << i->first.toString()
                                   << ": " << r["errmsg"];
            result.append( "rawresult", r );
            return false;
        }

        if ( r["stats"].isABSONObj() ) {
            BSONObj x = r["stats"].Obj();
            nscanned += x["nscanned"].numberLong();
            nscannedObjects += x["nscannedObjects"].numberLong();

            shardStats.append( i->first.getName(), x );
        }

        if ( r["results"].isABSONObj() ) {
            BSONObjIterator j( r["results"].Obj() );
            while ( j.more() ) {
                BSONElement e = j.next();
                all.push_back( Scored(e.Obj()) );
            }
        }
    }

    sort( all.begin(), all.end() );

    long long n = 0;
    {
        BSONArrayBuilder arr( result.subarrayStart( "results" ) );
        for ( unsigned i = 0; i < all.size(); i++ ) {
            arr.append( all[i].full );
            if ( ++n >= limit )
                break;
        }
        arr.done();
    }

    {
        BSONObjBuilder stats( result.subobjStart( "stats" ) );
        stats.appendNumber( "nscanned", nscanned );
        stats.appendNumber( "nscannedObjects", nscannedObjects );
        stats.appendNumber( "n", n );
        stats.append( "timeMicros", (int)timer.micros() );
        stats.append( "shards", shardStats.obj() );
        stats.done();
    }

    return true;
}
void DocumentSourceSort::sourceToBson( BSONObjBuilder *pBuilder, bool explain) const {
    BSONObjBuilder insides;
    sortKeyToBson(&insides, false);
    pBuilder->append(sortName, insides.done());
}
bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* opCtx,
                                                              const std::string& commandName,
                                                              const std::string& dbname,
                                                              const BSONObj& cmdObj,
                                                              BSONObjBuilder* result) {
    BSONObj cmdToRun = cmdObj;
    {
        // Make sure that if the command has a write concern that it is w:1 or w:majority, and
        // convert w:1 or no write concern to w:majority before sending.
        WriteConcernOptions writeConcern;
        writeConcern.reset();

        BSONElement writeConcernElement = cmdObj[WriteConcernOptions::kWriteConcernField];
        bool initialCmdHadWriteConcern = !writeConcernElement.eoo();
        if (initialCmdHadWriteConcern) {
            Status status = writeConcern.parse(writeConcernElement.Obj());
            if (!status.isOK()) {
                return CommandHelpers::appendCommandStatusNoThrow(*result, status);
            }

            if (!(writeConcern.wNumNodes == 1 ||
                  writeConcern.wMode == WriteConcernOptions::kMajority)) {
                return CommandHelpers::appendCommandStatusNoThrow(
                    *result,
                    {ErrorCodes::InvalidOptions,
                     str::stream() << "Invalid replication write concern. User management write "
                                      "commands may only use w:1 or w:'majority', got: "
                                   << writeConcern.toBSON()});
            }
        }

        writeConcern.wMode = WriteConcernOptions::kMajority;
        writeConcern.wNumNodes = 0;

        BSONObjBuilder modifiedCmd;
        if (!initialCmdHadWriteConcern) {
            modifiedCmd.appendElements(cmdObj);
        } else {
            BSONObjIterator cmdObjIter(cmdObj);
            while (cmdObjIter.more()) {
                BSONElement e = cmdObjIter.next();
                if (WriteConcernOptions::kWriteConcernField == e.fieldName()) {
                    continue;
                }
                modifiedCmd.append(e);
            }
        }
        modifiedCmd.append(WriteConcernOptions::kWriteConcernField, writeConcern.toBSON());
        cmdToRun = modifiedCmd.obj();
    }

    auto response =
        Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
            opCtx,
            ReadPreferenceSetting{ReadPreference::PrimaryOnly},
            dbname,
            cmdToRun,
            Shard::kDefaultConfigCommandTimeout,
            Shard::RetryPolicy::kNotIdempotent);

    if (!response.isOK()) {
        return CommandHelpers::appendCommandStatusNoThrow(*result, response.getStatus());
    }
    if (!response.getValue().commandStatus.isOK()) {
        return CommandHelpers::appendCommandStatusNoThrow(*result,
                                                          response.getValue().commandStatus);
    }
    if (!response.getValue().writeConcernStatus.isOK()) {
        return CommandHelpers::appendCommandStatusNoThrow(*result,
                                                          response.getValue().writeConcernStatus);
    }

    CommandHelpers::filterCommandReplyForPassthrough(response.getValue().response, result);
    return true;
}
void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
                                      const BSONObj& keyPattern,
                                      const S2IndexingParams& params,
                                      BSONObjSet* keys) {
    BSONObjSet keysToAdd;

    // Does one of our documents have a geo field?
    bool haveGeoField = false;

    // We output keys in the same order as the fields we index.
    BSONObjIterator i(keyPattern);
    while (i.more()) {
        BSONElement e = i.next();

        // First, we get the keys that this field adds.  Either they're added literally from
        // the value of the field, or they're transformed if the field is geo.
        BSONElementSet fieldElements;
        // false means don't expand the last array.
        obj.getFieldsDotted(e.fieldName(), fieldElements, false);

        BSONObjSet keysForThisField;
        if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
            if (S2_INDEX_VERSION_2 == params.indexVersion) {
                // For V2,
                // geo: null,
                // geo: undefined
                // geo: []
                // should all behave like there is no geo field.  So we look for these cases
                // and throw out the field elements if we find them.
                if (1 == fieldElements.size()) {
                    BSONElement elt = *fieldElements.begin();
                    // Get the :null and :undefined cases.
                    if (elt.isNull() || Undefined == elt.type()) {
                        fieldElements.clear();
                    } else if (elt.isABSONObj()) {
                        // And this is the :[] case.
                        BSONObj obj = elt.Obj();
                        if (0 == obj.nFields()) {
                            fieldElements.clear();
                        }
                    }
                }

                // V2 2dsphere indices require at least one geo field to be present in a
                // document in order to index it.
                if (fieldElements.size() > 0) {
                    haveGeoField = true;
                }
            }

            getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
        } else {
            getS2LiteralKeys(fieldElements, &keysForThisField);
        }

        // We expect the missing-field element to be present in the keys if data is
        // missing.  So, this should be non-empty.
        verify(!keysForThisField.empty());

        // We take the Cartesian product of all of the keys.  This requires that we have
        // some keys to take the Cartesian product with.  If keysToAdd.empty(), we
        // initialize it.
        if (keysToAdd.empty()) {
            keysToAdd = keysForThisField;
            continue;
        }

        BSONObjSet updatedKeysToAdd;
        for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
            for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
                 newIt != keysForThisField.end();
                 ++newIt) {
                BSONObjBuilder b;
                b.appendElements(*it);
                b.append(newIt->firstElement());
                updatedKeysToAdd.insert(b.obj());
            }
        }
        keysToAdd = updatedKeysToAdd;
    }

    // Make sure that if we're V2 there's at least one geo field present in the doc.
    if (S2_INDEX_VERSION_2 == params.indexVersion) {
        if (!haveGeoField) {
            return;
        }
    }

    if (keysToAdd.size() > params.maxKeysPerInsert) {
        warning() << "insert of geo object generated lots of keys (" << keysToAdd.size()
                  << ") consider creating larger buckets. obj=" << obj;
    }

    *keys = keysToAdd;
}