void appendReplicationInfo(OperationContext* opCtx, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = ReplicationCoordinator::get(opCtx);
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    result.appendBool("ismaster",
                      ReplicationCoordinator::get(opCtx)->isMasterForReportingPurposes());

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const NamespaceString localSources{"local.sources"};
            AutoGetCollectionForReadCommand ctx(opCtx, localSources);
            auto exec = InternalPlanner::collectionScan(
                opCtx, localSources.ns(), ctx.getCollection(), PlanExecutor::NO_YIELD);
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                src.push_back(obj.getOwned());
            }

            // Non-yielding collection scans from InternalPlanner will never error.
            invariant(PlanExecutor::IS_EOF == state);
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);
            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                invariant(!opCtx->lockState()->isLocked());
                // note: there is no so-style timeout on this connection; perhaps we should have
                // one.
                ScopedDbConnection conn(s["host"].valuestr());

                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag =
                        (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& cmdObj,
         int,
         string& errmsg,
         BSONObjBuilder& result) {
    if (!cmdObj["start"].eoo()) {
        errmsg = "using deprecated 'start' argument to geoNear";
        return false;
    }

    const NamespaceString nss(parseNs(dbname, cmdObj));
    AutoGetCollectionForRead ctx(txn, nss);

    Collection* collection = ctx.getCollection();
    if (!collection) {
        errmsg = "can't find ns";
        return false;
    }

    IndexCatalog* indexCatalog = collection->getIndexCatalog();

    // We seek to populate this.
    string nearFieldName;
    bool using2DIndex = false;
    if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
        return false;
    }

    PointWithCRS point;
    uassert(17304,
            "'near' field must be point",
            GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

    bool isSpherical = cmdObj["spherical"].trueValue();
    if (!using2DIndex) {
        uassert(17301, "2dsphere index must have spherical: true", isSpherical);
    }

    // Build the $near expression for the query.
    BSONObjBuilder nearBob;
    if (isSpherical) {
        nearBob.append("$nearSphere", cmdObj["near"].Obj());
    } else {
        nearBob.append("$near", cmdObj["near"].Obj());
    }

    if (!cmdObj["maxDistance"].eoo()) {
        uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
        nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
    }

    if (!cmdObj["minDistance"].eoo()) {
        uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
        uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
        nearBob.append("$minDistance", cmdObj["minDistance"].number());
    }

    if (!cmdObj["uniqueDocs"].eoo()) {
        warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
    }

    // And, build the full query expression.
    BSONObjBuilder queryBob;
    queryBob.append(nearFieldName, nearBob.obj());
    if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
        queryBob.appendElements(cmdObj["query"].Obj());
    }
    BSONObj rewritten = queryBob.obj();

    // Extract the collation, if it exists.
    // TODO SERVER-23473: Pass this collation spec object down so that it can be converted into
    // a CollatorInterface.
    BSONObj collation;
    {
        BSONElement collationElt;
        Status collationEltStatus =
            bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
        if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
            return appendCommandStatus(result, collationEltStatus);
        }
        if (collationEltStatus.isOK()) {
            collation = collationElt.Obj();
        }
    }

    long long numWanted = 100;
    const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
    BSONElement eNumWanted = cmdObj[limitName];
    if (!eNumWanted.eoo()) {
        uassert(17303, "limit must be number", eNumWanted.isNumber());
        numWanted = eNumWanted.safeNumberLong();
        uassert(17302, "limit must be >=0", numWanted >= 0);
    }

    bool includeLocs = false;
    if (!cmdObj["includeLocs"].eoo()) {
        includeLocs = cmdObj["includeLocs"].trueValue();
    }

    double distanceMultiplier = 1.0;
    BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
    if (!eDistanceMultiplier.eoo()) {
        uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
        distanceMultiplier = eDistanceMultiplier.number();
        uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
    }

    BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
                                 << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

    const ExtensionsCallbackReal extensionsCallback(txn, &nss);
    auto statusWithCQ = CanonicalQuery::canonicalize(
        nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), extensionsCallback);
    if (!statusWithCQ.isOK()) {
        errmsg = "Can't parse filter / create query";
        return false;
    }
    unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

    // Prevent chunks from being cleaned up during yields - this allows us to only check the
    // version on initial entry into geoNear.
    RangePreserver preserver(collection);

    auto statusWithPlanExecutor =
        getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
    if (!statusWithPlanExecutor.isOK()) {
        errmsg = "can't get query executor";
        return false;
    }
    unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

    auto curOp = CurOp::get(txn);
    {
        // The guard must be a named local; an unnamed temporary would be destroyed
        // immediately and hold the client lock for no time at all.
        stdx::lock_guard<Client> lk(*txn->getClient());
        curOp->setPlanSummary_inlock(Explain::getPlanSummary(exec.get()));
    }

    double totalDistance = 0;
    BSONObjBuilder resultBuilder(result.subarrayStart("results"));
    double farthestDist = 0;

    BSONObj currObj;
    long long results = 0;
    PlanExecutor::ExecState state;
    while (PlanExecutor::ADVANCED == (state = exec->getNext(&currObj, NULL))) {
        // Come up with the correct distance.
        double dist = currObj["$dis"].number() * distanceMultiplier;
        totalDistance += dist;
        if (dist > farthestDist) {
            farthestDist = dist;
        }

        // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
        // in the command result.
        BSONObjIterator resIt(currObj);
        BSONObjBuilder resBob;
        while (resIt.more()) {
            BSONElement elt = resIt.next();
            if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
                !mongoutils::str::equals("$dis", elt.fieldName())) {
                resBob.append(elt);
            }
        }
        BSONObj resObj = resBob.obj();

        // Don't make a too-big result object.
        if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
            warning() << "Too many geoNear results for query " << rewritten.toString()
                      << ", truncating output.";
            break;
        }

        // Add the next result to the result builder.
        BSONObjBuilder oneResultBuilder(
            resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
        oneResultBuilder.append("dis", dist);
        if (includeLocs) {
            oneResultBuilder.appendAs(currObj["$pt"], "loc");
        }
        oneResultBuilder.append("obj", resObj);
        oneResultBuilder.done();
        ++results;

        // Break if we have the number of requested result documents.
        if (results >= numWanted) {
            break;
        }
    }

    resultBuilder.done();

    // Return an error if execution fails for any reason.
    if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
        log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
              << ", stats: " << Explain::getWinningPlanStats(exec.get());

        return appendCommandStatus(result,
                                   Status(ErrorCodes::OperationFailed,
                                          str::stream()
                                              << "Executor error during geoNear command: "
                                              << WorkingSetCommon::toStatusString(currObj)));
    }

    PlanSummaryStats summary;
    Explain::getSummaryStats(*exec, &summary);

    // Fill out the stats subobj.
    BSONObjBuilder stats(result.subobjStart("stats"));
    stats.appendNumber("nscanned", summary.totalKeysExamined);
    stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

    if (results > 0) {
        stats.append("avgDistance", totalDistance / results);
    }
    stats.append("maxDistance", farthestDist);
    stats.append("time", curOp->elapsedMillis());
    stats.done();

    collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);

    curOp->debug().setPlanSummaryMetrics(summary);

    if (curOp->shouldDBProfile(curOp->elapsedMillis())) {
        BSONObjBuilder execStatsBob;
        Explain::getWinningPlanStats(exec.get(), &execStatsBob);
        curOp->debug().execStats.set(execStatsBob.obj());
    }

    return true;
}
bool DBHashCmd::run(OperationContext* txn,
                    const string& dbname,
                    BSONObj& cmdObj,
                    int,
                    string& errmsg,
                    BSONObjBuilder& result) {
    Timer timer;

    set<string> desiredCollections;
    if (cmdObj["collections"].type() == Array) {
        BSONObjIterator i(cmdObj["collections"].Obj());
        while (i.more()) {
            BSONElement e = i.next();
            if (e.type() != String) {
                errmsg = "collections entries have to be strings";
                return false;
            }
            desiredCollections.insert(e.String());
        }
    }

    list<string> colls;
    const string ns = parseNs(dbname, cmdObj);

    // We lock the entire database in S-mode in order to ensure that the contents will not
    // change for the snapshot.
    ScopedTransaction scopedXact(txn, MODE_IS);
    AutoGetDb autoDb(txn, ns, MODE_S);
    Database* db = autoDb.getDb();
    if (db) {
        db->getDatabaseCatalogEntry()->getCollectionNamespaces(&colls);
        colls.sort();
    }

    result.appendNumber("numCollections", (long long)colls.size());
    result.append("host", prettyHostName());

    md5_state_t globalState;
    md5_init(&globalState);

    vector<string> cached;

    BSONObjBuilder bb(result.subobjStart("collections"));
    for (list<string>::iterator i = colls.begin(); i != colls.end(); i++) {
        string fullCollectionName = *i;
        if (fullCollectionName.size() - 1 <= dbname.size()) {
            errmsg = str::stream() << "weird fullCollectionName [" << fullCollectionName << "]";
            return false;
        }
        string shortCollectionName = fullCollectionName.substr(dbname.size() + 1);

        if (shortCollectionName.find("system.") == 0)
            continue;

        if (desiredCollections.size() > 0 && desiredCollections.count(shortCollectionName) == 0)
            continue;

        bool fromCache = false;
        string hash = hashCollection(txn, db, fullCollectionName, &fromCache);

        bb.append(shortCollectionName, hash);

        md5_append(&globalState, (const md5_byte_t*)hash.c_str(), hash.size());
        if (fromCache)
            cached.push_back(fullCollectionName);
    }
    bb.done();

    md5digest d;
    md5_finish(&globalState, d);
    string hash = digestToString(d);

    result.append("md5", hash);
    result.appendNumber("timeMillis", timer.millis());

    result.append("fromCache", cached);

    return true;
}
bool SyncSourceFeedback::replHandshake() {
    // handshake for us
    BSONObjBuilder cmd;
    cmd.append("replSetUpdatePosition", 1);
    BSONObjBuilder sub(cmd.subobjStart("handshake"));
    sub.appendAs(_me["_id"], "handshake");
    sub.append("member", theReplSet->selfId());
    sub.append("config", theReplSet->myConfig().asBson());
    sub.doneFast();

    LOG(1) << "detecting upstream updater";
    BSONObj res;
    try {
        if (!_connection->runCommand("admin", cmd.obj(), res)) {
            if (res["errmsg"].str().find("no such cmd") != std::string::npos) {
                LOG(1) << "upstream updater is not supported by the member from which we"
                          " are syncing, using oplogreader-based updating instead";
                _supportsUpdater = false;
            }
            resetConnection();
            return false;
        } else {
            LOG(1) << "upstream updater is supported";
            _supportsUpdater = true;
        }
    } catch (const DBException& e) {
        log() << "SyncSourceFeedback error sending handshake: " << e.what() << endl;
        resetConnection();
        return false;
    }

    // handshakes for those connected to us
    {
        OIDMemberMap::iterator itr = _members.begin();
        while (itr != _members.end()) {
            BSONObjBuilder slaveCmd;
            slaveCmd.append("replSetUpdatePosition", 1);
            // outer handshake indicates this is a handshake command
            // inner is needed as part of the structure to be passed to gotHandshake
            BSONObjBuilder slaveSub(slaveCmd.subobjStart("handshake"));
            slaveSub.append("handshake", itr->first);
            slaveSub.append("member", itr->second->id());
            slaveSub.append("config", itr->second->config().asBson());
            slaveSub.doneFast();

            BSONObj slaveRes;
            try {
                if (!_connection->runCommand("admin", slaveCmd.obj(), slaveRes)) {
                    if (slaveRes["errmsg"].str().find("node could not be found ") !=
                        std::string::npos) {
                        if (!theReplSet->getMutableMember(itr->second->id())) {
                            log() << "sync source does not have member " << itr->second->id()
                                  << " in its config and neither do we, removing member from"
                                     " tracking";
                            OIDMemberMap::iterator removeItr = itr;
                            ++itr;
                            _slaveMap.erase(removeItr->first);
                            _members.erase(removeItr);
                            continue;
                        }
                        // here the node exists in our config, so do not stop tracking it
                        // and continue with the handshaking process
                    } else {
                        resetConnection();
                        return false;
                    }
                }
            } catch (const DBException& e) {
                log() << "SyncSourceFeedback error sending chained handshakes: " << e.what()
                      << endl;
                resetConnection();
                return false;
            }
            ++itr;
        }
    }
    return true;
}
void appendReplicationInfo(BSONObjBuilder& result, int level) {
    if ( replSet ) {
        if ( theReplSet == 0 || theReplSet->state().shunned() ) {
            result.append("ismaster", false);
            result.append("secondary", false);
            result.append("info", ReplSet::startupStatusMsg.get());
            result.append( "isreplicaset" , true );
        }
        else {
            theReplSet->fillIsMaster(result);
        }
        return;
    }

    if ( replAllDead ) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    }
    else {
        result.appendBool("ismaster", _isMaster() );
    }

    if ( level && replSet ) {
        result.append( "info" , "is replica set" );
    }
    else if ( level ) {
        BSONObjBuilder sources( result.subarrayStart( "sources" ) );

        int n = 0;
        list<BSONObj> src;
        {
            Client::ReadContext ctx("local.sources", dbpath);
            shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
            while ( c->ok() ) {
                src.push_back(c->current());
                c->advance();
            }
        }

        for( list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++ ) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append( s["host"] );
            string sourcename = s["source"].valuestr();
            if ( sourcename != "main" )
                bb.append( s["source"] );
            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
                t.appendDate( "time" , e.timestampTime() );
                t.append( "inc" , e.timestampInc() );
                t.done();
            }

            if ( level > 1 ) {
                wassert( !Lock::isLocked() );
                // note: there is no so-style timeout on this connection; perhaps we should have one.
                ScopedDbConnection conn(s["host"].valuestr());

                DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
                if ( cliConn && replAuthenticate(cliConn, false) ) {
                    BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                   Query().sort( BSON( "$natural" << 1 ) ) );
                    BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename,
                                                  Query().sort( BSON( "$natural" << -1 ) ) );
                    bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
                    bb.appendDate( "masterLast" , last["ts"].timestampTime() );
                    double lag = (double) (last["ts"].timestampTime() -
                                           s["syncedTo"].timestampTime());
                    bb.append( "lagSeconds" , lag / 1000 );
                }
                conn.done();
            }

            sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
        }

        sources.done();
    }
}
virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg,
                 BSONObjBuilder& result, bool fromRepl) {
    string source = cmdObj.getStringField( name.c_str() );
    string target = cmdObj.getStringField( "to" );
    if ( source.empty() || target.empty() ) {
        errmsg = "invalid command syntax";
        return false;
    }

    bool capped = false;
    long long size = 0;
    {
        Client::Context ctx( source );
        NamespaceDetails *nsd = nsdetails( source.c_str() );
        uassert( 10026 , "source namespace does not exist", nsd );
        capped = nsd->capped;
        if ( capped )
            for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
                size += i.ext()->length;
    }

    Client::Context ctx( target );

    if ( nsdetails( target.c_str() ) ) {
        uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
        BSONObjBuilder bb( result.subobjStart( "dropTarget" ) );
        dropCollection( target , errmsg , bb );
        bb.done();
        if ( errmsg.size() > 0 )
            return false;
    }

    // If source and target live in the same database, a simple namespace rename suffices.
    {
        char from[256];
        nsToDatabase( source.c_str(), from );
        char to[256];
        nsToDatabase( target.c_str(), to );
        if ( strcmp( from, to ) == 0 ) {
            renameNamespace( source.c_str(), target.c_str() );
            return true;
        }
    }

    BSONObjBuilder spec;
    if ( capped ) {
        spec.appendBool( "capped", true );
        spec.append( "size", double( size ) );
    }
    if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
        return false;

    auto_ptr< DBClientCursor > c;
    DBDirectClient bridge;

    // Copy the documents.
    c = bridge.query( source, BSONObj() );
    while ( c->more() ) {
        BSONObj o = c->next();
        theDataFileMgr.insertWithObjMod( target.c_str(), o );
    }

    char cl[256];
    nsToDatabase( source.c_str(), cl );
    string sourceIndexes = string( cl ) + ".system.indexes";
    nsToDatabase( target.c_str(), cl );
    string targetIndexes = string( cl ) + ".system.indexes";

    // Copy the indexes, rewriting their "ns" field to the target namespace.
    c = bridge.query( sourceIndexes, QUERY( "ns" << source ) );
    while ( c->more() ) {
        BSONObj o = c->next();
        BSONObjBuilder b;
        BSONObjIterator i( o );
        while( i.moreWithEOO() ) {
            BSONElement e = i.next();
            if ( e.eoo() )
                break;
            if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
                b.append( "ns", target );
            }
            else {
                b.append( e );
            }
        }
        BSONObj n = b.done();
        theDataFileMgr.insertWithObjMod( targetIndexes.c_str(), n );
    }

    {
        Client::Context ctx( source );
        dropCollection( source, errmsg, result );
    }
    return true;
}
bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int,
         string& errmsg, BSONObjBuilder& result, bool fromRepl) {
    if (!cmdObj["start"].eoo()) {
        errmsg = "using deprecated 'start' argument to geoNear";
        return false;
    }

    const NamespaceString nss(parseNs(dbname, cmdObj));
    AutoGetCollectionForRead ctx(txn, nss);

    Collection* collection = ctx.getCollection();
    if ( !collection ) {
        errmsg = "can't find ns";
        return false;
    }

    IndexCatalog* indexCatalog = collection->getIndexCatalog();

    // We seek to populate this.
    string nearFieldName;
    bool using2DIndex = false;
    if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
        return false;
    }

    PointWithCRS point;
    uassert(17304, "'near' field must be point",
            GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

    bool isSpherical = cmdObj["spherical"].trueValue();
    if (!using2DIndex) {
        uassert(17301, "2dsphere index must have spherical: true", isSpherical);
    }

    // Build the $near expression for the query.
    BSONObjBuilder nearBob;
    if (isSpherical) {
        nearBob.append("$nearSphere", cmdObj["near"].Obj());
    }
    else {
        nearBob.append("$near", cmdObj["near"].Obj());
    }

    if (!cmdObj["maxDistance"].eoo()) {
        uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
        nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
    }

    if (!cmdObj["minDistance"].eoo()) {
        uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
        uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
        nearBob.append("$minDistance", cmdObj["minDistance"].number());
    }

    if (!cmdObj["uniqueDocs"].eoo()) {
        warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
    }

    // And, build the full query expression.
    BSONObjBuilder queryBob;
    queryBob.append(nearFieldName, nearBob.obj());
    if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
        queryBob.appendElements(cmdObj["query"].Obj());
    }
    BSONObj rewritten = queryBob.obj();

    long long numWanted = 100;
    const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
    BSONElement eNumWanted = cmdObj[limitName];
    if (!eNumWanted.eoo()) {
        uassert(17303, "limit must be number", eNumWanted.isNumber());
        numWanted = eNumWanted.safeNumberLong();
        uassert(17302, "limit must be >=0", numWanted >= 0);
    }

    bool includeLocs = false;
    if (!cmdObj["includeLocs"].eoo()) {
        includeLocs = cmdObj["includeLocs"].trueValue();
    }

    double distanceMultiplier = 1.0;
    BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
    if (!eDistanceMultiplier.eoo()) {
        uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
        distanceMultiplier = eDistanceMultiplier.number();
        uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
    }

    BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
                           "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

    CanonicalQuery* cq;
    const WhereCallbackReal whereCallback(txn, nss.db());
    if (!CanonicalQuery::canonicalize(nss, rewritten, BSONObj(), projObj, 0, numWanted,
                                      BSONObj(), &cq, whereCallback).isOK()) {
        errmsg = "Can't parse filter / create query";
        return false;
    }

    PlanExecutor* rawExec;
    if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
        errmsg = "can't get query executor";
        return false;
    }
    scoped_ptr<PlanExecutor> exec(rawExec);

    double totalDistance = 0;
    BSONObjBuilder resultBuilder(result.subarrayStart("results"));
    double farthestDist = 0;

    BSONObj currObj;
    long long results = 0;
    while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
        // Come up with the correct distance.
        double dist = currObj["$dis"].number() * distanceMultiplier;
        totalDistance += dist;
        if (dist > farthestDist) {
            farthestDist = dist;
        }

        // Strip out '$dis' and '$pt' from the result obj. The rest gets added as 'obj'
        // in the command result.
        BSONObjIterator resIt(currObj);
        BSONObjBuilder resBob;
        while (resIt.more()) {
            BSONElement elt = resIt.next();
            if (!mongoutils::str::equals("$pt", elt.fieldName())
                && !mongoutils::str::equals("$dis", elt.fieldName())) {
                resBob.append(elt);
            }
        }
        BSONObj resObj = resBob.obj();

        // Don't make a too-big result object.
        if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
            warning() << "Too many geoNear results for query " << rewritten.toString()
                      << ", truncating output.";
            break;
        }

        // Add the next result to the result builder.
        BSONObjBuilder oneResultBuilder(
            resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
        oneResultBuilder.append("dis", dist);
        if (includeLocs) {
            oneResultBuilder.appendAs(currObj["$pt"], "loc");
        }
        oneResultBuilder.append("obj", resObj);
        oneResultBuilder.done();
        ++results;
    }

    resultBuilder.done();

    // Fill out the stats subobj.
    BSONObjBuilder stats(result.subobjStart("stats"));

    // Fill in nscanned from the explain.
    PlanSummaryStats summary;
    Explain::getSummaryStats(exec.get(), &summary);
    stats.appendNumber("nscanned", summary.totalKeysExamined);
    stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

    stats.append("avgDistance", totalDistance / results);
    stats.append("maxDistance", farthestDist);
    stats.append("time", txn->getCurOp()->elapsedMillis());
    stats.done();

    return true;
}
void Model::append( const char * name , BSONObjBuilder& b ) {
    BSONObjBuilder bb( b.subobjStart( name ) );
    serialize( bb );
    bb.done();
}
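// A minimal usage sketch (not from the source) of the subobjStart()/done() pattern
// that Model::append() relies on. The child builder returned by subobjStart() writes
// directly into the parent's buffer, so done() must be called before the parent is
// appended to again. "appendAddress" and its field values are hypothetical.
void appendAddress(BSONObjBuilder& parent) {
    BSONObjBuilder sub(parent.subobjStart("address"));
    sub.append("street", "500 Main St");  // invented example values
    sub.append("zip", 12345);
    sub.done();  // closes { address: { ... } }; parent is usable again
}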
bool run2DSphereGeoNear(const IndexDetails &id, BSONObj& cmdObj, string& errmsg,
                        BSONObjBuilder& result) {
    S2IndexType *idxType = static_cast<S2IndexType*>(id.getSpec().getType());
    verify(&id == idxType->getDetails());

    // We support both "num" and "limit" options to control limit
    int numWanted = 100;
    const char* limitName = cmdObj["num"].isNumber() ? "num" : "limit";
    if (cmdObj[limitName].isNumber()) {
        numWanted = cmdObj[limitName].numberInt();
        verify(numWanted >= 0);
    }

    // Don't count any docs twice. Isn't this default behavior? Or will yields screw this up?
    //bool uniqueDocs = false;
    //if (!cmdObj["uniqueDocs"].eoo()) uniqueDocs = cmdObj["uniqueDocs"].trueValue();

    // Add the location information to each result as a field with name 'loc'.
    bool includeLocs = false;
    if (!cmdObj["includeLocs"].eoo())
        includeLocs = cmdObj["includeLocs"].trueValue();

    // The actual query point
    uassert(16551, "'near' param missing/invalid", !cmdObj["near"].eoo());
    BSONObj nearObj = cmdObj["near"].embeddedObject();

    // nearObj must be a point.
    uassert(16571, "near must be called with a point, called with " + nearObj.toString(),
            GeoParser::isPoint(nearObj));

    // The non-near query part.
    BSONObj query;
    if (cmdObj["query"].isABSONObj())
        query = cmdObj["query"].embeddedObject();

    // The farthest away we're willing to look.
    double maxDistance = numeric_limits<double>::max();
    if (cmdObj["maxDistance"].isNumber())
        maxDistance = cmdObj["maxDistance"].number();

    vector<string> geoFieldNames;
    idxType->getGeoFieldNames(&geoFieldNames);

    uassert(16552, "geoNear called but no indexed geo fields?", 1 == geoFieldNames.size());
    QueryGeometry queryGeo(geoFieldNames[0]);
    uassert(16553, "geoNear couldn't parse geo: " + nearObj.toString(),
            queryGeo.parseFrom(nearObj));

    vector<QueryGeometry> regions;
    regions.push_back(queryGeo);

    scoped_ptr<S2NearCursor> cursor(new S2NearCursor(idxType->keyPattern(),
                                                     idxType->getDetails(), query, regions,
                                                     idxType->getParams(), numWanted,
                                                     maxDistance));

    double totalDistance = 0;
    int results = 0;
    BSONObjBuilder resultBuilder(result.subarrayStart("results"));
    double farthestDist = 0;

    while (cursor->ok()) {
        double dist = cursor->currentDistance();
        totalDistance += dist;
        if (dist > farthestDist) {
            farthestDist = dist;
        }

        BSONObjBuilder oneResultBuilder(
            resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
        oneResultBuilder.append("dis", dist);
        if (includeLocs) {
            BSONElementSet geoFieldElements;
            cursor->current().getFieldsDotted(geoFieldNames[0], geoFieldElements, false);
            for (BSONElementSet::iterator oi = geoFieldElements.begin();
                 oi != geoFieldElements.end(); ++oi) {
                if (oi->isABSONObj()) {
                    oneResultBuilder.appendAs(*oi, "loc");
                }
            }
        }

        oneResultBuilder.append("obj", cursor->current());
        oneResultBuilder.done();
        ++results;
        cursor->advance();
    }

    resultBuilder.done();

    BSONObjBuilder stats(result.subobjStart("stats"));
    stats.append("time", cc().curop()->elapsedMillis());
    stats.appendNumber("nscanned", cursor->nscanned());
    stats.append("avgDistance", totalDistance / results);
    stats.append("maxDistance", farthestDist);
    stats.done();

    return true;
}
bool RunOnAllShardsCommand::run(OperationContext* txn,
                                const std::string& dbName,
                                BSONObj& cmdObj,
                                int options,
                                std::string& errmsg,
                                BSONObjBuilder& output) {
    LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << redact(cmdObj);

    if (_implicitCreateDb) {
        uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
    }

    std::vector<ShardId> shardIds;
    getShardIds(txn, dbName, cmdObj, shardIds);

    std::list<std::shared_ptr<Future::CommandResult>> futures;
    for (const ShardId& shardId : shardIds) {
        const auto shard = grid.shardRegistry()->getShard(txn, shardId);
        if (!shard) {
            continue;
        }

        futures.push_back(Future::spawnCommand(
            shard->getConnString().toString(), dbName, cmdObj, 0, NULL, _useShardConn));
    }

    std::vector<ShardAndReply> results;
    BSONObjBuilder subobj(output.subobjStart("raw"));
    BSONObjBuilder errors;
    int commonErrCode = -1;

    std::list<std::shared_ptr<Future::CommandResult>>::iterator futuresit;
    std::vector<ShardId>::const_iterator shardIdsIt;

    BSONElement wcErrorElem;
    ShardId wcErrorShardId;
    bool hasWCError = false;

    // We iterate over the set of shard ids and their corresponding futures in parallel.
    // TODO: replace with zip iterator if we ever decide to use one from Boost or elsewhere
    for (futuresit = futures.begin(), shardIdsIt = shardIds.cbegin();
         futuresit != futures.end() && shardIdsIt != shardIds.end();
         ++futuresit, ++shardIdsIt) {
        std::shared_ptr<Future::CommandResult> res = *futuresit;

        if (res->join(txn)) {
            // success :)
            BSONObj result = res->result();
            results.emplace_back(shardIdsIt->toString(), result);
            subobj.append(res->getServer(), result);

            if (!hasWCError) {
                if ((wcErrorElem = result["writeConcernError"])) {
                    wcErrorShardId = *shardIdsIt;
                    hasWCError = true;
                }
            }
            continue;
        }

        BSONObj result = res->result();

        if (!hasWCError) {
            if ((wcErrorElem = result["writeConcernError"])) {
                wcErrorShardId = *shardIdsIt;
                hasWCError = true;
            }
        }

        if (result["errmsg"].type() || result["code"].numberInt() != 0) {
            result = specialErrorHandler(res->getServer(), dbName, cmdObj, result);

            BSONElement errmsgObj = result["errmsg"];
            if (errmsgObj.eoo() || errmsgObj.String().empty()) {
                // it was fixed!
                results.emplace_back(shardIdsIt->toString(), result);
                subobj.append(res->getServer(), result);
                continue;
            }
        }

        // Handle "errmsg".
        if (!result["errmsg"].eoo()) {
            errors.appendAs(result["errmsg"], res->getServer());
        } else {
            // Can happen if message is empty, for some reason
            errors.append(res->getServer(),
                          str::stream() << "result without error message returned : " << result);
        }

        // Handle "code".
        int errCode = result["code"].numberInt();
        if (commonErrCode == -1) {
            commonErrCode = errCode;
        } else if (commonErrCode != errCode) {
            commonErrCode = 0;
        }
        results.emplace_back(shardIdsIt->toString(), result);
        subobj.append(res->getServer(), result);
    }

    subobj.done();

    if (hasWCError) {
        appendWriteConcernErrorToCmdResponse(wcErrorShardId, wcErrorElem, output);
    }

    BSONObj errobj = errors.done();

    if (!errobj.isEmpty()) {
        errmsg = errobj.toString();

        // If every error has a code, and the code for all errors is the same, then add
        // a top-level field "code" with this value to the output object.
        if (commonErrCode > 0) {
            output.append("code", commonErrCode);
        }

        return false;
    }

    aggregateResults(results, output);
    return true;
}
bool runNoDirectClient( const string& ns ,
                        const BSONObj& queryOriginal , const BSONObj& fields ,
                        const BSONObj& update , bool upsert ,
                        bool returnNew , bool remove ,
                        BSONObjBuilder& result ) {
    Lock::DBWrite lk( ns );
    Client::Context cx( ns );

    BSONObj doc;
    bool found = Helpers::findOne( ns.c_str() , queryOriginal , doc );

    BSONObj queryModified = queryOriginal;
    if ( found && doc["_id"].type() && ! isSimpleIdQuery( queryOriginal ) ) {
        // we're going to re-write the query to be more efficient
        // we have to be a little careful because of positional operators
        // maybe we can pass this all through eventually, but right now isn't an easy way
        BSONObjBuilder b( queryOriginal.objsize() + 10 );
        b.append( doc["_id"] );

        bool addedAtomic = false;

        BSONObjIterator i( queryOriginal );
        while ( i.more() ) {
            const BSONElement& elem = i.next();

            if ( str::equals( "_id" , elem.fieldName() ) ) {
                // we already do _id
                continue;
            }

            if ( ! str::contains( elem.fieldName() , '.' ) ) {
                // if there is a dotted field, accept we may need more query parts
                continue;
            }

            if ( ! addedAtomic ) {
                b.appendBool( "$atomic" , true );
                addedAtomic = true;
            }

            b.append( elem );
        }
        queryModified = b.obj();
    }

    if ( remove ) {
        _appendHelper( result , doc , found , fields );
        if ( found ) {
            deleteObjects( ns.c_str() , queryModified , true , true );
            BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
            le.appendNumber( "n" , 1 );
            le.done();
        }
    }
    else {
        // update
        if ( ! found && ! upsert ) {
            // didn't have it, and am not upserting
            _appendHelper( result , doc , found , fields );
        }
        else {
            // we found it or we're updating
            if ( ! returnNew ) {
                _appendHelper( result , doc , found , fields );
            }

            UpdateResult res = updateObjects( ns.c_str() , update , queryModified , upsert ,
                                              false , true , cc().curop()->debug() );

            if ( returnNew ) {
                if ( ! res.existing && res.upserted.isSet() ) {
                    queryModified = BSON( "_id" << res.upserted );
                }
                log() << "queryModified: " << queryModified << endl;
                verify( Helpers::findOne( ns.c_str() , queryModified , doc ) );
                _appendHelper( result , doc , true , fields );
            }

            BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) );
            le.appendBool( "updatedExisting" , res.existing );
            le.appendNumber( "n" , res.num );
            if ( res.upserted.isSet() )
                le.append( "upserted" , res.upserted );
            le.done();
        }
    }

    return true;
}
void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {

    int avail = 0;
    long long created = 0;

    map<ConnectionString::ConnectionType,long long> createdByType;

    set<string> replicaSets;

    BSONObjBuilder bb( b.subobjStart( "hosts" ) );
    {
        scoped_lock lk( _mutex );

        for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
            if ( i->second.numCreated() == 0 )
                continue;

            string s = str::stream() << i->first.ident << "::" << i->first.timeout;

            BSONObjBuilder temp( bb.subobjStart( s ) );
            temp.append( "available" , i->second.numAvailable() );
            temp.appendNumber( "created" , i->second.numCreated() );
            temp.done();

            avail += i->second.numAvailable();
            created += i->second.numCreated();

            long long& x = createdByType[i->second.type()];
            x += i->second.numCreated();
        }
    }
    bb.done();

    // Always report all replica sets being tracked
    ReplicaSetMonitor::getAllTrackedSets(&replicaSets);

    BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
    for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
        string rs = *i;
        ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
        if ( ! m ) {
            warning() << "no monitor for set: " << rs << endl;
            continue;
        }

        BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
        m->appendInfo( temp );
        temp.done();
    }
    setBuilder.done();

    {
        // Start this subobject from the top-level builder: "hosts" ( bb ) was already
        // closed by bb.done() above, so it must not be used to open new fields.
        BSONObjBuilder temp( b.subobjStart( "createdByType" ) );
        for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin();
              i!=createdByType.end(); ++i ) {
            temp.appendNumber( ConnectionString::typeToString( i->first ) , i->second );
        }
        temp.done();
    }

    b.append( "totalAvailable" , avail );
    b.appendNumber( "totalCreated" , created );
}
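// A tiny sketch (invented names, not from the source) of the pitfall addressed above:
// a child builder obtained via subobjStart() must not be used to start fields after
// its done() call; later sections must be opened from the still-open parent builder.
void appendTwoSections(BSONObjBuilder& b) {
    BSONObjBuilder hosts(b.subobjStart("hosts"));
    hosts.append("example:27017", 1);  // placeholder entry
    hosts.done();                      // "hosts" is closed; only b may open new fields
    BSONObjBuilder byType(b.subobjStart("createdByType"));
    byType.append("MASTER", 1);        // placeholder entry
    byType.done();
}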
void Chunk::appendShortVersion(const char* name, BSONObjBuilder& b) const {
    BSONObjBuilder bb(b.subobjStart(name));
    bb.append(ChunkType::min(), _min);
    bb.append(ChunkType::max(), _max);
    bb.done();
}
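// Hedged illustration of how appendShortVersion() composes with an outer builder;
// the namespace string and chunk bounds below are invented. With bounds {x: 0} and
// {x: 10}, the call would leave b containing roughly
// { ns: "test.coll", details: { min: { x: 0 }, max: { x: 10 } } }.
void appendMoveInfo(const Chunk& chunk, BSONObjBuilder& b) {
    b.append("ns", "test.coll");             // hypothetical namespace
    chunk.appendShortVersion("details", b);  // nested min/max subobject
}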
BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx,
                                               const std::string& dbName,
                                               const BSONObj& cmdObj) {
    mutablebson::Document rewrittenCmdObj(cmdObj);
    mutablebson::Element ownCollections =
        mutablebson::findFirstChildNamed(rewrittenCmdObj.root(), "authorizedCollections");

    AuthorizationSession* authzSession = AuthorizationSession::get(opCtx->getClient());

    // We must strip $ownCollections from the delegated command.
    uassertStatusOK(ownCollections.remove());

    BSONObj collectionFilter;

    // Extract and retain any previous filter
    mutablebson::Element oldFilter =
        mutablebson::findFirstChildNamed(rewrittenCmdObj.root(), "filter");

    // Make a new filter, containing a $and array.
    mutablebson::Element newFilter = rewrittenCmdObj.makeElementObject("filter");
    mutablebson::Element newFilterAnd = rewrittenCmdObj.makeElementArray("$and");
    uassertStatusOK(newFilter.pushBack(newFilterAnd));

    // Append a rule to the $and, which rejects system collections.
    mutablebson::Element systemCollectionsFilter = rewrittenCmdObj.makeElementObject(
        "", BSON("name" << BSON("$regex" << BSONRegEx("^(?!system\\.)"))));
    uassertStatusOK(newFilterAnd.pushBack(systemCollectionsFilter));

    if (!authzSession->isAuthorizedForAnyActionOnResource(
            ResourcePattern::forDatabaseName(dbName))) {
        // We passed an auth check which said we might be able to render some collections,
        // but it doesn't seem like we should render all of them. We must filter.

        // Compute the set of collection names which would be permissible to return.
        std::set<std::string> collectionNames;
        for (UserNameIterator nameIter = authzSession->getAuthenticatedUserNames();
             nameIter.more();
             nameIter.next()) {
            User* authUser = authzSession->lookupUser(*nameIter);
            const User::ResourcePrivilegeMap& resourcePrivilegeMap = authUser->getPrivileges();
            for (const std::pair<ResourcePattern, Privilege>& resourcePrivilege :
                 resourcePrivilegeMap) {
                const auto& resource = resourcePrivilege.first;
                if (resource.isCollectionPattern() ||
                    (resource.isExactNamespacePattern() &&
                     resource.databaseToMatch() == dbName)) {
                    collectionNames.emplace(resource.collectionToMatch().toString());
                }
            }
        }

        // Construct a new filter predicate which returns only collections we were found to
        // have privileges for.
        BSONObjBuilder predicateBuilder;
        BSONObjBuilder nameBuilder(predicateBuilder.subobjStart("name"));
        BSONArrayBuilder setBuilder(nameBuilder.subarrayStart("$in"));

        // Load the de-duplicated set into a BSON array
        for (StringData collectionName : collectionNames) {
            setBuilder << collectionName;
        }
        setBuilder.done();
        nameBuilder.done();

        collectionFilter = predicateBuilder.obj();

        // Filter the results by our collection names.
        mutablebson::Element newFilterAndIn =
            rewrittenCmdObj.makeElementObject("", collectionFilter);
        uassertStatusOK(newFilterAnd.pushBack(newFilterAndIn));
    }

    // If there was a pre-existing filter, compose it with our new one.
    if (oldFilter.ok()) {
        uassertStatusOK(oldFilter.remove());
        uassertStatusOK(newFilterAnd.pushBack(oldFilter));
    }

    // Attach our new composite filter back onto the listCollections command object.
    uassertStatusOK(rewrittenCmdObj.root().pushBack(newFilter));

    return rewrittenCmdObj.getObject();
}
static bool runImpl(OperationContext* txn,
                    const string& dbname,
                    const string& ns,
                    const BSONObj& query,
                    const BSONObj& fields,
                    const BSONObj& update,
                    const BSONObj& sort,
                    bool upsert,
                    bool returnNew,
                    bool remove,
                    BSONObjBuilder& result,
                    string& errmsg) {
    AutoGetOrCreateDb autoDb(txn, dbname, MODE_IX);
    Lock::CollectionLock collLock(txn->lockState(), ns, MODE_IX);
    Client::Context ctx(txn, ns, autoDb.getDb(), autoDb.justCreated());

    if (!repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(dbname)) {
        return appendCommandStatus(result,
                                   Status(ErrorCodes::NotMaster,
                                          str::stream()
                                              << "Not primary while running findAndModify in "
                                              << ns));
    }

    Collection* collection = ctx.db()->getCollection(ns);

    const WhereCallbackReal whereCallback(txn, StringData(ns));

    if (!collection) {
        if (!upsert) {
            // no collection and no upsert, so we can't possibly do anything
            _appendHelper(result, BSONObj(), false, fields, whereCallback);
            return true;
        }
        // no collection, but upsert, so we want to create it
        // problem is we only have IX on db and collection :(
        // so we tell our caller who can do it
        errmsg = "no-collection";
        return false;
    }

    Snapshotted<BSONObj> snapshotDoc;
    RecordId loc;
    bool found = false;
    {
        CanonicalQuery* cq;
        const BSONObj projection;
        const long long skip = 0;
        const long long limit = -1;  // 1 document requested; negative indicates hard limit.
        uassertStatusOK(CanonicalQuery::canonicalize(
            ns, query, sort, projection, skip, limit, &cq, whereCallback));

        PlanExecutor* rawExec;
        uassertStatusOK(getExecutor(txn,
                                    collection,
                                    cq,
                                    PlanExecutor::YIELD_AUTO,
                                    &rawExec,
                                    QueryPlannerParams::DEFAULT));

        scoped_ptr<PlanExecutor> exec(rawExec);

        PlanExecutor::ExecState state = exec->getNextSnapshotted(&snapshotDoc, &loc);
        if (PlanExecutor::ADVANCED == state) {
            found = true;
        } else if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            if (PlanExecutor::FAILURE == state &&
                WorkingSetCommon::isValidStatusMemberObject(snapshotDoc.value())) {
                const Status errorStatus =
                    WorkingSetCommon::getMemberObjectStatus(snapshotDoc.value());
                invariant(!errorStatus.isOK());
                uasserted(errorStatus.code(), errorStatus.reason());
            }
            uasserted(ErrorCodes::OperationFailed,
                      str::stream() << "executor returned " << PlanExecutor::statestr(state)
                                    << " while finding document to update");
        } else {
            invariant(PlanExecutor::IS_EOF == state);
        }
    }

    WriteUnitOfWork wuow(txn);
    if (found) {
        // We found a doc, but it might not be associated with the active snapshot.
        // If the doc has changed or is no longer in the collection, we will throw a
        // write conflict exception and start again from the beginning.
        if (txn->recoveryUnit()->getSnapshotId() != snapshotDoc.snapshotId()) {
            BSONObj oldObj = snapshotDoc.value();
            if (!collection->findDoc(txn, loc, &snapshotDoc)) {
                // Got deleted in the new snapshot.
                throw WriteConflictException();
            }

            if (!oldObj.binaryEqual(snapshotDoc.value())) {
                // Got updated in the new snapshot.
                throw WriteConflictException();
            }
        }

        // If we get here without throwing, then we should have the copy of the doc from
        // the latest snapshot.
        invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
    }

    BSONObj doc = snapshotDoc.value();

    BSONObj queryModified = query;
    if (found && !doc["_id"].eoo() && !CanonicalQuery::isSimpleIdQuery(query)) {
        // we're going to re-write the query to be more efficient
        // we have to be a little careful because of positional operators
        // maybe we can pass this all through eventually, but right now isn't an easy way

        bool hasPositionalUpdate = false;
        {
            // if the update has a positional piece ($)
            // then we need to pull all query parts in
            // so here we check for $
            // a little hacky
            BSONObjIterator i(update);
            while (i.more()) {
                const BSONElement& elem = i.next();

                if (elem.fieldName()[0] != '$' || elem.type() != Object)
                    continue;

                BSONObjIterator j(elem.Obj());
                while (j.more()) {
                    if (str::contains(j.next().fieldName(), ".$")) {
                        hasPositionalUpdate = true;
                        break;
                    }
                }
            }
        }

        BSONObjBuilder b(query.objsize() + 10);
        b.append(doc["_id"]);

        bool addedAtomic = false;

        BSONObjIterator i(query);
        while (i.more()) {
            const BSONElement& elem = i.next();

            if (str::equals("_id", elem.fieldName())) {
                // we already do _id
                continue;
            }

            if (!hasPositionalUpdate) {
                // if there is a dotted field, accept we may need more query parts
                continue;
            }

            if (!addedAtomic) {
                b.appendBool("$atomic", true);
                addedAtomic = true;
            }

            b.append(elem);
        }

        queryModified = b.obj();
    }

    if (remove) {
        _appendHelper(result, doc, found, fields, whereCallback);
        if (found) {
            deleteObjects(
                txn, ctx.db(), ns, queryModified, PlanExecutor::YIELD_MANUAL, true, true);
            BSONObjBuilder le(result.subobjStart("lastErrorObject"));
            le.appendNumber("n", 1);
            le.done();
        }
    } else {
        // update
        if (!found && !upsert) {
            // didn't have it, and am not upserting
            _appendHelper(result, doc, found, fields, whereCallback);
        } else {
            // we found it or we're updating
            if (!returnNew) {
                _appendHelper(result, doc, found, fields, whereCallback);
            }

            const NamespaceString requestNs(ns);
            UpdateRequest request(requestNs);

            request.setQuery(queryModified);
            request.setUpdates(update);
            request.setUpsert(upsert);
            request.setUpdateOpLog();
            request.setStoreResultDoc(returnNew);

            request.setYieldPolicy(PlanExecutor::YIELD_MANUAL);

            // TODO(greg) We need to send if we are ignoring
            // the shard version below, but for now no
            UpdateLifecycleImpl updateLifecycle(false, requestNs);
            request.setLifecycle(&updateLifecycle);
            UpdateResult res = mongo::update(txn, ctx.db(), request, &txn->getCurOp()->debug());

            if (!found && res.existing) {
                // No match was found during the read part of this find and modify, which
                // means that we're here doing an upsert. But the update also told us that
                // we modified an *already existing* document. This probably means that
                // the query reported EOF based on an out-of-date snapshot. This should be
                // a rare event, so we handle it by throwing a write conflict.
                throw WriteConflictException();
            }

            if (!collection) {
                // collection created by an upsert
                collection = ctx.db()->getCollection(ns);
            }

            LOG(3) << "update result: " << res;
            if (returnNew) {
                dassert(!res.newObj.isEmpty());
                _appendHelper(result, res.newObj, true, fields, whereCallback);
            }

            BSONObjBuilder le(result.subobjStart("lastErrorObject"));
            le.appendBool("updatedExisting", res.existing);
            le.appendNumber("n", res.numMatched);
            if (!res.upserted.isEmpty()) {
                le.append(res.upserted[kUpsertedFieldName]);
            }
            le.done();
        }
    }

    // Committing the WUOW can close the current snapshot. Until this happens, the
    // snapshot id should not have changed.
    if (found) {
        invariant(txn->recoveryUnit()->getSnapshotId() == snapshotDoc.snapshotId());
    }
    wuow.commit();

    return true;
}
BSONObj ReplSetConfig::toBSON() const {
    BSONObjBuilder configBuilder;
    configBuilder.append(kIdFieldName, _replSetName);
    configBuilder.appendIntOrLL(kVersionFieldName, _version);
    if (_configServer) {
        // Only include "configsvr" field if true
        configBuilder.append(kConfigServerFieldName, _configServer);
    }

    if (_protocolVersion > 0) {
        configBuilder.append(kProtocolVersionFieldName, _protocolVersion);
        // Only include writeConcernMajorityJournalDefault if it is not the default value for
        // this ProtocolVersion, to prevent breaking cross version-3.2.1 compatibility of
        // ReplSetConfigs.
        if (!_writeConcernMajorityJournalDefault) {
            configBuilder.append(kWriteConcernMajorityJournalDefaultFieldName,
                                 _writeConcernMajorityJournalDefault);
        }
    } else if (_writeConcernMajorityJournalDefault) {
        configBuilder.append(kWriteConcernMajorityJournalDefaultFieldName,
                             _writeConcernMajorityJournalDefault);
    }

    BSONArrayBuilder members(configBuilder.subarrayStart(kMembersFieldName));
    for (MemberIterator mem = membersBegin(); mem != membersEnd(); mem++) {
        members.append(mem->toBSON(getTagConfig()));
    }
    members.done();

    BSONObjBuilder settingsBuilder(configBuilder.subobjStart(kSettingsFieldName));
    settingsBuilder.append(kChainingAllowedFieldName, _chainingAllowed);
    settingsBuilder.appendIntOrLL(kHeartbeatIntervalFieldName,
                                  durationCount<Milliseconds>(_heartbeatInterval));
    settingsBuilder.appendIntOrLL(kHeartbeatTimeoutFieldName,
                                  durationCount<Seconds>(_heartbeatTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kElectionTimeoutFieldName,
                                  durationCount<Milliseconds>(_electionTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kCatchUpTimeoutFieldName,
                                  durationCount<Milliseconds>(_catchUpTimeoutPeriod));
    settingsBuilder.appendIntOrLL(kCatchUpTakeoverDelayFieldName,
                                  durationCount<Milliseconds>(_catchUpTakeoverDelay));

    BSONObjBuilder gleModes(settingsBuilder.subobjStart(kGetLastErrorModesFieldName));
    for (StringMap<ReplSetTagPattern>::const_iterator mode = _customWriteConcernModes.begin();
         mode != _customWriteConcernModes.end();
         ++mode) {
        if (mode->first[0] == '$') {
            // Filter out internal modes
            continue;
        }
        BSONObjBuilder modeBuilder(gleModes.subobjStart(mode->first));
        for (ReplSetTagPattern::ConstraintIterator itr = mode->second.constraintsBegin();
             itr != mode->second.constraintsEnd();
             itr++) {
            modeBuilder.append(_tagConfig.getTagKey(ReplSetTag(itr->getKeyIndex(), 0)),
                               itr->getMinCount());
        }
        modeBuilder.done();
    }
    gleModes.done();

    settingsBuilder.append(kGetLastErrorDefaultsFieldName, _defaultWriteConcern.toBSON());

    if (_replicaSetId.isSet()) {
        settingsBuilder.append(kReplicaSetIdFieldName, _replicaSetId);
    }

    settingsBuilder.done();
    return configBuilder.obj();
}
void ProtobufBsonFormatter::formatSingleField(const google::protobuf::Message& message,
                                              const google::protobuf::FieldDescriptor* field,
                                              BSONObjBuilder& builder) {
    std::string fieldName("");

    if (field->is_extension()) {
        //TODO
    }
    else if (field->type() == google::protobuf::FieldDescriptor::TYPE_GROUP) {
        // Groups must be serialized with their original capitalization.
        fieldName = field->message_type()->name().c_str();
        //...append values
    }
    else {
        fieldName = field->camelcase_name();
        const google::protobuf::Reflection* reflection = message.GetReflection();

        if (field->is_repeated()) {
            int fieldsize = reflection->FieldSize(message, field);
            switch (field->cpp_type()) {
                case FieldDescriptor::CPPTYPE_INT32: {  //= 1, TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32
                    std::vector<int32> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedInt32(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_INT64: {  //= 2, TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64
                    std::vector<long long> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedInt64(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_UINT32: {  //= 3, TYPE_UINT32, TYPE_FIXED32
                    std::vector<uint32> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedUInt32(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_UINT64: {  //= 4, TYPE_UINT64, TYPE_FIXED64
                    std::vector<long long> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(
                            (long long)reflection->GetRepeatedUInt64(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_DOUBLE: {  //= 5, TYPE_DOUBLE
                    std::vector<double> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedDouble(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_FLOAT: {  //= 6, TYPE_FLOAT
                    std::vector<float> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedFloat(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_BOOL: {  //= 7, TYPE_BOOL
                    std::vector<bool> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedBool(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_STRING: {  //= 9, TYPE_STRING, TYPE_BYTES
                    std::vector<std::string> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedString(message, field, i));
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_ENUM: {  //= 8, TYPE_ENUM
                    std::vector<std::string> values;
                    values.reserve(fieldsize);
                    for (int i = 0; i < fieldsize; ++i) {
                        values.push_back(reflection->GetRepeatedEnum(message, field, i)->name());
                    }
                    builder.append(fieldName, values);
                    break;
                }
                case FieldDescriptor::CPPTYPE_MESSAGE: {  //= 10, TYPE_MESSAGE, TYPE_GROUP
                    BSONObjBuilder sub(builder.subarrayStart(fieldName));
                    for (int i = 0; i < fieldsize; ++i) {
                        char number[16] = {0};
                        sprintf(number, "%d", i);
                        BSONObjBuilder obj(sub.subobjStart(number));
                        formatMessage(reflection->GetRepeatedMessage(message, field, i), obj);
                        obj.done();
                    }
                    sub.done();
                    break;
                }
                default: {
                    break;
                }
            }  // end switch
        }
        else {  // not repeated
            switch (/*cppType*/ field->cpp_type()) {
                case FieldDescriptor::CPPTYPE_INT32: {  //= 1, TYPE_INT32, TYPE_SINT32, TYPE_SFIXED32
                    builder.append(fieldName, reflection->GetInt32(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_INT64: {  //= 2, TYPE_INT64, TYPE_SINT64, TYPE_SFIXED64
                    builder.append(fieldName,
                                   static_cast<long long>(reflection->GetInt64(message, field)));
                    break;
                }
                case FieldDescriptor::CPPTYPE_UINT32: {  //= 3, TYPE_UINT32, TYPE_FIXED32
                    builder.append(fieldName, reflection->GetUInt32(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_UINT64: {  //= 4, TYPE_UINT64, TYPE_FIXED64
                    builder.append(fieldName,
                                   static_cast<long long>(reflection->GetUInt64(message, field)));
                    break;
                }
                case FieldDescriptor::CPPTYPE_DOUBLE: {  //= 5, TYPE_DOUBLE
                    builder.append(fieldName, reflection->GetDouble(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_FLOAT: {  //= 6, TYPE_FLOAT
                    builder.append(fieldName, reflection->GetFloat(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_BOOL: {  //= 7, TYPE_BOOL
                    builder.append(fieldName, reflection->GetBool(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_STRING: {  //= 9, TYPE_STRING, TYPE_BYTES
                    builder.append(fieldName, reflection->GetString(message, field));
                    break;
                }
                case FieldDescriptor::CPPTYPE_ENUM: {  //= 8, TYPE_ENUM
                    builder.append(fieldName, reflection->GetEnum(message, field)->name());
                    break;
                }
                case FieldDescriptor::CPPTYPE_MESSAGE: {  //= 10, TYPE_MESSAGE, TYPE_GROUP
                    BSONObjBuilder sub(builder.subobjStart(fieldName));
                    formatMessage(reflection->GetMessage(message, field), sub);
                    sub.done();
                    break;
                }
                default: {
                    break;
                }
            }  // end switch
        }
    }  // end else
}
BSONObj CurOp::info() {
    BSONObjBuilder b;
    b.append("opid", _opNum);
    bool a = _active && _start;
    b.append("active", a);

    if( a ) {
        b.append("secs_running", elapsedSeconds() );
    }

    b.append( "op" , opToString( _op ) );
    b.append("ns", _ns);

    if (_op == dbInsert) {
        _query.append(b, "insert");
    }
    else {
        _query.append(b, "query");
    }

    if ( !debug().planSummary.empty() ) {
        b.append( "planSummary" , debug().planSummary.toString() );
    }

    if( !_remote.empty() ) {
        b.append("client", _remote.toString());
    }

    if ( _client ) {
        b.append( "desc" , _client->desc() );
        if ( _client->_threadId.size() )
            b.append( "threadId" , _client->_threadId );
        if ( _client->_connectionId )
            b.appendNumber( "connectionId" , _client->_connectionId );
        _client->_ls.reportState(b);
    }

    if ( ! _message.empty() ) {
        if ( _progressMeter.isActive() ) {
            StringBuilder buf;
            buf << _message.toString() << " " << _progressMeter.toString();
            b.append( "msg" , buf.str() );
            BSONObjBuilder sub( b.subobjStart( "progress" ) );
            sub.appendNumber( "done" , (long long)_progressMeter.done() );
            sub.appendNumber( "total" , (long long)_progressMeter.total() );
            sub.done();
        }
        else {
            b.append( "msg" , _message.toString() );
        }
    }

    if( killPending() )
        b.append("killPending", true);

    b.append( "numYields" , _numYields );
    b.append( "lockStats" , _lockStat.report() );

    return b.obj();
}
void OpDebug::append(const CurOp& curop,
                     const SingleThreadedLockStats& lockStats,
                     BSONObjBuilder& b) const {
    const size_t maxElementSize = 50 * 1024;

    b.append("op", iscommand ? "command" : opToString(op));
    b.append("ns", ns);

    if (!query.isEmpty()) {
        appendAsObjOrString(iscommand ? "command" : "query", query, maxElementSize, &b);
    } else if (!iscommand && curop.haveQuery()) {
        appendAsObjOrString("query", curop.query(), maxElementSize, &b);
    }

    if (!updateobj.isEmpty()) {
        appendAsObjOrString("updateobj", updateobj, maxElementSize, &b);
    }

    const bool moved = (nmoved >= 1);

    OPDEBUG_APPEND_NUMBER(cursorid);
    OPDEBUG_APPEND_NUMBER(ntoreturn);
    OPDEBUG_APPEND_NUMBER(ntoskip);
    OPDEBUG_APPEND_BOOL(exhaust);

    OPDEBUG_APPEND_NUMBER(nscanned);
    OPDEBUG_APPEND_NUMBER(nscannedObjects);
    OPDEBUG_APPEND_BOOL(idhack);
    OPDEBUG_APPEND_BOOL(scanAndOrder);
    OPDEBUG_APPEND_BOOL(moved);
    OPDEBUG_APPEND_NUMBER(nmoved);
    OPDEBUG_APPEND_NUMBER(nMatched);
    OPDEBUG_APPEND_NUMBER(nModified);
    OPDEBUG_APPEND_NUMBER(ninserted);
    OPDEBUG_APPEND_NUMBER(ndeleted);
    OPDEBUG_APPEND_BOOL(fastmod);
    OPDEBUG_APPEND_BOOL(fastmodinsert);
    OPDEBUG_APPEND_BOOL(upsert);
    OPDEBUG_APPEND_BOOL(cursorExhausted);
    OPDEBUG_APPEND_NUMBER(keyUpdates);
    OPDEBUG_APPEND_NUMBER(writeConflicts);

    b.appendNumber("numYield", curop.numYields());

    {
        BSONObjBuilder locks(b.subobjStart("locks"));
        lockStats.report(&locks);
    }

    if (!exceptionInfo.empty()) {
        exceptionInfo.append(b, "exception", "exceptionCode");
    }

    OPDEBUG_APPEND_NUMBER(nreturned);
    OPDEBUG_APPEND_NUMBER(responseLength);
    if (iscommand) {
        b.append("protocol", getProtoString(op));
    }
    b.append("millis", executionTime);

    execStats.append(b, "execStats");
}
bool FTSCommand::_run(const string& dbName,
                      BSONObj& cmdObj,
                      int cmdOptions,
                      const string& ns,
                      const string& searchString,
                      string language,  // "" for not-set
                      int limit,
                      BSONObj& filter,
                      BSONObj& projection,
                      string& errmsg,
                      BSONObjBuilder& result ) {
    Timer timer;

    vector<Strategy::CommandResult> results;
    STRATEGY->commandOp( dbName, cmdObj, cmdOptions, ns, filter, &results );

    vector<Scored> all;
    long long nscanned = 0;
    long long nscannedObjects = 0;

    BSONObjBuilder shardStats;

    for ( vector<Strategy::CommandResult>::const_iterator i = results.begin();
          i != results.end();
          ++i ) {
        BSONObj r = i->result;

        LOG(2) << "fts result for shard: " << i->shardTarget << "\n" << r << endl;

        if ( !r["ok"].trueValue() ) {
            errmsg = str::stream() << "failure on shard: " << i->shardTarget.toString()
                                   << ": " << r["errmsg"];
            result.append( "rawresult", r );
            return false;
        }

        if ( r["stats"].isABSONObj() ) {
            BSONObj x = r["stats"].Obj();
            nscanned += x["nscanned"].numberLong();
            nscannedObjects += x["nscannedObjects"].numberLong();

            shardStats.append( i->shardTarget.getName(), x );
        }

        if ( r["results"].isABSONObj() ) {
            BSONObjIterator j( r["results"].Obj() );
            while ( j.more() ) {
                BSONElement e = j.next();
                all.push_back( Scored(e.Obj()) );
            }
        }
    }

    sort( all.begin(), all.end() );

    long long n = 0;
    {
        BSONArrayBuilder arr( result.subarrayStart( "results" ) );
        for ( unsigned i = 0; i < all.size(); i++ ) {
            arr.append( all[i].full );
            if ( ++n >= limit )
                break;
        }
        arr.done();
    }

    {
        BSONObjBuilder stats( result.subobjStart( "stats" ) );
        stats.appendNumber( "nscanned", nscanned );
        stats.appendNumber( "nscannedObjects", nscannedObjects );
        stats.appendNumber( "n", n );
        stats.append( "timeMicros", (int)timer.micros() );
        stats.append( "shards", shardStats.obj() );
        stats.done();
    }

    return true;
}
static bool run2DSphereGeoNear(NamespaceDetails* nsDetails, int idxNo, BSONObj& cmdObj,
                               const GeoNearArguments& parsedArgs, string& errmsg,
                               BSONObjBuilder& result) {
    auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(nsDetails, idxNo));
    auto_ptr<S2AccessMethod> sam(new S2AccessMethod(descriptor.get()));
    const S2IndexingParams& params = sam->getParams();
    auto_ptr<S2NearIndexCursor> nic(new S2NearIndexCursor(descriptor.get(), params));

    vector<string> geoFieldNames;
    BSONObjIterator i(descriptor->keyPattern());
    while (i.more()) {
        BSONElement e = i.next();
        if (e.type() == String && IndexNames::GEO_2DSPHERE == e.valuestr()) {
            geoFieldNames.push_back(e.fieldName());
        }
    }

    // NOTE(hk): If we add a new argument to geoNear, we could have a 2dsphere index
    // with multiple indexed geo fields, and the geoNear could pick the one to run
    // over.  Right now, we just require one.
    uassert(16552, "geoNear requires exactly one indexed geo field", 1 == geoFieldNames.size());
    NearQuery nearQuery(geoFieldNames[0]);
    uassert(16679, "Invalid geometry given as arguments to geoNear: " + cmdObj.toString(),
            nearQuery.parseFromGeoNear(cmdObj, params.radius));
    uassert(16683, "geoNear on 2dsphere index requires spherical", parsedArgs.isSpherical);

    // NOTE(hk): For a speedup, we could look through the query to see if we've
    // geo-indexed any of the fields in it.
    vector<GeoQuery> regions;
    nic->seek(parsedArgs.query, nearQuery, regions);

    // We do pass in the query above, but it's just so we can possibly use it in our
    // index scan.  We have to do our own matching.
    auto_ptr<Matcher> matcher(new Matcher(parsedArgs.query));

    double totalDistance = 0;
    BSONObjBuilder resultBuilder(result.subarrayStart("results"));
    double farthestDist = 0;

    int results;
    for (results = 0; results < parsedArgs.numWanted && !nic->isEOF(); ++results) {
        BSONObj currObj = nic->getValue().obj();
        if (!matcher->matches(currObj)) {
            --results;
            nic->next();
            continue;
        }

        double dist = nic->currentDistance();
        // If we got the distance in radians, output it in radians too.
        if (nearQuery.fromRadians) {
            dist /= params.radius;
        }
        dist *= parsedArgs.distanceMultiplier;
        totalDistance += dist;
        if (dist > farthestDist) {
            farthestDist = dist;
        }

        BSONObjBuilder oneResultBuilder(
            resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
        oneResultBuilder.append("dis", dist);
        if (parsedArgs.includeLocs) {
            BSONElementSet geoFieldElements;
            currObj.getFieldsDotted(geoFieldNames[0], geoFieldElements, false);
            for (BSONElementSet::iterator oi = geoFieldElements.begin();
                 oi != geoFieldElements.end(); ++oi) {
                if (oi->isABSONObj()) {
                    oneResultBuilder.appendAs(*oi, "loc");
                }
            }
        }
        oneResultBuilder.append("obj", currObj);
        oneResultBuilder.done();
        nic->next();
    }
    resultBuilder.done();

    BSONObjBuilder stats(result.subobjStart("stats"));
    stats.appendNumber("nscanned", nic->nscanned());
    // Guard against dividing by zero when nothing matched.
    stats.append("avgDistance", results > 0 ? totalDistance / results : 0.0);
    stats.append("maxDistance", farthestDist);
    stats.append("time", cc().curop()->elapsedMillis());
    stats.done();
    return true;
}
bool SyncSourceFeedback::replHandshake() { // handshake for us BSONObjBuilder cmd; cmd.append("replSetUpdatePosition", 1); BSONObjBuilder sub (cmd.subobjStart("handshake")); sub.appendAs(_me["_id"], "handshake"); sub.append("member", theReplSet->selfId()); sub.append("config", theReplSet->myConfig().asBson()); sub.doneFast(); LOG(1) << "detecting upstream updater"; BSONObj res; try { if (!_connection->runCommand("admin", cmd.obj(), res)) { if (res["errmsg"].str().find("no such cmd") != std::string::npos) { LOG(1) << "upstream updater is not supported by the member from which we" " are syncing, using oplogreader-based updating instead"; _supportsUpdater = false; } resetConnection(); return false; } else { LOG(1) << "upstream updater is supported"; _supportsUpdater = true; } } catch (const DBException& e) { log() << "SyncSourceFeedback error sending handshake: " << e.what() << endl; resetConnection(); return false; } // handshakes for those connected to us { for (OIDMemberMap::iterator itr = _members.begin(); itr != _members.end(); ++itr) { BSONObjBuilder slaveCmd; slaveCmd.append("replSetUpdatePosition", 1); // outer handshake indicates this is a handshake command // inner is needed as part of the structure to be passed to gotHandshake BSONObjBuilder slaveSub (slaveCmd.subobjStart("handshake")); slaveSub.append("handshake", itr->first); slaveSub.append("member", itr->second->id()); slaveSub.append("config", itr->second->config().asBson()); slaveSub.doneFast(); BSONObj slaveRes; try { if (!_connection->runCommand("admin", slaveCmd.obj(), slaveRes)) { resetConnection(); return false; } } catch (const DBException& e) { log() << "SyncSourceFeedback error sending chained handshakes: " << e.what() << endl; resetConnection(); return false; } } } return true; }
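// The handshake command assembled above has this shape (values illustrative):
//   { replSetUpdatePosition: 1,
//     handshake: { handshake: <our _id>, member: <selfId>, config: {...} } }
// and one more such command is sent per downstream member in the chained case.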
void Strategy::clientCommandOp( Request& r ) { QueryMessage q( r.d() ); LOG(3) << "command: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << " options: " << q.queryOptions << endl; if (q.queryOptions & QueryOption_Exhaust) { uasserted(18527, string("the 'exhaust' query option is invalid for mongos commands: ") + q.ns + " " + q.query.toString()); } NamespaceString nss( r.getns() ); // Regular queries are handled in strategy_shard.cpp verify( nss.isCommand() || nss.isSpecialCommand() ); if ( handleSpecialNamespaces( r , q ) ) return; int loops = 5; while ( true ) { BSONObjBuilder builder; try { BSONObj cmdObj = q.query; { BSONElement e = cmdObj.firstElement(); if (e.type() == Object && (e.fieldName()[0] == '$' ? str::equals("query", e.fieldName()+1) : str::equals("query", e.fieldName()))) { // Extract the embedded query object. if (cmdObj.hasField(Query::ReadPrefField.name())) { // The command has a read preference setting. We don't want // to lose this information so we copy this to a new field // called $queryOptions.$readPreference BSONObjBuilder finalCmdObjBuilder; finalCmdObjBuilder.appendElements(e.embeddedObject()); BSONObjBuilder queryOptionsBuilder( finalCmdObjBuilder.subobjStart("$queryOptions")); queryOptionsBuilder.append(cmdObj[Query::ReadPrefField.name()]); queryOptionsBuilder.done(); cmdObj = finalCmdObjBuilder.obj(); } else { cmdObj = e.embeddedObject(); } } } Command::runAgainstRegistered(q.ns, cmdObj, builder, q.queryOptions); BSONObj x = builder.done(); replyToQuery(0, r.p(), r.m(), x); return; } catch ( StaleConfigException& e ) { if ( loops <= 0 ) throw e; loops--; log() << "retrying command: " << q.query << endl; // For legacy reasons, ns may not actually be set in the exception :-( string staleNS = e.getns(); if( staleNS.size() == 0 ) staleNS = q.ns; ShardConnection::checkMyConnectionVersions( staleNS ); if( loops < 4 ) versionManager.forceRemoteCheckShardVersionCB( staleNS ); } catch ( AssertionException& e ) { Command::appendCommandStatus(builder, e.toStatus()); BSONObj x = builder.done(); replyToQuery(0, r.p(), r.m(), x); return; } } }
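// The unwrapping above turns a wrapped command such as
//   { $query: { count: "coll" }, $readPreference: { mode: "secondary" } }
// into
//   { count: "coll", $queryOptions: { $readPreference: { mode: "secondary" } } }
// so the read preference survives after the embedded query object is extracted.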
void appendReplicationInfo(OperationContext* txn, BSONObjBuilder& result, int level) {
    ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
    if (replCoord->getSettings().usingReplSets()) {
        IsMasterResponse isMasterResponse;
        replCoord->fillIsMasterForReplSet(&isMasterResponse);
        result.appendElements(isMasterResponse.toBSON());
        if (level) {
            replCoord->appendSlaveInfoData(&result);
        }
        return;
    }

    // TODO(dannenberg) replAllDead is bad and should be removed when master slave is removed
    if (replAllDead) {
        result.append("ismaster", 0);
        string s = string("dead: ") + replAllDead;
        result.append("info", s);
    } else {
        result.appendBool("ismaster",
                          getGlobalReplicationCoordinator()->isMasterForReportingPurposes());
    }

    if (level) {
        BSONObjBuilder sources(result.subarrayStart("sources"));

        int n = 0;
        list<BSONObj> src;
        {
            const char* localSources = "local.sources";
            AutoGetCollectionForRead ctx(txn, localSources);
            unique_ptr<PlanExecutor> exec(
                InternalPlanner::collectionScan(txn, localSources, ctx.getCollection()));
            BSONObj obj;
            PlanExecutor::ExecState state;
            while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
                // Own the object: 'src' is consumed after the collection lock is
                // released, so the BSONObj must not point into the scan's storage.
                src.push_back(obj.getOwned());
            }
        }

        for (list<BSONObj>::const_iterator i = src.begin(); i != src.end(); i++) {
            BSONObj s = *i;
            BSONObjBuilder bb;
            bb.append(s["host"]);
            string sourcename = s["source"].valuestr();
            if (sourcename != "main")
                bb.append(s["source"]);
            {
                BSONElement e = s["syncedTo"];
                BSONObjBuilder t(bb.subobjStart("syncedTo"));
                t.appendDate("time", e.timestampTime());
                t.append("inc", e.timestampInc());
                t.done();
            }

            if (level > 1) {
                wassert(!txn->lockState()->isLocked());
                // note: there is no so-style timeout on this connection; perhaps we
                // should have one.
                ScopedDbConnection conn(s["host"].valuestr());

                DBClientConnection* cliConn = dynamic_cast<DBClientConnection*>(&conn.conn());
                if (cliConn && replAuthenticate(cliConn)) {
                    BSONObj first = conn->findOne((string) "local.oplog.$" + sourcename,
                                                  Query().sort(BSON("$natural" << 1)));
                    BSONObj last = conn->findOne((string) "local.oplog.$" + sourcename,
                                                 Query().sort(BSON("$natural" << -1)));
                    bb.appendDate("masterFirst", first["ts"].timestampTime());
                    bb.appendDate("masterLast", last["ts"].timestampTime());
                    const auto lag =
                        (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
                    bb.append("lagSeconds", durationCount<Milliseconds>(lag) / 1000.0);
                }
                conn.done();
            }

            sources.append(BSONObjBuilder::numStr(n++), bb.obj());
        }

        sources.done();

        replCoord->appendSlaveInfoData(&result);
    }
}
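// Each entry in the "sources" array built above comes out as:
//   { host: "...", source: "..." (omitted for "main"),
//     syncedTo: { time: <Date>, inc: <int> },
//     // and, at level > 1, when the master is reachable and auth succeeds:
//     masterFirst: <Date>, masterLast: <Date>, lagSeconds: <double> }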
/*
 * Runs the text-search command described by cmdObj against the db named dbname and
 * puts the result in result.
 * @param txn, operation context under which the command runs
 * @param dbname, name of the db
 * @param cmdObj, object that contains the entire command
 * @param cmdOptions, query options for the command
 * @param ns, full namespace to search
 * @param searchString, text to search for
 * @param language, search language ("" for not-set)
 * @param limit, maximum number of results to return
 * @param filter, additional query predicate to apply
 * @param projection, fields to return
 * @param errmsg, reference to error message
 * @param result, reference to builder for result
 * @return true if successful, false otherwise
 */
bool FTSCommand::_run(OperationContext* txn,
                      const string& dbname,
                      BSONObj& cmdObj,
                      int cmdOptions,
                      const string& ns,
                      const string& searchString,
                      string language,  // "" for not-set
                      int limit,
                      BSONObj& filter,
                      BSONObj& projection,
                      string& errmsg,
                      BSONObjBuilder& result) {
    Timer comm;

    // Rewrite the cmd as a normal query.
    BSONObjBuilder queryBob;
    queryBob.appendElements(filter);

    BSONObjBuilder textBob;
    textBob.append("$search", searchString);
    if (!language.empty()) {
        textBob.append("$language", language);
    }
    queryBob.append("$text", textBob.obj());

    // This is the query we exec.
    BSONObj queryObj = queryBob.obj();

    // We sort by the score.
    BSONObj sortSpec = BSON("$s" << BSON("$meta" << LiteParsedQuery::metaTextScore));

    // We also project the score into the document and strip it out later during the
    // reformatting of the results.
    BSONObjBuilder projBob;
    projBob.appendElements(projection);
    projBob.appendElements(sortSpec);
    BSONObj projObj = projBob.obj();

    AutoGetCollectionForRead ctx(txn, ns);

    CanonicalQuery* cq;
    Status canonicalizeStatus =
        CanonicalQuery::canonicalize(ns, queryObj, sortSpec, projObj, 0, limit, BSONObj(), &cq,
                                     WhereCallbackReal(txn, dbname));
    if (!canonicalizeStatus.isOK()) {
        errmsg = canonicalizeStatus.reason();
        return false;
    }

    PlanExecutor* rawExec;
    Status getExecStatus = getExecutor(txn, ctx.getCollection(), cq, &rawExec);
    if (!getExecStatus.isOK()) {
        errmsg = getExecStatus.reason();
        return false;
    }
    auto_ptr<PlanExecutor> exec(rawExec);

    BSONArrayBuilder resultBuilder(result.subarrayStart("results"));

    // Quoth: "leave a mb for other things"
    int resultSize = 1024 * 1024;
    int numReturned = 0;

    BSONObj obj;
    while (PlanExecutor::ADVANCED == exec->getNext(&obj, NULL)) {
        if ((resultSize + obj.objsize()) >= BSONObjMaxUserSize) {
            break;
        }
        // We return an array of results.  Add another element.
        BSONObjBuilder oneResultBuilder(resultBuilder.subobjStart());
        oneResultBuilder.append("score", obj["$s"].number());

        // Strip out the score from the returned obj.
        BSONObjIterator resIt(obj);
        BSONObjBuilder resBob;
        while (resIt.more()) {
            BSONElement elt = resIt.next();
            if (!mongoutils::str::equals("$s", elt.fieldName())) {
                resBob.append(elt);
            }
        }
        oneResultBuilder.append("obj", resBob.obj());
        BSONObj addedArrayObj = oneResultBuilder.done();
        resultSize += addedArrayObj.objsize();
        numReturned++;
    }

    resultBuilder.done();

    // Return some stats to the user.
    BSONObjBuilder stats(result.subobjStart("stats"));

    // Fill in nscanned from the explain.
    PlanSummaryStats summary;
    Explain::getSummaryStats(exec.get(), &summary);
    stats.appendNumber("nscanned", summary.totalKeysExamined);
    stats.appendNumber("nscannedObjects", summary.totalDocsExamined);

    stats.appendNumber("n", numReturned);
    stats.append("timeMicros", (int)comm.micros());
    stats.done();

    return true;
}
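// The rewrite above turns the text command into a plain find. For example, a
// search for "coffee" with filter { active: true } runs as (sketch):
//   query:      { active: true, $text: { $search: "coffee" } }
//   sort:       { $s: { $meta: "textScore" } }
//   projection: { <caller's fields...>, $s: { $meta: "textScore" } }
// with the synthetic $s field stripped back out of every returned document.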
bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) { string ns = dbname + "." + cmdObj.firstElement().valuestr(); if (!cmdObj["start"].eoo()) { errmsg = "using deprecated 'start' argument to geoNear"; return false; } Database* db = cc().database(); if ( !db ) { errmsg = "can't find ns"; return false; } Collection* collection = db->getCollection( ns ); if ( !collection ) { errmsg = "can't find ns"; return false; } IndexCatalog* indexCatalog = collection->getIndexCatalog(); // cout << "raw cmd " << cmdObj.toString() << endl; // We seek to populate this. string nearFieldName; bool using2DIndex = false; if (!getFieldName(collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) { return false; } uassert(17304, "'near' field must be point", !cmdObj["near"].eoo() && cmdObj["near"].isABSONObj() && GeoParser::isPoint(cmdObj["near"].Obj())); bool isSpherical = cmdObj["spherical"].trueValue(); if (!using2DIndex) { uassert(17301, "2dsphere index must have spherical: true", isSpherical); } // Build the $near expression for the query. BSONObjBuilder nearBob; if (isSpherical) { nearBob.append("$nearSphere", cmdObj["near"].Obj()); } else { nearBob.append("$near", cmdObj["near"].Obj()); } if (!cmdObj["maxDistance"].eoo()) { uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber()); nearBob.append("$maxDistance", cmdObj["maxDistance"].number()); } if (!cmdObj["minDistance"].eoo()) { uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex); uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber()); nearBob.append("$minDistance", cmdObj["minDistance"].number()); } if (!cmdObj["uniqueDocs"].eoo()) { nearBob.append("$uniqueDocs", cmdObj["uniqueDocs"].trueValue()); } // And, build the full query expression. BSONObjBuilder queryBob; queryBob.append(nearFieldName, nearBob.obj()); if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) { queryBob.appendElements(cmdObj["query"].Obj()); } BSONObj rewritten = queryBob.obj(); // cout << "rewritten query: " << rewritten.toString() << endl; int numWanted = 100; const char* limitName = !cmdObj["num"].eoo() ? 
"num" : "limit"; BSONElement eNumWanted = cmdObj[limitName]; if (!eNumWanted.eoo()) { uassert(17303, "limit must be number", eNumWanted.isNumber()); numWanted = eNumWanted.numberInt(); uassert(17302, "limit must be >=0", numWanted >= 0); } bool includeLocs = false; if (!cmdObj["includeLocs"].eoo()) { includeLocs = cmdObj["includeLocs"].trueValue(); } double distanceMultiplier = 1.0; BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"]; if (!eDistanceMultiplier.eoo()) { uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber()); distanceMultiplier = eDistanceMultiplier.number(); uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0); } BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance)); CanonicalQuery* cq; if (!CanonicalQuery::canonicalize(ns, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), &cq).isOK()) { errmsg = "Can't parse filter / create query"; return false; } Runner* rawRunner; if (!getRunner(cq, &rawRunner, 0).isOK()) { errmsg = "can't get query runner"; return false; } auto_ptr<Runner> runner(rawRunner); double totalDistance = 0; BSONObjBuilder resultBuilder(result.subarrayStart("results")); double farthestDist = 0; BSONObj currObj; int results = 0; while ((results < numWanted) && Runner::RUNNER_ADVANCED == runner->getNext(&currObj, NULL)) { // cout << "result is " << currObj.toString() << endl; double dist = currObj["$dis"].number() * distanceMultiplier; // cout << std::setprecision(10) << "HK GEON mul'd dist is " << dist << " raw dist is " << currObj["$dis"].number() << endl; totalDistance += dist; if (dist > farthestDist) { farthestDist = dist; } BSONObjBuilder oneResultBuilder( resultBuilder.subobjStart(BSONObjBuilder::numStr(results))); oneResultBuilder.append("dis", dist); if (includeLocs) { oneResultBuilder.appendAs(currObj["$pt"], "loc"); } // strip out '$dis' and '$pt' and the rest gets added as 'obj'. BSONObjIterator resIt(currObj); BSONObjBuilder resBob; while (resIt.more()) { BSONElement elt = resIt.next(); if (!mongoutils::str::equals("$pt", elt.fieldName()) && !mongoutils::str::equals("$dis", elt.fieldName())) { resBob.append(elt); } } oneResultBuilder.append("obj", resBob.obj()); oneResultBuilder.done(); ++results; } resultBuilder.done(); // Fill out the stats subobj. BSONObjBuilder stats(result.subobjStart("stats")); // Fill in nscanned from the explain. TypeExplain* bareExplain; Status res = runner->getExplainPlan(&bareExplain); if (res.isOK()) { auto_ptr<TypeExplain> explain(bareExplain); stats.append("nscanned", explain->getNScanned()); stats.append("objectsLoaded", explain->getNScannedObjects()); } stats.append("avgDistance", totalDistance / results); stats.append("maxDistance", farthestDist); stats.append("time", cc().curop()->elapsedMillis()); stats.done(); return true; }
Status ChunkMoveOperationState::commitMigration(OperationContext* txn) { invariant(_distLockStatus.is_initialized()); invariant(_distLockStatus->isOK()); // We're under the collection distributed lock here, so no other migrate can change maxVersion // or CollectionMetadata state. ShardingState* const shardingState = ShardingState::get(txn); Status startStatus = ShardingStateRecovery::startMetadataOp(txn); if (!startStatus.isOK()) return startStatus; shardingState->migrationSourceManager()->setInCriticalSection(true); const ChunkVersion originalCollVersion = getCollMetadata()->getCollVersion(); ChunkVersion myVersion = originalCollVersion; myVersion.incMajor(); { ScopedTransaction transaction(txn, MODE_IX); Lock::DBLock lk(txn->lockState(), _nss.db(), MODE_IX); Lock::CollectionLock collLock(txn->lockState(), _nss.ns(), MODE_X); invariant(myVersion > shardingState->getVersion(_nss.ns())); // Bump the metadata's version up and "forget" about the chunk being moved. This is // not the commit point, but in practice the state in this shard won't change until // the commit it done. shardingState->donateChunk(txn, _nss.ns(), _minKey, _maxKey, myVersion); } log() << "moveChunk setting version to: " << myVersion << migrateLog; // We're under the collection lock here, too, so we can undo the chunk donation because // no other state change could be ongoing BSONObj res; Status recvChunkCommitStatus{ErrorCodes::InternalError, "status not set"}; try { ScopedDbConnection connTo(_toShardCS, 35.0); connTo->runCommand("admin", BSON("_recvChunkCommit" << 1), res); connTo.done(); recvChunkCommitStatus = getStatusFromCommandResult(res); } catch (const DBException& e) { const string msg = stream() << "moveChunk could not contact to shard " << _toShard << " to commit transfer" << causedBy(e); warning() << msg; recvChunkCommitStatus = Status(e.toStatus().code(), msg); } if (MONGO_FAIL_POINT(failMigrationCommit) && recvChunkCommitStatus.isOK()) { recvChunkCommitStatus = Status(ErrorCodes::InternalError, "Failing _recvChunkCommit due to failpoint."); } if (!recvChunkCommitStatus.isOK()) { log() << "moveChunk migrate commit not accepted by TO-shard: " << res << " resetting shard version to: " << getShardVersion() << migrateLog; { ScopedTransaction transaction(txn, MODE_IX); Lock::DBLock dbLock(txn->lockState(), _nss.db(), MODE_IX); Lock::CollectionLock collLock(txn->lockState(), _nss.ns(), MODE_X); log() << "moveChunk collection lock acquired to reset shard version from " "failed migration"; // Revert the chunk manager back to the state before "forgetting" about the chunk shardingState->undoDonateChunk(txn, _nss.ns(), getCollMetadata()); } log() << "Shard version successfully reset to clean up failed migration"; return Status(recvChunkCommitStatus.code(), stream() << "_recvChunkCommit failed: " << causedBy(recvChunkCommitStatus)); } log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog; BSONArrayBuilder updates; { // Update for the chunk being moved BSONObjBuilder op; op.append("op", "u"); op.appendBool("b", false); // No upserting op.append("ns", ChunkType::ConfigNS); BSONObjBuilder n(op.subobjStart("o")); n.append(ChunkType::name(), Chunk::genID(_nss.ns(), _minKey)); myVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod()); n.append(ChunkType::ns(), _nss.ns()); n.append(ChunkType::min(), _minKey); n.append(ChunkType::max(), _maxKey); n.append(ChunkType::shard(), _toShard); n.done(); BSONObjBuilder q(op.subobjStart("o2")); q.append(ChunkType::name(), Chunk::genID(_nss.ns(), _minKey)); 
q.done(); updates.append(op.obj()); } // Version at which the next highest lastmod will be set. If the chunk being moved is the last // in the shard, nextVersion is that chunk's lastmod otherwise the highest version is from the // chunk being bumped on the FROM-shard. ChunkVersion nextVersion = myVersion; // If we have chunks left on the FROM shard, update the version of one of them as well. We can // figure that out by grabbing the metadata as it has been changed. const std::shared_ptr<CollectionMetadata> bumpedCollMetadata( shardingState->getCollectionMetadata(_nss.ns())); if (bumpedCollMetadata->getNumChunks() > 0) { // get another chunk on that shard ChunkType bumpChunk; invariant(bumpedCollMetadata->getNextChunk(bumpedCollMetadata->getMinKey(), &bumpChunk)); BSONObj bumpMin = bumpChunk.getMin(); BSONObj bumpMax = bumpChunk.getMax(); dassert(bumpMin.woCompare(_minKey) != 0); BSONObjBuilder op; op.append("op", "u"); op.appendBool("b", false); op.append("ns", ChunkType::ConfigNS); nextVersion.incMinor(); // same as used on donateChunk BSONObjBuilder n(op.subobjStart("o")); n.append(ChunkType::name(), Chunk::genID(_nss.ns(), bumpMin)); nextVersion.addToBSON(n, ChunkType::DEPRECATED_lastmod()); n.append(ChunkType::ns(), _nss.ns()); n.append(ChunkType::min(), bumpMin); n.append(ChunkType::max(), bumpMax); n.append(ChunkType::shard(), _fromShard); n.done(); BSONObjBuilder q(op.subobjStart("o2")); q.append(ChunkType::name(), Chunk::genID(_nss.ns(), bumpMin)); q.done(); updates.append(op.obj()); log() << "moveChunk updating self version to: " << nextVersion << " through " << bumpMin << " -> " << bumpMax << " for collection '" << _nss.ns() << "'" << migrateLog; } else { log() << "moveChunk moved last chunk out for collection '" << _nss.ns() << "'" << migrateLog; } BSONArrayBuilder preCond; { BSONObjBuilder b; b.append("ns", ChunkType::ConfigNS); b.append("q", BSON("query" << BSON(ChunkType::ns(_nss.ns())) << "orderby" << BSON(ChunkType::DEPRECATED_lastmod() << -1))); { BSONObjBuilder bb(b.subobjStart("res")); // TODO: For backwards compatibility, we can't yet require an epoch here bb.appendTimestamp(ChunkType::DEPRECATED_lastmod(), originalCollVersion.toLong()); bb.done(); } preCond.append(b.obj()); } Status applyOpsStatus{Status::OK()}; try { // For testing migration failures if (MONGO_FAIL_POINT(failMigrationConfigWritePrepare)) { throw DBException("mock migration failure before config write", PrepareConfigsFailedCode); } applyOpsStatus = grid.catalogManager(txn)->applyChunkOpsDeprecated(txn, updates.arr(), preCond.arr()); if (MONGO_FAIL_POINT(failMigrationApplyOps)) { throw SocketException(SocketException::RECV_ERROR, shardingState->getConfigServer(txn).toString()); } } catch (const DBException& ex) { warning() << ex << migrateLog; applyOpsStatus = ex.toStatus(); } if (applyOpsStatus == ErrorCodes::PrepareConfigsFailedCode) { // In the process of issuing the migrate commit, the SyncClusterConnection checks that // the config servers are reachable. If they are not, we are sure that the applyOps // command was not sent to any of the configs, so we can safely back out of the // migration here, by resetting the shard version that we bumped up to in the // donateChunk() call above. 
log() << "About to acquire moveChunk coll lock to reset shard version from " << "failed migration"; { ScopedTransaction transaction(txn, MODE_IX); Lock::DBLock dbLock(txn->lockState(), _nss.db(), MODE_IX); Lock::CollectionLock collLock(txn->lockState(), _nss.ns(), MODE_X); // Revert the metadata back to the state before "forgetting" about the chunk shardingState->undoDonateChunk(txn, _nss.ns(), getCollMetadata()); } log() << "Shard version successfully reset to clean up failed migration"; const string msg = stream() << "Failed to send migrate commit to configs " << causedBy(applyOpsStatus); return Status(applyOpsStatus.code(), msg); } else if (!applyOpsStatus.isOK()) { // This could be a blip in the connectivity. Wait out a few seconds and check if the // commit request made it. // // If the commit made it to the config, we'll see the chunk in the new shard and // there's no further action to be done. // // If the commit did not make it, currently the only way to fix this state is to // bounce the mongod so that the old state (before migrating) is brought in. warning() << "moveChunk commit outcome ongoing" << migrateLog; sleepsecs(10); // Look for the chunk in this shard whose version got bumped. We assume that if that // mod made it to the config server, then applyOps was successful. try { std::vector<ChunkType> newestChunk; Status status = grid.catalogManager(txn)->getChunks(txn, BSON(ChunkType::ns(_nss.ns())), BSON(ChunkType::DEPRECATED_lastmod() << -1), 1, &newestChunk, nullptr); uassertStatusOK(status); ChunkVersion checkVersion; if (!newestChunk.empty()) { invariant(newestChunk.size() == 1); checkVersion = newestChunk[0].getVersion(); } if (checkVersion.equals(nextVersion)) { log() << "moveChunk commit confirmed" << migrateLog; } else { error() << "moveChunk commit failed: version is at " << checkVersion << " instead of " << nextVersion << migrateLog; error() << "TERMINATING" << migrateLog; dbexit(EXIT_SHARDING_ERROR); } } catch (...) { error() << "moveChunk failed to get confirmation of commit" << migrateLog; error() << "TERMINATING" << migrateLog; dbexit(EXIT_SHARDING_ERROR); } } MONGO_FAIL_POINT_PAUSE_WHILE_SET(hangBeforeLeavingCriticalSection); shardingState->migrationSourceManager()->setInCriticalSection(false); ShardingStateRecovery::endMetadataOp(txn); // Migration is done, just log some diagnostics information BSONObj chunkInfo = BSON("min" << _minKey << "max" << _maxKey << "from" << _fromShard << "to" << _toShard); BSONObjBuilder commitInfo; commitInfo.appendElements(chunkInfo); if (res["counts"].type() == Object) { commitInfo.appendElements(res["counts"].Obj()); } grid.catalogManager(txn)->logChange(txn, txn->getClient()->clientAddress(true), "moveChunk.commit", _nss.ns(), commitInfo.obj()); return Status::OK(); }
bool runNoDirectClient( const string& ns , const BSONObj& queryOriginal , const BSONObj& fields , const BSONObj& update , bool upsert , bool returnNew , bool remove , BSONObjBuilder& result , string& errmsg ) { Lock::DBWrite lk( ns ); Client::Context cx( ns ); BSONObj doc; bool found = Helpers::findOne( ns.c_str() , queryOriginal , doc ); BSONObj queryModified = queryOriginal; if ( found && doc["_id"].type() && ! isSimpleIdQuery( queryOriginal ) ) { // we're going to re-write the query to be more efficient // we have to be a little careful because of positional operators // maybe we can pass this all through eventually, but right now isn't an easy way bool hasPositionalUpdate = false; { // if the update has a positional piece ($) // then we need to pull all query parts in // so here we check for $ // a little hacky BSONObjIterator i( update ); while ( i.more() ) { const BSONElement& elem = i.next(); if ( elem.fieldName()[0] != '$' || elem.type() != Object ) continue; BSONObjIterator j( elem.Obj() ); while ( j.more() ) { if ( str::contains( j.next().fieldName(), ".$" ) ) { hasPositionalUpdate = true; break; } } } } BSONObjBuilder b( queryOriginal.objsize() + 10 ); b.append( doc["_id"] ); bool addedAtomic = false; BSONObjIterator i( queryOriginal ); while ( i.more() ) { const BSONElement& elem = i.next(); if ( str::equals( "_id" , elem.fieldName() ) ) { // we already do _id continue; } if ( ! hasPositionalUpdate ) { // if there is a dotted field, accept we may need more query parts continue; } if ( ! addedAtomic ) { b.appendBool( "$atomic" , true ); addedAtomic = true; } b.append( elem ); } queryModified = b.obj(); } if ( remove ) { _appendHelper( result , doc , found , fields ); if ( found ) { deleteObjects( ns.c_str() , queryModified , true , true ); BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) ); le.appendNumber( "n" , 1 ); le.done(); } } else { // update if ( ! found && ! upsert ) { // didn't have it, and am not upserting _appendHelper( result , doc , found , fields ); } else { // we found it or we're updating if ( ! returnNew ) { _appendHelper( result , doc , found , fields ); } UpdateResult res = updateObjects( ns.c_str() , update , queryModified , upsert , false , true , cc().curop()->debug() ); if ( returnNew ) { if ( res.upserted.isSet() ) { queryModified = BSON( "_id" << res.upserted ); } else if ( queryModified["_id"].type() ) { // we do this so that if the update changes the fields, it still matches queryModified = queryModified["_id"].wrap(); } if ( ! Helpers::findOne( ns.c_str() , queryModified , doc ) ) { errmsg = str::stream() << "can't find object after modification " << " ns: " << ns << " queryModified: " << queryModified << " queryOriginal: " << queryOriginal; log() << errmsg << endl; return false; } _appendHelper( result , doc , true , fields ); } BSONObjBuilder le( result.subobjStart( "lastErrorObject" ) ); le.appendBool( "updatedExisting" , res.existing ); le.appendNumber( "n" , res.num ); if ( res.upserted.isSet() ) le.append( "upserted" , res.upserted ); le.done(); } } return true; }
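// Example of the _id rewrite above: given an original query
//   { name: "a", "grades.score": { $gt: 90 } }
// matching a document with _id X, queryModified becomes
//   { _id: X }                                      // no positional ($) update
// or, when the update does contain a positional piece,
//   { _id: X, $atomic: true, name: "a", "grades.score": { $gt: 90 } }
// so the "$" in the update still has the full predicate to resolve against.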
void Top::_appendStatsEntry( BSONObjBuilder& b , const char * statsName , const UsageData& map ) const { BSONObjBuilder bb( b.subobjStart( statsName ) ); bb.appendNumber( "time" , map.time ); bb.appendNumber( "count" , map.count ); bb.done(); }
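// Each call to _appendStatsEntry() emits a sub-document of the form
//   <statsName>: { time: <NumberLong total time>, count: <NumberLong> }
// so a caller invoking it once per usage category builds up, e.g.,
//   { total: { time: ..., count: ... }, queries: { time: ..., count: ... }, ... }
// (the category names here are illustrative).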
static void handleCursorCommand(const string& ns, ClientCursorPin* pin, PipelineRunner* runner, const BSONObj& cmdObj, BSONObjBuilder& result) { ClientCursor* cursor = pin ? pin->c() : NULL; if (pin) { invariant(cursor); invariant(cursor->getRunner() == runner); invariant(cursor->isAggCursor); } BSONElement batchSizeElem = cmdObj.getFieldDotted("cursor.batchSize"); const long long batchSize = batchSizeElem.isNumber() ? batchSizeElem.numberLong() : 101; // same as query // can't use result BSONObjBuilder directly since it won't handle exceptions correctly. BSONArrayBuilder resultsArray; const int byteLimit = MaxBytesToReturnToClientAtOnce; BSONObj next; for (int objCount = 0; objCount < batchSize; objCount++) { // The initial getNext() on a PipelineRunner may be very expensive so we don't // do it when batchSize is 0 since that indicates a desire for a fast return. if (runner->getNext(&next, NULL) != Runner::RUNNER_ADVANCED) { if (pin) pin->deleteUnderlying(); // make it an obvious error to use cursor or runner after this point cursor = NULL; runner = NULL; break; } if (resultsArray.len() + next.objsize() > byteLimit) { // too big. next will be the first doc in the second batch runner->pushBack(next); break; } resultsArray.append(next); } // NOTE: runner->isEOF() can have side effects such as writing by $out. However, it should // be relatively quick since if there was no pin then the input is empty. Also, this // violates the contract for batchSize==0. Sharding requires a cursor to be returned in that // case. This is ok for now however, since you can't have a sharded collection that doesn't // exist. const bool canReturnMoreBatches = pin; if (!canReturnMoreBatches && runner && !runner->isEOF()) { // msgasserting since this shouldn't be possible to trigger from today's aggregation // language. The wording assumes that the only reason pin would be null is if the // collection doesn't exist. msgasserted(17391, str::stream() << "Aggregation has more results than fit in initial batch, but can't " << "create cursor since collection " << ns << " doesn't exist"); } if (cursor) { // If a time limit was set on the pipeline, remaining time is "rolled over" to the // cursor (for use by future getmore ops). cursor->setLeftoverMaxTimeMicros( cc().curop()->getRemainingMaxTimeMicros() ); } BSONObjBuilder cursorObj(result.subobjStart("cursor")); cursorObj.append("id", cursor ? cursor->cursorid() : 0LL); cursorObj.append("ns", ns); cursorObj.append("firstBatch", resultsArray.arr()); cursorObj.done(); }
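// The response assembled above follows the standard cursor-command shape:
//   { cursor: { id: <NumberLong>, ns: "<db.collection>",
//               firstBatch: [ <up to batchSize documents> ] }, ok: 1 }
// with id 0 whenever everything fit in the first batch and the cursor (if any)
// was deleted.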