void LegacyReplicationCoordinator::prepareReplSetUpdatePositionCommandHandshakes(
        OperationContext* txn, std::vector<BSONObj>* handshakes) {
    // Build one "replSetUpdatePosition" handshake command for ourselves plus
    // one per chained member we are tracking, appending each to *handshakes.
    invariant(getReplicationMode() == modeReplSet);
    boost::lock_guard<boost::mutex> lock(_mutex);

    // handshake obj for us
    {
        BSONObjBuilder selfCmd;
        selfCmd.append("replSetUpdatePosition", 1);
        BSONObjBuilder selfHandshake(selfCmd.subobjStart("handshake"));
        selfHandshake.append("handshake", getMyRID(txn));
        selfHandshake.append("member", theReplSet->selfId());
        selfHandshake.append("config", theReplSet->myConfig().asBson());
        selfHandshake.doneFast();
        handshakes->push_back(selfCmd.obj());
    }

    // handshake objs for all chained members
    for (OIDMemberMap::const_iterator it = _ridMemberMap.begin();
         it != _ridMemberMap.end();
         ++it) {
        BSONObjBuilder chainedCmd;
        chainedCmd.append("replSetUpdatePosition", 1);
        // outer handshake indicates this is a handshake command
        // inner is needed as part of the structure to be passed to gotHandshake
        BSONObjBuilder chainedHandshake(chainedCmd.subobjStart("handshake"));
        chainedHandshake.append("handshake", it->first);
        chainedHandshake.append("member", it->second->id());
        chainedHandshake.append("config", it->second->config().asBson());
        chainedHandshake.doneFast();
        handshakes->push_back(chainedCmd.obj());
    }
}
void DocumentSourceSort::addToBsonArray(BSONArrayBuilder *pBuilder, bool explain) const {
    // Serialize this $sort stage (and, when present, its absorbed $limit)
    // into the pipeline array being built in pBuilder.
    if (explain) {
        // always one obj for combined $sort + $limit
        BSONObjBuilder sortObj (pBuilder->subobjStart());
        BSONObjBuilder insides (sortObj.subobjStart(sortName));
        BSONObjBuilder sortKey (insides.subobjStart("sortKey"));
        sortKeyToBson(&sortKey, false);
        sortKey.doneFast();

        // Fix: the original re-tested `explain` here, but we are already inside
        // the explain branch, so only the limitSrc check is meaningful.
        if (limitSrc) {
            insides.appendNumber("limit", limitSrc->getLimit());
        }
        insides.doneFast();
        sortObj.doneFast();
    }
    else {
        // one obj for $sort + maybe one obj for $limit
        {
            BSONObjBuilder sortObj (pBuilder->subobjStart());
            BSONObjBuilder insides (sortObj.subobjStart(sortName));
            sortKeyToBson(&insides, false);
            insides.doneFast();
            sortObj.doneFast();
        }

        // Emit the coalesced $limit as its own pipeline entry.
        if (limitSrc) {
            limitSrc->addToBsonArray(pBuilder, explain);
        }
    }
}
void DocumentSourceGeoNear::sourceToBson(BSONObjBuilder *pBuilder, bool explain) const {
    // Serialize this $geoNear stage into pBuilder as a "$geoNear" subobject.
    BSONObjBuilder geoNear(pBuilder->subobjStart("$geoNear"));

    // "near" is serialized as an array for legacy coordinate pairs,
    // otherwise as an object.
    if (coordsIsArray)
        geoNear.appendArray("near", coords);
    else
        geoNear.append("near", coords);

    geoNear.append("distanceField", distanceField->getPath(false));
    geoNear.append("limit", limit); // not in buildGeoNearCmd
    if (maxDistance > 0)
        geoNear.append("maxDistance", maxDistance);
    geoNear.append("query", query);
    geoNear.append("spherical", spherical);
    geoNear.append("distanceMultiplier", distanceMultiplier);
    if (includeLocs)
        geoNear.append("includeLocs", includeLocs->getPath(false));
    geoNear.append("uniqueDocs", uniqueDocs);

    geoNear.doneFast();
}
bool Pipeline::run(BSONObjBuilder &result, string &errmsg) { massert(16600, "should not have an empty pipeline", !sources.empty()); /* chain together the sources we found */ DocumentSource* prevSource = sources.front().get(); for(SourceContainer::iterator iter(sources.begin() + 1), listEnd(sources.end()); iter != listEnd; ++iter) { intrusive_ptr<DocumentSource> pTemp(*iter); pTemp->setSource(prevSource); prevSource = pTemp.get(); } /* Iterate through the resulting documents, and add them to the result. We do this even if we're doing an explain, in order to capture the document counts and other stats. However, we don't capture the result documents for explain. */ if (explain) { if (!pCtx->getInRouter()) writeExplainShard(result); else { writeExplainMongos(result); } } else { // the array in which the aggregation results reside // cant use subArrayStart() due to error handling BSONArrayBuilder resultArray; DocumentSource* finalSource = sources.back().get(); for(bool hasDoc = !finalSource->eof(); hasDoc; hasDoc = finalSource->advance()) { Document pDocument(finalSource->getCurrent()); /* add the document to the result set */ BSONObjBuilder documentBuilder (resultArray.subobjStart()); pDocument->toBson(&documentBuilder); documentBuilder.doneFast(); // object will be too large, assert. the extra 1KB is for headers uassert(16389, str::stream() << "aggregation result exceeds maximum document size (" << BSONObjMaxUserSize / (1024 * 1024) << "MB)", resultArray.len() < BSONObjMaxUserSize - 1024); } resultArray.done(); result.appendArray("result", resultArray.arr()); } return true; }
BSONObj ClusterCommandTestFixture::_makeCmd(BSONObj cmdObj, bool includeAfterClusterTime) {
    // Wrap cmdObj with the session/transaction fields and a snapshot
    // readConcern (optionally carrying afterClusterTime).
    BSONObjBuilder builder(cmdObj);

    // Each command runs in a new session.
    builder.append("lsid", makeLogicalSessionIdForTest().toBSON());
    builder.append("txnNumber", TxnNumber(1));
    builder.append("autocommit", false);
    builder.append("startTransaction", true);

    {
        BSONObjBuilder readConcern =
            builder.subobjStart(repl::ReadConcernArgs::kReadConcernFieldName);
        readConcern.append("level", "snapshot");
        if (includeAfterClusterTime) {
            readConcern.append("afterClusterTime", kAfterClusterTime);
        }
        readConcern.doneFast();
    }

    return builder.obj();
}
void Pipeline::run(BSONObjBuilder& result) {
    /* Iterate through the resulting documents, and add them to the result.
       We do this even if we're doing an explain, in order to capture
       the document counts and other stats.  However, we don't capture
       the result documents for explain. */
    if (explain) {
        if (pCtx->getInRouter())
            writeExplainMongos(result);
        else
            writeExplainShard(result);
    }
    else {
        // the array in which the aggregation results reside
        // cant use subArrayStart() due to error handling
        BSONArrayBuilder resultArray;
        DocumentSource* lastStage = sources.back().get();
        bool more = !lastStage->eof();
        while (more) {
            Document doc(lastStage->getCurrent());

            /* add the document to the result set */
            BSONObjBuilder docBuilder(resultArray.subobjStart());
            doc->toBson(&docBuilder);
            docBuilder.doneFast();

            // object will be too large, assert. the extra 1KB is for headers
            uassert(16389,
                    str::stream() << "aggregation result exceeds maximum document size ("
                                  << BSONObjMaxUserSize / (1024 * 1024) << "MB)",
                    resultArray.len() < BSONObjMaxUserSize - 1024);

            more = lastStage->advance();
        }
        resultArray.done();
        result.appendArray("result", resultArray.arr());
    }
}
bool SyncSourceFeedback::replHandshake() { // handshake for us BSONObjBuilder cmd; cmd.append("replSetUpdatePosition", 1); BSONObjBuilder sub (cmd.subobjStart("handshake")); sub.appendAs(_me["_id"], "handshake"); sub.append("member", theReplSet->selfId()); sub.append("config", theReplSet->myConfig().asBson()); sub.doneFast(); LOG(1) << "detecting upstream updater"; BSONObj res; try { if (!_connection->runCommand("admin", cmd.obj(), res)) { if (res["errmsg"].str().find("no such cmd") != std::string::npos) { LOG(1) << "upstream updater is not supported by the member from which we" " are syncing, using oplogreader-based updating instead"; _supportsUpdater = false; } resetConnection(); return false; } else { LOG(1) << "upstream updater is supported"; _supportsUpdater = true; } } catch (const DBException& e) { log() << "SyncSourceFeedback error sending handshake: " << e.what() << endl; resetConnection(); return false; } // handshakes for those connected to us { for (OIDMemberMap::iterator itr = _members.begin(); itr != _members.end(); ++itr) { BSONObjBuilder slaveCmd; slaveCmd.append("replSetUpdatePosition", 1); // outer handshake indicates this is a handshake command // inner is needed as part of the structure to be passed to gotHandshake BSONObjBuilder slaveSub (slaveCmd.subobjStart("handshake")); slaveSub.append("handshake", itr->first); slaveSub.append("member", itr->second->id()); slaveSub.append("config", itr->second->config().asBson()); slaveSub.doneFast(); BSONObj slaveRes; try { if (!_connection->runCommand("admin", slaveCmd.obj(), slaveRes)) { resetConnection(); return false; } } catch (const DBException& e) { log() << "SyncSourceFeedback error sending chained handshakes: " << e.what() << endl; resetConnection(); return false; } } } return true; }
// Sends a replSetUpdatePosition handshake for this node, then one for every
// member chained through us, pruning tracking state for members the sync
// source no longer knows about. Returns false (after resetting the
// connection) on any unrecoverable failure.
bool SyncSourceFeedback::replHandshake() {
    // handshake for us
    BSONObjBuilder cmd;
    cmd.append("replSetUpdatePosition", 1);
    BSONObjBuilder sub (cmd.subobjStart("handshake"));
    sub.appendAs(_me["_id"], "handshake");
    sub.append("member", theReplSet->selfId());
    sub.append("config", theReplSet->myConfig().asBson());
    sub.doneFast();

    LOG(1) << "detecting upstream updater";
    BSONObj res;
    try {
        if (!_connection->runCommand("admin", cmd.obj(), res)) {
            // An unknown-command error means the sync source predates this
            // command; remember that so we fall back to oplogreader updating.
            if (res["errmsg"].str().find("no such cmd") != std::string::npos) {
                LOG(1) << "upstream updater is not supported by the member from which we"
                    " are syncing, using oplogreader-based updating instead";
                _supportsUpdater = false;
            }
            resetConnection();
            return false;
        }
        else {
            LOG(1) << "upstream updater is supported";
            _supportsUpdater = true;
        }
    }
    catch (const DBException& e) {
        log() << "SyncSourceFeedback error sending handshake: " << e.what() << endl;
        resetConnection();
        return false;
    }

    // handshakes for those connected to us
    {
        // Manual while loop (rather than for) because erasing a member must
        // advance the iterator before the erase; see the `continue` below.
        OIDMemberMap::iterator itr = _members.begin();
        while (itr != _members.end()) {
            BSONObjBuilder slaveCmd;
            slaveCmd.append("replSetUpdatePosition", 1);
            // outer handshake indicates this is a handshake command
            // inner is needed as part of the structure to be passed to gotHandshake
            BSONObjBuilder slaveSub (slaveCmd.subobjStart("handshake"));
            slaveSub.append("handshake", itr->first);
            slaveSub.append("member", itr->second->id());
            slaveSub.append("config", itr->second->config().asBson());
            slaveSub.doneFast();

            BSONObj slaveRes;
            try {
                if (!_connection->runCommand("admin", slaveCmd.obj(), slaveRes)) {
                    // NOTE(review): the matched substring includes a trailing
                    // space ("node could not be found ") — confirm this is the
                    // exact text the sync source emits.
                    if (slaveRes["errmsg"].str().find("node could not be found ")
                            != std::string::npos) {
                        if (!theReplSet->getMutableMember(itr->second->id())) {
                            // Neither the sync source nor our own config knows
                            // this member: drop it from both tracking maps.
                            log() << "sync source does not have member " << itr->second->id()
                                  << " in its config and neither do we, removing member from"
                                     " tracking";
                            // Copy-then-advance so the erase does not
                            // invalidate the live iterator.
                            OIDMemberMap::iterator removeItr = itr;
                            ++itr;
                            _slaveMap.erase(removeItr->first);
                            _members.erase(removeItr);
                            continue;
                        }
                        // here the node exists in our config, so do not stop tracking it
                        // and continue with the handshaking process
                    }
                    else {
                        // Any other command failure is treated as fatal for
                        // this handshake round.
                        resetConnection();
                        return false;
                    }
                }
            }
            catch (const DBException& e) {
                log() << "SyncSourceFeedback error sending chained handshakes: "
                      << e.what() << endl;
                resetConnection();
                return false;
            }
            ++itr;
        }
    }
    return true;
}