/* { count: "collectionname"[, query: <query>] }
   returns -1 on ns does not exist error.
*/
long long runCount( const char *ns, const BSONObj &cmd, string &err ) {
    NamespaceDetails *d = nsdetails( ns );
    if ( !d ) {
        err = "ns missing";
        return -1;
    }
    BSONObj query = cmd.getObjectField("query");

    // count of all objects
    if ( query.isEmpty() ) {
        long long num = d->nrecords;
        num = num - cmd["skip"].numberLong();
        if ( num < 0 ) {
            num = 0;
        }
        if ( cmd["limit"].isNumber() ) {
            long long limit = cmd["limit"].numberLong();
            if ( limit < num ) {
                num = limit;
            }
        }
        return num;
    }

    QueryPlanSet qps( ns, query, BSONObj() );
    CountOp original( cmd );
    shared_ptr< CountOp > res = qps.runOp( original );
    if ( !res->complete() ) {
        log() << "Count with ns: " << ns << " and query: " << query
              << " failed with exception: " << res->exceptionMessage() << endl;
        return 0;
    }
    return res->count();
}
void Chunk::pickSplitVector(vector<BSONObj>& splitPoints,
                            long long chunkSize /* bytes */,
                            int maxPoints,
                            int maxObjs) const {
    // Ask the mongod holding this chunk to figure out the split points.
    ScopedDbConnection conn(getShard().getConnString());
    BSONObj result;
    BSONObjBuilder cmd;
    cmd.append( "splitVector" , _manager->getns() );
    cmd.append( "keyPattern" , _manager->getShardKeyPattern().toBSON() );
    cmd.append( "min" , getMin() );
    cmd.append( "max" , getMax() );
    cmd.append( "maxChunkSizeBytes" , chunkSize );
    cmd.append( "maxSplitPoints" , maxPoints );
    cmd.append( "maxChunkObjects" , maxObjs );
    BSONObj cmdObj = cmd.obj();

    if ( ! conn->runCommand( "admin" , cmdObj , result )) {
        conn.done();
        ostringstream os;
        os << "splitVector command failed: " << result;
        uassert( 13345 , os.str() , 0 );
    }

    BSONObjIterator it( result.getObjectField( "splitKeys" ) );
    while ( it.more() ) {
        splitPoints.push_back( it.next().Obj().getOwned() );
    }
    conn.done();
}
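// A minimal sketch of the command document pickSplitVector assembles above; the
// namespace, key pattern, and limit values here are hypothetical, not from the source:
//
//   { splitVector: "test.foo",
//     keyPattern: { x: 1 },
//     min: { x: MinKey }, max: { x: MaxKey },
//     maxChunkSizeBytes: 67108864, maxSplitPoints: 8, maxChunkObjects: 250000 }
//
// The responding mongod replies with { splitKeys: [ { x: ... }, ... ] }, which the
// loop above copies into splitPoints as owned BSONObjs.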
// static
BSONObj IndexLegacy::getMissingField(Collection* collection, const BSONObj& infoObj) {
    BSONObj keyPattern = infoObj.getObjectField( "key" );
    string accessMethodName;
    if ( collection )
        accessMethodName = collection->getIndexCatalog()->getAccessMethodName(keyPattern);
    else
        accessMethodName = IndexNames::findPluginName(keyPattern);

    if (IndexNames::HASHED == accessMethodName ) {
        int hashVersion = infoObj["hashVersion"].numberInt();
        HashSeed seed = infoObj["seed"].numberInt();

        // Explicit null valued fields and missing fields are both represented in hashed indexes
        // using the hash value of the null BSONElement. This is partly for historical reasons
        // (hash of null was used in the initial release of hashed indexes and changing would
        // alter the data format). Additionally, in certain places the hashed index code and
        // the index bound calculation code assume null and missing are indexed identically.
        BSONObj nullObj = BSON("" << BSONNULL);
        return BSON("" << ExpressionKeysPrivate::makeSingleHashKey(
                               nullObj.firstElement(), seed, hashVersion));
    }
    else {
        BSONObjBuilder b;
        b.appendNull("");
        return b.obj();
    }
}
BSONObj Sync::getMissingDoc(const BSONObj& o) {
    OplogReader missingObjReader;
    const char *ns = o.getStringField("ns");

    // capped collections
    NamespaceDetails *nsd = nsdetails(ns);
    if ( nsd && nsd->isCapped() ) {
        log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
        return BSONObj();
    }

    uassert(15916,
            str::stream() << "Can no longer connect to initial sync source: " << hn,
            missingObjReader.connect(hn));

    // might be more than just _id in the update criteria
    BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
    BSONObj missingObj;
    try {
        missingObj = missingObjReader.findOne(ns, query);
    }
    catch(DBException& e) {
        log() << "replication assertion fetching missing object: " << e.what() << endl;
        throw;
    }

    return missingObj;
}
void Pipeline::addRequiredPrivileges(const string& db,
                                     BSONObj cmdObj,
                                     vector<Privilege>* out) {
    ActionSet actions;
    actions.addAction(ActionType::find);
    out->push_back(Privilege(db + '.' + cmdObj.firstElement().str(), actions));

    if (false && cmdObj["allowDiskUsage"].trueValue()) {
        // TODO no privilege for this yet.
    }

    BSONObj pipeline = cmdObj.getObjectField("pipeline");
    BSONForEach(stageElem, pipeline) {
        BSONObj stage = stageElem.embeddedObjectUserCheck();
        if (str::equals(stage.firstElementFieldName(), "$out")) {
            // TODO Figure out how to handle temp collection privileges. For now, using the
            // output ns is ok since we only do db-level privilege checks.
            const string outputNs = db + '.' + stage.firstElement().str();
            ActionSet actions;
            // logically on output ns
            actions.addAction(ActionType::remove);
            actions.addAction(ActionType::insert);
            actions.addAction(ActionType::indexRead);
            // on temp ns due to implementation, but not logically on output ns
            actions.addAction(ActionType::createCollection);
            actions.addAction(ActionType::ensureIndex);
            actions.addAction(ActionType::dropCollection);
            actions.addAction(ActionType::renameCollectionSameDB);
            out->push_back(Privilege(outputNs, actions));
        }
    }
}
BSONObj SyncTail::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
    OplogReader missingObjReader;  // why are we using OplogReader to run a non-oplog query?
    const char* ns = o.getStringField("ns");

    // capped collections
    Collection* collection = db->getCollection(ns);
    if (collection && collection->isCapped()) {
        log() << "missing doc, but this is okay for a capped collection (" << ns << ")";
        return BSONObj();
    }

    const int retryMax = 3;
    for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
        if (retryCount != 1) {
            // if we are retrying, sleep a bit to let the network possibly recover
            sleepsecs(retryCount * retryCount);
        }
        try {
            bool ok = missingObjReader.connect(HostAndPort(_hostname));
            if (!ok) {
                warning() << "network problem detected while connecting to the "
                          << "sync source, attempt " << retryCount << " of " << retryMax << endl;
                continue;  // try again
            }
        } catch (const SocketException&) {
            warning() << "network problem detected while connecting to the "
                      << "sync source, attempt " << retryCount << " of " << retryMax << endl;
            continue;  // try again
        }

        // get _id from oplog entry to create query to fetch document.
        const BSONElement opElem = o.getField("op");
        const bool isUpdate = !opElem.eoo() && opElem.str() == "u";
        const BSONElement idElem = o.getObjectField(isUpdate ? "o2" : "o")["_id"];

        if (idElem.eoo()) {
            severe() << "cannot fetch missing document without _id field: " << o.toString();
            fassertFailedNoTrace(28742);
        }

        BSONObj query = BSONObjBuilder().append(idElem).obj();
        BSONObj missingObj;
        try {
            missingObj = missingObjReader.findOne(ns, query);
        } catch (const SocketException&) {
            warning() << "network problem detected while fetching a missing document from the "
                      << "sync source, attempt " << retryCount << " of " << retryMax << endl;
            continue;  // try again
        } catch (DBException& e) {
            error() << "assertion fetching missing object: " << e.what() << endl;
            throw;
        }

        // success!
        return missingObj;
    }
    // retry count exceeded
    msgasserted(15916,
                str::stream() << "Can no longer connect to initial sync source: " << _hostname);
}
void Chunk::pickMedianKey(OperationContext* txn, BSONObj& medianKey) const {
    // Ask the mongod holding this chunk to figure out the split points.
    ScopedDbConnection conn(_getShardConnectionString(txn));
    BSONObj result;
    BSONObjBuilder cmd;
    cmd.append("splitVector", _manager->getns());
    cmd.append("keyPattern", _manager->getShardKeyPattern().toBSON());
    cmd.append("min", getMin());
    cmd.append("max", getMax());
    cmd.appendBool("force", true);
    BSONObj cmdObj = cmd.obj();

    if (!conn->runCommand("admin", cmdObj, result)) {
        conn.done();

        ostringstream os;
        os << "splitVector command (median key) failed: " << result;
        uassert(13503, os.str(), 0);
    }

    BSONObjIterator it(result.getObjectField("splitKeys"));
    if (it.more()) {
        medianKey = it.next().Obj().getOwned();
    }

    conn.done();
}
virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result,
                 bool fromRepl) {
    string fromhost = cmdObj.getStringField("from");
    if ( fromhost.empty() ) {
        errmsg = "missing from spec";
        return false;
    }
    string collection = cmdObj.getStringField("cloneCollection");
    if ( collection.empty() ) {
        errmsg = "missing cloneCollection spec";
        return false;
    }
    BSONObj query = cmdObj.getObjectField("query");
    if ( query.isEmpty() )
        query = BSONObj();
    BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
    bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
    // Will not be used if doesn't exist.
    int logSizeMb = cmdObj.getIntField( "logSizeMb" );

    /* replication note: we must logOp() not the command, but the cloned data -- if the slave
       were to clone it would get a different point-in-time and not match.
    */
    setClient( collection.c_str() );

    log() << "cloneCollection. db:" << ns << " collection:" << collection
          << " from: " << fromhost << " query: " << query
          << ( copyIndexes ? "" : ", not copying indexes" ) << endl;

    Cloner c;
    long long cursorId;
    if ( !c.startCloneCollection( fromhost.c_str(), collection.c_str(), query, errmsg,
                                  !fromRepl, copyIndexes, logSizeMb, cursorId ) )
        return false;
    return c.finishCloneCollection( fromhost.c_str(), collection.c_str(), query, cursorId, errmsg);
}
bool prepareToBuildIndex(const BSONObj& io,
                         bool mayInterrupt,
                         bool god,
                         const string& sourceNS ) {

    BSONObj key = io.getObjectField("key");

    /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
       all be treated as the same pattern.
    */
    if ( IndexDetails::isIdIndexPattern(key) ) {
        if( !god ) {
            ensureHaveIdIndex( sourceNS.c_str(), mayInterrupt );
            return false;
        }
    }
    else {
        /* is buildIndexes:false set for this replica set member?
           if so we don't build any indexes except _id
        */
        if( theReplSet && !theReplSet->buildIndexes() )
            return false;
    }

    string pluginName = IndexNames::findPluginName( key );
    if ( pluginName.size() ) {
        if (needToUpgradeMinorVersion(pluginName))
            upgradeMinorVersionOrAssert(pluginName);
    }

    return true;
}
void Pipeline::addRequiredPrivileges(Command* commandTemplate,
                                     const string& db,
                                     BSONObj cmdObj,
                                     vector<Privilege>* out) {
    ResourcePattern inputResource(commandTemplate->parseResourcePattern(db, cmdObj));
    uassert(17138,
            mongoutils::str::stream() << "Invalid input resource, " << inputResource.toString(),
            inputResource.isExactNamespacePattern());

    if (false && cmdObj["allowDiskUsage"].trueValue()) {
        // TODO no privilege for this yet.
    }

    out->push_back(Privilege(inputResource, ActionType::find));

    BSONObj pipeline = cmdObj.getObjectField("pipeline");
    BSONForEach(stageElem, pipeline) {
        BSONObj stage = stageElem.embeddedObjectUserCheck();
        if (str::equals(stage.firstElementFieldName(), "$out")) {
            NamespaceString outputNs(db, stage.firstElement().str());
            uassert(17139,
                    mongoutils::str::stream() << "Invalid $out target namespace, "
                                              << outputNs.ns(),
                    outputNs.isValid());

            ActionSet actions;
            actions.addAction(ActionType::remove);
            actions.addAction(ActionType::insert);
            out->push_back(Privilege(ResourcePattern::forExactNamespace(outputNs), actions));
        }
    }
}
static void upgradeMinorVersionOrAssert(const string& newPluginName) {
    const string systemIndexes = cc().database()->name() + ".system.indexes";
    auto_ptr<Runner> runner(InternalPlanner::collectionScan(systemIndexes));
    BSONObj index;
    Runner::RunnerState state;
    while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&index, NULL))) {
        const BSONObj key = index.getObjectField("key");
        const string plugin = IndexNames::findPluginName(key);
        if (IndexNames::existedBefore24(plugin))
            continue;

        const string errmsg = str::stream()
            << "Found pre-existing index " << index << " with invalid type '" << plugin << "'. "
            << "Disallowing creation of new index type '" << newPluginName << "'. See "
            << "http://dochub.mongodb.org/core/index-type-changes";

        error() << errmsg << endl;
        uasserted(16738, errmsg);
    }

    if (Runner::RUNNER_EOF != state) {
        warning() << "Internal error while reading collection " << systemIndexes << endl;
    }

    DataFileHeader* dfh = cc().database()->getFile(0)->getHeader();
    getDur().writingInt(dfh->versionMinor) = PDFILE_VERSION_MINOR_24_AND_NEWER;
}
Status Pipeline::checkAuthForCommand(Client* client,
                                     const std::string& db,
                                     const BSONObj& cmdObj) {
    NamespaceString inputNs(db, cmdObj.firstElement().str());
    auto inputResource = ResourcePattern::forExactNamespace(inputNs);
    uassert(17138,
            mongoutils::str::stream() << "Invalid input namespace, " << inputNs.ns(),
            inputNs.isValid());

    PrivilegeVector privileges;

    if (dps::extractElementAtPath(cmdObj, "pipeline.0.$indexStats")) {
        Privilege::addPrivilegeToPrivilegeVector(
            &privileges,
            Privilege(ResourcePattern::forAnyNormalResource(), ActionType::indexStats));
    } else if (dps::extractElementAtPath(cmdObj, "pipeline.0.$collStats")) {
        Privilege::addPrivilegeToPrivilegeVector(&privileges,
                                                 Privilege(inputResource, ActionType::collStats));
    } else {
        // If no source requiring an alternative permission scheme is specified then default to
        // requiring find() privileges on the given namespace.
        Privilege::addPrivilegeToPrivilegeVector(&privileges,
                                                 Privilege(inputResource, ActionType::find));
    }

    BSONObj pipeline = cmdObj.getObjectField("pipeline");
    for (auto&& stageElem : pipeline) {
        addPrivilegesForStage(db, cmdObj, &privileges, stageElem.embeddedObjectUserCheck());
    }

    if (AuthorizationSession::get(client)->isAuthorizedForPrivileges(privileges))
        return Status::OK();

    return Status(ErrorCodes::Unauthorized, "unauthorized");
}
void pretouchOperation(OperationContext* txn, const BSONObj& op) {
    if (txn->lockState()->isWriteLocked()) {
        // no point pretouching if write locked. not sure if this will ever fire, but just in case.
        return;
    }

    const char* which = "o";
    const char* opType = op.getStringField("op");
    if (*opType == 'i')
        ;
    else if (*opType == 'u')
        which = "o2";
    else
        return;
    /* todo : other operations */

    try {
        BSONObj o = op.getObjectField(which);
        BSONElement _id;
        if (o.getObjectID(_id)) {
            const char* ns = op.getStringField("ns");
            BSONObjBuilder b;
            b.append(_id);
            BSONObj result;
            AutoGetCollectionForRead ctx(txn, ns);
            if (Helpers::findById(txn, ctx.getDb(), ns, b.done(), result)) {
                _dummy_z += result.objsize();  // touch
            }
        }
    } catch (DBException&) {
        log() << "ignoring assertion in pretouchOperation()" << endl;
    }
}
void pretouchOperation(const BSONObj& op) {

    if( Lock::somethingWriteLocked() )
        return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.

    const char *which = "o";
    const char *opType = op.getStringField("op");
    if ( *opType == 'i' )
        ;
    else if( *opType == 'u' )
        which = "o2";
    else
        return;
    /* todo : other operations */

    try {
        BSONObj o = op.getObjectField(which);
        BSONElement _id;
        if( o.getObjectID(_id) ) {
            const char *ns = op.getStringField("ns");
            BSONObjBuilder b;
            b.append(_id);
            BSONObj result;
            Client::ReadContext ctx( ns );
            if( Helpers::findById(cc(), ns, b.done(), result) )
                _dummy_z += result.objsize(); // touch
        }
    }
    catch( DBException& ) {
        log() << "ignoring assertion in pretouchOperation()" << endl;
    }
}
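// For reference, illustrative shapes of the oplog entries both versions of
// pretouchOperation above consume (namespace and values are hypothetical):
//
//   { op: "i", ns: "test.foo", o:  { _id: 1, x: 2 } }                   // insert
//   { op: "u", ns: "test.foo", o2: { _id: 1 }, o: { $set: { x: 3 } } }  // update
//
// Inserts carry the full document in "o", while updates carry the match criteria
// in "o2"; that is why the code switches `which` to "o2" for the 'u' case before
// extracting _id and touching the target document.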
bool run(const char *cmdns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    // so i have to start clone, tell caller its ok to make change
    // at this point the caller locks me, and updates config db
    // then finish calls finish, and then deletes data when cursors are done

    string ns = cmdObj["movechunk.start"].valuestrsafe();
    string to = cmdObj["to"].valuestrsafe();
    string from = cmdObj["from"].valuestrsafe(); // my public address, a tad redundant, but safe
    BSONObj filter = cmdObj.getObjectField( "filter" );

    if ( ns.size() == 0 ) {
        errmsg = "need to specify namespace in command";
        return false;
    }

    if ( to.size() == 0 ) {
        errmsg = "need to specify server to move shard to";
        return false;
    }
    if ( from.size() == 0 ) {
        errmsg = "need to specify server to move shard from (redundant, i know)";
        return false;
    }

    if ( filter.isEmpty() ) {
        errmsg = "need to specify a filter";
        return false;
    }

    log() << "got movechunk.start: " << cmdObj << endl;

    BSONObj res;
    bool ok;
    {
        dbtemprelease unlock;
        ScopedDbConnection conn( to );
        ok = conn->runCommand( "admin" ,
                               BSON( "startCloneCollection" << ns <<
                                     "from" << from <<
                                     "query" << filter ) ,
                               res );
        conn.done();
    }

    log() << " movechunk.start res: " << res << endl;

    if ( ok ) {
        result.append( res["finishToken"] );
    }
    else {
        errmsg = "startCloneCollection failed: ";
        errmsg += res["errmsg"].valuestrsafe();
    }
    return ok;
}
/**
 * Parses a count command object, 'cmdObj'.
 *
 * On success, fills in the out-parameter 'request' and returns an OK status.
 *
 * Returns a failure status if 'cmdObj' is not well formed.
 */
Status parseRequest(const std::string& dbname, const BSONObj& cmdObj, CountRequest* request) const {
    long long skip = 0;
    if (cmdObj["skip"].isNumber()) {
        skip = cmdObj["skip"].numberLong();
        if (skip < 0) {
            return Status(ErrorCodes::BadValue, "skip value is negative in count query");
        }
    } else if (cmdObj["skip"].ok()) {
        return Status(ErrorCodes::BadValue, "skip value is not a valid number");
    }

    long long limit = 0;
    if (cmdObj["limit"].isNumber()) {
        limit = cmdObj["limit"].numberLong();
    } else if (cmdObj["limit"].ok()) {
        return Status(ErrorCodes::BadValue, "limit value is not a valid number");
    }

    // For counts, limit and -limit mean the same thing.
    if (limit < 0) {
        limit = -limit;
    }

    // We don't validate that "query" is a nested object due to SERVER-15456.
    BSONObj query = cmdObj.getObjectField("query");

    BSONObj hintObj;
    if (Object == cmdObj["hint"].type()) {
        hintObj = cmdObj["hint"].Obj();
    } else if (String == cmdObj["hint"].type()) {
        const std::string hint = cmdObj.getStringField("hint");
        hintObj = BSON("$hint" << hint);
    }

    std::string ns = parseNs(dbname, cmdObj);
    if (!nsIsFull(ns)) {
        return Status(ErrorCodes::BadValue, "collection name missing");
    }

    // Parsed correctly. Fill out 'request' with the results.
    request->ns = ns;
    request->query = query;
    request->hint = hintObj;
    request->limit = limit;
    request->skip = skip;

    // By default, count requests are regular count not explain of count.
    request->explain = false;

    return Status::OK();
}
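// An illustrative count command object accepted by the parser above (the
// collection name and index name are hypothetical):
//
//   { count: "coll", query: { x: { $gt: 5 } }, skip: 10, limit: -100, hint: "x_1" }
//
// Given this input, parseRequest normalizes the negative limit to 100 and
// rewrites the string hint as { $hint: "x_1" } before filling out 'request'.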
// prefetch for an oplog operation
void prefetchPagesForReplicatedOp(Database* db, const BSONObj& op) {
    const char *opField;
    const char *opType = op.getStringField("op");
    switch (*opType) {
    case 'i': // insert
    case 'd': // delete
        opField = "o";
        break;
    case 'u': // update
        opField = "o2";
        break;
    default:
        // prefetch ignores other ops
        return;
    }

    BSONObj obj = op.getObjectField(opField);
    const char *ns = op.getStringField("ns");

    Collection* collection = db->getCollection( ns );
    if ( !collection )
        return;

    LOG(4) << "index prefetch for op " << *opType << endl;

    DEV Lock::assertAtLeastReadLocked(ns);

    // should we prefetch index pages on updates? if the update is in-place and doesn't change
    // indexed values, it is actually slower - a lot slower if there are a dozen indexes or
    // lots of multikeys. possible variations (not all mutually exclusive):
    //  1) current behavior: full prefetch
    //  2) don't do it for updates
    //  3) don't do multikey indexes for updates
    //  4) don't prefetchIndexPages on some heuristic; e.g., if it's an $inc.
    //  5) if not prefetching index pages (#2), we should do it if we are upsertings and it
    //     will be an insert. to do that we could do the prefetchRecordPage first and if DNE
    //     then we do #1.
    //
    // note that on deletes 'obj' does not have all the keys we would want to prefetch on.
    // a way to achieve that would be to prefetch the record first, and then afterwards do
    // this part.
    //
    prefetchIndexPages(collection, obj);

    // do not prefetch the data for inserts; it doesn't exist yet
    //
    // we should consider doing the record prefetch for the delete op case as we hit the record
    // when we delete. note if done we only want to touch the first page.
    //
    // update: do record prefetch.
    if ((*opType == 'u') &&
        // do not prefetch the data for capped collections because
        // they typically do not have an _id index for findById() to use.
        !collection->isCapped()) {
        prefetchRecordPages(ns, obj);
    }
}
BSONObj Sync::getMissingDoc(OperationContext* txn, Database* db, const BSONObj& o) {
    OplogReader missingObjReader; // why are we using OplogReader to run a non-oplog query?
    const char *ns = o.getStringField("ns");

    // capped collections
    Collection* collection = db->getCollection(ns);
    if ( collection && collection->isCapped() ) {
        log() << "replication missing doc, but this is okay for a capped collection (" << ns << ")" << endl;
        return BSONObj();
    }

    const int retryMax = 3;
    for (int retryCount = 1; retryCount <= retryMax; ++retryCount) {
        if (retryCount != 1) {
            // if we are retrying, sleep a bit to let the network possibly recover
            sleepsecs(retryCount * retryCount);
        }
        try {
            bool ok = missingObjReader.connect(HostAndPort(hn));
            if (!ok) {
                warning() << "network problem detected while connecting to the "
                          << "sync source, attempt " << retryCount << " of "
                          << retryMax << endl;
                continue; // try again
            }
        }
        catch (const SocketException&) {
            warning() << "network problem detected while connecting to the "
                      << "sync source, attempt " << retryCount << " of "
                      << retryMax << endl;
            continue; // try again
        }

        // might be more than just _id in the update criteria
        BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
        BSONObj missingObj;
        try {
            missingObj = missingObjReader.findOne(ns, query);
        }
        catch (const SocketException&) {
            warning() << "network problem detected while fetching a missing document from the "
                      << "sync source, attempt " << retryCount << " of "
                      << retryMax << endl;
            continue; // try again
        }
        catch (DBException& e) {
            log() << "replication assertion fetching missing object: " << e.what() << endl;
            throw;
        }

        // success!
        return missingObj;
    }
    // retry count exceeded
    msgasserted(15916,
                str::stream() << "Can no longer connect to initial sync source: " << hn);
}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    ShardConnection::sync();

    Timer t;
    string ns = cmdObj.firstElement().valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "no ns";
        return false;
    }

    DBConfigPtr config = grid.getDBConfig( ns );
    if ( ! config->isSharded( ns ) ) {
        errmsg = "ns not sharded. have to shard before can move a chunk";
        return false;
    }

    BSONObj find = cmdObj.getObjectField( "find" );
    if ( find.isEmpty() ) {
        errmsg = "need to specify find. see help";
        return false;
    }

    string toString = cmdObj["to"].valuestrsafe();
    if ( ! toString.size() ) {
        errmsg = "you have to specify where you want to move the chunk";
        return false;
    }

    Shard to = Shard::make( toString );

    // so far, chunk size serves test purposes; it may or may not become a supported parameter
    long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
    if ( maxChunkSizeBytes == 0 ) {
        maxChunkSizeBytes = Chunk::MaxChunkSize;
    }

    tlog() << "CMD: movechunk: " << cmdObj << endl;

    ChunkManagerPtr info = config->getChunkManager( ns );
    ChunkPtr c = info->findChunk( find );
    const Shard& from = c->getShard();

    if ( from == to ) {
        errmsg = "that chunk is already on that shard";
        return false;
    }

    BSONObj res;
    if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ) {
        errmsg = "move failed";
        result.append( "cause" , res );
        return false;
    }

    result.append( "millis" , t.millis() );
    return true;
}
/* this will eventually replace run, once sort is handled */
bool runNoDirectClient( const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                        BSONObjBuilder& result, bool) {
    verify( cmdObj["sort"].eoo() );

    string ns = dbname + '.' + cmdObj.firstElement().valuestr();

    BSONObj query = cmdObj.getObjectField("query");
    BSONObj fields = cmdObj.getObjectField("fields");
    BSONObj update = cmdObj.getObjectField("update");

    bool upsert = cmdObj["upsert"].trueValue();
    bool returnNew = cmdObj["new"].trueValue();
    bool remove = cmdObj["remove"].trueValue();

    if ( remove ) {
        if ( upsert ) {
            errmsg = "remove and upsert can't co-exist";
            return false;
        }
        if ( returnNew ) {
            errmsg = "remove and returnNew can't co-exist";
            return false;
        }
    }
    else if ( update.isEmpty() ) {
        errmsg = "need remove or update";
        return false;
    }

    PageFaultRetryableSection s;
    while ( 1 ) {
        try {
            return runNoDirectClient( ns , query , fields , update ,
                                      upsert , returnNew , remove ,
                                      result , errmsg );
        }
        catch ( PageFaultException& e ) {
            e.touch();
        }
    }
}
Status CmdCount::parseRequest(const std::string& dbname,
                              const BSONObj& cmdObj,
                              CountRequest* request) const {
    const string ns = parseNs(dbname, cmdObj);

    long long skip = 0;
    if (cmdObj["skip"].isNumber()) {
        skip = cmdObj["skip"].numberLong();
        if (skip < 0) {
            return Status(ErrorCodes::BadValue, "skip value is negative in count query");
        }
    } else if (cmdObj["skip"].ok()) {
        return Status(ErrorCodes::BadValue, "skip value is not a valid number");
    }

    long long limit = 0;
    if (cmdObj["limit"].isNumber()) {
        limit = cmdObj["limit"].numberLong();
    } else if (cmdObj["limit"].ok()) {
        return Status(ErrorCodes::BadValue, "limit value is not a valid number");
    }

    // For counts, limit and -limit mean the same thing.
    if (limit < 0) {
        limit = -limit;
    }

    BSONObj query;
    if (!cmdObj["query"].eoo()) {
        if (Object != cmdObj["query"].type()) {
            return Status(ErrorCodes::BadValue, "query field for count must be an object");
        }
        query = cmdObj.getObjectField("query");
    }

    BSONObj hintObj;
    if (Object == cmdObj["hint"].type()) {
        hintObj = cmdObj["hint"].Obj();
    } else if (String == cmdObj["hint"].type()) {
        const std::string hint = cmdObj.getStringField("hint");
        hintObj = BSON("$hint" << hint);
    }

    // Parsed correctly. Fill out 'request' with the results.
    request->ns = ns;
    request->query = query;
    request->hint = hintObj;
    request->limit = limit;
    request->skip = skip;

    return Status::OK();
}
Status Pipeline::checkAuthForCommand(ClientBasic* client,
                                     const std::string& db,
                                     const BSONObj& cmdObj) {
    NamespaceString inputNs(db, cmdObj.firstElement().str());
    auto inputResource = ResourcePattern::forExactNamespace(inputNs);
    uassert(17138,
            mongoutils::str::stream() << "Invalid input namespace, " << inputNs.ns(),
            inputNs.isValid());

    std::vector<Privilege> privileges;

    if (dps::extractElementAtPath(cmdObj, "pipeline.0.$indexStats")) {
        Privilege::addPrivilegeToPrivilegeVector(
            &privileges,
            Privilege(ResourcePattern::forAnyNormalResource(), ActionType::indexStats));
    } else if (dps::extractElementAtPath(cmdObj, "pipeline.0.$collStats")) {
        Privilege::addPrivilegeToPrivilegeVector(&privileges,
                                                 Privilege(inputResource, ActionType::collStats));
    } else {
        // If no source requiring an alternative permission scheme is specified then default to
        // requiring find() privileges on the given namespace.
        Privilege::addPrivilegeToPrivilegeVector(&privileges,
                                                 Privilege(inputResource, ActionType::find));
    }

    BSONObj pipeline = cmdObj.getObjectField("pipeline");
    BSONForEach(stageElem, pipeline) {
        BSONObj stage = stageElem.embeddedObjectUserCheck();
        StringData stageName = stage.firstElementFieldName();
        if (stageName == "$out" && stage.firstElementType() == String) {
            NamespaceString outputNs(db, stage.firstElement().str());
            uassert(17139,
                    mongoutils::str::stream() << "Invalid $out target namespace, "
                                              << outputNs.ns(),
                    outputNs.isValid());

            ActionSet actions;
            actions.addAction(ActionType::remove);
            actions.addAction(ActionType::insert);
            if (shouldBypassDocumentValidationForCommand(cmdObj)) {
                actions.addAction(ActionType::bypassDocumentValidation);
            }
            Privilege::addPrivilegeToPrivilegeVector(
                &privileges, Privilege(ResourcePattern::forExactNamespace(outputNs), actions));
        } else if (stageName == "$lookup" && stage.firstElementType() == Object) {
            NamespaceString fromNs(db, stage.firstElement()["from"].str());
            Privilege::addPrivilegeToPrivilegeVector(
                &privileges,
                Privilege(ResourcePattern::forExactNamespace(fromNs), ActionType::find));
        } else if (stageName == "$graphLookup" && stage.firstElementType() == Object) {
            NamespaceString fromNs(db, stage.firstElement()["from"].str());
            Privilege::addPrivilegeToPrivilegeVector(
                &privileges,
                Privilege(ResourcePattern::forExactNamespace(fromNs), ActionType::find));
        }
    }

    if (AuthorizationSession::get(client)->isAuthorizedForPrivileges(privileges))
        return Status::OK();

    return Status(ErrorCodes::Unauthorized, "unauthorized");
}
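// An illustrative aggregate command (hypothetical namespaces) and the privileges
// the check above derives from it:
//
//   { aggregate: "src", pipeline: [ { $lookup: { from: "other", ... } },
//                                   { $out: "dst" } ] }
//
// requires find on db.src, find on db.other (for the $lookup), and remove/insert
// on db.dst (plus bypassDocumentValidation when the command requests it).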
bool SyncTail::shouldRetry(OperationContext* txn, const BSONObj& o) {
    const NamespaceString nss(o.getStringField("ns"));
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        // Take an X lock on the database in order to preclude other modifications.
        // Also, the database might not exist yet, so create it.
        AutoGetOrCreateDb autoDb(txn, nss.db(), MODE_X);
        Database* const db = autoDb.getDb();

        // we don't have the object yet, which is possible on initial sync. get it.
        log() << "adding missing object" << endl; // rare enough we can log

        BSONObj missingObj = getMissingDoc(txn, db, o);

        if (missingObj.isEmpty()) {
            log() << "missing object not found on source."
                     " presumably deleted later in oplog";
            log() << "o2: " << o.getObjectField("o2").toString();
            log() << "o firstfield: " << o.getObjectField("o").firstElementFieldName();

            return false;
        } else {
            WriteUnitOfWork wunit(txn);

            Collection* const coll = db->getOrCreateCollection(txn, nss.toString());
            invariant(coll);

            Status status = coll->insertDocument(txn, missingObj, true);
            uassert(15917,
                    str::stream() << "failed to insert missing doc: " << status.toString(),
                    status.isOK());

            LOG(1) << "inserted missing doc: " << missingObj.toString() << endl;

            wunit.commit();
            return true;
        }
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "InsertRetry", nss.ns());

    // fixes compile errors on GCC - see SERVER-18219 for details
    MONGO_UNREACHABLE;
}
void run() {
    int secsToSleep = 0;
    while ( 1 ) {
        try {
            ScopedDbConnection conn( _addr );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID );
                if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
                    log() << "writebacklisten command failed! " << result << endl;
                    conn.done();
                    continue;
                }
            }

            log(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );
            if ( data.getBoolField( "writeBack" ) ) {
                string ns = data["ns"].valuestrsafe();

                int len;
                Message m( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , m.header()->valid() );

                grid.getDBConfig( ns )->getChunkManager( ns , true );

                Request r( m , 0 );
                r.process();
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            conn.done();
            secsToSleep = 0;
        }
        catch ( std::exception& e ) { // catch by reference to avoid slicing
            log() << "WriteBackListener exception : " << e.what() << endl;
        }
        catch ( ... ) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }
        secsToSleep++;
        sleepsecs(secsToSleep);
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }
}
TEST(CollectionOptions, ModifyStorageEngineField) {
    CollectionOptions opts;

    // Directly modify storageEngine field in collection options.
    opts.storageEngine = fromjson("{storageEngine1: {x: 1}}");

    // Unrecognized field should not be present in BSON representation.
    BSONObj obj = opts.toBSON();
    ASSERT_FALSE(obj.hasField("unknownField"));

    // Check "storageEngine" field.
    ASSERT_TRUE(obj.hasField("storageEngine"));
    ASSERT_TRUE(obj.getField("storageEngine").isABSONObj());
    BSONObj storageEngine = obj.getObjectField("storageEngine");

    // Check individual storageEngine fields.
    ASSERT_TRUE(storageEngine.getField("storageEngine1").isABSONObj());
    BSONObj storageEngine1 = storageEngine.getObjectField("storageEngine1");
    ASSERT_EQUALS(1, storageEngine1.getIntField("x"));
}
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result,
         bool fromRepl ) {

    //
    // 1.a We'll parse the parameters in two steps. First, make sure we can use the split
    //     index to get a good approximation of the size of the chunk -- without needing to
    //     access the actual data.
    //

    const char* ns = jsobj.getStringField( "splitVector" );
    BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );

    // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
    BSONObj min = jsobj.getObjectField( "min" );
    BSONObj max = jsobj.getObjectField( "max" );
    if ( min.isEmpty() && max.isEmpty() ) {
        BSONObjBuilder minBuilder;
        BSONObjBuilder maxBuilder;
        BSONForEach(key, keyPattern) {
            minBuilder.appendMinKey( key.fieldName() );
            maxBuilder.appendMaxKey( key.fieldName() );
        }
long long runCount( const char *ns, const BSONObj &cmd, string &err, int &errCode ) {
    Collection *cl = getCollection( ns );
    if (cl == NULL) {
        err = "ns missing";
        return -1;
    }

    BSONObj query = cmd.getObjectField("query");
    long long count = 0;
    long long skip = cmd["skip"].numberLong();
    long long limit = cmd["limit"].numberLong();

    if ( limit < 0 ) {
        limit = -limit;
    }

    Client::WithOpSettings wos(OpSettings().setQueryCursorMode(DEFAULT_LOCK_CURSOR)
                                           .setBulkFetch(true));

    Lock::assertAtLeastReadLocked(ns);
    try {
        for (shared_ptr<Cursor> cursor =
                 getOptimizedCursor( ns, query, BSONObj(), _countPlanPolicies );
             cursor->ok() ; cursor->advance() ) {
            if ( cursor->currentMatches() && !cursor->getsetdup( cursor->currPK() ) ) {
                if ( skip > 0 ) {
                    --skip;
                }
                else {
                    ++count;
                    if ( limit > 0 && count >= limit ) {
                        break;
                    }
                }
            }
        }
        return count;
    }
    catch ( const DBException &e ) {
        err = e.toString();
        errCode = e.getCode();
        count = -2;
    }
    catch ( const std::exception &e ) {
        err = e.what();
        errCode = 0;
        count = -2;
    }
    if ( count == -2 ) {
        // Historically we have returned zero in many count assertion cases - see SERVER-2291.
        log() << "Count with ns: " << ns << " and query: " << query
              << " failed with exception: " << err << " code: " << errCode << endl;
    }
    return count;
}
void Sequential::finishRead(BSONObj b, Database* d) {
    unlock1 = b.getStringField("unlock1");
    unlock2 = b.getStringField("unlock2");
    prompt = strdup(readString(b, "prompt").c_str());
    ans = strdup(readString(b, "response").c_str());
    if (b.hasField("components")) { // Ignore atomic sequentials
        BSONObj arr = b.getObjectField("components");
        components = db->b_arr(arr);
    }
}
void Chunk::unserialize(const BSONObj& from) {
    _ns = from.getStringField( "ns" );
    _shard = from.getStringField( "shard" );
    _lastmod = from.hasField( "lastmod" ) ? from["lastmod"]._numberLong() : 0;

    BSONElement e = from["minDotted"];
    cout << from << endl;
    if (e.eoo()) {
        _min = from.getObjectField( "min" ).getOwned();
        _max = from.getObjectField( "max" ).getOwned();
    }
    else { // TODO delete this case after giving people a chance to migrate
        _min = e.embeddedObject().getOwned();
        _max = from.getObjectField( "maxDotted" ).getOwned();
    }

    uassert( 10170 , "Chunk needs a ns" , ! _ns.empty() );
    uassert( 10171 , "Chunk needs a server" , ! _shard.empty() );
    uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() );
    uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
}
bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query,
                                   string &errmsg, bool logForRepl, bool copyIndexes,
                                   int logSizeMb, long long &cursorId ) {
    char db[256];
    nsToClient( ns, db );

    {
        dbtemprelease r;
        auto_ptr< DBClientConnection > c( new DBClientConnection() );
        if ( !c->connect( fromhost, errmsg ) )
            return false;
        if( !replAuthenticate(c.get()) )
            return false;
        conn = c;

        // Start temporary op log
        BSONObjBuilder cmdSpec;
        cmdSpec << "logCollection" << ns << "start" << 1;
        if ( logSizeMb != INT_MIN )
            cmdSpec << "logSizeMb" << logSizeMb;
        BSONObj info;
        if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
            errmsg = "logCollection failed: " + (string)info;
            return false;
        }
    }

    BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
    if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
        return false;

    copy( ns, ns, false, logForRepl, false, false, query );

    if ( copyIndexes ) {
        string indexNs = string( db ) + ".system.indexes";
        copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false,
              BSON( "ns" << ns << "name" << NE << "_id_" ) );
    }

    auto_ptr< DBClientCursor > c;
    {
        dbtemprelease r;
        string logNS = "local.temp.oplog." + string( ns );
        c = conn->query( logNS.c_str(), Query(), 0, 0, 0, Option_CursorTailable );
    }
    if ( c->more() ) {
        replayOpLog( c.get(), query );
        cursorId = c->getCursorId();
        massert( "Expected valid tailing cursor", cursorId != 0 );
    }
    else {
        massert( "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
        cursorId = 0;
    }
    c->decouple();
    return true;
}