// static
Status ParsedProjection::make(const BSONObj& spec,
                              const MatchExpression* const query,
                              ParsedProjection** out,
                              const MatchExpressionParser::WhereCallback& whereCallback) {
    // Are we including or excluding fields?  Values:
    //  -1 when we haven't initialized it.
    //   1 when we're including
    //   0 when we're excluding.
    int include_exclude = -1;

    // If any of these are 'true' the projection isn't covered.
    bool include = true;
    bool hasNonSimple = false;
    bool hasDottedField = false;

    bool includeID = true;

    bool hasIndexKeyProjection = false;

    bool wantGeoNearPoint = false;
    bool wantGeoNearDistance = false;

    // Until we see a positional or elemMatch operator we're normal.
    ArrayOpType arrayOpType = ARRAY_OP_NORMAL;

    BSONObjIterator it(spec);
    while (it.more()) {
        BSONElement e = it.next();

        if (!e.isNumber() && !e.isBoolean()) {
            hasNonSimple = true;
        }

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            if (1 != obj.nFields()) {
                return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
            }

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    // This is A-OK.
                } else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    if (2 != arr.nFields()) {
                        return Status(ErrorCodes::BadValue, "$slice array wrong size");
                    }

                    BSONObjIterator it(arr);
                    // Skip over 'skip'.
                    it.next();
                    int limit = it.next().numberInt();
                    if (limit <= 0) {
                        return Status(ErrorCodes::BadValue, "$slice limit must be positive");
                    }
                } else {
                    return Status(ErrorCodes::BadValue,
                                  "$slice only supports numbers and [skip, limit] arrays");
                }
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                // Validate $elemMatch arguments and dependencies.
                if (Object != e2.type()) {
                    return Status(ErrorCodes::BadValue,
                                  "elemMatch: Invalid argument, object required.");
                }

                if (ARRAY_OP_POSITIONAL == arrayOpType) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot specify positional operator and $elemMatch.");
                }

                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot use $elemMatch projection on a nested field.");
                }

                arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());

                // TODO: Is there a faster way of validating the elemMatchObj?
                StatusWithMatchExpression swme =
                    MatchExpressionParser::parse(elemMatchObj, whereCallback);
                if (!swme.isOK()) {
                    return swme.getStatus();
                }
                delete swme.getValue();
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                // Field for meta must be top level.  We can relax this at some point.
                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
                }

                // Make sure the argument to $meta is something we recognize.
                // e.g. {x: {$meta: "textScore"}}
                if (String != e2.type()) {
                    return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
                }

                if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
                    e2.valuestr() != LiteParsedQuery::metaRecordId &&
                    e2.valuestr() != LiteParsedQuery::metaIndexKey &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
                    return Status(ErrorCodes::BadValue,
                                  "unsupported $meta operator: " + e2.str());
                }

                // This clobbers everything else.
                if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
                    hasIndexKeyProjection = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
                    wantGeoNearDistance = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
                    wantGeoNearPoint = true;
                }
            } else {
                return Status(ErrorCodes::BadValue,
                              string("Unsupported projection option: ") + e.toString());
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            includeID = false;
        } else {
            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                hasDottedField = true;
            }

            // Validate input.
            if (include_exclude == -1) {
                // If we haven't specified an include/exclude, initialize include_exclude.
                // We expect further include/excludes to match it.
                include_exclude = e.trueValue();
                include = !e.trueValue();
            } else if (static_cast<bool>(include_exclude) != e.trueValue()) {
                // Make sure that the incl./excl. matches the previous.
                return Status(ErrorCodes::BadValue,
                              "Projection cannot have a mix of inclusion and exclusion.");
            }
        }

        if (_isPositionalOperator(e.fieldName())) {
            // Validate the positional op.
            if (!e.trueValue()) {
                return Status(ErrorCodes::BadValue,
                              "Cannot exclude array elements with the positional operator.");
            }

            if (ARRAY_OP_POSITIONAL == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify more than one positional proj. per query.");
            }

            if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify positional operator and $elemMatch.");
            }

            std::string after = mongoutils::str::after(e.fieldName(), ".$");
            if (mongoutils::str::contains(after, ".$")) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' contains "
                   << "the positional operator more than once.";
                return Status(ErrorCodes::BadValue, ss);
            }

            std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
            if (!_hasPositionalOperatorMatch(query, matchfield)) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' does not "
                   << "match the query document.";
                return Status(ErrorCodes::BadValue, ss);
            }

            arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }

    // Fill out the returned obj.
    unique_ptr<ParsedProjection> pp(new ParsedProjection());

    // The positional operator uses the MatchDetails from the query
    // expression to know which array element was matched.
    pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;

    // Save the raw spec.  It should be owned by the LiteParsedQuery.
    verify(spec.isOwned());
    pp->_source = spec;
    pp->_returnKey = hasIndexKeyProjection;

    // Dotted fields aren't covered, non-simple require match details, and as for include, "if
    // we default to including then we can't use an index because we don't know what we're
    // missing."
    pp->_requiresDocument = include || hasNonSimple || hasDottedField;

    // Add geoNear projections.
    pp->_wantGeoNearPoint = wantGeoNearPoint;
    pp->_wantGeoNearDistance = wantGeoNearDistance;

    // If it's possible to compute the projection in a covered fashion, populate _requiredFields
    // so the planner can perform projection analysis.
    if (!pp->_requiresDocument) {
        if (includeID) {
            pp->_requiredFields.push_back("_id");
        }

        // The only way we could be here is if spec is only simple non-dotted-field projections.
        // Therefore we can iterate over spec to get the fields required.
        BSONObjIterator srcIt(spec);
        while (srcIt.more()) {
            BSONElement elt = srcIt.next();
            // We've already handled the _id field before entering this loop.
            if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
                continue;
            }
            if (elt.trueValue()) {
                pp->_requiredFields.push_back(elt.fieldName());
            }
        }
    }

    // returnKey clobbers everything.
    if (hasIndexKeyProjection) {
        pp->_requiresDocument = false;
    }

    *out = pp.release();
    return Status::OK();
}
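// --- Illustrative sketch (not from the original source) ---------------------
// How a caller might drive ParsedProjection::make.  'queryExpr' and
// 'whereCallback' are hypothetical stand-ins for a parsed query tree and a
// parser callback; boost::scoped_ptr is assumed available as elsewhere in
// this tree.
static void exampleMakeProjection(const MatchExpression* queryExpr,
                                  const MatchExpressionParser::WhereCallback& whereCallback) {
    // A simple inclusion projection that also drops _id.  getOwned() satisfies
    // the verify(spec.isOwned()) inside make().
    BSONObj spec = BSON("a" << 1 << "_id" << 0);
    ParsedProjection* rawPP = NULL;
    Status status = ParsedProjection::make(spec.getOwned(), queryExpr, &rawPP, whereCallback);
    if (status.isOK()) {
        boost::scoped_ptr<ParsedProjection> pp(rawPP);
        // Since {a: 1, _id: 0} is simple and non-dotted, the parse marks it as
        // computable from index keys alone, so the planner may consider a
        // covered plan with _requiredFields == ["a"].
    }
}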
ProjectionExec::ProjectionExec(const BSONObj& spec,
                               const MatchExpression* queryExpression,
                               const CollatorInterface* collator,
                               const ExtensionsCallback& extensionsCallback)
    : _include(true),
      _special(false),
      _source(spec),
      _includeID(true),
      _skip(0),
      _limit(-1),
      _arrayOpType(ARRAY_OP_NORMAL),
      _queryExpression(queryExpression),
      _hasReturnKey(false),
      _collator(collator) {
    // Whether we're including or excluding fields.
    enum class IncludeExclude { kUninitialized, kInclude, kExclude };
    IncludeExclude includeExclude = IncludeExclude::kUninitialized;

    BSONObjIterator it(_source);
    while (it.more()) {
        BSONElement e = it.next();

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            verify(1 == obj.nFields());

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0) {
                        add(e.fieldName(), i, -i);  // limit is now positive
                    } else {
                        add(e.fieldName(), 0, i);
                    }
                } else {
                    verify(e2.type() == Array);
                    BSONObj arr = e2.embeddedObject();
                    verify(2 == arr.nFields());

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();

                    verify(limit > 0);

                    add(e.fieldName(), skip, limit);
                }
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                _arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());
                _elemMatchObjs.push_back(elemMatchObj);
                StatusWithMatchExpression statusWithMatcher =
                    MatchExpressionParser::parse(elemMatchObj, extensionsCallback, _collator);
                verify(statusWithMatcher.isOK());
                // And store it in _matchers.
                _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] =
                    statusWithMatcher.getValue().release();

                add(e.fieldName(), true);
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                verify(String == e2.type());

                if (e2.valuestr() == QueryRequest::metaTextScore) {
                    _meta[e.fieldName()] = META_TEXT_SCORE;
                } else if (e2.valuestr() == QueryRequest::metaSortKey) {
                    _sortKeyMetaFields.push_back(e.fieldName());
                    _meta[_sortKeyMetaFields.back()] = META_SORT_KEY;
                } else if (e2.valuestr() == QueryRequest::metaRecordId) {
                    _meta[e.fieldName()] = META_RECORDID;
                } else if (e2.valuestr() == QueryRequest::metaGeoNearPoint) {
                    _meta[e.fieldName()] = META_GEONEAR_POINT;
                } else if (e2.valuestr() == QueryRequest::metaGeoNearDistance) {
                    _meta[e.fieldName()] = META_GEONEAR_DIST;
                } else if (e2.valuestr() == QueryRequest::metaIndexKey) {
                    _hasReturnKey = true;
                } else {
                    // This shouldn't happen, should be caught by parsing.
                    verify(0);
                }
            } else {
                verify(0);
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        } else {
            add(e.fieldName(), e.trueValue());

            // If we haven't specified an include/exclude, initialize includeExclude.
            if (includeExclude == IncludeExclude::kUninitialized) {
                includeExclude =
                    e.trueValue() ? IncludeExclude::kInclude : IncludeExclude::kExclude;
                _include = !e.trueValue();
            }
        }

        if (mongoutils::str::contains(e.fieldName(), ".$")) {
            _arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }
}
void _update( Request& r , DbMessage& d, ChunkManager* manager ){
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( 10201 , "invalid update" , d.moreJSObjs() );
    BSONObj toupdate = d.nextJsObj();

    BSONObj chunkFinder = query;

    bool upsert = flags & UpdateOption_Upsert;
    bool multi = flags & UpdateOption_Multi;

    if ( multi )
        uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );

    if ( upsert && !(manager->hasShardKey(toupdate) ||
                     (toupdate.firstElement().fieldName()[0] == '$' &&
                      manager->hasShardKey(query)))) {
        throw UserException( 8012 , "can't upsert something without shard key" );
    }

    bool save = false;
    if ( ! manager->hasShardKey( query ) ){
        if ( multi ){
        }
        else if ( query.nFields() != 1 ||
                  strcmp( query.firstElement().fieldName() , "_id" ) ){
            throw UserException( 8013 ,
                                 "can't do update with query that doesn't have the shard key" );
        }
        else {
            save = true;
            chunkFinder = toupdate;
        }
    }

    if ( ! save ){
        if ( toupdate.firstElement().fieldName()[0] == '$' ){
            // TODO: check for $set, etc.. on shard key
        }
        else if ( manager->hasShardKey( toupdate ) &&
                  manager->getShardKey().compare( query , toupdate ) ){
            throw UserException( 8014 , "change would move shards!" );
        }
    }

    if ( multi ){
        vector<Chunk*> chunks;
        manager->getChunksForQuery( chunks , chunkFinder );
        set<string> seen;
        for ( vector<Chunk*>::iterator i=chunks.begin(); i!=chunks.end(); i++){
            Chunk * c = *i;
            if ( seen.count( c->getShard() ) )
                continue;
            doWrite( dbUpdate , r , c->getShard() );
            seen.insert( c->getShard() );
        }
    }
    else {
        Chunk& c = manager->findChunk( chunkFinder );
        doWrite( dbUpdate , r , c.getShard() );
        c.splitIfShould( d.msg().data->dataLen() );
    }
}
BSONObj interpreterVersion(const BSONObj& a, void* data) {
    uassert( 16453, "interpreterVersion accepts no arguments", a.nFields() == 0 );
    return BSON( "" << globalScriptEngine->getInterpreterVersionString() );
}
UpdateResult _updateObjects( bool su,
                             const char* ns,
                             const BSONObj& updateobj,
                             const BSONObj& patternOrig,
                             bool upsert,
                             bool multi,
                             bool logop,
                             OpDebug& debug,
                             RemoveSaver* rs,
                             bool fromMigrate,
                             const QueryPlanSelectionPolicy& planPolicy,
                             bool forReplication ) {

    DEBUGUPDATE( "update: " << ns
                 << " update: " << updateobj
                 << " query: " << patternOrig
                 << " upsert: " << upsert << " multi: " << multi );

    Client& client = cc();
    int profile = client.database()->profile;

    debug.updateobj = updateobj;

    // The idea with these here is to make them loop invariant for
    // multi updates, and thus be a bit faster for that case.  The
    // pointers may be left invalid on a failed or terminal yield
    // recovery.
    NamespaceDetails* d = nsdetails(ns); // can be null if an upsert...
    NamespaceDetailsTransient* nsdt = &NamespaceDetailsTransient::get(ns);

    auto_ptr<ModSet> mods;
    bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
    int modsIsIndexed = false; // really the # of indexes
    if ( isOperatorUpdate ) {
        if( d && d->indexBuildsInProgress ) {
            set<string> bgKeys;
            for (int i = 0; i < d->indexBuildsInProgress; i++) {
                d->idx(d->nIndexes+i).keyPattern().getFieldNames(bgKeys);
            }
            mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys, forReplication) );
        }
        else {
            mods.reset( new ModSet(updateobj, nsdt->indexKeys(), NULL, forReplication) );
        }
        modsIsIndexed = mods->isIndexed();
    }

    if( planPolicy.permitOptimalIdPlan() && !multi && isSimpleIdQuery(patternOrig) && d &&
        !modsIsIndexed ) {
        int idxNo = d->findIdIndex();
        if( idxNo >= 0 ) {
            debug.idhack = true;

            UpdateResult result = _updateById( isOperatorUpdate,
                                               idxNo,
                                               mods.get(),
                                               profile,
                                               d,
                                               nsdt,
                                               su,
                                               ns,
                                               updateobj,
                                               patternOrig,
                                               logop,
                                               debug,
                                               fromMigrate);
            if ( result.existing || ! upsert ) {
                return result;
            }
            else if ( upsert && ! isOperatorUpdate ) {
                // this handles repl inserts
                checkNoMods( updateobj );
                debug.upsert = true;
                BSONObj no = updateobj;
                theDataFileMgr.insertWithObjMod(ns, no, false, su);
                if ( logop )
                    logOp( "i", ns, no, 0, 0, fromMigrate );
                return UpdateResult( 0 , 0 , 1 , no );
            }
        }
    }

    int numModded = 0;
    debug.nscanned = 0;
    shared_ptr<Cursor> c =
        NamespaceDetailsTransient::getCursor( ns, patternOrig, BSONObj(), planPolicy );
    d = nsdetails(ns);
    nsdt = &NamespaceDetailsTransient::get(ns);
    bool autoDedup = c->autoDedup();

    if( c->ok() ) {
        set<DiskLoc> seenObjects;
        MatchDetails details;
        auto_ptr<ClientCursor> cc;
        do {

            if ( cc.get() == 0 &&
                 client.allowedToThrowPageFaultException() &&
                 ! c->currLoc().isNull() &&
                 ! c->currLoc().rec()->likelyInPhysicalMemory() ) {
                throw PageFaultException( c->currLoc().rec() );
            }

            bool atomic = c->matcher() && c->matcher()->docMatcher().atomic();

            if ( ! atomic && debug.nscanned > 0 ) {
                // we need to use a ClientCursor to yield
                if ( cc.get() == 0 ) {
                    shared_ptr< Cursor > cPtr = c;
                    cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
                }

                bool didYield;
                if ( ! cc->yieldSometimes( ClientCursor::WillNeed, &didYield ) ) {
                    cc.release();
                    break;
                }
                if ( !c->ok() ) {
                    break;
                }

                if ( didYield ) {
                    d = nsdetails(ns);
                    if ( ! d )
                        break;
                    nsdt = &NamespaceDetailsTransient::get(ns);
                    if ( mods.get() && ! mods->isIndexed() ) {
                        set<string> bgKeys;
                        for (int i = 0; i < d->indexBuildsInProgress; i++) {
                            // we need to re-check indexes
                            d->idx(d->nIndexes+i).keyPattern().getFieldNames(bgKeys);
                        }
                        mods->updateIsIndexed( nsdt->indexKeys() , &bgKeys );
                        modsIsIndexed = mods->isIndexed();
                    }
                }

            } // end yielding block

            debug.nscanned++;

            if ( mods.get() && mods->hasDynamicArray() ) {
                // The Cursor must have a Matcher to record an elemMatchKey.  But currently
                // a modifier on a dynamic array field may be applied even if there is no
                // elemMatchKey, so a matcher cannot be required.
                //verify( c->matcher() );
                details.requestElemMatchKey();
            }

            if ( !c->currentMatches( &details ) ) {
                c->advance();
                continue;
            }

            Record* r = c->_current();
            DiskLoc loc = c->currLoc();

            if ( c->getsetdup( loc ) && autoDedup ) {
                c->advance();
                continue;
            }

            BSONObj js = BSONObj::make(r);

            BSONObj pattern = patternOrig;

            if ( logop ) {
                BSONObjBuilder idPattern;
                BSONElement id;
                // NOTE: If the matching object lacks an id, we'll log
                // with the original pattern.  This isn't replay-safe.
                // It might make sense to suppress the log instead
                // if there's no id.
                if ( js.getObjectID( id ) ) {
                    idPattern.append( id );
                    pattern = idPattern.obj();
                }
                else {
                    uassert( 10157 ,
                             "multi-update requires all modified objects to have an _id" ,
                             ! multi );
                }
            }

            /* look for $inc etc.  note as listed here, all fields to inc must be this type,
               you can't set some regular ones at the moment. */
            if ( isOperatorUpdate ) {

                if ( multi ) {
                    // go to next record in case this one moves
                    c->advance();

                    // Update operations are deduped for cursors that implement their own
                    // deduplication.  In particular, some geo cursors are excluded.
                    if ( autoDedup ) {

                        if ( seenObjects.count( loc ) ) {
                            continue;
                        }

                        // SERVER-5198 Advance past the document to be modified, provided
                        // deduplication is enabled, but see SERVER-5725.
                        while( c->ok() && loc == c->currLoc() ) {
                            c->advance();
                        }
                    }
                }

                const BSONObj& onDisk = loc.obj();

                ModSet* useMods = mods.get();

                auto_ptr<ModSet> mymodset;
                if ( details.hasElemMatchKey() && mods->hasDynamicArray() ) {
                    useMods = mods->fixDynamicArray( details.elemMatchKey() );
                    mymodset.reset( useMods );
                }

                auto_ptr<ModSetState> mss = useMods->prepare( onDisk );

                bool willAdvanceCursor =
                    multi && c->ok() && ( modsIsIndexed || ! mss->canApplyInPlace() );

                if ( willAdvanceCursor ) {
                    if ( cc.get() ) {
                        cc->setDoingDeletes( true );
                    }
                    c->prepareToTouchEarlierIterate();
                }

                // If we've made it this far, "ns" must contain a valid collection name, and so
                // is of the form "db.collection".  Therefore, the following expression must
                // always be valid.  "system.users" updates must never be done in place, in
                // order to ensure that they are validated inside DataFileMgr::updateRecord(.).
                bool isSystemUsersMod = (NamespaceString(ns).coll == "system.users");

                if ( modsIsIndexed <= 0 && mss->canApplyInPlace() && !isSystemUsersMod ) {
                    mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );

                    DEBUGUPDATE( "\t\t\t doing in place update" );
                    if ( profile && !multi )
                        debug.fastmod = true;

                    if ( modsIsIndexed ) {
                        seenObjects.insert( loc );
                    }
                    d->paddingFits();
                }
                else {
                    if ( rs )
                        rs->goingToDelete( onDisk );

                    BSONObj newObj = mss->createNewFromMods();
                    checkTooLarge(newObj);
                    DiskLoc newLoc = theDataFileMgr.updateRecord(ns,
                                                                 d,
                                                                 nsdt,
                                                                 r,
                                                                 loc,
                                                                 newObj.objdata(),
                                                                 newObj.objsize(),
                                                                 debug);

                    if ( newLoc != loc || modsIsIndexed ){
                        // log() << "Moved obj " << newLoc.obj()["_id"] << " from " << loc
                        //       << " to " << newLoc << endl;
                        // object moved, need to make sure we don't see it again
                        seenObjects.insert( newLoc );
                    }
                }

                if ( logop ) {
                    DEV verify( mods->size() );
                    BSONObj logObj = mss->getOpLogRewrite();
                    DEBUGUPDATE( "\t rewrite update: " << logObj );

                    // It is possible that the entire mod set was a no-op over this
                    // document.  We would have an empty log record in that case.  If we
                    // call logOp, with an empty record, that would be replicated as "clear
                    // this record", which is not what we want.  Therefore, to get a no-op
                    // in the replica, we simply don't log.
                    if ( logObj.nFields() ) {
                        logOp("u", ns, logObj , &pattern, 0, fromMigrate );
                    }
                }
                numModded++;
                if ( ! multi )
                    return UpdateResult( 1 , 1 , numModded , BSONObj() );
                if ( willAdvanceCursor )
                    c->recoverFromTouchingEarlierIterate();

                getDur().commitIfNeeded();

                continue;
            }

            uassert( 10158 , "multi update only works with $ operators" , ! multi );

            BSONElementManipulator::lookForTimestamps( updateobj );
            checkNoMods( updateobj );
            theDataFileMgr.updateRecord(ns, d, nsdt, r, loc,
                                        updateobj.objdata(), updateobj.objsize(), debug, su);
            if ( logop ) {
                DEV wassert( !su ); // a superuser (su) op doesn't get logged; this would be bad.
                logOp("u", ns, updateobj, &pattern, 0, fromMigrate );
            }
            return UpdateResult( 1 , 0 , 1 , BSONObj() );
        } while ( c->ok() );
    } // endif

    if ( numModded )
        return UpdateResult( 1 , 1 , numModded , BSONObj() );

    if ( upsert ) {
        if ( updateobj.firstElementFieldName()[0] == '$' ) {
            // upsert of an $operation.  build a default object
            BSONObj newObj = mods->createNewFromQuery( patternOrig );
            checkNoMods( newObj );
            debug.fastmodinsert = true;
            theDataFileMgr.insertWithObjMod(ns, newObj, false, su);
            if ( logop )
                logOp( "i", ns, newObj, 0, 0, fromMigrate );

            return UpdateResult( 0 , 1 , 1 , newObj );
        }
        uassert( 10159 , "multi update only works with $ operators" , ! multi );
        checkNoMods( updateobj );
        debug.upsert = true;
        BSONObj no = updateobj;
        theDataFileMgr.insertWithObjMod(ns, no, false, su);
        if ( logop )
            logOp( "i", ns, no, 0, 0, fromMigrate );
        return UpdateResult( 0 , 0 , 1 , no );
    }

    return UpdateResult( 0 , isOperatorUpdate , 0 , BSONObj() );
}
void IndexScanNode::computeProperties() {
    _sorts.clear();

    BSONObj sortPattern;
    {
        BSONObjBuilder sortBob;
        BSONObj normalizedIndexKeyPattern(LiteParsedQuery::normalizeSortOrder(indexKeyPattern));
        BSONObjIterator it(normalizedIndexKeyPattern);
        while (it.more()) {
            BSONElement elt = it.next();
            // Zero is returned if elt is not a number.  This happens when elt is hashed or
            // 2dsphere, our two projection indices.  We want to drop those from the sort
            // pattern.
            int val = elt.numberInt() * direction;
            if (0 != val) {
                sortBob.append(elt.fieldName(), val);
            }
        }
        sortPattern = sortBob.obj();
    }
    _sorts.insert(sortPattern);

    const int nFields = sortPattern.nFields();
    if (nFields > 1) {
        // We're sorted not only by sortPattern but also by all prefixes of it.
        for (int i = 0; i < nFields; ++i) {
            // Make obj out of fields [0,i]
            BSONObjIterator it(sortPattern);
            BSONObjBuilder prefixBob;
            for (int j = 0; j <= i; ++j) {
                prefixBob.append(it.next());
            }
            _sorts.insert(prefixBob.obj());
        }
    }

    // If we are using the index {a:1, b:1} to answer the predicate {a: 10}, it's sorted
    // both by the index key pattern and by the pattern {b: 1}.

    // See if there are any fields with equalities for bounds.  We can drop these
    // from any sort orders created.
    set<string> equalityFields;
    if (!bounds.isSimpleRange) {
        // Figure out how many fields are point intervals.
        for (size_t i = 0; i < bounds.fields.size(); ++i) {
            const OrderedIntervalList& oil = bounds.fields[i];
            if (oil.intervals.size() != 1) {
                continue;
            }
            const Interval& ival = oil.intervals[0];
            if (!ival.isPoint()) {
                continue;
            }
            equalityFields.insert(oil.name);
        }
    }

    if (equalityFields.empty()) {
        return;
    }

    // TODO: Each field in equalityFields could be dropped from the sort order since it is
    // a point interval.  The full set of sort orders is as follows:
    // For each sort in _sorts:
    //    For each drop in powerset(equalityFields):
    //        Remove fields in 'drop' from 'sort' and add resulting sort to output.
    //
    // Since this involves a powerset, we only remove point intervals that the prior sort
    // planning code removed, namely the contiguous prefix of the key pattern.
    BSONObjIterator it(sortPattern);
    BSONObjBuilder prefixBob;
    while (it.more()) {
        BSONElement elt = it.next();
        // TODO: string slowness.  fix when bounds are stringdata not string.
        if (equalityFields.end() == equalityFields.find(string(elt.fieldName()))) {
            prefixBob.append(elt);
            // This field isn't a point interval, can't drop.
            break;
        }
    }

    while (it.more()) {
        prefixBob.append(it.next());
    }

    // If we have an index {a:1} and an equality on 'a' don't append an empty sort order.
    BSONObj filterPointsObj = prefixBob.obj();
    if (!filterPointsObj.isEmpty()) {
        _sorts.insert(filterPointsObj);
    }
}
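// --- Illustrative sketch (not from the original source) ---------------------
// The prefix logic above, restated on plain data: an index scanned in order
// {a: 1, b: -1, c: 1} also provides the sort orders {a: 1} and {a: 1, b: -1}.
// A minimal standalone enumeration of those prefixes:
#include <string>
#include <utility>
#include <vector>

typedef std::vector<std::pair<std::string, int> > ExampleKeyPattern;

static std::vector<ExampleKeyPattern> sortPrefixes(const ExampleKeyPattern& pattern) {
    std::vector<ExampleKeyPattern> out;
    for (size_t len = 1; len <= pattern.size(); ++len) {
        // fields [0, len) form one provided sort order
        out.push_back(ExampleKeyPattern(pattern.begin(), pattern.begin() + len));
    }
    return out;  // e.g. {a}, {a,b}, {a,b,c} for a three-field pattern
}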
ProjectionExec::ProjectionExec(const BSONObj& spec, const MatchExpression* queryExpression)
    : _include(true),
      _special(false),
      _source(spec),
      _includeID(true),
      _skip(0),
      _limit(-1),
      _arrayOpType(ARRAY_OP_NORMAL),
      _hasNonSimple(false),
      _hasDottedField(false),
      _queryExpression(queryExpression),
      _hasReturnKey(false) {
    // Are we including or excluding fields?
    //  -1 when we haven't initialized it.
    //   1 when we're including
    //   0 when we're excluding.
    int include_exclude = -1;

    BSONObjIterator it(_source);
    while (it.more()) {
        BSONElement e = it.next();

        if (!e.isNumber() && !e.isBoolean()) {
            _hasNonSimple = true;
        }

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            verify(1 == obj.nFields());

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0) {
                        add(e.fieldName(), i, -i);  // limit is now positive
                    } else {
                        add(e.fieldName(), 0, i);
                    }
                } else {
                    verify(e2.type() == Array);
                    BSONObj arr = e2.embeddedObject();
                    verify(2 == arr.nFields());

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();

                    verify(limit > 0);

                    add(e.fieldName(), skip, limit);
                }
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                _arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());
                _elemMatchObjs.push_back(elemMatchObj);
                StatusWithMatchExpression swme = MatchExpressionParser::parse(elemMatchObj);
                verify(swme.isOK());
                // And store it in _matchers.
                _matchers[mongoutils::str::before(e.fieldName(), '.').c_str()] = swme.getValue();

                add(e.fieldName(), true);
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                verify(String == e2.type());

                if (mongoutils::str::equals(e2.valuestr(), "text")) {
                    _meta[e.fieldName()] = META_TEXT;
                } else if (mongoutils::str::equals(e2.valuestr(), "diskloc")) {
                    _meta[e.fieldName()] = META_DISKLOC;
                } else if (mongoutils::str::equals(e2.valuestr(), "indexKey")) {
                    _hasReturnKey = true;
                    // The index key clobbers everything so just stop parsing here.
                    return;
                } else {
                    // This shouldn't happen, should be caught by parsing.
                    verify(0);
                }
            } else {
                verify(0);
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        } else {
            add(e.fieldName(), e.trueValue());

            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                _hasDottedField = true;
            }

            // Validate input.
            if (include_exclude == -1) {
                // If we haven't specified an include/exclude, initialize include_exclude.
                // We expect further include/excludes to match it.
                include_exclude = e.trueValue();
                _include = !e.trueValue();
            }
        }

        if (mongoutils::str::contains(e.fieldName(), ".$")) {
            _arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }
}
void Projection::init( const BSONObj& o ) {
    massert( 10371 , "can only add to Projection once", _source.isEmpty());
    _source = o;

    BSONObjIterator i( o );
    int true_false = -1;
    while ( i.more() ) {
        BSONElement e = i.next();

        if ( ! e.isNumber() )
            _hasNonSimple = true;

        if (e.type() == Object) {
            BSONObj obj = e.embeddedObject();
            BSONElement e2 = obj.firstElement();

            if ( mongoutils::str::equals( e2.fieldName(), "$slice" ) ) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0)
                        add(e.fieldName(), i, -i); // limit is now positive
                    else
                        add(e.fieldName(), 0, i);
                }
                else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    uassert(13099, "$slice array wrong size", arr.nFields() == 2 );

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();
                    uassert(13100, "$slice limit must be positive", limit > 0 );

                    add(e.fieldName(), skip, limit);
                }
                else {
                    uassert(13098, "$slice only supports numbers and [skip, limit] arrays",
                            false);
                }
            }
            else if ( mongoutils::str::equals( e2.fieldName(), "$elemMatch" ) ) {
                // validate $elemMatch arguments and dependencies
                uassert( 16342, "elemMatch: invalid argument.  object required.",
                         e2.type() == Object );
                uassert( 16343, "Cannot specify positional operator and $elemMatch"
                                " (currently unsupported).",
                         _arrayOpType != ARRAY_OP_POSITIONAL );
                uassert( 16344, "Cannot use $elemMatch projection on a nested field"
                                " (currently unsupported).",
                         ! mongoutils::str::contains( e.fieldName(), '.' ) );

                _arrayOpType = ARRAY_OP_ELEM_MATCH;

                // initialize new Matcher object(s)
                _matchers.insert( make_pair( mongoutils::str::before( e.fieldName(), '.' ),
                                             boost::make_shared<Matcher>( e.wrap(), true ) ) );
                add( e.fieldName(), true );
            }
            else {
                uasserted(13097, string("Unsupported projection option: ") +
                                 obj.firstElementFieldName() );
            }
        }
        else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        }
        else {
            add( e.fieldName(), e.trueValue() );

            // validate input
            if (true_false == -1) {
                true_false = e.trueValue();
                _include = !e.trueValue();
            }
            else {
                uassert( 10053 , "You cannot currently mix including and excluding fields. "
                                 "Contact us if this is an issue." ,
                         (bool)true_false == e.trueValue() );
            }
        }

        if ( mongoutils::str::contains( e.fieldName(), ".$" ) ) {
            // positional op found; verify dependencies
            uassert( 16345, "Cannot exclude array elements with the positional operator"
                            " (currently unsupported).",
                     e.trueValue() );
            uassert( 16346, "Cannot specify more than one positional array element per query"
                            " (currently unsupported).",
                     _arrayOpType != ARRAY_OP_POSITIONAL );
            uassert( 16347, "Cannot specify positional operator and $elemMatch"
                            " (currently unsupported).",
                     _arrayOpType != ARRAY_OP_ELEM_MATCH );

            _arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }
}
void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( 10201 , "invalid update" , d.moreJSObjs() );
    BSONObj toupdate = d.nextJsObj();

    BSONObj chunkFinder = query;

    bool upsert = flags & UpdateOption_Upsert;
    bool multi = flags & UpdateOption_Multi;

    if ( multi )
        uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );

    if ( upsert && !(manager->hasShardKey(toupdate) ||
                     (toupdate.firstElement().fieldName()[0] == '$' &&
                      manager->hasShardKey(query)))) {
        throw UserException( 8012 , "can't upsert something without shard key" );
    }

    bool save = false;
    if ( ! manager->hasShardKey( query ) ){
        if ( multi ){
        }
        else if ( query.nFields() != 1 ||
                  strcmp( query.firstElement().fieldName() , "_id" ) ){
            throw UserException( 8013 ,
                                 "can't do update with query that doesn't have the shard key" );
        }
        else {
            save = true;
            chunkFinder = toupdate;
        }
    }

    if ( ! save ){
        if ( toupdate.firstElement().fieldName()[0] == '$' ){
            BSONObjIterator ops(toupdate);
            while(ops.more()){
                BSONElement op(ops.next());
                if (op.type() != Object)
                    continue;
                BSONObjIterator fields(op.embeddedObject());
                while(fields.more()){
                    const string field = fields.next().fieldName();
                    uassert(13123, "Can't modify shard key's value",
                            ! manager->getShardKey().partOfShardKey(field));
                }
            }
        }
        else if ( manager->hasShardKey( toupdate ) ){
            uassert( 8014, "change would move shards!",
                     manager->getShardKey().compare( query , toupdate ) == 0 );
        }
        else {
            uasserted(12376, "shard key must be in update object");
        }
    }

    if ( multi ){
        vector<shared_ptr<ChunkRange> > chunks;
        manager->getChunksForQuery( chunks , chunkFinder );
        set<Shard> seen;
        for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin();
              i!=chunks.end(); i++){
            shared_ptr<ChunkRange> c = *i;
            if ( seen.count( c->getShard() ) )
                continue;
            doWrite( dbUpdate , r , c->getShard() );
            seen.insert( c->getShard() );
        }
    }
    else {
        ChunkPtr c = manager->findChunk( chunkFinder );
        doWrite( dbUpdate , r , c->getShard() );
        c->splitIfShould( d.msg().header()->dataLen() );
    }
}
// static
bool WorkingSetCommon::isValidStatusMemberObject(const BSONObj& obj) {
    return obj.nFields() == 3 && obj.hasField("ok") && obj.hasField("code") &&
        obj.hasField("errmsg");
}
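// --- Illustrative sketch (not from the original source) ---------------------
// The exact shape isValidStatusMemberObject accepts: exactly three fields
// named "ok", "code", and "errmsg".  The field types shown here are
// illustrative assumptions; the check above only looks at names and count.
static BSONObj exampleStatusMemberObject() {
    return BSON("ok" << 0.0
                     << "code" << static_cast<int>(ErrorCodes::BadValue)
                     << "errmsg" << "example failure");
}
// By contrast, BSON("ok" << 1.0) would be rejected: nFields() != 3.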
void Projection::init( const BSONObj& o ) {
    massert( 10371 , "can only add to Projection once", _source.isEmpty());
    _source = o;

    BSONObjIterator i( o );
    int true_false = -1;
    while ( i.more() ) {
        BSONElement e = i.next();

        if ( ! e.isNumber() )
            _hasNonSimple = true;

        if (e.type() == Object) {
            BSONObj obj = e.embeddedObject();
            BSONElement e2 = obj.firstElement();
            if ( strcmp(e2.fieldName(), "$slice") == 0 ) {
                if (e2.isNumber()) {
                    int i = e2.numberInt();
                    if (i < 0)
                        add(e.fieldName(), i, -i); // limit is now positive
                    else
                        add(e.fieldName(), 0, i);
                }
                else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    uassert(13099, "$slice array wrong size", arr.nFields() == 2 );

                    BSONObjIterator it(arr);
                    int skip = it.next().numberInt();
                    int limit = it.next().numberInt();
                    uassert(13100, "$slice limit must be positive", limit > 0 );

                    add(e.fieldName(), skip, limit);
                }
                else {
                    uassert(13098, "$slice only supports numbers and [skip, limit] arrays",
                            false);
                }
            }
            else {
                uassert(13097, string("Unsupported projection option: ") +
                               obj.firstElementFieldName(), false);
            }
        }
        else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
            _includeID = false;
        }
        else {
            add(e.fieldName(), e.trueValue());

            // validate input
            if (true_false == -1) {
                true_false = e.trueValue();
                _include = !e.trueValue();
            }
            else {
                uassert( 10053 , "You cannot currently mix including and excluding fields. "
                                 "Contact us if this is an issue." ,
                         (bool)true_false == e.trueValue() );
            }
        }
    }
}
intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
    const intrusive_ptr<ExpressionContext>& pExpCtx,
    BSONObj sortOrder,
    long long limit,
    boost::optional<uint64_t> maxMemoryUsageBytes) {
    intrusive_ptr<DocumentSourceSort> pSort(new DocumentSourceSort(pExpCtx));
    pSort->_maxMemoryUsageBytes = maxMemoryUsageBytes
        ? *maxMemoryUsageBytes
        : internalDocumentSourceSortMaxBlockingSortBytes.load();
    pSort->_rawSort = sortOrder.getOwned();

    for (auto&& keyField : sortOrder) {
        auto fieldName = keyField.fieldNameStringData();

        SortPatternPart patternPart;

        if (keyField.type() == Object) {
            BSONObj metaDoc = keyField.Obj();
            // this restriction is due to needing to figure out sort direction
            uassert(17312,
                    "$meta is the only expression supported by $sort right now",
                    metaDoc.firstElement().fieldNameStringData() == "$meta");

            uassert(ErrorCodes::FailedToParse,
                    "Cannot have additional keys in a $meta sort specification",
                    metaDoc.nFields() == 1);

            VariablesParseState vps = pExpCtx->variablesParseState;
            patternPart.expression = ExpressionMeta::parse(pExpCtx, metaDoc.firstElement(), vps);

            // If sorting by textScore, sort highest scores first.  If sorting by randVal, order
            // doesn't matter, so just always use descending.
            patternPart.isAscending = false;

            pSort->_sortPattern.push_back(std::move(patternPart));
            continue;
        }

        uassert(15974,
                "$sort key ordering must be specified using a number or {$meta: 'textScore'}",
                keyField.isNumber());

        int sortOrder = keyField.numberInt();

        uassert(15975,
                "$sort key ordering must be 1 (for ascending) or -1 (for descending)",
                ((sortOrder == 1) || (sortOrder == -1)));

        patternPart.fieldPath = FieldPath{fieldName};
        patternPart.isAscending = (sortOrder > 0);
        pSort->_paths.insert(patternPart.fieldPath->fullPath());
        pSort->_sortPattern.push_back(std::move(patternPart));
    }

    uassert(15976, "$sort stage must have at least one sort key", !pSort->_sortPattern.empty());

    pSort->_sortKeyGen = SortKeyGenerator{
        // The SortKeyGenerator expects the expressions to be serialized in order to detect a
        // sort by a metadata field.
        pSort->sortKeyPattern(SortKeySerialization::kForPipelineSerialization).toBson(),
        pExpCtx->getCollator()};

    if (limit > 0) {
        pSort->setLimitSrc(DocumentSourceLimit::create(pExpCtx, limit));
    }

    return pSort;
}
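// --- Illustrative sketch (not from the original source) ---------------------
// How a caller might build the stage for
// {$sort: {score: {$meta: "textScore"}, name: 1}} with a limit.  'expCtx' is a
// hypothetical expression context, and this assumes maxMemoryUsageBytes
// defaults to boost::none when omitted.
static intrusive_ptr<DocumentSourceSort> exampleSortStage(
    const intrusive_ptr<ExpressionContext>& expCtx) {
    BSONObj sortSpec = BSON("score" << BSON("$meta"
                                            << "textScore")
                                    << "name" << 1);
    // limit = 100: create() attaches a $limit source, making this a top-k sort.
    return DocumentSourceSort::create(expCtx, sortSpec, 100);
}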
jsval toval( const BSONElement& e ) {
    switch( e.type() ) {
    case EOO:
    case jstNULL:
    case Undefined:
        return JSVAL_NULL;
    case NumberDouble:
    case NumberInt:
        return toval( e.number() );
    case Symbol: // TODO: should we make a special class for this
    case String:
        return toval( e.valuestr() );
    case Bool:
        return e.boolean() ? JSVAL_TRUE : JSVAL_FALSE;
    case Object: {
        BSONObj embed = e.embeddedObject().getOwned();
        return toval( &embed );
    }
    case Array: {
        BSONObj embed = e.embeddedObject().getOwned();

        if ( embed.isEmpty() ) {
            return OBJECT_TO_JSVAL( JS_NewArrayObject( _context , 0 , 0 ) );
        }

        int n = embed.nFields();

        JSObject * array = JS_NewArrayObject( _context , n , 0 );
        assert( array );

        jsval myarray = OBJECT_TO_JSVAL( array );

        for ( int i=0; i<n; i++ ) {
            jsval v = toval( embed[i] );
            assert( JS_SetElement( _context , array , i , &v ) );
        }

        return myarray;
    }
    case jstOID: {
        OID oid = e.__oid();
        JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
        setProperty( o , "str" , toval( oid.str().c_str() ) );
        return OBJECT_TO_JSVAL( o );
    }
    case RegEx: {
        const char * flags = e.regexFlags();
        uintN flagNumber = 0;
        while ( *flags ) {
            switch ( *flags ) {
            case 'g': flagNumber |= JSREG_GLOB; break;
            case 'i': flagNumber |= JSREG_FOLD; break;
            case 'm': flagNumber |= JSREG_MULTILINE; break;
            //case 'y': flagNumber |= JSREG_STICKY; break;
            default:
                log() << "warning: unknown regex flag:" << *flags << endl;
            }
            flags++;
        }

        JSObject * r = JS_NewRegExpObject( _context , (char*)e.regex() ,
                                           strlen( e.regex() ) , flagNumber );
        assert( r );
        return OBJECT_TO_JSVAL( r );
    }
    case Code: {
        JSFunction * func = compileFunction( e.valuestr() );
        return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
    }
    case CodeWScope: {
        JSFunction * func = compileFunction( e.codeWScopeCode() );

        BSONObj extraScope = e.codeWScopeObject();
        if ( ! extraScope.isEmpty() ) {
            log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
        }

        return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
    }
    case Date:
        return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context ,
                                                      (jsdouble) e.date().millis ) );
    case MinKey:
        return OBJECT_TO_JSVAL( JS_NewObject( _context , &minkey_class , 0 , 0 ) );
    case MaxKey:
        return OBJECT_TO_JSVAL( JS_NewObject( _context , &maxkey_class , 0 , 0 ) );
    case Timestamp: {
        JSObject * o = JS_NewObject( _context , &timestamp_class , 0 , 0 );
        setProperty( o , "t" , toval( (double)(e.timestampTime()) ) );
        setProperty( o , "i" , toval( (double)(e.timestampInc()) ) );
        return OBJECT_TO_JSVAL( o );
    }
    case NumberLong: {
        boost::uint64_t val = (boost::uint64_t)e.numberLong();
        JSObject * o = JS_NewObject( _context , &numberlong_class , 0 , 0 );
        setProperty( o , "floatApprox" , toval( (double)(boost::int64_t)( val ) ) );
        if ( (boost::int64_t)val != (boost::int64_t)(double)(boost::int64_t)( val ) ) {
            // using 2 doubles here instead of a single double because certain double
            // bit patterns represent undefined values and sm might trash them
            setProperty( o , "top" , toval( (double)(boost::uint32_t)( val >> 32 ) ) );
            setProperty( o , "bottom" ,
                         toval( (double)(boost::uint32_t)( val & 0x00000000ffffffff ) ) );
        }
        return OBJECT_TO_JSVAL( o );
    }
    case DBRef: {
        JSObject * o = JS_NewObject( _context , &dbpointer_class , 0 , 0 );
        setProperty( o , "ns" , toval( e.dbrefNS() ) );

        JSObject * oid = JS_NewObject( _context , &object_id_class , 0 , 0 );
        setProperty( oid , "str" , toval( e.dbrefOID().str().c_str() ) );

        setProperty( o , "id" , OBJECT_TO_JSVAL( oid ) );
        return OBJECT_TO_JSVAL( o );
    }
    case BinData: {
        JSObject * o = JS_NewObject( _context , &bindata_class , 0 , 0 );
        int len;
        const char * data = e.binData( len );
        assert( JS_SetPrivate( _context , o , new BinDataHolder( data ) ) );
        setProperty( o , "len" , toval( len ) );
        setProperty( o , "type" , toval( (int)e.binDataType() ) );
        return OBJECT_TO_JSVAL( o );
    }
    }
BSONObj getBuildInfo(const BSONObj& a, void* data) {
    uassert(16822, "getBuildInfo accepts no arguments", a.nFields() == 0);
    BSONObjBuilder b;
    VersionInfoInterface::instance().appendBuildInfo(&b);
    return BSON("" << b.done());
}
int IndexCursor::skipToNextKey( const BSONObj &currentKey ) {
    int skipPrefixIndex = _boundsIterator->advance( currentKey );
    if ( skipPrefixIndex == -2 ) {
        // We are done iterating completely.
        _ok = false;
        return -2;
    }
    else if ( skipPrefixIndex == -1 ) {
        // We should skip nothing.
        return -1;
    }

    // We should skip to a further key, efficiently.
    //
    // If after(), skip to the first key greater/less than the key comprised
    // of the first "skipPrefixIndex" elements of currentKey, and the rest
    // set to MaxKey/MinKey for direction > 0 and direction < 0 respectively.
    // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1}, direction > 0,  so we skip
    // to the first key greater than {a:1, b:maxkey, c:maxkey}
    //
    // If after() is false, we use the same key prefix but set the remaining
    // elements to the elements described by cmp(), in order.
    // eg: skipPrefixIndex = 1, currKey {a:1, b:2, c:1}) and cmp() [b:5, c:11]
    // so we skip to {a:1, b:5, c:11}, also noting direction.
    if ( _boundsIterator->after() ) {
        skipPrefix( currentKey, skipPrefixIndex );
    }
    else {
        BSONObjBuilder b(currentKey.objsize());
        BSONObjIterator it = currentKey.begin();
        const vector<const BSONElement *> &endKeys = _boundsIterator->cmp();
        const int nFields = currentKey.nFields();
        for ( int i = 0; i < nFields; i++ ) {
            if ( i < skipPrefixIndex ) {
                verify( it.more() );
                b.append( it.next() );
            }
            else {
                b.appendAs( *endKeys[i] , "" );
            }
        }
        findKey( b.done() );

        // Skip past key prefixes that are not supposed to be inclusive
        // as described by _boundsIterator->inc() and endKeys
        //
        // We'll spend at worst nFields^2 time ensuring all key elements
        // are properly set if all the inclusive bits are false and we
        // keep landing on keys where the ith element of curr == endkeys[i].
        //
        // This complexity is usually ok, since this skipping is supposed to
        // save us from really big linear scans across the key space in
        // some pathological cases.  It's not clear whether or not small
        // cases are hurt too badly by this algorithm.
        bool allInclusive = true;
        const vector<bool> &inclusive = _boundsIterator->inc();
        for ( int i = 0; i < nFields; i++ ) {
            if ( !inclusive[i] ) {
                allInclusive = false;
                break;
            }
        }
    again:
        while ( !allInclusive && ok() ) {
            BSONObj key = _currKey;
            it = key.begin();
            dassert( nFields == key.nFields() );
            for ( int i = 0; i < nFields; i++ ) {
                const BSONElement e = it.next();
                if ( i >= skipPrefixIndex && !inclusive[i] && e.valuesEqual(*endKeys[i]) ) {
                    // The ith element equals the ith endKey but it's not supposed to be
                    // inclusive.  Skipping to the next value for the ith element involves
                    // skipping a prefix with i + 1 elements.
                    skipPrefix( key, i + 1 );
                    goto again;
                }
            }
            break;
        }
    }
    return 0;
}
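// --- Illustrative sketch (not from the original source) ---------------------
// A worked example of the skip-key construction above.  With
// currentKey = {a: 1, b: 2, c: 1}, skipPrefixIndex = 1, and cmp() yielding
// end-key values [5, 11] for positions 1 and 2, the builder keeps the prefix
// element and appends the end-key values with empty field names (field names
// are irrelevant when comparing index keys):
static BSONObj exampleSkipKey() {
    BSONObjBuilder b;
    b.append("a", 1);                                // i < skipPrefixIndex: copied as-is
    b.appendAs(BSON("" << 5).firstElement(), "");    // stands in for *endKeys[1]
    b.appendAs(BSON("" << 11).firstElement(), "");   // stands in for *endKeys[2]
    return b.obj();  // the key passed to findKey(): {a: 1, '': 5, '': 11}
}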
BSONObj createCertificateRequest(const BSONObj& a, void* data) {
#ifndef MONGO_CONFIG_SSL
    return BSON("" << BSON("ok" << false << "errmsg"
                                << "Cannot create a certificate signing request"
                                   " without SSL support"));
#else
    if (a.nFields() != 1 || a.firstElement().type() != Object) {
        return BSON(
            "" << BSON("ok" << false << "errmsg"
                            << "createCertificateRequest requires a single object argument"));
    }

    // args can optionally contain some to be determined fields...
    BSONObj args = a.firstElement().embeddedObject();
    if (!args.hasField("CN")) {
        return BSON(
            "" << BSON("ok" << false << "errmsg"
                            << "createCertificateRequest requires a Common Name (\"CN\") field"));
    }

    // Generate key pair and certificate signing request
    RSA* rsa;
    EVP_PKEY* pkey;
    X509_REQ* x509req;
    X509_NAME* name;
    BIO* out;
    char client_key[2048];
    char client_csr[2048];

    pkey = EVP_PKEY_new();
    if (!pkey) {
        return BSON("" << BSON("ok" << false));  // fail("couldn't generate key");
    }

    rsa = RSA_generate_key(2048, RSA_F4, NULL, NULL);
    if (!EVP_PKEY_assign_RSA(pkey, rsa)) {
        return BSON("" << BSON("ok" << false));  // fail("couldn't assign the key");
    }

    x509req = X509_REQ_new();
    X509_REQ_set_pubkey(x509req, pkey);

    name = X509_NAME_new();
    X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC, (const unsigned char*)"IS", -1, -1, 0);
    X509_NAME_add_entry_by_txt(
        name, "O", MBSTRING_ASC, (const unsigned char*)"MongoDB", -1, -1, 0);
    X509_NAME_add_entry_by_txt(
        name, "OU", MBSTRING_ASC, (const unsigned char*)"SkunkWorks client", -1, -1, 0);
    X509_NAME_add_entry_by_txt(
        name, "CN", MBSTRING_ASC, (const unsigned char*)args.getStringField("CN"), -1, -1, 0);
    X509_REQ_set_subject_name(x509req, name);
    X509_REQ_set_version(x509req, 2);

    if (!X509_REQ_sign(x509req, pkey, EVP_sha1())) {
        return BSON("" << BSON("ok" << false));
    }

    // out = BIO_new_file("client.key.pem", "wb");
    out = BIO_new(BIO_s_mem());
    if (!PEM_write_bio_PrivateKey(out, pkey, NULL, NULL, 0, NULL, NULL)) {
        return BSON("" << BSON("ok" << false));  // fail("can't write private key");
    }
    // Leave room for the terminating NUL; BIO_read may fill the whole buffer.
    int i = BIO_read(out, client_key, sizeof(client_key) - 1);
    client_key[i] = '\0';
    BIO_free_all(out);

    out = BIO_new(BIO_s_mem());
    if (!PEM_write_bio_X509_REQ_NEW(out, x509req)) {
        return BSON("" << BSON("ok" << false));  // fail("couldn't write csr");
    }
    i = BIO_read(out, client_csr, sizeof(client_csr) - 1);
    client_csr[i] = '\0';
    BIO_free_all(out);

    EVP_PKEY_free(pkey);
    X509_REQ_free(x509req);

    return BSON("" << BSON("ok" << true << "certificateRequest" << client_csr
                                << "privateKey" << client_key));
#endif
}
void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
                                      const BSONObj& keyPattern,
                                      const S2IndexingParams& params,
                                      BSONObjSet* keys,
                                      MultikeyPaths* multikeyPaths) {
    BSONObjSet keysToAdd = SimpleBSONObjComparator::kInstance.makeBSONObjSet();

    // Does one of our documents have a geo field?
    bool haveGeoField = false;

    if (multikeyPaths) {
        invariant(multikeyPaths->empty());
        multikeyPaths->resize(keyPattern.nFields());
    }

    size_t posInIdx = 0;

    // We output keys in the same order as the fields we index.
    for (const auto keyElem : keyPattern) {
        // First, we get the keys that this field adds.  Either they're added literally from
        // the value of the field, or they're transformed if the field is geo.
        BSONElementSet fieldElements;
        const bool expandArrayOnTrailingField = false;
        std::set<size_t>* arrayComponents = multikeyPaths ? &(*multikeyPaths)[posInIdx] : nullptr;
        dps::extractAllElementsAlongPath(
            obj, keyElem.fieldName(), fieldElements, expandArrayOnTrailingField, arrayComponents);

        // Trailing array values aren't being expanded, so we still need to determine whether the
        // last component of the indexed path 'keyElem.fieldName()' causes the index to be
        // multikey.  We say that it does if
        //   (a) the last component of the indexed path ever refers to an array value (regardless
        //       of the number of array elements)
        //   (b) the last component of the indexed path ever refers to GeoJSON data that requires
        //       multiple cells for its covering.
        bool lastPathComponentCausesIndexToBeMultikey;
        BSONObjSet keysForThisField = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        if (IndexNames::GEO_2DSPHERE == keyElem.valuestr()) {
            if (params.indexVersion >= S2_INDEX_VERSION_2) {
                // For >= V2,
                // geo: null,
                // geo: undefined
                // geo: []
                // should all behave like there is no geo field.  So we look for these cases and
                // throw out the field elements if we find them.
                if (1 == fieldElements.size()) {
                    BSONElement elt = *fieldElements.begin();
                    // Get the :null and :undefined cases.
                    if (elt.isNull() || Undefined == elt.type()) {
                        fieldElements.clear();
                    } else if (elt.isABSONObj()) {
                        // And this is the :[] case.
                        BSONObj obj = elt.Obj();
                        if (0 == obj.nFields()) {
                            fieldElements.clear();
                        }
                    }
                }

                // >= V2 2dsphere indices require at least one geo field to be present in a
                // document in order to index it.
                if (fieldElements.size() > 0) {
                    haveGeoField = true;
                }
            }

            lastPathComponentCausesIndexToBeMultikey =
                getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
        } else {
            lastPathComponentCausesIndexToBeMultikey =
                getS2LiteralKeys(fieldElements, params.collator, &keysForThisField);
        }

        // We expect there to be the missing field element present in the keys if data is
        // missing.  So, this should be non-empty.
        verify(!keysForThisField.empty());

        if (multikeyPaths && lastPathComponentCausesIndexToBeMultikey) {
            const size_t pathLengthOfThisField =
                FieldRef{keyElem.fieldNameStringData()}.numParts();
            invariant(pathLengthOfThisField > 0);
            (*multikeyPaths)[posInIdx].insert(pathLengthOfThisField - 1);
        }

        // We take the Cartesian product of all of the keys.  This requires that we have
        // some keys to take the Cartesian product with.  If keysToAdd.empty(), we
        // initialize it.
        if (keysToAdd.empty()) {
            keysToAdd = keysForThisField;
            ++posInIdx;
            continue;
        }

        BSONObjSet updatedKeysToAdd = SimpleBSONObjComparator::kInstance.makeBSONObjSet();
        for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
            for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
                 newIt != keysForThisField.end();
                 ++newIt) {
                BSONObjBuilder b;
                b.appendElements(*it);
                b.append(newIt->firstElement());
                updatedKeysToAdd.insert(b.obj());
            }
        }
        keysToAdd = updatedKeysToAdd;
        ++posInIdx;
    }

    // Make sure that if we're >= V2 there's at least one geo field present in the doc.
    if (params.indexVersion >= S2_INDEX_VERSION_2) {
        if (!haveGeoField) {
            return;
        }
    }

    if (keysToAdd.size() > params.maxKeysPerInsert) {
        warning() << "Insert of geo object generated a high number of keys."
                  << " num keys: " << keysToAdd.size() << " obj inserted: " << redact(obj);
    }

    *keys = keysToAdd;
}
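// --- Illustrative sketch (not from the original source) ---------------------
// The Cartesian-product step above, restated on plain strings: the keys for a
// compound 2dsphere index are every combination of the per-field key sets.
// Here '|' stands in for appending one BSON element to an existing key.
#include <set>
#include <string>

static std::set<std::string> cartesianAppend(const std::set<std::string>& acc,
                                             const std::set<std::string>& field) {
    if (acc.empty()) {
        return field;  // first field initializes the accumulator, as in the loop above
    }
    std::set<std::string> out;
    for (std::set<std::string>::const_iterator it = acc.begin(); it != acc.end(); ++it) {
        for (std::set<std::string>::const_iterator jt = field.begin(); jt != field.end(); ++jt) {
            out.insert(*it + "|" + *jt);
        }
    }
    return out;  // e.g. {"c1","c2"} x {"k1","k2"} -> {"c1|k1","c1|k2","c2|k1","c2|k2"}
}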
/* ns:      namespace, e.g. <database>.<collection>
   pattern: the "where" clause / criteria
   justOne: stop after 1 match
   god:     allow access to system namespaces, and don't yield
*/
long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop, bool god,
                        RemoveSaver * rs ) {
    if( !god ) {
        if ( strstr(ns, ".system.") ) {
            /* note a delete from system.indexes would corrupt the db
               if done here, as there are pointers into those objects in
               NamespaceDetails.
            */
            uassert(12050, "cannot delete from system namespace",
                    legalClientSystemNS( ns , true ) );
        }
        if ( strchr( ns , '$' ) ) {
            log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
            uassert( 10100 , "cannot delete from collection with reserved $ in name",
                     strchr(ns, '$') == 0 );
        }
    }

    {
        NamespaceDetails *d = nsdetails( ns );
        if ( ! d )
            return 0;
        uassert( 10101 , "can't remove from a capped collection" , ! d->isCapped() );
    }

    long long nDeleted = 0;

    shared_ptr< Cursor > creal = NamespaceDetailsTransient::getCursor( ns, pattern );

    if( !creal->ok() )
        return nDeleted;

    shared_ptr< Cursor > cPtr = creal;
    auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
    cc->setDoingDeletes( true );

    CursorId id = cc->cursorid();

    bool canYield = !god && !(creal->matcher() && creal->matcher()->docMatcher().atomic());

    do {
        // TODO: we can generalize this I believe
        //
        bool willNeedRecord = (creal->matcher() && creal->matcher()->needRecord()) ||
                              pattern.isEmpty() || isSimpleIdQuery( pattern );
        if ( ! willNeedRecord ) {
            // TODO: this is a total hack right now
            // check if the index full encompasses query

            if ( pattern.nFields() == 1 &&
                 str::equals( pattern.firstElement().fieldName() ,
                              creal->indexKeyPattern().firstElement().fieldName() ) )
                willNeedRecord = true;
        }

        if ( canYield &&
             ! cc->yieldSometimes( willNeedRecord ? ClientCursor::WillNeed
                                                  : ClientCursor::MaybeCovered ) ) {
            cc.release(); // has already been deleted elsewhere
            // TODO should we assert or something?
            break;
        }
        if ( !cc->ok() ) {
            break; // if we yielded, could have hit the end
        }

        // this way we can avoid calling prepareToYield() every time (expensive)
        // as well as some other nuances handled
        cc->setDoingDeletes( true );

        DiskLoc rloc = cc->currLoc();
        BSONObj key = cc->currKey();

        bool match = creal->currentMatches();

        cc->advance();

        if ( ! match )
            continue;

        // SERVER-5198 Advance past the document to be modified, but see SERVER-5725.
        while( cc->ok() && rloc == cc->currLoc() ) {
            cc->advance();
        }

        bool foundAllResults = ( justOne || !cc->ok() );

        if ( !foundAllResults ) {
            // NOTE: Saving and restoring a btree cursor's position was historically
            // described as slow here.
            cc->c()->prepareToTouchEarlierIterate();
        }

        if ( logop ) {
            BSONElement e;
            if( BSONObj::make( rloc.rec() ).getObjectID( e ) ) {
                BSONObjBuilder b;
                b.append( e );
                bool replJustOne = true;
                logOp( "d", ns, b.done(), 0, &replJustOne );
            }
            else {
                problem() << "deleted object without id, not logging" << endl;
            }
        }

        theDataFileMgr.deleteRecord(ns, rloc.rec(), rloc);
        nDeleted++;

        if ( foundAllResults ) {
            break;
        }

        cc->c()->recoverFromTouchingEarlierIterate();

        if( !god )
            getDur().commitIfNeeded();

        if( debug && god && nDeleted == 100 )
            log() << "warning high number of deletes with god=true "
                  << "which could use significant memory" << endl;
    }
    while ( cc->ok() );

    if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
        // TODO: remove this and the id declaration above if this doesn't trigger
        // if it does, then i'm very confused (ERH 06/2011)
        error() << "this should be impossible" << endl;
        printStackTrace();
        cc.release();
    }

    return nDeleted;
}
/**
 * Cleans up one range of orphaned data starting from a range that overlaps or starts at
 * 'startingFromKey'.  If empty, startingFromKey is the minimum key of the sharded range.
 *
 * @return CleanupResult_Continue and 'stoppedAtKey' if orphaned range was found and cleaned
 * @return CleanupResult_Done if no orphaned ranges remain
 * @return CleanupResult_Error and 'errMsg' if an error occurred
 *
 * If the collection is not sharded, returns CleanupResult_Done.
 */
CleanupResult cleanupOrphanedData( const NamespaceString& ns,
                                   const BSONObj& startingFromKeyConst,
                                   bool secondaryThrottle,
                                   BSONObj* stoppedAtKey,
                                   string* errMsg ) {
    BSONObj startingFromKey = startingFromKeyConst;

    CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns.toString() );
    if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
        warning() << "skipping orphaned data cleanup for " << ns.toString()
                  << ", collection is not sharded" << endl;
        return CleanupResult_Done;
    }

    BSONObj keyPattern = metadata->getKeyPattern();
    if ( !startingFromKey.isEmpty() ) {
        if ( startingFromKey.nFields() != keyPattern.nFields() ) {
            *errMsg = stream() << "could not cleanup orphaned data, start key "
                               << startingFromKey << " does not match shard key pattern "
                               << keyPattern;
            warning() << *errMsg << endl;
            return CleanupResult_Error;
        }
    }
    else {
        startingFromKey = metadata->getMinKey();
    }

    KeyRange orphanRange;
    if ( !metadata->getNextOrphanRange( startingFromKey, &orphanRange ) ) {
        LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
                 << " starting from " << startingFromKey
                 << ", no orphan ranges remain" << endl;
        return CleanupResult_Done;
    }
    *stoppedAtKey = orphanRange.maxKey;

    // We're done with this metadata now, no matter what happens
    metadata.reset();

    LOG( 1 ) << "orphaned data cleanup requested for " << ns.toString()
             << " starting from " << startingFromKey
             << ", removing next orphan range"
             << " [" << orphanRange.minKey << "," << orphanRange.maxKey << ")" << endl;

    // Metadata snapshot may be stale now, but deleter checks metadata again in write lock
    // before delete.
    if ( !getDeleter()->deleteNow( ns.toString(),
                                   orphanRange.minKey,
                                   orphanRange.maxKey,
                                   keyPattern,
                                   secondaryThrottle,
                                   errMsg ) ) {
        warning() << *errMsg << endl;
        return CleanupResult_Error;
    }

    return CleanupResult_Continue;
}
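// --- Illustrative sketch (not from the original source) ---------------------
// The return contract above implies a driver loop of roughly this shape (as
// the cleanupOrphaned command would run it); the function name here is a
// hypothetical wrapper, not part of the original source.
static bool exampleCleanupLoop( const NamespaceString& ns, string* errMsg ) {
    BSONObj startingFromKey; // empty: start at the minimum shard key
    BSONObj stoppedAtKey;
    for (;;) {
        CleanupResult result = cleanupOrphanedData( ns,
                                                    startingFromKey,
                                                    true /* secondaryThrottle */,
                                                    &stoppedAtKey,
                                                    errMsg );
        if ( result == CleanupResult_Error )
            return false;
        if ( result == CleanupResult_Done )
            return true;
        // CleanupResult_Continue: resume past the range that was just cleaned.
        startingFromKey = stoppedAtKey;
    }
}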
/* note: this is only (as-is) called for

         - not multi
         - not mods is indexed
         - not upsert
*/
static UpdateResult _updateById(bool isOperatorUpdate,
                                int idIdxNo,
                                ModSet* mods,
                                int profile,
                                NamespaceDetails* d,
                                NamespaceDetailsTransient *nsdt,
                                bool su,
                                const char* ns,
                                const BSONObj& updateobj,
                                BSONObj patternOrig,
                                bool logop,
                                OpDebug& debug,
                                bool fromMigrate = false) {

    DiskLoc loc;
    {
        IndexDetails& i = d->idx(idIdxNo);
        BSONObj key = i.getKeyFromQuery( patternOrig );
        loc = i.idxInterface().findSingle(i, i.head, key);
        if( loc.isNull() ) {
            // no upsert support in _updateById yet, so we are done.
            return UpdateResult( 0 , 0 , 0 , BSONObj() );
        }
    }
    Record* r = loc.rec();

    if ( cc().allowedToThrowPageFaultException() && ! r->likelyInPhysicalMemory() ) {
        throw PageFaultException( r );
    }

    /* look for $inc etc.  note as listed here, all fields to inc must be this type,
       you can't set some regular ones at the moment. */
    if ( isOperatorUpdate ) {
        const BSONObj& onDisk = loc.obj();
        auto_ptr<ModSetState> mss = mods->prepare( onDisk );

        if( mss->canApplyInPlace() ) {
            mss->applyModsInPlace(true);
            DEBUGUPDATE( "\t\t\t updateById doing in place update" );
        }
        else {
            BSONObj newObj = mss->createNewFromMods();
            checkTooLarge(newObj);
            verify(nsdt);
            theDataFileMgr.updateRecord(ns, d, nsdt, r, loc,
                                        newObj.objdata(), newObj.objsize(), debug);
        }

        if ( logop ) {
            DEV verify( mods->size() );

            BSONObj pattern = patternOrig;
            BSONObj logObj = mss->getOpLogRewrite();
            DEBUGUPDATE( "\t rewrite update: " << logObj );

            // It is possible that the entire mod set was a no-op over this document.  We
            // would have an empty log record in that case.  If we call logOp, with an empty
            // record, that would be replicated as "clear this record", which is not what
            // we want.  Therefore, to get a no-op in the replica, we simply don't log.
            if ( logObj.nFields() ) {
                logOp("u", ns, logObj, &pattern, 0, fromMigrate );
            }
        }
        return UpdateResult( 1 , 1 , 1 , BSONObj() );
    } // end $operator update

    // regular update
    BSONElementManipulator::lookForTimestamps( updateobj );
    checkNoMods( updateobj );
    verify(nsdt);
    theDataFileMgr.updateRecord(ns, d, nsdt, r, loc,
                                updateobj.objdata(), updateobj.objsize(), debug );
    if ( logop ) {
        logOp("u", ns, updateobj, &patternOrig, 0, fromMigrate );
    }
    return UpdateResult( 1 , 0 , 1 , BSONObj() );
}
bool MongoVersionRange::parseBSONElement(const BSONElement& el, string* errMsg) {
    string dummy;
    if (!errMsg)
        errMsg = &dummy;

    if (el.type() == String) {
        minVersion = el.String();
        if (minVersion == "") {
            *errMsg = (string) "cannot parse single empty mongo version (" + el.toString() + ")";
            return false;
        }
        return true;
    } else if (el.type() == Array || el.type() == Object) {
        BSONObj range = el.Obj();

        if (range.nFields() != 2) {
            *errMsg = (string) "not enough fields in mongo version range (" + el.toString() + ")";
            return false;
        }

        BSONObjIterator it(range);

        BSONElement subElA = it.next();
        BSONElement subElB = it.next();

        if (subElA.type() != String || subElB.type() != String) {
            *errMsg = (string) "wrong field type for mongo version range (" + el.toString() + ")";
            return false;
        }

        minVersion = subElA.String();
        maxVersion = subElB.String();

        if (minVersion == "") {
            *errMsg = (string) "cannot parse first empty mongo version (" + el.toString() + ")";
            return false;
        }

        if (maxVersion == "") {
            *errMsg = (string) "cannot parse second empty mongo version (" + el.toString() + ")";
            return false;
        }

        if (versionCmp(minVersion, maxVersion) > 0) {
            string swap = minVersion;
            minVersion = maxVersion;
            maxVersion = swap;
        }

        return true;
    } else {
        *errMsg = (string) "wrong type for mongo version range " + el.toString();
        return false;
    }
}
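// --- Illustrative sketch (not from the original source) ---------------------
// The accepted input shapes, fed through parseBSONElement.  This assumes
// MongoVersionRange is default-constructible, as its use above suggests.
static void exampleVersionRanges() {
    MongoVersionRange range;
    string errMsg;

    // Single version string: minVersion is set, maxVersion stays empty.
    range.parseBSONElement(BSON("v" << "2.4.0").firstElement(), &errMsg);

    // Two-element array: [min, max]; out-of-order bounds are swapped.
    range.parseBSONElement(BSON("v" << BSON_ARRAY("2.6.0" << "2.4.0")).firstElement(), &errMsg);

    // Anything else (numbers, wrong field counts, non-string elements)
    // returns false with errMsg populated.
}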
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& cmdObj,
         int,
         string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    //
    // Correct behavior here is very finicky.
    //
    // 1. The first step is to append the error that occurred on the previous operation.
    // This adds an "err" field to the command, which is *not* the command failing.
    //
    // 2. Next we parse and validate write concern options.  If these options are invalid
    // the command fails no matter what, even if we actually had an error earlier.  The
    // reason for checking here is to match legacy behavior on these kind of failures -
    // we'll still get an "err" field for the write error.
    //
    // 3. If we had an error on the previous operation, we then return immediately.
    //
    // 4. Finally, we actually enforce the write concern.  All errors *except* timeout are
    // reported with ok : 0.0, to match legacy behavior.
    //
    // There is a special case when "wOpTime" and "wElectionId" are explicitly provided by
    // the client (mongos) - in this case we *only* enforce the write concern if it is
    // valid.
    //
    // We always need to either report "err" (if ok : 1) or "errmsg" (if ok : 0), even if
    // err is null.
    //

    LastError* le = lastError.disableForCommand();

    // Always append lastOp and connectionId
    Client& c = *txn->getClient();
    if (repl::getGlobalReplicationCoordinator()->getReplicationMode() ==
        repl::ReplicationCoordinator::modeReplSet) {
        const OpTime lastOp = repl::ReplClientInfo::forClient(c).getLastOp();
        if (!lastOp.isNull()) {
            result.append("lastOp", lastOp);
        }
    }

    // for sharding; also useful in general for debugging
    result.appendNumber("connectionId", c.getConnectionId());

    OpTime lastOpTime;
    BSONField<OpTime> wOpTimeField("wOpTime");
    FieldParser::FieldState extracted =
        FieldParser::extract(cmdObj, wOpTimeField, &lastOpTime, &errmsg);
    if (!extracted) {
        result.append("badGLE", cmdObj);
        appendCommandStatus(result, false, errmsg);
        return false;
    }
    bool lastOpTimePresent = extracted != FieldParser::FIELD_NONE;
    if (!lastOpTimePresent) {
        // Use the client opTime if no wOpTime is specified
        lastOpTime = repl::ReplClientInfo::forClient(c).getLastOp();
    }

    OID electionId;
    BSONField<OID> wElectionIdField("wElectionId");
    extracted = FieldParser::extract(cmdObj, wElectionIdField, &electionId, &errmsg);
    if (!extracted) {
        result.append("badGLE", cmdObj);
        appendCommandStatus(result, false, errmsg);
        return false;
    }
    bool electionIdPresent = extracted != FieldParser::FIELD_NONE;

    bool errorOccurred = false;

    // Errors aren't reported when wOpTime is used
    if (!lastOpTimePresent) {
        if (le->nPrev != 1) {
            errorOccurred = LastError::noError.appendSelf(result, false);
            le->appendSelfStatus(result);
        }
        else {
            errorOccurred = le->appendSelf(result, false);
        }
    }

    BSONObj writeConcernDoc = cmdObj;
    // Use the default options if we have no gle options aside from wOpTime/wElectionId
    const int nFields = cmdObj.nFields();
    bool useDefaultGLEOptions = (nFields == 1) || (nFields == 2 && lastOpTimePresent) ||
        (nFields == 3 && lastOpTimePresent && electionIdPresent);

    WriteConcernOptions writeConcern;

    if (useDefaultGLEOptions) {
        writeConcern = repl::getGlobalReplicationCoordinator()->getGetLastErrorDefault();
    }

    Status status = writeConcern.parse(writeConcernDoc);

    //
    // Validate write concern no matter what, this matches 2.4 behavior
    //

    if (status.isOK()) {
        // Ensure options are valid for this host
        status = validateWriteConcern(writeConcern);
    }

    if (!status.isOK()) {
        result.append("badGLE", writeConcernDoc);
        return appendCommandStatus(result, status);
    }

    // Don't wait for replication if there was an error reported - this matches 2.4 behavior
    if (errorOccurred) {
        dassert(!lastOpTimePresent);
        return true;
    }

    // No error occurred, so we won't duplicate these fields with write concern errors
    dassert(result.asTempObj()["err"].eoo());
    dassert(result.asTempObj()["code"].eoo());

    // If we got an electionId, make sure it matches
    if (electionIdPresent) {
        if (repl::getGlobalReplicationCoordinator()->getReplicationMode() !=
            repl::ReplicationCoordinator::modeReplSet) {
            // Ignore electionIds of 0 from mongos.
            if (electionId != OID()) {
                errmsg = "wElectionId passed but no replication active";
                result.append("code", ErrorCodes::BadValue);
                return false;
            }
        }
        else {
            if (electionId != repl::getGlobalReplicationCoordinator()->getElectionId()) {
                LOG(3) << "oid passed in is " << electionId << ", but our id is "
                       << repl::getGlobalReplicationCoordinator()->getElectionId();
                errmsg = "election occurred after write";
                result.append("code", ErrorCodes::WriteConcernFailed);
                return false;
            }
        }
    }

    txn->setWriteConcern(writeConcern);
    txn->setMessage("waiting for write concern");

    WriteConcernResult wcResult;
    status = waitForWriteConcern(txn, lastOpTime, &wcResult);
    wcResult.appendTo(writeConcern, &result);

    // For backward compatibility with 2.4, wtimeout returns ok : 1.0
    if (wcResult.wTimedOut) {
        dassert(!wcResult.err.empty());  // so we always report err
        dassert(!status.isOK());
        result.append("errmsg", "timed out waiting for slaves");
        result.append("code", status.code());
        return true;
    }

    return appendCommandStatus(result, status);
}
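// A minimal sketch of the "use default GLE options" decision above: the defaults
// apply only when the command carries no write-concern fields beyond the command
// name itself plus the optional wOpTime/wElectionId pass-throughs from mongos.
// 'shouldUseDefaultGLEOptions' is a hypothetical standalone restatement, not the
// command's actual helper.
static bool shouldUseDefaultGLEOptions(int nFields,
                                       bool lastOpTimePresent,
                                       bool electionIdPresent) {
    return (nFields == 1)                                             // {getLastError: 1}
        || (nFields == 2 && lastOpTimePresent)                        // ... + wOpTime
        || (nFields == 3 && lastOpTimePresent && electionIdPresent);  // ... + wElectionId
}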
StatusWith<TextMatchExpressionBase::TextParams> ExtensionsCallback::extractTextMatchExpressionParams(
    BSONElement text) {
    TextMatchExpressionBase::TextParams params;

    if (text.type() != Object) {
        return {ErrorCodes::BadValue, "$text expects an object"};
    }
    BSONObj queryObj = text.Obj();

    //
    // Parse required fields.
    //

    Status queryStatus = bsonExtractStringField(queryObj, "$search", &params.query);
    if (!queryStatus.isOK()) {
        return queryStatus;
    }

    //
    // Parse optional fields.
    //

    int expectedFieldCount = 1;

    Status languageStatus = bsonExtractStringField(queryObj, "$language", &params.language);
    if (languageStatus == ErrorCodes::TypeMismatch) {
        return languageStatus;
    } else if (languageStatus == ErrorCodes::NoSuchKey) {
        params.language = std::string();
    } else {
        invariantOK(languageStatus);
        expectedFieldCount++;
    }

    Status caseSensitiveStatus =
        bsonExtractBooleanField(queryObj, "$caseSensitive", &params.caseSensitive);
    if (caseSensitiveStatus == ErrorCodes::TypeMismatch) {
        return caseSensitiveStatus;
    } else if (caseSensitiveStatus == ErrorCodes::NoSuchKey) {
        params.caseSensitive = TextMatchExpressionBase::kCaseSensitiveDefault;
    } else {
        invariantOK(caseSensitiveStatus);
        expectedFieldCount++;
    }

    Status diacriticSensitiveStatus =
        bsonExtractBooleanField(queryObj, "$diacriticSensitive", &params.diacriticSensitive);
    if (diacriticSensitiveStatus == ErrorCodes::TypeMismatch) {
        return diacriticSensitiveStatus;
    } else if (diacriticSensitiveStatus == ErrorCodes::NoSuchKey) {
        params.diacriticSensitive = TextMatchExpressionBase::kDiacriticSensitiveDefault;
    } else {
        invariantOK(diacriticSensitiveStatus);
        expectedFieldCount++;
    }

    if (queryObj.nFields() != expectedFieldCount) {
        return {ErrorCodes::BadValue, "extra fields in $text"};
    }

    return {std::move(params)};
}
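// A standalone sketch of the field-count validation pattern above, assuming plain
// std::map input rather than BSON: every recognized optional field bumps the
// expected count, so any leftover (unknown) field makes the totals disagree and
// the spec is rejected. 'validateTextSpec' is hypothetical, for illustration only.
#include <map>
#include <string>

static bool validateTextSpec(const std::map<std::string, std::string>& spec) {
    if (spec.count("$search") == 0)
        return false;            // required field missing
    int expectedFieldCount = 1;  // $search
    for (const char* optional : {"$language", "$caseSensitive", "$diacriticSensitive"}) {
        if (spec.count(optional))
            expectedFieldCount++;  // each recognized optional field is accounted for
    }
    // Any unrecognized field leaves spec.size() larger than expectedFieldCount.
    return static_cast<int>(spec.size()) == expectedFieldCount;
}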
BSONObj getBuildInfo(const BSONObj& a, void* data) {
    uassert(16822, "getBuildInfo accepts no arguments", a.nFields() == 0);
    BSONObjBuilder b;
    appendBuildInfo(b);
    return BSON("" << b.done());
}
JSBool mongo_find(JSContext* cx, JSObject* obj, uintN argc, jsval* argv, jsval* rval) {
    uassert("mongo_find needs 5 args", argc == 5);
    shared_ptr<DBClientBase>* connHolder = (shared_ptr<DBClientBase>*) JS_GetPrivate(cx, obj);
    uassert("no connection!", connHolder && connHolder->get());
    DBClientBase* conn = connHolder->get();

    Convertor c(cx);

    string ns = c.toString(argv[0]);
    BSONObj q = c.toObject(argv[1]);
    BSONObj f = c.toObject(argv[2]);
    int nToReturn = (int) c.toNumber(argv[3]);
    int nToSkip = (int) c.toNumber(argv[4]);
    bool slaveOk = c.getBoolean(obj, "slaveOk");

    try {
        auto_ptr<DBClientCursor> cursor = conn->query(ns, q, nToReturn, nToSkip,
                                                      f.nFields() ? &f : 0,
                                                      slaveOk ? Option_SlaveOk : 0);
        JSObject* mycursor = JS_NewObject(cx, &internal_cursor_class, 0, 0);
        assert(JS_SetPrivate(cx, mycursor, new CursorHolder(cursor, *connHolder)));
        *rval = OBJECT_TO_JSVAL(mycursor);
        return JS_TRUE;
    }
    catch (...) {
        JS_ReportError(cx, "error doing query");
        return JS_FALSE;
    }
}
static BSONElement oneArg(const BSONObj& args) {
    uassert(12597, "need to specify 1 argument", args.nFields() == 1);
    return args.firstElement();
}
void ExpressionKeysPrivate::getS2Keys(const BSONObj& obj,
                                      const BSONObj& keyPattern,
                                      const S2IndexingParams& params,
                                      BSONObjSet* keys) {
    BSONObjSet keysToAdd;

    // Does one of our documents have a geo field?
    bool haveGeoField = false;

    // We output keys in the same order as the fields we index.
    BSONObjIterator i(keyPattern);
    while (i.more()) {
        BSONElement e = i.next();

        // First, we get the keys that this field adds.  Either they're added literally from
        // the value of the field, or they're transformed if the field is geo.
        BSONElementSet fieldElements;
        // false means Don't expand the last array, duh.
        obj.getFieldsDotted(e.fieldName(), fieldElements, false);

        BSONObjSet keysForThisField;
        if (IndexNames::GEO_2DSPHERE == e.valuestr()) {
            if (S2_INDEX_VERSION_2 == params.indexVersion) {
                // For V2,
                // geo: null,
                // geo: undefined
                // geo: []
                // should all behave like there is no geo field.  So we look for these cases
                // and throw out the field elements if we find them.
                if (1 == fieldElements.size()) {
                    BSONElement elt = *fieldElements.begin();
                    // Get the :null and :undefined cases.
                    if (elt.isNull() || Undefined == elt.type()) {
                        fieldElements.clear();
                    }
                    else if (elt.isABSONObj()) {
                        // And this is the :[] case.
                        BSONObj obj = elt.Obj();
                        if (0 == obj.nFields()) {
                            fieldElements.clear();
                        }
                    }
                }

                // V2 2dsphere indices require at least one geo field to be present in a
                // document in order to index it.
                if (fieldElements.size() > 0) {
                    haveGeoField = true;
                }
            }

            getS2GeoKeys(obj, fieldElements, params, &keysForThisField);
        } else {
            getS2LiteralKeys(fieldElements, &keysForThisField);
        }

        // We expect there to be the missing field element present in the keys if data is
        // missing.  So, this should be non-empty.
        verify(!keysForThisField.empty());

        // We take the Cartesian product of all of the keys.  This requires that we have
        // some keys to take the Cartesian product with.  If keysToAdd.empty(), we
        // initialize it.
        if (keysToAdd.empty()) {
            keysToAdd = keysForThisField;
            continue;
        }

        BSONObjSet updatedKeysToAdd;
        for (BSONObjSet::const_iterator it = keysToAdd.begin(); it != keysToAdd.end(); ++it) {
            for (BSONObjSet::const_iterator newIt = keysForThisField.begin();
                 newIt != keysForThisField.end(); ++newIt) {
                BSONObjBuilder b;
                b.appendElements(*it);
                b.append(newIt->firstElement());
                updatedKeysToAdd.insert(b.obj());
            }
        }
        keysToAdd = updatedKeysToAdd;
    }

    // Make sure that if we're V2 there's at least one geo field present in the doc.
    if (S2_INDEX_VERSION_2 == params.indexVersion) {
        if (!haveGeoField) {
            return;
        }
    }

    if (keysToAdd.size() > params.maxKeysPerInsert) {
        warning() << "insert of geo object generated lots of keys (" << keysToAdd.size()
                  << ") consider creating larger buckets. obj=" << obj;
    }

    *keys = keysToAdd;
}
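// A self-contained sketch of the Cartesian-product step above, using sets of
// strings in place of BSONObjSet: the running set of composite keys is crossed
// with the keys generated for each successive indexed field. Names here are
// illustrative, not MongoDB API.
#include <set>
#include <string>

static std::set<std::string> crossKeys(const std::set<std::string>& keysToAdd,
                                       const std::set<std::string>& keysForThisField) {
    if (keysToAdd.empty())
        return keysForThisField;  // first field seeds the product
    std::set<std::string> updated;
    for (const std::string& prefix : keysToAdd)
        for (const std::string& suffix : keysForThisField)
            updated.insert(prefix + "|" + suffix);  // append this field's key
    return updated;
}
// e.g. {"a1","a2"} x {"b1"} -> {"a1|b1","a2|b1"}; set sizes multiply per field,
// which is why getS2Keys warns when the result exceeds params.maxKeysPerInsert.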
bool SizeMatchExpression::matchesArray(const BSONObj& anArray, MatchDetails* details) const {
    if (_size < 0)
        return false;
    return anArray.nFields() == _size;
}
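// The semantics above, restated as a hypothetical standalone equivalent over a
// plain vector: {$size: n} matches only arrays with exactly n elements, and a
// negative size can never match.
#include <vector>

static bool matchesSize(const std::vector<int>& arr, int size) {
    if (size < 0)
        return false;  // e.g. {a: {$size: -1}} matches nothing
    return static_cast<int>(arr.size()) == size;
}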
// static
Status ParsedProjection::make(const BSONObj& spec,
                              const MatchExpression* const query,
                              ParsedProjection** out,
                              const ExtensionsCallback& extensionsCallback) {
    // Whether we're including or excluding fields.
    enum class IncludeExclude { kUninitialized, kInclude, kExclude };
    IncludeExclude includeExclude = IncludeExclude::kUninitialized;

    bool requiresDocument = false;
    bool includeID = true;
    bool hasIndexKeyProjection = false;
    bool wantGeoNearPoint = false;
    bool wantGeoNearDistance = false;
    bool wantSortKey = false;

    // Until we see a positional or elemMatch operator we're normal.
    ArrayOpType arrayOpType = ARRAY_OP_NORMAL;

    BSONObjIterator it(spec);
    while (it.more()) {
        BSONElement e = it.next();

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            if (1 != obj.nFields()) {
                return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
            }

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    // This is A-OK.
                } else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    if (2 != arr.nFields()) {
                        return Status(ErrorCodes::BadValue, "$slice array wrong size");
                    }

                    BSONObjIterator it(arr);
                    // Skip over 'skip'.
                    it.next();
                    int limit = it.next().numberInt();
                    if (limit <= 0) {
                        return Status(ErrorCodes::BadValue, "$slice limit must be positive");
                    }
                } else {
                    return Status(ErrorCodes::BadValue,
                                  "$slice only supports numbers and [skip, limit] arrays");
                }

                // Projections with $slice aren't covered.
                requiresDocument = true;
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                // Validate $elemMatch arguments and dependencies.
                if (Object != e2.type()) {
                    return Status(ErrorCodes::BadValue,
                                  "elemMatch: Invalid argument, object required.");
                }

                if (ARRAY_OP_POSITIONAL == arrayOpType) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot specify positional operator and $elemMatch.");
                }

                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot use $elemMatch projection on a nested field.");
                }

                arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());

                // TODO: Is there a faster way of validating the elemMatchObj?
                StatusWithMatchExpression statusWithMatcher =
                    MatchExpressionParser::parse(elemMatchObj, extensionsCallback);
                if (!statusWithMatcher.isOK()) {
                    return statusWithMatcher.getStatus();
                }

                // Projections with $elemMatch aren't covered.
                requiresDocument = true;
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                // Field for meta must be top level.  We can relax this at some point.
                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
                }

                // Make sure the argument to $meta is something we recognize.
                // e.g. {x: {$meta: "textScore"}}
                if (String != e2.type()) {
                    return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
                }

                if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
                    e2.valuestr() != LiteParsedQuery::metaRecordId &&
                    e2.valuestr() != LiteParsedQuery::metaIndexKey &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearPoint &&
                    e2.valuestr() != LiteParsedQuery::metaSortKey) {
                    return Status(ErrorCodes::BadValue,
                                  "unsupported $meta operator: " + e2.str());
                }

                // This clobbers everything else.
                if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
                    hasIndexKeyProjection = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
                    wantGeoNearDistance = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
                    wantGeoNearPoint = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaSortKey) {
                    wantSortKey = true;
                }

                // Of the $meta projections, only sortKey can be covered.
                if (e2.valuestr() != LiteParsedQuery::metaSortKey) {
                    requiresDocument = true;
                }
            } else {
                return Status(ErrorCodes::BadValue,
                              string("Unsupported projection option: ") + e.toString());
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            includeID = false;
        } else {
            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                requiresDocument = true;
            }

            // If we haven't specified an include/exclude, initialize includeExclude.  We expect
            // further include/excludes to match it.
            if (includeExclude == IncludeExclude::kUninitialized) {
                includeExclude =
                    e.trueValue() ? IncludeExclude::kInclude : IncludeExclude::kExclude;
            } else if ((includeExclude == IncludeExclude::kInclude && !e.trueValue()) ||
                       (includeExclude == IncludeExclude::kExclude && e.trueValue())) {
                return Status(ErrorCodes::BadValue,
                              "Projection cannot have a mix of inclusion and exclusion.");
            }
        }

        if (_isPositionalOperator(e.fieldName())) {
            // Validate the positional op.
            if (!e.trueValue()) {
                return Status(ErrorCodes::BadValue,
                              "Cannot exclude array elements with the positional operator.");
            }

            if (ARRAY_OP_POSITIONAL == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify more than one positional proj. per query.");
            }

            if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify positional operator and $elemMatch.");
            }

            std::string after = mongoutils::str::after(e.fieldName(), ".$");
            if (mongoutils::str::contains(after, ".$")) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' contains "
                   << "the positional operator more than once.";
                return Status(ErrorCodes::BadValue, ss);
            }

            std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
            if (!_hasPositionalOperatorMatch(query, matchfield)) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' does not "
                   << "match the query document.";
                return Status(ErrorCodes::BadValue, ss);
            }

            arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }

    // If includeExclude is uninitialized or set to exclude fields, then we can't use an index
    // because we don't know what fields we're missing.
    if (includeExclude == IncludeExclude::kUninitialized ||
        includeExclude == IncludeExclude::kExclude) {
        requiresDocument = true;
    }

    // Fill out the returned obj.
    unique_ptr<ParsedProjection> pp(new ParsedProjection());

    // The positional operator uses the MatchDetails from the query
    // expression to know which array element was matched.
    pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;

    // Save the raw spec.  It should be owned by the LiteParsedQuery.
    verify(spec.isOwned());
    pp->_source = spec;
    pp->_returnKey = hasIndexKeyProjection;
    pp->_requiresDocument = requiresDocument;

    // Add meta-projections.
    pp->_wantGeoNearPoint = wantGeoNearPoint;
    pp->_wantGeoNearDistance = wantGeoNearDistance;
    pp->_wantSortKey = wantSortKey;

    // If it's possible to compute the projection in a covered fashion, populate _requiredFields
    // so the planner can perform projection analysis.
    if (!pp->_requiresDocument) {
        if (includeID) {
            pp->_requiredFields.push_back("_id");
        }

        // The only way we could be here is if spec is only simple non-dotted-field inclusions
        // or the $meta sortKey projection.  Therefore we can iterate over spec to get the
        // fields required.
        BSONObjIterator srcIt(spec);
        while (srcIt.more()) {
            BSONElement elt = srcIt.next();
            // We've already handled the _id field before entering this loop.
            if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
                continue;
            }

            // $meta sortKey should not be checked as a part of _requiredFields, since it can
            // potentially produce a covered projection as long as the sort key is covered.
            if (BSONType::Object == elt.type()) {
                dassert(elt.Obj() == BSON("$meta"
                                          << "sortKey"));
                continue;
            }

            if (elt.trueValue()) {
                pp->_requiredFields.push_back(elt.fieldName());
            }
        }
    }

    // returnKey clobbers everything except for sortKey meta-projection.
    if (hasIndexKeyProjection && !wantSortKey) {
        pp->_requiresDocument = false;
    }

    *out = pp.release();

    return Status::OK();
}
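// A minimal standalone restatement of the inclusion/exclusion rule enforced above,
// over a plain list of (field, truthiness) pairs instead of a BSON spec. The
// names here are illustrative, not part of ParsedProjection.
#include <string>
#include <utility>
#include <vector>

enum class IncludeExcludeSketch { kUninitialized, kInclude, kExclude };

static bool specIsConsistent(const std::vector<std::pair<std::string, bool>>& spec) {
    IncludeExcludeSketch state = IncludeExcludeSketch::kUninitialized;
    for (const auto& field : spec) {
        if (field.first == "_id" && !field.second)
            continue;  // {_id: 0} is special-cased and never counts as "exclusion mode"
        IncludeExcludeSketch thisField =
            field.second ? IncludeExcludeSketch::kInclude : IncludeExcludeSketch::kExclude;
        if (state == IncludeExcludeSketch::kUninitialized)
            state = thisField;  // first counted field decides the mode
        else if (state != thisField)
            return false;  // mixing inclusion and exclusion is an error
    }
    return true;
}
// e.g. {a: 1, b: 1, _id: 0} is consistent; {a: 1, b: 0} is not.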