/**
 * Applies an update (or upsert) to the single document in 'collectionName' matching 'query',
 * using the real UpdateDriver against the mock's in-memory BSONObj collection.
 *
 * Returns OK on success, the driver/parse/find error otherwise. On a plain update, also
 * notifies the AuthorizationManager (if attached) via logOp with the generated oplog entry.
 * NOTE(review): 'writeConcern' is only forwarded to insert() on the upsert path; it is
 * otherwise unused by this mock.
 */
Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
                                                const NamespaceString& collectionName,
                                                const BSONObj& query,
                                                const BSONObj& updatePattern,
                                                bool upsert,
                                                const BSONObj& writeConcern) {
    namespace mmb = mutablebson;
    // The mock never uses a collation; a null collator means simple binary comparison.
    const CollatorInterface* collator = nullptr;
    boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, collator));
    UpdateDriver driver(std::move(expCtx));
    // No array filters are supported/needed by the mock's callers.
    std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
    driver.parse(updatePattern, arrayFilters);

    BSONObjCollection::iterator iter;
    Status status = _findOneIter(opCtx, collectionName, query, &iter);
    mmb::Document document;
    if (status.isOK()) {
        // Found an existing document: apply the mods to a mutable copy of it.
        document.reset(*iter, mmb::Document::kInPlaceDisabled);
        const bool validateForStorage = false;
        const FieldRefSet emptyImmutablePaths;
        const bool isInsert = false;
        BSONObj logObj;
        status = driver.update(
            StringData(), &document, validateForStorage, emptyImmutablePaths, isInsert, &logObj);
        if (!status.isOK())
            return status;
        // Copy out of the mutable document before storing back into the collection.
        BSONObj newObj = document.getObject().copy();
        *iter = newObj;
        // NOTE(review): assumes the updated document's _id is an object (Obj() on the
        // element) — appears tailored to the user/role documents this mock stores; confirm.
        BSONObj idQuery = newObj["_id"_sd].Obj();
        if (_authzManager) {
            _authzManager->logOp(opCtx, "u", collectionName, logObj, &idQuery);
        }
        return Status::OK();
    } else if (status == ErrorCodes::NoMatchingDocument && upsert) {
        // Upsert path: synthesize a new document from the query's equality fields.
        if (query.hasField("_id")) {
            document.root().appendElement(query["_id"]).transitional_ignore();
        }
        const FieldRef idFieldRef("_id");
        FieldRefSet immutablePaths;
        invariant(immutablePaths.insert(&idFieldRef));
        status = driver.populateDocumentWithQueryFields(opCtx, query, immutablePaths, document);
        if (!status.isOK()) {
            return status;
        }
        const bool validateForStorage = false;
        const FieldRefSet emptyImmutablePaths;
        // NOTE(review): isInsert is false even on this upsert path, so $setOnInsert-style
        // mods would not fire — presumably not exercised by the mock's callers; confirm.
        const bool isInsert = false;
        status = driver.update(
            StringData(), &document, validateForStorage, emptyImmutablePaths, isInsert);
        if (!status.isOK()) {
            return status;
        }
        // insert() handles the actual storage and its own logOp notification.
        return insert(opCtx, collectionName, document.getObject(), writeConcern);
    } else {
        return status;
    }
}
/**
 * Seeds 'doc' with the equality-matched fields of 'query' (used to build the document
 * inserted by an upsert).
 *
 * For replacement-style updates only the _id field plus the given immutable paths are
 * extracted (the replacement doc supplies everything else); for operator-style updates
 * every equality match in the query is extracted.
 *
 * Returns the status of the extraction or of writing the equalities into 'doc'.
 */
Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery& query,
                                                     const vector<FieldRef*>* immutablePathsPtr,
                                                     mutablebson::Document& doc) const {
    EqualityMatches equalities;
    Status status = Status::OK();

    if (isDocReplacement()) {
        FieldRefSet pathsToExtract;

        // TODO: Refactor update logic, make _id just another immutable field
        static const FieldRef idPath("_id");
        static const vector<FieldRef*> emptyImmutablePaths;
        // A null pointer means "no immutable paths"; substitute the shared empty vector.
        const vector<FieldRef*>& immutablePaths =
            immutablePathsPtr ? *immutablePathsPtr : emptyImmutablePaths;
        pathsToExtract.fillFrom(immutablePaths);
        pathsToExtract.insert(&idPath);

        // Extract only immutable fields from replacement-style
        status =
            pathsupport::extractFullEqualityMatches(*query.root(), pathsToExtract, &equalities);
    } else {
        // Extract all fields from op-style
        status = pathsupport::extractEqualityMatches(*query.root(), &equalities);
    }

    if (!status.isOK())
        return status;

    status = pathsupport::addEqualitiesToDoc(equalities, &doc);
    return status;
}
/**
 * Helper function to check if path conflicts are all prefixes.
 *
 * Returns OK when 'path' is a prefix of (or equal to) every path in 'conflictPaths'.
 * Returns NotExactValueField when any conflicting path is shorter than 'path', i.e.
 * 'path' descends below a field that must be exactly specified.
 */
static Status checkPathIsPrefixOf(const FieldRef& path, const FieldRefSet& conflictPaths) {
    for (FieldRefSet::const_iterator it = conflictPaths.begin(); it != conflictPaths.end(); ++it) {
        const FieldRef* conflictingPath = *it;
        // Conflicts are always prefixes (or equal to) the path, or vice versa
        if (path.numParts() > conflictingPath->numParts()) {
            // Fixed: the original message read "...sub-path 'x.y'found" — the space
            // before "found" was missing.
            string errMsg = stream() << "field at '" << conflictingPath->dottedField()
                                     << "' must be exactly specified, field at sub-path '"
                                     << path.dottedField() << "' found";
            return Status(ErrorCodes::NotExactValueField, errMsg);
        }
    }

    return Status::OK();
}
/**
 * Recursively walks a match expression tree and collects equality predicates into
 * 'equalities', descending only through $and nodes.
 *
 * When 'fullPathsToExtract' is non-null, an equality is kept only if its path conflicts
 * with one of those full paths, and it must be a prefix of (or equal to) every
 * conflicting path — otherwise NotExactValueField is returned via checkPathIsPrefixOf.
 * Duplicate/conflicting equalities are rejected by checkEqualityConflicts.
 * Match types other than EQ and AND are ignored (treated as OK).
 */
static Status _extractFullEqualityMatches(const MatchExpression& root,
                                          const FieldRefSet* fullPathsToExtract,
                                          EqualityMatches* equalities) {
    if (root.matchType() == MatchExpression::EQ) {
        // Extract equality matches
        const EqualityMatchExpression& eqChild = static_cast<const EqualityMatchExpression&>(root);

        FieldRef path(eqChild.path());

        if (fullPathsToExtract) {
            FieldRefSet conflictPaths;
            fullPathsToExtract->findConflicts(&path, &conflictPaths);

            // Ignore if this path is unrelated to the full paths
            if (conflictPaths.empty())
                return Status::OK();

            // Make sure we're a prefix of all the conflict paths
            Status status = checkPathIsPrefixOf(path, conflictPaths);
            if (!status.isOK())
                return status;
        }

        Status status = checkEqualityConflicts(*equalities, path);
        if (!status.isOK())
            return status;

        // The map stores a pointer to the expression node; the tree must outlive
        // 'equalities'.
        equalities->insert(make_pair(eqChild.path(), &eqChild));
    } else if (root.matchType() == MatchExpression::AND) {
        // Further explore $and matches
        for (size_t i = 0; i < root.numChildren(); ++i) {
            MatchExpression* child = root.getChild(i);
            Status status = _extractFullEqualityMatches(*child, fullPathsToExtract, equalities);
            if (!status.isOK())
                return status;
        }
    }

    return Status::OK();
}
/**
 * Updates roleGraph for an update-type oplog operation on admin.system.roles.
 *
 * Treats all updates as upserts: if the role named by the query's _id is not in the
 * graph, a fresh document is seeded from the query fields before the mods are applied.
 * On success the (possibly new) role completely replaces its entry in the graph.
 */
Status handleOplogUpdate(OperationContext* opCtx,
                         RoleGraph* roleGraph,
                         const BSONObj& updatePattern,
                         const BSONObj& queryPattern) {
    // The oplog query for a role update identifies the role via its _id.
    RoleName roleToUpdate;
    Status status = getRoleNameFromIdField(queryPattern["_id"], &roleToUpdate);
    if (!status.isOK())
        return status;

    boost::intrusive_ptr<ExpressionContext> expCtx(new ExpressionContext(opCtx, nullptr));
    UpdateDriver driver(std::move(expCtx));
    // Oplog application relaxes some update restrictions (e.g. mod validity checks).
    driver.setFromOplogApplication(true);

    // Oplog updates do not have array filters.
    std::map<StringData, std::unique_ptr<ExpressionWithPlaceholder>> arrayFilters;
    driver.parse(updatePattern, arrayFilters);

    // Start from the current BSON form of the role, or build one from the query if the
    // role is not yet in the graph (upsert semantics).
    mutablebson::Document roleDocument;
    status = RoleGraph::getBSONForRole(roleGraph, roleToUpdate, roleDocument.root());
    if (status == ErrorCodes::RoleNotFound) {
        // The query pattern will only contain _id, no other immutable fields are present
        const FieldRef idFieldRef("_id");
        FieldRefSet immutablePaths;
        invariant(immutablePaths.insert(&idFieldRef));
        status = driver.populateDocumentWithQueryFields(
            opCtx, queryPattern, immutablePaths, roleDocument);
    }
    if (!status.isOK())
        return status;

    // Storage validation is unnecessary here: the result is parsed, not persisted.
    const bool validateForStorage = false;
    const FieldRefSet emptyImmutablePaths;
    status = driver.update(StringData(), &roleDocument, validateForStorage, emptyImmutablePaths);
    if (!status.isOK())
        return status;

    // Now use the updated document to totally replace the role in the graph!
    RoleInfo role;
    status = parseRoleFromDocument(roleDocument.getObject(), &role);
    if (!status.isOK())
        return status;
    status = roleGraph->replaceRole(role.name, role.roles, role.privileges, role.restrictions);

    return status;
}
/**
 * Applies an update (or upsert) to the single document in 'collectionName' matching
 * 'query', using the UpdateDriver against the mock's in-memory BSONObj collection.
 *
 * Returns OK on success; otherwise the parse/find/update error. On a plain update, also
 * notifies the AuthorizationManager (if attached) via logOp with the generated oplog
 * entry. NOTE(review): 'writeConcern' is only forwarded to insert() on the upsert path.
 */
Status AuthzManagerExternalStateMock::updateOne(OperationContext* opCtx,
                                                const NamespaceString& collectionName,
                                                const BSONObj& query,
                                                const BSONObj& updatePattern,
                                                bool upsert,
                                                const BSONObj& writeConcern) {
    namespace mmb = mutablebson;
    UpdateDriver::Options updateOptions;
    UpdateDriver driver(updateOptions);
    // No array filters are supported/needed by the mock's callers.
    std::map<StringData, std::unique_ptr<ArrayFilter>> arrayFilters;
    Status status = driver.parse(updatePattern, arrayFilters);
    if (!status.isOK())
        return status;

    BSONObjCollection::iterator iter;
    status = _findOneIter(collectionName, query, &iter);
    mmb::Document document;
    if (status.isOK()) {
        // Found an existing document: apply the mods to a mutable copy of it.
        document.reset(*iter, mmb::Document::kInPlaceDisabled);
        // The original document can be empty because it is only needed for validation of
        // immutable paths, and none are supplied here.
        const BSONObj emptyOriginal;
        const bool validateForStorage = false;
        const FieldRefSet emptyImmutablePaths;
        BSONObj logObj;
        status = driver.update(StringData(),
                               emptyOriginal,
                               &document,
                               validateForStorage,
                               emptyImmutablePaths,
                               &logObj);
        if (!status.isOK())
            return status;
        // Copy out of the mutable document before storing back into the collection.
        BSONObj newObj = document.getObject().copy();
        *iter = newObj;
        BSONObj idQuery = driver.makeOplogEntryQuery(newObj, false);
        if (_authzManager) {
            _authzManager->logOp(opCtx, "u", collectionName, logObj, &idQuery);
        }
        return Status::OK();
    } else if (status == ErrorCodes::NoMatchingDocument && upsert) {
        // Upsert path: synthesize a new document from the query's equality fields.
        if (query.hasField("_id")) {
            document.root().appendElement(query["_id"]).transitional_ignore();
        }
        const FieldRef idFieldRef("_id");
        FieldRefSet immutablePaths;
        invariant(immutablePaths.insert(&idFieldRef));
        status = driver.populateDocumentWithQueryFields(opCtx, query, immutablePaths, document);
        if (!status.isOK()) {
            return status;
        }
        // The original document can be empty because it is only needed for validation of
        // immutable paths.
        const BSONObj emptyOriginal;
        const bool validateForStorage = false;
        const FieldRefSet emptyImmutablePaths;
        status = driver.update(
            StringData(), emptyOriginal, &document, validateForStorage, emptyImmutablePaths);
        if (!status.isOK()) {
            return status;
        }
        // insert() handles the actual storage and its own logOp notification.
        return insert(opCtx, collectionName, document.getObject(), writeConcern);
    } else {
        return status;
    }
}
/**
 * Applies the parsed update to '*doc'.
 *
 * Two implementations coexist: if '_root' is set, the parsed UpdateNode tree is applied;
 * otherwise the legacy ModifierInterface mods are applied one by one. In both paths,
 * when logging is enabled ('_logOp' and 'logOpRec'), the oplog entry is accumulated into
 * '_logDoc' and written to '*logOpRec' on success. '_affectIndices' is set (and in-place
 * updates disabled) when an indexed field may have changed.
 *
 * 'matchedField' is the positional-operator match key (empty if none). 'original' is the
 * pre-update document, used only by the legacy path to verify 'immutablePaths' were not
 * altered. 'docWasModified', if non-null, is set to whether any mod was a non-no-op.
 *
 * Returns OK, a mod prepare/apply/log error, or ConflictingUpdateOperators when two mods
 * target overlapping paths. Immutable-path and storage violations are raised via uassert.
 */
Status UpdateDriver::update(StringData matchedField,
                            BSONObj original,
                            mutablebson::Document* doc,
                            bool validateForStorage,
                            const FieldRefSet& immutablePaths,
                            BSONObj* logOpRec,
                            bool* docWasModified) {
    // TODO: assert that update() is called at most once in a !_multi case.

    // A full document replacement always potentially affects indexes.
    _affectIndices = (isDocReplacement() && (_indexedFields != NULL));

    _logDoc.reset();
    LogBuilder logBuilder(_logDoc.root());

    if (_root) {
        // We parsed using the new UpdateNode implementation.
        UpdateNode::ApplyParams applyParams(doc->root(), immutablePaths);
        applyParams.matchedField = matchedField;
        applyParams.insert = _insert;
        applyParams.fromReplication = _modOptions.fromReplication;
        applyParams.validateForStorage = validateForStorage;
        applyParams.indexData = _indexedFields;
        if (_logOp && logOpRec) {
            applyParams.logBuilder = &logBuilder;
        }
        auto applyResult = _root->apply(applyParams);
        if (applyResult.indexesAffected) {
            _affectIndices = true;
            doc->disableInPlaceUpdates();
        }
        if (docWasModified) {
            *docWasModified = !applyResult.noop;
        }
    } else {
        // We parsed using the old ModifierInterface implementation.

        // Ask each of the mods to type check whether they can operate over the current document
        // and, if so, to change that document accordingly.
        FieldRefSet updatedPaths;

        for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
            ModifierInterface::ExecInfo execInfo;
            Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
            if (!status.isOK()) {
                return status;
            }

            // Insert-only mods (e.g. $setOnInsert) are skipped unless this is an insert.
            if (execInfo.context == ModifierInterface::ExecInfo::INSERT_CONTEXT && !_insert) {
                continue;
            }

            // Gather which fields this mod is interested on and whether these fields were
            // "taken" by previous mods. Note that not all mods are multi-field mods. When we
            // see an empty field, we may stop looking for others.
            for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
                if (execInfo.fieldRef[i] == 0) {
                    break;
                }

                // Record each field being updated but check for conflicts first
                const FieldRef* other;
                if (!updatedPaths.insert(execInfo.fieldRef[i], &other)) {
                    return Status(ErrorCodes::ConflictingUpdateOperators,
                                  str::stream() << "Cannot update '" << other->dottedField()
                                                << "' and '"
                                                << execInfo.fieldRef[i]->dottedField()
                                                << "' at the same time");
                }

                // We start with the expectation that a mod will be in-place. But if the mod
                // touched an indexed field and the mod will indeed be executed -- that is, it
                // is not a no-op and it is in a valid context -- then we switch back to a
                // non-in-place mode.
                //
                // TODO: make mightBeIndexed and fieldRef like each other.
                if (!_affectIndices && !execInfo.noOp && _indexedFields &&
                    _indexedFields->mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
                    _affectIndices = true;
                    doc->disableInPlaceUpdates();
                }
            }

            if (!execInfo.noOp) {
                status = (*it)->apply();

                if (docWasModified)
                    *docWasModified = true;

                if (!status.isOK()) {
                    return status;
                }
            }

            // If we require a replication oplog entry for this update, go ahead and generate one.
            if (!execInfo.noOp && _logOp && logOpRec) {
                status = (*it)->log(&logBuilder);
                if (!status.isOK()) {
                    return status;
                }
            }
        }

        // Check for BSON depth and DBRef constraint violations.
        if (validateForStorage) {
            for (auto path = updatedPaths.begin(); path != updatedPaths.end(); ++path) {
                // Find the updated field in the updated document.
                auto newElem = doc->root();
                for (size_t i = 0; i < (*path)->numParts(); ++i) {
                    newElem = newElem[(*path)->getPart(i)];
                    if (!newElem.ok()) {
                        break;
                    }
                }

                // newElem might be missing if $unset/$renamed-away.
                if (newElem.ok()) {
                    // Check parents.
                    const std::uint32_t recursionLevel = 0;
                    auto parentsDepth =
                        storage_validation::storageValidParents(newElem, recursionLevel);

                    // Check element and its children.
                    const bool doRecursiveCheck = true;
                    storage_validation::storageValid(newElem, doRecursiveCheck, parentsDepth);
                }
            }
        }

        // Verify that no immutable path was removed, altered, or turned into an array.
        for (auto path = immutablePaths.begin(); path != immutablePaths.end(); ++path) {
            // Only paths that conflict with an updated path need checking.
            if (!updatedPaths.findConflicts(*path, nullptr)) {
                continue;
            }

            // Find the updated field in the updated document.
            auto newElem = doc->root();
            for (size_t i = 0; i < (*path)->numParts(); ++i) {
                newElem = newElem[(*path)->getPart(i)];
                if (!newElem.ok()) {
                    break;
                }
                uassert(ErrorCodes::NotSingleValueField,
                        str::stream()
                            << "After applying the update to the document, the (immutable) field '"
                            << (*path)->dottedField()
                            << "' was found to be an array or array descendant.",
                        newElem.getType() != BSONType::Array);
            }

            auto oldElem =
                dotted_path_support::extractElementAtPath(original, (*path)->dottedField());

            // Removing an immutable field that existed before is forbidden.
            uassert(ErrorCodes::ImmutableField,
                    str::stream() << "After applying the update, the '" << (*path)->dottedField()
                                  << "' (required and immutable) field was "
                                     "found to have been removed --"
                                  << original,
                    newElem.ok() || !oldElem.ok());
            if (newElem.ok() && oldElem.ok()) {
                uassert(ErrorCodes::ImmutableField,
                        str::stream() << "After applying the update, the (immutable) field '"
                                      << (*path)->dottedField()
                                      << "' was found to have been altered to "
                                      << newElem.toString(),
                        newElem.compareWithBSONElement(oldElem, nullptr, false) == 0);
            }
        }
    }

    if (_logOp && logOpRec)
        *logOpRec = _logDoc.getObject();

    return Status::OK();
}
/**
 * Legacy overload: applies the parsed ModifierInterface mods to '*doc'.
 *
 * 'matchedField' is the positional-operator match key (empty if none). When logging is
 * enabled ('_logOp' and 'logOpRec'), each applied mod contributes to '_logDoc', which is
 * written to '*logOpRec' on success. '_affectIndices' is set (and in-place updates
 * disabled) when a non-no-op mod may touch an indexed field.
 *
 * Returns OK, a mod prepare/apply/log error, or ConflictingUpdateOperators when two mods
 * target overlapping paths.
 */
Status UpdateDriver::update(const StringData& matchedField,
                            mutablebson::Document* doc,
                            BSONObj* logOpRec) {
    // TODO: assert that update() is called at most once in a !_multi case.
    FieldRefSet targetFields;

    _affectIndices = false;

    _logDoc.reset();
    LogBuilder logBuilder(_logDoc.root());

    // Ask each of the mods to type check whether they can operate over the current document
    // and, if so, to change that document accordingly.
    for (vector<ModifierInterface*>::iterator it = _mods.begin(); it != _mods.end(); ++it) {
        ModifierInterface::ExecInfo execInfo;
        Status status = (*it)->prepare(doc->root(), matchedField, &execInfo);
        if (!status.isOK()) {
            return status;
        }

        // If a mod wants to be applied only if this is an upsert (or only if this is a
        // strict update), we should respect that. If a mod doesn't care, it would state
        // it is fine with ANY update context.
        const bool validContext = (execInfo.context == ModifierInterface::ExecInfo::ANY_CONTEXT ||
                                   execInfo.context == _context);

        // Nothing to do if not in a valid context.
        if (!validContext) {
            continue;
        }

        // Gather which fields this mod is interested on and whether these fields were
        // "taken" by previous mods. Note that not all mods are multi-field mods. When we
        // see an empty field, we may stop looking for others.
        for (int i = 0; i < ModifierInterface::ExecInfo::MAX_NUM_FIELDS; i++) {
            if (execInfo.fieldRef[i] == 0) {
                break;
            }

            // Conflict tracking is skipped entirely for a single single-field mod.
            if (!targetFields.empty() || _mods.size() > 1) {
                const FieldRef* other;
                if (!targetFields.insert(execInfo.fieldRef[i], &other)) {
                    return Status(ErrorCodes::ConflictingUpdateOperators,
                                  mongoutils::str::stream()
                                      << "Cannot update '" << other->dottedField() << "' and '"
                                      << execInfo.fieldRef[i]->dottedField()
                                      << "' at the same time");
                }
            }

            // We start with the expectation that a mod will be in-place. But if the mod
            // touched an indexed field and the mod will indeed be executed -- that is, it
            // is not a no-op and it is in a valid context -- then we switch back to a
            // non-in-place mode.
            //
            // TODO: make mightBeIndexed and fieldRef like each other.
            if (!_affectIndices && !execInfo.noOp &&
                _indexedFields.mightBeIndexed(execInfo.fieldRef[i]->dottedField())) {
                _affectIndices = true;
                doc->disableInPlaceUpdates();
            }
        }

        if (!execInfo.noOp) {
            status = (*it)->apply();

            if (!status.isOK()) {
                return status;
            }
        }

        // If we require a replication oplog entry for this update, go ahead and generate one.
        if (_logOp && logOpRec) {
            status = (*it)->log(&logBuilder);
            if (!status.isOK()) {
                return status;
            }
        }
    }

    if (_logOp && logOpRec)
        *logOpRec = _logDoc.getObject();

    return Status::OK();
}
/**
 * This will verify that all updated fields are
 * 1.) Valid for storage (checking parent to make sure things like DBRefs are valid)
 * 2.) Compare updated immutable fields do not change values
 *
 * If updatedFields is empty then it was replacement and/or we need to check all fields.
 *
 * Returns OK, a storage-validity error, NoSuchKey/ImmutableField for a missing or
 * altered immutable field, or NotSingleValueField when an immutable field is (or is
 * inside) an array.
 */
inline Status validate(const BSONObj& original,
                       const FieldRefSet& updatedFields,
                       const mb::Document& updated,
                       const std::vector<FieldRef*>* immutableAndSingleValueFields,
                       const ModifierInterface::Options& opts) {
    LOG(3) << "update validate options -- "
           << " updatedFields: " << updatedFields << " immutableAndSingleValueFields.size:"
           << (immutableAndSingleValueFields ? immutableAndSingleValueFields->size() : 0)
           << " validate:" << opts.enforceOkForStorage;

    // 1.) Loop through each updated field and validate for storage
    // and detect immutable field updates

    // The set of possibly changed immutable fields -- we will need to check their vals
    FieldRefSet changedImmutableFields;

    // Check to see if there were no fields specified or if we are not validating
    // The case if a range query, or query that didn't result in saved fields
    if (updatedFields.empty() || !opts.enforceOkForStorage) {
        if (opts.enforceOkForStorage) {
            // No specific fields were updated so the whole doc must be checked
            Status s = storageValid(updated, true);
            if (!s.isOK())
                return s;
        }

        // Check all immutable fields
        if (immutableAndSingleValueFields)
            changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
    } else {
        // TODO: Change impl so we don't need to create a new FieldRefSet
        // -- move all conflict logic into static function on FieldRefSet?
        FieldRefSet immutableFieldRef;
        if (immutableAndSingleValueFields)
            immutableFieldRef.fillFrom(*immutableAndSingleValueFields);

        FieldRefSet::const_iterator where = updatedFields.begin();
        const FieldRefSet::const_iterator end = updatedFields.end();
        for (; where != end; ++where) {
            const FieldRef& current = **where;

            // Find the updated field in the updated document.
            mutablebson::ConstElement newElem = updated.root();
            size_t currentPart = 0;
            while (newElem.ok() && currentPart < current.numParts())
                newElem = newElem[current.getPart(currentPart++)];

            // newElem might be missing if $unset/$renamed-away
            if (newElem.ok()) {
                // Check element, and its children
                Status s = storageValid(newElem, true);
                if (!s.isOK())
                    return s;

                // Check parents to make sure they are valid as well.
                s = storageValidParents(newElem);
                if (!s.isOK())
                    return s;
            }

            // Check if the updated field conflicts with immutable fields.
            // (Fixed: this call was corrupted to "¤t" by an HTML-entity mangling
            // of "&current"; restored the address-of argument.)
            immutableFieldRef.findConflicts(&current, &changedImmutableFields);
        }
    }

    const bool checkIdField = (updatedFields.empty() && !original.isEmpty()) ||
        updatedFields.findConflicts(&idFieldRef, NULL);

    // Add _id to fields to check since it too is immutable
    if (checkIdField)
        changedImmutableFields.keepShortest(&idFieldRef);
    else if (changedImmutableFields.empty()) {
        // Return early if nothing changed which is immutable
        return Status::OK();
    }

    LOG(4) << "Changed immutable fields: " << changedImmutableFields;
    // 2.) Now compare values of the changed immutable fields (to make sure they haven't)

    const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];

    FieldRefSet::const_iterator where = changedImmutableFields.begin();
    const FieldRefSet::const_iterator end = changedImmutableFields.end();
    for (; where != end; ++where) {
        const FieldRef& current = **where;

        // Find the updated field in the updated document.
        mutablebson::ConstElement newElem = updated.root();
        size_t currentPart = 0;
        while (newElem.ok() && currentPart < current.numParts())
            newElem = newElem[current.getPart(currentPart++)];

        if (!newElem.ok()) {
            if (original.isEmpty()) {
                // If the _id is missing and not required, then skip this check
                if (!(current.dottedField() == idFieldName))
                    return Status(ErrorCodes::NoSuchKey,
                                  mongoutils::str::stream()
                                      << "After applying the update, the new"
                                      << " document was missing the '" << current.dottedField()
                                      << "' (required and immutable) field.");
            } else {
                if (current.dottedField() != idFieldName)
                    return Status(ErrorCodes::ImmutableField,
                                  mongoutils::str::stream()
                                      << "After applying the update to the document with "
                                      << newIdElem.toString() << ", the '"
                                      << current.dottedField()
                                      << "' (required and immutable) field was "
                                         "found to have been removed --"
                                      << original);
            }
        } else {
            // Find the potentially affected field in the original document.
            const BSONElement oldElem =
                dps::extractElementAtPath(original, current.dottedField());
            const BSONElement oldIdElem = original.getField(idFieldName);

            // Ensure no arrays since neither _id nor shard keys can be in an array, or one.
            mb::ConstElement currElem = newElem;
            while (currElem.ok()) {
                if (currElem.getType() == Array) {
                    return Status(
                        ErrorCodes::NotSingleValueField,
                        mongoutils::str::stream()
                            << "After applying the update to the document {"
                            << (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
                            << " , ...}, the (immutable) field '" << current.dottedField()
                            << "' was found to be an array or array descendant.");
                }
                currElem = currElem.parent();
            }

            // If we have both (old and new), compare them. If we just have new we are good.
            if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, nullptr, false) != 0) {
                return Status(ErrorCodes::ImmutableField,
                              mongoutils::str::stream()
                                  << "After applying the update to the document {"
                                  << oldElem.toString() << " , ...}, the (immutable) field '"
                                  << current.dottedField()
                                  << "' was found to have been altered to "
                                  << newElem.toString());
            }
        }
    }

    return Status::OK();
}
/**
 * Builds the document to be inserted for an upsert that matched no documents.
 *
 * Seeds '*doc' from the canonical query's equality fields (or from the _id of a simple
 * _id query), applies the update mods in insert context (oplog logging disabled —
 * the insert itself is what gets logged), ensures _id exists and is first, and, for
 * external requests, validates storage constraints and immutable-path presence.
 *
 * Returns the finished document; raises via uassert/fassert on any failure, including a
 * result larger than BSONObjMaxUserSize.
 */
BSONObj UpdateStage::applyUpdateOpsForInsert(OperationContext* opCtx,
                                             const CanonicalQuery* cq,
                                             const BSONObj& query,
                                             UpdateDriver* driver,
                                             mutablebson::Document* doc,
                                             bool isInternalRequest,
                                             const NamespaceString& ns,
                                             bool enforceOkForStorage,
                                             UpdateStats* stats) {
    // Since this is an insert (no docs found and upsert:true), we will be logging it
    // as an insert in the oplog. We don't need the driver's help to build the
    // oplog record, then. We also set the context of the update driver to the INSERT_CONTEXT.
    // Some mods may only work in that context (e.g. $setOnInsert).
    driver->setLogOp(false);
    driver->setInsert(true);

    // Internal requests (e.g. replication/migration) skip immutable-field gathering.
    FieldRefSet immutablePaths;
    if (!isInternalRequest) {
        auto immutablePathsVector = getImmutableFields(opCtx, ns);
        if (immutablePathsVector) {
            immutablePaths.fillFrom(
                transitional_tools_do_not_use::unspool_vector(*immutablePathsVector));
        }
    }
    // _id is always immutable; keepShortest avoids duplicating a covering path.
    immutablePaths.keepShortest(&idFieldRef);

    if (cq) {
        uassertStatusOK(driver->populateDocumentWithQueryFields(*cq, immutablePaths, *doc));
        if (driver->isDocReplacement())
            stats->fastmodinsert = true;
    } else {
        // No canonical query: only legal for a simple {_id: ...} query.
        fassert(17354, CanonicalQuery::isSimpleIdQuery(query));
        BSONElement idElt = query[idFieldName];
        fassert(17352, doc->root().appendElement(idElt));
    }

    // Apply the update modifications here. Do not validate for storage, since we will validate the
    // entire document after the update. However, we ensure that no immutable fields are updated.
    const bool validateForStorage = false;
    if (isInternalRequest) {
        immutablePaths.clear();
    }
    Status updateStatus = driver->update(StringData(), doc, validateForStorage, immutablePaths);
    if (!updateStatus.isOK()) {
        uasserted(16836, updateStatus.reason());
    }

    // Ensure _id exists and is first
    auto idAndFirstStatus = ensureIdFieldIsFirst(doc);
    if (idAndFirstStatus.code() == ErrorCodes::InvalidIdField) {  // _id field is missing
        addObjectIDIdField(doc);
    } else {
        uassertStatusOK(idAndFirstStatus);
    }

    // Validate that the object replacement or modifiers resulted in a document
    // that contains all the immutable keys and can be stored if it isn't coming
    // from a migration or via replication.
    if (!isInternalRequest) {
        if (enforceOkForStorage) {
            storage_validation::storageValid(*doc);
        }
        checkImmutablePathsPresent(*doc, immutablePaths);
    }

    BSONObj newObj = doc->getObject();
    if (newObj.objsize() > BSONObjMaxUserSize) {
        uasserted(17420,
                  str::stream() << "Document to upsert is larger than " << BSONObjMaxUserSize);
    }

    return newObj;
}
/**
 * Applies the requested mods to the matched document 'oldObj' and writes the result back
 * to the collection at 'recordId' (in place via damage events when the storage engine
 * supports it, otherwise as a full document rewrite).
 *
 * Handles positional-match rematching, immutable-path collection for replicated writes,
 * _id normalization, oplog entry construction, and tracking of moved/re-indexed records
 * so a collection scan does not revisit them. Explain requests go through the same
 * transformation but skip the actual write.
 *
 * Returns the post-update document (the original document if nothing changed). Raises
 * via uassert on update failure or on a result larger than BSONObjMaxUserSize.
 */
BSONObj UpdateStage::transformAndUpdate(const Snapshotted<BSONObj>& oldObj, RecordId& recordId) {
    const UpdateRequest* request = _params.request;
    UpdateDriver* driver = _params.driver;
    CanonicalQuery* cq = _params.canonicalQuery;
    UpdateLifecycle* lifecycle = request->getLifecycle();

    // If asked to return new doc, default to the oldObj, in case nothing changes.
    BSONObj newObj = oldObj.value();

    // Ask the driver to apply the mods. It may be that the driver can apply those "in
    // place", that is, some values of the old document just get adjusted without any
    // change to the binary layout on the bson layer. It may be that a whole new document
    // is needed to accomodate the new bson layout of the resulting document. In any event,
    // only enable in-place mutations if the underlying storage engine offers support for
    // writing damage events.
    _doc.reset(oldObj.value(),
               (_collection->updateWithDamagesSupported()
                    ? mutablebson::Document::kInPlaceEnabled
                    : mutablebson::Document::kInPlaceDisabled));

    BSONObj logObj;

    bool docWasModified = false;

    Status status = Status::OK();
    // Only replicated (user-originated) writes are storage-validated here.
    const bool validateForStorage = getOpCtx()->writesAreReplicated() && _enforceOkForStorage;
    FieldRefSet immutablePaths;
    if (getOpCtx()->writesAreReplicated() && !request->isFromMigration()) {
        if (lifecycle) {
            auto immutablePathsVector =
                getImmutableFields(getOpCtx(), request->getNamespaceString());
            if (immutablePathsVector) {
                immutablePaths.fillFrom(
                    transitional_tools_do_not_use::unspool_vector(*immutablePathsVector));
            }
        }
        // _id is always immutable; keepShortest avoids duplicating a covering path.
        immutablePaths.keepShortest(&idFieldRef);
    }
    if (!driver->needMatchDetails()) {
        // If we don't need match details, avoid doing the rematch
        status = driver->update(
            StringData(), &_doc, validateForStorage, immutablePaths, &logObj, &docWasModified);
    } else {
        // If there was a matched field, obtain it.
        MatchDetails matchDetails;
        matchDetails.requestElemMatchKey();

        dassert(cq);
        verify(cq->root()->matchesBSON(oldObj.value(), &matchDetails));

        string matchedField;
        if (matchDetails.hasElemMatchKey())
            matchedField = matchDetails.elemMatchKey();

        status = driver->update(
            matchedField, &_doc, validateForStorage, immutablePaths, &logObj, &docWasModified);
    }

    if (!status.isOK()) {
        uasserted(16837, status.reason());
    }

    // Skip adding _id field if the collection is capped (since capped collection documents can
    // neither grow nor shrink).
    const auto createIdField = !_collection->isCapped();

    // Ensure if _id exists it is first
    status = ensureIdFieldIsFirst(&_doc);
    if (status.code() == ErrorCodes::InvalidIdField) {
        // Create ObjectId _id field if we are doing that
        if (createIdField) {
            addObjectIDIdField(&_doc);
        }
    } else {
        uassertStatusOK(status);
    }

    // See if the changes were applied in place
    const char* source = NULL;
    const bool inPlace = _doc.getInPlaceUpdates(&_damages, &source);

    if (inPlace && _damages.empty()) {
        // An interesting edge case. A modifier didn't notice that it was really a no-op
        // during its 'prepare' phase. That represents a missed optimization, but we still
        // shouldn't do any real work. Toggle 'docWasModified' to 'false'.
        //
        // Currently, an example of this is '{ $push : { x : {$each: [], $sort: 1} } }' when the 'x'
        // array exists and is already sorted.
        docWasModified = false;
    }

    if (docWasModified) {
        // Prepare to write back the modified document
        WriteUnitOfWork wunit(getOpCtx());

        RecordId newRecordId;
        OplogUpdateEntryArgs args;
        if (!request->isExplain()) {
            invariant(_collection);
            auto* css = CollectionShardingState::get(getOpCtx(), _collection->ns());
            args.nss = _collection->ns();
            args.uuid = _collection->uuid();
            args.stmtId = request->getStmtId();
            args.update = logObj;
            // NOTE(review): 'newObj' still holds the pre-update document at this point
            // (the shard document key is extracted before the new object is materialized).
            args.criteria = css->getMetadata().extractDocumentKey(newObj);
            uassert(16980,
                    "Multi-update operations require all documents to have an '_id' field",
                    !request->isMulti() || args.criteria.hasField("_id"_sd));
            args.fromMigrate = request->isFromMigration();
            args.storeDocOption = getStoreDocMode(*request);
            if (args.storeDocOption == OplogUpdateEntryArgs::StoreDocOption::PreImage) {
                args.preImageDoc = oldObj.value().getOwned();
            }
        }

        if (inPlace) {
            if (!request->isExplain()) {
                newObj = oldObj.value();
                const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize());
                Snapshotted<RecordData> snap(oldObj.snapshotId(), oldRec);

                StatusWith<RecordData> newRecStatus = _collection->updateDocumentWithDamages(
                    getOpCtx(), recordId, std::move(snap), source, _damages, &args);

                newObj = uassertStatusOK(std::move(newRecStatus)).releaseToBson();
            }

            newRecordId = recordId;
        } else {
            // The updates were not in place. Apply them through the file manager.
            newObj = _doc.getObject();
            uassert(17419,
                    str::stream() << "Resulting document after update is larger than "
                                  << BSONObjMaxUserSize,
                    newObj.objsize() <= BSONObjMaxUserSize);
            if (!request->isExplain()) {
                newRecordId = _collection->updateDocument(getOpCtx(),
                                                          recordId,
                                                          oldObj,
                                                          newObj,
                                                          true,
                                                          driver->modsAffectIndices(),
                                                          _params.opDebug,
                                                          &args);
            }
        }

        invariant(oldObj.snapshotId() == getOpCtx()->recoveryUnit()->getSnapshotId());
        wunit.commit();

        // If the document moved, we might see it again in a collection scan (maybe it's
        // a document after our current document).
        //
        // If the document is indexed and the mod changes an indexed value, we might see
        // it again. For an example, see the comment above near declaration of
        // updatedRecordIds.
        //
        // This must be done after the wunit commits so we are sure we won't be rolling back.
        if (_updatedRecordIds && (newRecordId != recordId || driver->modsAffectIndices())) {
            _updatedRecordIds->insert(newRecordId);
        }
    }

    // Only record doc modifications if they wrote (exclude no-ops). Explains get
    // recorded as if they wrote.
    if (docWasModified || request->isExplain()) {
        _specificStats.nModified++;
    }

    return newObj;
}