UpdateResult update(OperationContext* txn,
                    Database* db,
                    const UpdateRequest& request,
                    OpDebug* opDebug) {
    invariant(db);

    // Explain should never use this helper.
    invariant(!request.isExplain());

    const NamespaceString& nsString = request.getNamespaceString();
    Collection* collection = db->getCollection(nsString.ns());

    // The update stage does not create its own collection. As such, if the update is
    // an upsert, create the collection that the update stage inserts into beforehand.
    if (!collection && request.isUpsert()) {
        // We have to have an exclusive lock on the db to be allowed to create the collection.
        // Callers should either get an X or create the collection.
        const Locker* locker = txn->lockState();
        invariant(locker->isW() ||
                  locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()),
                                            MODE_X));

        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);

        bool userInitiatedWritesAndNotPrimary =
            txn->writesAreReplicated() &&
            !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db());

        if (userInitiatedWritesAndNotPrimary) {
            uassertStatusOK(Status(ErrorCodes::NotMaster,
                                   str::stream() << "Not primary while creating collection "
                                                 << nsString.ns() << " during upsert"));
        }

        WriteUnitOfWork wuow(txn);
        collection = db->createCollection(txn, nsString.ns(), CollectionOptions());
        invariant(collection);
        wuow.commit();
    }

    // Parse the update, get an executor for it, run the executor, get stats out.
    ParsedUpdate parsedUpdate(txn, &request);
    uassertStatusOK(parsedUpdate.parseRequest());

    PlanExecutor* rawExec;
    uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
    boost::scoped_ptr<PlanExecutor> exec(rawExec);

    uassertStatusOK(exec->executePlan());
    return UpdateStage::makeUpdateResult(exec.get(), opDebug);
}
UpdateResult update(OperationContext* txn,
                    Database* db,
                    const UpdateRequest& request,
                    OpDebug* opDebug) {
    invariant(db);

    // Explain should never use this helper.
    invariant(!request.isExplain());

    const NamespaceString& nsString = request.getNamespaceString();
    Collection* collection = db->getCollection(txn, nsString.ns());

    // The update stage does not create its own collection. As such, if the update is
    // an upsert, create the collection that the update stage inserts into beforehand.
    if (!collection && request.isUpsert()) {
        // We have to have an exclusive lock on the db to be allowed to create the collection.
        // Callers should either get an X or create the collection.
        const Locker* locker = txn->lockState();
        invariant(locker->isW() ||
                  locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()),
                                            MODE_X));

        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);

        WriteUnitOfWork wuow(txn);
        collection = db->createCollection(txn, nsString.ns());
        invariant(collection);

        if (!request.isFromReplication()) {
            repl::logOp(txn,
                        "c",
                        (db->name() + ".$cmd").c_str(),
                        BSON("create" << (nsString.coll())));
        }
        wuow.commit();
    }

    // Parse the update, get an executor for it, run the executor, get stats out.
    ParsedUpdate parsedUpdate(txn, &request);
    uassertStatusOK(parsedUpdate.parseRequest());

    PlanExecutor* rawExec;
    uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, opDebug, &rawExec));
    boost::scoped_ptr<PlanExecutor> exec(rawExec);

    uassertStatusOK(exec->executePlan());
    return UpdateStage::makeUpdateResult(exec.get(), opDebug);
}
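A minimal caller sketch, not part of the original source, showing how the update() helpers above might be driven for a single-document upsert. The UpdateRequest setters mirror those used in WriteCmd::explain() below; the example namespace, query, and update documents are hypothetical, and the caller is assumed to already hold the locks and OperationContext/Database handles the helper expects.

// Illustrative sketch (assumed names: exampleUpsert, "test.users"); not from the
// original source. The caller must already hold the appropriate database lock and
// provide a valid OperationContext and Database.
UpdateResult exampleUpsert(OperationContext* txn, Database* db) {
    UpdateRequest request(NamespaceString("test.users"));        // hypothetical namespace
    request.setQuery(BSON("_id" << 1));                          // match a single document
    request.setUpdates(BSON("$set" << BSON("name" << "alice"))); // update expression
    request.setUpsert(true);                                     // insert if nothing matches

    // The lifecycle object supplies the collection and index keys to the update
    // driver (see UpdateExecutor::prepareInLock below).
    UpdateLifecycleImpl lifecycle(true, request.getNamespaceString());
    request.setLifecycle(&lifecycle);

    OpDebug* opDebug = &txn->getCurOp()->debug();
    return update(txn, db, request, opDebug);
}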
Status UpdateExecutor::prepareInLock(Database* db) {
    // If we have a non-NULL PlanExecutor, then we've already done the in-lock preparation.
    if (_exec.get()) {
        return Status::OK();
    }

    const NamespaceString& nsString = _request->getNamespaceString();
    UpdateLifecycle* lifecycle = _request->getLifecycle();

    validateUpdate(nsString.ns().c_str(), _request->getUpdates(), _request->getQuery());

    Collection* collection = db->getCollection(_request->getOpCtx(), nsString.ns());

    // The update stage does not create its own collection. As such, if the update is
    // an upsert, create the collection that the update stage inserts into beforehand.
    if (!collection && _request->isUpsert()) {
        OperationContext* const txn = _request->getOpCtx();

        // We have to have an exclusive lock on the db to be allowed to create the collection.
        // Callers should either get an X or create the collection.
        const Locker* locker = txn->lockState();
        invariant(locker->isW() ||
                  locker->isLockHeldForMode(ResourceId(RESOURCE_DATABASE, nsString.db()),
                                            MODE_X));

        Lock::DBLock lk(txn->lockState(), nsString.db(), MODE_X);

        WriteUnitOfWork wuow(txn);
        invariant(db->createCollection(txn, nsString.ns()));

        if (!_request->isFromReplication()) {
            repl::logOp(txn,
                        "c",
                        (db->name() + ".$cmd").c_str(),
                        BSON("create" << (nsString.coll())));
        }
        wuow.commit();

        collection = db->getCollection(_request->getOpCtx(), nsString.ns());
        invariant(collection);
    }

    // TODO: This seems a bit circuitous.
    _opDebug->updateobj = _request->getUpdates();

    // If this is a user-issued update, then we want to return an error: you cannot perform
    // writes on a secondary. If this is an update to a secondary from the replication system,
    // however, then we make an exception and let the write proceed. In this case,
    // shouldCallLogOp() will be false.
    if (_request->shouldCallLogOp() &&
        !repl::getGlobalReplicationCoordinator()->canAcceptWritesForDatabase(nsString.db())) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while performing update on "
                                    << nsString.ns());
    }

    if (lifecycle) {
        lifecycle->setCollection(collection);
        _driver.refreshIndexKeys(lifecycle->getIndexKeys(_request->getOpCtx()));
    }

    PlanExecutor* rawExec = NULL;
    Status getExecStatus = Status::OK();
    if (_canonicalQuery.get()) {
        // This is the regular path for when we have a CanonicalQuery.
        getExecStatus = getExecutorUpdate(_request->getOpCtx(),
                                          db,
                                          _canonicalQuery.release(),
                                          _request,
                                          &_driver,
                                          _opDebug,
                                          &rawExec);
    }
    else {
        // This is the idhack fast-path for getting a PlanExecutor without doing the work
        // to create a CanonicalQuery.
        getExecStatus = getExecutorUpdate(_request->getOpCtx(),
                                          db,
                                          nsString.ns(),
                                          _request,
                                          &_driver,
                                          _opDebug,
                                          &rawExec);
    }

    if (!getExecStatus.isOK()) {
        return getExecStatus;
    }

    invariant(rawExec);
    _exec.reset(rawExec);

    // If yielding is allowed for this plan, then set an auto yield policy. Otherwise set
    // a manual yield policy.
    const bool canYield = !_request->isGod() &&
        (_canonicalQuery.get()
             ? !QueryPlannerCommon::hasNode(_canonicalQuery->root(), MatchExpression::ATOMIC)
             : !LiteParsedQuery::isQueryIsolated(_request->getQuery()));

    PlanExecutor::YieldPolicy policy = canYield ? PlanExecutor::YIELD_AUTO
                                                : PlanExecutor::YIELD_MANUAL;
    _exec->setYieldPolicy(policy);

    return Status::OK();
}
Status WriteCmd::explain(OperationContext* txn,
                         const std::string& dbname,
                         const BSONObj& cmdObj,
                         ExplainCommon::Verbosity verbosity,
                         BSONObjBuilder* out) const {
    // For now we only explain update and delete write commands.
    if (BatchedCommandRequest::BatchType_Update != _writeType &&
        BatchedCommandRequest::BatchType_Delete != _writeType) {
        return Status(ErrorCodes::IllegalOperation,
                      "Only update and delete write ops can be explained");
    }

    // Parse the batch request.
    BatchedCommandRequest request(_writeType);
    std::string errMsg;
    if (!request.parseBSON(cmdObj, &errMsg) || !request.isValid(&errMsg)) {
        return Status(ErrorCodes::FailedToParse, errMsg);
    }

    // Note that this is a runCommand, and therefore the database and the collection name
    // are in different parts of the grammar for the command. But it's more convenient to
    // work with a NamespaceString, so we build one here and replace it in the parsed
    // command. Internally, everything works with the namespace string as opposed to just
    // the collection name.
    NamespaceString nsString(dbname, request.getNS());
    request.setNSS(nsString);

    // Do the validation of the batch that is shared with non-explained write batches.
    Status isValid = WriteBatchExecutor::validateBatch(request);
    if (!isValid.isOK()) {
        return isValid;
    }

    // Explain must do one additional piece of validation: for now we only explain
    // singleton batches.
    if (request.sizeWriteOps() != 1u) {
        return Status(ErrorCodes::InvalidLength, "explained write batches must be of size 1");
    }

    // Get a reference to the singleton batch item (it's the 0th item in the batch).
    BatchItemRef batchItem(&request, 0);

    if (BatchedCommandRequest::BatchType_Update == _writeType) {
        // Create the update request.
        UpdateRequest updateRequest(nsString);
        updateRequest.setQuery(batchItem.getUpdate()->getQuery());
        updateRequest.setUpdates(batchItem.getUpdate()->getUpdateExpr());
        updateRequest.setMulti(batchItem.getUpdate()->getMulti());
        updateRequest.setUpsert(batchItem.getUpdate()->getUpsert());
        updateRequest.setUpdateOpLog(true);
        UpdateLifecycleImpl updateLifecycle(true, updateRequest.getNamespaceString());
        updateRequest.setLifecycle(&updateLifecycle);
        updateRequest.setExplain();

        // Explained updates can yield.
        updateRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);

        OpDebug* debug = &txn->getCurOp()->debug();

        ParsedUpdate parsedUpdate(txn, &updateRequest);
        Status parseStatus = parsedUpdate.parseRequest();
        if (!parseStatus.isOK()) {
            return parseStatus;
        }

        // Explains of write commands are read-only, but we take write locks so
        // that timing info is more accurate.
        AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
        Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);

        // We check the shard version explicitly here rather than using Client::Context,
        // as Context can do implicit database creation if the db does not exist. We want
        // explain to be a no-op that reports a trivial EOF plan against non-existent dbs
        // or collections.
        ensureShardVersionOKOrThrow(nsString.ns());

        // Get a pointer to the (possibly NULL) collection.
        Collection* collection = NULL;
        if (autoDb.getDb()) {
            collection = autoDb.getDb()->getCollection(txn, nsString.ns());
        }

        PlanExecutor* rawExec;
        uassertStatusOK(getExecutorUpdate(txn, collection, &parsedUpdate, debug, &rawExec));
        boost::scoped_ptr<PlanExecutor> exec(rawExec);

        // Explain the plan tree.
        Explain::explainStages(exec.get(), verbosity, out);
        return Status::OK();
    }
    else {
        invariant(BatchedCommandRequest::BatchType_Delete == _writeType);

        // Create the delete request.
        DeleteRequest deleteRequest(nsString);
        deleteRequest.setQuery(batchItem.getDelete()->getQuery());
        deleteRequest.setMulti(batchItem.getDelete()->getLimit() != 1);
        deleteRequest.setUpdateOpLog(true);
        deleteRequest.setGod(false);
        deleteRequest.setExplain();

        // Explained deletes can yield.
        deleteRequest.setYieldPolicy(PlanExecutor::YIELD_AUTO);

        ParsedDelete parsedDelete(txn, &deleteRequest);
        Status parseStatus = parsedDelete.parseRequest();
        if (!parseStatus.isOK()) {
            return parseStatus;
        }

        // Explains of write commands are read-only, but we take write locks so that timing
        // info is more accurate.
        AutoGetDb autoDb(txn, nsString.db(), MODE_IX);
        Lock::CollectionLock colLock(txn->lockState(), nsString.ns(), MODE_IX);

        // We check the shard version explicitly here rather than using Client::Context,
        // as Context can do implicit database creation if the db does not exist. We want
        // explain to be a no-op that reports a trivial EOF plan against non-existent dbs
        // or collections.
        ensureShardVersionOKOrThrow(nsString.ns());

        // Get a pointer to the (possibly NULL) collection.
        Collection* collection = NULL;
        if (autoDb.getDb()) {
            collection = autoDb.getDb()->getCollection(txn, nsString.ns());
        }

        PlanExecutor* rawExec;
        uassertStatusOK(getExecutorDelete(txn, collection, &parsedDelete, &rawExec));
        boost::scoped_ptr<PlanExecutor> exec(rawExec);

        // Explain the plan tree.
        Explain::explainStages(exec.get(), verbosity, out);
        return Status::OK();
    }
}