// Runs 'cmdObj' against the local server through DBDirectClient, bypassing the
// network entirely. The read preference is ignored (we are always "local"),
// hence the parameter name. Returns the owned reply and metadata together with
// the parsed command status and write concern status, or the thrown error's
// status if the direct client raised a DBException.
StatusWith<Shard::CommandResponse> ShardLocal::_runCommand(OperationContext* txn,
                                                           const ReadPreferenceSetting& unused,
                                                           const std::string& dbName,
                                                           const BSONObj& cmdObj) {
    try {
        DBDirectClient client(txn);
        rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
            dbName, cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
        // Take ownership: the reply buffers die with 'commandResponse'.
        BSONObj responseReply = commandResponse->getCommandReply().getOwned();
        BSONObj responseMetadata = commandResponse->getMetadata().getOwned();

        Status commandStatus = getStatusFromCommandResult(responseReply);
        // Only parse the write concern field when the command itself succeeded;
        // otherwise the sentinel "internal error" status is returned for it.
        Status writeConcernStatus = kInternalErrorStatus;
        if (commandStatus.isOK()) {
            writeConcernStatus = getWriteConcernStatusFromCommandResult(responseReply);
        }

        return Shard::CommandResponse{std::move(responseReply),
                                      std::move(responseMetadata),
                                      std::move(commandStatus),
                                      std::move(writeConcernStatus)};
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}
// On a clean startup — i.e. the only database present is "local" — writes the
// initial featureCompatibilityVersion document (3.4) into admin.system.version
// and seeds the in-memory server parameter to match. Does nothing on shard
// servers (their FCV is managed via the config servers).
void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* txn) {
    std::vector<std::string> dbNames;
    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    storageEngine->listDatabases(&dbNames);

    // Any database other than "local" means this is not a clean startup; leave
    // whatever FCV state is already on disk untouched.
    for (auto&& dbName : dbNames) {
        if (dbName != "local") {
            return;
        }
    }

    if (serverGlobalParams.clusterRole != ClusterRole::ShardServer) {
        // Insert featureCompatibilityDocument into admin.system.version.
        // Do not use writeConcern majority, because we may be holding locks.
        NamespaceString nss(FeatureCompatibilityVersion::kCollection);
        DBDirectClient client(txn);
        BSONObj result;
        client.runCommand(
            nss.db().toString(),
            BSON("insert" << nss.coll() << "documents"
                          << BSON_ARRAY(BSON("_id" << FeatureCompatibilityVersion::kParameterName
                                                   << FeatureCompatibilityVersion::kVersionField
                                                   << FeatureCompatibilityVersion::kVersion34))),
            result);
        auto status = getStatusFromCommandResult(result);
        // Tolerate a shutdown racing with the insert; any other failure aborts.
        if (!status.isOK() && status != ErrorCodes::InterruptedAtShutdown) {
            uassertStatusOK(status);
        }

        // Update server parameter.
        serverGlobalParams.featureCompatibilityVersion.store(
            ServerGlobalParams::FeatureCompatibilityVersion_34);
    }
}
// Sets the featureCompatibilityVersion to 'version' (must be '3.4' or '3.2'):
// upserts the version document in admin.system.version with majority write
// concern, then updates the in-memory server parameter to match.
void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version) {
    uassert(40284,
            "featureCompatibilityVersion must be '3.4' or '3.2'",
            version == FeatureCompatibilityVersion::kVersion34 ||
                version == FeatureCompatibilityVersion::kVersion32);

    // Update admin.system.version.
    NamespaceString nss(FeatureCompatibilityVersion::kCollection);
    BSONObjBuilder updateCmd;
    updateCmd.append("update", nss.coll());
    updateCmd.append(
        "updates",
        BSON_ARRAY(BSON("q" << BSON("_id" << FeatureCompatibilityVersion::kParameterName) << "u"
                            << BSON(FeatureCompatibilityVersion::kVersionField << version)
                            << "upsert"
                            << true)));
    updateCmd.append("writeConcern",
                     BSON("w"
                          << "majority"));
    DBDirectClient client(txn);
    BSONObj result;
    client.runCommand(nss.db().toString(), updateCmd.obj(), result);
    // Both the command itself and its write concern must have succeeded.
    uassertStatusOK(getStatusFromCommandResult(result));
    uassertStatusOK(getWriteConcernStatusFromCommandResult(result));

    // Update server parameter.
    if (version == FeatureCompatibilityVersion::kVersion34) {
        serverGlobalParams.featureCompatibilityVersion.store(
            ServerGlobalParams::FeatureCompatibilityVersion_34);
    } else if (version == FeatureCompatibilityVersion::kVersion32) {
        serverGlobalParams.featureCompatibilityVersion.store(
            ServerGlobalParams::FeatureCompatibilityVersion_32);
    }
}
// Parses a killCursors command reply into a KillCursorsResponse. Returns the
// command's own error status if the command failed, or the first parse error
// encountered while extracting the three cursor-id arrays.
StatusWith<KillCursorsResponse> KillCursorsResponse::parseFromBSON(const BSONObj& cmdResponse) {
    // A failed command cannot be turned into a response object.
    auto overallStatus = getStatusFromCommandResult(cmdResponse);
    if (!overallStatus.isOK()) {
        return overallStatus;
    }

    std::vector<CursorId> killed;
    std::vector<CursorId> notFound;
    std::vector<CursorId> alive;

    // Each reply field holds an array of cursor ids; bail out on the first
    // malformed field.
    auto status = fillOutCursorArray(cmdResponse, kKilledField, &killed);
    if (!status.isOK()) {
        return status;
    }
    status = fillOutCursorArray(cmdResponse, kNotFoundField, &notFound);
    if (!status.isOK()) {
        return status;
    }
    status = fillOutCursorArray(cmdResponse, kAliveField, &alive);
    if (!status.isOK()) {
        return status;
    }

    return KillCursorsResponse(killed, notFound, alive);
}
// Fetches descriptions of all roles defined on 'dbname' (optionally including
// built-in roles) by forwarding a rolesInfo command to the config servers via
// the catalog client, appending each returned role document to 'result'.
Status AuthzManagerExternalStateMongos::getRoleDescriptionsForDB(
    OperationContext* opCtx,
    const std::string& dbname,
    PrivilegeFormat showPrivileges,
    AuthenticationRestrictionsFormat showRestrictions,
    bool showBuiltinRoles,
    std::vector<BSONObj>* result) {
    BSONObjBuilder rolesInfoCmd;
    rolesInfoCmd << "rolesInfo" << 1 << "showBuiltinRoles" << showBuiltinRoles;
    addShowToBuilder(&rolesInfoCmd, showPrivileges, showRestrictions);

    BSONObjBuilder builder;
    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
        opCtx, dbname, rolesInfoCmd.obj(), &builder);
    BSONObj cmdResult = builder.obj();
    if (!ok) {
        return getStatusFromCommandResult(cmdResult);
    }

    // The reply field to read depends on the requested privilege format (see
    // rolesFieldName); copy each role document out so it outlives 'cmdResult'.
    for (BSONObjIterator it(cmdResult[rolesFieldName(showPrivileges)].Obj()); it.more();
         it.next()) {
        result->push_back((*it).Obj().getOwned());
    }
    return Status::OK();
}
// Fetches the description of the first of 'roles' found by running a rolesInfo
// command (listing every requested role) against the config servers. Returns
// RoleNotFound when no role came back, or the command's error on failure.
Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* opCtx,
                                                            const std::vector<RoleName>& roles,
                                                            PrivilegeFormat showPrivileges,
                                                            BSONObj* result) {
    // Build the rolesInfo payload: one {role, db} document per requested role.
    BSONArrayBuilder requestedRoles;
    for (const RoleName& role : roles) {
        requestedRoles << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
                               << role.getRole()
                               << AuthorizationManager::ROLE_DB_FIELD_NAME
                               << role.getDB());
    }
    BSONObjBuilder cmdBuilder;
    cmdBuilder.append("rolesInfo", requestedRoles.arr());
    addShowPrivilegesToBuilder(&cmdBuilder, showPrivileges);

    // Forward the command to the config servers through the catalog client.
    BSONObjBuilder responseBuilder;
    const bool cmdOk = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
        opCtx, "admin", cmdBuilder.obj(), &responseBuilder);
    BSONObj response = responseBuilder.obj();
    if (!cmdOk) {
        return getStatusFromCommandResult(response);
    }

    std::vector<BSONElement> foundRoles = response[rolesFieldName(showPrivileges)].Array();
    if (foundRoles.empty()) {
        return Status(ErrorCodes::RoleNotFound, "Roles not found");
    }
    // Only the first returned role is surfaced to the caller.
    *result = foundRoles[0].Obj().getOwned();
    return Status::OK();
}
// Fetches the description of a single role by name via a rolesInfo command to
// the config servers. Returns RoleNotFound when the role does not exist and
// RoleDataInconsistent when the reply unexpectedly contains multiple roles.
Status AuthzManagerExternalStateMongos::getRoleDescription(OperationContext* opCtx,
                                                           const RoleName& roleName,
                                                           PrivilegeFormat showPrivileges,
                                                           BSONObj* result) {
    // Ask about exactly one role.
    BSONObjBuilder cmdBuilder;
    cmdBuilder.append("rolesInfo",
                      BSON_ARRAY(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
                                      << roleName.getRole()
                                      << AuthorizationManager::ROLE_DB_FIELD_NAME
                                      << roleName.getDB())));
    addShowPrivilegesToBuilder(&cmdBuilder, showPrivileges);

    BSONObjBuilder responseBuilder;
    const bool cmdOk = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
        opCtx, "admin", cmdBuilder.obj(), &responseBuilder);
    BSONObj response = responseBuilder.obj();
    if (!cmdOk) {
        return getStatusFromCommandResult(response);
    }

    std::vector<BSONElement> foundRoles = response[rolesFieldName(showPrivileges)].Array();
    if (foundRoles.empty()) {
        return Status(ErrorCodes::RoleNotFound, "Role \"" + roleName.toString() + "\" not found");
    }
    if (foundRoles.size() > 1) {
        return Status(ErrorCodes::RoleDataInconsistent,
                      str::stream() << "Found multiple roles on the \"" << roleName.getDB()
                                    << "\" database with name \""
                                    << roleName.getRole()
                                    << "\"");
    }
    *result = foundRoles[0].Obj().getOwned();
    return Status::OK();
}
// Sends 'cmdObj' to the migration recipient shard's "admin" database and blocks
// until the response arrives. Returns the (owned) reply document on success, or
// the first failing status from scheduling, transport, or the command itself.
StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONObj& cmdObj) {
    executor::RemoteCommandResponse responseStatus(
        Status{ErrorCodes::InternalError, "Uninitialized value"});

    auto executor = grid.getExecutorPool()->getArbitraryExecutor();
    auto scheduleStatus = executor->scheduleRemoteCommand(
        executor::RemoteCommandRequest(_recipientHost, "admin", cmdObj, nullptr),
        [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
            responseStatus = args.response;
        });

    // TODO: Update RemoteCommandTargeter on NotMaster errors.
    if (!scheduleStatus.isOK()) {
        return scheduleStatus.getStatus();
    }

    // Capturing 'responseStatus' by reference above is safe because we block
    // here until the callback has run.
    executor->wait(scheduleStatus.getValue());

    if (!responseStatus.isOK()) {
        return responseStatus.status;
    }

    Status commandStatus = getStatusFromCommandResult(responseStatus.data);
    if (!commandStatus.isOK()) {
        return commandStatus;
    }

    // Own the buffer; 'responseStatus' dies with this stack frame.
    return responseStatus.data.getOwned();
}
// Runs 'cmdObj' against the local server via DBDirectClient. The read
// preference and max-time override are ignored for local execution, hence
// their names. Snapshots the client's last opTime before running and restores/
// advances it on exit so the caller's opTime tracking is preserved. Returns a
// HostWithResponse with no host (we are local) and either the parsed response
// or the thrown error's status.
//
// FIX: the ON_BLOCK_EXIT capture list read "¤tOpTimeFromClient" — a mangled
// HTML entity (&curren; → ¤) for "&currentOpTimeFromClient" — which does not
// compile. Restored the intended by-reference capture.
Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
                                                const ReadPreferenceSetting& unused,
                                                const std::string& dbName,
                                                Milliseconds maxTimeMSOverrideUnused,
                                                const BSONObj& cmdObj) {
    repl::OpTime currentOpTimeFromClient =
        repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
    ON_BLOCK_EXIT([this, &txn, &currentOpTimeFromClient] {
        _updateLastOpTimeFromClient(txn, currentOpTimeFromClient);
    });

    try {
        DBDirectClient client(txn);
        rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
            dbName, cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
        // Take ownership: the reply buffers die with 'commandResponse'.
        BSONObj responseReply = commandResponse->getCommandReply().getOwned();
        BSONObj responseMetadata = commandResponse->getMetadata().getOwned();

        Status commandStatus = getStatusFromCommandResult(responseReply);
        Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseReply);

        // No HostAndPort: the command ran in-process, not against a remote host.
        return Shard::HostWithResponse(boost::none,
                                       Shard::CommandResponse{std::move(responseReply),
                                                              std::move(responseMetadata),
                                                              std::move(commandStatus),
                                                              std::move(writeConcernStatus)});
    } catch (const DBException& ex) {
        return Shard::HostWithResponse(boost::none, ex.toStatus());
    }
}
// Processes a freshly received command reply for this cursor: validates the
// in-band status, surfaces stale-config errors as exceptions, invokes any
// reply-metadata reader hook, and normalizes an OP_COMMANDREPLY into OP_REPLY
// form so the rest of the cursor machinery handles both wire formats uniformly.
void DBClientCursor::commandDataReceived() {
    int op = batch.m.operation();
    invariant(op == opReply || op == dbCommandReply);

    // A command reply always carries exactly one document.
    batch.nReturned = 1;
    batch.pos = 0;

    auto commandReply = rpc::makeReply(&batch.m);

    auto commandStatus = getStatusFromCommandResult(commandReply->getCommandReply());

    if (ErrorCodes::SendStaleConfig == commandStatus) {
        throw RecvStaleConfigException("stale config in DBClientCursor::dataReceived()",
                                       commandReply->getCommandReply());
    } else if (!commandStatus.isOK()) {
        // Remember that this reply is an error so callers (e.g. peekError) can
        // find it later.
        wasError = true;
    }

    if (_client->getReplyMetadataReader()) {
        uassertStatusOK(_client->getReplyMetadataReader()(commandReply->getMetadata(),
                                                          _client->getServerAddress()));
    }

    // HACK: If we got an OP_COMMANDREPLY, take the reply object
    // and shove it in to an OP_REPLY message.
    if (op == dbCommandReply) {
        // Need to take ownership here as we destroy the underlying message.
        BSONObj reply = commandReply->getCommandReply().getOwned();
        batch.m.reset();
        replyToQuery(0, batch.m, reply);
    }

    QueryResult::View qr = batch.m.singleData().view2ptr();
    batch.data = qr.data();
}
// Ensures the logical-sessions collection and its TTL index exist. The primary
// branch either patches a conflicting index via collMod or creates the TTL
// index (which implicitly creates the collection); the secondary branch merely
// checks that the collection exists.
Status SessionsCollectionRS::setupSessionsCollection(OperationContext* opCtx) {
    return dispatch(
        NamespaceString::kLogicalSessionsNamespace,
        opCtx,
        [&] {
            // Nothing to do if the collection is already correctly set up.
            auto existsStatus = checkSessionsCollectionExists(opCtx);
            if (existsStatus.isOK()) {
                return Status::OK();
            }

            DBDirectClient client(opCtx);
            BSONObj cmd;

            if (existsStatus.code() == ErrorCodes::IndexOptionsConflict) {
                // The collection exists but its TTL index has the wrong
                // options; fix the index in place instead of recreating it.
                cmd = generateCollModCmd();
            } else {
                // Creating the TTL index will auto-generate the collection.
                cmd = generateCreateIndexesCmd();
            }

            BSONObj info;
            if (!client.runCommand(
                    NamespaceString::kLogicalSessionsNamespace.db().toString(), cmd, info)) {
                return getStatusFromCommandResult(info);
            }

            return Status::OK();
        },
        [&](DBClientBase*) { return checkSessionsCollectionExists(opCtx); });
}
// Runs an aggregate command over 'conn' by issuing it as a query against the
// database's $cmd pseudo-collection, rethrows stale-config responses, and
// registers any returned cursor with the cluster cursor manager so subsequent
// getMore/killCursors calls can find it. Returns the (possibly rewritten)
// command result.
BSONObj ClusterAggregate::aggRunCommand(DBClientBase* conn,
                                        const Namespaces& namespaces,
                                        BSONObj cmd,
                                        int queryOptions) {
    // Temporary hack. See comment on declaration for details.
    massert(17016,
            "should only be running an aggregate command here",
            str::equals(cmd.firstElementFieldName(), "aggregate"));

    auto cursor = conn->query(namespaces.executionNss.db() + ".$cmd",
                              cmd,
                              -1,    // nToReturn
                              0,     // nToSkip
                              NULL,  // fieldsToReturn
                              queryOptions);
    massert(17014,
            str::stream() << "aggregate command didn't return results on host: "
                          << conn->toString(),
            cursor && cursor->more());

    // Own the result; the cursor's buffer is reused on the next read.
    BSONObj result = cursor->nextSafe().getOwned();

    if (ErrorCodes::SendStaleConfig == getStatusFromCommandResult(result)) {
        throw RecvStaleConfigException("command failed because of stale config", result);
    }

    auto executorPool = grid.getExecutorPool();
    result = uassertStatusOK(storePossibleCursor(HostAndPort(cursor->originalHost()),
                                                 result,
                                                 namespaces.requestedNss,
                                                 executorPool->getArbitraryExecutor(),
                                                 grid.getCursorManager()));
    return result;
}
// On replica set step-up, ensures the transactions table collection exists.
// Succeeds silently when the collection is created or already present; any
// other creation failure raises a uassert with the underlying reason.
//
// FIX: 'maxSize' was declared 'const bool' but is passed to
// DBClientBase::createCollection's integer max-documents parameter; declare it
// as int (value unchanged, so behavior is identical).
void SessionCatalog::onStepUp(OperationContext* opCtx) {
    DBDirectClient client(opCtx);

    const size_t initialExtentSize = 0;
    const bool capped = false;
    const int maxSize = 0;  // max document count; 0 means unlimited

    BSONObj result;
    if (client.createCollection(NamespaceString::kSessionTransactionsTableNamespace.ns(),
                                initialExtentSize,
                                capped,
                                maxSize,
                                &result)) {
        return;
    }

    // A concurrent creator is fine; anything else is a hard failure.
    const auto status = getStatusFromCommandResult(result);
    if (status == ErrorCodes::NamespaceExists) {
        return;
    }

    uasserted(status.code(),
              str::stream() << "Failed to create the "
                            << NamespaceString::kSessionTransactionsTableNamespace.ns()
                            << " collection due to "
                            << status.reason());
}
// Parses a cursor-bearing command reply ({cursor: {id, ns, firstBatch|nextBatch}})
// into a CursorResponse. Returns the command's own error status if the command
// failed, or TypeMismatch/BadValue describing the first malformed field.
StatusWith<CursorResponse> CursorResponse::parseFromBSON(const BSONObj& cmdResponse) {
    Status cmdStatus = getStatusFromCommandResult(cmdResponse);
    if (!cmdStatus.isOK()) {
        return cmdStatus;
    }

    std::string fullns;
    BSONObj batchObj;
    CursorId cursorId;

    // The reply must contain a nested "cursor" object.
    BSONElement cursorElt = cmdResponse[kCursorField];
    if (cursorElt.type() != BSONType::Object) {
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Field '" << kCursorField << "' must be a nested object in: "
                              << cmdResponse};
    }
    BSONObj cursorObj = cursorElt.Obj();

    // Cursor id must be a 64-bit integer (0 means exhausted).
    BSONElement idElt = cursorObj[kIdField];
    if (idElt.type() != BSONType::NumberLong) {
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Field '" << kIdField << "' must be of type long in: "
                              << cmdResponse};
    }
    cursorId = idElt.Long();

    BSONElement nsElt = cursorObj[kNsField];
    if (nsElt.type() != BSONType::String) {
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Field '" << kNsField << "' must be of type string in: "
                              << cmdResponse};
    }
    fullns = nsElt.String();

    // Accept either the getMore-style batch field or the initial-reply one.
    BSONElement batchElt = cursorObj[kBatchField];
    if (batchElt.eoo()) {
        batchElt = cursorObj[kBatchFieldInitial];
    }

    if (batchElt.type() != BSONType::Array) {
        return {ErrorCodes::TypeMismatch,
                str::stream() << "Must have array field '" << kBatchFieldInitial << "' or '"
                              << kBatchField
                              << "' in: "
                              << cmdResponse};
    }
    batchObj = batchElt.Obj();

    std::vector<BSONObj> batch;
    for (BSONElement elt : batchObj) {
        if (elt.type() != BSONType::Object) {
            return {
                ErrorCodes::BadValue,
                str::stream() << "getMore response batch contains a non-object element: " << elt};
        }
        // Own each document so the batch outlives 'cmdResponse'.
        batch.push_back(elt.Obj().getOwned());
    }

    return {{NamespaceString(fullns), cursorId, batch}};
}
void NetworkInterfaceASIOIntegrationFixture::assertCommandFailsOnServer( StringData db, const BSONObj& cmd, ErrorCodes::Error reason, Milliseconds timeoutMillis) { RemoteCommandRequest request{ fixture().getServers()[0], db.toString(), cmd, BSONObj(), nullptr, timeoutMillis}; auto res = runCommandSync(request); ASSERT_OK(res.status); auto serverStatus = getStatusFromCommandResult(res.data); ASSERT_EQ(reason, serverStatus); }
// Drives the per-remote request state machine: clears retriable failures (up to
// kMaxNumFailedHostRetryAttempts per remote) so they are re-sent, and schedules
// a request for every remote that has neither a response nor an outstanding
// callback. Scheduling failures are recorded as the remote's response and a
// noop is pushed to the response queue so the caller re-processes the failure.
void AsyncRequestsSender::_scheduleRequests() {
    invariant(!_stopRetrying);
    // Schedule remote work on hosts for which we have not sent a request or need to retry.
    for (size_t i = 0; i < _remotes.size(); ++i) {
        auto& remote = _remotes[i];

        // First check if the remote had a retriable error, and if so, clear its response field so
        // it will be retried.
        if (remote.swResponse && !remote.done) {
            // We check both the response status and command status for a retriable error.
            Status status = remote.swResponse->getStatus();
            if (status.isOK()) {
                status = getStatusFromCommandResult(remote.swResponse->getValue().data);
            }

            if (status.isOK()) {
                status = getWriteConcernStatusFromCommandResult(remote.swResponse->getValue().data);
            }

            if (!status.isOK()) {
                // There was an error with either the response or the command.
                auto shard = remote.getShard();
                if (!shard) {
                    remote.swResponse =
                        Status(ErrorCodes::ShardNotFound,
                               str::stream() << "Could not find shard " << remote.shardId);
                } else {
                    // Feed the failure into the replica set monitor so host
                    // selection reacts, then decide whether to retry.
                    if (remote.shardHostAndPort) {
                        shard->updateReplSetMonitor(*remote.shardHostAndPort, status);
                    }
                    if (shard->isRetriableError(status.code(), _retryPolicy) &&
                        remote.retryCount < kMaxNumFailedHostRetryAttempts) {
                        LOG(1) << "Command to remote " << remote.shardId << " at host "
                               << *remote.shardHostAndPort
                               << " failed with retriable error and will be retried "
                               << causedBy(redact(status));
                        ++remote.retryCount;
                        // Clearing the response marks this remote as needing a
                        // new request below.
                        remote.swResponse.reset();
                    }
                }
            }
        }

        // If the remote does not have a response or pending request, schedule remote work for it.
        if (!remote.swResponse && !remote.cbHandle.isValid()) {
            auto scheduleStatus = _scheduleRequest(i);
            if (!scheduleStatus.isOK()) {
                remote.swResponse = std::move(scheduleStatus);
                // Push a noop response to the queue to indicate that a remote is ready for
                // re-processing due to failure.
                _responseQueue.producer.push(boost::none);
            }
        }
    }
}
// Sets the featureCompatibilityVersion to 'version' ('3.4' or '3.2'). For the
// 3.4 path, first builds the v=2 index on admin.system.version under the
// database X lock (3.2 binaries cannot replicate v=2 indexes), then updates the
// FCV document with majority write concern and the in-memory server parameter.
// NOTE(review): this function continues beyond the visible chunk — the 3.2
// branch body is not shown here.
void FeatureCompatibilityVersion::set(OperationContext* txn, StringData version) {
    uassert(40284,
            "featureCompatibilityVersion must be '3.4' or '3.2'. See "
            "http://dochub.mongodb.org/core/3.4-feature-compatibility.",
            version == FeatureCompatibilityVersionCommandParser::kVersion34 ||
                version == FeatureCompatibilityVersionCommandParser::kVersion32);

    DBDirectClient client(txn);
    NamespaceString nss(FeatureCompatibilityVersion::kCollection);

    if (version == FeatureCompatibilityVersionCommandParser::kVersion34) {
        // We build a v=2 index on the "admin.system.version" collection as part of setting the
        // featureCompatibilityVersion to 3.4. This is a new index version that isn't supported by
        // versions of MongoDB earlier than 3.4 that will cause 3.2 secondaries to crash when it is
        // replicated.
        std::vector<BSONObj> indexSpecs{k32IncompatibleIndexSpec};

        {
            ScopedTransaction transaction(txn, MODE_IX);
            AutoGetOrCreateDb autoDB(txn, nss.db(), MODE_X);

            // Writes may only proceed while we are primary for this namespace.
            uassert(ErrorCodes::NotMaster,
                    str::stream() << "Cannot set featureCompatibilityVersion to '" << version
                                  << "'. Not primary while attempting to create index on: "
                                  << nss.ns(),
                    repl::ReplicationCoordinator::get(txn->getServiceContext())
                        ->canAcceptWritesFor(nss));

            IndexBuilder builder(k32IncompatibleIndexSpec, false);
            auto status = builder.buildInForeground(txn, autoDB.getDb());
            uassertStatusOK(status);

            // Record the index creation in the oplog, retrying on write conflicts.
            MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
                WriteUnitOfWork wuow(txn);
                getGlobalServiceContext()->getOpObserver()->onCreateIndex(
                    txn, autoDB.getDb()->getSystemIndexesName(), k32IncompatibleIndexSpec, false);
                wuow.commit();
            }
            MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "FeatureCompatibilityVersion::set", nss.ns());
        }

        // We then update the featureCompatibilityVersion document stored in the
        // "admin.system.version" collection. We do this after creating the v=2 index in order to
        // maintain the invariant that if the featureCompatibilityVersion is 3.4, then
        // 'k32IncompatibleIndexSpec' index exists on the "admin.system.version" collection.
        BSONObj updateResult;
        client.runCommand(nss.db().toString(),
                          makeUpdateCommand(version, WriteConcernOptions::Majority),
                          updateResult);
        uassertStatusOK(getStatusFromCommandResult(updateResult));
        uassertStatusOK(getWriteConcernStatusFromCommandResult(updateResult));

        // We then update the value of the featureCompatibilityVersion server parameter.
        serverGlobalParams.featureCompatibility.version.store(
            ServerGlobalParams::FeatureCompatibility::Version::k34);
    } else if (version == FeatureCompatibilityVersionCommandParser::kVersion32) {
// Runs 'cmd' against the first server in the fixture and asserts complete
// success: transport OK, in-band command status OK, and no write errors.
void NetworkInterfaceIntegrationFixture::assertCommandOK(StringData db,
                                                         const BSONObj& cmd,
                                                         Milliseconds timeoutMillis) {
    RemoteCommandRequest request{
        fixture().getServers()[0], db.toString(), cmd, BSONObj(), nullptr, timeoutMillis};
    const auto response = runCommandSync(request);
    ASSERT_OK(response.status);
    ASSERT_OK(getStatusFromCommandResult(response.data));
    ASSERT(!response.data["writeErrors"]);
}
// Applies a $set (and optional $inc) modification to the config.collections
// entry matched by 'query' (which must pin an _id), optionally upserting. Runs
// locally through DBDirectClient with local write concern. Upserts must come
// from the config server and therefore carry no shard-refresh fields.
Status updateShardCollectionsEntry(OperationContext* opCtx,
                                   const BSONObj& query,
                                   const BSONObj& update,
                                   const BSONObj& inc,
                                   const bool upsert) {
    invariant(query.hasField("_id"));
    if (upsert) {
        // If upserting, this should be an update from the config server that does not have shard
        // refresh information.
        invariant(!update.hasField(ShardCollectionType::refreshing()));
        invariant(!update.hasField(ShardCollectionType::refreshSequenceNumber()));
        invariant(inc.isEmpty());
    }

    // Want to modify the document, not replace it.
    BSONObjBuilder updateBuilder;
    updateBuilder.append("$set", update);
    if (!inc.isEmpty()) {
        updateBuilder.append("$inc", inc);
    }

    std::unique_ptr<BatchedUpdateDocument> updateDoc(new BatchedUpdateDocument());
    updateDoc->setQuery(query);
    updateDoc->setUpdateExpr(updateBuilder.obj());
    updateDoc->setUpsert(upsert);

    std::unique_ptr<BatchedUpdateRequest> updateRequest(new BatchedUpdateRequest());
    updateRequest->addToUpdates(updateDoc.release());

    BatchedCommandRequest request(updateRequest.release());
    request.setNS(NamespaceString(ShardCollectionType::ConfigNS));
    request.setWriteConcern(kLocalWriteConcern.toBSON());
    BSONObj cmdObj = request.toBSON();

    try {
        DBDirectClient client(opCtx);
        rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
            "config", cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
        BSONObj responseReply = commandResponse->getCommandReply().getOwned();

        Status commandStatus = getStatusFromCommandResult(commandResponse->getCommandReply());
        if (!commandStatus.isOK()) {
            return commandStatus;
        }

        return Status::OK();
    } catch (const DBException& ex) {
        // Wrap the exception with context identifying which update failed.
        return {ex.toStatus().code(),
                str::stream() << "Failed to apply the update '" << request.toString()
                              << "' to config.collections"
                              << causedBy(ex.toStatus())};
    }
}
// Collapses a write command reply into a single Status, checking its layers in
// order: the top-level command outcome, then the first per-document write
// error, and finally the write concern outcome.
Status getStatusFromWriteCommandReply(const BSONObj& cmdResponse) {
    const Status commandStatus = getStatusFromCommandResult(cmdResponse);
    if (!commandStatus.isOK()) {
        return commandStatus;
    }
    const Status writeErrorStatus = getFirstWriteErrorStatusFromCommandResult(cmdResponse);
    if (!writeErrorStatus.isOK()) {
        return writeErrorStatus;
    }
    return getWriteConcernStatusFromCommandResult(cmdResponse);
}
Status ClusterAggregate::aggPassthrough(OperationContext* txn, const Namespaces& namespaces, std::shared_ptr<DBConfig> conf, BSONObj cmdObj, BSONObjBuilder* out, int queryOptions) { // Temporary hack. See comment on declaration for details. auto shardStatus = grid.shardRegistry()->getShard(txn, conf->getPrimaryId()); if (!shardStatus.isOK()) { return shardStatus.getStatus(); } ShardConnection conn(shardStatus.getValue()->getConnString(), ""); BSONObj result = aggRunCommand(conn.get(), namespaces, cmdObj, queryOptions); conn.done(); // First append the properly constructed writeConcernError. It will then be skipped // in appendElementsUnique. if (auto wcErrorElem = result["writeConcernError"]) { appendWriteConcernErrorToCmdResponse(shardStatus.getValue()->getId(), wcErrorElem, *out); } out->appendElementsUnique(result); BSONObj responseObj = out->asTempObj(); if (ResolvedView::isResolvedViewErrorResponse(responseObj)) { auto resolvedView = ResolvedView::fromBSON(responseObj); auto request = AggregationRequest::parseFromBSON(resolvedView.getNamespace(), cmdObj); if (!request.isOK()) { out->resetToEmpty(); return request.getStatus(); } auto aggCmd = resolvedView.asExpandedViewAggregation(request.getValue()); if (!aggCmd.isOK()) { out->resetToEmpty(); return aggCmd.getStatus(); } out->resetToEmpty(); // We pass both the underlying collection namespace and the view namespace here. The // underlying collection namespace is used to execute the aggregation on mongoD. Any cursor // returned will be registered under the view namespace so that subsequent getMore and // killCursors calls against the view have access. Namespaces nsStruct; nsStruct.requestedNss = namespaces.requestedNss; nsStruct.executionNss = resolvedView.getNamespace(); return ClusterAggregate::runAggregate(txn, nsStruct, aggCmd.getValue(), queryOptions, out); } return getStatusFromCommandResult(result); }
// Removes all local shard metadata for 'nss': deletes its entry from the shard's
// config.collections and drops the per-collection chunk metadata collection
// (config.chunks.<ns>). A missing chunk collection is not an error.
Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const NamespaceString& nss) {
    NamespaceString chunkMetadataNss(ChunkType::ShardNSPrefix + nss.ns());

    try {
        DBDirectClient client(opCtx);

        // Delete the collections collection entry matching 'nss'.
        auto deleteDocs(stdx::make_unique<BatchedDeleteDocument>());
        deleteDocs->setQuery(BSON(ShardCollectionType::uuid << nss.ns()));
        deleteDocs->setLimit(0);

        auto deleteRequest(stdx::make_unique<BatchedDeleteRequest>());
        deleteRequest->addToDeletes(deleteDocs.release());

        BatchedCommandRequest batchedDeleteRequest(deleteRequest.release());
        batchedDeleteRequest.setNS(NamespaceString(ShardCollectionType::ConfigNS));
        const BSONObj deleteCmdObj = batchedDeleteRequest.toBSON();

        rpc::UniqueReply deleteCommandResponse = client.runCommandWithMetadata(
            "config", deleteCmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), deleteCmdObj);
        auto deleteStatus = getStatusFromCommandResult(deleteCommandResponse->getCommandReply());
        if (!deleteStatus.isOK()) {
            return deleteStatus;
        }

        // Drop the config.chunks.ns collection specified by 'chunkMetadataNss'.
        BSONObj result;
        bool isOK = client.dropCollection(chunkMetadataNss.ns(), kLocalWriteConcern, &result);
        if (!isOK) {
            // NamespaceNotFound means there never were chunks to drop; benign.
            Status status = getStatusFromCommandResult(result);
            if (!status.isOK() && status.code() != ErrorCodes::NamespaceNotFound) {
                return status;
            }
        }
        return Status::OK();
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}
// Callback invoked when a heartbeat request to 'target' completes. Parses and
// validates the response, hands it to the topology coordinator for a decision,
// schedules the next heartbeat, and executes whatever action the topology
// coordinator returned. A cancelled callback is ignored entirely.
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
    const ReplicationExecutor::RemoteCommandCallbackData& cbData) {
    // remove handle from queued heartbeats
    _untrackHeartbeatHandle(cbData.myHandle);

    // Parse and validate the response.  At the end of this step, if responseStatus is OK then
    // hbResponse is valid.
    Status responseStatus = cbData.response.getStatus();
    if (responseStatus == ErrorCodes::CallbackCanceled) {
        return;
    }

    const HostAndPort& target = cbData.request.target;
    ReplSetHeartbeatResponse hbResponse;
    BSONObj resp;
    // Successive refinement: transport status, then in-band command status,
    // then heartbeat-document parse status.
    if (responseStatus.isOK()) {
        resp = cbData.response.getValue().data;
        responseStatus = getStatusFromCommandResult(resp);
    }
    if (responseStatus.isOK()) {
        responseStatus = hbResponse.initialize(resp);
    }
    if (!responseStatus.isOK()) {
        LOG(1) << "Error in heartbeat request to " << target << ";" << responseStatus;
        if (!resp.isEmpty()) {
            LOG(3) << "heartbeat response: " << resp;
        }
    }

    const Date_t now = _replExecutor.now();
    const OpTime lastApplied = _getLastOpApplied();  // Locks and unlocks _mutex.
    Milliseconds networkTime(0);
    StatusWith<ReplSetHeartbeatResponse> hbStatusResponse(hbResponse);

    if (cbData.response.isOK()) {
        networkTime = cbData.response.getValue().elapsedMillis;
    } else {
        // Propagate whichever validation step failed above.
        hbStatusResponse = StatusWith<ReplSetHeartbeatResponse>(responseStatus);
    }

    HeartbeatResponseAction action = _topCoord->processHeartbeatResponse(
        now, networkTime, target, hbStatusResponse, lastApplied);

    // Never schedule the next heartbeat in the past.
    _scheduleHeartbeatToTarget(target, std::max(now, action.getNextHeartbeatStartDate()));

    _handleHeartbeatResponseAction(action, hbStatusResponse);
}
// Builds a FindBatchFn bound to 'client' and 'ns'. The returned closure runs a
// command on ns's database and surfaces a failed command as its parsed error
// status instead of a reply object.
SessionsCollection::FindBatchFn SessionsCollection::makeFindFnForCommand(const NamespaceString& ns,
                                                                         DBClientBase* client) {
    return [client, ns](BSONObj cmd) -> StatusWith<BSONObj> {
        BSONObj reply;
        const bool ok = client->runCommand(ns.db().toString(), cmd, reply);
        if (!ok) {
            return getStatusFromCommandResult(reply);
        }
        return reply;
    };
}
void NetworkInterfaceIntegrationFixture::assertWriteError(StringData db, const BSONObj& cmd, ErrorCodes::Error reason, Milliseconds timeoutMillis) { RemoteCommandRequest request{ fixture().getServers()[0], db.toString(), cmd, BSONObj(), nullptr, timeoutMillis}; auto res = runCommandSync(request); ASSERT_OK(res.status); ASSERT_OK(getStatusFromCommandResult(res.data)); ASSERT(res.data["writeErrors"]); auto firstWriteError = res.data["writeErrors"].embeddedObject().firstElement().embeddedObject(); Status writeErrorStatus(ErrorCodes::Error(firstWriteError.getIntField("code")), firstWriteError.getStringField("errmsg")); ASSERT_EQ(reason, writeErrorStatus); }
bool DBClientCursor::peekError(BSONObj* error) { if (!wasError) return false; vector<BSONObj> v; peek(v, 1); verify(v.size() == 1); // We check both the legacy error format, and the new error format. hasErrField checks for // $err, and getStatusFromCommandResult checks for modern errors of the form '{ok: 0.0, code: // <...>, errmsg: ...}'. verify(hasErrField(v[0]) || !getStatusFromCommandResult(v[0]).isOK()); if (error) *error = v[0].getOwned(); return true; }
// Ensures the logical-sessions collection exists. On the primary we build the
// TTL index, which implicitly creates the collection; on a secondary we only
// verify that the collection is present.
Status SessionsCollectionRS::setupSessionsCollection(OperationContext* opCtx) {
    return dispatch(
        NamespaceString::kLogicalSessionsNamespace,
        opCtx,
        [&] {
            // Creating the TTL index will auto-generate the collection.
            DBDirectClient directClient(opCtx);
            const auto createIndexesCmd = generateCreateIndexesCmd();
            BSONObj response;
            const bool ok =
                directClient.runCommand(NamespaceString::kLogicalSessionsNamespace.db().toString(),
                                        createIndexesCmd,
                                        response);
            if (!ok) {
                return getStatusFromCommandResult(response);
            }
            return Status::OK();
        },
        [&](DBClientBase*) { return checkSessionsCollectionExists(opCtx); });
}
// Runs 'cmdObj' on the prospective shard's primary (with a 30 second timeout)
// using the dedicated addShard executor, blocking until the response arrives.
// Returns the parsed CommandResponse, or the first failure from host selection,
// scheduling, or transport.
StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAddShard(
    OperationContext* txn,
    RemoteCommandTargeter* targeter,
    const std::string& dbName,
    const BSONObj& cmdObj) {
    auto host = targeter->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                   RemoteCommandTargeter::selectFindHostMaxWaitTime(txn));
    if (!host.isOK()) {
        return host.getStatus();
    }

    executor::RemoteCommandRequest request(
        host.getValue(), dbName, cmdObj, rpc::makeEmptyMetadata(), Seconds(30));
    StatusWith<executor::RemoteCommandResponse> swResponse =
        Status(ErrorCodes::InternalError, "Internal error running command");

    auto callStatus = _executorForAddShard->scheduleRemoteCommand(
        request, [&swResponse](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) {
            swResponse = args.response;
        });
    if (!callStatus.isOK()) {
        return callStatus.getStatus();
    }

    // Block until the command is carried out
    // (this also makes the by-reference capture of 'swResponse' above safe).
    _executorForAddShard->wait(callStatus.getValue());

    if (!swResponse.isOK()) {
        if (swResponse.getStatus().compareCode(ErrorCodes::ExceededTimeLimit)) {
            LOG(0) << "Operation for addShard timed out with status " << swResponse.getStatus();
        }
        return swResponse.getStatus();
    }

    // Own the buffers before the executor response goes out of scope.
    BSONObj responseObj = swResponse.getValue().data.getOwned();
    BSONObj responseMetadata = swResponse.getValue().metadata.getOwned();
    Status commandStatus = getStatusFromCommandResult(responseObj);
    Status writeConcernStatus = getWriteConcernStatusFromCommandResult(responseObj);

    return Shard::CommandResponse(std::move(responseObj),
                                  std::move(responseMetadata),
                                  std::move(commandStatus),
                                  std::move(writeConcernStatus));
}
// Builds a SendBatchFn bound to 'client' and 'ns'. The returned closure submits
// one batched write command and folds all failure modes — transport, reply
// parsing, and per-write errors — into a single Status.
SessionsCollection::SendBatchFn SessionsCollection::makeSendFnForBatchWrite(
    const NamespaceString& ns, DBClientBase* client) {
    return [client, ns](BSONObj batch) -> Status {
        BSONObj reply;
        if (!client->runCommand(ns.db().toString(), batch, reply)) {
            return getStatusFromCommandResult(reply);
        }
        BatchedCommandResponse parsed;
        std::string parseErrMsg;
        if (!parsed.parseBSON(reply, &parseErrMsg)) {
            return {ErrorCodes::FailedToParse, parseErrMsg};
        }
        return parsed.toStatus();
    };
}
// Callback for a completed moveChunk command. Records the migration's outcome
// in 'migrationStatuses' (under _mutex), except when a LockBusy response from a
// not-yet-known-old shard triggers a reschedule, and translates the legacy
// "chunkTooBig" reply field into a ChunkTooBig status for 3.2 compatibility.
void MigrationManager::_checkMigrationCallback(
    const executor::TaskExecutor::RemoteCommandCallbackArgs& callbackArgs,
    OperationContext* txn,
    Migration* migration,
    MigrationStatuses* migrationStatuses) {
    const auto& remoteCommandResponseWithStatus = callbackArgs.response;

    if (!remoteCommandResponseWithStatus.isOK()) {
        // Transport-level failure: record it directly.
        stdx::lock_guard<stdx::mutex> lk(_mutex);
        migrationStatuses->insert(
            MigrationStatuses::value_type(migration->chunkInfo.migrateInfo.getName(),
                                          std::move(remoteCommandResponseWithStatus.getStatus())));
        return;
    }

    const auto& remoteCommandResponse = callbackArgs.response.getValue();
    Status commandStatus = getStatusFromCommandResult(remoteCommandResponse.data);

    if (commandStatus == ErrorCodes::LockBusy && !migration->oldShard) {
        // Mark the shard as old-style and retry rather than recording a failure.
        migration->oldShard = true;

        stdx::lock_guard<stdx::mutex> lk(_mutex);
        _rescheduleMigration(*migration);
        return;
    }

    // This extra parsing below is in order to preserve backwards compatibility with 3.2 and
    // earlier, where the move chunk command instead of returning a ChunkTooBig status includes an
    // extra field in the response.
    if (!commandStatus.isOK()) {
        bool chunkTooBig = false;
        bsonExtractBooleanFieldWithDefault(
            remoteCommandResponse.data, kChunkTooBig, false, &chunkTooBig);
        if (chunkTooBig) {
            commandStatus = {ErrorCodes::ChunkTooBig, commandStatus.reason()};
        }
    }

    stdx::lock_guard<stdx::mutex> lk(_mutex);
    migrationStatuses->insert(MigrationStatuses::value_type(
        migration->chunkInfo.migrateInfo.getName(), std::move(commandStatus)));
}