Example #1
InsertOp parseLegacyInsert(const Message& msgRaw) {
    DbMessage msg(msgRaw);

    InsertOp op;
    op.ns = NamespaceString(msg.getns());
    op.continueOnError = msg.reservedField() & InsertOption_ContinueOnError;
    uassert(ErrorCodes::InvalidLength, "Need at least one object to insert", msg.moreJSObjs());
    while (msg.moreJSObjs()) {
        op.documents.push_back(msg.nextJsObj());
    }
    // There is no limit on the number of inserts in a legacy batch.

    return op;
}
boost::optional<Document> DocumentSourceOut::getNext() {
    pExpCtx->checkForInterrupt();

    // make sure we only write out once
    if (_done)
        return boost::none;
    _done = true;

    verify(_mongod);
    DBClientBase* conn = _mongod->directClient();

    prepTempCollection();
    verify(_tempNs.size() != 0);

    vector<BSONObj> bufferedObjects;
    int bufferedBytes = 0;
    while (boost::optional<Document> next = pSource->getNext()) {
        BSONObj toInsert = next->toBson();
        bufferedBytes += toInsert.objsize();
        if (!bufferedObjects.empty() && bufferedBytes > BSONObjMaxUserSize) {
            spill(bufferedObjects);
            bufferedObjects.clear();
            bufferedBytes = toInsert.objsize();
        }
        bufferedObjects.push_back(toInsert);
    }

    if (!bufferedObjects.empty())
        spill(bufferedObjects);

    // Checking again to make sure we didn't become sharded while running.
    uassert(17018,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' became sharded so it can't be used for $out'",
            !_mongod->isSharded(_outputNs));

    BSONObj rename =
        BSON("renameCollection" << _tempNs.ns() << "to" << _outputNs.ns() << "dropTarget" << true);
    BSONObj info;
    bool ok = conn->runCommand("admin", rename, info);
    uassert(16997, str::stream() << "renameCollection for $out failed: " << info, ok);

    // We don't need to drop the temp collection in our destructor if the rename succeeded.
    _tempNs = NamespaceString("");

    // This "DocumentSource" doesn't produce output documents. This can change in the future
    // if we support using $out in "tee" mode.
    return boost::none;
}
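A note on the buffering loop above: documents are grouped so that each spill() stays under BSONObjMaxUserSize, flushing the current batch before the new document would push it over the limit. A minimal standalone sketch of the same size-bounded batching pattern (batchBySize and the callback sink are hypothetical names, not MongoDB APIs):

#include <cstddef>
#include <functional>
#include <vector>

// Hypothetical sketch: accumulate byte-sized items into batches bounded by a byte
// budget, flushing through a caller-supplied sink.
void batchBySize(const std::vector<size_t>& itemSizes,
                 size_t maxBatchBytes,
                 const std::function<void(const std::vector<size_t>&)>& flush) {
    std::vector<size_t> batch;
    size_t batchBytes = 0;
    for (size_t size : itemSizes) {
        batchBytes += size;
        // Flush first if a non-empty batch would exceed the budget, mirroring the
        // bufferedObjects/bufferedBytes check in getNext() above.
        if (!batch.empty() && batchBytes > maxBatchBytes) {
            flush(batch);
            batch.clear();
            batchBytes = size;
        }
        batch.push_back(size);
    }
    if (!batch.empty())
        flush(batch);
}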
Example #3
DeleteOp parseLegacyDelete(const Message& msgRaw) {
    DbMessage msg(msgRaw);

    DeleteOp op;
    op.ns = NamespaceString(msg.getns());

    // Legacy deletes only allowed one delete per operation. Layout is flags, query.
    op.deletes.emplace_back();
    auto& singleDelete = op.deletes.back();
    const int flags = msg.pullInt();
    singleDelete.multi = !(flags & RemoveOption_JustOne);
    singleDelete.query = msg.nextJsObj();

    return op;
}
Example #4
        virtual Status checkAuthForCommand(ClientBasic* client,
                                           const std::string& dbname,
                                           const BSONObj& cmdObj) {
            std::string ns = parseNs(dbname, cmdObj);

            ActionSet actions;
            actions.addAction(ActionType::insert);
            actions.addAction(ActionType::createIndex); // SERVER-11418

            if (!client->getAuthorizationSession()->isAuthorizedForActionsOnResource(
                    ResourcePattern::forExactNamespace(NamespaceString(ns)), actions)) {
                return Status(ErrorCodes::Unauthorized, "Unauthorized");
            }
            return Status::OK();
        }
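The pattern in this example recurs throughout the command layer: resolve the target namespace with parseNs(), build an ActionSet, and ask the AuthorizationSession whether the client may perform those actions on that exact namespace. A hedged sketch of the same check for a hypothetical read-only command (the command class is invented; the calls mirror the excerpt above):

        virtual Status checkAuthForCommand(ClientBasic* client,
                                           const std::string& dbname,
                                           const BSONObj& cmdObj) {
            ActionSet actions;
            actions.addAction(ActionType::find);  // reads only, unlike the insert/createIndex case above

            const NamespaceString nss(parseNs(dbname, cmdObj));
            if (!client->getAuthorizationSession()->isAuthorizedForActionsOnResource(
                    ResourcePattern::forExactNamespace(nss), actions)) {
                return Status(ErrorCodes::Unauthorized, "Unauthorized");
            }
            return Status::OK();
        }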
Example #5
    Status WriteCmd::checkAuthForCommand( ClientBasic* client,
                                          const std::string& dbname,
                                          const BSONObj& cmdObj ) {

        Status status( auth::checkAuthForWriteCommand( client->getAuthorizationSession(),
                _writeType,
                NamespaceString( parseNs( dbname, cmdObj ) ),
                cmdObj ));

        if ( !status.isOK() ) {
            setLastError( status.code(), status.reason().c_str() );
        }

        return status;
    }
Example #6
 void DBClientCursor::initLazy( bool isRetry ) {
     massert( 15875 , "DBClientCursor::initLazy called on a client that doesn't support lazy" , _client->lazySupported() );
     if (DBClientWithCommands::RunCommandHookFunc hook = _client->getRunCommandHook()) {
         if (NamespaceString(ns).isCommand()) {
             BSONObjBuilder bob;
             bob.appendElements(query);
             hook(&bob);
             query = bob.obj();
         }
     }
     
     Message toSend;
     _assembleInit( toSend );
     _client->say( toSend, isRetry, &_originalHost );
 }
Example #7
bool Helpers::getLast(OperationContext* opCtx, const char* ns, BSONObj& result) {
    AutoGetCollectionForReadCommand autoColl(opCtx, NamespaceString(ns));
    auto exec = InternalPlanner::collectionScan(
        opCtx, ns, autoColl.getCollection(), PlanExecutor::NO_YIELD, InternalPlanner::BACKWARD);
    PlanExecutor::ExecState state = exec->getNext(&result, NULL);

    // Non-yielding collection scans from InternalPlanner will never error.
    invariant(PlanExecutor::ADVANCED == state || PlanExecutor::IS_EOF == state);

    if (PlanExecutor::ADVANCED == state) {
        result = result.getOwned();
        return true;
    }

    return false;
}
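Because the scan runs BACKWARD, the first document produced is the last one in natural order, and getOwned() copies it out of the storage engine's buffers before the executor is destroyed. A hedged usage sketch (the call site is assumed, not taken from the excerpt; opCtx is an OperationContext* in scope):

// Assumed call site: read the newest entry of a capped collection such as the oplog.
BSONObj lastEntry;
if (Helpers::getLast(opCtx, "local.oplog.rs", lastEntry)) {
    // lastEntry is owned (see result.getOwned() above), so it remains valid after the scan.
    log() << "last entry: " << lastEntry;
}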
    void run() {
        WriteUnitOfWork wunit(&_opCtx);
        BSONObj x = BSON("x" << 1);
        ASSERT(x["_id"].type() == 0);
        Collection* collection =
            _context.db()->getOrCreateCollection(&_opCtx, NamespaceString(ns()));
        OpDebug* const nullOpDebug = nullptr;
        ASSERT(!collection->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK());

        StatusWith<BSONObj> fixed = fixDocumentForInsert(_opCtx.getServiceContext(), x);
        ASSERT(fixed.isOK());
        x = fixed.getValue();
        ASSERT(x["_id"].type() == jstOID);
        ASSERT(collection->insertDocument(&_opCtx, InsertStatement(x), nullOpDebug, true).isOK());
        wunit.commit();
    }
    void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) {

        uassert(13119,
                (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString(),
                 NamespaceString(ns).coll == "system.indexes" || obj["_id"].type());

        string errmsg;
        if ( ! prepare( errmsg ) )
            throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );

        for ( size_t i=0; i<_conns.size(); i++ ) {
            _conns[i]->insert( ns , obj , flags);
        }

        _checkLast();
    }
Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
                                                const std::string* dbName,
                                                std::vector<CollectionType>* collections,
                                                OpTime* opTime) {
    BSONObjBuilder b;
    if (dbName) {
        invariant(!dbName->empty());
        b.appendRegex(CollectionType::fullNs(),
                      string(str::stream() << "^" << pcrecpp::RE::QuoteMeta(*dbName) << "\\."));
    }

    auto configShard = grid.shardRegistry()->getShard(txn, "config");
    auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = _exhaustiveFindOnConfig(readHost.getValue(),
                                              NamespaceString(CollectionType::ConfigNS),
                                              b.obj(),
                                              BSONObj(),
                                              boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docsOpTimePair = findStatus.getValue();

    for (const BSONObj& obj : docsOpTimePair.value) {
        const auto collectionResult = CollectionType::fromBSON(obj);
        if (!collectionResult.isOK()) {
            collections->clear();
            return {ErrorCodes::FailedToParse,
                    str::stream() << "error while parsing " << CollectionType::ConfigNS
                                  << " document: " << obj << " : "
                                  << collectionResult.getStatus().toString()};
        }

        collections->push_back(collectionResult.getValue());
    }

    if (opTime) {
        *opTime = docsOpTimePair.opTime;
    }

    return Status::OK();
}
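The filter built at the top of getCollections() anchors a regex at the start of the full namespace ("^<db>\.") so only collections of the requested database match, and QuoteMeta() escapes the database name so it is matched literally. A self-contained sketch of that shape using std::regex and a hypothetical quoteMeta() helper (not the pcrecpp call used above):

#include <iostream>
#include <regex>
#include <string>

// Hypothetical helper: escape regex metacharacters so the name is matched literally.
std::string quoteMeta(const std::string& s) {
    static const std::string meta = R"(\^$.|?*+()[]{})";
    std::string out;
    for (char c : s) {
        if (meta.find(c) != std::string::npos)
            out += '\\';
        out += c;
    }
    return out;
}

int main() {
    // Same shape as the config.collections filter: anchor at the start and require "<db>.".
    const std::string dbName = "test";
    const std::regex nsPrefix("^" + quoteMeta(dbName) + "\\.");

    std::cout << std::regex_search("test.users", nsPrefix) << "\n";     // 1: same database
    std::cout << std::regex_search("testing.users", nsPrefix) << "\n";  // 0: different database
    return 0;
}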
Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
                                                      const string& dbName,
                                                      DatabaseType* db) {
    BSONObjBuilder queryBuilder;
    queryBuilder.appendRegex(
        DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");

    const auto configShard = grid.shardRegistry()->getShard(txn, "config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = _exhaustiveFindOnConfig(readHost.getValue(),
                                              NamespaceString(DatabaseType::ConfigNS),
                                              queryBuilder.obj(),
                                              BSONObj(),
                                              1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue().value;
    if (docs.empty()) {
        return Status::OK();
    }

    BSONObj dbObj = docs.front();
    std::string actualDbName = dbObj[DatabaseType::name()].String();
    if (actualDbName == dbName) {
        if (db) {
            auto parseDBStatus = DatabaseType::fromBSON(dbObj);
            if (!parseDBStatus.isOK()) {
                return parseDBStatus.getStatus();
            }

            *db = parseDBStatus.getValue();
        }

        return Status(ErrorCodes::NamespaceExists,
                      str::stream() << "database " << dbName << " already exists");
    }

    return Status(ErrorCodes::DatabaseDifferCase,
                  str::stream() << "can't have 2 databases that just differ on case "
                                << " have: " << actualDbName << " want to add: " << dbName);
}
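Here the regex is anchored on both ends and made case-insensitive ("^<name>$" with the "i" option), so the config query finds any database whose name differs only by case; the exact string comparison afterwards decides between NamespaceExists and DatabaseDifferCase. A minimal sketch of that decision (it assumes the requested name contains no regex metacharacters, which the real code guards against with QuoteMeta):

#include <iostream>
#include <regex>
#include <string>

// Sketch of the check above: case-insensitive exact match first, then an exact
// comparison to pick the error.
void classify(const std::string& requested, const std::string& existing) {
    const std::regex exactCi("^" + requested + "$", std::regex::icase);
    if (!std::regex_match(existing, exactCi)) {
        std::cout << "OK: no conflicting database\n";
    } else if (existing == requested) {
        std::cout << "NamespaceExists: database already exists\n";
    } else {
        std::cout << "DatabaseDifferCase: have " << existing << ", want " << requested << "\n";
    }
}

int main() {
    classify("mydb", "otherdb");  // no conflict
    classify("mydb", "mydb");     // already exists
    classify("mydb", "MyDB");     // differs only in case
    return 0;
}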
Example #12
void SortKeyGenerator::getBoundsForSort(OperationContext* txn,
                                        const BSONObj& queryObj,
                                        const BSONObj& sortObj) {
    QueryPlannerParams params;
    params.options = QueryPlannerParams::NO_TABLE_SCAN;

    // We're creating a "virtual index" with key pattern equal to the sort order.
    IndexEntry sortOrder(
        sortObj, IndexNames::BTREE, true, false, false, "doesnt_matter", NULL, BSONObj());
    params.indices.push_back(sortOrder);

    auto statusWithQueryForSort = CanonicalQuery::canonicalize(
        txn, NamespaceString("fake.ns"), queryObj, ExtensionsCallbackNoop());
    verify(statusWithQueryForSort.isOK());
    std::unique_ptr<CanonicalQuery> queryForSort = std::move(statusWithQueryForSort.getValue());

    std::vector<QuerySolution*> solns;
    LOG(5) << "Sort key generation: Planning to obtain bounds for sort.";
    QueryPlanner::plan(*queryForSort, params, &solns);

    // TODO: are there ever > 1 solns?  If so, do we look for a specific soln?
    if (1 == solns.size()) {
        IndexScanNode* ixScan = NULL;
        QuerySolutionNode* rootNode = solns[0]->root.get();

        if (rootNode->getType() == STAGE_FETCH) {
            FetchNode* fetchNode = static_cast<FetchNode*>(rootNode);
            if (fetchNode->children[0]->getType() != STAGE_IXSCAN) {
                delete solns[0];
                // No bounds.
                return;
            }
            ixScan = static_cast<IndexScanNode*>(fetchNode->children[0]);
        } else if (rootNode->getType() == STAGE_IXSCAN) {
            ixScan = static_cast<IndexScanNode*>(rootNode);
        }

        if (ixScan) {
            _bounds.fields.swap(ixScan->bounds.fields);
            _hasBounds = true;
        }
    }

    for (size_t i = 0; i < solns.size(); ++i) {
        delete solns[i];
    }
}
Example #13
UpdateOp parseLegacyUpdate(const Message& msgRaw) {
    DbMessage msg(msgRaw);

    UpdateOp op;
    op.ns = NamespaceString(msg.getns());

    // Legacy updates only allowed one update per operation. Layout is flags, query, update.
    op.updates.emplace_back();
    auto& singleUpdate = op.updates.back();
    const int flags = msg.pullInt();
    singleUpdate.upsert = flags & UpdateOption_Upsert;
    singleUpdate.multi = flags & UpdateOption_Multi;
    singleUpdate.query = msg.nextJsObj();
    singleUpdate.update = msg.nextJsObj();

    return op;
}
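All three legacy parsers (insert, delete, update) read a 32-bit flags word and test individual bits. The bit positions below are assumptions taken from the old wire protocol, listed only to make the masks in these excerpts concrete; the real definitions live in the driver headers:

// Assumed legacy wire-protocol flag bits (illustrative, not copied from the source):
enum {
    InsertOption_ContinueOnError = 1 << 0,  // OP_INSERT: keep going after a failed insert
    UpdateOption_Upsert          = 1 << 0,  // OP_UPDATE: insert if nothing matches
    UpdateOption_Multi           = 1 << 1,  // OP_UPDATE: update every matching document
    RemoveOption_JustOne         = 1 << 0,  // OP_DELETE: delete at most one document
};

// Example: an OP_UPDATE flags word of 3 sets both bits, so parseLegacyUpdate() above
// would produce upsert = true and multi = true.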
Example #14
        Status checkAuthForCommand( ClientBasic* client,
                                    const std::string& dbname,
                                    const BSONObj& cmdObj ) {

            Status status = auth::checkAuthForWriteCommand( client->getAuthorizationSession(),
                                                            _writeType,
                                                            NamespaceString( parseNs( dbname,
                                                                                      cmdObj ) ),
                                                            cmdObj );

            // TODO: Remove this when we standardize GLE reporting from commands
            if ( !status.isOK() ) {
                setLastError( status.code(), status.reason().c_str() );
            }

            return status;
        }
TEST_F(KeysManagerShardedTest, GetKeyWithSingleKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey1.toBSON()));

    auto keyStatus =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(1, key.getKeyId());
    ASSERT_EQ(origKey1.getKey(), key.getKey());
    ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
}
Example #16
    Privilege AuthorizationManager::_modifyPrivilegeForSpecialCases(const Privilege& privilege) {
        ActionSet newActions;
        newActions.addAllActionsFromSet(privilege.getActions());
        std::string collectionName = NamespaceString(privilege.getResource()).coll;
        if (collectionName == "system.users") {
            newActions.removeAction(ActionType::find);
            newActions.removeAction(ActionType::insert);
            newActions.removeAction(ActionType::update);
            newActions.removeAction(ActionType::remove);
            newActions.addAction(ActionType::userAdmin);
        } else if (collectionName == "system.profile" && newActions.contains(ActionType::find)) {
            newActions.removeAction(ActionType::find);
            newActions.addAction(ActionType::profileRead);
        }

        return Privilege(privilege.getResource(), newActions);
    }
Example #17
File: db.cpp  Project: hipsterbd/mongo
 void checkForIdIndexes(const std::string& dbName) {
     if (!replSettings.usingReplSets()) {
         // we only care about the _id index if we are in a replset
         return;
     }
     if (dbName == "local") {
         // we do not need an _id index on anything in the local database
         return;
     }
     const string systemNamespaces = cc().database()->name() + ".system.namespaces";
     shared_ptr<Cursor> cursor = theDataFileMgr.findAll(systemNamespaces);
     // gather collections
     vector<string> collections = vector<string>();
     for ( ; cursor && cursor->ok(); cursor->advance()) {
         const BSONObj entry = cursor->current();
         string name = entry["name"].valuestrsafe();
         if (name.find('$') == string::npos && !NamespaceString(name).isSystem()) {
             collections.push_back(name);
         }
     }
     // for each collection, ensure there is a $_id_ index
     for (vector<string>::iterator i = collections.begin();
             i != collections.end(); ++i) {
         bool idIndexExists = false;
         string indexName = str::stream() << *i << ".$_id_";
         boost::shared_ptr<Cursor> indexCursor = theDataFileMgr.findAll(systemNamespaces);
         for ( ; indexCursor && indexCursor->ok(); indexCursor->advance()) {
             const BSONObj entry = indexCursor->current();
             string name = entry["name"].valuestrsafe();
             if (!name.compare(indexName)) {
                 idIndexExists = true;
                 break;
             }
         }
         if (!idIndexExists) {
             log() << "WARNING: the collection '" << *i
                   << "' lacks a unique index on _id."
                   << " This index is needed for replication to function properly"
                   << startupWarningsLog;
             log() << "\t To fix this, on the primary run 'db." << i->substr(i->find('.')+1)
                   << ".createIndex({_id: 1}, {unique: true})'"
                   << startupWarningsLog;
         }
     }
 }
Example #18
        virtual void addRequiredPrivileges(const std::string& dbname,
                                           const BSONObj& cmdObj,
                                           std::vector<Privilege>* out) {
            ActionSet sourceActions;
            sourceActions.addAction(ActionType::find);
            out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), sourceActions));

            ActionSet targetActions;
            targetActions.addAction(ActionType::insert);
            targetActions.addAction(ActionType::createIndex);
            targetActions.addAction(ActionType::convertToCapped);
            std::string collection = cmdObj.getStringField("toCollection");
            uassert(16708, "bad 'toCollection' value", !collection.empty());

            out->push_back(Privilege(ResourcePattern::forExactNamespace(
                                             NamespaceString(dbname, collection)),
                                     targetActions));
        }
Example #19
    virtual void run() {
        const BSONObj inputBson = pipelineFromJsonArray(inputPipeJson());
        const BSONObj shardPipeExpected = pipelineFromJsonArray(shardPipeJson());
        const BSONObj mergePipeExpected = pipelineFromJsonArray(mergePipeJson());

        intrusive_ptr<ExpressionContext> ctx =
            new ExpressionContext(&_opCtx, NamespaceString("a.collection"));
        string errmsg;
        mergePipe = Pipeline::parseCommand(errmsg, inputBson, ctx);
        ASSERT_EQUALS(errmsg, "");
        ASSERT(mergePipe != NULL);

        shardPipe = mergePipe->splitForSharded();
        ASSERT(shardPipe != NULL);

        ASSERT_EQUALS(Value(shardPipe->writeExplainOps()), Value(shardPipeExpected["pipeline"]));
        ASSERT_EQUALS(Value(mergePipe->writeExplainOps()), Value(mergePipeExpected["pipeline"]));
    }
TEST_F(KeysManagerShardedTest, ShouldNotReturnKeysInFeatureCompatibilityVersion34) {
    serverGlobalParams.featureCompatibility.version.store(
        ServerGlobalParams::FeatureCompatibility::Version::k34);

    keyManager()->startMonitoring(getServiceContext());
    keyManager()->enableKeyGenerator(operationContext(), true);

    KeysCollectionDocument origKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), NamespaceString(KeysCollectionDocument::ConfigNS), origKey.toBSON()));

    keyManager()->refreshNow(operationContext());

    auto keyStatus =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
Example #21
Status Command::_checkAuthorization(Command* c,
                                    ClientBasic* client,
                                    const std::string& dbname,
                                    const BSONObj& cmdObj,
                                    bool fromRepl) {
    namespace mmb = mutablebson;
    Status status = _checkAuthorizationImpl(c, client, dbname, cmdObj, fromRepl);
    if (!status.isOK()) {
        log() << status << std::endl;
    }
    mmb::Document cmdToLog(cmdObj, mmb::Document::kInPlaceDisabled);
    c->redactForLogging(&cmdToLog);
    audit::logCommandAuthzCheck(client,
                                NamespaceString(c->parseNs(dbname, cmdObj)),
                                cmdToLog,
                                status.code());
    return status;
}
Example #22
    void Pipeline::addRequiredPrivileges(Command* commandTemplate,
                                         const string& db,
                                         BSONObj cmdObj,
                                         vector<Privilege>* out) {
        ResourcePattern inputResource(commandTemplate->parseResourcePattern(db, cmdObj));
        uassert(17138,
                mongoutils::str::stream() << "Invalid input resource, " << inputResource.toString(),
                inputResource.isExactNamespacePattern());

        if (false && cmdObj["allowDiskUsage"].trueValue()) {
            // TODO no privilege for this yet.
        }

        out->push_back(Privilege(inputResource, ActionType::find));

        BSONObj pipeline = cmdObj.getObjectField("pipeline");
        BSONForEach(stageElem, pipeline) {
            BSONObj stage = stageElem.embeddedObjectUserCheck();
            if (str::equals(stage.firstElementFieldName(), "$out")) {
                // TODO Figure out how to handle temp collection privileges. For now, using the
                // output ns is ok since we only do db-level privilege checks.
                NamespaceString outputNs(db, stage.firstElement().str());
                uassert(17139,
                        mongoutils::str::stream() << "Invalid $out target namespace, " <<
                        outputNs.ns(),
                        outputNs.isValid());

                ActionSet actions;
                // logically on output ns
                actions.addAction(ActionType::remove);
                actions.addAction(ActionType::insert);

                // on temp ns due to implementation, but not logically on output ns
                actions.addAction(ActionType::createCollection);
                actions.addAction(ActionType::createIndex);
                actions.addAction(ActionType::dropCollection);
                actions.addAction(ActionType::renameCollectionSameDB);

                out->push_back(Privilege(ResourcePattern::forExactNamespace(outputNs), actions));
                out->push_back(Privilege(ResourcePattern::forExactNamespace(
                                                 NamespaceString(db, "system.indexes")),
                                         ActionType::find));
            }
        }
    }
    void AuthzManagerExternalStateMongod::logOp(
            const char* op,
            const char* ns,
            const BSONObj& o,
            BSONObj* o2,
            bool* b,
            bool fromMigrateUnused,
            const BSONObj* fullObjUnused) {

        if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
            ns == AuthorizationManager::adminCommandNamespace.ns()) {

            boost::lock_guard<boost::mutex> lk(_roleGraphMutex);
            Status status = updateRoleGraphWithLogOpSignature(
                    &_roleGraph, op, NamespaceString(ns), o, o2);

            if (status == ErrorCodes::OplogOperationUnsupported) {
                _roleGraph = RoleGraph();
                _roleGraphState = roleGraphStateInitial;
                error() << "Unsupported modification to roles collection in oplog; "
                    "TODO how to remedy. " << status << " Oplog entry: " << op;
            }
            else if (!status.isOK()) {
                warning() << "Skipping bad update to roles collection in oplog. " << status <<
                    " Oplog entry: " << op;
            }
            status = _roleGraph.recomputePrivilegeData();
            if (status == ErrorCodes::GraphContainsCycle) {
                _roleGraphState = roleGraphStateHasCycle;
                error() << "Inconsistent role graph during authorization manager intialization.  "
                    "Only direct privileges available. " << status.reason() <<
                    " after applying oplog entry " << op;
            }
            else if (!status.isOK()) {
                _roleGraphState = roleGraphStateInitial;
                error() << "Error updating role graph; only builtin roles available. "
                    "TODO how to remedy. " << status << " Oplog entry: " << op;
            }
            else {
                _roleGraphState = roleGraphStateConsistent;
            }
        }
    }
Example #24
void ChunkManager::getShardIdsForQuery(set<ShardId>& shardIds, const BSONObj& query) const {
    auto statusWithCQ =
        CanonicalQuery::canonicalize(NamespaceString(_ns), query, WhereCallbackNoop());

    uassertStatusOK(statusWithCQ.getStatus());
    unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

    // Query validation
    if (QueryPlannerCommon::hasNode(cq->root(), MatchExpression::GEO_NEAR)) {
        uassert(13501, "use geoNear command rather than $near query", false);
    }

    // Transforms query into bounds for each field in the shard key
    // for example :
    //   Key { a: 1, b: 1 },
    //   Query { a : { $gte : 1, $lt : 2 },
    //            b : { $gte : 3, $lt : 4 } }
    //   => Bounds { a : [1, 2), b : [3, 4) }
    IndexBounds bounds = getIndexBoundsForQuery(_keyPattern.toBSON(), *cq);

    // Transforms bounds for each shard key field into full shard key ranges
    // for example :
    //   Key { a : 1, b : 1 }
    //   Bounds { a : [1, 2), b : [3, 4) }
    //   => Ranges { a : 1, b : 3 } => { a : 2, b : 4 }
    BoundList ranges = _keyPattern.flattenBounds(bounds);

    for (BoundList::const_iterator it = ranges.begin(); it != ranges.end(); ++it) {
        getShardIdsForRange(shardIds, it->first /*min*/, it->second /*max*/);

        // once we know we need to visit all shards no need to keep looping
        if (shardIds.size() == _shardIds.size())
            break;
    }

    // SERVER-4914 Some clients of getShardIdsForQuery() assume at least one shard will be
    // returned.  For now, we satisfy that assumption by adding a shard with no matches rather
    // than return an empty set of shards.
    if (shardIds.empty()) {
        massert(16068, "no chunk ranges available", !_chunkRanges.ranges().empty());
        shardIds.insert(_chunkRanges.ranges().begin()->second->getShardId());
    }
}
    void AuthzManagerExternalStateLocal::logOp(
            const char* op,
            const char* ns,
            const BSONObj& o,
            BSONObj* o2,
            bool* b) {

        if (ns == AuthorizationManager::rolesCollectionNamespace.ns() ||
            ns == AuthorizationManager::adminCommandNamespace.ns()) {

            boost::lock_guard<boost::mutex> lk(_roleGraphMutex);
            Status status = _roleGraph.handleLogOp(op, NamespaceString(ns), o, o2);

            if (status == ErrorCodes::OplogOperationUnsupported) {
                _roleGraph = RoleGraph();
                _roleGraphState = roleGraphStateInitial;
                BSONObjBuilder oplogEntryBuilder;
                oplogEntryBuilder << "op" << op << "ns" << ns << "o" << o;
                if (o2)
                    oplogEntryBuilder << "o2" << *o2;
                if (b)
                    oplogEntryBuilder << "b" << *b;
                error() << "Unsupported modification to roles collection in oplog; "
                    "restart this process to reenable user-defined roles; " << status.reason() <<
                    "; Oplog entry: " << oplogEntryBuilder.done();
            }
            else if (!status.isOK()) {
                warning() << "Skipping bad update to roles collection in oplog. " << status <<
                    " Oplog entry: " << op;
            }
            status = _roleGraph.recomputePrivilegeData();
            if (status == ErrorCodes::GraphContainsCycle) {
                _roleGraphState = roleGraphStateHasCycle;
                error() << "Inconsistent role graph during authorization manager initialization.  "
                    "Only direct privileges available. " << status.reason() <<
                    " after applying oplog entry " << op;
            }
            else {
                fassert(17183, status);
                _roleGraphState = roleGraphStateConsistent;
            }
        }
    }
 Status AuthzManagerExternalState::insertPrivilegeDocument(const string& dbname,
                                                           const BSONObj& userObj,
                                                           const BSONObj& writeConcern) {
     Status status = insert(NamespaceString("admin.system.users"), userObj, writeConcern);
     if (status.isOK()) {
         return status;
     }
     if (status.code() == ErrorCodes::DuplicateKey) {
         std::string name = userObj[AuthorizationManager::USER_NAME_FIELD_NAME].String();
         std::string source = userObj[AuthorizationManager::USER_DB_FIELD_NAME].String();
         return Status(ErrorCodes::DuplicateKey,
                       mongoutils::str::stream() << "User \"" << name << "@" << source <<
                               "\" already exists");
     }
     if (status.code() == ErrorCodes::UnknownError) {
         return Status(ErrorCodes::UserModificationFailed, status.reason());
     }
     return status;
 }
StatusWith<OpTimePair<DatabaseType>> CatalogManagerReplicaSet::getDatabase(
    OperationContext* txn, const std::string& dbName) {
    invariant(nsIsDbOnly(dbName));

    // The two databases that are hosted on the config server are config and admin
    if (dbName == "config" || dbName == "admin") {
        DatabaseType dbt;
        dbt.setName(dbName);
        dbt.setSharded(false);
        dbt.setPrimary("config");

        return OpTimePair<DatabaseType>(dbt);
    }

    const auto configShard = grid.shardRegistry()->getShard(txn, "config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = _exhaustiveFindOnConfig(readHost.getValue(),
                                              NamespaceString(DatabaseType::ConfigNS),
                                              BSON(DatabaseType::name(dbName)),
                                              BSONObj(),
                                              1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docsWithOpTime = findStatus.getValue();
    if (docsWithOpTime.value.empty()) {
        return {ErrorCodes::DatabaseNotFound, stream() << "database " << dbName << " not found"};
    }

    invariant(docsWithOpTime.value.size() == 1);

    auto parseStatus = DatabaseType::fromBSON(docsWithOpTime.value.front());
    if (!parseStatus.isOK()) {
        return parseStatus.getStatus();
    }

    return OpTimePair<DatabaseType>(parseStatus.getValue(), docsWithOpTime.opTime);
}
Example #28
 void _sleepInLock(mongo::OperationContext* opCtx,
                   long long millis,
                   LockMode mode,
                   const StringData& ns) {
     if (ns.empty()) {
         Lock::GlobalLock lk(opCtx, mode, Date_t::max(), Lock::InterruptBehavior::kThrow);
         opCtx->sleepFor(Milliseconds(millis));
     } else if (nsIsDbOnly(ns)) {
         uassert(50961, "lockTarget is not a valid namespace", NamespaceString::validDBName(ns));
         Lock::DBLock lk(opCtx, ns, mode, Date_t::max());
         opCtx->sleepFor(Milliseconds(millis));
     } else {
         uassert(50962,
                 "lockTarget is not a valid namespace",
                 NamespaceString::validCollectionComponent(ns));
         Lock::CollectionLock lk(opCtx, NamespaceString(ns), mode, Date_t::max());
         opCtx->sleepFor(Milliseconds(millis));
     }
 }
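The lock granularity follows the shape of the namespace string: empty means a global lock, a database-only name takes a DBLock, and anything else takes a CollectionLock. Hedged illustrative call sites (assumed, not from the source; MODE_X stands in for whatever LockMode the caller passes):

// Assumed call sites, one per branch above:
_sleepInLock(opCtx, 100, MODE_X, "");            // global lock for 100 ms
_sleepInLock(opCtx, 100, MODE_X, "test");        // database lock on "test"
_sleepInLock(opCtx, 100, MODE_X, "test.users");  // collection lock on "test.users"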
StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName() const {
    const auto configShard = grid.shardRegistry()->getShard("config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    BSONObjBuilder shardNameRegex;
    shardNameRegex.appendRegex(ShardType::name(), "^shard");

    auto findStatus = grid.shardRegistry()->exhaustiveFind(readHost.getValue(),
                                                           NamespaceString(ShardType::ConfigNS),
                                                           shardNameRegex.obj(),
                                                           BSON(ShardType::name() << -1),
                                                           1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue();

    int count = 0;
    if (!docs.empty()) {
        const auto shardStatus = ShardType::fromBSON(docs.front());
        if (!shardStatus.isOK()) {
            return shardStatus.getStatus();
        }

        std::istringstream is(shardStatus.getValue().getName().substr(5));
        is >> count;
        count++;
    }

    // TODO fix so that we can have more than 10000 automatically generated shard names
    if (count < 9999) {
        std::stringstream ss;
        ss << "shard" << std::setfill('0') << std::setw(4) << count;
        return ss.str();
    }

    return Status(ErrorCodes::OperationFailed, "unable to generate new shard name");
}
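The generator queries config.shards for names matching "^shard", sorted descending, then parses the numeric suffix of the highest one and zero-pads the incremented value to four digits. A standalone sketch of just that suffix handling (nextShardName is a hypothetical helper name):

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// Parse the digits after "shard" in the highest existing name and format the next
// zero-padded name, mirroring the istringstream/setw(4) logic above.
std::string nextShardName(const std::string& highestExisting) {
    int count = 0;
    std::istringstream is(highestExisting.substr(5));  // skip the "shard" prefix
    is >> count;
    ++count;

    std::stringstream ss;
    ss << "shard" << std::setfill('0') << std::setw(4) << count;
    return ss.str();
}

int main() {
    std::cout << nextShardName("shard0042") << "\n";  // prints shard0043
    return 0;
}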
Status CatalogManagerReplicaSet::getChunks(OperationContext* txn,
                                           const BSONObj& query,
                                           const BSONObj& sort,
                                           boost::optional<int> limit,
                                           vector<ChunkType>* chunks,
                                           OpTime* opTime) {
    chunks->clear();

    auto configShard = grid.shardRegistry()->getShard(txn, "config");
    auto readHostStatus = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHostStatus.isOK()) {
        return readHostStatus.getStatus();
    }

    // Convert boost::optional<int> to boost::optional<long long>.
    auto longLimit = limit ? boost::optional<long long>(*limit) : boost::none;
    auto findStatus = _exhaustiveFindOnConfig(
        readHostStatus.getValue(), NamespaceString(ChunkType::ConfigNS), query, sort, longLimit);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto chunkDocsOpTimePair = findStatus.getValue();
    for (const BSONObj& obj : chunkDocsOpTimePair.value) {
        auto chunkRes = ChunkType::fromBSON(obj);
        if (!chunkRes.isOK()) {
            chunks->clear();
            return {ErrorCodes::FailedToParse,
                    stream() << "Failed to parse chunk with id ("
                             << obj[ChunkType::name()].toString()
                             << "): " << chunkRes.getStatus().toString()};
        }

        chunks->push_back(chunkRes.getValue());
    }

    if (opTime) {
        *opTime = chunkDocsOpTimePair.opTime;
    }

    return Status::OK();
}