Example #1
0
INT32 _fmpController::_runLoop()
{
   INT32 rc = SDB_OK ;
   BSONObj obj ;
   BSONElement ele ;

   while ( TRUE )
   {
      INT32 step = FMP_CONTROL_STEP_INVALID ;
      rc = _readMsg( obj ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to read msg:%d",rc ) ;
         goto error ;
      }

      ele = obj.getField( FMP_CONTROL_FIELD ) ;
      if ( ele.eoo() )
      {
         step = FMP_CONTROL_STEP_DOWNLOAD ;
      }
      else if ( NumberInt != ele.type() )
      {
         PD_LOG( PDERROR, "failed to find control filed:%s",
                 obj.toString().c_str() ) ;
         rc = SDB_SYS ;
         goto error ;
      }
      else
      {
         step = ele.Int() ;
      }

      if ( FMP_CONTROL_STEP_QUIT == step )
      {
         _clear() ;
         rc = _writeMsg( OK_RES ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "failed to write res of reset:%d", rc  ) ;
            goto error ;
         }
         break ;
      }
      else if ( FMP_CONTROL_STEP_RESET == step )
      {
         _clear() ;
         rc = _writeMsg( OK_RES ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "failed to write res of reset:%d", rc  ) ;
            goto error ;
         }
         continue ;
      }
      else
      {
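         // other control steps need no special handling here; they are validated and dispatched below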
      }

      if ( !FMP_VALID_STEP(step) )
      {
         PD_LOG( PDERROR, "invalid step number[%d], now step[%d]",
                 ele.Int(), _step ) ;
         rc = SDB_SYS ;
         goto error ;
      }

      rc = _handleOneLoop( obj, step ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to handle one loop:%d", rc) ;
         _clear() ;
      }

      FMP_STEP_AUTO_CHANGE( step ) ;
   }
done:
   return rc ;
error:
   goto done ;
}
Example #2
0
    string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames, int pretty ) const {
        BSONType t = type();
        if ( t == Undefined )
            return "";

        stringstream s;
        if ( includeFieldNames )
            s << '"' << escape( fieldName() ) << "\" : ";
        switch ( type() ) {
        case mongo::String:
        case Symbol:
            s << '"' << escape( valuestr() ) << '"';
            break;
        case NumberLong:
            s << _numberLong();
            break;
        case NumberInt:
        case NumberDouble:
            if ( number() >= -numeric_limits< double >::max() &&
                    number() <= numeric_limits< double >::max() ) {
                s.precision( 16 );
                s << number();
            } else {
                stringstream ss;
                ss << "Number " << number() << " cannot be represented in JSON";
                string message = ss.str();
                massert( 10311 ,  message.c_str(), false );
            }
            break;
        case mongo::Bool:
            s << ( boolean() ? "true" : "false" );
            break;
        case jstNULL:
            s << "null";
            break;
        case Object:
            s << embeddedObject().jsonString( format, pretty );
            break;
        case mongo::Array: {
            if ( embeddedObject().isEmpty() ) {
                s << "[]";
                break;
            }
            s << "[ ";
            BSONObjIterator i( embeddedObject() );
            BSONElement e = i.next();
            if ( !e.eoo() )
                while ( 1 ) {
                    if( pretty ) {
                        s << '\n';
                        for( int x = 0; x < pretty; x++ )
                            s << "  ";
                    }
                    s << e.jsonString( format, false, pretty?pretty+1:0 );
                    e = i.next();
                    if ( e.eoo() )
                        break;
                    s << ", ";
                }
            s << " ]";
            break;
        }
        case DBRef: {
            mongo::OID *x = (mongo::OID *) (valuestr() + valuestrsize());
            if ( format == TenGen )
                s << "Dbref( ";
            else
                s << "{ \"$ref\" : ";
            s << '"' << valuestr() << "\", ";
            if ( format != TenGen )
                s << "\"$id\" : ";
            s << '"' << *x << "\" ";
            if ( format == TenGen )
                s << ')';
            else
                s << '}';
            break;
        }
        case jstOID:
            if ( format == TenGen ) {
                s << "ObjectId( ";
            } else {
                s << "{ \"$oid\" : ";
            }
            s << '"' << __oid() << '"';
            if ( format == TenGen ) {
                s << " )";
            } else {
                s << " }";
            }
            break;
        case BinData: {
            int len = *(int *)( value() );
            BinDataType type = BinDataType( *(char *)( (int *)( value() ) + 1 ) );
            s << "{ \"$binary\" : \"";
            char *start = ( char * )( value() ) + sizeof( int ) + 1;
            base64::encode( s , start , len );
            s << "\", \"$type\" : \"" << hex;
            s.width( 2 );
            s.fill( '0' );
            s << type << dec;
            s << "\" }";
            break;
        }
        case mongo::Date:
            if ( format == Strict )
                s << "{ \"$date\" : ";
            else
                s << "Date( ";
            if( pretty ) {
                Date_t d = date();
                if( d == 0 ) s << '0';
                else
                    s << '"' << date().toString() << '"';
            } else
                s << date();
            if ( format == Strict )
                s << " }";
            else
                s << " )";
            break;
        case RegEx:
            if ( format == Strict ){
                s << "{ \"$regex\" : \"" << escape( regex() );
                s << "\", \"$options\" : \"" << regexFlags() << "\" }";
            } else {
                s << "/" << escape( regex() , true ) << "/";
                // FIXME Worry about alpha order?
                for ( const char *f = regexFlags(); *f; ++f ){
                    switch ( *f ) {
                    case 'g':
                    case 'i':
                    case 'm':
                        s << *f;
                    default:
                        break;
                    }
                }
            }
            break;

        case CodeWScope: {
            BSONObj scope = codeWScopeObject();
            if ( ! scope.isEmpty() ){
                s << "{ \"$code\" : " << _asCode() << " , "
                  << " \"$scope\" : " << scope.jsonString() << " }";
                break;
            }
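            // Otherwise the scope is empty: fall through to the Code case below.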
        }


        case Code:
            s << _asCode();
            break;
            
        case Timestamp:
            s << "{ \"t\" : " << timestampTime() << " , \"i\" : " << timestampInc() << " }";
            break;

        case MinKey:
            s << "{ \"$minKey\" : 1 }";
            break;

        case MaxKey:
            s << "{ \"$maxKey\" : 1 }";
            break;

        default:
            stringstream ss;
            ss << "Cannot create a properly formatted JSON string with "
            << "element: " << toString() << " of type: " << type();
            string message = ss.str();
            massert( 10312 ,  message.c_str(), false );
        }
        return s.str();
    }
Example #3
0
        virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            string source = cmdObj.getStringField( name.c_str() );
            string target = cmdObj.getStringField( "to" );

            if ( !NamespaceString::validCollectionComponent(target.c_str()) ) {
                errmsg = "invalid collection name: " + target;
                return false;
            }
            if ( source.empty() || target.empty() ) {
                errmsg = "invalid command syntax";
                return false;
            }

            if (!fromRepl) { // If it got through on the master, need to allow it here too
                Status sourceStatus = userAllowedWriteNS(source);
                if (!sourceStatus.isOK()) {
                    errmsg = "error with source namespace: " + sourceStatus.reason();
                    return false;
                }

                Status targetStatus = userAllowedWriteNS(target);
                if (!targetStatus.isOK()) {
                    errmsg = "error with target namespace: " + targetStatus.reason();
                    return false;
                }
            }

            string sourceDB = nsToDatabase(source);
            string targetDB = nsToDatabase(target);

            bool capped = false;
            long long size = 0;
            std::vector<BSONObj> indexesInProg;

            {
                Client::Context srcCtx( source );
                Collection* sourceColl = srcCtx.db()->getCollection( source );

                if ( !sourceColl ) {
                    errmsg = "source namespace does not exist";
                    return false;
                }

                // Ensure that collection name does not exceed maximum length.
                // Ensure that index names do not push the length over the max.
                // Iterator includes unfinished indexes.
                IndexCatalog::IndexIterator sourceIndIt =
                    sourceColl->getIndexCatalog()->getIndexIterator( true );
                int longestIndexNameLength = 0;
                while ( sourceIndIt.more() ) {
                    int thisLength = sourceIndIt.next()->indexName().length();
                    if ( thisLength > longestIndexNameLength )
                        longestIndexNameLength = thisLength;
                }

                unsigned int longestAllowed =
                    min(int(Namespace::MaxNsColletionLen),
                        int(Namespace::MaxNsLen) - 2/*strlen(".$")*/ - longestIndexNameLength);
                if (target.size() > longestAllowed) {
                    StringBuilder sb;
                    sb << "collection name length of " << target.size()
                       << " exceeds maximum length of " << longestAllowed
                       << ", allowing for index names";
                    errmsg = sb.str();
                    return false;
                }

                {
                    const NamespaceDetails *nsd = nsdetails( source );
                    indexesInProg = stopIndexBuilds( dbname, cmdObj );
                    capped = nsd->isCapped();
                    if ( capped )
                        for( DiskLoc i = nsd->firstExtent(); !i.isNull(); i = i.ext()->xnext )
                            size += i.ext()->length;
                }
            }

            {
                Client::Context ctx( target );

                // Check if the target namespace exists and if dropTarget is true.
                // If target exists and dropTarget is not true, return false.
                if ( ctx.db()->getCollection( target ) ) {
                    if ( !cmdObj["dropTarget"].trueValue() ) {
                        errmsg = "target namespace exists";
                        return false;
                    }

                    Status s = cc().database()->dropCollection( target );
                    if ( !s.isOK() ) {
                        errmsg = s.toString();
                        restoreIndexBuildsOnSource( indexesInProg, source );
                        return false;
                    }
                }

                // If we are renaming in the same database, just
                // rename the namespace and we're done.
                if ( sourceDB == targetDB ) {
                    Status s = ctx.db()->renameCollection( source, target,
                                                           cmdObj["stayTemp"].trueValue() );
                    if ( !s.isOK() ) {
                        errmsg = s.toString();
                        restoreIndexBuildsOnSource( indexesInProg, source );
                        return false;
                    }
                    return true;
                }

                // Otherwise, we are renaming across databases, so we must copy all
                // the data and then remove the source collection.

                // Create the target collection.
                Collection* targetColl = NULL;
                if ( capped ) {
                    BSONObjBuilder spec;
                    spec.appendBool( "capped", true );
                    spec.append( "size", double( size ) );
                    spec.appendBool( "autoIndexId", false );
                    userCreateNS( target.c_str(), spec.obj(), errmsg, false );
                    targetColl = ctx.db()->getCollection( target );
                }
                else {
                    CollectionOptions options;
                    options.setNoIdIndex();
                    // No logOp necessary because the entire renameCollection command is one logOp.
                    targetColl = ctx.db()->createCollection( target, options );
                }
                if ( !targetColl ) {
                    errmsg = "Failed to create target collection.";
                    restoreIndexBuildsOnSource( indexesInProg, source );
                    return false;
                }
            }

            // Copy over all the data from source collection to target collection.
            bool insertSuccessful = true;
            boost::scoped_ptr<CollectionIterator> sourceIt;

            {
                Client::Context srcCtx( source );
                Collection* sourceColl = srcCtx.db()->getCollection( source );
                sourceIt.reset( sourceColl->getIterator( DiskLoc(), false, CollectionScanParams::FORWARD ) );
            }

            Collection* targetColl = NULL;
            while ( !sourceIt->isEOF() ) {
                BSONObj o;
                {
                    Client::Context srcCtx( source );
                    o = sourceIt->getNext().obj();
                }
                // Insert and check return status of insert.
                {
                    Client::Context ctx( target );
                    if ( !targetColl )
                        targetColl = ctx.db()->getCollection( target );
                    // No logOp necessary because the entire renameCollection command is one logOp.
                    Status s = targetColl->insertDocument( o, true ).getStatus();
                    if ( !s.isOK() ) {
                        insertSuccessful = false;
                        errmsg = s.toString();
                        break;
                    }
                }
            }

            // If inserts were unsuccessful, drop the target collection and return false.
            if ( !insertSuccessful ) {
                Client::Context ctx( target );
                Status s = ctx.db()->dropCollection( target );
                if ( !s.isOK() )
                    errmsg = s.toString();
                restoreIndexBuildsOnSource( indexesInProg, source );
                return false;
            }

            // Copy over the indexes to temp storage and then to the target.
            vector<BSONObj> copiedIndexes;
            bool indexSuccessful = true;
            {
                Client::Context srcCtx( source );
                Collection* sourceColl = srcCtx.db()->getCollection( source );
                IndexCatalog::IndexIterator sourceIndIt =
                    sourceColl->getIndexCatalog()->getIndexIterator( true );

                while ( sourceIndIt.more() ) {
                    BSONObj currIndex = sourceIndIt.next()->infoObj();

                    // Process the source index.
                    BSONObjBuilder b;
                    BSONObjIterator i( currIndex );
                    while( i.moreWithEOO() ) {
                        BSONElement e = i.next();
                        if ( e.eoo() )
                            break;
                        else if ( strcmp( e.fieldName(), "ns" ) == 0 )
                            b.append( "ns", target );
                        else
                            b.append( e );
                    }

                    BSONObj newIndex = b.obj();
                    copiedIndexes.push_back( newIndex );
                }
            }

            {
                Client::Context ctx( target );
                if ( !targetColl )
                    targetColl = ctx.db()->getCollection( target );

                for ( vector<BSONObj>::iterator it = copiedIndexes.begin();
                                                it != copiedIndexes.end(); ++it ) {
                    Status s = targetColl->getIndexCatalog()->createIndex( *it, true );
                    if ( !s.isOK() ) {
                        indexSuccessful = false;
                        errmsg = s.toString();
                        break;
                    }
                }

                // If indexes were unsuccessful, drop the target collection and return false.
                if ( !indexSuccessful ) {
                    Status s = ctx.db()->dropCollection( target );
                    if ( !s.isOK() )
                        errmsg = s.toString();
                    restoreIndexBuildsOnSource( indexesInProg, source );
                    return false;
                }
            }

            // Drop the source collection.
            {
                Client::Context srcCtx( source );
                Status s = srcCtx.db()->dropCollection( source );
                if ( !s.isOK() ) {
                    errmsg = s.toString();
                    restoreIndexBuildsOnSource( indexesInProg, source );
                    return false;
                }
            }

            return true;
        }
Example #4
0
    void QueryPlanSet::init() {
        DEBUGQO( "QueryPlanSet::init " << ns << "\t" << _originalQuery );
        _plans.clear();
        _mayRecordPlan = true;
        _usingPrerecordedPlan = false;

        const char *ns = _frsp->ns();
        NamespaceDetails *d = nsdetails( ns );
        if ( !d || !_frsp->matchPossible() ) {
            // Table scan plan, when no matches are possible
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            return;
        }

        BSONElement hint = _hint.firstElement();
        if ( !hint.eoo() ) {
            _mayRecordPlan = false;
            IndexDetails *id = parseHint( hint, d );
            if ( id ) {
                addHint( *id );
            }
            else {
                massert( 10366 ,  "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
                // Table scan plan
                _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            }
            return;
        }

        if ( !_min.isEmpty() || !_max.isEmpty() ) {
            string errmsg;
            BSONObj keyPattern;
            IndexDetails *idx = indexDetailsForRange( ns, errmsg, _min, _max, keyPattern );
            massert( 10367 ,  errmsg, idx );
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_frsp, *_originalFrsp, _originalQuery, _order, _min, _max ) ) );
            return;
        }

        if ( isSimpleIdQuery( _originalQuery ) ) {
            int idx = d->findIdIndex();
            if ( idx >= 0 ) {
                _usingPrerecordedPlan = true;
                _mayRecordPlan = false;
                _plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_frsp , *_originalFrsp , _originalQuery, _order ) ) );
                return;
            }
        }

        if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
            _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) ) );
            return;
        }

        DEBUGQO( "\t special : " << _frsp->getSpecial() );
        if ( _frsp->getSpecial().size() ) {
            _special = _frsp->getSpecial();
            NamespaceDetails::IndexIterator i = d->ii();
            while( i.more() ) {
                int j = i.pos();
                IndexDetails& ii = i.next();
                const IndexSpec& spec = ii.getSpec();
                if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
                    _usingPrerecordedPlan = true;
                    _mayRecordPlan = false;
                    _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_frsp , *_originalFrsp , _originalQuery, _order ,
                                                    BSONObj() , BSONObj() , _special ) ) );
                    return;
                }
            }
            uassert( 13038 , (string)"can't find special index: " + _special + " for: " + _originalQuery.toString() , 0 );
        }

        if ( _honorRecordedPlan ) {
            pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( *_frsp, _order );
            BSONObj bestIndex = best.first;
            long long oldNScanned = best.second;
            if ( !bestIndex.isEmpty() ) {
                QueryPlanPtr p;
                _oldNScanned = oldNScanned;
                if ( !strcmp( bestIndex.firstElement().fieldName(), "$natural" ) ) {
                    // Table scan plan
                    p.reset( new QueryPlan( d, -1, *_frsp, *_originalFrsp, _originalQuery, _order ) );
                }

                NamespaceDetails::IndexIterator i = d->ii();
                while( i.more() ) {
                    int j = i.pos();
                    IndexDetails& ii = i.next();
                    if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
                        p.reset( new QueryPlan( d, j, *_frsp, *_originalFrsp, _originalQuery, _order ) );
                    }
                }

                massert( 10368 ,  "Unable to locate previously recorded index", p.get() );
                if ( !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
                    _usingPrerecordedPlan = true;
                    _mayRecordPlan = false;
                    _plans.push_back( p );
                    return;
                }
            }
        }

        addOtherPlans( false );
    }
Example #5
0
void FTSIndexFormat::getKeys(const FTSSpec& spec, const BSONObj& obj, BSONObjSet* keys) {
    int extraSize = 0;
    vector<BSONElement> extrasBefore;
    vector<BSONElement> extrasAfter;

    // compute the non FTS key elements
    for (unsigned i = 0; i < spec.numExtraBefore(); i++) {
        BSONElement e = dps::extractElementAtPath(obj, spec.extraBefore(i));
        if (e.eoo())
            e = nullElt;
        uassert(16675, "cannot have a multi-key as a prefix to a text index", e.type() != Array);
        extrasBefore.push_back(e);
        extraSize += e.size();
    }
    for (unsigned i = 0; i < spec.numExtraAfter(); i++) {
        BSONElement e = dps::extractElementAtPath(obj, spec.extraAfter(i));
        if (e.eoo())
            e = nullElt;
        extrasAfter.push_back(e);
        extraSize += e.size();
    }


    TermFrequencyMap term_freqs;
    spec.scoreDocument(obj, &term_freqs);

    // create index keys from raw scores
    // only 1 per string

    uassert(16732,
            mongoutils::str::stream() << "too many unique keys for a single document to"
                                      << " have a text index, max is "
                                      << term_freqs.size()
                                      << obj["_id"],
            term_freqs.size() <= 400000);

    long long keyBSONSize = 0;
    const int MaxKeyBSONSizeMB = 4;

    for (TermFrequencyMap::const_iterator i = term_freqs.begin(); i != term_freqs.end(); ++i) {
        const string& term = i->first;
        double weight = i->second;

        // guess the total size of the btree entry based on the size of the weight, term tuple
        int guess = 5 /* bson overhead */ + 10 /* weight */ + 8 /* term overhead */ +
            /* term size (could be truncated/hashed) */
            guessTermSize(term, spec.getTextIndexVersion()) + extraSize;

        BSONObjBuilder b(guess);  // builds a BSON object with guess length.
        for (unsigned k = 0; k < extrasBefore.size(); k++) {
            b.appendAs(extrasBefore[k], "");
        }
        _appendIndexKey(b, weight, term, spec.getTextIndexVersion());
        for (unsigned k = 0; k < extrasAfter.size(); k++) {
            b.appendAs(extrasAfter[k], "");
        }
        BSONObj res = b.obj();

        verify(guess >= res.objsize());

        keys->insert(res);
        keyBSONSize += res.objsize();

        uassert(16733,
                mongoutils::str::stream()
                    << "trying to index text where term list is too big, max is "
                    << MaxKeyBSONSizeMB
                    << "mb "
                    << obj["_id"],
                keyBSONSize <= (MaxKeyBSONSizeMB * 1024 * 1024));
    }
}
    Status V2PrivilegeDocumentParser::checkValidPrivilegeDocument(const StringData& dbname,
                                                                  const BSONObj& doc) const {
        BSONElement userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
        BSONElement userSourceElement = doc[AuthorizationManager::USER_SOURCE_FIELD_NAME];
        BSONElement credentialsElement = doc[CREDENTIALS_FIELD_NAME];
        BSONElement rolesElement = doc[ROLES_FIELD_NAME];
        BSONElement delegatableRolesElement = doc[DELEGATABLE_ROLES_FIELD_NAME];

        // Validate the "user" element.
        if (userElement.type() != String)
            return _badValue("User document needs 'user' field to be a string", 0);
        if (makeStringDataFromBSONElement(userElement).empty())
            return _badValue("User document needs 'user' field to be non-empty", 0);

        // Validate the "userSource" element
        if (userSourceElement.type() != String ||
                makeStringDataFromBSONElement(userSourceElement).empty()) {
            return _badValue("User document needs 'userSource' field to be a non-empty string", 0);
        }
        StringData userSourceStr = makeStringDataFromBSONElement(userSourceElement);
        if (!NamespaceString::validDBName(userSourceStr) && userSourceStr != "$external") {
            return _badValue(mongoutils::str::stream() << "'" << userSourceStr <<
                                     "' is not a valid value for the userSource field.",
                             0);
        }
        if (userSourceStr != dbname) {
            return _badValue(mongoutils::str::stream() << "userSource '" << userSourceStr <<
                                     "' does not match database '" << dbname << "'", 0);
        }

        // Validate the "credentials" element
        if (credentialsElement.eoo() && userSourceStr != "$external") {
            return _badValue("User document needs 'credentials' field unless userSource is "
                            "'$external'",
                    0);
        }
        if (!credentialsElement.eoo()) {
            if (credentialsElement.type() != Object) {
                return _badValue("User document needs 'credentials' field to be an object", 0);
            }

            BSONObj credentialsObj = credentialsElement.Obj();
            if (credentialsObj.isEmpty()) {
                return _badValue("User document needs 'credentials' field to be a non-empty object",
                                 0);
            }
            BSONElement MongoCRElement = credentialsObj[MONGODB_CR_CREDENTIAL_FIELD_NAME];
            if (!MongoCRElement.eoo() && (MongoCRElement.type() != String ||
                    makeStringDataFromBSONElement(MongoCRElement).empty())) {
                return _badValue("MONGODB-CR credential must to be a non-empty string, if present",
                                 0);
            }
        }

        // Validate the "roles" element.
        Status status = _checkV2RolesArray(rolesElement);
        if (!status.isOK())
            return status;

        // Validate the "delegatableRoles" element.
        status = _checkV2RolesArray(delegatableRolesElement);
        if (!status.isOK())
            return status;

        return Status::OK();
    }
bool BatchedCommandResponse::parseBSON(const BSONObj& source, string* errMsg) {
    clear();

    std::string dummy;
    if (!errMsg)
        errMsg = &dummy;

    _status = getStatusFromCommandResult(source);
    _isStatusSet = true;

    // We're using appendNumber on generation so we'll try a smaller type
    // (int) first and then fall back to the original type (long long).
    BSONField<int> fieldN(n());
    int tempN;
    auto fieldState = FieldParser::extract(source, fieldN, &tempN, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID) {
        // try falling back to a larger type
        fieldState = FieldParser::extract(source, n, &_n, errMsg);
        if (fieldState == FieldParser::FIELD_INVALID)
            return false;
        _isNSet = fieldState == FieldParser::FIELD_SET;
    } else if (fieldState == FieldParser::FIELD_SET) {
        _isNSet = true;
        _n = tempN;
    }

    // We're using appendNumber on generation so we'll try a smaller type
    // (int) first and then fall back to the original type (long long).
    BSONField<int> fieldNModified(nModified());
    int intNModified;
    fieldState = FieldParser::extract(source, fieldNModified, &intNModified, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID) {
        // try falling back to a larger type
        fieldState = FieldParser::extract(source, nModified, &_nModified, errMsg);
        if (fieldState == FieldParser::FIELD_INVALID)
            return false;
        _isNModifiedSet = fieldState == FieldParser::FIELD_SET;
    } else if (fieldState == FieldParser::FIELD_SET) {
        _isNModifiedSet = true;
        _nModified = intNModified;
    }

    std::vector<BatchedUpsertDetail*>* tempUpsertDetails = NULL;
    fieldState = FieldParser::extract(source, upsertDetails, &tempUpsertDetails, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID)
        return false;
    _upsertDetails.reset(tempUpsertDetails);

    const BSONElement opTimeElement = source["opTime"];
    _isLastOpSet = true;
    if (opTimeElement.eoo()) {
        _isLastOpSet = false;
    } else if (opTimeElement.type() == bsonTimestamp) {
        _lastOp = repl::OpTime(opTimeElement.timestamp(), repl::OpTime::kUninitializedTerm);
    } else if (opTimeElement.type() == Date) {
        _lastOp = repl::OpTime(Timestamp(opTimeElement.date()), repl::OpTime::kUninitializedTerm);
    } else if (opTimeElement.type() == Object) {
        Status status = bsonExtractOpTimeField(source, "opTime", &_lastOp);
        if (!status.isOK()) {
            return false;
        }
    } else {
        return false;
    }

    fieldState = FieldParser::extract(source, electionId, &_electionId, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID)
        return false;
    _isElectionIdSet = fieldState == FieldParser::FIELD_SET;

    std::vector<WriteErrorDetail*>* tempErrDetails = NULL;
    fieldState = FieldParser::extract(source, writeErrors, &tempErrDetails, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID)
        return false;
    _writeErrorDetails.reset(tempErrDetails);

    WriteConcernErrorDetail* wcError = NULL;
    fieldState = FieldParser::extract(source, writeConcernError, &wcError, errMsg);
    if (fieldState == FieldParser::FIELD_INVALID)
        return false;
    _wcErrDetails.reset(wcError);

    return true;
}
Example #8
0
        bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result) {
            if (!cmdObj["start"].eoo()) {
                errmsg = "using deprecated 'start' argument to geoNear";
                return false;
            }

            const NamespaceString nss(parseNs(dbname, cmdObj));
            AutoGetCollectionForRead ctx(txn, nss);

            Collection* collection = ctx.getCollection();
            if ( !collection ) {
                errmsg = "can't find ns";
                return false;
            }

            IndexCatalog* indexCatalog = collection->getIndexCatalog();

            // cout << "raw cmd " << cmdObj.toString() << endl;

            // We seek to populate this.
            string nearFieldName;
            bool using2DIndex = false;
            if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
                return false;
            }

            PointWithCRS point;
            uassert(17304, "'near' field must be point",
                    GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

            bool isSpherical = cmdObj["spherical"].trueValue();
            if (!using2DIndex) {
                uassert(17301, "2dsphere index must have spherical: true", isSpherical);
            }

            // Build the $near expression for the query.
            BSONObjBuilder nearBob;
            if (isSpherical) {
                nearBob.append("$nearSphere", cmdObj["near"].Obj());
            }
            else {
                nearBob.append("$near", cmdObj["near"].Obj());
            }

            if (!cmdObj["maxDistance"].eoo()) {
                uassert(17299, "maxDistance must be a number",cmdObj["maxDistance"].isNumber());
                nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
            }

            if (!cmdObj["minDistance"].eoo()) {
                uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
                uassert(17300, "minDistance must be a number",cmdObj["minDistance"].isNumber());
                nearBob.append("$minDistance", cmdObj["minDistance"].number());
            }

            if (!cmdObj["uniqueDocs"].eoo()) {
                warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
            }

            // And, build the full query expression.
            BSONObjBuilder queryBob;
            queryBob.append(nearFieldName, nearBob.obj());
            if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
                queryBob.appendElements(cmdObj["query"].Obj());
            }
            BSONObj rewritten = queryBob.obj();

            // cout << "rewritten query: " << rewritten.toString() << endl;

            long long numWanted = 100;
            const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
            BSONElement eNumWanted = cmdObj[limitName];
            if (!eNumWanted.eoo()) {
                uassert(17303, "limit must be number", eNumWanted.isNumber());
                numWanted = eNumWanted.safeNumberLong();
                uassert(17302, "limit must be >=0", numWanted >= 0);
            }

            bool includeLocs = false;
            if (!cmdObj["includeLocs"].eoo()) {
                includeLocs = cmdObj["includeLocs"].trueValue();
            }

            double distanceMultiplier = 1.0;
            BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
            if (!eDistanceMultiplier.eoo()) {
                uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
                distanceMultiplier = eDistanceMultiplier.number();
                uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
            }

            BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) <<
                                   "$dis" << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

            CanonicalQuery* cq;
            const WhereCallbackReal whereCallback(txn, nss.db());

            if (!CanonicalQuery::canonicalize(nss,
                                              rewritten,
                                              BSONObj(),
                                              projObj,
                                              0,
                                              numWanted,
                                              BSONObj(),
                                              &cq,
                                              whereCallback).isOK()) {
                errmsg = "Can't parse filter / create query";
                return false;
            }

            // Prevent chunks from being cleaned up during yields - this allows us to only check the
            // version on initial entry into geoNear.
            RangePreserver preserver(collection);

            PlanExecutor* rawExec;
            if (!getExecutor(txn, collection, cq, PlanExecutor::YIELD_AUTO, &rawExec, 0).isOK()) {
                errmsg = "can't get query executor";
                return false;
            }

            scoped_ptr<PlanExecutor> exec(rawExec);

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            BSONObj currObj;
            long long results = 0;
            while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {

                // Come up with the correct distance.
                double dist = currObj["$dis"].number() * distanceMultiplier;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
                // in the command result.
                BSONObjIterator resIt(currObj);
                BSONObjBuilder resBob;
                while (resIt.more()) {
                    BSONElement elt = resIt.next();
                    if (!mongoutils::str::equals("$pt", elt.fieldName())
                        && !mongoutils::str::equals("$dis", elt.fieldName())) {
                        resBob.append(elt);
                    }
                }
                BSONObj resObj = resBob.obj();

                // Don't make a too-big result object.
                if (resultBuilder.len() + resObj.objsize()> BSONObjMaxUserSize) {
                    warning() << "Too many geoNear results for query " << rewritten.toString()
                              << ", truncating output.";
                    break;
                }

                // Add the next result to the result builder.
                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (includeLocs) {
                    oneResultBuilder.appendAs(currObj["$pt"], "loc");
                }
                oneResultBuilder.append("obj", resObj);
                oneResultBuilder.done();
                ++results;
            }

            resultBuilder.done();

            // Fill out the stats subobj.
            BSONObjBuilder stats(result.subobjStart("stats"));

            // Fill in nscanned from the explain.
            PlanSummaryStats summary;
            Explain::getSummaryStats(exec.get(), &summary);
            stats.appendNumber("nscanned", summary.totalKeysExamined);
            stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", txn->getCurOp()->elapsedMillis());
            stats.done();

            return true;
        }
Example #9
0
    // static
    QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query,
                                                         const QueryPlannerParams& params,
                                                         QuerySolutionNode* solnRoot,
                                                         bool* blockingSortOut) {
        *blockingSortOut = false;

        const BSONObj& sortObj = query.getParsed().getSort();

        if (sortObj.isEmpty()) {
            return solnRoot;
        }

        // TODO: We could check sortObj for any projections other than :1 and :-1
        // and short-cut some of this.

        // If the sort is $natural, we ignore it, assuming that the caller has detected that and
        // outputted a collscan to satisfy the desired order.
        BSONElement natural = sortObj.getFieldDotted("$natural");
        if (!natural.eoo()) {
            return solnRoot;
        }

        // See if solnRoot gives us the sort.  If so, we're done.
        BSONObjSet sorts = solnRoot->getSort();

        // If the sort we want is in the set of sort orders provided already, bail out.
        if (sorts.end() != sorts.find(sortObj)) {
            return solnRoot;
        }

        // Sort is not provided.  See if we provide the reverse of our sort pattern.
        // If so, we can reverse the scan direction(s).
        BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
        if (sorts.end() != sorts.find(reverseSort)) {
            QueryPlannerCommon::reverseScans(solnRoot);
            QLOG() << "Reversing ixscan to provide sort. Result: "
                   << solnRoot->toString() << endl;
            return solnRoot;
        }

        // Sort not provided, can't reverse scans to get the sort.  One last trick: We can "explode"
        // index scans over point intervals to an OR of sub-scans in order to pull out a sort.
        // Let's try this.
        if (explodeForSort(query, params, &solnRoot)) {
            return solnRoot;
        }

        // If we're here, we need to add a sort stage.

        // If we're not allowed to put a blocking sort in, bail out.
        if (params.options & QueryPlannerParams::NO_BLOCKING_SORT) {
            delete solnRoot;
            return NULL;
        }

        // Add a fetch stage so we have the full object when we hit the sort stage.  TODO: Can we
        // pull the values that we sort by out of the key and if so in what cases?  Perhaps we can
        // avoid a fetch.
        if (!solnRoot->fetched()) {
            FetchNode* fetch = new FetchNode();
            fetch->children.push_back(solnRoot);
            solnRoot = fetch;
        }

        // And build the full sort stage.
        SortNode* sort = new SortNode();
        sort->pattern = sortObj;
        sort->query = query.getParsed().getFilter();
        sort->children.push_back(solnRoot);
        solnRoot = sort;
        // When setting the limit on the sort, we need to consider both
        // the limit N and skip count M. The sort should return an ordered list
        // of N + M items so that the skip stage can discard the first M results.
        if (0 != query.getParsed().getNumToReturn()) {
            // Overflow here would be bad and could cause a nonsense limit. Cast
            // skip and limit values to unsigned ints to make sure that the
            // sum is never stored as signed. (See SERVER-13537).
            sort->limit = size_t(query.getParsed().getNumToReturn()) +
                          size_t(query.getParsed().getSkip());

            // This is a SORT with a limit. The wire protocol has a single quantity
            // called "numToReturn" which could mean either limit or batchSize.
            // We have no idea what the client intended. One way to handle the ambiguity
            // of a limited OR stage is to use the SPLIT_LIMITED_SORT hack.
            //
            // If numToReturn is really a limit, then we want to add a limit to this
            // SORT stage, and hence perform a topK.
            //
            // If numToReturn is really a batchSize, then we want to perform a regular
            // blocking sort.
            //
            // Since we don't know which to use, just join the two options with an OR,
            // with the topK first. If the client wants a limit, they'll get the efficiency
            // of topK. If they want a batchSize, the other OR branch will deliver the missing
            // results. The OR stage handles deduping.
            if (params.options & QueryPlannerParams::SPLIT_LIMITED_SORT
                && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)
                && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO)
                && !QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
                // If we're here then the SPLIT_LIMITED_SORT hack is turned on,
                // and the query is of a type that allows the hack.
                //
                // Not allowed for geo or text, because we assume elsewhere that those
                // stages appear just once.
                OrNode* orn = new OrNode();
                orn->children.push_back(sort);
                SortNode* sortClone = static_cast<SortNode*>(sort->clone());
                sortClone->limit = 0;
                orn->children.push_back(sortClone);
                solnRoot = orn;
            }
        }
        else {
            sort->limit = 0;
        }

        *blockingSortOut = true;

        return solnRoot;
    }
/* ****************************************************************************
*
* addTriggeredSubscriptions
*
*/
static bool addTriggeredSubscriptions(ContextRegistration                  cr,
                                      map<string, TriggeredSubscription*>& subs,
                                      std::string&                         err,
                                      std::string                          tenant)
{
    DBClientBase* connection = NULL;

    BSONArrayBuilder entitiesNoPatternA;
    std::vector<std::string> idJsV;
    std::vector<std::string> typeJsV;

    for (unsigned int ix = 0; ix < cr.entityIdVector.size(); ++ix ) {
        //FIXME: take into account subscriptions with no type
        EntityId* enP = cr.entityIdVector.get(ix);
        /* The registration of isPattern=true entities is not supported, so we don't include them here */
        if (enP->isPattern == "false") {
            entitiesNoPatternA.append(BSON(CASUB_ENTITY_ID << enP->id << CASUB_ENTITY_TYPE << enP->type << CASUB_ENTITY_ISPATTERN << "false"));
            idJsV.push_back(enP->id);
            typeJsV.push_back(enP->type);
        }
    }
    BSONArrayBuilder attrA;
    for (unsigned int ix = 0; ix < cr.contextRegistrationAttributeVector.size(); ++ix) {
        ContextRegistrationAttribute* craP = cr.contextRegistrationAttributeVector.get(ix);
        attrA.append(craP->name);
    }

    BSONObjBuilder queryNoPattern;
    queryNoPattern.append(CASUB_ENTITIES, BSON("$in" << entitiesNoPatternA.arr()));
    if (attrA.arrSize() > 0) {
        /* If we don't do this check, the {$in: []} in the attribute name part would
         * make the query fail */
        //queryB.append(CASUB_ATTRS, BSON("$in" << attrA.arr()));
        queryNoPattern.append("$or", BSON_ARRAY(
                          BSON(CASUB_ATTRS << BSON("$in" << attrA.arr())) <<
                          BSON(CASUB_ATTRS << BSON("$size" << 0))
                          ));
    }
    else {
        queryNoPattern.append(CASUB_ATTRS, BSON("$size" << 0));
    }
    queryNoPattern.append(CASUB_EXPIRATION, BSON("$gt" << (long long) getCurrentTime()));

    /* This is JavaScript code that runs in MongoDB engine. As far as I know, this is the only
     * way to do a "reverse regex" query in MongoDB (see
     * http://stackoverflow.com/questions/15966991/mongodb-reverse-regex/15989520).
     * Note that although we are using an isPattern=true filter in the MongoDB query besides $where, we
     * also need to check it again in the if statement of the JavaScript function, given that a single
     * sub document could include both isPattern=true and isPattern=false documents */
    std::string idJsString = "[ ";
    for (unsigned int ix = 0; ix < idJsV.size(); ++ix ) {
        if (ix != idJsV.size()-1) {
            idJsString += "\""+idJsV[ix]+ "\" ,";
        }
        else {
            idJsString += "\"" +idJsV[ix]+ "\"";
        }
    }
    idJsString += " ]";

    std::string typeJsString = "[ ";
    for (unsigned int ix = 0; ix < typeJsV.size(); ++ix ) {
        if (ix != typeJsV.size()-1) {
            typeJsString += "\"" +typeJsV[ix] + "\" ,";
        }
        else {
            typeJsString += "\"" + typeJsV[ix] + "\"";
        }
    }
    typeJsString += " ]";

    std::string function = std::string("function()") +
         "{" +
            "enId = "+idJsString+ ";" +
            "enType = "+typeJsString+ ";" +
            "for (var i=0; i < this."+CASUB_ENTITIES+".length; i++) {" +
                "if (this."+CASUB_ENTITIES+"[i]."+CASUB_ENTITY_ISPATTERN+" == \"true\") {" +
                    "for (var j=0; j < enId.length; j++) {" +
                       "if (enId[j].match(this."+CASUB_ENTITIES+"[i]."+CASUB_ENTITY_ID+") && this."+CASUB_ENTITIES+"[i]."+CASUB_ENTITY_TYPE+" == enType[j]) {" +
                          "return true; " +
                       "}" +
                    "}" +
                "}" +
            "}" +
            "return false; " +
         "}";
    LM_T(LmtMongo, ("JS function: %s", function.c_str()));

    std::string entPatternQ = CSUB_ENTITIES "." CSUB_ENTITY_ISPATTERN;

    BSONObjBuilder queryPattern;
    queryPattern.append(entPatternQ, "true");
    queryPattern.append(CASUB_EXPIRATION, BSON("$gt" << (long long) getCurrentTime()));
    queryPattern.appendCode("$where", function);

    BSONObj query = BSON("$or" << BSON_ARRAY(queryNoPattern.obj() << queryPattern.obj()));

    /* Do the query */
    auto_ptr<DBClientCursor> cursor;
    LM_T(LmtMongo, ("query() in '%s' collection: '%s'", getSubscribeContextAvailabilityCollectionName(tenant).c_str(), query.toString().c_str()));
    try
    {
        connection = getMongoConnection();
        cursor = connection->query(getSubscribeContextAvailabilityCollectionName(tenant).c_str(), query);

        /*
         * We have observed that in some cases of DB errors (e.g. the database daemon is down) instead of
         * raising an exception, the query() method sets the cursor to NULL. In this case, we raise the
         * exception ourselves
         */
        if (cursor.get() == NULL)
        {
            throw DBException("Null cursor from mongo (details on this is found in the source code)", 0);
        }

        releaseMongoConnection(connection);

        LM_I(("Database Operation Successful (%s)", query.toString().c_str()));
    }
    catch (const DBException &e)
    {
        releaseMongoConnection(connection);

        err = std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
               " - query(): " + query.toString() +
               " - exception: " + e.what();
        LM_E(("Database Error (%s)", err.c_str()));

        return false;
    }
    catch (...)
    {
        releaseMongoConnection(connection);

        err = std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
               " - query(): " + query.toString() +
               " - exception: " + "generic";
        LM_E(("Database Error (%s)", err.c_str()));
        return false;
    }

    /* For each one of the subscriptions found, add it to the map (if not already there) */
    while (cursor->more())
    {
        BSONObj     sub     = cursor->next();
        BSONElement idField = sub.getField("_id");

        //
        // BSONElement::eoo returns true if 'not found', i.e. the field "_id" doesn't exist in 'sub'
        //
        // Now, if 'sub.getField("_id")' is not found, if we continue, calling OID() on it, then we get
        // an exception and the broker crashes.
        //
        if (idField.eoo() == true)
        {
          LM_E(("Database Error (error retrieving _id field in doc: %s)", sub.toString().c_str()));
          continue;
        }

        std::string subIdStr = idField.OID().toString();

        if (subs.count(subIdStr) == 0) {
            LM_T(LmtMongo, ("adding subscription: '%s'", sub.toString().c_str()));

            TriggeredSubscription* trigs = new TriggeredSubscription(sub.hasField(CASUB_FORMAT) ? stringToFormat(STR_FIELD(sub, CASUB_FORMAT)) : XML,
                                                                     STR_FIELD(sub, CASUB_REFERENCE),
                                                                     subToAttributeList(sub));

            subs.insert(std::pair<string, TriggeredSubscription*>(subIdStr, trigs));
        }
    }

    return true;
}
Status AuthzManagerExternalStateMongos::getUserDescription(OperationContext* opCtx,
                                                           const UserName& userName,
                                                           BSONObj* result) {
    if (!shouldUseRolesFromConnection(opCtx, userName)) {
        BSONObj usersInfoCmd =
            BSON("usersInfo" << BSON_ARRAY(BSON(AuthorizationManager::USER_NAME_FIELD_NAME
                                                << userName.getUser()
                                                << AuthorizationManager::USER_DB_FIELD_NAME
                                                << userName.getDB()))
                             << "showPrivileges"
                             << true
                             << "showCredentials"
                             << true
                             << "showAuthenticationRestrictions"
                             << true);
        BSONObjBuilder builder;
        const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
            opCtx, "admin", usersInfoCmd, &builder);
        BSONObj cmdResult = builder.obj();
        if (!ok) {
            return getStatusFromCommandResult(cmdResult);
        }

        std::vector<BSONElement> foundUsers = cmdResult["users"].Array();
        if (foundUsers.size() == 0) {
            return Status(ErrorCodes::UserNotFound,
                          "User \"" + userName.toString() + "\" not found");
        }

        if (foundUsers.size() > 1) {
            return Status(ErrorCodes::UserDataInconsistent,
                          str::stream() << "Found multiple users on the \"" << userName.getDB()
                                        << "\" database with name \""
                                        << userName.getUser()
                                        << "\"");
        }
        *result = foundUsers[0].Obj().getOwned();
        return Status::OK();
    } else {
        // Obtain privilege information from the config servers for all roles acquired from the X509
        // certificate.
        BSONArrayBuilder userRolesBuilder;
        auto& sslPeerInfo = SSLPeerInfo::forSession(opCtx->getClient()->session());
        for (const RoleName& role : sslPeerInfo.roles) {
            userRolesBuilder.append(BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
                                         << role.getRole()
                                         << AuthorizationManager::ROLE_DB_FIELD_NAME
                                         << role.getDB()));
        }
        BSONArray providedRoles = userRolesBuilder.arr();

        BSONObj rolesInfoCmd = BSON("rolesInfo" << providedRoles << "showPrivileges"
                                                << "asUserFragment");

        BSONObjBuilder cmdResultBuilder;
        const bool cmdOk = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
            opCtx, "admin", rolesInfoCmd, &cmdResultBuilder);
        BSONObj cmdResult = cmdResultBuilder.obj();
        if (!cmdOk || !cmdResult["userFragment"].ok()) {
            return Status(ErrorCodes::FailedToParse,
                          "Unable to get resolved X509 roles from config server: " +
                              getStatusFromCommandResult(cmdResult).toString());
        }
        cmdResult = cmdResult["userFragment"].Obj().getOwned();
        BSONElement userRoles = cmdResult["roles"];
        BSONElement userInheritedRoles = cmdResult["inheritedRoles"];
        BSONElement userInheritedPrivileges = cmdResult["inheritedPrivileges"];

        if (userRoles.eoo() || userInheritedRoles.eoo() || userInheritedPrivileges.eoo() ||
            !userRoles.isABSONObj() || !userInheritedRoles.isABSONObj() ||
            !userInheritedPrivileges.isABSONObj()) {
            return Status(
                ErrorCodes::UserDataInconsistent,
                "Recieved malformed response to request for X509 roles from config server");
        }

        *result = BSON("_id" << userName.getUser() << "user" << userName.getUser() << "db"
                             << userName.getDB()
                             << "credentials"
                             << BSON("external" << true)
                             << "roles"
                             << BSONArray(cmdResult["roles"].Obj())
                             << "inheritedRoles"
                             << BSONArray(cmdResult["inheritedRoles"].Obj())
                             << "inheritedPrivileges"
                             << BSONArray(cmdResult["inheritedPrivileges"].Obj()));
        return Status::OK();
    }
}
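For orientation, a minimal sketch of the "userFragment" document the X509 branch above expects back from the config server; the three array field names come from the parsing code, while the role and privilege contents are illustrative assumptions:

// Hypothetical rolesInfo "asUserFragment" response consumed above:
// {
//   userFragment: {
//     roles:               [ { role: "readWrite", db: "test" } ],
//     inheritedRoles:      [ { role: "readWrite", db: "test" } ],
//     inheritedPrivileges: [ { resource: { db: "test", collection: "" },
//                             actions: [ "find", "insert", "update" ] } ]
//   }
// }
// All three fields must be present and be BSON objects/arrays; otherwise the
// function returns ErrorCodes::UserDataInconsistent.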
Beispiel #12
Status MemberConfig::initialize(const BSONObj& mcfg, ReplicaSetTagConfig* tagConfig) {
    Status status = bsonCheckOnlyHasFields(
        "replica set member configuration", mcfg, kLegalMemberConfigFieldNames);
    if (!status.isOK())
        return status;

    //
    // Parse _id field.
    //
    BSONElement idElement = mcfg[kIdFieldName];
    if (idElement.eoo()) {
        return Status(ErrorCodes::NoSuchKey, str::stream() << kIdFieldName << " field is missing");
    }
    if (!idElement.isNumber()) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kIdFieldName << " field has non-numeric type "
                                    << typeName(idElement.type()));
    }
    _id = idElement.numberInt();

    //
    // Parse h field.
    //
    std::string hostAndPortString;
    status = bsonExtractStringField(mcfg, kHostFieldName, &hostAndPortString);
    if (!status.isOK())
        return status;
    boost::trim(hostAndPortString);
    status = _host.initialize(hostAndPortString);
    if (!status.isOK())
        return status;
    if (!_host.hasPort()) {
        // make port explicit even if default.
        _host = HostAndPort(_host.host(), _host.port());
    }

    //
    // Parse votes field.
    //
    BSONElement votesElement = mcfg[kVotesFieldName];
    if (votesElement.eoo()) {
        _votes = kVotesFieldDefault;
    } else if (votesElement.isNumber()) {
        _votes = votesElement.numberInt();
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kVotesFieldName << " field value has non-numeric type "
                                    << typeName(votesElement.type()));
    }

    //
    // Parse priority field.
    //
    BSONElement priorityElement = mcfg[kPriorityFieldName];
    if (priorityElement.eoo()) {
        _priority = kPriorityFieldDefault;
    } else if (priorityElement.isNumber()) {
        _priority = priorityElement.numberDouble();
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kPriorityFieldName << " field has non-numeric type "
                                    << typeName(priorityElement.type()));
    }

    //
    // Parse arbiterOnly field.
    //
    status = bsonExtractBooleanFieldWithDefault(
        mcfg, kArbiterOnlyFieldName, kArbiterOnlyFieldDefault, &_arbiterOnly);
    if (!status.isOK())
        return status;

    //
    // Parse slaveDelay field.
    //
    BSONElement slaveDelayElement = mcfg[kSlaveDelayFieldName];
    if (slaveDelayElement.eoo()) {
        _slaveDelay = kSlaveDelayFieldDefault;
    } else if (slaveDelayElement.isNumber()) {
        _slaveDelay = Seconds(slaveDelayElement.numberInt());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << kSlaveDelayFieldName << " field value has non-numeric type "
                                    << typeName(slaveDelayElement.type()));
    }

    //
    // Parse hidden field.
    //
    status =
        bsonExtractBooleanFieldWithDefault(mcfg, kHiddenFieldName, kHiddenFieldDefault, &_hidden);
    if (!status.isOK())
        return status;

    //
    // Parse buildIndexes field.
    //
    status = bsonExtractBooleanFieldWithDefault(
        mcfg, kBuildIndexesFieldName, kBuildIndexesFieldDefault, &_buildIndexes);
    if (!status.isOK())
        return status;

    //
    // Parse "tags" field.
    //
    _tags.clear();
    BSONElement tagsElement;
    status = bsonExtractTypedField(mcfg, kTagsFieldName, Object, &tagsElement);
    if (status.isOK()) {
        for (BSONObj::iterator tagIter(tagsElement.Obj()); tagIter.more();) {
            const BSONElement& tag = tagIter.next();
            if (tag.type() != String) {
                return Status(ErrorCodes::TypeMismatch,
                              str::stream() << "tags." << tag.fieldName()
                                            << " field has non-string value of type "
                                            << typeName(tag.type()));
            }
            _tags.push_back(tagConfig->makeTag(tag.fieldNameStringData(), tag.valueStringData()));
        }
    } else if (ErrorCodes::NoSuchKey != status) {
        return status;
    }

    //
    // Add internal tags based on other member properties.
    //

    // Add a voter tag if this non-arbiter member votes; use _id for uniqueness.
    const std::string id = str::stream() << _id;
    if (isVoter() && !_arbiterOnly) {
        _tags.push_back(tagConfig->makeTag(kInternalVoterTagName, id));
    }

    // Add an electable tag if this member is electable.
    if (isElectable()) {
        _tags.push_back(tagConfig->makeTag(kInternalElectableTagName, id));
    }

    // Add a tag for generic counting of this node.
    if (!_arbiterOnly) {
        _tags.push_back(tagConfig->makeTag(kInternalAllTagName, id));
    }

    return Status::OK();
}
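A minimal sketch of a member-configuration document that initialize() accepts; the field spellings follow the constants used above (assumed), and the host, tag, and numeric values are illustrative:

// Hypothetical replica-set member entry:
// {
//   _id: 0,                             // required, numeric
//   host: "node1.example.net:27017",    // required string; trimmed, port made explicit
//   votes: 1,                           // optional numeric
//   priority: 1.0,                      // optional numeric
//   arbiterOnly: false,                 // optional boolean
//   slaveDelay: 0,                      // optional numeric (seconds)
//   hidden: false,                      // optional boolean
//   buildIndexes: true,                 // optional boolean
//   tags: { dc: "east" }                // optional object; every value must be a string
// }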
Status WriteConcernOptions::parse(const BSONObj& obj) {
    reset();
    if (obj.isEmpty()) {
        return Status(ErrorCodes::FailedToParse, "write concern object cannot be empty");
    }

    BSONElement jEl;
    BSONElement fsyncEl;
    BSONElement wEl;


    for (auto e : obj) {
        const auto fieldName = e.fieldNameStringData();
        if (fieldName == kJFieldName) {
            jEl = e;
            if (!jEl.isNumber() && jEl.type() != Bool) {
                return Status(ErrorCodes::FailedToParse, "j must be numeric or a boolean value");
            }
        } else if (fieldName == kFSyncFieldName) {
            fsyncEl = e;
            if (!fsyncEl.isNumber() && fsyncEl.type() != Bool) {
                return Status(ErrorCodes::FailedToParse,
                              "fsync must be numeric or a boolean value");
            }
        } else if (fieldName == kWFieldName) {
            wEl = e;
        } else if (fieldName == kWTimeoutFieldName) {
            wTimeout = e.numberInt();
        } else if (fieldName == kWElectionIdFieldName) {
            // Ignore.
        } else if (fieldName == kWOpTimeFieldName) {
            // Ignore.
        } else if (fieldName.equalCaseInsensitive(kGetLastErrorFieldName)) {
            // Ignore GLE field.
        } else {
            return Status(ErrorCodes::FailedToParse,
                          str::stream() << "unrecognized write concern field: " << fieldName);
        }
    }

    const bool j = jEl.trueValue();
    const bool fsync = fsyncEl.trueValue();

    if (j && fsync)
        return Status(ErrorCodes::FailedToParse, "fsync and j options cannot be used together");

    if (j) {
        syncMode = SyncMode::JOURNAL;
    } else if (fsync) {
        syncMode = SyncMode::FSYNC;
    } else if (!jEl.eoo()) {
        syncMode = SyncMode::NONE;
    }

    if (wEl.isNumber()) {
        wNumNodes = wEl.numberInt();
    } else if (wEl.type() == String) {
        wMode = wEl.valuestrsafe();
    } else if (wEl.eoo() || wEl.type() == jstNULL || wEl.type() == Undefined) {
        wNumNodes = 1;
    } else {
        return Status(ErrorCodes::FailedToParse, "w has to be a number or a string");
    }

    return Status::OK();
}
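A few write-concern documents illustrating the branches above, assuming the usual field spellings ("w", "j", "fsync", "wtimeout"); the values are illustrative:

// { w: "majority", j: true, wtimeout: 5000 }
//   -> syncMode = JOURNAL, wMode = "majority", wTimeout = 5000
// { w: 2 }
//   -> wNumNodes = 2 (w may be a number, a string, or omitted/null)
// { j: true, fsync: true }
//   -> rejected with FailedToParse: the two sync options cannot be combined
// { }
//   -> rejected with FailedToParse: an empty write concern object is not allowed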
Beispiel #14
INT32 _fmpController::_handleOneLoop( const BSONObj &obj,
                                      INT32 step )
{
   INT32 rc = SDB_OK ;
   BSONObj res ;

   if ( FMP_CONTROL_STEP_BEGIN == step )
   {
      UINT32 seqID = 1 ;
      BSONElement beSeq = obj.getField( FMP_SEQ_ID ) ;
      if ( beSeq.isNumber() )
      {
         seqID = (UINT32)beSeq.numberInt() ;
      }
      BSONElement diag = obj.getField( FMP_DIAG_PATH ) ;
      if ( !diag.eoo() && String == diag.type() )
      {
         CHAR diaglogShort[ OSS_MAX_PATHSIZE + 1 ] = { 0 } ;
         ossSnprintf( diaglogShort, OSS_MAX_PATHSIZE, "%s_%u.%s",
                      PD_FMP_DIAGLOG_PREFIX, seqID, PD_FMP_DIAGLOG_SUBFIX ) ;

         CHAR diaglog[ OSS_MAX_PATHSIZE + 1 ] = {0} ;
         engine::utilBuildFullPath( diag.valuestrsafe(), diaglogShort,
                                    OSS_MAX_PATHSIZE, diaglog ) ;
         sdbEnablePD( diaglog ) ;
      }
      BSONElement localService = obj.getField( FMP_LOCAL_SERVICE ) ;
      if ( !localService.eoo() && String == localService.type() &&
           0 == ossStrlen(FMP_COORD_SERVICE) )
      {
         ossMemcpy( FMP_COORD_SERVICE, localService.valuestrsafe(),
                    ossStrlen( localService.valuestrsafe() ) + 1 ) ;
      }
      BSONElement localUser = obj.getField( FMP_LOCAL_USERNAME ) ;
      if ( String == localUser.type() )
      {
         ossStrncpy( g_UserName, localUser.valuestrsafe(),
                     OSS_MAX_PATHSIZE ) ;
      }
      BSONElement localPass = obj.getField( FMP_LOCAL_PASSWORD ) ;
      if ( String == localPass.type() )
      {
         ossStrncpy( g_Password, localPass.valuestrsafe(),
                     OSS_MAX_PATHSIZE ) ;
      }
      BSONElement fType = obj.getField( FMP_FUNC_TYPE ) ;
      if ( fType.eoo() )
      {
         rc = _createVM( FMP_FUNC_TYPE_JS ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to create vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to create vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }

         rc = _vm->init( obj ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to init vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to init vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }
      }
      else if ( NumberInt != fType.type() )
      {
         PD_LOG( PDERROR, "invalid type of func type:%s",
                 fType.toString().c_str() ) ;
         rc = SDB_SYS ;
         res = BSON( FMP_ERR_MSG << "invalid type of func type" <<
                     FMP_RES_CODE << SDB_SYS ) ;
         goto error ;
      }
      else
      {
         rc = _createVM( fType.Int() ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to create vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to create vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }

         rc = _vm->init( obj ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG(PDERROR, "failed to init vm:%d", rc ) ;
            res = BSON( FMP_ERR_MSG << "failed to init vm" <<
                        FMP_RES_CODE << rc ) ;
            goto error ;
         }
      }
   }
   else if ( FMP_CONTROL_STEP_DOWNLOAD == step )
   {
      SDB_ASSERT( NULL != _vm, "impossible" ) ;
      rc = _vm->eval( obj, res ) ;
      if ( SDB_OK  != rc )
      {
         PD_LOG( PDERROR, "failed to pre eval func:%s, rc:%d",
                 obj.toString(FALSE, TRUE).c_str(), rc ) ;
         if ( res.isEmpty() )
         {
            res = BSON( FMP_ERR_MSG << "failed to pre eval func" <<
                        FMP_RES_CODE << rc ) ;
         }
         goto error ;
      }
   }
   else if ( FMP_CONTROL_STEP_EVAL == step )
   {
      rc = _vm->initGlobalDB( res ) ;
      if ( rc )
      {
         PD_LOG( PDWARNING, "Failed to init global db: %s",
                 res.toString( FALSE, TRUE ).c_str() ) ;
      }

      rc = _vm->eval( obj, res ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to eval func:%s, rc:%d",
                 obj.toString(FALSE, TRUE).c_str(), rc ) ;
         if ( res.isEmpty() )
         {
            res = BSON( FMP_ERR_MSG << "failed to eval func" <<
                        FMP_RES_CODE << rc ) ;
         }
         goto error ;
      }
   }
   else if ( FMP_CONTROL_STEP_FETCH == step )
   {
      BSONObj next ;
      rc = _vm->fetch( next ) ;
      if ( !next.isEmpty() )
      {
         res = next ;
      }
      else
      {
         PD_LOG( PDERROR, "a empty obj was fetched out" ) ;
         rc = SDB_SYS ;
         res = BSON( FMP_ERR_MSG << "an empty obj was fetched out" <<
                     FMP_RES_CODE << rc ) ;
         goto error ;
      }

      if ( SDB_DMS_EOC == rc )
      {
         _clear() ;
      }
      else if ( SDB_OK != rc )
      {
         goto error ;
      }
   }
   else
   {
      SDB_ASSERT( FALSE, "impossible" ) ;
   }

done:
   {
   INT32 rrc = SDB_OK ;
   if ( !res.isEmpty() )
   {
      rrc = _writeMsg( res ) ;
   }
   else
   {
      rrc = _writeMsg( BSON( FMP_RES_CODE << rc ) ) ;
   }
   if ( SDB_OK != rrc )
   {
      rc = rrc ;
      PD_LOG( PDERROR, "failed to write msg:%d", rc ) ;
   }
   }
   return rc ;
error:
   goto done ;
}
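For reference, a sketch of the message fields the FMP_CONTROL_STEP_BEGIN branch above reads; the field-name macros are taken from the code, while the concrete values are purely illustrative:

// Hypothetical "begin" message:
// {
//   <FMP_CONTROL_FIELD>  : FMP_CONTROL_STEP_BEGIN,
//   <FMP_SEQ_ID>         : 3,            // numeric, defaults to 1 when absent
//   <FMP_DIAG_PATH>      : "<path>",     // string; enables a per-sequence diaglog
//   <FMP_LOCAL_SERVICE>  : "<service>",  // string; copied into FMP_COORD_SERVICE once
//   <FMP_LOCAL_USERNAME> : "<user>",     // string
//   <FMP_LOCAL_PASSWORD> : "<passwd>",   // string
//   <FMP_FUNC_TYPE>      : <NumberInt>   // omitted -> a JS VM is created
// }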
Beispiel #15
        bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {

            if ( !globalScriptEngine ) {
                errmsg = "server-side JavaScript execution is disabled";
                return false;
            }
            
            /* db.$cmd.findOne( { group : <p> } ) */
            const BSONObj& p = jsobj.firstElement().embeddedObjectUserCheck();

            BSONObj q;
            if ( p["cond"].type() == Object )
                q = p["cond"].embeddedObject();
            else if ( p["condition"].type() == Object )
                q = p["condition"].embeddedObject();
            else
                q = getQuery( p );

            if ( p["ns"].type() != String ) {
                errmsg = "ns has to be set";
                return false;
            }

            string ns = dbname + "." + p["ns"].String();

            BSONObj key;
            string keyf;
            if ( p["key"].type() == Object ) {
                key = p["key"].embeddedObjectUserCheck();
                if ( ! p["$keyf"].eoo() ) {
                    errmsg = "can't have key and $keyf";
                    return false;
                }
            }
            else if ( p["$keyf"].type() ) {
                keyf = p["$keyf"]._asCode();
            }
            else {
                // no key specified, will use entire object as key
            }

            BSONElement reduce = p["$reduce"];
            if ( reduce.eoo() ) {
                errmsg = "$reduce has to be set";
                return false;
            }

            BSONElement initial = p["initial"];
            if ( initial.type() != Object ) {
                errmsg = "initial has to be an object";
                return false;
            }


            string finalize;
            if (p["finalize"].type())
                finalize = p["finalize"]._asCode();

            return group( dbname , ns , q ,
                          key , keyf , reduce._asCode() , reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeDataUnsafe() ,
                          initial.embeddedObject() , finalize ,
                          errmsg , result );
        }
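A sketch of the group command document this handler parses; the field names match the lookups above, while the collection, query, and functions are illustrative:

// db.$cmd.findOne( { group: {
//     ns: "orders",                        // required string
//     cond: { status: "A" },               // or "condition"; else getQuery(p) is used
//     key: { cust_id: 1 },                 // or $keyf (a JS function), but not both
//     $reduce: "function(cur, out) { out.total += cur.amount; }",  // required
//     initial: { total: 0 },               // required object
//     finalize: "function(out) { return out; }"                    // optional
// } } )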
Beispiel #16
Status ModifierAddToSet::init(const BSONElement& modExpr, const Options& opts, bool* positional) {
    // Perform standard field name and updateable checks.
    _fieldRef.parse(modExpr.fieldName());
    Status status = fieldchecker::isUpdatable(_fieldRef);
    if (!status.isOK()) {
        return status;
    }

    // If a $-positional operator was used, get the index in which it occurred
    // and ensure only one occurrence.
    size_t foundCount;
    bool foundDollar = fieldchecker::isPositional(_fieldRef, &_posDollar, &foundCount);

    if (positional)
        *positional = foundDollar;

    if (foundDollar && foundCount > 1) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                    << _fieldRef.dottedField()
                                    << "'");
    }

    // TODO: The driver could potentially do this re-writing.

    // If the type of the value is 'Object', we might be dealing with a $each. See if that
    // is the case.
    if (modExpr.type() == mongo::Object) {
        BSONElement modExprObjPayload = modExpr.embeddedObject().firstElement();
        if (!modExprObjPayload.eoo() && StringData(modExprObjPayload.fieldName()) == "$each") {
            // It is a $each. Verify that the payload is an array as is required for $each,
            // set our flag, and store the array as our value.
            if (modExprObjPayload.type() != mongo::Array) {
                return Status(ErrorCodes::BadValue,
                              str::stream() << "The argument to $each in $addToSet must "
                                               "be an array but it was of type "
                                            << typeName(modExprObjPayload.type()));
            }

            status = _valDoc.root().appendElement(modExprObjPayload);
            if (!status.isOK())
                return status;

            _val = _valDoc.root().leftChild();
        }
    }

    // If this wasn't an 'each', turn it into one. No need to sort or de-dup since we only
    // have one element.
    if (_val == _valDoc.end()) {
        mb::Element each = _valDoc.makeElementArray("$each");

        status = each.appendElement(modExpr);
        if (!status.isOK())
            return status;

        status = _valDoc.root().pushBack(each);
        if (!status.isOK())
            return status;

        _val = each;
    }

    // Check that no invalid data (such as fields with '$'s) is used in the $each
    // clause.
    mb::ConstElement valCursor = _val.leftChild();
    while (valCursor.ok()) {
        const BSONType type = valCursor.getType();
        dassert(valCursor.hasValue());
        switch (type) {
            case mongo::Object: {
                Status s = valCursor.getValueObject().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            case mongo::Array: {
                Status s = valCursor.getValueArray().storageValidEmbedded();
                if (!s.isOK())
                    return s;

                break;
            }
            default:
                break;
        }

        valCursor = valCursor.rightSibling();
    }

    setCollator(opts.collator);
    return Status::OK();
}
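The two $addToSet argument shapes the initializer above normalizes into a "$each" array; the field and value names are illustrative:

// Plain value:    { $addToSet: { tags: "ssl" } }
//   -> internally rewritten as { $each: [ "ssl" ] }
// Explicit $each: { $addToSet: { tags: { $each: [ "ssl", "tls" ] } } }
//   -> the $each payload must be an array, otherwise ErrorCodes::BadValue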
    Status V1PrivilegeDocumentParser::checkValidPrivilegeDocument(const StringData& dbname,
                                                                  const BSONObj& doc) const {
        BSONElement userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
        BSONElement userSourceElement = doc[AuthorizationManager::USER_SOURCE_FIELD_NAME];
        BSONElement passwordElement = doc[AuthorizationManager::PASSWORD_FIELD_NAME];
        BSONElement rolesElement = doc[ROLES_FIELD_NAME];
        BSONElement otherDBRolesElement = doc[OTHER_DB_ROLES_FIELD_NAME];
        BSONElement readOnlyElement = doc[READONLY_FIELD_NAME];

        // Validate the "user" element.
        if (userElement.type() != String)
            return _badValue("system.users entry needs 'user' field to be a string", 14051);
        if (makeStringDataFromBSONElement(userElement).empty())
            return _badValue("system.users entry needs 'user' field to be non-empty", 14053);

        // Must set exactly one of "userSource" and "pwd" fields.
        if (userSourceElement.eoo() == passwordElement.eoo()) {
            return _badValue("system.users entry must have either a 'pwd' field or a 'userSource' "
                             "field, but not both", 0);
        }

        if (!AuthorizationManager::getSupportOldStylePrivilegeDocuments() && rolesElement.eoo()) {
            return _oldPrivilegeFormatNotSupported();
        }

        // Cannot have both "roles" and "readOnly" elements.
        if (!rolesElement.eoo() && !readOnlyElement.eoo()) {
            return _badValue("system.users entry must not have both 'roles' and 'readOnly' fields",
                             0);
        }

        // Validate the "pwd" element, if present.
        if (!passwordElement.eoo()) {
            if (passwordElement.type() != String)
                return _badValue("system.users entry needs 'pwd' field to be a string", 14052);
            if (makeStringDataFromBSONElement(passwordElement).empty())
                return _badValue("system.users entry needs 'pwd' field to be non-empty", 14054);
        }

        // Validate the "userSource" element, if present.
        if (!userSourceElement.eoo()) {
            if (userSourceElement.type() != String ||
                makeStringDataFromBSONElement(userSourceElement).empty()) {

                return _badValue("system.users entry needs 'userSource' field to be a non-empty "
                                 "string, if present", 0);
            }
            if (userSourceElement.str() == dbname) {
                return _badValue(mongoutils::str::stream() << "'" << dbname <<
                                 "' is not a valid value for the userSource field in " <<
                                 dbname << ".system.users entries",
                                 0);
            }
            if (rolesElement.eoo()) {
                return _badValue("system.users entry needs 'roles' field if 'userSource' field "
                                 "is present.", 0);
            }
        }

        // Validate the "roles" element.
        if (!rolesElement.eoo()) {
            Status status = _checkV1RolesArray(rolesElement);
            if (!status.isOK())
                return status;
        }

        if (!otherDBRolesElement.eoo()) {
            if (dbname != ADMIN_DBNAME) {
                return _badValue("Only admin.system.users entries may contain 'otherDBRoles' "
                                 "fields", 0);
            }
            if (rolesElement.eoo()) {
                return _badValue("system.users entries with 'otherDBRoles' fields must contain "
                                 "'roles' fields", 0);
            }
            if (otherDBRolesElement.type() != Object) {
                return _badValue("'otherDBRoles' field must be an object when present in "
                                 "system.users entries", 0);
            }
            for (BSONObjIterator iter(otherDBRolesElement.embeddedObject());
                 iter.more(); iter.next()) {

                Status status = _checkV1RolesArray(*iter);
                if (!status.isOK())
                    return status;
            }
        }

        return Status::OK();
    }
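Two old-style system.users entries that pass the checks above, assuming the usual field spellings; the names and roles are illustrative:

// Password-based user (exactly one of 'pwd' / 'userSource' must be present):
// { user: "alice", pwd: "<password hash>", roles: [ "readWrite" ] }
//
// Externally sourced user; 'roles' is mandatory whenever 'userSource' is set:
// { user: "bob", userSource: "otherdb", roles: [ "read" ] }
//
// Rejected: both 'pwd' and 'userSource', or 'roles' combined with 'readOnly'.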
Status ReplSetHeartbeatResponse::initialize(const BSONObj& doc, long long term) {
    // Old versions set this even when they did not return "ok".
    _mismatch = doc[kMismatchFieldName].trueValue();
    if (_mismatch)
        return Status(ErrorCodes::InconsistentReplicaSetNames, "replica set name doesn't match.");

    // Old versions sometimes set the replica set name ("set") even when returning ok:0.
    const BSONElement replSetNameElement = doc[kReplSetFieldName];
    if (replSetNameElement.eoo()) {
        _setName.clear();
    } else if (replSetNameElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kReplSetFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type String, but found "
                                    << typeName(replSetNameElement.type()));
    } else {
        _setName = replSetNameElement.String();
    }

    if (_setName.empty() && !doc[kOkFieldName].trueValue()) {
        std::string errMsg = doc[kErrMsgFieldName].str();

        BSONElement errCodeElem = doc[kErrorCodeFieldName];
        if (errCodeElem.ok()) {
            if (!errCodeElem.isNumber())
                return Status(ErrorCodes::BadValue, "Error code is not a number!");

            int errorCode = errCodeElem.numberInt();
            return Status(ErrorCodes::Error(errorCode), errMsg);
        }
        return Status(ErrorCodes::UnknownError, errMsg);
    }

    const BSONElement hasDataElement = doc[kHasDataFieldName];
    _hasDataSet = !hasDataElement.eoo();
    _hasData = hasDataElement.trueValue();

    const BSONElement electionTimeElement = doc[kElectionTimeFieldName];
    if (electionTimeElement.eoo()) {
        _electionTimeSet = false;
    } else if (electionTimeElement.type() == bsonTimestamp) {
        _electionTimeSet = true;
        _electionTime = electionTimeElement.timestamp();
    } else if (electionTimeElement.type() == Date) {
        _electionTimeSet = true;
        _electionTime = Timestamp(electionTimeElement.date());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kElectionTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have type Date or Timestamp, but found type "
                                    << typeName(electionTimeElement.type()));
    }

    const BSONElement timeElement = doc[kTimeFieldName];
    if (timeElement.eoo()) {
        _timeSet = false;
    } else if (timeElement.isNumber()) {
        _timeSet = true;
        _time = Seconds(timeElement.numberLong());
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have a numeric type, but found type "
                                    << typeName(timeElement.type()));
    }

    _isReplSet = doc[kIsReplSetFieldName].trueValue();

    Status termStatus = bsonExtractIntegerField(doc, kTermFieldName, &_term);
    if (!termStatus.isOK() && termStatus != ErrorCodes::NoSuchKey) {
        return termStatus;
    }

    Status status = bsonExtractOpTimeField(doc, kDurableOpTimeFieldName, &_durableOpTime);
    if (!status.isOK()) {
        if (status != ErrorCodes::NoSuchKey) {
            return status;
        }
    } else {
        _durableOpTimeSet = true;
    }

    // In order to support both the 3.0(V0) and 3.2(V1) heartbeats we must parse the OpTime
    // field based on its type. If it is a Date, we parse it as the timestamp and use
    // initialize's term argument to complete the OpTime type. If it is an Object, then it's
    // V1 and we construct an OpTime out of its nested fields.
    const BSONElement appliedOpTimeElement = doc[kAppliedOpTimeFieldName];
    if (appliedOpTimeElement.eoo()) {
        _appliedOpTimeSet = false;
    } else if (appliedOpTimeElement.type() == bsonTimestamp) {
        _appliedOpTimeSet = true;
        _appliedOpTime = OpTime(appliedOpTimeElement.timestamp(), term);
    } else if (appliedOpTimeElement.type() == Date) {
        _appliedOpTimeSet = true;
        _appliedOpTime = OpTime(Timestamp(appliedOpTimeElement.date()), term);
    } else if (appliedOpTimeElement.type() == Object) {
        Status opTimeStatus = bsonExtractOpTimeField(doc, kAppliedOpTimeFieldName, &_appliedOpTime);
        if (!opTimeStatus.isOK()) {
            return opTimeStatus;
        }
        _appliedOpTimeSet = true;
        // Since a v1 OpTime was in the response, the member must be part of a replica set.
        _isReplSet = true;
    } else {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kAppliedOpTimeFieldName
                                    << "\" field in response to replSetHeartbeat "
                                       "command to have type Date or Timestamp, but found type "
                                    << typeName(appliedOpTimeElement.type()));
    }

    const BSONElement electableElement = doc[kIsElectableFieldName];
    if (electableElement.eoo()) {
        _electableSet = false;
    } else {
        _electableSet = true;
        _electable = electableElement.trueValue();
    }

    const BSONElement memberStateElement = doc[kMemberStateFieldName];
    if (memberStateElement.eoo()) {
        _stateSet = false;
    } else if (memberStateElement.type() != NumberInt && memberStateElement.type() != NumberLong) {
        return Status(
            ErrorCodes::TypeMismatch,
            str::stream() << "Expected \"" << kMemberStateFieldName
                          << "\" field in response to replSetHeartbeat "
                             "command to have type NumberInt or NumberLong, but found type "
                          << typeName(memberStateElement.type()));
    } else {
        long long stateInt = memberStateElement.numberLong();
        if (stateInt < 0 || stateInt > MemberState::RS_MAX) {
            return Status(
                ErrorCodes::BadValue,
                str::stream() << "Value for \"" << kMemberStateFieldName
                              << "\" in response to replSetHeartbeat is "
                                 "out of range; legal values are non-negative and no more than "
                              << MemberState::RS_MAX);
        }
        _stateSet = true;
        _state = MemberState(static_cast<int>(stateInt));
    }

    _stateDisagreement = doc[kHasStateDisagreementFieldName].trueValue();


    // Not required for the case of uninitialized members -- they have no config
    const BSONElement configVersionElement = doc[kConfigVersionFieldName];

    // If we have an optime then we must have a configVersion
    if (_appliedOpTimeSet && configVersionElement.eoo()) {
        return Status(ErrorCodes::NoSuchKey,
                      str::stream() << "Response to replSetHeartbeat missing required \""
                                    << kConfigVersionFieldName
                                    << "\" field even though initialized");
    }

    // If there is a "v" (config version) then it must be an int.
    if (!configVersionElement.eoo() && configVersionElement.type() != NumberInt) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kConfigVersionFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type NumberInt, but found "
                                    << typeName(configVersionElement.type()));
    }
    _configVersion = configVersionElement.numberInt();

    const BSONElement hbMsgElement = doc[kHbMessageFieldName];
    if (hbMsgElement.eoo()) {
        _hbmsg.clear();
    } else if (hbMsgElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kHbMessageFieldName
                                    << "\" field in response to replSetHeartbeat to have "
                                       "type String, but found "
                                    << typeName(hbMsgElement.type()));
    } else {
        _hbmsg = hbMsgElement.String();
    }

    const BSONElement syncingToElement = doc[kSyncSourceFieldName];
    if (syncingToElement.eoo()) {
        _syncingTo = HostAndPort();
    } else if (syncingToElement.type() != String) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kSyncSourceFieldName
                                    << "\" field in response to replSetHeartbeat to "
                                       "have type String, but found "
                                    << typeName(syncingToElement.type()));
    } else {
        _syncingTo = HostAndPort(syncingToElement.String());
    }

    const BSONElement rsConfigElement = doc[kConfigFieldName];
    if (rsConfigElement.eoo()) {
        _configSet = false;
        _config = ReplicaSetConfig();
        return Status::OK();
    } else if (rsConfigElement.type() != Object) {
        return Status(ErrorCodes::TypeMismatch,
                      str::stream() << "Expected \"" << kConfigFieldName
                                    << "\" in response to replSetHeartbeat to have type "
                                       "Object, but found "
                                    << typeName(rsConfigElement.type()));
    }
    _configSet = true;

    return _config.initialize(rsConfigElement.Obj());
}
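A sketch of a well-formed replSetHeartbeat response as consumed by initialize(); the wire spellings of the k*FieldName constants are assumed here, and the values are illustrative:

// {
//   ok: 1,
//   set: "rs0",                              // String
//   state: 2,                                // NumberInt/NumberLong, 0..MemberState::RS_MAX
//   electionTime: <Timestamp or Date>,
//   opTime: { ts: <Timestamp>, t: 1 },       // V1 object form; Timestamp/Date also accepted
//   durableOpTime: { ts: <Timestamp>, t: 1 },
//   v: 2,                                    // NumberInt; required once an opTime is present
//   hbmsg: "",                               // String
//   syncingTo: "node2.example.net:27017",    // String
//   config: { ... }                          // optional Object, parsed by ReplicaSetConfig
// }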
Beispiel #19
    void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::BatchData>& result,
                                                  Fetcher::NextAction* nextAction,
                                                  BSONObjBuilder* getMoreBob) {

        boost::lock_guard<boost::mutex> lk(_mutex);

        _active = false;

        if (!result.isOK()) {
            _work(result.getStatus());
            return;
        }

        auto batchData(result.getValue());
        auto&& documents = batchData.documents;

        // We may be called with multiple batches leading to a need to grow _collectionInfos.
        _collectionInfos.reserve(_collectionInfos.size() + documents.size());
        std::copy_if(documents.begin(), documents.end(),
                     std::back_inserter(_collectionInfos),
                     _listCollectionsPredicate);

        // The fetcher will continue to call with kGetMore until an error or the last batch.
        if (*nextAction == Fetcher::NextAction::kGetMore) {
            invariant(getMoreBob);
            getMoreBob->append("getMore", batchData.cursorId);
            getMoreBob->append("collection", batchData.nss.coll());

            _active = true;
            return;
        }

        // Nothing to do for an empty database.
        if (_collectionInfos.empty()) {
            _work(Status::OK());
            return;
        }

        _collectionNamespaces.reserve(_collectionInfos.size());
        std::set<std::string> seen;
        for (auto&& info : _collectionInfos) {
            BSONElement nameElement = info.getField(kNameFieldName);
            if (nameElement.eoo()) {
                _work(Status(ErrorCodes::FailedToParse, str::stream() <<
                             "collection info must contain '" << kNameFieldName << "' " <<
                             "field : " << info));
                return;
            }
            if (nameElement.type() != mongo::String) {
                _work(Status(ErrorCodes::TypeMismatch, str::stream() <<
                             "'" << kNameFieldName << "' field must be a string: " << info));
                return;
            }
            const std::string collectionName = nameElement.String();
            if (seen.find(collectionName) != seen.end()) {
                _work(Status(ErrorCodes::DuplicateKey, str::stream() <<
                             "collection info contains duplicate collection name " <<
                             "'" << collectionName << "': " << info));
                return;
            }

            BSONElement optionsElement = info.getField(kOptionsFieldName);
            if (optionsElement.eoo()) {
                _work(Status(ErrorCodes::FailedToParse, str::stream() <<
                             "collection info must contain '" << kOptionsFieldName << "' " <<
                             "field : " << info));
                return;
            }
            if (!optionsElement.isABSONObj()) {
                _work(Status(ErrorCodes::TypeMismatch, str::stream() <<
                             "'" << kOptionsFieldName << "' field must be an object: " << info));
                return;
            }
            const BSONObj optionsObj = optionsElement.Obj();
            CollectionOptions options;
            Status parseStatus = options.parse(optionsObj);
            if (!parseStatus.isOK()) {
                _work(parseStatus);
                return;
            }
            seen.insert(collectionName);

            _collectionNamespaces.emplace_back(_dbname, collectionName);
            auto&& nss = *_collectionNamespaces.crbegin();

            try {
                _collectionCloners.emplace_back(
                    _executor,
                    _source,
                    nss,
                    options,
                    stdx::bind(&DatabaseCloner::_collectionClonerCallback,
                               this,
                               stdx::placeholders::_1,
                               nss),
                    _createStorageInterface());
            }
            catch (const UserException& ex) {
                _work(ex.toStatus());
                return;
            }
        }

        for (auto&& collectionCloner : _collectionCloners) {
            collectionCloner.setScheduleDbWorkFn(_scheduleDbWorkFn);
        }

        // Start first collection cloner.
        _currentCollectionClonerIter = _collectionCloners.begin();

        LOG(1) << "    cloning collection " << _currentCollectionClonerIter->getSourceNamespace();

        Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
        if (!startStatus.isOK()) {
            LOG(1) << "    failed to start collection cloning on "
                   << _currentCollectionClonerIter->getSourceNamespace()
                   << ": " << startStatus;
            _work(startStatus);
            return;
        }

        _active = true;
    }
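Each collection-info document handled above is expected to look roughly like this; the collection name and options are illustrative:

// Hypothetical listCollections batch entry:
// { name: "users", options: { capped: false } }
// 'name' must be a string, 'options' must be an object that
// CollectionOptions::parse() accepts, and a collection name may appear
// only once across all batches (otherwise ErrorCodes::DuplicateKey).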
Beispiel #20
Status ModifierCurrentDate::init(const BSONElement& modExpr,
                                 const Options& opts,
                                 bool* positional) {
    _updatePath.parse(modExpr.fieldName());
    Status status = fieldchecker::isUpdatable(_updatePath);
    if (!status.isOK()) {
        return status;
    }

    // If a $-positional operator was used, get the index in which it occurred
    // and ensure only one occurrence.
    size_t foundCount;
    bool foundDollar =
        fieldchecker::isPositional(_updatePath, &_pathReplacementPosition, &foundCount);

    if (positional)
        *positional = foundDollar;

    if (foundDollar && foundCount > 1) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                    << _updatePath.dottedField()
                                    << "'");
    }

    // Validate and store the type to produce
    switch (modExpr.type()) {
        case Bool:
            _typeIsDate = true;
            break;
        case Object: {
            const BSONObj argObj = modExpr.embeddedObject();
            const BSONElement typeElem = argObj.getField(kType);
            bool badInput = typeElem.eoo() || !(typeElem.type() == String);

            if (!badInput) {
                std::string typeVal = typeElem.String();
                badInput = !(typeElem.String() == kDate || typeElem.String() == kTimestamp);
                if (!badInput)
                    _typeIsDate = (typeVal == kDate);

                if (!badInput) {
                    // Check to make sure only the $type field was given as an arg
                    BSONObjIterator i(argObj);
                    const bool onlyHasTypeField =
                        ((i.next().fieldNameStringData() == kType) && i.next().eoo());
                    if (!onlyHasTypeField) {
                        return Status(ErrorCodes::BadValue,
                                      str::stream()
                                          << "The only valid field of the option is '$type': "
                                             "{$currentDate: {field : {$type: 'date/timestamp'}}}; "
                                          << "arg: "
                                          << argObj);
                    }
                }
            }

            if (badInput) {
                return Status(ErrorCodes::BadValue,
                              "The '$type' string field is required "
                              "to be 'date' or 'timestamp': "
                              "{$currentDate: {field : {$type: 'date'}}}");
            }
            break;
        }
        default:
            return Status(ErrorCodes::BadValue,
                          str::stream() << typeName(modExpr.type())
                                        << " is not valid type for $currentDate."
                                           " Please use a boolean ('true')"
                                           " or a $type expression ({$type: 'timestamp/date'}).");
    }

    return Status::OK();
}
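The two accepted argument forms for $currentDate as validated above; the target field name is illustrative:

// Boolean form, produces a Date:
// { $currentDate: { lastModified: true } }
//
// Typed form; $type must be exactly "date" or "timestamp" and must be the
// only field of the argument object:
// { $currentDate: { lastModified: { $type: "timestamp" } } }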
Beispiel #21
    // static
    void ExpressionKeysPrivate::get2DKeys(const BSONObj &obj,
                                         const TwoDIndexingParams& params,
                                         BSONObjSet* keys,
                                         std::vector<BSONObj>* locs) {
        BSONElementMSet bSet;

        // Get all the nested location fields, but don't return individual elements from
        // the last array, if it exists.
        obj.getFieldsDotted(params.geo.c_str(), bSet, false);

        if (bSet.empty())
            return;

        for (BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI) {
            BSONElement geo = *setI;

            if (geo.eoo() || !geo.isABSONObj())
                continue;

            //
            // Grammar for location lookup:
            // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
            // loc  ::= { <k1> : #, <k2> : # }|[#, #]|{}
            //
            // Empty locations are ignored, preserving single-location semantics
            //

            BSONObj embed = geo.embeddedObject();
            if (embed.isEmpty())
                continue;

            // Differentiate between location arrays and locations
            // by seeing if the first element value is a number
            bool singleElement = embed.firstElement().isNumber();

            BSONObjIterator oi(embed);

            while (oi.more()) {
                BSONObj locObj;

                if (singleElement) {
                    locObj = embed;
                } else {
                    BSONElement locElement = oi.next();

                    uassert(16804, mongoutils::str::stream() <<
                            "location object expected, location array not in correct format",
                            locElement.isABSONObj());

                    locObj = locElement.embeddedObject();
                    if(locObj.isEmpty())
                        continue;
                }

                BSONObjBuilder b(64);

                // Remember the actual location object if needed
                if (locs)
                    locs->push_back(locObj);

                // Stop if we don't need to get anything but location objects
                if (!keys) {
                    if (singleElement) break;
                    else continue;
                }

                params.geoHashConverter->hash(locObj, &obj).appendHashMin(&b, "");

                // Go through all the other index keys
                for (vector<pair<string, int> >::const_iterator i = params.other.begin();
                     i != params.other.end(); ++i) {
                    // Get *all* fields for the index key
                    BSONElementSet eSet;
                    obj.getFieldsDotted(i->first, eSet);

                    if (eSet.size() == 0)
                        b.appendNull("");
                    else if (eSet.size() == 1)
                        b.appendAs(*(eSet.begin()), "");
                    else {
                        // If we have more than one key, store as an array of the objects
                        BSONArrayBuilder aBuilder;

                        for (BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end();
                             ++ei) {
                            aBuilder.append(*ei);
                        }

                        b.append("", aBuilder.arr());
                    }
                }
                keys->insert(b.obj());
                if(singleElement) break;
            }
        }
    }
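The location shapes matched by the grammar comment inside get2DKeys(); the field names and coordinates are illustrative:

// Single location, pair form:              { loc: [ 40.7, -73.9 ] }
// Single location, keyed form:             { loc: { lng: 40.7, lat: -73.9 } }
// Array of locations (one key per entry):  { loc: [ [ 40.7, -73.9 ], [ 41.0, -74.1 ] ] }
// Empty location objects are skipped:      { loc: {} }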
Beispiel #22
        BSONObj FTSSpec::fixSpec( const BSONObj& spec ) {
            if ( spec["textIndexVersion"].numberInt() == TEXT_INDEX_VERSION_1 ) {
                return _fixSpecV1( spec );
            }

            map<string,int> m;

            BSONObj keyPattern;
            {
                BSONObjBuilder b;

                // Populate m and keyPattern.
                {
                    bool addedFtsStuff = false;
                    BSONObjIterator i( spec["key"].Obj() );
                    while ( i.more() ) {
                        BSONElement e = i.next();
                        if ( str::equals( e.fieldName(), "_fts" ) ) {
                            uassert( 17271,
                                     "expecting _fts:\"text\"",
                                     INDEX_NAME == e.valuestrsafe() );
                            addedFtsStuff = true;
                            b.append( e );
                        }
                        else if ( str::equals( e.fieldName(), "_ftsx" ) ) {
                            uassert( 17272, "expecting _ftsx:1", e.numberInt() == 1 );
                            b.append( e );
                        }
                        else if ( e.type() == String && INDEX_NAME == e.valuestr() ) {

                            if ( !addedFtsStuff ) {
                                _addFTSStuff( &b );
                                addedFtsStuff = true;
                            }

                            m[e.fieldName()] = 1;
                        }
                        else {
                            uassert( 17273,
                                     "expected value 1 or -1 for non-text key in compound index",
                                     e.numberInt() == 1 || e.numberInt() == -1 );
                            b.append( e );
                        }
                    }
                    verify( addedFtsStuff );
                }
                keyPattern = b.obj();

                // Verify that index key is in the correct format: extraBefore fields, then text
                // fields, then extraAfter fields.
                {
                    BSONObjIterator i( spec["key"].Obj() );
                    BSONElement e;

                    // extraBefore fields
                    do {
                        verify( i.more() );
                        e = i.next();
                    } while ( INDEX_NAME != e.valuestrsafe() );

                    // text fields
                    bool alreadyFixed = str::equals( e.fieldName(), "_fts" );
                    if ( alreadyFixed ) {
                        uassert( 17288, "expected _ftsx after _fts", i.more() );
                        e = i.next();
                        uassert( 17274,
                                 "expected _ftsx after _fts",
                                 str::equals( e.fieldName(), "_ftsx" ) );
                        e = i.next();
                    }
                    else {
                        do {
                            uassert( 17289,
                                     "text index with reserved fields _fts/ftsx not allowed",
                                     !str::equals( e.fieldName(), "_fts" ) &&
                                         !str::equals( e.fieldName(), "_ftsx" ) );
                            e = i.next();
                        } while ( !e.eoo() && INDEX_NAME == e.valuestrsafe() );
                    }

                    // extraAfter fields
                    while ( !e.eoo() ) {
                        uassert( 17290,
                                 "compound text index key suffix fields must have value 1",
                                 e.numberInt() == 1 && !str::equals( "_ftsx", e.fieldName() ) );
                        e = i.next();
                    }
                }

            }

            if ( spec["weights"].type() == Object ) {
                BSONObjIterator i( spec["weights"].Obj() );
                while ( i.more() ) {
                    BSONElement e = i.next();
                    uassert( 17283,
                             "weight for text index needs numeric type",
                             e.isNumber() );
                    m[e.fieldName()] = e.numberInt();

                    // Verify weight refers to a valid field.
                    if ( str::equals( e.fieldName(), "$**" ) ) {
                        continue;
                    }
                    FieldRef keyField( e.fieldName() );
                    uassert( 17294,
                             "weight cannot be on an empty field",
                             keyField.numParts() != 0 );
                    for ( size_t i = 0; i < keyField.numParts(); i++ ) {
                        StringData part = keyField.getPart(i);
                        uassert( 17291, "weight cannot have empty path component", !part.empty() );
                        uassert( 17292,
                                 "weight cannot have path component with $ prefix",
                                 !part.startsWith( "$" ) );
                    }
                }
            }
            else if ( spec["weights"].str() == WILDCARD ) {
                m[WILDCARD] = 1;
            }
            else if ( !spec["weights"].eoo() ) {
                uasserted( 17284, "text index option 'weights' must be an object" );
            }

            BSONObj weights;
            {
                BSONObjBuilder b;
                for ( map<string,int>::iterator i = m.begin(); i != m.end(); ++i ) {
                    uassert( 16674, "score for word too high",
                             i->second > 0 && i->second < MAX_WORD_WEIGHT );
                    b.append( i->first, i->second );
                }
                weights = b.obj();
            }

            BSONElement default_language_elt = spec["default_language"];
            string default_language( default_language_elt.str() );
            if ( default_language_elt.eoo() ) {
                default_language = moduleDefaultLanguage;
            }
            else {
                uassert( 17263,
                         "default_language needs a string type",
                         default_language_elt.type() == String );
            }
            uassert( 17264,
                     "default_language is not valid",
                     FTSLanguage::make( default_language,
                                        TEXT_INDEX_VERSION_2 ).getStatus().isOK() );

            BSONElement language_override_elt = spec["language_override"];
            string language_override( language_override_elt.str() );
            if ( language_override_elt.eoo() ) {
                language_override = "language";
            }
            else {
                uassert( 17136,
                         "language_override is not valid",
                         language_override_elt.type() == String
                             && validateOverride( language_override ) );
            }

            int version = -1;
            int textIndexVersion = TEXT_INDEX_VERSION_2;

            BSONObjBuilder b;
            BSONObjIterator i( spec );
            while ( i.more() ) {
                BSONElement e = i.next();
                if ( str::equals( e.fieldName(), "key" ) ) {
                    b.append( "key", keyPattern );
                }
                else if ( str::equals( e.fieldName(), "weights" ) ) {
                    b.append( "weights", weights );
                    weights = BSONObj();
                }
                else if ( str::equals( e.fieldName(), "default_language" ) ) {
                    b.append( "default_language", default_language);
                    default_language = "";
                }
                else if ( str::equals( e.fieldName(), "language_override" ) ) {
                    b.append( "language_override", language_override);
                    language_override = "";
                }
                else if ( str::equals( e.fieldName(), "v" ) ) {
                    version = e.numberInt();
                }
                else if ( str::equals( e.fieldName(), "textIndexVersion" ) ) {
                    uassert( 17293,
                             "text index option 'textIndexVersion' must be a number",
                             e.isNumber() );
                    textIndexVersion = e.numberInt();
                    uassert( 16730,
                             str::stream() << "bad textIndexVersion: " << textIndexVersion,
                             textIndexVersion == TEXT_INDEX_VERSION_2 );
                }
                else {
                    b.append( e );
                }
            }

            if ( !weights.isEmpty() ) {
                b.append( "weights", weights );
            }
            if ( !default_language.empty() ) {
                b.append( "default_language", default_language);
            }
            if ( !language_override.empty() ) {
                b.append( "language_override", language_override);
            }
            if ( version >= 0 ) {
                b.append( "v", version );
            }
            b.append( "textIndexVersion", textIndexVersion );

            return b.obj();
        }
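A before/after sketch of what fixSpec() does to a simple compound text index spec, assuming moduleDefaultLanguage is "english"; the field names "a" and "b" are illustrative:

// Input spec:
// { key: { a: "text", b: 1 }, name: "a_text_b_1" }
//
// Fixed spec (approximately):
// { key: { _fts: "text", _ftsx: 1, b: 1 },
//   name: "a_text_b_1",
//   weights: { a: 1 },
//   default_language: "english",
//   language_override: "language",
//   textIndexVersion: 2 }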
Status parseRolesInfoCommand(const BSONObj& cmdObj, StringData dbname, RolesInfoArgs* parsedArgs) {
    stdx::unordered_set<std::string> validFieldNames;
    validFieldNames.insert("rolesInfo");
    validFieldNames.insert("showPrivileges");
    validFieldNames.insert("showAuthenticationRestrictions");
    validFieldNames.insert("showBuiltinRoles");

    Status status = _checkNoExtraFields(cmdObj, "rolesInfo", validFieldNames);
    if (!status.isOK()) {
        return status;
    }

    if (cmdObj["rolesInfo"].numberInt() == 1) {
        parsedArgs->allForDB = true;
    } else if (cmdObj["rolesInfo"].type() == Array) {
        status = parseRoleNamesFromBSONArray(
            BSONArray(cmdObj["rolesInfo"].Obj()), dbname, &parsedArgs->roleNames);
        if (!status.isOK()) {
            return status;
        }
    } else {
        RoleName name;
        status = _parseNameFromBSONElement(cmdObj["rolesInfo"],
                                           dbname,
                                           AuthorizationManager::ROLE_NAME_FIELD_NAME,
                                           AuthorizationManager::ROLE_DB_FIELD_NAME,
                                           &name);
        if (!status.isOK()) {
            return status;
        }
        parsedArgs->roleNames.push_back(name);
    }

    BSONElement showPrivileges = cmdObj["showPrivileges"];
    if (showPrivileges.eoo()) {
        parsedArgs->privilegeFormat = PrivilegeFormat::kOmit;
    } else if (showPrivileges.isNumber() || showPrivileges.isBoolean()) {
        parsedArgs->privilegeFormat =
            showPrivileges.trueValue() ? PrivilegeFormat::kShowSeparate : PrivilegeFormat::kOmit;
    } else if (showPrivileges.type() == BSONType::String &&
               showPrivileges.String() == "asUserFragment") {
        parsedArgs->privilegeFormat = PrivilegeFormat::kShowAsUserFragment;
    } else {
        return Status(ErrorCodes::FailedToParse,
                      str::stream() << "Failed to parse 'showPrivileges'. 'showPrivileges' should "
                                       "either be a boolean or the string 'asUserFragment', given: "
                                    << showPrivileges.toString());
    }

    const auto showAuthenticationRestrictions = cmdObj["showAuthenticationRestrictions"];
    if (showAuthenticationRestrictions.eoo()) {
        parsedArgs->authenticationRestrictionsFormat = AuthenticationRestrictionsFormat::kOmit;
    } else if (parsedArgs->privilegeFormat == PrivilegeFormat::kShowAsUserFragment) {
        return Status(
            ErrorCodes::UnsupportedFormat,
            "showAuthenticationRestrictions may not be used with showPrivileges='asUserFragment'");
    } else {
        bool show;
        status = bsonExtractBooleanField(cmdObj, "showAuthenticationRestrictions", &show);
        if (!status.isOK()) {
            return status;
        }
        parsedArgs->authenticationRestrictionsFormat = show
            ? AuthenticationRestrictionsFormat::kShow
            : AuthenticationRestrictionsFormat::kOmit;
    }

    status = bsonExtractBooleanFieldWithDefault(
        cmdObj, "showBuiltinRoles", false, &parsedArgs->showBuiltinRoles);
    if (!status.isOK()) {
        return status;
    }

    return Status::OK();
}
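As a quick reference, here is a minimal, hypothetical sketch (not part of the original file) of the three shapes of 'rolesInfo' command this parser accepts, assuming the usual mongo BSON builder headers and macros:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only: the concrete role names and flag combinations are invented.
void rolesInfoCommandShapes() {
    // 1. rolesInfo: 1 -> parsedArgs->allForDB becomes true.
    mongo::BSONObj all = BSON("rolesInfo" << 1);

    // 2. A single role name, resolved against 'dbname' by _parseNameFromBSONElement.
    mongo::BSONObj one = BSON("rolesInfo" << "readWrite");

    // 3. An explicit array of {role, db} pairs, plus optional flags. showPrivileges may
    //    also be the string "asUserFragment", which cannot be combined with
    //    showAuthenticationRestrictions.
    mongo::BSONObj list = BSON("rolesInfo" << BSON_ARRAY(BSON("role" << "read" << "db" << "test"))
                                           << "showPrivileges" << true
                                           << "showBuiltinRoles" << true);
}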
void BtreeKeyGeneratorV0::getKeysImpl(std::vector<const char*> fieldNames,
                                      std::vector<BSONElement> fixed,
                                      const BSONObj& obj,
                                      BSONObjSet* keys,
                                      MultikeyPaths* multikeyPaths) const {
    BSONElement arrElt;
    unsigned arrIdx = ~0;
    unsigned numNotFound = 0;

    for (unsigned i = 0; i < fieldNames.size(); ++i) {
        if (*fieldNames[i] == '\0')
            continue;

        BSONElement e = obj.getFieldDottedOrArray(fieldNames[i]);

        if (e.eoo()) {
            e = nullElt;  // no matching field
            numNotFound++;
        }

        if (e.type() != Array)
            fieldNames[i] = "";  // no matching field or non-array match

        if (*fieldNames[i] == '\0')
            // no need for further object expansion (though array expansion still possible)
            fixed[i] = e;

        if (e.type() == Array && arrElt.eoo()) {
            // we only expand arrays on a single path -- track the path here
            arrIdx = i;
            arrElt = e;
        }

        // enforce single array path here
        if (e.type() == Array && e.rawdata() != arrElt.rawdata()) {
            assertParallelArrays(e.fieldName(), arrElt.fieldName());
        }
    }

    bool allFound = true;  // have we found elements for all field names in the key spec?
    for (std::vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end();
         ++i) {
        if (**i != '\0') {
            allFound = false;
            break;
        }
    }

    if (_isSparse && numNotFound == _fieldNames.size()) {
        // we didn't find any fields
        // so we're not going to index this document
        return;
    }

    bool insertArrayNull = false;

    if (allFound) {
        if (arrElt.eoo()) {
            // no terminal array element to expand
            BSONObjBuilder b(_sizeTracker);
            for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i)
                b.appendAs(*i, "");
            keys->insert(b.obj());
        } else {
            // terminal array element to expand, so generate all keys
            BSONObjIterator i(arrElt.embeddedObject());
            if (i.more()) {
                while (i.more()) {
                    BSONObjBuilder b(_sizeTracker);
                    for (unsigned j = 0; j < fixed.size(); ++j) {
                        if (j == arrIdx)
                            b.appendAs(i.next(), "");
                        else
                            b.appendAs(fixed[j], "");
                    }
                    keys->insert(b.obj());
                }
            } else if (fixed.size() > 1) {
                insertArrayNull = true;
            }
        }
    } else {
        // nonterminal array element to expand, so recurse
        verify(!arrElt.eoo());
        BSONObjIterator i(arrElt.embeddedObject());
        if (i.more()) {
            while (i.more()) {
                BSONElement e = i.next();
                if (e.type() == Object) {
                    getKeysImpl(fieldNames, fixed, e.embeddedObject(), keys, multikeyPaths);
                }
            }
        } else {
            insertArrayNull = true;
        }
    }

    if (insertArrayNull) {
        // x : [] - need to insert undefined
        BSONObjBuilder b(_sizeTracker);
        for (unsigned j = 0; j < fixed.size(); ++j) {
            if (j == arrIdx) {
                b.appendUndefined("");
            } else {
                BSONElement e = fixed[j];
                if (e.eoo())
                    b.appendNull("");
                else
                    b.appendAs(e, "");
            }
        }
        keys->insert(b.obj());
    }
}
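A worked example may make the expansion easier to follow. The following is a hypothetical sketch (not from the original file), assuming the mongo BSON macros:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only: the keys the V0 generator is expected to emit for the key pattern
// {a: 1, b: 1} and the document {a: [1, 2], b: 3}. The single array path "a" is expanded
// once per element while the non-array field "b" stays fixed.
void v0KeyExpansionExample() {
    mongo::BSONObj doc = BSON("a" << BSON_ARRAY(1 << 2) << "b" << 3);

    mongo::BSONObj key1 = BSON("" << 1 << "" << 3);  // from a[0]
    mongo::BSONObj key2 = BSON("" << 2 << "" << 3);  // from a[1]

    // An empty array, {a: [], b: 3}, instead takes the insertArrayNull branch above and
    // yields a single key with undefined in the array position: {"": undefined, "": 3}.
}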
Example #25
0
bool ExistsMatchExpression::matchesSingleElement( const BSONElement& e ) const {
    return !e.eoo();
}
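A tiny hypothetical sketch (not from the original file) of what this predicate boils down to:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only: {a: {$exists: true}} matches exactly when the path resolves to a
// non-eoo element.
void existsExample() {
    mongo::BSONObj present = BSON("a" << 1);
    mongo::BSONObj missing = BSON("b" << 1);

    bool matches = !present["a"].eoo();  // true  -> the predicate matches
    bool noMatch = !missing["a"].eoo();  // false -> the predicate does not match
}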
void BtreeKeyGeneratorV1::getKeysImplWithArray(
    std::vector<const char*> fieldNames,
    std::vector<BSONElement> fixed,
    const BSONObj& obj,
    BSONObjSet* keys,
    unsigned numNotFound,
    const std::vector<PositionalPathInfo>& positionalInfo,
    MultikeyPaths* multikeyPaths) const {
    BSONElement arrElt;

    // A set containing the positions of any indexed fields in the key pattern that traverse
    // through the 'arrElt' array value.
    std::set<size_t> arrIdxs;

    // A vector with size equal to the number of elements in the index key pattern. Each element in
    // the vector, if initialized, refers to the component within the indexed field that traverses
    // through the 'arrElt' array value. We say that this component within the indexed field
    // corresponds to a path that causes the index to be multikey if the 'arrElt' array value
    // contains multiple elements.
    //
    // For example, consider the index {'a.b': 1, 'a.c': 1} and the document
    // {a: [{b: 1, c: 'x'}, {b: 2, c: 'y'}]}. The path "a" causes the index to be multikey, so we'd
    // have a std::vector<boost::optional<size_t>>{{0U}, {0U}}.
    //
    // Furthermore, due to how positional key patterns are specified, it's possible for an indexed
    // field to cause the index to be multikey at a different component than another indexed field
    // that also traverses through the 'arrElt' array value. It's then also possible for an indexed
    // field not to cause the index to be multikey, even if it traverses through the 'arrElt' array
    // value, because only a particular element would be indexed.
    //
    // For example, consider the index {'a.b': 1, 'a.b.0': 1} and the document {a: {b: [1, 2]}}. The
    // path "a.b" causes the index to be multikey, but the key pattern "a.b.0" only indexes the
    // first element of the array, so we'd have a
    // std::vector<boost::optional<size_t>>{{1U}, boost::none}.
    std::vector<boost::optional<size_t>> arrComponents(fieldNames.size());

    bool mayExpandArrayUnembedded = true;
    for (size_t i = 0; i < fieldNames.size(); ++i) {
        if (*fieldNames[i] == '\0') {
            continue;
        }

        bool arrayNestedArray;
        // Extract element matching fieldName[ i ] from object xor array.
        BSONElement e =
            extractNextElement(obj, positionalInfo[i], &fieldNames[i], &arrayNestedArray);

        if (e.eoo()) {
            // if field not present, set to null
            fixed[i] = nullElt;
            // done expanding this field name
            fieldNames[i] = "";
            numNotFound++;
        } else if (e.type() == Array) {
            arrIdxs.insert(i);
            if (arrElt.eoo()) {
                // we only expand arrays on a single path -- track the path here
                arrElt = e;
            } else if (e.rawdata() != arrElt.rawdata()) {
                // enforce single array path here
                assertParallelArrays(e.fieldName(), arrElt.fieldName());
            }
            if (arrayNestedArray) {
                mayExpandArrayUnembedded = false;
            }
        } else {
            // not an array - no need for further expansion
            fixed[i] = e;
        }
    }

    if (arrElt.eoo()) {
        // No array, so generate a single key.
        if (_isSparse && numNotFound == fieldNames.size()) {
            return;
        }
        BSONObjBuilder b(_sizeTracker);
        for (std::vector<BSONElement>::iterator i = fixed.begin(); i != fixed.end(); ++i) {
            b.appendAs(*i, "");
        }
        keys->insert(b.obj());
    } else if (arrElt.embeddedObject().firstElement().eoo()) {
        // Empty array, so set matching fields to undefined.
        _getKeysArrEltFixed(&fieldNames,
                            &fixed,
                            undefinedElt,
                            keys,
                            numNotFound,
                            arrElt,
                            arrIdxs,
                            true,
                            _emptyPositionalInfo,
                            multikeyPaths);
    } else {
        BSONObj arrObj = arrElt.embeddedObject();

        // For positional key patterns, e.g. {'a.1.b': 1}, we look up the indexed array element
        // and then traverse the remainder of the field path up front. This prevents us from
        // having to look up the indexed element again on each recursive call (i.e. once per
        // array element).
        std::vector<PositionalPathInfo> subPositionalInfo(fixed.size());
        for (size_t i = 0; i < fieldNames.size(); ++i) {
            const bool fieldIsArray = arrIdxs.find(i) != arrIdxs.end();

            if (*fieldNames[i] == '\0') {
                // We've reached the end of the path.
                if (multikeyPaths && fieldIsArray && mayExpandArrayUnembedded) {
                    // The 'arrElt' array value isn't expanded into multiple elements when the last
                    // component of the indexed field is positional and 'arrElt' contains nested
                    // array values. In all other cases, the 'arrElt' array value may be expanded
                    // into multiple elements and can therefore cause the index to be multikey.
                    arrComponents[i] = _pathLengths[i] - 1;
                }
                continue;
            }

            // The earlier call to BSONObj::getFieldDottedOrArray(fieldNames[i]) modified
            // fieldNames[i] to refer to the suffix of the path immediately following the 'arrElt'
            // array value. If we haven't reached the end of this indexed field yet, then we must
            // have traversed through 'arrElt'.
            invariant(fieldIsArray);

            StringData part = fieldNames[i];
            part = part.substr(0, part.find('.'));
            subPositionalInfo[i].positionallyIndexedElt = arrObj[part];
            if (subPositionalInfo[i].positionallyIndexedElt.eoo()) {
                // We aren't indexing a particular element of the 'arrElt' array value, so it may be
                // expanded into multiple elements. It can therefore cause the index to be multikey.
                if (multikeyPaths) {
                    // We need to determine which component of the indexed field causes the index to
                    // be multikey as a result of the 'arrElt' array value. Since
                    //
                    //   NumComponents("<pathPrefix>") + NumComponents("<pathSuffix>")
                    //       = NumComponents("<pathPrefix>.<pathSuffix>"),
                    //
                    // we can compute the number of components in a prefix of the indexed field by
                    // subtracting the number of components in the suffix 'fieldNames[i]' from the
                    // number of components in the indexed field '_fieldNames[i]'.
                    //
                    // For example, consider the indexed field "a.b.c" and the suffix "c". The path
                    // "a.b.c" has 3 components and the suffix "c" has 1 component. Subtracting the
                    // latter from the former yields the number of components in the prefix "a.b",
                    // i.e. 2.
                    size_t fullPathLength = _pathLengths[i];
                    size_t suffixPathLength = FieldRef{fieldNames[i]}.numParts();
                    invariant(suffixPathLength < fullPathLength);
                    arrComponents[i] = fullPathLength - suffixPathLength - 1;
                }
                continue;
            }

            // We're indexing an array element by its position. Traverse the remainder of the
            // field path now.
            //
            // Indexing an array element by its position selects a particular element of the
            // 'arrElt' array value when generating keys. It therefore cannot cause the index to be
            // multikey.
            subPositionalInfo[i].arrayObj = arrObj;
            subPositionalInfo[i].remainingPath = fieldNames[i];
            subPositionalInfo[i].dottedElt =
                arrObj.getFieldDottedOrArray(subPositionalInfo[i].remainingPath);
        }

        // Generate a key for each element of the indexed array.
        size_t nArrObjFields = 0;
        for (const auto arrObjElem : arrObj) {
            _getKeysArrEltFixed(&fieldNames,
                                &fixed,
                                arrObjElem,
                                keys,
                                numNotFound,
                                arrElt,
                                arrIdxs,
                                mayExpandArrayUnembedded,
                                subPositionalInfo,
                                multikeyPaths);
            ++nArrObjFields;
        }

        if (multikeyPaths && nArrObjFields > 1) {
            // The 'arrElt' array value contains multiple elements, so we say that it causes the
            // index to be multikey.
            for (size_t i = 0; i < arrComponents.size(); ++i) {
                if (auto arrComponent = arrComponents[i]) {
                    (*multikeyPaths)[i].insert(*arrComponent);
                }
            }
        }
    }
}
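To make the multikey bookkeeping concrete, here is a hypothetical sketch (not from the original file) that spells out the example given in the comment near the top of the function:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only: key pattern {'a.b': 1, 'a.c': 1} applied to
// {a: [{b: 1, c: 'x'}, {b: 2, c: 'y'}]}.
void v1MultikeyExample() {
    mongo::BSONObj doc = BSON(
        "a" << BSON_ARRAY(BSON("b" << 1 << "c" << "x") << BSON("b" << 2 << "c" << "y")));

    // The array under "a" is the single expanded array path, so two keys come out:
    mongo::BSONObj key1 = BSON("" << 1 << "" << "x");
    mongo::BSONObj key2 = BSON("" << 2 << "" << "y");

    // Both indexed fields traverse that array at path component 0 ("a"), so the expected
    // multikey paths are {{0U}, {0U}}, matching the comment above.
}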
Example #27
0
    // Copies ops out of the bgsync queue into the deque passed in as a parameter.
    // Returns true if the batch should be ended early.
    // Batch should end early if we encounter a command, or if
    // there are no further ops in the bgsync queue to read.
    // This function also blocks up to 1 second waiting for new ops to appear in the bgsync
    // queue.  We can't block forever because there are maintenance things we need
    // to periodically check in the loop.
    bool SyncTail::tryPopAndWaitForMore(SyncTail::OpQueue* ops) {
        BSONObj op;
        // Check to see if there are ops waiting in the bgsync queue
        bool peek_success = peek(&op);

        if (!peek_success) {
            // if we don't have anything in the queue, wait a bit for something to appear
            if (ops->empty()) {
                // block up to 1 second
                _networkQueue->waitForMore();
                return false;
            }

            // otherwise, apply what we have
            return true;
        }

        // check for commands
        if ((op["op"].valuestrsafe()[0] == 'c') ||
            // Index builds are achieved through the use of an insert op, not a command op.
            // The following line is the same as what the insert code uses to detect an index build.
            (NamespaceString(op["ns"].valuestrsafe()).coll == "system.indexes")) {
            if (ops->empty()) {
                // apply commands one-at-a-time
                ops->push_back(op);
                _networkQueue->consume();
            }

            // otherwise, apply what we have so far and come back for the command
            return true;
        }

        // check for oplog version change
        BSONElement elemVersion = op["v"];
        int curVersion = 0;
        if (elemVersion.eoo())
            // missing version means version 1
            curVersion = 1;
        else
            curVersion = elemVersion.Int();

        if (curVersion != oplogVersion) {
            // Version changes cause us to end a batch.
            // If we are starting a new batch, reset version number
            // and continue.
            if (ops->empty()) {
                oplogVersion = curVersion;
            } else {
                // End batch early
                return true;
            }
        }
    
        // Copy the op to the deque and remove it from the bgsync queue.
        ops->push_back(op);
        _networkQueue->consume();

        // Go back for more ops
        return false;
    }
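For context, a hypothetical caller sketch (not from the original file; the apply step, the public access to tryPopAndWaitForMore, and the default-constructed OpQueue are assumptions) of how a batching loop might drive this function:

    // Illustration only: keep popping until the function signals that the batch should end
    // (command op, oplog version change, or nothing left to read), then apply what was gathered.
    void exampleBatchLoop(SyncTail& tail) {
        SyncTail::OpQueue ops;

        while (!tail.tryPopAndWaitForMore(&ops)) {
            // false means "not done batching yet"; a wait of up to 1 second may already have
            // happened inside tryPopAndWaitForMore when the queue was empty.
        }

        if (!ops.empty()) {
            // applyBatch(ops);  // placeholder for the actual apply step
        }
    }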
Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const {
    BSONElement userElement = doc[AuthorizationManager::USER_NAME_FIELD_NAME];
    BSONElement userDBElement = doc[AuthorizationManager::USER_DB_FIELD_NAME];
    BSONElement credentialsElement = doc[CREDENTIALS_FIELD_NAME];
    BSONElement rolesElement = doc[ROLES_FIELD_NAME];

    // Validate the "user" element.
    if (userElement.type() != String)
        return _badValue("User document needs 'user' field to be a string");
    if (userElement.valueStringData().empty())
        return _badValue("User document needs 'user' field to be non-empty");

    // Validate the "db" element
    if (userDBElement.type() != String || userDBElement.valueStringData().empty()) {
        return _badValue("User document needs 'db' field to be a non-empty string");
    }
    StringData userDBStr = userDBElement.valueStringData();
    if (!NamespaceString::validDBName(userDBStr, NamespaceString::DollarInDbNameBehavior::Allow) &&
        userDBStr != "$external") {
        return _badValue(mongoutils::str::stream() << "'" << userDBStr
                                                   << "' is not a valid value for the db field.");
    }

    // Validate the "credentials" element
    if (credentialsElement.eoo()) {
        return _badValue("User document needs 'credentials' object");
    }
    if (credentialsElement.type() != Object) {
        return _badValue("User document needs 'credentials' field to be an object");
    }

    BSONObj credentialsObj = credentialsElement.Obj();
    if (credentialsObj.isEmpty()) {
        return _badValue("User document needs 'credentials' field to be a non-empty object");
    }
    if (userDBStr == "$external") {
        BSONElement externalElement = credentialsObj[MONGODB_EXTERNAL_CREDENTIAL_FIELD_NAME];
        if (externalElement.eoo() || externalElement.type() != Bool || !externalElement.Bool()) {
            return _badValue(
                "User documents for users defined on '$external' must have "
                "'credentials' field set to {external: true}");
        }
    } else {
        const auto validateScram = [&credentialsObj](const auto& fieldName) {
            auto scramElement = credentialsObj[fieldName];

            if (scramElement.eoo()) {
                return Status(ErrorCodes::NoSuchKey,
                              str::stream() << fieldName << " does not exist");
            }
            if (scramElement.type() != Object) {
                return _badValue(str::stream() << fieldName
                                               << " credential must be an object, if present");
            }
            return Status::OK();
        };

        const auto sha1status = validateScram(SCRAMSHA1_CREDENTIAL_FIELD_NAME);
        if (!sha1status.isOK() && (sha1status.code() != ErrorCodes::NoSuchKey)) {
            return sha1status;
        }
        const auto sha256status = validateScram(SCRAMSHA256_CREDENTIAL_FIELD_NAME);
        if (!sha256status.isOK() && (sha256status.code() != ErrorCodes::NoSuchKey)) {
            return sha256status;
        }

        if (!sha1status.isOK() && !sha256status.isOK()) {
            return _badValue(
                "User document must provide credentials for all "
                "non-external users");
        }
    }

    // Validate the "roles" element.
    Status status = _checkV2RolesArray(rolesElement);
    if (!status.isOK())
        return status;

    // Validate the "authenticationRestrictions" element.
    status = initializeAuthenticationRestrictionsFromUserDocument(doc, nullptr);
    if (!status.isOK()) {
        return status;
    }

    return Status::OK();
}
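For reference, a hypothetical sketch (not from the original file) of user documents that would pass these checks; the literal field names stand in for the *_FIELD_NAME constants and the SCRAM sub-document contents are elided:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only.
void exampleValidUserDocuments() {
    // Regular user: a non-empty 'credentials' object carrying at least one SCRAM mechanism
    // (real documents hold iterationCount, salt, storedKey and serverKey inside it).
    mongo::BSONObj regular = BSON("user" << "alice"
                                         << "db" << "admin"
                                         << "credentials" << BSON("SCRAM-SHA-1" << mongo::BSONObj())
                                         << "roles" << BSON_ARRAY(BSON("role" << "readWrite"
                                                                              << "db" << "test")));

    // User defined on '$external': 'credentials' must be exactly {external: true}.
    mongo::BSONObj external = BSON("user" << "cn=alice,ou=users"
                                          << "db" << "$external"
                                          << "credentials" << BSON("external" << true)
                                          << "roles" << BSON_ARRAY(BSON("role" << "readAnyDatabase"
                                                                               << "db" << "admin")));
}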
Example #29
0
    void Model::save( bool safe ){
        ScopedDbConnection conn( modelServer() );

        BSONObjBuilder b;
        serialize( b );
        
        BSONElement myId;
        {
            BSONObjIterator i = b.iterator();
            while ( i.more() ){
                BSONElement e = i.next();
                if ( strcmp( e.fieldName() , "_id" ) == 0 ){
                    myId = e;
                    break;
                }
            }
        }

        if ( myId.type() ){
            if ( _id.isEmpty() ){
                _id = myId.wrap();
            }
            else if ( myId.woCompare( _id.firstElement() ) ){
                stringstream ss;
                ss << "_id from serialize and stored differ: ";
                ss << "[" << myId << "] != ";
                ss << "[" << _id.firstElement() << "]";
                throw UserException( 13121 , ss.str() );
            }
        }

        if ( _id.isEmpty() ){
            OID oid;
            oid.init();
            b.appendOID( "_id" , &oid );
            
            BSONObj o = b.obj();
            conn->insert( getNS() , o );
            _id = o["_id"].wrap().getOwned();

            log(4) << "inserted new model " << getNS() << "  " << o << endl;
        }
        else {
            if ( myId.eoo() ){
                myId = _id["_id"];
                b.append( myId );
            }
            
            assert( ! myId.eoo() );

            BSONObjBuilder qb;
            qb.append( myId );
            
            BSONObj q = qb.obj();
            BSONObj o = b.obj();

            log(4) << "updated model" << getNS() << "  " << q << " " << o << endl;

            conn->update( getNS() , q , o , true );
            
        }
        
        string errmsg = "";
        if ( safe )
            errmsg = conn->getLastError();

        conn.done();

        if ( safe && errmsg.size() )
            throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
    }
/* ****************************************************************************
*
* addTriggeredSubscriptions -
*/
static bool addTriggeredSubscriptions
(
  ContextRegistration                   cr,
  map<string, TriggeredSubscription*>&  subs,
  std::string&                          err,
  std::string                           tenant
)
{

  BSONArrayBuilder          entitiesNoPatternA;
  std::vector<std::string>  idJsV;
  std::vector<std::string>  typeJsV;

  for (unsigned int ix = 0; ix < cr.entityIdVector.size(); ++ix)
  {
    // FIXME: take into account subscriptions with no type
    EntityId* enP = cr.entityIdVector[ix];

    // The registration of isPattern=true entities is not supported, so we don't include them here
    if (enP->isPattern == "false")
    {
      entitiesNoPatternA.append(BSON(CASUB_ENTITY_ID << enP->id <<
                                     CASUB_ENTITY_TYPE << enP->type <<
                                     CASUB_ENTITY_ISPATTERN << "false"));
      idJsV.push_back(enP->id);
      typeJsV.push_back(enP->type);
    }
  }

  BSONArrayBuilder attrA;
  for (unsigned int ix = 0; ix < cr.contextRegistrationAttributeVector.size(); ++ix)
  {
    ContextRegistrationAttribute* craP = cr.contextRegistrationAttributeVector[ix];
    attrA.append(craP->name);
  }

  BSONObjBuilder queryNoPattern;
  queryNoPattern.append(CASUB_ENTITIES, BSON("$in" << entitiesNoPatternA.arr()));
  if (attrA.arrSize() > 0)
  {
    // If we don't do this check, the {$in: []} in the attribute name part will
    // make the query fail
    //

    // queryB.append(CASUB_ATTRS, BSON("$in" << attrA.arr()));
    queryNoPattern.append("$or", BSON_ARRAY(
                            BSON(CASUB_ATTRS << BSON("$in" << attrA.arr())) <<
                            BSON(CASUB_ATTRS << BSON("$size" << 0))));
  }
  else
  {
    queryNoPattern.append(CASUB_ATTRS, BSON("$size" << 0));
  }
  queryNoPattern.append(CASUB_EXPIRATION, BSON("$gt" << (long long) getCurrentTime()));


  //
  // This is JavaScript code that runs in the MongoDB engine. As far as I know, this is the only
  // way to do a "reverse regex" query in MongoDB (see
  // http://stackoverflow.com/questions/15966991/mongodb-reverse-regex/15989520).
  // Note that although we use isPattern=true in the MongoDB query alongside $where, we
  // also need to check it in the if statement of the JavaScript function, given that a
  // subscription document could include both isPattern=true and isPattern=false entities.
  //
  std::string idJsString = "[ ";

  for (unsigned int ix = 0; ix < idJsV.size(); ++ix)
  {
    if (ix != idJsV.size() - 1)
    {
      idJsString += "\""+idJsV[ix]+ "\" ,";
    }
    else
    {
      idJsString += "\"" +idJsV[ix]+ "\"";
    }
  }
  idJsString += " ]";

  std::string typeJsString = "[ ";

  for (unsigned int ix = 0; ix < typeJsV.size(); ++ix)
  {
    if (ix != typeJsV.size() - 1)
    {
      typeJsString += "\"" +typeJsV[ix] + "\" ,";
    }
    else
    {
      typeJsString += "\"" + typeJsV[ix] + "\"";
    }
  }
  typeJsString += " ]";

  std::string function = std::string("function()") +
    "{" +
      "enId = " + idJsString + ";" +
      "enType = " + typeJsString + ";" +
      "for (var i=0; i < this." + CASUB_ENTITIES + ".length; i++) {" +
        "if (this." + CASUB_ENTITIES + "[i]." + CASUB_ENTITY_ISPATTERN + " == \"true\") {" +
          "for (var j = 0; j < enId.length; j++) {" +
            "if (enId[j].match(this." + CASUB_ENTITIES + "[i]." + CASUB_ENTITY_ID +
              ") && this." + CASUB_ENTITIES + "[i]." + CASUB_ENTITY_TYPE + " == enType[j]) {" +
              "return true; " +
            "}" +
          "}" +
        "}" +
      "}" +
      "return false; " +
    "}";
  LM_T(LmtMongo, ("JS function: %s", function.c_str()));


  std::string     entPatternQ = CSUB_ENTITIES "." CSUB_ENTITY_ISPATTERN;
  BSONObjBuilder  queryPattern;

  queryPattern.append(entPatternQ, "true");
  queryPattern.append(CASUB_EXPIRATION, BSON("$gt" << (long long) getCurrentTime()));
  queryPattern.appendCode("$where", function);

  auto_ptr<DBClientCursor> cursor;
  BSONObj                  query = BSON("$or" << BSON_ARRAY(queryNoPattern.obj() << queryPattern.obj()));

  TIME_STAT_MONGO_READ_WAIT_START();
  DBClientBase* connection = getMongoConnection();
  if (!collectionQuery(connection, getSubscribeContextAvailabilityCollectionName(tenant), query, &cursor, &err))
  {
    TIME_STAT_MONGO_READ_WAIT_STOP();
    releaseMongoConnection(connection);
    return false;
  }
  TIME_STAT_MONGO_READ_WAIT_STOP();

  /* For each one of the subscriptions found, add it to the map (if not already there) */
  while (moreSafe(cursor))
  {
    BSONObj     sub;
    std::string err;
    if (!nextSafeOrErrorF(cursor, &sub, &err))
    {
      LM_E(("Runtime Error (exception in nextSafe(): %s - query: %s)", err.c_str(), query.toString().c_str()));
      continue;
    }
    BSONElement idField = getFieldF(sub, "_id");

    //
    // BSONElement::eoo returns true if 'not found', i.e. the field "_id" doesn't exist in 'sub'.
    //
    // If 'getFieldF(sub, "_id")' is not found and we go on to call OID() on it, we get
    // an exception and the broker crashes.
    //
    if (idField.eoo() == true)
    {
      std::string details = std::string("error retrieving _id field in doc: '") + sub.toString() + "'";
      alarmMgr.dbError(details);
      continue;
    }
    alarmMgr.dbErrorReset();

    std::string subIdStr = idField.OID().toString();

    if (subs.count(subIdStr) == 0)
    {
      ngsiv2::HttpInfo httpInfo;

      httpInfo.url = getStringFieldF(sub, CASUB_REFERENCE);

      LM_T(LmtMongo, ("adding subscription: '%s'", sub.toString().c_str()));

      //
      // FIXME P4: Once ctx availability notification formats get defined for NGSIv2,
      //           the first parameter for TriggeredSubscription will have "normalized" as default value
      //
      TriggeredSubscription* trigs = new TriggeredSubscription(
        sub.hasField(CASUB_FORMAT)? stringToRenderFormat(getStringFieldF(sub, CASUB_FORMAT)) : NGSI_V1_LEGACY,
        httpInfo,
        subToAttributeList(sub));

      subs.insert(std::pair<string, TriggeredSubscription*>(subIdStr, trigs));
    }
  }
  releaseMongoConnection(connection);

  return true;
}
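To summarize what this ends up sending to MongoDB, here is a hypothetical sketch (not from the original file) that builds roughly the same combined query with hard-coded values; the literal field names stand in for the CASUB_* constants and are assumptions:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

// Illustration only: roughly the document the code above sends for a registration of one
// non-pattern entity {id: "Room1", type: "Room"} with attribute "temperature"; 'now'
// stands for getCurrentTime().
void exampleTriggeredSubscriptionQuery(long long now)
{
  mongo::BSONObj queryNoPattern =
    BSON("entities" << BSON("$in" << BSON_ARRAY(BSON("id" << "Room1"
                                                          << "type" << "Room"
                                                          << "isPattern" << "false")))
                    << "$or" << BSON_ARRAY(BSON("attrs" << BSON("$in" << BSON_ARRAY("temperature")))
                                           << BSON("attrs" << BSON("$size" << 0)))
                    << "expiration" << BSON("$gt" << now));

  // The real queryPattern also carries the $where JavaScript function shown above.
  mongo::BSONObj queryPattern = BSON("entities.isPattern" << "true"
                                                          << "expiration" << BSON("$gt" << now));

  mongo::BSONObj query = BSON("$or" << BSON_ARRAY(queryNoPattern << queryPattern));
}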