Example #1
   // PD_TRACE_DECLARE_FUNCTION ( SDB__CLSSPLIT_INIT, "_rtnSplit::init" )
   INT32 _rtnSplit::init ( INT32 flags, INT64 numToSkip, INT64 numToReturn,
                           const CHAR * pMatcherBuff,
                           const CHAR * pSelectBuff,
                           const CHAR * pOrderByBuff,
                           const CHAR * pHintBuff )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB__CLSSPLIT_INIT ) ;
      const CHAR *pCollectionName = NULL ;
      const CHAR *pTargetName     = NULL ;
      const CHAR *pSourceName     = NULL ;

      try
      {
         BSONObj boRequest ( pMatcherBuff ) ;
         BSONElement beName       = boRequest.getField ( CAT_COLLECTION_NAME ) ;
         BSONElement beTarget     = boRequest.getField ( CAT_TARGET_NAME ) ;
         BSONElement beSplitKey   = boRequest.getField ( CAT_SPLITVALUE_NAME ) ;
         BSONElement beSource     = boRequest.getField ( CAT_SOURCE_NAME ) ;
         BSONElement bePercent    = boRequest.getField ( CAT_SPLITPERCENT_NAME ) ;

         PD_CHECK ( !beName.eoo() && beName.type() == String,
                    SDB_INVALIDARG, error, PDERROR,
                    "Invalid collection name: %s", beName.toString().c_str() ) ;
         pCollectionName = beName.valuestr() ;
         PD_CHECK ( ossStrlen ( pCollectionName ) <
                       DMS_COLLECTION_SPACE_NAME_SZ +
                       DMS_COLLECTION_NAME_SZ + 1,
                    SDB_INVALIDARG, error, PDERROR,
                    "Collection name is too long: %s", pCollectionName ) ;
         ossStrncpy ( _szCollection, pCollectionName,
                         DMS_COLLECTION_SPACE_NAME_SZ +
                          DMS_COLLECTION_NAME_SZ + 1 ) ;
         PD_CHECK ( !beTarget.eoo() && beTarget.type() == String,
                    SDB_INVALIDARG, error, PDERROR,
                    "Invalid target group name: %s",
                    beTarget.toString().c_str() ) ;
         pTargetName = beTarget.valuestr() ;
         PD_CHECK ( ossStrlen ( pTargetName ) < OP_MAXNAMELENGTH,
                    SDB_INVALIDARG, error, PDERROR,
                    "target group name is too long: %s",
                    pTargetName ) ;
         ossStrncpy ( _szTargetName, pTargetName, OP_MAXNAMELENGTH ) ;
         PD_CHECK ( !beSource.eoo() && beSource.type() == String,
                    SDB_INVALIDARG, error, PDERROR,
                    "Invalid source group name: %s",
                    beSource.toString().c_str() ) ;
         pSourceName = beSource.valuestr() ;
         PD_CHECK ( ossStrlen ( pSourceName ) < OP_MAXNAMELENGTH,
                    SDB_INVALIDARG, error, PDERROR,
                    "source group name is too long: %s",
                    pSourceName ) ;
         ossStrncpy ( _szSourceName, pSourceName, OP_MAXNAMELENGTH ) ;
         PD_CHECK ( !beSplitKey.eoo() && beSplitKey.type() == Object,
                    SDB_INVALIDARG, error, PDERROR,
                    "Invalid split key: %s",
                    beSplitKey.toString().c_str() ) ;
         _splitKey = beSplitKey.embeddedObject () ;
         _percent = bePercent.numberDouble() ;
      }
      catch ( std::exception &e )
      {
         PD_RC_CHECK ( SDB_SYS, PDERROR,
                       "Exception caught while parsing split request: %s",
                       e.what() ) ;
      }
      PD_TRACE4 ( SDB__CLSSPLIT_INIT,
                  PD_PACK_STRING ( pCollectionName ),
                  PD_PACK_STRING ( pTargetName ),
                  PD_PACK_STRING ( pSourceName ),
                  PD_PACK_STRING ( _splitKey.toString().c_str() ) ) ;

   done:
      PD_TRACE_EXITRC ( SDB__CLSSPLIT_INIT, rc ) ;
      return rc ;
   error:
      goto done ;
   }
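
For orientation, a minimal sketch of the matcher document that _rtnSplit::init expects to parse. The CAT_* constants are the field-name keys checked above; the helper name and the literal values here are illustrative, not taken from the source:

   // hypothetical helper, not part of the source above
   BSONObj buildSplitRequest ()
   {
      BSONObjBuilder b ;
      b.append ( CAT_COLLECTION_NAME, "foo.bar" ) ;          // String, length-bounded
      b.append ( CAT_TARGET_NAME, "group2" ) ;               // String < OP_MAXNAMELENGTH
      b.append ( CAT_SOURCE_NAME, "group1" ) ;               // String < OP_MAXNAMELENGTH
      b.append ( CAT_SPLITVALUE_NAME, BSON ( "a" << 10 ) ) ; // Object
      b.append ( CAT_SPLITPERCENT_NAME, 50.0 ) ;             // double
      return b.obj () ;
   }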
Example #2
    /* slave: pull some data from the master's oplog
       note: not yet in db mutex at this point.
       @return -1 error
               0 ok, don't sleep
               1 ok, sleep
    */
    int ReplSource::sync_pullOpLog(int& nApplied) {
        int okResultCode = 1;
        string ns = string("local.oplog.$") + sourceName();
        log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';

        bool tailing = true;
        oplogReader.tailCheck();

        bool initial = syncedTo.isNull();

        if ( !oplogReader.haveCursor() || initial ) {
            if ( initial ) {
                // Important to grab last oplog timestamp before listing databases.
                syncToTailOfRemoteLog();
                BSONObj info;
                bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
                massert( 10389 ,  "Unable to get database list", ok );
                BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
                while( i.moreWithEOO() ) {
                    BSONElement e = i.next();
                    if ( e.eoo() )
                        break;
                    string name = e.embeddedObject().getField( "name" ).valuestr();
                    if ( !e.embeddedObject().getBoolField( "empty" ) ) {
                        if ( name != "local" ) {
                            if ( only.empty() || only == name ) {
                                log( 2 ) << "adding to 'addDbNextPass': "******"$gte", syncedTo.asDate());
            BSONObjBuilder query;
            query.append("ts", q.done());
            if ( !only.empty() ) {
                // note we may here skip a LOT of data table scanning, a lot of work for the master.
                query.appendRegex("ns", string("^") + only); // maybe append "\\." here?
            }
            BSONObj queryObj = query.done();
            // e.g. queryObj = { ts: { $gte: syncedTo } }

            oplogReader.tailingQuery(ns.c_str(), queryObj);
            tailing = false;
        }
        else {
            log(2) << "repl: tailing=true\n";
        }

        if( !oplogReader.haveCursor() ) {
            problem() << "repl: dbclient::query returns null (conn closed?)" << endl;
            oplogReader.resetConnection();
            return -1;
        }

        // show any deferred database creates from a previous pass
        {
            set<string>::iterator i = addDbNextPass.begin();
            if ( i != addDbNextPass.end() ) {
                BSONObjBuilder b;
                b.append("ns", *i + '.');
                b.append("op", "db");
                BSONObj op = b.done();
                sync_pullOpLog_applyOperation(op, false);
            }
        }

        if ( !oplogReader.more() ) {
            if ( tailing ) {
                log(2) << "repl: tailing & no new activity\n";
                if( oplogReader.awaitCapable() )
                    okResultCode = 0; // don't sleep

            }
            else {
                log() << "repl:   " << ns << " oplog is empty\n";
            }
            {
                dblock lk;
                save();
            }
            return okResultCode;
        }

        OpTime nextOpTime;
        {
            BSONObj op = oplogReader.next();
            BSONElement ts = op.getField("ts");
            if ( ts.type() != Date && ts.type() != Timestamp ) {
                string err = op.getStringField("$err");
                if ( !err.empty() ) {
                    // 13051 is "tailable cursor requested on non capped collection"
                    if (op.getIntField("code") == 13051) {
                        problem() << "trying to slave off of a non-master" << '\n';
                        massert( 13344 ,  "trying to slave off of a non-master", false );
                    }
                    else {
                        problem() << "repl: $err reading remote oplog: " + err << '\n';
                        massert( 10390 ,  "got $err reading remote oplog", false );
                    }
                }
                else {
                    problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
                    massert( 10391 , "repl: bad object read from remote oplog", false);
                }
            }

            nextOpTime = OpTime( ts.date() );
            log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
            if ( initial ) {
                log(1) << "repl:   initial run\n";
            }
            if( tailing ) {
                if( !( syncedTo < nextOpTime ) ) {
                    log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
                    log() << "repl syncTo:     " << syncedTo.toStringLong() << endl;
                    log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
                    assert(false);
                }
                oplogReader.putBack( op ); // op will be processed in the loop below
                nextOpTime = OpTime(); // will reread the op below
            }
            else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
                Nullstream& l = log();
                l << "repl:   nextOpTime " << nextOpTime.toStringLong() << ' ';
                if ( nextOpTime < syncedTo )
                    l << "<??";
                else
                    l << ">";

                l << " syncedTo " << syncedTo.toStringLong() << '\n';
                log() << "repl:   time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
                log() << "repl:   tailing: " << tailing << '\n';
                log() << "repl:   data too stale, halting replication" << endl;
                replInfo = replAllDead = "data too stale halted replication";
                assert( syncedTo < nextOpTime );
                throw SyncException();
            }
            else {
                /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
            }
        }

        // apply operations
        {
            int n = 0;
            time_t saveLast = time(0);
            while ( 1 ) {

                bool moreInitialSyncsPending = !addDbNextPass.empty() && n; // we need "&& n" to assure we actually process at least one op to get a sync point recorded in the first place.

                if ( moreInitialSyncsPending || !oplogReader.more() ) {
                    dblock lk;

                    // NOTE aaron 2011-03-29 This block may be unnecessary, but I'm leaving it in place to avoid changing timing behavior.
                    {
                        dbtemprelease t;
                        if ( !moreInitialSyncsPending && oplogReader.more() ) {
                            continue;
                        }
                        // otherwise, break out of loop so we can set to completed or clone more dbs
                    }
                    
                    if( oplogReader.awaitCapable() && tailing )
                        okResultCode = 0; // don't sleep
                    syncedTo = nextOpTime;
                    save(); // note how far we are synced up to now
                    log() << "repl:   applied " << n << " operations" << endl;
                    nApplied = n;
                    log() << "repl:  end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
                    break;
                }

                OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
                    // periodically note our progress, in case we are doing a lot of work and crash
                    dblock lk;
                    syncedTo = nextOpTime;
                    // can't update local log ts since there are pending operations from our peer
                    save();
                    log() << "repl:   checkpoint applied " << n << " operations" << endl;
                    log() << "repl:   syncedTo: " << syncedTo.toStringLong() << endl;
                    saveLast = time(0);
                    n = 0;
                }

                BSONObj op = oplogReader.next();

                unsigned b = replApplyBatchSize;
                bool justOne = b == 1;
                scoped_ptr<writelock> lk( justOne ? 0 : new writelock() );
                while( 1 ) {

                    BSONElement ts = op.getField("ts");
                    if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
                        log() << "sync error: problem querying remote oplog record" << endl;
                        log() << "op: " << op.toString() << endl;
                        log() << "halting replication" << endl;
                        replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
                        throw SyncException();
                    }
                    OpTime last = nextOpTime;
                    nextOpTime = OpTime( ts.date() );
                    if ( !( last < nextOpTime ) ) {
                        log() << "sync error: last applied optime at slave >= nextOpTime from master" << endl;
                        log() << " last:       " << last.toStringLong() << endl;
                        log() << " nextOpTime: " << nextOpTime.toStringLong() << endl;
                        log() << " halting replication" << endl;
                        replInfo = replAllDead = "sync error last >= nextOpTime";
                        uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
                    }
                    if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
                        assert( justOne );
                        oplogReader.putBack( op );
                        _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
                        dblock lk;
                        if ( n > 0 ) {
                            syncedTo = last;
                            save();
                        }
                        log() << "repl:   applied " << n << " operations" << endl;
                        log() << "repl:   syncedTo: " << syncedTo.toStringLong() << endl;
                        log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
                        return okResultCode;
                    }

                    sync_pullOpLog_applyOperation(op, !justOne);
                    n++;

                    if( --b == 0 )
                        break;
                    // if we get here, we are doing multiple applications in a single write lock acquisition
                    if( !oplogReader.moreInCurrentBatch() ) {
                        // break if no more in batch so we release lock while reading from the master
                        break;
                    }
                    op = oplogReader.next();

                    getDur().commitIfNeeded();
                }
            }
        }

        return okResultCode;
    }
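
A compact restatement of the query construction buried in the branch above, as a standalone sketch (the helper name is illustrative; the builder calls mirror the appendDate/appendRegex usage in the function):

    // Builds { ts: { $gte: <syncedTo> } }, optionally narrowed to one database
    // via a ^<only> regex on "ns" -- the same shape sync_pullOpLog sends.
    BSONObj buildOplogQuery(const OpTime& syncedTo, const string& only) {
        BSONObjBuilder gte;
        gte.appendDate("$gte", syncedTo.asDate());
        BSONObjBuilder query;
        query.append("ts", gte.done());
        if (!only.empty())
            query.appendRegex("ns", string("^") + only); // spare the master a table scan
        return query.obj();
    }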
Example #3
    int run() {
        
        if ( hasParam( "repair" ) ){
            warning() << "repair is a work in progress" << endl;
            return repair();
        }

        {
            string q = getParam("query");
            if ( q.size() )
                _query = fromjson( q );
        }

        string opLogName = "";
        unsigned long long opLogStart = 0;
        if (hasParam("oplog")) {
            if (hasParam("query") || hasParam("db") || hasParam("collection")) {
                cout << "oplog mode is only supported on full dumps" << endl;
                return -1;
            }


            BSONObj isMaster;
            conn("true").simpleCommand("admin", &isMaster, "isMaster");

            if (isMaster.hasField("hosts")) { // if connected to replica set member
                opLogName = "local.oplog.rs";
            }
            else {
                opLogName = "local.oplog.$main";
                if ( ! isMaster["ismaster"].trueValue() ) {
                    cout << "oplog mode is only supported on master or replica set member" << endl;
                    return -1;
                }
            }

            auth("local");

            BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
            if (op.isEmpty()) {
                cout << "No operations in oplog. Please ensure you are connecting to a master." << endl;
                return -1;
            }

            assert(op["ts"].type() == Timestamp);
            opLogStart = op["ts"]._numberLong();
        }

        // check if we're outputting to stdout
        string out = getParam("out");
        if ( out == "-" ) {
            if ( _db != "*" && _coll != "*" ) {
                writeCollectionStdout( _db+"."+_coll );
                return 0;
            }
            else {
                cout << "You must specify database and collection to print to stdout" << endl;
                return -1;
            }
        }

        _usingMongos = isMongos();

        path root( out );
        string db = _db;

        if ( db == "*" ) {
            cout << "all dbs" << endl;
            auth( "admin" );

            BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
            if ( ! res["databases"].isABSONObj() ) {
                error() << "output of listDatabases isn't what we expected, no 'databases' field:\n" << res << endl;
                return -2;
            }
            BSONObj dbs = res["databases"].embeddedObjectUserCheck();
            set<string> keys;
            dbs.getFieldNames( keys );
            for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
                string key = *i;
                
                if ( ! dbs[key].isABSONObj() ) {
                    error() << "database field not an object key: " << key << " value: " << dbs[key] << endl;
                    return -3;
                }

                BSONObj dbobj = dbs[key].embeddedObjectUserCheck();

                const char * dbName = dbobj.getField( "name" ).valuestr();
                if ( (string)dbName == "local" )
                    continue;

                go ( dbName , root / dbName );
            }
        }
        else {
            auth( db );
            go( db , root / db );
        }

        if (!opLogName.empty()) {
            BSONObjBuilder b;
            b.appendTimestamp("$gt", opLogStart);

            _query = BSON("ts" << b.obj());

            writeCollectionFile( opLogName , root / "oplog.bson" );
        }

        return 0;
    }
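
The oplog.bson written by the final block above is what mongorestore replays with --oplogReplay. Rendered as JSON, the filter it is dumped with has this shape (value illustrative):

    // query used for the oplog dump:
    //   { ts: { $gt: Timestamp(<opLogStart>) } }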
Example #4
    bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
        LOG(1) << "create collection " << ns << ' ' << options << endl;

        if ( nsdetails(ns) ) {
            err = "collection already exists";
            return false;
        }

        long long size = Extent::initialSize(128);
        {
            BSONElement e = options.getField("size");
            if ( e.isNumber() ) {
                size = e.numberLong();
                uassert( 10083 , "create collection invalid size spec", size >= 0 );

                size += 0xff;
                size &= 0xffffffffffffff00LL;
                if ( size < Extent::minSize() )
                    size = Extent::minSize();
            }
        }

        bool newCapped = false;
        long long mx = 0;
        if( options["capped"].trueValue() ) {
            newCapped = true;
            BSONElement e = options.getField("max");
            if ( e.isNumber() ) {
                mx = e.numberLong();
                uassert( 16495,
                         "max in a capped collection has to be < 2^31 or not set",
                         NamespaceDetails::validMaxCappedDocs(&mx) );
            }
        }


        cc().database()->createCollection( ns, options["capped"].trueValue(), &options );

        Collection* collection = cc().database()->getCollection( ns );
        verify( collection );

        // $nExtents just for debug/testing.
        BSONElement e = options.getField( "$nExtents" );

        if ( e.type() == Array ) {
            // We create one extent per array entry, with size specified
            // by the array value.
            BSONObjIterator i( e.embeddedObject() );
            while( i.more() ) {
                BSONElement e = i.next();
                int size = int( e.number() );
                verify( size <= 0x7fffffff );
                // $nExtents is just for testing - always allocate new extents
                // rather than reuse existing extents so we have some predictability
                // in the extent size used by our tests
                collection->increaseStorageSize( (int)size, false );
            }
        }
        else if ( int( e.number() ) > 0 ) {
            // We create '$nExtents' extents, each of size 'size'.
            int nExtents = int( e.number() );
            verify( size <= 0x7fffffff );
            for ( int i = 0; i < nExtents; ++i ) {
                verify( size <= 0x7fffffff );
                // $nExtents is just for testing - always allocate new extents
                // rather than reuse existing extents so we have some predictability
                // in the extent size used by our tests
                collection->increaseStorageSize( (int)size, false );
            }
        }
        else {
            // This is the non test case, where we don't have a $nExtents spec.
            while ( size > 0 ) {
                const int max = Extent::maxSize();
                const int min = Extent::minSize();
                int desiredExtentSize = static_cast<int> (size > max ? max : size);
                desiredExtentSize = static_cast<int> (desiredExtentSize < min ? min : desiredExtentSize);

                desiredExtentSize &= 0xffffff00;
                Extent* e = collection->increaseStorageSize( (int)desiredExtentSize, true );
                size -= e->length;
            }
        }

        NamespaceDetails *d = nsdetails(ns);
        verify(d);

        bool ensure = true;

        // respect autoIndexId if set. otherwise, create an _id index for all colls, except for
        // capped ones in local w/o autoIndexID (reason for the exception is for the oplog and
        //  non-replicated capped colls)
        if( options.hasField( "autoIndexId" ) ||
            (newCapped && nsToDatabase( ns ) == "local" ) ) {
            ensure = options.getField( "autoIndexId" ).trueValue();
        }

        if( ensure ) {
            if( deferIdIndex )
                *deferIdIndex = true;
            else
                ensureIdIndexForNewNs( ns );
        }

        if ( mx > 0 )
            d->setMaxCappedDocs( mx );

        if ( options["flags"].numberInt() ) {
            d->replaceUserFlags( options["flags"].numberInt() );
        }

        return true;
    }
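
A worked example of the size arithmetic near the top of _userCreateNS: adding 0xff and clearing the low byte rounds the requested size up to the next 256-byte multiple. A standalone sketch, not part of the original file:

    long long roundUpTo256(long long size) {
        size += 0xff;                   // e.g. 1000 -> 1255
        size &= 0xffffffffffffff00LL;   // clear the low byte: 1255 -> 1024
        return size;                    // exact multiples (256, 512, ...) are unchanged
    }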
Example #5
File: dump.cpp Project: vrtx/mongo
    int run() {
        if (mongoDumpGlobalParams.repair) {
            return repair();
        }

        {
            if (mongoDumpGlobalParams.query.size()) {
                _query = fromjson(mongoDumpGlobalParams.query);
            }
        }

        if (mongoDumpGlobalParams.dumpUsersAndRoles) {
            uassertStatusOK(auth::getRemoteStoredAuthorizationVersion(&conn(true),
                            &_serverAuthzVersion));
            uassert(17369,
                    mongoutils::str::stream() << "Backing up users and roles is only supported for "
                    "clusters with auth schema versions 1 or 3, found: " <<
                    _serverAuthzVersion,
                    _serverAuthzVersion == AuthorizationManager::schemaVersion24 ||
                    _serverAuthzVersion == AuthorizationManager::schemaVersion26Final);
        }

        string opLogName = "";
        unsigned long long opLogStart = 0;
        if (mongoDumpGlobalParams.useOplog) {

            BSONObj isMaster;
            conn("true").simpleCommand("admin", &isMaster, "isMaster");

            if (isMaster.hasField("hosts")) { // if connected to replica set member
                opLogName = "local.oplog.rs";
            }
            else {
                opLogName = "local.oplog.$main";
                if ( ! isMaster["ismaster"].trueValue() ) {
                    toolError() << "oplog mode is only supported on master or replica set member"
                                << std::endl;
                    return -1;
                }
            }

            BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
            if (op.isEmpty()) {
                toolError() << "No operations in oplog. Please ensure you are connecting to a "
                            << "master." << std::endl;
                return -1;
            }

            verify(op["ts"].type() == Timestamp);
            opLogStart = op["ts"]._numberLong();
        }

        // check if we're outputting to stdout
        if (mongoDumpGlobalParams.outputDirectory == "-") {
            if (toolGlobalParams.db != "" && toolGlobalParams.coll != "") {
                writeCollectionStdout(toolGlobalParams.db + "." + toolGlobalParams.coll);
                return 0;
            }
            else {
                toolError() << "You must specify database and collection to print to stdout"
                            << std::endl;
                return -1;
            }
        }

        _usingMongos = isMongos();

        boost::filesystem::path root(mongoDumpGlobalParams.outputDirectory);

        if (toolGlobalParams.db == "") {
            if (toolGlobalParams.coll != "") {
                toolError() << "--db must be specified with --collection" << std::endl;
                return -1;
            }

            toolInfoLog() << "all dbs" << std::endl;

            BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
            if ( ! res["databases"].isABSONObj() ) {
                toolError() << "output of listDatabases isn't what we expected, no 'databases' "
                            << "field:\n" << res << std::endl;
                return -2;
            }
            BSONObj dbs = res["databases"].embeddedObjectUserCheck();
            set<string> keys;
            dbs.getFieldNames( keys );
            for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
                string key = *i;

                if ( ! dbs[key].isABSONObj() ) {
                    toolError() << "database field not an document key: " << key << " value: "
                                << dbs[key] << std::endl;
                    return -3;
                }

                BSONObj dbobj = dbs[key].embeddedObjectUserCheck();

                const char * dbName = dbobj.getField( "name" ).valuestr();
                if ( (string)dbName == "local" )
                    continue;

                boost::filesystem::path outdir = root / dbName;
                toolInfoLog() << "DATABASE: " << dbName << "\t to \t" << outdir.string()
                              << std::endl;
                go ( dbName , "", _query, outdir, "" );
            }
        }
        else {
            boost::filesystem::path outdir = root / toolGlobalParams.db;
            toolInfoLog() << "DATABASE: " << toolGlobalParams.db << "\t to \t" << outdir.string()
                          << std::endl;
            go(toolGlobalParams.db, toolGlobalParams.coll, _query, outdir, "");
            if (mongoDumpGlobalParams.dumpUsersAndRoles &&
                    _serverAuthzVersion == AuthorizationManager::schemaVersion26Final &&
                    toolGlobalParams.db != "admin") {
                toolInfoLog() << "Backing up user and role data for the " << toolGlobalParams.db <<
                              " database";
                Query query = Query(BSON("db" << toolGlobalParams.db));
                go("admin", "system.users", query, outdir, "$admin.system.users");
                go("admin", "system.roles", query, outdir, "$admin.system.roles");
            }
        }

        if (!opLogName.empty()) {
            BSONObjBuilder b;
            b.appendTimestamp("$gt", opLogStart);

            _query = BSON("ts" << b.obj());

            writeCollectionFile( opLogName , _query, root / "oplog.bson" );
        }

        return 0;
    }
Example #6
   // PD_TRACE_DECLARE_FUNCTION ( SDB_RTNEXPLAIN, "rtnExplain" )
   INT32 rtnExplain( const CHAR *pCollectionName,
                     const BSONObj &selector,
                     const BSONObj &matcher,
                     const BSONObj &orderBy,
                     const BSONObj &hint,
                     SINT32 flags,
                     SINT64 numToSkip,
                     SINT64 numToReturn,
                     pmdEDUCB *cb, SDB_DMSCB *dmsCB,
                     SDB_RTNCB *rtnCB, INT64 &contextID,
                     rtnContextBase **ppContext )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB_RTNEXPLAIN ) ;
      SDB_ASSERT ( cb, "educb can't be NULL" ) ;
      SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ;
      SDB_ASSERT ( rtnCB, "rtnCB can't be NULL" ) ;
      BSONObj explainOptions ;
      BSONObj realHint ;
      BSONElement ele = hint.getField( FIELD_NAME_OPTIONS ) ;
      if ( Object == ele.type() )
      {
         explainOptions = ele.embeddedObject() ;
      }

      ele = hint.getField( FIELD_NAME_HINT ) ;
      if ( Object == ele.type() )
      {
         realHint = ele.embeddedObject() ;
      }

      rtnQueryOptions options( matcher, selector,
                               orderBy, realHint,
                               pCollectionName,
                               numToSkip, numToReturn,
                               OSS_BIT_CLEAR( flags, FLG_QUERY_EXPLAIN),
                               FALSE ) ;

      rtnContextExplain *context = NULL ;
      rc = rtnCB->contextNew( RTN_CONTEXT_EXPLAIN,
                              ( rtnContext **)( &context ),
                              contextID, cb ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to create explain context:%d", rc ) ;
         goto error ;
      }

      rc = context->open( options, explainOptions ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to open explain context:%d", rc ) ;
         goto error ;
      }

      if ( ppContext )
      {
         *ppContext = context ;
      }
   done:
      PD_TRACE_EXITRC( SDB_RTNEXPLAIN, rc ) ;
      return rc ;
   error:
      if ( -1 != contextID )
      {
         rtnCB->contextDelete( contextID, cb ) ;
         contextID = -1 ;
      }
      goto done ;
   }
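
Note how rtnExplain clears FLG_QUERY_EXPLAIN before building rtnQueryOptions, so the query the explain context runs internally is a plain query and does not recurse into another explain. A plausible expansion of the macro (an assumption; the real definition lives in the oss headers):

   // #define OSS_BIT_CLEAR( flags, bits ) ( ( flags ) & ~( bits ) )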
Example #7
   INT32 _spdFMP::_extractMsg( BSONObj &msg, BOOLEAN &extracted )
   {
      INT32 rc = SDB_OK ;
      extracted = FALSE ;
      SDB_ASSERT( 0 <= _expect, "impossible" ) ;

      /// magic has already been found.
      if ( sizeof( SPD_MAGIC ) == _expect )
      {
found:
         if ( (_totalRead - _itr) < (INT32)sizeof(INT32) )
         {
            extracted = FALSE ;
            goto done ;
         }
         else
         {
            SINT32 bsonLen = *((SINT32 *)(_readBuf+_itr)) ;
            if ( (_totalRead - _itr) < bsonLen )
            {
               extracted = FALSE ;
               goto done ;
            }
            else if ( (_totalRead - _itr) == bsonLen )
            {
               SDB_ASSERT( _itr >= (INT32)sizeof( SPD_MAGIC ) ,
                           "impossible" ) ;
               BSONObj tmp ;

               try
               {
                  tmp = BSONObj( _readBuf + _itr ) ;
               }
               catch ( std::exception &e )
               {
                  PD_LOG( PDERROR, "unexpected err happened:%s", e.what() ) ;
                  rc = SDB_SYS ;
                  goto error ;
               }

               if ( sizeof( SPD_MAGIC ) == _itr )
               {
                  /// only a bson msg.
                  msg = tmp ;
                  extracted = TRUE ;
               }
               else
               {
                  /// not only a bson msg.
                  _readBuf[_itr - 4] = '\0' ;
                  BSONElement retCode = tmp.getField( FMP_RES_CODE ) ;
                  BSONElement errMsg = tmp.getField( FMP_ERR_MSG ) ;

                  /// some code like 'print' may return more than a bsonobj.
                  /// we must parse its return code. if it is ok, we ignore
                  /// the printed output. else we put it into errmsg.
                  if ( !retCode.eoo() && NumberInt != retCode.type() )
                  {
                     rc = SDB_SYS ;
                     PD_LOG( PDERROR,
                             "invalid type of retCode:%d", retCode.type() ) ;
                     goto error ;
                  }
                  else if ( !retCode.eoo() ) 
                  {
                     if ( SDB_OK != retCode.Int() )
                     {
                        if ( !errMsg.eoo() )
                        {
                           msg = tmp ;
                        }
                        else
                        {
                           BSONObjBuilder builder ;
                           builder.append( FMP_ERR_MSG, _readBuf ) ;
                           builder.append( retCode ) ;
                           msg = builder.obj() ;
                        }
                     }
                     else
                     {
                        msg = tmp ;
                     }
                  }
                  else
                  {
                     /// retCode is eoo.
                     msg = tmp ;
                  }

                  extracted = TRUE ;
               }
            }
            else
            {
               SDB_ASSERT( FALSE, "impossible" ) ;
               rc = SDB_SYS ;
               PD_LOG( PDERROR, "left len can not be lager than objsize" ) ;
               goto error ;
            }
         }
      }
      else
      {
         while ( _itr < _totalRead  && (UINT32)_expect < sizeof( SPD_MAGIC ) )
         {
            if ( SPD_MAGIC[_expect] == _readBuf[_itr] )
            {
               ++_itr ;
               ++_expect ;
               if ( sizeof( SPD_MAGIC ) == _expect )
               {
                  goto found ;
               }
            }
            else if ( 0 == _expect )
            {
               ++_itr ;
            }
            else
            {
               _expect = 0 ;
            }
         }
      }

   done:
      return rc ;
   error:
      goto done ;
   }
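
The else-branch above is an incremental scan for SPD_MAGIC: _expect records how many magic bytes have matched so far, so a magic string split across two reads is still recognized on a later call. A self-contained sketch of the same scan (names and plain types are illustrative):

   static bool scanMagic( const char *buf, int len, int &itr, int &expect,
                          const char *magic, int magicLen )
   {
      while ( itr < len && expect < magicLen )
      {
         if ( magic[expect] == buf[itr] )
         {
            ++itr ;
            ++expect ;      // one more magic byte matched
         }
         else if ( 0 == expect )
         {
            ++itr ;         // no partial match in progress, keep sliding
         }
         else
         {
            expect = 0 ;    // partial match broken; re-test this byte against magic[0]
         }
      }
      return expect == magicLen ;
   }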
Example #8
    void go( const string db , const boost::filesystem::path outdir ) {
        log() << "DATABASE: " << db << "\t to \t" << outdir.string() << endl;

        boost::filesystem::create_directories( outdir );

        map <string, BSONObj> collectionOptions;
        multimap <string, BSONObj> indexes;
        vector <string> collections;

        // Save indexes for database
        string ins = db + ".system.indexes";
        auto_ptr<DBClientCursor> cursor = conn( true ).query( ins.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
        while ( cursor->more() ) {
            BSONObj obj = cursor->nextSafe();
            const string name = obj.getField( "ns" ).valuestr();
            indexes.insert( pair<string, BSONObj> (name, obj.getOwned()) );
        }

        string sns = db + ".system.namespaces";
        cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
        while ( cursor->more() ) {
            BSONObj obj = cursor->nextSafe();
            const string name = obj.getField( "name" ).valuestr();
            if (obj.hasField("options")) {
                collectionOptions[name] = obj.getField("options").embeddedObject().getOwned();
            }

            // skip namespaces with $ in them only if we don't specify a collection to dump
            if ( _coll == "" && name.find( ".$" ) != string::npos ) {
                log(1) << "\tskipping collection: " << name << endl;
                continue;
            }

            const string filename = name.substr( db.size() + 1 );

            // if a particular collection is specified, and it's not this one, skip it
            if ( _coll != "" && db + "." + _coll != name && _coll != name )
                continue;

            // raise error before writing collection with non-permitted filename chars in the name
            size_t hasBadChars = name.find_first_of(string("/\0", 2));
            if (hasBadChars != string::npos){
              error() << "Cannot dump "  << name << ". Collection has '/' or null in the collection name." << endl;
              continue;
            }
            
            // Don't dump indexes
            if ( endsWith(name.c_str(), ".system.indexes") ) {
              continue;
            }
            
            if ( _coll != "" && db + "." + _coll != name && _coll != name )
              continue;
            
            collections.push_back(name);
        }
        
        for (vector<string>::iterator it = collections.begin(); it != collections.end(); ++it) {
            string name = *it;
            const string filename = name.substr( db.size() + 1 );
            writeCollectionFile( name , outdir / ( filename + ".bson" ) );
            writeMetadataFile( name, outdir / (filename + ".metadata.json"), collectionOptions, indexes);
        }

    }
Example #9
int LocalMemoryGridFile::flush() {
	trace() << " -> LocalMemoryGridFile::flush {file: " << _filename << "}" << endl;
	if (!_dirty) {
		// No dirty chunks, so this file does not need a flush
		info() << "buffers are not dirty, no flush needed {filename: " << _filename << "}" << endl;
		return 0;
	}

	size_t bufferLen = 0;
	boost::shared_array<char> buffer = createFlushBuffer(bufferLen);
	if (!buffer.get() && bufferLen > 0) {
		// Failed to create flush buffer
		return -ENOMEM;
	}

	// Get the existing gridfile from GridFS to get metadata and delete the
	// file from the system
	try {
		ScopedDbConnection dbc(globalFSOptions._connectString);
		GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);
		GridFile origGridFile = gridFS.findFile(BSON("filename" << _filename));

		if (!origGridFile.exists()) {
			dbc.done();
			warn() << "Requested file not found for flushing back data {file: " << _filename << "}" << endl;
			return -EBADF;
		}

		//TODO: Make checks for appropriate object correctness
		//i.e. do not update anything that is not a Regular File
		//Check what happens in case of a link

		gridFS.removeFile(_filename);
		trace() << "Removing the current file from GridFS {file: " << _filename << "}" << endl;
		//TODO: Check whether the remove was successful or not
		//TODO: Rather have an update along with active / passive flag for the
		//file

		try {
			GridFS gridFS(dbc.conn(), globalFSOptions._db, globalFSOptions._collPrefix);

			// Create an empty file to signify the file creation and open a local file for the same
			trace() << "Adding new file to GridFS {file: " << _filename << "}" << endl;
			BSONObj fileObj = gridFS.storeFile(buffer.get(), bufferLen, _filename);
			if (!fileObj.isValid()) {
				warn() << "Failed to save file object in data flush {file: " << _filename << "}" << std::endl;
				dbc.done();
				return -EBADF;
			}

			// Update the last updated date for the document
			BSONObj metadata = origGridFile.getMetadata();
			BSONElement fileObjId = fileObj.getField("_id");
			dbc->update(globalFSOptions._filesNS, BSON("_id" << fileObjId.OID()), 
					BSON("$set" << BSON(
								"uploadDate" << origGridFile.getUploadDate() 
								<< "metadata.type" << "file"
								<< "metadata.filename" << mgridfs::getPathBasename(_filename)
								<< "metadata.directory" << mgridfs::getPathDirname(_filename)
								<< "metadata.lastUpdated" << jsTime()
								<< "metadata.uid" << metadata["uid"]
								<< "metadata.gid" << metadata["gid"]
								<< "metadata.mode" << metadata["mode"]
							)
						)
					);
		} catch (DBException& e) {
			error() << "Caught exception in saving remote file in flush {code: " << e.getCode() << ", what: " << e.what()
				<< ", exception: " << e.toString() << "}" << endl;
			return -EIO;
		}

		dbc.done();
	} catch (DBException& e) {
		// Something failed in getting the file from GridFS
		error() << "Caught exception in getting remote file for flush {code: " << e.getCode() << ", what: " << e.what()
			<< ", exception: " << e.toString() << "}" << endl;
		return -EIO;
	}

	_dirty = false;
	debug() << "Completed flushing the file content to GridFS {file: " << _filename << "}" << endl;
	return 0;
}
Example #10
    bool dbEval(const string& dbName, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
        BSONElement e = cmd.firstElement();
        uassert( 10046 ,  "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );

        const char *code = 0;
        switch ( e.type() ) {
        case String:
        case Code:
            code = e.valuestr();
            break;
        case CodeWScope:
            code = e.codeWScopeCode();
            break;
        default:
            verify(0);
        }
        verify( code );

        if ( ! globalScriptEngine ) {
            errmsg = "db side execution is disabled";
            return false;
        }

        const string userToken = ClientBasic::getCurrent()->getAuthorizationManager()
                                                          ->getAuthenticatedPrincipalNamesToken();
        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbName, "dbeval" + userToken );
        ScriptingFunction f = s->createFunction(code);
        if ( f == 0 ) {
            errmsg = (string)"compile failed: " + s->getError();
            return false;
        }

        if ( e.type() == CodeWScope )
            s->init( e.codeWScopeScopeDataUnsafe() );
        s->localConnect( dbName.c_str() );

        BSONObj args;
        {
            BSONElement argsElement = cmd.getField("args");
            if ( argsElement.type() == Array ) {
                args = argsElement.embeddedObject();
                if ( edebug ) {
                    out() << "args:" << args.toString() << endl;
                    out() << "code:\n" << code << endl;
                }
            }
        }

        int res;
        {
            Timer t;
            res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
            int m = t.millis();
            if ( m > cmdLine.slowMS ) {
                out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
                if ( m >= 1000 ) log() << code << endl;
                else OCCASIONALLY log() << code << endl;
            }
        }
        if (res || s->isLastRetNativeCode()) {
            result.append("errno", (double) res);
            errmsg = "invoke failed: ";
            if (s->isLastRetNativeCode())
                errmsg += "cannot return native function";
            else
                errmsg += s->getError();
            return false;
        }

        s->append( result , "retval" , "__returnValue" );

        return true;
    }
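
For context, a hedged sketch of the command a client would send to reach this handler: the first element carries the code and "args" supplies the parameters (values illustrative):

    BSONObj evalCmd = BSON( "$eval" << "function( x, y ){ return x + y; }"
                            << "args" << BSON_ARRAY( 1 << 2 ) );
    // a successful invocation appends retval, e.g. { retval: 3, ok: 1 }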
Example #11
    static bool dbEval(OperationContext* txn,
                       const string& dbName,
                       const BSONObj& cmd,
                       BSONObjBuilder& result,
                       string& errmsg) {

        BSONElement e = cmd.firstElement();
        uassert( 10046 ,  "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );

        const char *code = 0;
        switch ( e.type() ) {
        case String:
        case Code:
            code = e.valuestr();
            break;
        case CodeWScope:
            code = e.codeWScopeCode();
            break;
        default:
            verify(0);
        }
        verify( code );

        if ( ! globalScriptEngine ) {
            errmsg = "db side execution is disabled";
            return false;
        }

        scoped_ptr<Scope> s(globalScriptEngine->newScope());

        ScriptingFunction f = s->createFunction(code);
        if ( f == 0 ) {
            errmsg = (string)"compile failed: " + s->getError();
            return false;
        }

        s->localConnectForDbEval(txn, dbName.c_str());

        if ( e.type() == CodeWScope )
            s->init( e.codeWScopeScopeDataUnsafe() );

        BSONObj args;
        {
            BSONElement argsElement = cmd.getField("args");
            if ( argsElement.type() == Array ) {
                args = argsElement.embeddedObject();
                if ( edebug ) {
                    log() << "args:" << args.toString() << endl;
                    log() << "code:\n" << code << endl;
                }
            }
        }

        int res;
        {
            Timer t;
            res = s->invoke(f, &args, 0, storageGlobalParams.quota ? 10 * 60 * 1000 : 0);
            int m = t.millis();
            if (m > serverGlobalParams.slowMS) {
                log() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
                if ( m >= 1000 ) log() << code << endl;
                else OCCASIONALLY log() << code << endl;
            }
        }
        if (res || s->isLastRetNativeCode()) {
            result.append("errno", (double) res);
            errmsg = "invoke failed: ";
            if (s->isLastRetNativeCode())
                errmsg += "cannot return native function";
            else
                errmsg += s->getError();
            return false;
        }

        s->append( result , "retval" , "__returnValue" );

        return true;
    }
Example #12
/**
 * This will verify that all updated fields are
 *   1.) Valid for storage (checking parent to make sure things like DBRefs are valid)
 *   2.) Checked to ensure that updated immutable fields do not change values
 *
 * If updatedFields is empty then it was a replacement and/or we need to check all fields
 */
inline Status validate(const BSONObj& original,
                       const FieldRefSet& updatedFields,
                       const mb::Document& updated,
                       const std::vector<FieldRef*>* immutableAndSingleValueFields,
                       const ModifierInterface::Options& opts) {
    LOG(3) << "update validate options -- "
           << " updatedFields: " << updatedFields << " immutableAndSingleValueFields.size:"
           << (immutableAndSingleValueFields ? immutableAndSingleValueFields->size() : 0)
           << " validate:" << opts.enforceOkForStorage;

    // 1.) Loop through each updated field and validate for storage
    // and detect immutable field updates

    // The set of possibly changed immutable fields -- we will need to check their vals
    FieldRefSet changedImmutableFields;

    // Check to see if there were no fields specified or if we are not validating.
    // This is the case for a range query, or a query that didn't result in saved fields
    if (updatedFields.empty() || !opts.enforceOkForStorage) {
        if (opts.enforceOkForStorage) {
            // No specific fields were updated so the whole doc must be checked
            Status s = storageValid(updated, true);
            if (!s.isOK())
                return s;
        }

        // Check all immutable fields
        if (immutableAndSingleValueFields)
            changedImmutableFields.fillFrom(*immutableAndSingleValueFields);
    } else {
        // TODO: Change impl so we don't need to create a new FieldRefSet
        //       -- move all conflict logic into static function on FieldRefSet?
        FieldRefSet immutableFieldRef;
        if (immutableAndSingleValueFields)
            immutableFieldRef.fillFrom(*immutableAndSingleValueFields);

        FieldRefSet::const_iterator where = updatedFields.begin();
        const FieldRefSet::const_iterator end = updatedFields.end();
        for (; where != end; ++where) {
            const FieldRef& current = **where;

            // Find the updated field in the updated document.
            mutablebson::ConstElement newElem = updated.root();
            size_t currentPart = 0;
            while (newElem.ok() && currentPart < current.numParts())
                newElem = newElem[current.getPart(currentPart++)];

            // newElem might be missing if $unset/$renamed-away
            if (newElem.ok()) {
                // Check element, and its children
                Status s = storageValid(newElem, true);
                if (!s.isOK())
                    return s;

                // Check parents to make sure they are valid as well.
                s = storageValidParents(newElem);
                if (!s.isOK())
                    return s;
            }
            // Check if the updated field conflicts with immutable fields
            immutableFieldRef.findConflicts(&current, &changedImmutableFields);
        }
    }

    const bool checkIdField = (updatedFields.empty() && !original.isEmpty()) ||
        updatedFields.findConflicts(&idFieldRef, NULL);

    // Add _id to fields to check since it too is immutable
    if (checkIdField)
        changedImmutableFields.keepShortest(&idFieldRef);
    else if (changedImmutableFields.empty()) {
        // Return early if nothing changed which is immutable
        return Status::OK();
    }

    LOG(4) << "Changed immutable fields: " << changedImmutableFields;
    // 2.) Now compare values of the changed immutable fields (to make sure they haven't changed)

    const mutablebson::ConstElement newIdElem = updated.root()[idFieldName];

    FieldRefSet::const_iterator where = changedImmutableFields.begin();
    const FieldRefSet::const_iterator end = changedImmutableFields.end();
    for (; where != end; ++where) {
        const FieldRef& current = **where;

        // Find the updated field in the updated document.
        mutablebson::ConstElement newElem = updated.root();
        size_t currentPart = 0;
        while (newElem.ok() && currentPart < current.numParts())
            newElem = newElem[current.getPart(currentPart++)];

        if (!newElem.ok()) {
            if (original.isEmpty()) {
                // If the _id is missing and not required, then skip this check
                if (!(current.dottedField() == idFieldName))
                    return Status(ErrorCodes::NoSuchKey,
                                  mongoutils::str::stream() << "After applying the update, the new"
                                                            << " document was missing the '"
                                                            << current.dottedField()
                                                            << "' (required and immutable) field.");

            } else {
                if (current.dottedField() != idFieldName)
                    return Status(ErrorCodes::ImmutableField,
                                  mongoutils::str::stream()
                                      << "After applying the update to the document with "
                                      << newIdElem.toString()
                                      << ", the '"
                                      << current.dottedField()
                                      << "' (required and immutable) field was "
                                         "found to have been removed --"
                                      << original);
            }
        } else {
            // Find the potentially affected field in the original document.
            const BSONElement oldElem = dps::extractElementAtPath(original, current.dottedField());
            const BSONElement oldIdElem = original.getField(idFieldName);

            // Ensure no arrays since neither _id nor shard keys can be in an array, or be one.
            mb::ConstElement currElem = newElem;
            while (currElem.ok()) {
                if (currElem.getType() == Array) {
                    return Status(
                        ErrorCodes::NotSingleValueField,
                        mongoutils::str::stream()
                            << "After applying the update to the document {"
                            << (oldIdElem.ok() ? oldIdElem.toString() : newIdElem.toString())
                            << " , ...}, the (immutable) field '"
                            << current.dottedField()
                            << "' was found to be an array or array descendant.");
                }
                currElem = currElem.parent();
            }

            // If we have both (old and new), compare them. If we just have new we are good
            if (oldElem.ok() && newElem.compareWithBSONElement(oldElem, nullptr, false) != 0) {
                return Status(ErrorCodes::ImmutableField,
                              mongoutils::str::stream()
                                  << "After applying the update to the document {"
                                  << oldElem.toString()
                                  << " , ...}, the (immutable) field '"
                                  << current.dottedField()
                                  << "' was found to have been altered to "
                                  << newElem.toString());
            }
        }
    }

    return Status::OK();
}
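
An illustrative failure case for the immutable-field comparison above (documents hypothetical):

// Given original { _id: 1, a: 1 }, an update whose result is { _id: 2, a: 1 }
// makes oldElem and newElem compare unequal for "_id", so validate() returns
// Status(ErrorCodes::ImmutableField, ...); a result of { _id: 1, a: 2 } passes.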
Example #13
/* ****************************************************************************
*
* attributeType -
*
*/
static std::string attributeType
(
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  const std::string                     entityType,
  const std::string                     attrName
)
{
  std::string  idType         = std::string("_id.")    + ENT_ENTITY_TYPE;
  std::string  idServicePath  = std::string("_id.")    + ENT_SERVICE_PATH;
  std::string  attributeName  = std::string(ENT_ATTRS) + "." + attrName;

  BSONObj query = BSON(idType        << entityType <<
                       idServicePath << fillQueryServicePath(servicePathV) <<
                       attributeName << BSON("$exists" << true));

  std::auto_ptr<DBClientCursor> cursor;
  DBClientBase*                 connection = NULL;

  LM_T(LmtMongo, ("query() in '%s' collection: '%s'",
                  getEntitiesCollectionName(tenant).c_str(),
                  query.toString().c_str()));

  try
  {
    connection = getMongoConnection();
    cursor     = connection->query(getEntitiesCollectionName(tenant).c_str(), query);

    /*
     * We have observed that in some cases of DB errors (e.g. the database daemon is down) instead of
     * raising an exception, the query() method sets the cursor to NULL. In this case, we raise the
     * exception ourselves
     */
    if (cursor.get() == NULL)
    {
      throw DBException("Null cursor from mongo (details on this is found in the source code)", 0);
    }
    releaseMongoConnection(connection);

    LM_I(("Database Operation Successful (%s)", query.toString().c_str()));
  }
  catch (const DBException &e)
  {
    releaseMongoConnection(connection);
    LM_E(("Database Error ('%s', '%s')", query.toString().c_str(), e.what()));
    return "";
  }
  catch (...)
  {
    releaseMongoConnection(connection);
    LM_E(("Database Error ('%s', '%s')", query.toString().c_str(), "generic exception"));
    return "";
  }

  while (cursor->more())
  {
    BSONObj r = cursor->next();

    LM_T(LmtMongo, ("retrieved document: '%s'", r.toString().c_str()));

    /* It could happen that different entities within the same entity type have attributes with the same name
     * but different types. In that case, one type (at random) is returned. A list could be returned, but the
     * NGSIv2 operations only allow setting one type */
    return r.getField(ENT_ATTRS).embeddedObject().getField(attrName).embeddedObject().getStringField(ENT_ATTRS_TYPE);
  }

  return "";
}
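
Rendered as JSON, the query attributeType issues has this shape (key names come from the ENT_* constants; values here are illustrative):

/*
 *   { "_id.type":          "Room",
 *     "_id.servicePath":   <service-path filter>,
 *     "attrs.temperature": { "$exists": true } }
 */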
Example #14
/* ****************************************************************************
*
* mongoEntityTypes -
*/
HttpStatusCode mongoEntityTypes
(
  EntityTypesResponse*                  responseP,
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  std::map<std::string, std::string>&   uriParams
)
{
  unsigned int   offset         = atoi(uriParams[URI_PARAM_PAGINATION_OFFSET].c_str());
  unsigned int   limit          = atoi(uriParams[URI_PARAM_PAGINATION_LIMIT].c_str());
  std::string    detailsString  = uriParams[URI_PARAM_PAGINATION_DETAILS];
  bool           details        = (strcasecmp("on", detailsString.c_str()) == 0)? true : false;
  DBClientBase*  connection     = NULL;
  bool           reqSemTaken    = false;

  LM_T(LmtMongo, ("Query Entity Types"));
  LM_T(LmtPagination, ("Offset: %d, Limit: %d, Details: %s", offset, limit, (details == true)? "true" : "false"));

  reqSemTake(__FUNCTION__, "query types request", SemReadOp, &reqSemTaken);

  /* Compose query based on this aggregation command:  
   *
   * db.runCommand({aggregate: "entities",
   *                pipeline: [ {$match: { "_id.servicePath": /.../ } },
   *                            {$project: {_id: 1, "attrNames": 1} },
   *                            {$project: { "attrNames":
   *                                  {$cond: [ {$eq: [ "$attrNames", [ ] ] }, [null], "$attrNames"] }
   *                               }
   *                            },
   *                            {$unwind: "$attrNames"},
   *                            {$group: {_id: "$_id.type", attrs: {$addToSet: "$attrNames"}} },
   *                            {$sort: {_id: 1} }
   *                          ]
   *                })   
   *
   * The $cond part is hard... more information at http://stackoverflow.com/questions/27510143/empty-array-prevents-document-to-appear-in-query
   * As a consequence, some "null" values may appear in the resulting attrs vector, which are pruned by the result processing logic.
   *
   * FIXME P6: in the future, we can interpret the collapse parameter at this layer. If collapse=true, attributes
   * are not needed, so the following simpler command can be used:
   *
   * db.runCommand({aggregate: "entities", pipeline: [ {$group: {_id: "$_id.type"} }]})
   */

  BSONObj result;

  //
  // Building the projection part of the query that includes types that have no attributes
  // See bug: https://github.com/telefonicaid/fiware-orion/issues/686
  //
  BSONArrayBuilder  emptyArrayBuilder;
  BSONArrayBuilder  nulledArrayBuilder;

  nulledArrayBuilder.appendNull();

  // We use the $cond: [ .. ] form and not $cond: { .. }, as the former is the only one valid in MongoDB 2.4
  BSONObj projection = BSON(
    "$project" << BSON(
      ENT_ATTRNAMES << BSON(
        "$cond" << BSON_ARRAY(
          BSON("$eq" << BSON_ARRAY(S_ATTRNAMES << emptyArrayBuilder.arr()) ) <<
          nulledArrayBuilder.arr() <<
          S_ATTRNAMES
        )
      )
    )
  );

  BSONObj cmd = BSON("aggregate" << COL_ENTITIES <<
                     "pipeline" << BSON_ARRAY(
                                              BSON("$match" << BSON(C_ID_SERVICEPATH << fillQueryServicePath(servicePathV))) <<
                                              BSON("$project" << BSON("_id" << 1 << ENT_ATTRNAMES << 1)) <<
                                              projection <<
                                              BSON("$unwind" << S_ATTRNAMES) <<
                                              BSON("$group" << BSON("_id" << CS_ID_ENTITY << "attrs" << BSON("$addToSet" << S_ATTRNAMES))) <<
                                              BSON("$sort" << BSON("_id" << 1))
                                             )
                     );

  LM_T(LmtMongo, ("runCommand() in '%s' database: '%s'", composeDatabaseName(tenant).c_str(), cmd.toString().c_str()));

  try
  {
    connection = getMongoConnection();
    connection->runCommand(composeDatabaseName(tenant).c_str(), cmd, result);
    releaseMongoConnection(connection);

    LM_I(("Database Operation Successful (%s)", cmd.toString().c_str()));
  }
  catch (const DBException& e)
  {
    releaseMongoConnection(connection);

    std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
      " - command: " + cmd.toString() +
      " - exception: " + e.what();

    LM_E(("Database Error (%s)", err.c_str()));
    responseP->statusCode.fill(SccReceiverInternalError, err);
    reqSemGive(__FUNCTION__, "query types request", reqSemTaken);

    return SccOk;
  }
  catch (...)
  {
    releaseMongoConnection(connection);

    std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
      " - command: " + cmd.toString() +
      " - exception: " + "generic";

    LM_E(("Database Error (%s)", err.c_str()));
    responseP->statusCode.fill(SccReceiverInternalError, err);
    reqSemGive(__FUNCTION__, "query types request", reqSemTaken);

    return SccOk;
  }

  // Processing result to build response
  LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));

  std::vector<BSONElement> resultsArray = result.getField("result").Array();

  if (resultsArray.size() == 0)
  {
    responseP->statusCode.fill(SccContextElementNotFound);
    reqSemGive(__FUNCTION__, "query types request", reqSemTaken);

    return SccOk;
  }

  /* Another strategy to implement pagination is to use the $skip and $limit operators in the
   * aggregation framework. However, doing so, we don't know the total number of results, which can
   * be needed in the case of details=on (using that approach, we need to do two queries: one to get
   * the count and other to get the actual results with $skip and $limit, in the same "transaction" to
   * avoid incoherence between both if some entity type is created or deleted in the process).
   *
   * However, considering that the number of types will be small compared with the number of entities,
   * the current approach seems to be ok
   */
  for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
  {
    BSONObj                   resultItem  = resultsArray[ix].embeddedObject();
    TypeEntity*               entityType  = new TypeEntity(resultItem.getStringField("_id"));
    std::vector<BSONElement>  attrsArray  = resultItem.getField("attrs").Array();

    entityType->count = countEntities(tenant, servicePathV, entityType->type);

    if (!attrsArray[0].isNull())
    {
      for (unsigned int jx = 0; jx < attrsArray.size(); ++jx)
      {
        /* This is where NULL elements in the resulting attrs vector are pruned */
        if (attrsArray[jx].isNull())
        {
          continue;
        }

        /* Note that we need an extra query() to the database (inside the attributeType() function) to get
         * each attribute type. This could be inefficient, especially if the number of attributes is large */
        std::string attrType = attributeType(tenant, servicePathV, entityType->type, attrsArray[jx].str());

        ContextAttribute* ca = new ContextAttribute(attrsArray[jx].str(), attrType, "");
        entityType->contextAttributeVector.push_back(ca);
      }
    }

    responseP->typeEntityVector.push_back(entityType);
  }

  char detailsMsg[256];
  if (responseP->typeEntityVector.size() > 0)
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Count: %d", (int) resultsArray.size());
      responseP->statusCode.fill(SccOk, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccOk);
    }
  }
  else
  {
    if (details)
    {      
      snprintf(detailsMsg, sizeof(detailsMsg), "Number of types: %d. Offset is %d", (int) resultsArray.size(), offset);
      responseP->statusCode.fill(SccContextElementNotFound, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccContextElementNotFound);
    }
  }

  reqSemGive(__FUNCTION__, "query types request", reqSemTaken);

  return SccOk;

}
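A sketch of how mongoEntityTypes() might be driven (the URI_PARAM_* keys are the ones read at the top of the function; the tenant value is hypothetical):

  std::map<std::string, std::string> uriParams;
  uriParams[URI_PARAM_PAGINATION_OFFSET]  = "0";
  uriParams[URI_PARAM_PAGINATION_LIMIT]   = "20";
  uriParams[URI_PARAM_PAGINATION_DETAILS] = "on";

  EntityTypesResponse      response;
  std::vector<std::string> servicePathV;

  mongoEntityTypes(&response, "tenant1", servicePathV, uriParams);

Note the design choice: the function returns SccOk even on database errors; callers must inspect response.statusCode to distinguish success (SccOk), no types found (SccContextElementNotFound), and internal errors (SccReceiverInternalError).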
Example No. 15
/* ****************************************************************************
*
* createEntity -
*/
TEST(mongoNotifyContextRequest, createEntity)
{
    HttpStatusCode         ms;
    NotifyContextRequest   req;
    NotifyContextResponse  res;

    /* Prepare database */
    prepareDatabase();

    /* Forge the request */
    ContextElementResponse cer;
    req.subscriptionId.set("51307b66f481db11bf860001");
    req.originator.set("localhost");
    cer.contextElement.entityId.fill("E10", "T10", "false");
    ContextAttribute ca("A1", "TA1", "new_val");
    cer.contextElement.contextAttributeVector.push_back(&ca);
    cer.statusCode.fill(SccOk);
    req.contextElementResponseVector.push_back(&cer);

    /* Prepare mock */
    TimerMock* timerMock = new TimerMock();
    ON_CALL(*timerMock, getCurrentTime())
            .WillByDefault(Return(1360232700));
    setTimer(timerMock);

    /* Invoke the function in mongoBackend library */
    ms = mongoNotifyContext(&req, &res);

    /* Check response is as expected */
    EXPECT_EQ(SccOk, ms);

    EXPECT_EQ(SccOk, res.responseCode.code);
    EXPECT_EQ("OK", res.responseCode.reasonPhrase);
    EXPECT_EQ(0, res.responseCode.details.size());

    /* Check that every involved collection at MongoDB is as expected */
    /* Note we are using EXPECT_STREQ() for some cases, as Mongo Driver returns const char*, not string
     * objects (see http://code.google.com/p/googletest/wiki/Primer#String_Comparison) */

    DBClientBase* connection = getMongoConnection();

    /* entities collection */
    BSONObj ent;
    std::vector<BSONElement> attrs;
    ASSERT_EQ(6, connection->count(ENTITIES_COLL, BSONObj()));

    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E1" << "_id.type" << "T1"));
    EXPECT_STREQ("E1", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_STREQ("T1", C_STR_FIELD(ent.getObjectField("_id"), "type"));
    EXPECT_FALSE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(2, attrs.size());
    BSONObj a1 = getAttr(attrs, "A1", "TA1");
    BSONObj a2 = getAttr(attrs, "A2", "TA2");
    EXPECT_STREQ("A1", C_STR_FIELD(a1, "name"));
    EXPECT_STREQ("TA1",C_STR_FIELD(a1, "type"));
    EXPECT_STREQ("val1", C_STR_FIELD(a1, "value"));
    EXPECT_FALSE(a1.hasField("modDate"));
    EXPECT_STREQ("A2", C_STR_FIELD(a2, "name"));
    EXPECT_STREQ("TA2", C_STR_FIELD(a2, "type"));
    EXPECT_FALSE(a2.hasField("value"));
    EXPECT_FALSE(a2.hasField("modDate"));

    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E2" << "_id.type" << "T2"));
    EXPECT_STREQ("E2", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_STREQ("T2", C_STR_FIELD(ent.getObjectField("_id"), "type"));
    EXPECT_FALSE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(2, attrs.size());
    BSONObj a3 = getAttr(attrs, "A3", "TA3");
    BSONObj a4 = getAttr(attrs, "A4", "TA4");
    EXPECT_STREQ("A3", C_STR_FIELD(a3, "name"));
    EXPECT_STREQ("TA3", C_STR_FIELD(a3, "type"));
    EXPECT_STREQ("val3", C_STR_FIELD(a3, "value"));
    EXPECT_FALSE(a3.hasField("modDate"));
    EXPECT_STREQ("A4", C_STR_FIELD(a4, "name"));
    EXPECT_STREQ("TA4", C_STR_FIELD(a4, "type"));
    EXPECT_FALSE(a4.hasField("value"));
    EXPECT_FALSE(a4.hasField("modDate"));

    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E3" << "_id.type" << "T3"));
    EXPECT_STREQ("E3", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_STREQ("T3", C_STR_FIELD(ent.getObjectField("_id"), "type"));
    EXPECT_FALSE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(2, attrs.size());
    BSONObj a5 = getAttr(attrs, "A5", "TA5");
    BSONObj a6 = getAttr(attrs, "A6", "TA6");
    EXPECT_STREQ("A5", C_STR_FIELD(a5, "name"));
    EXPECT_STREQ("TA5", C_STR_FIELD(a5, "type"));
    EXPECT_STREQ("val5", C_STR_FIELD(a5, "value"));
    EXPECT_FALSE(a5.hasField("modDate"));
    EXPECT_STREQ("A6", C_STR_FIELD(a6, "name"));
    EXPECT_STREQ("TA6", C_STR_FIELD(a6, "type"));
    EXPECT_FALSE(a6.hasField("value"));
    EXPECT_FALSE(a6.hasField("modDate"));

    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E1" << "_id.type" << "T1bis"));
    EXPECT_STREQ("E1", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_STREQ("T1bis", C_STR_FIELD(ent.getObjectField("_id"), "type"));
    EXPECT_FALSE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(1, attrs.size());
    a1 = getAttr(attrs, "A1", "TA1");
    EXPECT_STREQ("A1", C_STR_FIELD(a1, "name"));
    EXPECT_STREQ("TA1",C_STR_FIELD(a1, "type"));
    EXPECT_STREQ("val1bis2", C_STR_FIELD(a1, "value"));
    EXPECT_FALSE(a1.hasField("modDate"));

    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E10" << "_id.type" << "T10"));
    EXPECT_STREQ("E10", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_STREQ("T10", C_STR_FIELD(ent.getObjectField("_id"), "type"));
    EXPECT_TRUE(ent.hasField("creDate"));
    EXPECT_TRUE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(1, attrs.size());
    a1 = getAttr(attrs, "A1", "TA1");
    EXPECT_STREQ("A1", C_STR_FIELD(a1, "name"));
    EXPECT_STREQ("TA1",C_STR_FIELD(a1, "type"));
    EXPECT_STREQ("new_val", C_STR_FIELD(a1, "value"));
    EXPECT_TRUE(a1.hasField("creDate"));
    EXPECT_TRUE(a1.hasField("modDate"));

    /* Note "_id.type: {$exists: false}" is a way for querying for entities without type */
    ent = connection->findOne(ENTITIES_COLL, BSON("_id.id" << "E1" << "_id.type" << BSON("$exists" << false)));
    EXPECT_STREQ("E1", C_STR_FIELD(ent.getObjectField("_id"), "id"));
    EXPECT_FALSE(ent.getObjectField("_id").hasField("type"));
    EXPECT_FALSE(ent.hasField("modDate"));
    attrs = ent.getField("attrs").Array();
    ASSERT_EQ(2, attrs.size());
    a1 = getAttr(attrs, "A1", "TA1");
    a2 = getAttr(attrs, "A2", "TA2");
    EXPECT_STREQ("A1", C_STR_FIELD(a1, "name"));
    EXPECT_STREQ("TA1",C_STR_FIELD(a1, "type"));
    EXPECT_STREQ("val1-nt", C_STR_FIELD(a1, "value"));
    EXPECT_FALSE(a1.hasField("modDate"));
    EXPECT_STREQ("A2", C_STR_FIELD(a2, "name"));
    EXPECT_STREQ("TA2", C_STR_FIELD(a2, "type"));
    EXPECT_FALSE(a2.hasField("value"));
    EXPECT_FALSE(a2.hasField("modDate"));

    /* Release connection */
    mongoDisconnect();

    /* Release mock */
    delete timerMock;

}
Example No. 16
/* ****************************************************************************
*
* SubscriptionCache::insert - 
*/
void SubscriptionCache::insert(const std::string& tenant, BSONObj bobj)
{
  BSONElement  idField = bobj.getField("_id");

  if (idField.eoo() == true)
  {
    LM_E(("Database Error (error retrieving _id field in doc: '%s')", bobj.toString().c_str()));
    return;
  }


  //
  // 01. Extract values from database object 'bobj'
  //
  std::string               subId             = idField.OID().toString();
  int64_t                   expiration        = bobj.hasField(CSUB_EXPIRATION)? bobj.getField(CSUB_EXPIRATION).Long() : 0;
  std::string               reference         = bobj.getField(CSUB_REFERENCE).String();
  int64_t                   throttling        = bobj.hasField(CSUB_THROTTLING)? bobj.getField(CSUB_THROTTLING).Long() : -1;
  std::vector<BSONElement>  eVec              = bobj.getField(CSUB_ENTITIES).Array();
  std::vector<BSONElement>  attrVec           = bobj.getField(CSUB_ATTRS).Array();
  std::vector<BSONElement>  condVec           = bobj.getField(CSUB_CONDITIONS).Array();
  std::string               formatString      = bobj.hasField(CSUB_FORMAT)? bobj.getField(CSUB_FORMAT).String() : "XML";
  std::string               servicePath       = bobj.hasField(CSUB_SERVICE_PATH)? bobj.getField(CSUB_SERVICE_PATH).String() : "/";
  Format                    format            = stringToFormat(formatString);
  int                       lastNotification  = bobj.hasField(CSUB_LASTNOTIFICATION)? bobj.getField(CSUB_LASTNOTIFICATION).Int() : 0;
  std::vector<EntityInfo*>  eiV;
  std::vector<std::string>  attrV;
  Restriction               restriction;
  NotifyConditionVector     notifyConditionVector;


  //
  // 02. Push Entity-data names to EntityInfo Vector (eiV)
  //
  for (unsigned int ix = 0; ix < eVec.size(); ++ix)
  {
    BSONObj entity = eVec[ix].embeddedObject();

    if (!entity.hasField(CSUB_ENTITY_ID))
    {
      LM_W(("Runtime Error (got a subscription without id)"));
      continue;
    }

    std::string id = entity.getStringField(CSUB_ENTITY_ID);  // same constant as the hasField() guard above
    
    if (!entity.hasField(CSUB_ENTITY_ISPATTERN))
    {
      continue;
    }

    std::string isPattern = entity.getStringField(CSUB_ENTITY_ISPATTERN);
    if (isPattern != "true")
    {
      continue;
    }

    std::string  type = "";
    if (entity.hasField(CSUB_ENTITY_TYPE))
    {
      type = entity.getStringField(CSUB_ENTITY_TYPE);
    }

    EntityInfo* eiP = new EntityInfo(id, type);
    eiV.push_back(eiP);
  }

  if (eiV.size() == 0)
  {
    return;
  }


  //
  // 03. Push attribute names to Attribute Vector (attrV)
  //
  for (unsigned int ix = 0; ix < attrVec.size(); ++ix)
  {
    std::string attributeName = attrVec[ix].String();

    attrV.push_back(attributeName);
  }


  //
  // 04. FIXME P4: Restriction not implemented
  //
  
  

  //
  // 05. Fill in notifyConditionVector from condVec
  //
  for (unsigned int ix = 0; ix < condVec.size(); ++ix)
  {
    BSONObj                   condition = condVec[ix].embeddedObject();
    std::string               condType;
    std::vector<BSONElement>  valueVec;

    condType = condition.getStringField(CSUB_CONDITIONS_TYPE);
    if (condType != "ONCHANGE")
    {
      continue;
    }

    NotifyCondition* ncP = new NotifyCondition();
    ncP->type = condType;

    valueVec = condition.getField(CSUB_CONDITIONS_VALUE).Array();
    for (unsigned int vIx = 0; vIx < valueVec.size(); ++vIx)
    {
      std::string condValue;

      condValue = valueVec[vIx].String();
      ncP->condValueList.push_back(condValue);
    }

    notifyConditionVector.push_back(ncP);
  }

  if (notifyConditionVector.size() == 0)
  {
    for (unsigned int ix = 0; ix < eiV.size(); ++ix)
    {
      delete(eiV[ix]);
    }
    eiV.clear();

    restriction.release();

    return;
  }


  //
  // 06. Create Subscription and add it to the subscription-cache
  //
  Subscription* subP = new Subscription(tenant,
                                        servicePath,
                                        subId,
                                        eiV,
                                        attrV,
                                        throttling,
                                        expiration,
                                        restriction,
                                        notifyConditionVector,
                                        reference,
                                        lastNotification,
                                        format);
  
  subCache->insert(subP);

  notifyConditionVector.release();  // Subscription constructor makes a copy
}
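A sketch of a subscription document that would pass the checks above and end up in the cache. The literal field names stand in for the CSUB_* constants used in the code (an assumption about their values), and subCacheP is a hypothetical pointer to the SubscriptionCache instance. Only patterned subscriptions (isPattern == "true") with at least one ONCHANGE condition are cached:

  BSONObj sub = BSON("_id"        << OID::gen() <<
                     "reference"  << "http://localhost:1028/notify" <<
                     "expiration" << (long long) 1500000000 <<
                     "entities"   << BSON_ARRAY(BSON("id" << ".*" << "isPattern" << "true" << "type" << "Room")) <<
                     "attrs"      << BSONArray() <<
                     "conditions" << BSON_ARRAY(BSON("type"  << "ONCHANGE" <<
                                                     "value" << BSON_ARRAY("temperature"))));
  subCacheP->insert("tenant1", sub);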
Example No. 17
   // PD_TRACE_DECLARE_FUNCTION ( SDB_RTNQUERY, "rtnQuery" )
   INT32 rtnQuery ( const CHAR *pCollectionName,
                    const BSONObj &selector,
                    const BSONObj &matcher,
                    const BSONObj &orderBy,
                    const BSONObj &hint,
                    SINT32 flags,
                    pmdEDUCB *cb,
                    SINT64 numToSkip,
                    SINT64 numToReturn,
                    SDB_DMSCB *dmsCB,
                    SDB_RTNCB *rtnCB,
                    SINT64 &contextID,
                    rtnContextBase **ppContext,
                    BOOLEAN enablePrefetch )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY ( SDB_RTNQUERY ) ;
      dmsStorageUnitID suID = DMS_INVALID_CS ;
      contextID             = -1 ;

      SDB_ASSERT ( pCollectionName, "collection name can't be NULL" ) ;
      SDB_ASSERT ( cb, "educb can't be NULL" ) ;
      SDB_ASSERT ( dmsCB, "dmsCB can't be NULL" ) ;
      SDB_ASSERT ( rtnCB, "rtnCB can't be NULL" ) ;

      dmsStorageUnit *su = NULL ;
      dmsMBContext *mbContext = NULL ;
      rtnContextData *dataContext = NULL ;
      const CHAR *pCollectionShortName = NULL ;
      rtnAccessPlanManager *apm = NULL ;
      optAccessPlan *plan = NULL ;

      BSONObj hintTmp = hint ;
      BSONObj blockObj ;
      BSONObj *pBlockObj = NULL ;
      const CHAR *indexName = NULL ;
      const CHAR *scanType  = NULL ;
      INT32 indexLID = DMS_INVALID_EXTENT ;
      INT32 direction = 0 ;

      if ( FLG_QUERY_EXPLAIN & flags )
      {
         rc = rtnExplain( pCollectionName,
                          selector,
                          matcher,
                          orderBy,
                          hint,
                          flags, numToSkip,
                          numToReturn,
                          cb, dmsCB, rtnCB,
                          contextID,
                          ppContext ) ;
         if ( SDB_OK != rc )
         {
            PD_LOG( PDERROR, "failed to explain query:%d", rc ) ;
            goto error ;
         }
         else
         {
            goto done ;
         }
      }

      rc = rtnResolveCollectionNameAndLock ( pCollectionName, dmsCB, &su,
                                             &pCollectionShortName, suID ) ;
      PD_RC_CHECK( rc, PDERROR, "Failed to resolve collection name %s",
                   pCollectionName ) ;

      rc = su->data()->getMBContext( &mbContext, pCollectionShortName, -1 ) ;
      PD_RC_CHECK( rc, PDERROR, "Failed to get dms mb context, rc: %d", rc ) ;

      rc = rtnCB->contextNew ( ( flags & FLG_QUERY_PARALLED ) ?
                               RTN_CONTEXT_PARADATA : RTN_CONTEXT_DATA,
                               (rtnContext**)&dataContext,
                               contextID, cb ) ;
      PD_RC_CHECK( rc, PDERROR, "Failed to create new data context" ) ;

      if ( Object == hint.getField( FIELD_NAME_META ).type() )
      {
         BSONObjBuilder build ;
         rc = _rtnParseQueryMeta( hint.getField( FIELD_NAME_META ).embeddedObject(),
                                  scanType, indexName, indexLID, direction,
                                  blockObj ) ;
         PD_RC_CHECK( rc, PDERROR, "Failed to parase query meta[%s], rc: %d",
                      hint.toString().c_str(), rc ) ;

         pBlockObj = &blockObj ;

         if ( indexName )
         {
            build.append( "", indexName ) ;
         }
         else
         {
            build.appendNull( "" ) ;
         }
         hintTmp = build.obj () ;
      }

      apm = su->getAPM() ;
      SDB_ASSERT ( apm, "apm shouldn't be NULL" ) ;

      rc = apm->getPlan ( matcher,
                          orderBy, // orderBy
                          hintTmp, // hint
                          pCollectionShortName,
                          &plan ) ;
      if ( rc )
      {
         PD_LOG ( PDERROR, "Failed to get access plan for %s, context %lld, "
                  "rc: %d", pCollectionName, contextID, rc ) ;
         goto error ;
      }
      else if ( ( flags & FLG_QUERY_FORCE_HINT ) && !hintTmp.isEmpty() &&
                plan->isHintFailed() )
      {
         PD_LOG( PDERROR, "Query used force hint[%s] failed",
                 hintTmp.toString().c_str() ) ;
         rc = SDB_RTN_INVALID_HINT ;
         goto error ;
      }

      if ( pBlockObj )
      {
         if ( !indexName && TBSCAN != plan->getScanType() )
         {
            PD_LOG( PDERROR, "Scan type[%d] must be TBSCAN",
                    plan->getScanType() ) ;
            rc = SDB_SYS ;
            goto error ;
         }
         else if ( indexName && ( IXSCAN != plan->getScanType() ||
                   indexLID != plan->getIndexLID() ) )
         {
            PD_LOG( PDERROR, "Scan type[%d] error or indexLID[%d] is the "
                    "same with [%d]", plan->getScanType(),
                    plan->getIndexLID(), indexLID ) ;
            rc = SDB_IXM_NOTEXIST ;
            goto error ;
         }
      }

      if ( flags & FLG_QUERY_STRINGOUT )
      {
         dataContext->getSelector().setStringOutput( TRUE ) ;
      }

      rc = dataContext->open( su, mbContext, plan, cb, selector,
                              plan->sortRequired() ? -1 : numToReturn,
                              plan->sortRequired() ? 0 : numToSkip,
                              pBlockObj, direction ) ;
      PD_RC_CHECK( rc, PDERROR, "Open data context failed, rc: %d", rc ) ;

      suID = DMS_INVALID_CS ;
      plan = NULL ;
      mbContext = NULL ;

      if ( cb->getMonConfigCB()->timestampON )
      {
         dataContext->getMonCB()->recordStartTimestamp() ;
      }

      if ( dataContext->getPlan()->sortRequired() )
      {
         rc = rtnSort ( (rtnContext**)&dataContext, orderBy, cb, numToSkip,
                        numToReturn, rtnCB, contextID ) ;
         PD_RC_CHECK( rc, PDERROR, "Failed to sort, rc: %d", rc ) ;
      }

      if ( ppContext )
      {
         *ppContext = dataContext ;
      }
      if ( enablePrefetch )
      {
         dataContext->enablePrefetch ( cb ) ;
      }

   done :
      PD_TRACE_EXITRC ( SDB_RTNQUERY, rc ) ;
      return rc ;
   error :
      if ( su && mbContext )
      {
         su->data()->releaseMBContext( mbContext ) ;
      }
      if ( plan )
      {
         plan->release() ;
      }
      if ( DMS_INVALID_CS != suID )
      {
         dmsCB->suUnlock( suID ) ;
      }
      if ( -1 != contextID )
      {
         rtnCB->contextDelete ( contextID, cb ) ;
         contextID = -1 ;
      }
      goto done ;
   }
Example No. 18
        bool wrappedRun(OperationContext* txn,
                        const string& dbname,
                        BSONObj& jsobj,
                        string& errmsg,
                        BSONObjBuilder& anObjBuilder) {
            const std::string coll = jsobj.firstElement().valuestrsafe();
            if (coll.empty()) {
                errmsg = "no collection name specified";
                return false;
            }

            const std::string toDeleteNs = dbname + '.' + coll;
            if (!serverGlobalParams.quiet) {
                LOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
            }

            Client::Context ctx(txn, toDeleteNs);
            Database* db = ctx.db();

            Collection* collection = db->getCollection( txn, toDeleteNs );
            if ( ! collection ) {
                errmsg = "ns not found";
                return false;
            }

            stopIndexBuilds(txn, db, jsobj);

            IndexCatalog* indexCatalog = collection->getIndexCatalog();
            anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal(txn) );


            BSONElement f = jsobj.getField("index");
            if ( f.type() == String ) {

                string indexToDelete = f.valuestr();

                if ( indexToDelete == "*" ) {
                    Status s = indexCatalog->dropAllIndexes(txn, false);
                    if ( !s.isOK() ) {
                        appendCommandStatus( anObjBuilder, s );
                        return false;
                    }
                    anObjBuilder.append("msg", "non-_id indexes dropped for collection");
                    return true;
                }

                IndexDescriptor* desc = collection->getIndexCatalog()->findIndexByName( txn,
                                                                                        indexToDelete );
                if ( desc == NULL ) {
                    errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex(txn, desc);
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            if ( f.type() == Object ) {
                IndexDescriptor* desc =
                    collection->getIndexCatalog()->findIndexByKeyPattern( txn, f.embeddedObject() );
                if ( desc == NULL ) {
                    errmsg = "can't find index with key:";
                    errmsg += f.embeddedObject().toString();
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex(txn, desc);
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            errmsg = "invalid index name spec";
            return false;
        }
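For reference, the three "index" argument shapes the handler above accepts. The command's first element supplies the collection name; its field name is conventionally "dropIndexes", but the code only reads its value:

  // Drop one index by name:
  BSONObj byName = BSON("dropIndexes" << "mycoll" << "index" << "myIndex_1");

  // Drop one index by key pattern:
  BSONObj byKey  = BSON("dropIndexes" << "mycoll" << "index" << BSON("a" << 1));

  // Drop all non-_id indexes:
  BSONObj all    = BSON("dropIndexes" << "mycoll" << "index" << "*");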
Example No. 19
Status ModifierCurrentDate::init(const BSONElement& modExpr,
                                 const Options& opts,
                                 bool* positional) {
    _updatePath.parse(modExpr.fieldName());
    Status status = fieldchecker::isUpdatable(_updatePath);
    if (!status.isOK()) {
        return status;
    }

    // If a $-positional operator was used, get the index in which it occurred
    // and ensure only one occurrence.
    size_t foundCount;
    bool foundDollar =
        fieldchecker::isPositional(_updatePath, &_pathReplacementPosition, &foundCount);

    if (positional)
        *positional = foundDollar;

    if (foundDollar && foundCount > 1) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Too many positional (i.e. '$') elements found in path '"
                                    << _updatePath.dottedField()
                                    << "'");
    }

    // Validate and store the type to produce
    switch (modExpr.type()) {
        case Bool:
            _typeIsDate = true;
            break;
        case Object: {
            const BSONObj argObj = modExpr.embeddedObject();
            const BSONElement typeElem = argObj.getField(kType);
            bool badInput = typeElem.eoo() || typeElem.type() != String;

            if (!badInput) {
                std::string typeVal = typeElem.String();
                badInput = !(typeElem.String() == kDate || typeElem.String() == kTimestamp);
                if (!badInput)
                    _typeIsDate = (typeVal == kDate);

                if (!badInput) {
                    // Check to make sure only the $type field was given as an arg
                    BSONObjIterator i(argObj);
                    const bool onlyHasTypeField =
                        ((i.next().fieldNameStringData() == kType) && i.next().eoo());
                    if (!onlyHasTypeField) {
                        return Status(ErrorCodes::BadValue,
                                      str::stream()
                                          << "The only valid field of the option is '$type': "
                                             "{$currentDate: {field : {$type: 'date/timestamp'}}}; "
                                          << "arg: "
                                          << argObj);
                    }
                }
            }

            if (badInput) {
                return Status(ErrorCodes::BadValue,
                              "The '$type' string field is required "
                              "to be 'date' or 'timestamp': "
                              "{$currentDate: {field : {$type: 'date'}}}");
            }
            break;
        }
        default:
            return Status(ErrorCodes::BadValue,
                          str::stream() << typeName(modExpr.type())
                                        << " is not valid type for $currentDate."
                                           " Please use a boolean ('true')"
                                           " or a $type expression ({$type: 'timestamp/date'}).");
    }

    return Status::OK();
}
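A sketch of mod expressions that exercise each branch of the validation above; init() receives a single element, e.g. asBool.firstElement(). The field names are illustrative, and the "$type"/"date"/"timestamp" strings follow the error messages embedded in the code itself:

  BSONObj asBool      = BSON("lastSeen" << true);                         // Bool case: produces a Date
  BSONObj asDate      = BSON("lastSeen" << BSON("$type" << "date"));      // Object case: produces a Date
  BSONObj asTimestamp = BSON("lastSeen" << BSON("$type" << "timestamp")); // Object case: produces a Timestamp
  BSONObj rejected    = BSON("lastSeen" << BSON("$type" << "date" <<
                                                "extra" << 1));           // fails the only-$type check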
Example No. 20
/* ****************************************************************************
*
* mongoAttributesForEntityType -
*/
HttpStatusCode mongoAttributesForEntityType
(
  std::string                           entityType,
  EntityTypeAttributesResponse*         responseP,
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  std::map<std::string, std::string>&   uriParams
)
{
  unsigned int offset         = atoi(uriParams[URI_PARAM_PAGINATION_OFFSET].c_str());
  unsigned int limit          = atoi(uriParams[URI_PARAM_PAGINATION_LIMIT].c_str());
  std::string  detailsString  = uriParams[URI_PARAM_PAGINATION_DETAILS];
  bool         details        = (strcasecmp("on", detailsString.c_str()) == 0)? true : false;

  // Setting the name of the entity type for the response
  responseP->entityType.type = entityType;

  LM_T(LmtMongo, ("Query Types Attribute for <%s>", entityType.c_str()));
  LM_T(LmtPagination, ("Offset: %d, Limit: %d, Details: %s", offset, limit, (details == true)? "true" : "false"));

  reqSemTake(__FUNCTION__, "query types attributes request");

  DBClientBase* connection = getMongoConnection();

  /* Compose query based on this aggregation command:
   *
   * FIXME P9: taking into account that type is no longer used as part of the attribute "key", not sure if the
   * aggregation query below is fully correct
   *
   * db.runCommand({aggregate: "entities",
   *                pipeline: [ {$match: { "_id.type": "TYPE" } },
   *                            {$project: {_id: 1, "attrs.name": 1, "attrs.type": 1} },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$_id.type", attrs: {$addToSet: "$attrs"}} },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$attrs" }},
   *                            {$sort: {"_id.name": 1, "_id.type": 1} }
   *                          ]
   *                })
   *
   */

  BSONObj result;
  BSONObj cmd = BSON("aggregate" << COL_ENTITIES <<
                     "pipeline" << BSON_ARRAY(
                                              BSON("$match" << BSON(C_ID_ENTITY << entityType)) <<
                                              BSON("$project" << BSON("_id" << 1 << C_ATTR_NAME << 1 << C_ATTR_TYPE << 1)) <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << CS_ID_ENTITY << "attrs" << BSON("$addToSet" << S_ATTRS))) <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << S_ATTRS)) <<
                                              BSON("$sort" << BSON(C_ID_NAME << 1 << C_ID_TYPE << 1))
                                             )
                    );

  LM_T(LmtMongo, ("runCommand() in '%s' database: '%s'", composeDatabaseName(tenant).c_str(), cmd.toString().c_str()));

  mongoSemTake(__FUNCTION__, "aggregation command"); 
  try
  {

    connection->runCommand(composeDatabaseName(tenant).c_str(), cmd, result);
    mongoSemGive(__FUNCTION__, "aggregation command");
    LM_I(("Database Operation Successful (%s)", cmd.toString().c_str()));
  }
  catch (const DBException& e)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + e.what();

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + "generic";

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }

  /* Processing result to build response*/
  LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));

  std::vector<BSONElement> resultsArray = result.getField("result").Array();

  /* See comment above in the other method regarding this strategy to implement pagination */
  for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
  {
    BSONElement        idField    = resultsArray[ix].embeddedObject().getField("_id");

    //
    // BSONElement::eoo() returns true when the element was 'not found', i.e. the field "_id"
    // doesn't exist in the embedded document.
    //
    // If "_id" is not found and we nevertheless call embeddedObject() on the element,
    // an exception is thrown and the broker crashes.
    //
    if (idField.eoo() == true)
    {
      LM_E(("Database Error (error retrieving _id field in doc: %s)", resultsArray[ix].embeddedObject().toString().c_str()));
      continue;
    }

    BSONObj            resultItem = idField.embeddedObject();
    ContextAttribute*  ca         = new ContextAttribute(resultItem.getStringField(ENT_ATTRS_NAME), resultItem.getStringField(ENT_ATTRS_TYPE));
    responseP->entityType.contextAttributeVector.push_back(ca);
  }

  char detailsMsg[256];
  if (responseP->entityType.contextAttributeVector.size() > 0)
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Count: %d", (int) resultsArray.size());
      responseP->statusCode.fill(SccOk, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccOk);
    }
  }
  else
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Number of attributes: %d. Offset is %d", (int) resultsArray.size(), offset);
      responseP->statusCode.fill(SccContextElementNotFound, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccContextElementNotFound);
    }
  }

  reqSemGive(__FUNCTION__, "query types request");

  return SccOk;
}
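As a worked example of the pagination window above: with 10 results in resultsArray, offset=8 and limit=5, the loop runs for ix = 8 and 9, since MIN(resultsArray.size(), offset + limit) = MIN(10, 13) = 10; with offset=10 or greater it runs zero times and the not-found branch fills the status code.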
Example No. 21
Status ProjectionExec::transform(const BSONObj& in,
                                 BSONObjBuilder* bob,
                                 const MatchDetails* details) const {
    const ArrayOpType& arrayOpType = _arrayOpType;

    BSONObjIterator it(in);
    while (it.more()) {
        BSONElement elt = it.next();

        // Case 1: _id
        if (mongoutils::str::equals("_id", elt.fieldName())) {
            if (_includeID) {
                bob->append(elt);
            }
            continue;
        }

        // Case 2: no array projection for this field.
        Matchers::const_iterator matcher = _matchers.find(elt.fieldName());
        if (_matchers.end() == matcher) {
            Status s = append(bob, elt, details, arrayOpType);
            if (!s.isOK()) {
                return s;
            }
            continue;
        }

        // Case 3: field has array projection with $elemMatch specified.
        if (ARRAY_OP_ELEM_MATCH != arrayOpType) {
            return Status(ErrorCodes::BadValue, "Matchers are only supported for $elemMatch");
        }

        MatchDetails arrayDetails;
        arrayDetails.requestElemMatchKey();

        if (matcher->second->matchesBSON(in, &arrayDetails)) {
            FieldMap::const_iterator fieldIt = _fields.find(elt.fieldName());
            if (_fields.end() == fieldIt) {
                return Status(ErrorCodes::BadValue,
                              "$elemMatch specified, but projection field not found.");
            }

            BSONArrayBuilder arrBuilder;
            BSONObjBuilder subBob;

            if (in.getField(elt.fieldName()).eoo()) {
                return Status(ErrorCodes::InternalError,
                              "$elemMatch called on document element with eoo");
            }

            if (in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()).eoo()) {
                return Status(ErrorCodes::InternalError,
                              "$elemMatch called on array element with eoo");
            }

            arrBuilder.append(
                in.getField(elt.fieldName()).Obj().getField(arrayDetails.elemMatchKey()));
            subBob.appendArray(matcher->first, arrBuilder.arr());
            Status status = append(bob, subBob.done().firstElement(), details, arrayOpType);
            if (!status.isOK()) {
                return status;
            }
        }
    }

    return Status::OK();
}
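A walkthrough of the $elemMatch path above on illustrative data:

  // Document in:   { _id: 1, scores: [ { k: "a", v: 1 }, { k: "b", v: 2 } ] }
  // Projection:    { scores: { $elemMatch: { k: "b" } } }
  // matchesBSON() records elemMatchKey "1" (the index of the matching array element),
  // that element alone is copied into an array builder, and the output is:
  //                { _id: 1, scores: [ { k: "b", v: 2 } ] }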
Example No. 22
/* ****************************************************************************
*
* mongoEntityTypes -
*/
HttpStatusCode mongoEntityTypes
(
  EntityTypesResponse*                  responseP,
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  std::map<std::string, std::string>&   uriParams
)
{
  unsigned int offset         = atoi(uriParams[URI_PARAM_PAGINATION_OFFSET].c_str());
  unsigned int limit          = atoi(uriParams[URI_PARAM_PAGINATION_LIMIT].c_str());
  std::string  detailsString  = uriParams[URI_PARAM_PAGINATION_DETAILS];
  bool         details        = (strcasecmp("on", detailsString.c_str()) == 0)? true : false;

  LM_T(LmtMongo, ("Query Entity Types"));
  LM_T(LmtPagination, ("Offset: %d, Limit: %d, Details: %s", offset, limit, (details == true)? "true" : "false"));

  reqSemTake(__FUNCTION__, "query types request");

  DBClientBase* connection = getMongoConnection();

  /* Compose query based on this aggregation command:
   *
   * FIXME P9: taking into account that type is no longer used as part of the attribute "key", not sure if the
   * aggregation query below is fully correct
   *
   * db.runCommand({aggregate: "entities",
   *                pipeline: [ {$project: {_id: 1, "attrs.name": 1, "attrs.type": 1} },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$_id.type", attrs: {$addToSet: "$attrs"}} },
   *                            {$sort: {_id: 1} }
   *                          ]
   *                })
   *
   * FIXME P6: in the future, we can interpret the collapse parameter at this layer. If collapse=true so we don't need attributes, the
   * following command can be used:
   *
   * db.runCommand({aggregate: "entities", pipeline: [ {$group: {_id: "$_id.type"} }]})
   *
   */

  BSONObj result;
  BSONObj cmd = BSON("aggregate" << COL_ENTITIES <<
                     "pipeline" << BSON_ARRAY(
                                              BSON("$project" << BSON("_id" << 1 << C_ATTR_NAME << 1 << C_ATTR_TYPE << 1)) <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << CS_ID_ENTITY << "attrs" << BSON("$addToSet" << S_ATTRS))) <<
                                              BSON("$sort" << BSON("_id" << 1))
                                             )
                    );

  LM_T(LmtMongo, ("runCommand() in '%s' database: '%s'", composeDatabaseName(tenant).c_str(), cmd.toString().c_str()));

  mongoSemTake(__FUNCTION__, "aggregation command");  
  try
  {

    connection->runCommand(composeDatabaseName(tenant).c_str(), cmd, result);
    mongoSemGive(__FUNCTION__, "aggregation command");
    LM_I(("Database Operation Successful (%s)", cmd.toString().c_str()));
  }
  catch (const DBException& e)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + e.what();

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + "generic";

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }

  /* Processing result to build response*/
  LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));

  std::vector<BSONElement> resultsArray = result.getField("result").Array();

  /* Another strategy to implement pagination is to use the $skip and $limit operators in the
   * aggregation framework. However, doing so, we don't know the total number of results, which can
   * be needed in the case of details=on (using that approach, we need to do two queries: one to get
   * the count and other to get the actual results with $skip and $limit, in the same "transaction" to
   * avoid incoherence between both if some entity type is created or deleted in the process).
   *
   * However, considering that the number of types will be small compared with the number of entities,
   * the current approach seems to be ok
   */
  for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
  {

    BSONObj                  resultItem = resultsArray[ix].embeddedObject();
    TypeEntity*              type       = new TypeEntity(resultItem.getStringField("_id"));
    std::vector<BSONElement> attrsArray = resultItem.getField("attrs").Array();

    for (unsigned int jx = 0; jx < attrsArray.size(); ++jx)
    {
      BSONObj jAttr = attrsArray[jx].embeddedObject();
      ContextAttribute* ca = new ContextAttribute(jAttr.getStringField(ENT_ATTRS_NAME), jAttr.getStringField(ENT_ATTRS_TYPE));
      type->contextAttributeVector.push_back(ca);
    }

    responseP->typeEntityVector.push_back(type);

  }

  char detailsMsg[256];
  if (responseP->typeEntityVector.size() > 0)
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Count: %d", (int) resultsArray.size());
      responseP->statusCode.fill(SccOk, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccOk);
    }
  }
  else
  {
    if (details)
    {      
      snprintf(detailsMsg, sizeof(detailsMsg), "Number of types: %d. Offset is %d", (int) resultsArray.size(), offset);
      responseP->statusCode.fill(SccContextElementNotFound, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccContextElementNotFound);
    }
  }

  reqSemGive(__FUNCTION__, "query types request");

  return SccOk;

}
Example No. 23
File: dump.cpp  Project: vrtx/mongo
    void go(const string& db,
            const string& coll,
            const Query& query,
            const boost::filesystem::path& outdir,
            const string& outFilename) {
        // Can only provide outFilename if db and coll are provided
        fassert(17368, outFilename.empty() || (!coll.empty() && !db.empty()));
        boost::filesystem::create_directories( outdir );

        map <string, BSONObj> collectionOptions;
        multimap <string, BSONObj> indexes;
        vector <string> collections;

        // Save indexes for database
        string ins = db + ".system.indexes";
        auto_ptr<DBClientCursor> cursor = conn( true ).query( ins.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
        while ( cursor->more() ) {
            BSONObj obj = cursor->nextSafe();
            const string name = obj.getField( "ns" ).valuestr();
            indexes.insert( pair<string, BSONObj> (name, obj.getOwned()) );
        }

        string sns = db + ".system.namespaces";
        cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
        while ( cursor->more() ) {
            BSONObj obj = cursor->nextSafe();
            const string name = obj.getField( "name" ).valuestr();
            if (obj.hasField("options")) {
                collectionOptions[name] = obj.getField("options").embeddedObject().getOwned();
            }

            // skip namespaces with $ in them only if we don't specify a collection to dump
            if (coll == "" && name.find(".$") != string::npos) {
                if (logger::globalLogDomain()->shouldLog(logger::LogSeverity::Debug(1))) {
                    toolInfoLog() << "\tskipping collection: " << name << std::endl;
                }
                continue;
            }

            const string filename = name.substr( db.size() + 1 );

            // if a particular collection is specified and it's not this one, skip it
            if (coll != "" && db + "." + coll != name && coll != name) {
                continue;
            }

            // raise error before writing collection with non-permitted filename chars in the name
            // Note: the literal's length must be given explicitly, else the embedded '\0'
            // terminates the C string and only '/' would be searched for
            size_t hasBadChars = name.find_first_of(string("/\0", 2));
            if (hasBadChars != string::npos) {
                toolError() << "Cannot dump "  << name
                            << ". Collection has '/' or null in the collection name." << std::endl;
                continue;
            }

            if (nsToCollectionSubstring(name) == "system.indexes") {
                // Create system.indexes.bson for compatibility with pre 2.2 mongorestore
                const string filename = name.substr( db.size() + 1 );
                writeCollectionFile( name.c_str() , query, outdir / ( filename + ".bson" ) );
                // Don't dump indexes as *.metadata.json
                continue;
            }

            if (nsToCollectionSubstring(name) == "system.users" &&
                    !mongoDumpGlobalParams.dumpUsersAndRoles) {
                continue;
            }

            collections.push_back(name);
        }

        for (vector<string>::iterator it = collections.begin(); it != collections.end(); ++it) {
            string name = *it;
            const string filename = outFilename != "" ? outFilename : name.substr( db.size() + 1 );
            writeCollectionFile( name , query, outdir / ( filename + ".bson" ) );
            writeMetadataFile( name, outdir / (filename + ".metadata.json"), collectionOptions, indexes);
        }

    }
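A usage sketch for go() (paths and names are illustrative):

  // Dump a single collection to dump/mydb/mycoll.bson:
  go("mydb", "mycoll", Query(), boost::filesystem::path("dump/mydb"), "mycoll");

  // Dump every collection in the database; per the fassert at the top,
  // outFilename must be empty in this case:
  go("mydb", "", Query(), boost::filesystem::path("dump/mydb"), "");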
Example No. 24
    bool ReplSource::handleDuplicateDbName( const BSONObj &op, const char *ns, const char *db ) {
        if ( dbHolder()._isLoaded( ns, dbpath ) ) {
            // Database is already present.
            return true;   
        }
        BSONElement ts = op.getField( "ts" );
        if ( ( ts.type() == Date || ts.type() == Timestamp ) && ___databaseIgnorer.ignoreAt( db, ts.date() ) ) {
            // Database is ignored due to a previous indication that it is
            // missing from master after optime "ts".
            return false;   
        }
        if ( Database::duplicateUncasedName( false, db, dbpath ).empty() ) {
            // No duplicate database names are present.
            return true;
        }
        
        OpTime lastTime;
        bool dbOk = false;
        {
            dbtemprelease release;
        
            // We always log an operation after executing it (never before), so
            // a database list will always be valid as of an oplog entry generated
            // before it was retrieved.
            
            BSONObj last = oplogReader.findOne( this->ns().c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
            if ( !last.isEmpty() ) {
                BSONElement ts = last.getField( "ts" );
                massert( 14032, "Invalid 'ts' in remote log", ts.type() == Date || ts.type() == Timestamp );
                lastTime = OpTime( ts.date() );
            }

            BSONObj info;
            bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
            massert( 14033, "Unable to get database list", ok );
            BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
            while( i.more() ) {
                BSONElement e = i.next();
            
                const char * name = e.embeddedObject().getField( "name" ).valuestr();
                if ( strcasecmp( name, db ) != 0 )
                    continue;
                
                if ( strcmp( name, db ) == 0 ) {
                    // The db exists on master, still need to check that no conflicts exist there.
                    dbOk = true;
                    continue;
                }
                
                // The master has a db name that conflicts with the requested name.
                dbOk = false;
                break;
            }
        }
        
        if ( !dbOk ) {
            ___databaseIgnorer.doIgnoreUntilAfter( db, lastTime );
            incompleteCloneDbs.erase(db);
            addDbNextPass.erase(db);
            return false;   
        }
        
        // Check for duplicates again, since we released the lock above.
        set< string > duplicates;
        Database::duplicateUncasedName( false, db, dbpath, &duplicates );
        
        // The database is present on the master and no conflicting databases
        // are present on the master.  Drop any local conflicts.
        for( set< string >::const_iterator i = duplicates.begin(); i != duplicates.end(); ++i ) {
            ___databaseIgnorer.doIgnoreUntilAfter( *i, lastTime );
            incompleteCloneDbs.erase(*i);
            addDbNextPass.erase(*i);
            Client::Context ctx(*i);
            dropDatabase(*i);
        }
        
        massert( 14034, "Duplicate database names present after attempting to delete duplicates",
                Database::duplicateUncasedName( false, db, dbpath ).empty() );
        return true;
    }
Example No. 25
// static
StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* opCtx,
                                                                      const string& ns,
                                                                      const BSONObj& cmdObj) {
    // query - required
    BSONElement queryElt = cmdObj.getField("query");
    if (queryElt.eoo()) {
        return Status(ErrorCodes::BadValue, "required field query missing");
    }
    if (!queryElt.isABSONObj()) {
        return Status(ErrorCodes::BadValue, "required field query must be an object");
    }
    BSONObj queryObj = queryElt.Obj();

    // sort - optional
    BSONElement sortElt = cmdObj.getField("sort");
    BSONObj sortObj;
    if (!sortElt.eoo()) {
        if (!sortElt.isABSONObj()) {
            return Status(ErrorCodes::BadValue, "optional field sort must be an object");
        }
        sortObj = sortElt.Obj();
    }

    // projection - optional
    BSONElement projElt = cmdObj.getField("projection");
    BSONObj projObj;
    if (!projElt.eoo()) {
        if (!projElt.isABSONObj()) {
            return Status(ErrorCodes::BadValue, "optional field projection must be an object");
        }
        projObj = projElt.Obj();
    }

    // collation - optional
    BSONObj collationObj;
    if (auto collationElt = cmdObj["collation"]) {
        if (!collationElt.isABSONObj()) {
            return Status(ErrorCodes::BadValue, "optional field collation must be an object");
        }
        collationObj = collationElt.Obj();
        if (collationObj.isEmpty()) {
            return Status(ErrorCodes::BadValue,
                          "optional field collation cannot be an empty object");
        }
    }

    // Create canonical query
    const NamespaceString nss(ns);
    auto qr = stdx::make_unique<QueryRequest>(std::move(nss));
    qr->setFilter(queryObj);
    qr->setSort(sortObj);
    qr->setProj(projObj);
    qr->setCollation(collationObj);
    const ExtensionsCallbackReal extensionsCallback(opCtx, &nss);
    const boost::intrusive_ptr<ExpressionContext> expCtx;
    auto statusWithCQ =
        CanonicalQuery::canonicalize(opCtx,
                                     std::move(qr),
                                     expCtx,
                                     extensionsCallback,
                                     MatchExpressionParser::kAllowAllSpecialFeatures);
    if (!statusWithCQ.isOK()) {
        return statusWithCQ.getStatus();
    }

    return std::move(statusWithCQ.getValue());
}
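A sketch of a command object that satisfies the validation above (opCtx is assumed to be an available OperationContext*, and the namespace is illustrative):

  BSONObj cmdObj = BSON("query"      << BSON("a" << 1) <<
                        "sort"       << BSON("a" << 1) <<
                        "projection" << BSON("_id" << 0 << "a" << 1) <<
                        "collation"  << BSON("locale" << "fr"));

  auto swCQ = PlanCacheCommand::canonicalize(opCtx, "test.collection", cmdObj);
  if (!swCQ.isOK()) {
    // BadValue from one of the field checks above
  }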
Example No. 26
        bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
            BSONElement e = jsobj.firstElement();
            const string toDeleteNs = dbname + '.' + e.valuestr();
            if (!serverGlobalParams.quiet) {
                MONGO_TLOG(0) << "CMD: dropIndexes " << toDeleteNs << endl;
            }

            Lock::DBWrite dbXLock(dbname);
            Client::Context ctx(toDeleteNs);

            Collection* collection = cc().database()->getCollection( toDeleteNs );
            if ( ! collection ) {
                errmsg = "ns not found";
                return false;
            }

            stopIndexBuilds(cc().database(), jsobj);

            IndexCatalog* indexCatalog = collection->getIndexCatalog();
            anObjBuilder.appendNumber("nIndexesWas", indexCatalog->numIndexesTotal() );


            BSONElement f = jsobj.getField("index");
            if ( f.type() == String ) {

                string indexToDelete = f.valuestr();

                if ( indexToDelete == "*" ) {
                    Status s = indexCatalog->dropAllIndexes( false );
                    if ( !s.isOK() ) {
                        appendCommandStatus( anObjBuilder, s );
                        return false;
                    }
                    anObjBuilder.append("msg", "non-_id indexes dropped for collection");
                    return true;
                }

                IndexDescriptor* desc = indexCatalog->findIndexByName( indexToDelete );
                if ( desc == NULL ) {
                    errmsg = str::stream() << "index not found with name [" << indexToDelete << "]";
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex( desc );
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            if ( f.type() == Object ) {
                IndexDescriptor* desc = indexCatalog->findIndexByKeyPattern( f.embeddedObject() );
                if ( desc == NULL ) {
                    errmsg = "can't find index with key:";
                    errmsg += f.embeddedObject().toString();
                    return false;
                }

                if ( desc->isIdIndex() ) {
                    errmsg = "cannot drop _id index";
                    return false;
                }

                Status s = indexCatalog->dropIndex( desc );
                if ( !s.isOK() ) {
                    appendCommandStatus( anObjBuilder, s );
                    return false;
                }

                return true;
            }

            errmsg = "invalid index name spec";
            return false;
        }
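
For reference, a sketch of the three "index" argument shapes that run() above dispatches on; the collection and key names are placeholders:

// Hedged sketch: String branch (by name or "*") and Object branch (by key pattern).
#include "mongo/db/jsobj.h"

mongo::BSONObj byName = BSON("dropIndexes" << "coll" << "index" << "a_1");           // drop one by name
mongo::BSONObj byKey  = BSON("dropIndexes" << "coll" << "index" << BSON("a" << 1));  // drop one by key pattern
mongo::BSONObj allIdx = BSON("dropIndexes" << "coll" << "index" << "*");             // drop all non-_id indexes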
Exemplo n.º 27
0
StatusWith<BalancerSettingsType> BalancerSettingsType::fromBSON(const BSONObj& obj) {
    BalancerSettingsType settings;

    {
        bool stopped;
        Status status = bsonExtractBooleanFieldWithDefault(obj, kStopped, false, &stopped);
        if (!status.isOK())
            return status;
        if (stopped) {
            settings._mode = kOff;
        } else {
            std::string modeStr;
            status = bsonExtractStringFieldWithDefault(obj, kMode, kBalancerModes[kFull], &modeStr);
            if (!status.isOK())
                return status;
            auto it = std::find(std::begin(kBalancerModes), std::end(kBalancerModes), modeStr);
            if (it == std::end(kBalancerModes)) {
                return Status(ErrorCodes::BadValue, "Invalid balancer mode");
            }

            settings._mode = static_cast<BalancerMode>(it - std::begin(kBalancerModes));
        }
    }

    {
        BSONElement activeWindowElem;
        Status status = bsonExtractTypedField(obj, kActiveWindow, Object, &activeWindowElem);
        if (status.isOK()) {
            const BSONObj balancingWindowObj = activeWindowElem.Obj();
            if (balancingWindowObj.isEmpty()) {
                return Status(ErrorCodes::BadValue, "activeWindow not specified");
            }

            // Check if both 'start' and 'stop' are present
            const std::string start = balancingWindowObj.getField("start").str();
            const std::string stop = balancingWindowObj.getField("stop").str();

            if (start.empty() || stop.empty()) {
                return Status(ErrorCodes::BadValue,
                              str::stream()
                                  << "must specify both start and stop of balancing window: "
                                  << balancingWindowObj);
            }

            // Check that both 'start' and 'stop' are valid time-of-day
            boost::posix_time::ptime startTime;
            boost::posix_time::ptime stopTime;
            if (!toPointInTime(start, &startTime) || !toPointInTime(stop, &stopTime)) {
                return Status(ErrorCodes::BadValue,
                              str::stream() << kActiveWindow << " format is "
                                            << "{ start: \"hh:mm\", stop: \"hh:mm\" }");
            }

            // Check that start and stop designate different time points
            if (startTime == stopTime) {
                return Status(ErrorCodes::BadValue,
                              str::stream() << "start and stop times must be different");
            }

            settings._activeWindowStart = startTime;
            settings._activeWindowStop = stopTime;
        } else if (status != ErrorCodes::NoSuchKey) {
            return status;
        }
    }

    {
        auto secondaryThrottleStatus =
            MigrationSecondaryThrottleOptions::createFromBalancerConfig(obj);
        if (!secondaryThrottleStatus.isOK()) {
            return secondaryThrottleStatus.getStatus();
        }

        settings._secondaryThrottle = std::move(secondaryThrottleStatus.getValue());
    }

    {
        bool waitForDelete;
        Status status =
            bsonExtractBooleanFieldWithDefault(obj, kWaitForDelete, false, &waitForDelete);
        if (!status.isOK())
            return status;

        settings._waitForDelete = waitForDelete;
    }

    return settings;
}
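
A sketch of a balancer settings document that fromBSON() above should accept; the literal keys stand in for the kStopped/kMode/kActiveWindow/kWaitForDelete constants and are assumptions about their values:

// Hedged sketch: "stopped" wins over "mode"; "activeWindow" needs both
// "start" and "stop" as "hh:mm" strings designating different times of day.
#include "mongo/db/jsobj.h"

mongo::BSONObj balancerDoc =
    BSON("stopped" << false
                   << "mode" << "full"
                   << "activeWindow" << BSON("start" << "23:00" << "stop" << "6:00")
                   << "_waitForDelete" << true);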
Exemplo n.º 28
0
   INT32 _omTaskStrategyInfo::fromBSON( const BSONObj &obj )
   {
      INT32 rc = SDB_OK ;
      BSONElement beField ;
      BSONObj ipsObj ;

      beField = obj.getField( OM_REST_FIELD_RULE_ID ) ;
      if ( !beField.isNumber() )
      {
         PD_LOG( PDERROR, "Field[%s] must be number",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      setID( beField.numberLong() ) ;

      beField = obj.getField( OM_REST_FIELD_TASK_ID ) ;
      if ( !beField.isNumber() )
      {
         PD_LOG( PDERROR, "Field[%s] must be number",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      setTaskID( beField.numberLong() ) ;

      beField = obj.getField( OM_REST_FIELD_TASK_NAME ) ;
      if ( String != beField.type() )
      {
         PD_LOG( PDERROR, "Field[%s] must be string",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      setTaskName( beField.str() ) ;

      beField = obj.getField( OM_REST_FIELD_NICE ) ;
      if ( !beField.isNumber() )
      {
         PD_LOG( PDERROR, "Field[%s] must be number",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      setNice( beField.numberInt() ) ;

      beField = obj.getField( OM_REST_FIELD_USER_NAME ) ;
      if ( String != beField.type() )
      {
         PD_LOG( PDERROR, "Field[%s] must be string",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      setUserName( beField.str() ) ;

      beField = obj.getField( OM_REST_FIELD_IPS ) ;
      if ( Array != beField.type() )
      {
         PD_LOG( PDERROR, "Field[%s] must be string array",
                 beField.toString( TRUE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }
      else
      {
         clearIPSet() ;

         string tmpStr ;
         BSONElement e ;
         BSONObjIterator itr( beField.embeddedObject() ) ;
         while( itr.more() )
         {
            e = itr.next() ;
            if ( String != e.type() )
            {
               PD_LOG( PDERROR, "Field[%s] must be string array",
                       e.toString( TRUE, TRUE ).c_str() ) ;
               rc = SDB_INVALIDARG ;
               goto error ;
            }
            tmpStr = e.str() ;

            if ( !tmpStr.empty() )
            {
               addIP( tmpStr ) ;
            }
         }
      }

   done:
      return rc ;
   error:
      goto done ;
   }
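
A sketch of the strategy document this fromBSON() expects; the literal keys stand in for the OM_REST_FIELD_* constants (whose real values live elsewhere) and are assumptions:

   // Hedged sketch: every field _omTaskStrategyInfo::fromBSON() above reads,
   // with placeholder key names and values.
   BSONObj strategyObj = BSON( "RuleID" << 1 <<
                               "TaskID" << 1001 <<
                               "TaskName" << "nightlyBackup" <<
                               "Nice" << 0 <<
                               "UserName" << "admin" <<
                               "IPs" << BSON_ARRAY( "192.168.20.165" <<
                                                    "192.168.20.166" ) ) ;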
Exemplo n.º 29
0
/* ****************************************************************************
*
* mongoUpdateContextAvailabilitySubscription - 
*/
HttpStatusCode mongoUpdateContextAvailabilitySubscription(UpdateContextAvailabilitySubscriptionRequest* requestP, UpdateContextAvailabilitySubscriptionResponse* responseP, Format inFormat, const std::string& tenant)
{
  LM_T(LmtMongo, ("Update Context Subscription"));
  reqSemTake(__FUNCTION__, "ngsi9 update subscription request");

  DBClientConnection* connection = getMongoConnection();

  /* Look for document */
  BSONObj  sub;
  try {
      OID id = OID(requestP->subscriptionId.get());

      mongoSemTake(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection");
      sub = connection->findOne(getSubscribeContextAvailabilityCollectionName(tenant).c_str(), BSON("_id" << id));
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection");
  }
  catch( const AssertionException &e ) {
      /* This happens when OID format is wrong */
      // FIXME: this checking should be done at the parsing stage, without progressing to
      // mongoBackend. For the moment we can leave it here, but we should remove it in the
      // future (old issue #95)
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo assertion exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo assertion exception)");

      responseP->errorCode.fill(SccContextElementNotFound);
      return SccOk;
  }
  catch( const DBException &e ) {
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo db exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - findOne() _id: " + requestP->subscriptionId.get() +
                                " - exception: " + e.what());
      return SccOk;
  }
  catch(...) {
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo generic exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - findOne() _id: " + requestP->subscriptionId.get() +
                                " - exception: " + "generic");
      return SccOk;
  }

  if (sub.isEmpty()) {
     responseP->errorCode.fill(SccContextElementNotFound);
     reqSemGive(__FUNCTION__, "ngsi9 update subscription request (no subscriptions found)");
     return SccOk;
  }

  /* We start with an empty BSONObjBuilder and process requestP for all the fields that can
   * be updated. I don't like this strategy too much (I would have preferred to start with
   * a copy of the original document, then modify it as needed, but this doesn't seem to be
   * easy using the API provided by the Mongo C++ driver).
   *
   * FIXME: a better implementation strategy could be to use a findAndModify() query to do
   * the update, detecting whether the document was not found, instead of using findOne() +
   * update() with a $set operation: one operation against MongoDB instead of two.
   */
  BSONObjBuilder newSub;

  /* Entities (mandatory) */
  BSONArrayBuilder entities;
  for (unsigned int ix = 0; ix < requestP->entityIdVector.size(); ++ix) {
      EntityId* en = requestP->entityIdVector.get(ix);
      if (en->type == "") {
          entities.append(BSON(CASUB_ENTITY_ID << en->id <<
                               CASUB_ENTITY_ISPATTERN << en->isPattern));
      }
      else {
          entities.append(BSON(CASUB_ENTITY_ID << en->id <<
                               CASUB_ENTITY_TYPE << en->type <<
                               CASUB_ENTITY_ISPATTERN << en->isPattern));
      }

  }
  newSub.append(CASUB_ENTITIES, entities.arr());

  /* Attributes (always taken into account) */
  BSONArrayBuilder attrs;
  for (unsigned int ix = 0; ix < requestP->attributeList.size(); ++ix) {
      attrs.append(requestP->attributeList.get(ix));
  }
  newSub.append(CASUB_ATTRS, attrs.arr());

  /* Duration (optional) */
  if (requestP->duration.isEmpty()) {
      newSub.append(CASUB_EXPIRATION, sub.getField(CASUB_EXPIRATION).numberLong());
  }
  else {
      long long expiration = getCurrentTime() + requestP->duration.parse();
      newSub.append(CASUB_EXPIRATION, expiration);
      LM_T(LmtMongo, ("New subscription expiration: %l", expiration));
  }

  /* Reference is not updatable, so it is appended directly */
  newSub.append(CASUB_REFERENCE, STR_FIELD(sub, CASUB_REFERENCE));

  int count = sub.hasField(CASUB_COUNT) ? sub.getIntField(CASUB_COUNT) : 0;

  /* The hasField checks are needed because lastNotification/count may not be present in the original doc */
  if (sub.hasField(CASUB_LASTNOTIFICATION)) {
      newSub.append(CASUB_LASTNOTIFICATION, sub.getIntField(CASUB_LASTNOTIFICATION));
  }
  if (sub.hasField(CASUB_COUNT)) {
      newSub.append(CASUB_COUNT, count);
  }

  /* Adding format to use in notifications */
  newSub.append(CASUB_FORMAT, std::string(formatToString(inFormat)));

  /* Update document in MongoDB */
  BSONObj update = newSub.obj();
  LM_T(LmtMongo, ("update() in '%s' collection _id '%s': %s}", getSubscribeContextAvailabilityCollectionName(tenant).c_str(),
                  requestP->subscriptionId.get().c_str(),
                  update.toString().c_str()));
  try {
      mongoSemTake(__FUNCTION__, "update in SubscribeContextAvailabilityCollection");
      connection->update(getSubscribeContextAvailabilityCollectionName(tenant).c_str(), BSON("_id" << OID(requestP->subscriptionId.get())), update);
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection");
  }
  catch( const DBException &e ) {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo db exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                " - update() doc: " + update.toString() +
                                " - exception: " + e.what());

      return SccOk;
  }
  catch(...) {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo generic exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                " - update() doc: " + update.toString() +
                                " - exception: " + "generic");
      return SccOk;
  }

  /* Send notifications for matching context registrations */
  processAvailabilitySubscription(requestP->entityIdVector, requestP->attributeList, requestP->subscriptionId.get(), STR_FIELD(sub, CASUB_REFERENCE), inFormat, tenant);

  /* Duration is an optional parameter; it is only added to the response
   * when it was used in the update */
  if (!requestP->duration.isEmpty()) {      
      responseP->duration = requestP->duration;
  }

  responseP->subscriptionId = requestP->subscriptionId;

  reqSemGive(__FUNCTION__, "ngsi9 update subscription request");

  return SccOk;
}
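
A minimal sketch of the findAndModify idea raised in the FIXME comment above, assuming the legacy C++ driver's runCommand() interface; the collection name "casubs" and the dbName variable are placeholders, not names taken from this code:

  // Hedged sketch: one findAndModify round trip instead of findOne() + update().
  // A null "value" field in the result means no document matched, replacing the
  // separate sub.isEmpty() check above.
  BSONObj cmd = BSON("findAndModify" << "casubs" <<                                  // placeholder collection
                     "query" << BSON("_id" << OID(requestP->subscriptionId.get())) <<
                     "update" << update);
  BSONObj result;
  // connection->runCommand(dbName, cmd, result);
  // if (result["value"].isNull()) -> fill errorCode with SccContextElementNotFound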
Exemplo n.º 30
0
   // PD_TRACE_DECLARE_FUNCTION ( SDB_RTNCOORDREMOVELOB_EXECUTE, "rtnCoordRemoveLob::execute" )
   INT32 rtnCoordRemoveLob::execute( CHAR *pReceiveBuffer, SINT32 packSize,
                                     CHAR **ppResultBuffer, pmdEDUCB *cb,
                                     MsgOpReply &replyHeader,
                                     BSONObj** ppErrorObj )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY( SDB_RTNCOORDREMOVELOB_EXECUTE ) ;
      const MsgOpLob *header = NULL ;
      const MsgHeader *baseHeader = ( const MsgHeader * )pReceiveBuffer ;
      BSONObj obj ;
      BSONElement ele ;
      const CHAR *fullName = NULL ;
      _rtnCoordLobStream stream ;

      replyHeader.header.messageLength = sizeof( MsgOpReply ) ;
      replyHeader.header.opCode = MSG_BS_LOB_CLOSE_RES ;
      replyHeader.header.requestID = baseHeader->requestID ;
      replyHeader.header.routeID.value = 0 ;
      replyHeader.header.TID = baseHeader->TID ;
      replyHeader.contextID = -1 ;
      replyHeader.flags = SDB_OK ;
      replyHeader.numReturned = 0 ;
      replyHeader.startFrom = 0 ;

      rc = msgExtractRemoveLobRequest( pReceiveBuffer, &header,
                                       obj ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to extract remove msg:%d", rc ) ;
         goto error ;
      }

      ele = obj.getField( FIELD_NAME_COLLECTION ) ;
      if ( String != ele.type() )
      {
         PD_LOG( PDERROR, "invalid type of field \"collection\":%s",
                 obj.toString( FALSE, TRUE ).c_str() ) ;
         rc = SDB_SYS ;
         goto error ;
      }
      fullName = ele.valuestr() ;

      ele = obj.getField( FIELD_NAME_LOB_OID ) ;
      if ( jstOID != ele.type() )
      {
         PD_LOG( PDERROR, "invalid type of field \"oid\":%s",
                 obj.toString( FALSE, TRUE ).c_str() ) ;
         rc = SDB_SYS ;
         goto error ;
      }

      rc = stream.open( fullName,
                        ele.__oid(), SDB_LOB_MODE_REMOVE,
                        cb ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to remove lob:%s, rc:%d",
                 ele.__oid().str().c_str(), rc ) ;
         goto error ;
      }

      rc = stream.truncate( 0, cb ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "faield to truncate lob:%d", rc ) ;
         goto error ;
      }

   done:
      {
         INT32 rcTmp = SDB_OK ;
         rcTmp = stream.close( cb ) ;
         if ( SDB_OK != rcTmp )
         {
            PD_LOG( PDERROR, "failed to close lob stream:%d", rcTmp ) ;
            rc = ( SDB_OK == rc ) ? rcTmp : rc ;
            replyHeader.flags = rc ;
         }
      }
      PD_TRACE_EXITRC( SDB_RTNCOORDREMOVELOB_EXECUTE, rc ) ;
      return rc ;
   error:
      replyHeader.flags = rc ; 
      goto done ;
   }
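
A sketch of the request body msgExtractRemoveLobRequest() is expected to yield for execute() above; the literal keys stand in for FIELD_NAME_COLLECTION and FIELD_NAME_LOB_OID and are assumptions:

   // Hedged sketch: the two fields execute() validates -- a String collection
   // full name and a jstOID lob id.
   BSONObj removeLobObj = BSON( "Collection" << "foo.bar" <<
                                "Oid" << OID( "5f0d9c2ae4b0a1b2c3d4e5f6" ) ) ;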