Example #1
    bool mergeChunks( OperationContext* txn,
                      const NamespaceString& nss,
                      const BSONObj& minKey,
                      const BSONObj& maxKey,
                      const OID& epoch,
                      string* errMsg ) {

        //
        // Get sharding state up-to-date
        //

        ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                              *errMsg );
        if ( !configLoc.isValid() ){
            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get the distributed lock
        //

        ScopedDistributedLock collLock( configLoc, nss.ns() );
        collLock.setLockMessage( stream() << "merging chunks in " << nss.ns() << " from "
                                          << minKey << " to " << maxKey );

        Status acquisitionStatus = collLock.tryAcquire();
        if (!acquisitionStatus.isOK()) {
            *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                               << " to merge chunks in [" << minKey << "," << maxKey << ")"
                               << causedBy(acquisitionStatus);

            warning() << *errMsg << endl;
            return false;
        }

        //
        // We now have the collection lock, refresh metadata to latest version and sanity check
        //

        ChunkVersion shardVersion;
        Status status = shardingState.refreshMetadataNow(txn, nss.ns(), &shardVersion);

        if ( !status.isOK() ) {

            *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                    << nss.ns() << causedBy( status.reason() );

            warning() << *errMsg << endl;
            return false;
        }

        if ( epoch.isSet() && shardVersion.epoch() != epoch ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has changed since merge was sent (sent epoch: "
                               << epoch.toString()
                               << ", current epoch: " << shardVersion.epoch().toString() << ")";

            warning() << *errMsg << endl;
            return false;
        }

        CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );

        if ( !metadata || metadata->getKeyPattern().isEmpty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " is not sharded";

            warning() << *errMsg << endl;
            return false;
        }

        dassert( metadata->getShardVersion().equals( shardVersion ) );

        if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {

            *errMsg = stream() << "could not merge chunks, the range "
                               << rangeToString( minKey, maxKey ) << " is not valid"
                               << " for collection " << nss.ns() << " with key pattern "
                               << metadata->getKeyPattern();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Get merged chunk information
        //

        ChunkVersion mergeVersion = metadata->getCollVersion();
        mergeVersion.incMinor();

        OwnedPointerVector<ChunkType> chunksToMerge;

        ChunkType itChunk;
        // Seed the scan at minKey: getNextChunk() below looks up the chunk whose
        // range contains getMax(), so the first lookup finds the chunk at minKey.
        itChunk.setMin( minKey );
        itChunk.setMax( minKey );
        itChunk.setNS( nss.ns() );
        itChunk.setShard( shardingState.getShardName() );

        while ( itChunk.getMax().woCompare( maxKey ) < 0 &&
                metadata->getNextChunk( itChunk.getMax(), &itChunk ) ) {
            auto_ptr<ChunkType> saved( new ChunkType );
            itChunk.cloneTo( saved.get() );
            chunksToMerge.mutableVector().push_back( saved.release() );
        }

        if ( chunksToMerge.empty() ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " and ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Validate the range starts and ends at chunks and has no holes, error if not valid
        //

        BSONObj firstDocMin = ( *chunksToMerge.begin() )->getMin();
        BSONObj firstDocMax = ( *chunksToMerge.begin() )->getMax();
        // minKey is inclusive
        bool minKeyInRange = rangeContains( firstDocMin, firstDocMax, minKey );

        if ( !minKeyInRange ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range starting at " << minKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        BSONObj lastDocMin = ( *chunksToMerge.rbegin() )->getMin();
        BSONObj lastDocMax = ( *chunksToMerge.rbegin() )->getMax();
        // maxKey is exclusive
        bool maxKeyInRange = lastDocMin.woCompare( maxKey ) < 0 &&
                lastDocMax.woCompare( maxKey ) >= 0;

        if ( !maxKeyInRange ) {
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " range ending at " << maxKey
                               << " does not belong to shard " << shardingState.getShardName();

            warning() << *errMsg << endl;
            return false;
        }

        bool validRangeStartKey = firstDocMin.woCompare( minKey ) == 0;
        bool validRangeEndKey = lastDocMax.woCompare( maxKey ) == 0;

        if ( !validRangeStartKey || !validRangeEndKey ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " does not contain a chunk "
                               << ( !validRangeStartKey ? "starting at " + minKey.toString() : "" )
                               << ( !validRangeStartKey && !validRangeEndKey ? " or " : "" )
                               << ( !validRangeEndKey ? "ending at " + maxKey.toString() : "" );

            warning() << *errMsg << endl;
            return false;
        }

        if ( chunksToMerge.size() == 1 ) {

            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " already contains chunk for " << rangeToString( minKey, maxKey );

            warning() << *errMsg << endl;
            return false;
        }

        bool holeInRange = false;

        // Look for hole in range
        ChunkType* prevChunk = *chunksToMerge.begin();
        ChunkType* nextChunk = NULL;
        for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
                it != chunksToMerge.end(); ++it ) {
            if ( it == chunksToMerge.begin() ) continue;

            nextChunk = *it;
            if ( prevChunk->getMax().woCompare( nextChunk->getMin() ) != 0 ) {
                holeInRange = true;
                break;
            }
            prevChunk = nextChunk;
        }

        if ( holeInRange ) {

            dassert( NULL != nextChunk );
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has a hole in the range " << rangeToString( minKey, maxKey )
                               << " at " << rangeToString( prevChunk->getMax(),
                                                           nextChunk->getMin() );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Run apply ops command
        //

        BSONObj applyOpsCmd = buildApplyOpsCmd( chunksToMerge,
                                                shardVersion,
                                                mergeVersion );

        bool ok;
        BSONObj result;
        try {
            ScopedDbConnection conn( configLoc, 30.0 );
            ok = conn->runCommand( "config", applyOpsCmd, result );
            if ( !ok ) *errMsg = result.toString();
            conn.done();
        }
        catch( const DBException& ex ) {
            ok = false;
            *errMsg = ex.toString();
        }

        if ( !ok ) {
            *errMsg = stream() << "could not merge chunks for " << nss.ns()
                               << ", writing to config failed" << causedBy( errMsg );

            warning() << *errMsg << endl;
            return false;
        }

        //
        // Install merged chunk metadata
        //

        {
            Lock::DBLock writeLk(txn->lockState(), nss.db(), newlm::MODE_X);
            shardingState.mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
        }

        //
        // Log change
        //

        BSONObj mergeLogEntry = buildMergeLogEntry( chunksToMerge,
                                                    shardVersion,
                                                    mergeVersion );

        configServer.logChange( "merge", nss.ns(), mergeLogEntry );

        return true;
    }
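A note on the hole check above: adjacent chunks tile the merged range only if each chunk's max equals the next chunk's min. A minimal standalone sketch of that invariant, using plain integer ranges instead of BSON keys (all names illustrative):

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // True if the sorted [min, max) ranges tile their span with no gaps,
    // i.e. every range's max equals the next range's min.
    static bool isContiguous( const std::vector< std::pair<int, int> >& chunks ) {
        for ( std::size_t i = 1; i < chunks.size(); ++i ) {
            if ( chunks[i - 1].second != chunks[i].first )
                return false; // hole between chunks[i-1].max and chunks[i].min
        }
        return true;
    }

    int main() {
        std::vector< std::pair<int, int> > ok, holey;
        ok.push_back( std::make_pair( 0, 10 ) );
        ok.push_back( std::make_pair( 10, 20 ) );
        holey.push_back( std::make_pair( 0, 10 ) );
        holey.push_back( std::make_pair( 15, 20 ) ); // gap: [10, 15) is missing
        assert( isContiguous( ok ) );
        assert( !isContiguous( holey ) );
        return 0;
    }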
Example #2
    /* called on a reconfig AND on initiate
       throws
       @param initial true when initiating
    */
    void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial) {
        int failures = 0, majority = 0;
        int me = 0;
        stringstream selfs;
        for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
            if( i->h.isSelf() ) {
                me++;
                if( me > 1 )
                    selfs << ',';
                selfs << i->h.toString();
                if( !i->potentiallyHot() ) {
                    uasserted(13420, "initiation and reconfiguration of a replica set must be sent to a node that can become primary");
                }
                majority += i->votes;
            }
        }
        majority = (majority / 2) + 1;
        
        uassert(13278, "bad config: isSelf is true for multiple hosts: " + selfs.str(), me <= 1); // dups?
        if( me != 1 ) {
            stringstream ss;
            ss << "can't find self in the replset config";
            if( !cmdLine.isDefaultPort() ) ss << " my port: " << cmdLine.port;
            if( me != 0 ) ss << " found: " << me;
            uasserted(13279, ss.str());
        }

        for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
            // we know we're up
            if (i->h.isSelf()) {
                continue;
            }

            BSONObj res;
            {
                bool ok = false;
                try {
                    int theirVersion = -1000;
                    ok = requestHeartbeat(cfg._id, "", i->h.toString(), res, -1, theirVersion, initial/*check if empty*/);
                    if( theirVersion >= cfg.version ) {
                        stringstream ss;
                        ss << "replSet member " << i->h.toString() << " has too new a config version (" << theirVersion << ") to reconfigure";
                        uasserted(13259, ss.str());
                    }
                }
                catch(DBException& e) {
                    log() << "replSet cmufcc requestHeartbeat " << i->h.toString() << " : " << e.toString() << rsLog;
                }
                catch(...) {
                    log() << "replSet cmufcc error exception in requestHeartbeat?" << rsLog;
                }
                if( res.getBoolField("mismatch") )
                    uasserted(13145, "set name does not match the set name host " + i->h.toString() + " expects");
                if( *res.getStringField("set") ) {
                    if( cfg.version <= 1 ) {
                        // this was to be an initiation; no one should already be initiated.
                        uasserted(13256, "member " + i->h.toString() + " is already initiated");
                    }
                    else {
                        // Assure no one has a newer config.
                        if( res["v"].Int() >= cfg.version ) {
                            uasserted(13341, "member " + i->h.toString() + " has a config version >= to the new cfg version; cannot change config");
                        }
                    }
                }
                if( !ok && !res["rs"].trueValue() ) {
                    if( !res.isEmpty() ) {
                        /* strange.  got a response, but not "ok". log it. */
                        log() << "replSet warning " << i->h.toString() << " replied: " << res.toString() << rsLog;
                    }

                    bool allowFailure = false;
                    failures += i->votes;
                    if( res.isEmpty() && !initial && failures >= majority ) {
                        const Member* m = theReplSet->findById( i->_id );
                        if( m ) {
                            assert( m->h().toString() == i->h.toString() );
                        }
                        // it's okay if the down member isn't part of the config,
                        // we might be adding a new member that isn't up yet
                        allowFailure = true;
                    }

                    if( !allowFailure ) {
                        string msg = string("need all members up to initiate, not ok : ") + i->h.toString();
                        if( !initial )
                            msg = string("need most members up to reconfigure, not ok : ") + i->h.toString();
                        uasserted(13144, msg);
                    }
                }
            }
            if( initial ) {
                bool hasData = res["hasData"].Bool();
                uassert(13311, "member " + i->h.toString() + " has data already, cannot initiate set.  All members except initiator must be empty.",
                        !hasData || i->h.isSelf());
            }
        }
    }
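The failure-tolerance test above compares accumulated failures against a majority threshold of the form votes / 2 + 1. A self-contained check of that arithmetic (helper name illustrative):

    #include <cassert>

    // Majority of the total votes: strictly more than half.
    static int majorityOf( int totalVotes ) {
        return totalVotes / 2 + 1;
    }

    int main() {
        assert( majorityOf( 1 ) == 1 ); // single-node set
        assert( majorityOf( 3 ) == 2 ); // typical 3-member set
        assert( majorityOf( 4 ) == 3 ); // even sizes still need strictly more than half
        assert( majorityOf( 5 ) == 3 );
        return 0;
    }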
Example #3
   // PD_TRACE_DECLARE_FUNCTION ( SDB_CLSCATAMATCHER_PARSEANOBJ, "clsCatalogMatcher::parseAnObj" )
   INT32 clsCatalogMatcher::parseAnObj( const BSONObj &matcher,
                                        clsCatalogPredicateTree &predicateSet )
   {
      INT32 rc = SDB_OK;
      PD_TRACE_ENTRY ( SDB_CLSCATAMATCHER_PARSEANOBJ ) ;
      clsCatalogPredicateTree *pPredicateSet = NULL;
      BOOLEAN isNew = FALSE ;

      try
      {
         BSONObjIterator i( matcher ) ;
         while ( i.more() )
         {
            BSONElement beTmp = i.next();
            const CHAR *pFieldName = beTmp.fieldName() ;
            if ( predicateSet.getLogicType() == CLS_CATA_LOGIC_OR )
            {
               pPredicateSet = SDB_OSS_NEW
                  clsCatalogPredicateTree( _shardingKey ) ;
               PD_CHECK( pPredicateSet != NULL, SDB_OOM, error, PDERROR,
                         "malloc failed" ) ;
               isNew = TRUE ;
            }
            else
            {
               pPredicateSet = &predicateSet ;
            }
            if ( MTH_OPERATOR_EYECATCHER == pFieldName[0] )
            {
               rc = parseLogicOp( beTmp, *pPredicateSet );
            }
            else
            {
               rc = parseCmpOp( beTmp, *pPredicateSet );
            }
            PD_RC_CHECK( rc, PDERROR, "Failed to parse the field(rc=%d)",
                         rc ) ;
            if ( isNew )
            {
               predicateSet.addChild( pPredicateSet ) ;
               // ownership has transferred to the tree; reset so the error
               // path does not double-free on a later failure
               isNew = FALSE ;
            }
            if ( predicateSet.getLogicType() == CLS_CATA_LOGIC_OR &&
                 predicateSet.isUniverse() )
            {
               goto done ;
            }
         }
      }
      catch ( std::exception &e )
      {
         rc = SDB_INVALIDARG ;
         PD_RC_CHECK( rc, PDERROR, "Failed to parse the matcher(%s), "
                      "occured unexpected error:%s",
                      matcher.toString( false, false ).c_str(),
                      e.what() ) ;
      }

   done:
      PD_TRACE_EXITRC ( SDB_CLSCATAMATCHER_PARSEANOBJ, rc ) ;
      return rc;
   error:
      if ( isNew )
      {
         SDB_OSS_DEL( pPredicateSet );
         pPredicateSet = NULL;
      }
      goto done;
   }
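The done/error labels above follow the codebase's standard cleanup idiom: set rc, goto error, release only what the current frame still owns, then fall through to done. A minimal self-contained sketch of the same shape (names illustrative):

    #include <cstdio>
    #include <cstdlib>

    #define RC_OK  0
    #define RC_OOM (-2)

    static int buildNode( int value, int **outNode )
    {
       int rc = RC_OK ;
       int *node = NULL ;

       node = (int *)std::malloc( sizeof( int ) ) ;
       if ( NULL == node )
       {
          rc = RC_OOM ;
          goto error ; // nothing handed off yet, so this frame owns the cleanup
       }
       *node = value ;
       *outNode = node ; // ownership transfers to the caller on success

    done :
       return rc ;
    error :
       std::free( node ) ; // free(NULL) is a no-op, so this is safe on every path
       node = NULL ;
       goto done ;
    }

    int main()
    {
       int *n = NULL ;
       if ( RC_OK == buildNode( 42, &n ) )
       {
          std::printf( "node=%d\n", *n ) ;
          std::free( n ) ;
       }
       return 0 ;
    }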
Example #4
        void operator()( DBClientCursorBatchIterator &i ) {
            mongolock l( true );
            if ( context ) {
                context->relocked();
            }

            while( i.moreInCurrentBatch() ) {
                if ( n % 128 == 127 /*yield some*/ ) {
                    time_t now = time(0);
                    if( now - lastLog >= 60 ) { 
                        // report progress
                        if( lastLog )
                            log() << "clone " << to_collection << ' ' << n << endl;
                        lastLog = now;
                    }
                    mayInterrupt( _mayBeInterrupted );
                    dbtempreleaseif t( _mayYield );
                }

                BSONObj tmp = i.nextSafe();

                /* assure object is valid.  note this will slow us down a little. */
                if ( !tmp.valid() ) {
                    stringstream ss;
                    ss << "Cloner: skipping corrupt object from " << from_collection;
                    BSONElement e = tmp.firstElement();
                    try {
                        e.validate();
                        ss << " firstElement: " << e;
                    }
                    catch( ... ) {
                        ss << " firstElement corrupt";
                    }
                    out() << ss.str() << endl;
                    continue;
                }

                ++n;

                BSONObj js = tmp;
                if ( isindex ) {
                    verify( strstr(from_collection, "system.indexes") );
                    js = fixindex(tmp);
                    storedForLater->push_back( js.getOwned() );
                    continue;
                }

                try {
                    theDataFileMgr.insertWithObjMod(to_collection, js);
                    if ( logForRepl )
                        logOp("i", to_collection, js);

                    getDur().commitIfNeeded();
                }
                catch( UserException& e ) {
                    log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                }

                RARELY if ( time( 0 ) - saveLast > 60 ) {
                    log() << n << " objects cloned so far from collection " << from_collection << endl;
                    saveLast = time( 0 );
                }
            }
        }
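The `n % 128 == 127` guard combined with the 60-second lastLog check above is a cheap rate-limited progress logger: the clock is consulted only every 128 documents, and a line is emitted at most once a minute. A self-contained sketch of the pattern (names illustrative):

    #include <ctime>
    #include <iostream>

    int main() {
        std::time_t lastLog = 0;
        long long n = 0;
        for ( int i = 0; i < 10000000; ++i ) {
            ++n;
            if ( n % 128 == 127 ) { // only touch the clock every 128 items
                std::time_t now = std::time( 0 );
                if ( now - lastLog >= 60 ) {
                    if ( lastLog ) // skip the very first tick, as the cloner does
                        std::cout << "progress: " << n << " items" << std::endl;
                    lastLog = now;
                }
            }
        }
        return 0;
    }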
Example #5
 static void runRollbackInsertFromOplog(const char* ns, BSONObj op) {
     // handle add index case
     if (mongoutils::str::endsWith(ns, ".system.indexes")) {
         throw RollbackOplogException(str::stream() << "Not rolling back an add index on " << ns << ". Op: " << op.toString(false, true));
     }
     else {
         // the rollback of a normal insert is to do the delete
         runDeleteFromOplog(ns, op);
     }
 }
Example #6
        bool run( const string& dbname,
                  BSONObj& cmdObj,
                  int,
                  string& errmsg,
                  BSONObjBuilder& result,
                  bool ) {

            string ns;
            if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
                return false;
            }

            if ( ns.size() == 0 ) {
                errmsg = "no namespace specified";
                return false;
            }

            vector<BSONObj> bounds;
            if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
                return false;
            }

            if ( bounds.size() == 0 ) {
                errmsg = "no bounds were specified";
                return false;
            }

            if ( bounds.size() != 2 ) {
                errmsg = "only a min and max bound may be specified";
                return false;
            }

            BSONObj minKey = bounds[0];
            BSONObj maxKey = bounds[1];

            if ( minKey.isEmpty() ) {
                errmsg = "no min key specified";
                return false;
            }

            if ( maxKey.isEmpty() ) {
                errmsg = "no max key specified";
                return false;
            }

            ShardPtr mergeShard = guessMergeShard( NamespaceString( ns ), minKey );

            if ( !mergeShard ) {
                errmsg = (string)"could not find shard for merge range starting at "
                                 + minKey.toString();
                return false;
            }

            BSONObjBuilder remoteCmdObjB;
            remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::nsField() ] );
            remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::boundsField() ] );
            remoteCmdObjB.append( MergeChunksPassCommand::configField(),
                                  configServer.getPrimary().getAddress().toString() );
            remoteCmdObjB.append( MergeChunksPassCommand::shardNameField(),
                                  mergeShard->getName() );

            BSONObj remoteResult;
            // Throws, but handled at level above.  Don't want to rewrap to preserve exception
            // formatting.
            ScopedDbConnection conn( mergeShard->getAddress() );
            bool ok = conn->runCommand( "admin", remoteCmdObjB.obj(), remoteResult );
            conn.done();

            // Always refresh our chunks afterwards
            refreshChunkCache( NamespaceString( ns ) );

            result.appendElements( remoteResult );
            return ok;
        }
Example #7
    /* copy the specified collection
       isindex - if true, this is system.indexes collection, in which we do some transformation when copying.
    */
    void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, Query query) {
        auto_ptr<DBClientCursor> c;
        {
            dbtemprelease r;
            c = conn->query( from_collection, query, 0, 0, 0, Option_NoCursorTimeout | ( slaveOk ? Option_SlaveOk : 0 ) );
        }
        
        list<BSONObj> storedForLater;
        
        assert( c.get() );
        long long n = 0;
        time_t saveLast = time( 0 );
        while ( 1 ) {
            {
                dbtemprelease r;
                if ( !c->more() )
                    break;
            }
            BSONObj tmp = c->next();

            /* assure object is valid.  note this will slow us down a little. */
            if ( !tmp.valid() ) {
                stringstream ss;
                ss << "skipping corrupt object from " << from_collection;
                BSONElement e = tmp.firstElement();
                try {
                    e.validate();
                    ss << " firstElement: " << e;
                }
                catch( ... ){
                    ss << " firstElement corrupt";
                }
                out() << ss.str() << endl;
                continue;
            }

            ++n;
            
            BSONObj js = tmp;
            if ( isindex ) {
                assert( strstr(from_collection, "system.indexes") );
                js = fixindex(tmp);
                storedForLater.push_back( js.getOwned() );
                continue;
            }

            try { 
                theDataFileMgr.insert(to_collection, js);
                if ( logForRepl )
                    logOp("i", to_collection, js);
            }
            catch( UserException& e ) { 
                log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
            }
            
            RARELY if ( time( 0 ) - saveLast > 60 ) {
                log() << n << " objects cloned so far from collection " << from_collection << endl;
                saveLast = time( 0 );
            }
        }

        if ( storedForLater.size() ){
            for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ){
                BSONObj js = *i;
                try { 
                    theDataFileMgr.insert(to_collection, js);
                    if ( logForRepl )
                        logOp("i", to_collection, js);
                }
                catch( UserException& e ) { 
                    log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                }
            }
        }
    }
Example #8
void DocumentSourceOut::initialize() {
    DBClientBase* conn = pExpCtx->mongoProcessInterface->directClient();

    // Save the original collection options and index specs so we can check they didn't change
    // during computation.
    _originalOutOptions = pExpCtx->mongoProcessInterface->getCollectionOptions(_outputNs);
    _originalIndexes = conn->getIndexSpecs(_outputNs.ns());

    // Check if it's sharded or capped to make sure we have a chance of succeeding before we do all
    // the work. If the collection becomes capped during processing, the collection options will
    // have changed, and the $out will fail. If it becomes sharded during processing, the final
    // rename will fail.
    uassert(17017,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is sharded so it can't be used for $out'",
            !pExpCtx->mongoProcessInterface->isSharded(pExpCtx->opCtx, _outputNs));
    uassert(17152,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is capped so it can't be used for $out",
            _originalOutOptions["capped"].eoo());

    // We will write all results into a temporary collection, then rename the temporary collection
    // to be the target collection once we are done.
    _tempNs = NamespaceString(str::stream() << _outputNs.db() << ".tmp.agg_out."
                                            << aggOutCounter.addAndFetch(1));

    // Create output collection, copying options from existing collection if any.
    {
        BSONObjBuilder cmd;
        cmd << "create" << _tempNs.coll();
        cmd << "temp" << true;
        cmd.appendElementsUnique(_originalOutOptions);

        BSONObj info;
        bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
        uassert(16994,
                str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
                              << "': "
                              << info.toString(),
                ok);
    }

    // copy indexes to _tempNs
    for (std::list<BSONObj>::const_iterator it = _originalIndexes.begin();
         it != _originalIndexes.end();
         ++it) {
        MutableDocument index((Document(*it)));
        index.remove("_id");  // indexes shouldn't have _ids but some existing ones do
        index["ns"] = Value(_tempNs.ns());

        BSONObj indexBson = index.freeze().toBson();
        conn->insert(_tempNs.getSystemIndexesCollection(), indexBson);
        BSONObj err = conn->getLastErrorDetailed();
        uassert(16995,
                str::stream() << "copying index for $out failed."
                              << " index: "
                              << indexBson
                              << " error: "
                              << err,
                DBClientBase::getLastErrorString(err).empty());
    }
    _initialized = true;
}
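initialize() only stages the temp collection and its indexes; the final step of $out (not shown in this excerpt) renames the temp collection over the target. A hedged sketch of that rename using the server's renameCollection admin command through the same legacy client interface (error code and handling illustrative, not the actual DocumentSourceOut code):

    // Illustrative only: the real rename lives elsewhere in DocumentSourceOut.
    BSONObj info;
    BSONObj renameCmd = BSON("renameCollection" << _tempNs.ns()
                                                << "to" << _outputNs.ns()
                                                << "dropTarget" << true);
    uassert(0 /* illustrative error code */,
            str::stream() << "$out rename failed: " << info.toString(),
            conn->runCommand("admin", renameCmd, info));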
Example #9
/* ****************************************************************************
*
* mongoAttributesForEntityType -
*/
HttpStatusCode mongoAttributesForEntityType
(
  std::string                           entityType,
  EntityTypeAttributesResponse*         responseP,
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  std::map<std::string, std::string>&   uriParams
)
{
  unsigned int offset         = atoi(uriParams[URI_PARAM_PAGINATION_OFFSET].c_str());
  unsigned int limit          = atoi(uriParams[URI_PARAM_PAGINATION_LIMIT].c_str());
  std::string  detailsString  = uriParams[URI_PARAM_PAGINATION_DETAILS];
  bool         details        = (strcasecmp("on", detailsString.c_str()) == 0);

  // Setting the name of the entity type for the response
  responseP->entityType.type = entityType;

  LM_T(LmtMongo, ("Query Types Attribute for <%s>", entityType.c_str()));
  LM_T(LmtPagination, ("Offset: %d, Limit: %d, Details: %s", offset, limit, (details == true)? "true" : "false"));

  reqSemTake(__FUNCTION__, "query types attributes request");

  DBClientBase* connection = getMongoConnection();

  /* Compose query based on this aggregation command:
   *
   * FIXME P9: taking into account that type is no longer used as part of the attribute "key", not sure if the
   * aggregation query below is fully correct
   *
   * db.runCommand({aggregate: "entities",
   *                pipeline: [ {$match: { "_id.type": "TYPE" , "_id.servicePath": /.../ } },
   *                            {$project: {_id: 1, "attrs.name": 1, "attrs.type": 1} },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$_id.type", attrs: {$addToSet: "$attrs"}} },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$attrs" }},
   *                            {$sort: {_id.name: 1, _id.type: 1} }
   *                          ]
   *                })
   *
   */

  BSONObj result;
  BSONObj cmd = BSON("aggregate" << COL_ENTITIES <<
                     "pipeline" << BSON_ARRAY(
                                              BSON("$match" << BSON(C_ID_ENTITY << entityType << C_ID_SERVICEPATH << fillQueryServicePath(servicePathV))) <<
                                              BSON("$project" << BSON("_id" << 1 << C_ATTR_NAME << 1 << C_ATTR_TYPE << 1)) <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << CS_ID_ENTITY << "attrs" << BSON("$addToSet" << S_ATTRS))) <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << S_ATTRS)) <<
                                              BSON("$sort" << BSON(C_ID_NAME << 1 << C_ID_TYPE << 1))
                                             )
                    );

  LM_T(LmtMongo, ("runCommand() in '%s' database: '%s'", composeDatabaseName(tenant).c_str(), cmd.toString().c_str()));

  mongoSemTake(__FUNCTION__, "aggregation command"); 
  try
  {

    connection->runCommand(composeDatabaseName(tenant).c_str(), cmd, result);
    mongoSemGive(__FUNCTION__, "aggregation command");
    LM_I(("Database Operation Successful (%s)", cmd.toString().c_str()));
  }
  catch (const DBException& e)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + e.what();

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + "generic";

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }

  /* Processing result to build response*/
  LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));

  std::vector<BSONElement> resultsArray = result.getField("result").Array();

  /* See comment above in the other method regarding this strategy to implement pagination */
  for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
  {
    BSONElement        idField    = resultsArray[ix].embeddedObject().getField("_id");

    //
    // BSONElement::eoo returns true if 'not found', i.e. the field "_id" doesn't exist in 'sub'
    //
    // Now, if 'resultsArray[ix].embeddedObject().getField("_id")' is not found, if we continue,
    // calling embeddedObject() on it, then we get an exception and the broker crashes.
    //
    if (idField.eoo() == true)
    {
      LM_E(("Database Error (error retrieving _id field in doc: %s)", resultsArray[ix].embeddedObject().toString().c_str()));
      continue;
    }

    BSONObj            resultItem = idField.embeddedObject();
    ContextAttribute*  ca         = new ContextAttribute(resultItem.getStringField(ENT_ATTRS_NAME), resultItem.getStringField(ENT_ATTRS_TYPE));
    responseP->entityType.contextAttributeVector.push_back(ca);
  }

  char detailsMsg[256];
  if (responseP->entityType.contextAttributeVector.size() > 0)
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Count: %d", (int) resultsArray.size());
      responseP->statusCode.fill(SccOk, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccOk);
    }
  }
  else
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Number of attributes: %d. Offset is %d", (int) resultsArray.size(), offset);
      responseP->statusCode.fill(SccContextElementNotFound, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccContextElementNotFound);
    }
  }

  reqSemGive(__FUNCTION__, "query types request");

  return SccOk;
}
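The pagination above is done in application code: the aggregation returns the whole result array and the loop copies only the [offset, offset + limit) window. A self-contained sketch of that windowing (names illustrative):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Copy the [offset, offset + limit) window of results, clamped to the array.
    static std::vector<int> paginate(const std::vector<int>& all,
                                     unsigned int offset, unsigned int limit)
    {
      std::vector<int> page;
      unsigned int end = std::min<unsigned int>((unsigned int) all.size(), offset + limit);
      for (unsigned int ix = offset; ix < end; ++ix)
      {
        page.push_back(all[ix]);
      }
      return page;
    }

    int main()
    {
      std::vector<int> all;
      for (int i = 0; i < 10; ++i) all.push_back(i);
      assert(paginate(all, 0, 3).size() == 3);
      assert(paginate(all, 8, 5).size() == 2); // clamped at the end of the array
      assert(paginate(all, 20, 5).empty());    // offset past the end yields nothing
      return 0;
    }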
Example #10
   // PD_TRACE_DECLARE_FUNCTION ( SDB__IXMINXCB2, "_ixmIndexCB::_ixmIndexCB" )
   _ixmIndexCB::_ixmIndexCB ( dmsExtentID extentID,
                              const BSONObj &infoObj,
                              UINT16 mbID ,
                              _dmsStorageIndex *pIndexSu,
                              _dmsContext *context )
   {
      SDB_ASSERT ( pIndexSu, "index su can't be NULL" ) ;
      PD_TRACE_ENTRY ( SDB__IXMINXCB2 );

      ixmIndexCBExtent *pExtent = NULL ;
      _isInitialized = FALSE ;
      dmsExtRW extRW ;
      _pIndexSu = pIndexSu ;
      _pContext = context ;
      _extentID = extentID ;
      _pageSize = _pIndexSu->pageSize() ;

      _extent = (const ixmIndexCBExtent*)pIndexSu->beginFixedAddr ( extentID,
                                                                    1 ) ;
      extRW = pIndexSu->extent2RW( extentID, context->mbID() ) ;
      pExtent = extRW.writePtr<ixmIndexCBExtent>( 0, _pageSize ) ;

      if ( infoObj.objsize() + IXM_INDEX_CB_EXTENT_METADATA_SIZE >=
           (UINT32)_pageSize )
      {
         PD_LOG ( PDERROR, "index object is too big: %s",
                  infoObj.toString().c_str() ) ;
         goto error ;
      }

      pExtent->_type = IXM_EXTENT_TYPE_NONE ;
      if ( !generateIndexType( infoObj, pExtent->_type ) )
      {
         goto error ;
      }


      pExtent->_flag           = DMS_EXTENT_FLAG_INUSE ;
      pExtent->_eyeCatcher [0] = IXM_EXTENT_CB_EYECATCHER0 ;
      pExtent->_eyeCatcher [1] = IXM_EXTENT_CB_EYECATCHER1 ;
      pExtent->_indexFlag      = IXM_INDEX_FLAG_INVALID ;
      pExtent->_mbID           = mbID ;
      pExtent->_version        = DMS_EXTENT_CURRENT_V ;
      pExtent->_logicID        = DMS_INVALID_EXTENT ;
      pExtent->_scanExtLID     = DMS_INVALID_EXTENT ;
      pExtent->_rootExtentID   = DMS_INVALID_EXTENT ;
      ossMemset( pExtent->_reserved, 0, sizeof( pExtent->_reserved ) ) ;
      if ( !infoObj.hasField (DMS_ID_KEY_NAME) )
      {
         _IDToInsert oid ;
         oid._oid.init() ;
         *(INT32*)(((CHAR*)pExtent) +IXM_INDEX_CB_EXTENT_METADATA_SIZE) =
               infoObj.objsize() + sizeof(_IDToInsert) ;
         ossMemcpy ( ((CHAR*)pExtent) +
                     IXM_INDEX_CB_EXTENT_METADATA_SIZE +
                     sizeof(INT32),
                     (CHAR*)(&oid),
                     sizeof(_IDToInsert)) ;
         ossMemcpy ( ((CHAR*)pExtent) +
                     IXM_INDEX_CB_EXTENT_METADATA_SIZE +
                     sizeof(INT32) +
                     sizeof(_IDToInsert),
                     infoObj.objdata()+sizeof(INT32),
                     infoObj.objsize()-sizeof(INT32) ) ;
      }
      else
      {
         ossMemcpy ( ((CHAR*)pExtent) +
                     IXM_INDEX_CB_EXTENT_METADATA_SIZE,
                     infoObj.objdata(),
                     infoObj.objsize() ) ;
      }
      _init() ;

   done :
      PD_TRACE_EXIT ( SDB__IXMINXCB2 );
      return ;
   error :
      goto done ;
   }
Example #11
void OplogReader::tailingQuery(const char* ns, const BSONObj& query) {
    verify(!haveCursor());
    LOG(2) << ns << ".find(" << query.toString() << ')' << endl;
    cursor.reset(_conn->query(ns, query, 0, 0, nullptr, _tailingQueryOptions).release());
}
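A hedged note on _tailingQueryOptions: in the legacy client, query behavior is selected with a bitmask of QueryOption flags, and a tailing oplog query typically combines at least the following (the exact mask OplogReader uses is set in its constructor, which is not part of this excerpt):

    // Illustrative mask only; the real value lives in OplogReader's constructor.
    int tailingQueryOptions = QueryOption_CursorTailable   // cursor survives reaching EOF
                            | QueryOption_AwaitData        // server blocks briefly for new data
                            | QueryOption_SlaveOk;         // allow reading from a secondary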
Example #12
 string CollectionMetadata::rangeToString( const BSONObj& inclusiveLower,
                                           const BSONObj& exclusiveUpper ) const {
     stringstream ss;
     ss << "[" << inclusiveLower.toString() << ", " << exclusiveUpper.toString() << ")";
     return ss.str();
 }
Example #13
/* ****************************************************************************
*
* mongoUpdateContextSubscription - 
*/
HttpStatusCode mongoUpdateContextSubscription(UpdateContextSubscriptionRequest* requestP, UpdateContextSubscriptionResponse* responseP, Format inFormat, const std::string& tenant)
{
  reqSemTake(__FUNCTION__, "ngsi10 update subscription request");

  LM_T(LmtMongo, ("Update Context Subscription"));

  DBClientBase* connection = getMongoConnection();

  /* Look for document */
  BSONObj  sub;
  try
  {
      OID id = OID(requestP->subscriptionId.get());

      mongoSemTake(__FUNCTION__, "findOne in SubscribeContextCollection");
      sub = connection->findOne(getSubscribeContextCollectionName(tenant).c_str(), BSON("_id" << id));
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection");
      LM_I(("Database Operation Successful (findOne _id: %s)", id.toString().c_str()));
  }
  catch (const AssertionException &e)
  {
      /* This happens when OID format is wrong */
      // FIXME P4: this checking should be done at the parsing stage, without progressing to
      // mongoBackend. For the moment we can leave this here, but we should remove it in the future
      // (old issue #95)
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo assertion exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo assertion exception)");

      responseP->subscribeError.errorCode.fill(SccContextElementNotFound);
      LM_W(("Bad Input (invalid OID format)"));
      return SccOk;
  }
  catch (const DBException &e)
  {
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo db exception)");

      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - findOne() _id: " + requestP->subscriptionId.get() +
                                               " - exception: " + e.what());
      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "findOne in SubscribeContextCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo generic exception)");

      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - findOne() _id: " + requestP->subscriptionId.get() +
                                               " - exception: " + "generic");
      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }


  if (sub.isEmpty()) {
      responseP->subscribeError.errorCode.fill(SccContextElementNotFound);
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (no subscriptions found)");
      return SccOk;
  }

  /* We start with an empty BSONObjBuilder and process requestP for all the fields that can
   * be updated. I don't like this strategy too much (I would have preferred to start with
   * a copy of the original document, then modify it as needed, but this doesn't seem to be easy
   * using the API provided by the Mongo C++ driver)
   *
   * FIXME: a better implementation strategy could be doing a findAndModify() query to do the
   * update, thus detecting whether the document was not found, instead of using findOne() + update()
   * with a $set operation. One operation against MongoDB instead of two (a sketch follows this function).
   */
  BSONObjBuilder newSub;

  /* Entities, attribute list and reference are not updatable, so they are appended directly */
  newSub.appendArray(CSUB_ENTITIES, sub.getField(CSUB_ENTITIES).Obj());
  newSub.appendArray(CSUB_ATTRS, sub.getField(CSUB_ATTRS).Obj());
  newSub.append(CSUB_REFERENCE, STR_FIELD(sub, CSUB_REFERENCE));

  /* Duration update */
  if (requestP->duration.isEmpty()) {      
      newSub.append(CSUB_EXPIRATION, sub.getField(CSUB_EXPIRATION).numberLong());
  }
  else {
      long long expiration = getCurrentTime() + requestP->duration.parse();
      newSub.append(CSUB_EXPIRATION, expiration);
      LM_T(LmtMongo, ("New subscription expiration: %l", expiration));
  }

  /* Restriction update */
  // FIXME: Restrictions not implemented yet

  /* Throttling update */
  if (!requestP->throttling.isEmpty()) {
      /* Throttling equal to 0 removes throttling */
      long long throttling = requestP->throttling.parse();
      if (throttling != 0) {
          newSub.append(CSUB_THROTTLING, throttling);
      }
  }
  else {
      /* The hasField check is needed due to Throttling could not be present in the original doc */
      if (sub.hasField(CSUB_THROTTLING)) {
          newSub.append(CSUB_THROTTLING, sub.getField(CSUB_THROTTLING).numberLong());
      }
  }

  /* Notify conditions */
  bool notificationDone = false;
  if (requestP->notifyConditionVector.size() == 0) {
      newSub.appendArray(CSUB_CONDITIONS, sub.getField(CSUB_CONDITIONS).embeddedObject());
  }
  else {
      /* Destroy any previous ONTIMEINTERVAL thread */
      getNotifier()->destroyOntimeIntervalThreads(requestP->subscriptionId.get());

      /* Build conditions array (including side-effect notifications and threads creation)
       * In order to do so, we have to create and EntityIdVector and AttributeList from sub
       * document, given the processConditionVector() signature */
       EntityIdVector enV = subToEntityIdVector(sub);
       AttributeList attrL = subToAttributeList(sub);

       BSONArray conds = processConditionVector(&requestP->notifyConditionVector,
                                                enV,
                                                attrL,
                                                requestP->subscriptionId.get(),
                                                C_STR_FIELD(sub, CSUB_REFERENCE),
                                                &notificationDone,
                                                inFormat,
                                                tenant);
       newSub.appendArray(CSUB_CONDITIONS, conds);

       /* Remove EntityIdVector and AttributeList dynamic memory */
       enV.release();
       attrL.release();
  }

  int count = sub.hasField(CSUB_COUNT) ? sub.getIntField(CSUB_COUNT) : 0;

  /* Last notification */
  if (notificationDone) {
      newSub.append(CSUB_LASTNOTIFICATION, getCurrentTime());
      newSub.append(CSUB_COUNT, count + 1);
  }
  else {
      /* The hasField check is needed due to lastNotification/count could not be present in the original doc */
      if (sub.hasField(CSUB_LASTNOTIFICATION)) {
          newSub.append(CSUB_LASTNOTIFICATION, sub.getIntField(CSUB_LASTNOTIFICATION));
      }
      if (sub.hasField(CSUB_COUNT)) {
          newSub.append(CSUB_COUNT, count);
      }
  }

  /* Adding format to use in notifications */
  newSub.append(CSUB_FORMAT, std::string(formatToString(inFormat)));

  /* Update document in MongoDB */
  BSONObj update = newSub.obj();
  try
  {
      LM_T(LmtMongo, ("update() in '%s' collection _id '%s': %s}", getSubscribeContextCollectionName(tenant).c_str(),
                         requestP->subscriptionId.get().c_str(),
                         update.toString().c_str()));
      mongoSemTake(__FUNCTION__, "update in SubscribeContextCollection");
      connection->update(getSubscribeContextCollectionName(tenant).c_str(), BSON("_id" << OID(requestP->subscriptionId.get())), update);
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection");
      LM_I(("Database Operation Successful (update _id: %s, %s)", requestP->subscriptionId.get().c_str(), update.toString().c_str()));
  }
  catch (const DBException &e)
  {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo db exception)");
      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                               " - update() doc: " + update.toString() +
                                               " - exception: " + e.what());

      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi10 update subscription request (mongo generic exception)");
      responseP->subscribeError.errorCode.fill(SccReceiverInternalError,
                                               std::string("collection: ") + getSubscribeContextCollectionName(tenant).c_str() +
                                               " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                               " - update() doc: " + update.toString() +
                                               " - exception: " + "generic");

      LM_E(("Database Error (%s)", responseP->subscribeError.errorCode.details.c_str()));
      return SccOk;
  }

  /* Duration and throttling are optional parameters; they are only added in the case they
   * were used for the update */
  if (!requestP->duration.isEmpty()) {      
      responseP->subscribeResponse.duration = requestP->duration;
  }
  if (!requestP->throttling.isEmpty()) {      
      responseP->subscribeResponse.throttling = requestP->throttling;
  }  
  responseP->subscribeResponse.subscriptionId = requestP->subscriptionId;

  reqSemGive(__FUNCTION__, "ngsi10 update subscription request");
  return SccOk;
}
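The FIXME above suggests collapsing findOne() + update() into a single findAndModify round trip. A hedged sketch through the generic runCommand() interface (the collection name "csubs" and the omitted error handling are illustrative, not Orion's actual code):

    // Illustrative only: one round trip instead of findOne() + update().
    BSONObj cmdResult;
    BSONObj famCmd = BSON("findandmodify" << "csubs" <<  // collection name within the db
                          "query" << BSON("_id" << OID(requestP->subscriptionId.get())) <<
                          "update" << update <<
                          "new" << true);                // return the document as modified
    connection->runCommand(composeDatabaseName(tenant), famCmd, cmdResult);
    // A null cmdResult["value"] means no document matched, replacing the
    // sub.isEmpty() check after findOne() above.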
Example #14
/* ****************************************************************************
*
* mongoUpdateContextAvailabilitySubscription - 
*/
HttpStatusCode mongoUpdateContextAvailabilitySubscription(UpdateContextAvailabilitySubscriptionRequest* requestP, UpdateContextAvailabilitySubscriptionResponse* responseP, Format inFormat, const std::string& tenant)
{
  LM_T(LmtMongo, ("Update Context Subscription"));
  reqSemTake(__FUNCTION__, "ngsi9 update subscription request");

  DBClientConnection* connection = getMongoConnection();

  /* Look for document */
  BSONObj  sub;
  try {
      OID id = OID(requestP->subscriptionId.get());

      mongoSemTake(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection");
      sub = connection->findOne(getSubscribeContextAvailabilityCollectionName(tenant).c_str(), BSON("_id" << id));
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection");
  }
  catch( const AssertionException &e ) {
      /* This happens when OID format is wrong */
      // FIXME: this checking should be done at the parsing stage, without progressing to
      // mongoBackend. For the moment we can leave this here, but we should remove it in the future
      // (old issue #95)
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo assertion exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo assertion exception)");

      responseP->errorCode.fill(SccContextElementNotFound);
      return SccOk;
  }
  catch( const DBException &e ) {
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo db exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - findOne() _id: " + requestP->subscriptionId.get() +
                                " - exception: " + e.what());
      return SccOk;
  }
  catch(...) {
      mongoSemGive(__FUNCTION__, "findOne from SubscribeContextAvailabilityCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo generic exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - findOne() _id: " + requestP->subscriptionId.get() +
                                " - exception: " + "generic");
      return SccOk;
  }

  if (sub.isEmpty()) {
     responseP->errorCode.fill(SccContextElementNotFound);
     reqSemGive(__FUNCTION__, "ngsi9 update subscription request (no subscriptions found)");
     return SccOk;
  }

  /* We start with an empty BSONObjBuilder and process requestP for all the fields that can
   * be updated. I don't like this strategy too much (I would have preferred to start with
   * a copy of the original document, then modify it as needed, but this doesn't seem to be easy
   * using the API provided by the Mongo C++ driver)
   *
   * FIXME: a better implementation strategy could be doing a findAndModify() query to do the
   * update, thus detecting whether the document was not found, instead of using findOne() + update()
   * with a $set operation. One operation against MongoDB instead of two.
   */
  BSONObjBuilder newSub;

  /* Entities (mandatory) */
  BSONArrayBuilder entities;
  for (unsigned int ix = 0; ix < requestP->entityIdVector.size(); ++ix) {
      EntityId* en = requestP->entityIdVector.get(ix);
      if (en->type == "") {
          entities.append(BSON(CASUB_ENTITY_ID << en->id <<
                               CASUB_ENTITY_ISPATTERN << en->isPattern));
      }
      else {
          entities.append(BSON(CASUB_ENTITY_ID << en->id <<
                               CASUB_ENTITY_TYPE << en->type <<
                               CASUB_ENTITY_ISPATTERN << en->isPattern));
      }

  }
  newSub.append(CASUB_ENTITIES, entities.arr());

  /* Attributes (always taken into account) */
  BSONArrayBuilder attrs;
  for (unsigned int ix = 0; ix < requestP->attributeList.size(); ++ix) {
      attrs.append(requestP->attributeList.get(ix));
  }
  newSub.append(CASUB_ATTRS, attrs.arr());

  /* Duration (optional) */
  if (requestP->duration.isEmpty()) {
      newSub.append(CASUB_EXPIRATION, sub.getField(CASUB_EXPIRATION).numberLong());
  }
  else {
      long long expiration = getCurrentTime() + requestP->duration.parse();
      newSub.append(CASUB_EXPIRATION, expiration);
      LM_T(LmtMongo, ("New subscription expiration: %l", expiration));
  }

  /* Reference is not updatable, so it is appended directly */
  newSub.append(CASUB_REFERENCE, STR_FIELD(sub, CASUB_REFERENCE));

  int count = sub.hasField(CASUB_COUNT) ? sub.getIntField(CASUB_COUNT) : 0;

  /* The hasField check is needed due to lastNotification/count could not be present in the original doc */
  if (sub.hasField(CASUB_LASTNOTIFICATION)) {
      newSub.append(CASUB_LASTNOTIFICATION, sub.getIntField(CASUB_LASTNOTIFICATION));
  }
  if (sub.hasField(CASUB_COUNT)) {
      newSub.append(CASUB_COUNT, count);
  }

  /* Adding format to use in notifications */
  newSub.append(CASUB_FORMAT, std::string(formatToString(inFormat)));

  /* Update document in MongoDB */
  BSONObj update = newSub.obj();
  LM_T(LmtMongo, ("update() in '%s' collection _id '%s': %s}", getSubscribeContextAvailabilityCollectionName(tenant).c_str(),
                  requestP->subscriptionId.get().c_str(),
                  update.toString().c_str()));
  try {
      mongoSemTake(__FUNCTION__, "update in SubscribeContextAvailabilityCollection");
      connection->update(getSubscribeContextAvailabilityCollectionName(tenant).c_str(), BSON("_id" << OID(requestP->subscriptionId.get())), update);
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection");
  }
  catch( const DBException &e ) {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection (mongo db exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo db exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                " - update() doc: " + update.toString() +
                                " - exception: " + e.what());

      return SccOk;
  }
  catch(...) {
      mongoSemGive(__FUNCTION__, "update in SubscribeContextAvailabilityCollection (mongo generic exception)");
      reqSemGive(__FUNCTION__, "ngsi9 update subscription request (mongo generic exception)");

      responseP->errorCode.fill(SccReceiverInternalError,
                                std::string("collection: ") + getSubscribeContextAvailabilityCollectionName(tenant).c_str() +
                                " - update() _id: " + requestP->subscriptionId.get().c_str() +
                                " - update() doc: " + update.toString() +
                                " - exception: " + "generic");
      return SccOk;
  }

  /* Send notifications for matching context registrations */
  processAvailabilitySubscription(requestP->entityIdVector, requestP->attributeList, requestP->subscriptionId.get(), STR_FIELD(sub, CASUB_REFERENCE), inFormat, tenant);

  /* Duration is an optional parameter; it is only added in the case it
   * was used for the update */
  if (!requestP->duration.isEmpty()) {      
      responseP->duration = requestP->duration;
  }

  responseP->subscriptionId = requestP->subscriptionId;

  reqSemGive(__FUNCTION__, "ngsi9 update subscription request");

  return SccOk;
}
Example #15
        bool group( OperationContext* txn,
                    Database* db,
                    const std::string& ns,
                    const BSONObj& query,
                    BSONObj keyPattern,
                    const std::string& keyFunctionCode,
                    const std::string& reduceCode,
                    const char * reduceScope,
                    BSONObj initial,
                    const std::string& finalize,
                    string& errmsg,
                    BSONObjBuilder& result ) {

            const string userToken = ClientBasic::getCurrent()->getAuthorizationSession()
                                                              ->getAuthenticatedUserNamesToken();
            auto_ptr<Scope> s = globalScriptEngine->getPooledScope(db->name(), "group" + userToken);

            if ( reduceScope )
                s->init( reduceScope );

            s->setObject( "$initial" , initial , true );

            s->exec( "$reduce = " + reduceCode , "$group reduce setup" , false , true , true , 100 );
            s->exec( "$arr = [];" , "$group reduce setup 2" , false , true , true , 100 );
            ScriptingFunction f = s->createFunction(
                                      "function(){ "
                                      "  if ( $arr[n] == null ){ "
                                      "    next = {}; "
                                      "    Object.extend( next , $key ); "
                                      "    Object.extend( next , $initial , true ); "
                                      "    $arr[n] = next; "
                                      "    next = null; "
                                      "  } "
                                      "  $reduce( obj , $arr[n] ); "
                                      "}" );

            ScriptingFunction keyFunction = 0;
            if ( keyFunctionCode.size() ) {
                keyFunction = s->createFunction( keyFunctionCode.c_str() );
            }


            double keysize = keyPattern.objsize() * 3;
            double keynum = 1;

            Collection* collection = db->getCollection( txn, ns );

            const WhereCallbackReal whereCallback(txn, StringData(db->name()));

            map<BSONObj,int,BSONObjCmp> map;
            list<BSONObj> blah;

            if (collection) {
                CanonicalQuery* cq;
                if (!CanonicalQuery::canonicalize(ns, query, &cq, whereCallback).isOK()) {
                    uasserted(17212, "Can't canonicalize query " + query.toString());
                    return 0;
                }

                Runner* rawRunner;
                if (!getRunner(txn,collection, cq, &rawRunner).isOK()) {
                    uasserted(17213, "Can't get runner for query " + query.toString());
                    return 0;
                }

                auto_ptr<Runner> runner(rawRunner);
                const ScopedRunnerRegistration safety(runner.get());

                BSONObj obj;
                Runner::RunnerState state;
                while (Runner::RUNNER_ADVANCED == (state = runner->getNext(&obj, NULL))) {
                    BSONObj key = getKey(obj , keyPattern , keyFunction , keysize / keynum,
                                         s.get() );
                    keysize += key.objsize();
                    keynum++;

                    int& n = map[key];
                    if ( n == 0 ) {
                        n = map.size();
                        s->setObject( "$key" , key , true );
                        uassert(17203, "group() can't handle more than 20000 unique keys",
                                n <= 20000 );
                    }

                    s->setObject( "obj" , obj , true );
                    s->setNumber( "n" , n - 1 );
                    if ( s->invoke( f , 0, 0 , 0 , true ) ) {
                        throw UserException(17214,
                                            (string)"reduce invoke failed: " + s->getError());
                    }
                }
            }

            if (!finalize.empty()) {
                s->exec( "$finalize = " + finalize , "$group finalize define" ,
                         false , true , true , 100 );
                ScriptingFunction g = s->createFunction(
                                          "function(){ "
                                          "  for(var i=0; i < $arr.length; i++){ "
                                          "  var ret = $finalize($arr[i]); "
                                          "  if (ret !== undefined) "
                                          "    $arr[i] = ret; "
                                          "  } "
                                          "}" );
                s->invoke( g , 0, 0 , 0 , true );
            }

            result.appendArray( "retval" , s->getObject( "$arr" ) );
            result.append( "count" , keynum - 1 );
            result.append( "keys" , (int)(map.size()) );
            s->exec( "$arr = [];" , "$group reduce setup 2" , false , true , true , 100 );
            s->gc();

            return true;
        }
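
This helper implements the server side of the group command. A hypothetical command document it would serve, built with the same BSON macros; the collection, key and reduce function are illustrative:

  BSONObj groupCmd = BSON("group" << BSON(
      "ns"      << "sales"                              // collection in the current db
   << "key"     << BSON("region" << 1)                  // arrives here as keyPattern
   << "initial" << BSON("total" << 0)                   // arrives here as initial
   << "$reduce" << "function(obj, prev) { prev.total += obj.amount; }"));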
Exemplo n.º 16
0
/* ****************************************************************************
*
* mongoEntityTypes -
*/
HttpStatusCode mongoEntityTypes
(
  EntityTypesResponse*                  responseP,
  const std::string&                    tenant,
  const std::vector<std::string>&       servicePathV,
  std::map<std::string, std::string>&   uriParams
)
{
  unsigned int offset         = atoi(uriParams[URI_PARAM_PAGINATION_OFFSET].c_str());
  unsigned int limit          = atoi(uriParams[URI_PARAM_PAGINATION_LIMIT].c_str());
  std::string  detailsString  = uriParams[URI_PARAM_PAGINATION_DETAILS];
  bool         details        = (strcasecmp("on", detailsString.c_str()) == 0)? true : false;
  LM_T(LmtMongo, ("Query Entity Types"));
  LM_T(LmtPagination, ("Offset: %d, Limit: %d, Details: %s", offset, limit, (details == true)? "true" : "false"));

  reqSemTake(__FUNCTION__, "query types request");

  DBClientBase* connection = getMongoConnection();

  /* Compose query based on this aggregation command:
   *
   * FIXME P9: given that type is no longer used as part of the attribute "key", it is not clear
   * whether the aggregation query below is fully correct
   *
   * db.runCommand({aggregate: "entities",
   *                pipeline: [ {$match: { "_id.servicePath": /.../ } },
   *                            {$project: {_id: 1, "attrs.name": 1, "attrs.type": 1} },
   *                            {$project: {attrs: {$cond: [ {$eq: [ "$attrs", [ ] ] }, [null], "$attrs" ] } } },
   *                            {$unwind: "$attrs"},
   *                            {$group: {_id: "$_id.type", attrs: {$addToSet: "$attrs"}} },
   *                            {$sort: {_id: 1} }
   *                          ]
   *                })
   *
   * The $cond part is the hard one... more information at
   * http://stackoverflow.com/questions/27510143/empty-array-prevents-document-to-appear-in-query
   * As a consequence, some "null" values may appear in the resulting attrs vector; these are pruned
   * by the result processing logic.
   *
   * FIXME P6: in the future, we could interpret the collapse parameter at this layer. If collapse=true,
   * attributes are not needed and the following simpler command can be used:
   *
   * db.runCommand({aggregate: "entities", pipeline: [ {$group: {_id: "$_id.type"} }]})
   */

  BSONObj result;

  // Building the projection part of the query that includes types that have no attributes
  // See bug: https://github.com/telefonicaid/fiware-orion/issues/686
  BSONArrayBuilder emptyArrayBuilder;
  BSONArrayBuilder nulledArrayBuilder;
  nulledArrayBuilder.appendNull();

  // We use the $cond: [ .. ] form and not $cond: { .. }, as the former is the only one valid in MongoDB 2.4
  BSONObj projection = BSON(
    "$project" << BSON(
      "attrs" << BSON(
        "$cond" << BSON_ARRAY(
          BSON("$eq" << BSON_ARRAY(S_ATTRS << emptyArrayBuilder.arr()) ) <<
          nulledArrayBuilder.arr() <<
          S_ATTRS
        )
      )
    )
  );

  BSONObj cmd = BSON("aggregate" << COL_ENTITIES <<
                     "pipeline" << BSON_ARRAY(
                                              BSON("$match" << BSON(C_ID_SERVICEPATH << fillQueryServicePath(servicePathV))) <<
                                              BSON("$project" << BSON("_id" << 1 << C_ATTR_NAME << 1 << C_ATTR_TYPE << 1)) <<
                                              projection <<
                                              BSON("$unwind" << S_ATTRS) <<
                                              BSON("$group" << BSON("_id" << CS_ID_ENTITY << "attrs" << BSON("$addToSet" << S_ATTRS))) <<
                                              BSON("$sort" << BSON("_id" << 1))
                                             )
                     );

  LM_T(LmtMongo, ("runCommand() in '%s' database: '%s'", composeDatabaseName(tenant).c_str(), cmd.toString().c_str()));

  mongoSemTake(__FUNCTION__, "aggregation command");  
  try
  {

    connection->runCommand(composeDatabaseName(tenant).c_str(), cmd, result);
    mongoSemGive(__FUNCTION__, "aggregation command");
    LM_I(("Database Operation Successful (%s)", cmd.toString().c_str()));
  }
  catch (const DBException& e)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + e.what();

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }
  catch (...)
  {
      mongoSemGive(__FUNCTION__, "aggregation command");
      std::string err = std::string("database: ") + composeDatabaseName(tenant).c_str() +
              " - command: " + cmd.toString() +
              " - exception: " + "generic";

      LM_E(("Database Error (%s)", err.c_str()));
      responseP->statusCode.fill(SccReceiverInternalError, err);
      reqSemGive(__FUNCTION__, "query types request");
      return SccOk;
  }

  /* Processing result to build response */
  LM_T(LmtMongo, ("aggregation result: %s", result.toString().c_str()));

  std::vector<BSONElement> resultsArray = result.getField("result").Array();

  /* Another strategy to implement pagination would be to use the $skip and $limit operators in the
   * aggregation framework. However, doing so, we wouldn't know the total number of results, which can
   * be needed in the case of details=on (with that approach, two queries would be needed: one to get
   * the count and another to get the actual results with $skip and $limit, in the same "transaction"
   * to avoid incoherence between the two if some entity type is created or deleted in the process).
   *
   * However, considering that the number of types will be small compared with the number of entities,
   * the current approach seems to be ok.
   */
  for (unsigned int ix = offset; ix < MIN(resultsArray.size(), offset + limit); ++ix)
  {
    BSONObj                  resultItem = resultsArray[ix].embeddedObject();
    TypeEntity*              type       = new TypeEntity(resultItem.getStringField("_id"));
    std::vector<BSONElement> attrsArray = resultItem.getField("attrs").Array();

    if (!attrsArray[0].isNull())
    {
      for (unsigned int jx = 0; jx < attrsArray.size(); ++jx)
      {
        /* This is the place in which null elements in the resulting attrs vector are pruned */
        if (attrsArray[jx].isNull())
        {
          continue;
        }
        
        BSONObj jAttr = attrsArray[jx].embeddedObject();
        ContextAttribute* ca = new ContextAttribute(jAttr.getStringField(ENT_ATTRS_NAME), jAttr.getStringField(ENT_ATTRS_TYPE));
        type->contextAttributeVector.push_back(ca);
      }
    }

    responseP->typeEntityVector.push_back(type);
  }

  char detailsMsg[256];
  if (responseP->typeEntityVector.size() > 0)
  {
    if (details)
    {
      snprintf(detailsMsg, sizeof(detailsMsg), "Count: %d", (int) resultsArray.size());
      responseP->statusCode.fill(SccOk, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccOk);
    }
  }
  else
  {
    if (details)
    {      
      snprintf(detailsMsg, sizeof(detailsMsg), "Number of types: %d. Offset is %d", (int) resultsArray.size(), offset);
      responseP->statusCode.fill(SccContextElementNotFound, detailsMsg);
    }
    else
    {
      responseP->statusCode.fill(SccContextElementNotFound);
    }
  }

  reqSemGive(__FUNCTION__, "query types request");

  return SccOk;

}
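
The FIXME P6 above mentions a collapse-only variant that skips attributes entirely. A sketch of how that command could be built with the same constants and macros used by this function:

  BSONObj collapseCmd = BSON("aggregate" << COL_ENTITIES <<
                             "pipeline" << BSON_ARRAY(
                                 BSON("$group" << BSON("_id" << CS_ID_ENTITY))));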
Exemplo n.º 17
0
// static
Status ParsedProjection::make(const BSONObj& spec,
                              const MatchExpression* const query,
                              ParsedProjection** out,
                              const MatchExpressionParser::WhereCallback& whereCallback) {
    // Are we including or excluding fields?  Values:
    // -1 when we haven't initialized it.
    // 1 when we're including
    // 0 when we're excluding.
    int include_exclude = -1;

    // If any of these are 'true' the projection isn't covered.
    bool include = true;
    bool hasNonSimple = false;
    bool hasDottedField = false;

    bool includeID = true;

    bool hasIndexKeyProjection = false;

    bool wantGeoNearPoint = false;
    bool wantGeoNearDistance = false;

    // Until we see a positional or elemMatch operator we're normal.
    ArrayOpType arrayOpType = ARRAY_OP_NORMAL;

    BSONObjIterator it(spec);
    while (it.more()) {
        BSONElement e = it.next();

        if (!e.isNumber() && !e.isBoolean()) {
            hasNonSimple = true;
        }

        if (Object == e.type()) {
            BSONObj obj = e.embeddedObject();
            if (1 != obj.nFields()) {
                return Status(ErrorCodes::BadValue, ">1 field in obj: " + obj.toString());
            }

            BSONElement e2 = obj.firstElement();
            if (mongoutils::str::equals(e2.fieldName(), "$slice")) {
                if (e2.isNumber()) {
                    // This is A-OK.
                } else if (e2.type() == Array) {
                    BSONObj arr = e2.embeddedObject();
                    if (2 != arr.nFields()) {
                        return Status(ErrorCodes::BadValue, "$slice array wrong size");
                    }

                    BSONObjIterator it(arr);
                    // Skip over 'skip'.
                    it.next();
                    int limit = it.next().numberInt();
                    if (limit <= 0) {
                        return Status(ErrorCodes::BadValue, "$slice limit must be positive");
                    }
                } else {
                    return Status(ErrorCodes::BadValue,
                                  "$slice only supports numbers and [skip, limit] arrays");
                }
            } else if (mongoutils::str::equals(e2.fieldName(), "$elemMatch")) {
                // Validate $elemMatch arguments and dependencies.
                if (Object != e2.type()) {
                    return Status(ErrorCodes::BadValue,
                                  "elemMatch: Invalid argument, object required.");
                }

                if (ARRAY_OP_POSITIONAL == arrayOpType) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot specify positional operator and $elemMatch.");
                }

                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue,
                                  "Cannot use $elemMatch projection on a nested field.");
                }

                arrayOpType = ARRAY_OP_ELEM_MATCH;

                // Create a MatchExpression for the elemMatch.
                BSONObj elemMatchObj = e.wrap();
                verify(elemMatchObj.isOwned());

                // TODO: Is there a faster way of validating the elemMatchObj?
                StatusWithMatchExpression swme =
                    MatchExpressionParser::parse(elemMatchObj, whereCallback);
                if (!swme.isOK()) {
                    return swme.getStatus();
                }
                delete swme.getValue();
            } else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
                // Field for meta must be top level.  We can relax this at some point.
                if (mongoutils::str::contains(e.fieldName(), '.')) {
                    return Status(ErrorCodes::BadValue, "field for $meta cannot be nested");
                }

                // Make sure the argument to $meta is something we recognize.
                // e.g. {x: {$meta: "textScore"}}
                if (String != e2.type()) {
                    return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
                }

                if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
                    e2.valuestr() != LiteParsedQuery::metaRecordId &&
                    e2.valuestr() != LiteParsedQuery::metaIndexKey &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
                    e2.valuestr() != LiteParsedQuery::metaGeoNearPoint) {
                    return Status(ErrorCodes::BadValue, "unsupported $meta operator: " + e2.str());
                }

                // This clobbers everything else.
                if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
                    hasIndexKeyProjection = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
                    wantGeoNearDistance = true;
                } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
                    wantGeoNearPoint = true;
                }
            } else {
                return Status(ErrorCodes::BadValue,
                              string("Unsupported projection option: ") + e.toString());
            }
        } else if (mongoutils::str::equals(e.fieldName(), "_id") && !e.trueValue()) {
            includeID = false;
        } else {
            // Projections of dotted fields aren't covered.
            if (mongoutils::str::contains(e.fieldName(), '.')) {
                hasDottedField = true;
            }

            // Validate input.
            if (include_exclude == -1) {
                // If we haven't specified an include/exclude, initialize include_exclude.
                // We expect further include/excludes to match it.
                include_exclude = e.trueValue();
                include = !e.trueValue();
            } else if (static_cast<bool>(include_exclude) != e.trueValue()) {
                // Make sure that the incl./excl. matches the previous.
                return Status(ErrorCodes::BadValue,
                              "Projection cannot have a mix of inclusion and exclusion.");
            }
        }


        if (_isPositionalOperator(e.fieldName())) {
            // Validate the positional op.
            if (!e.trueValue()) {
                return Status(ErrorCodes::BadValue,
                              "Cannot exclude array elements with the positional operator.");
            }

            if (ARRAY_OP_POSITIONAL == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify more than one positional proj. per query.");
            }

            if (ARRAY_OP_ELEM_MATCH == arrayOpType) {
                return Status(ErrorCodes::BadValue,
                              "Cannot specify positional operator and $elemMatch.");
            }

            std::string after = mongoutils::str::after(e.fieldName(), ".$");
            if (mongoutils::str::contains(after, ".$")) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' contains "
                   << "the positional operator more than once.";
                return Status(ErrorCodes::BadValue, ss);
            }

            std::string matchfield = mongoutils::str::before(e.fieldName(), '.');
            if (!_hasPositionalOperatorMatch(query, matchfield)) {
                mongoutils::str::stream ss;
                ss << "Positional projection '" << e.fieldName() << "' does not "
                   << "match the query document.";
                return Status(ErrorCodes::BadValue, ss);
            }

            arrayOpType = ARRAY_OP_POSITIONAL;
        }
    }

    // Fill out the returned obj.
    unique_ptr<ParsedProjection> pp(new ParsedProjection());

    // The positional operator uses the MatchDetails from the query
    // expression to know which array element was matched.
    pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;

    // Save the raw spec.  It should be owned by the LiteParsedQuery.
    verify(spec.isOwned());
    pp->_source = spec;
    pp->_returnKey = hasIndexKeyProjection;

    // Dotted fields aren't covered, non-simple require match details, and as for include, "if
    // we default to including then we can't use an index because we don't know what we're
    // missing."
    pp->_requiresDocument = include || hasNonSimple || hasDottedField;

    // Add geoNear projections.
    pp->_wantGeoNearPoint = wantGeoNearPoint;
    pp->_wantGeoNearDistance = wantGeoNearDistance;

    // If it's possible to compute the projection in a covered fashion, populate _requiredFields
    // so the planner can perform projection analysis.
    if (!pp->_requiresDocument) {
        if (includeID) {
            pp->_requiredFields.push_back("_id");
        }

        // The only way we could be here is if spec is only simple non-dotted-field projections.
        // Therefore we can iterate over spec to get the fields required.
        BSONObjIterator srcIt(spec);
        while (srcIt.more()) {
            BSONElement elt = srcIt.next();
            // We've already handled the _id field before entering this loop.
            if (includeID && mongoutils::str::equals(elt.fieldName(), "_id")) {
                continue;
            }
            if (elt.trueValue()) {
                pp->_requiredFields.push_back(elt.fieldName());
            }
        }
    }

    // returnKey clobbers everything.
    if (hasIndexKeyProjection) {
        pp->_requiresDocument = false;
    }

    *out = pp.release();
    return Status::OK();
}
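
A few illustrative projection specs and how the validation above treats them (the outcomes follow directly from the checks in the loop):

  BSONObj inc    = BSON("a" << 1 << "b" << 1);                   // pure inclusion: covered-plan candidate
  BSONObj mixed  = BSON("a" << 1 << "b" << 0);                   // rejected: mix of inclusion and exclusion
  BSONObj noId   = BSON("_id" << 0 << "a" << 1);                 // fine: _id exclusion is exempt from the mix rule
  BSONObj sliced = BSON("comments" << BSON("$slice" << BSON_ARRAY(10 << 5)));  // skip 10, limit 5 (limit must be > 0)
  BSONObj meta   = BSON("score" << BSON("$meta" << "textScore"));              // $meta field must be top level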
Exemplo n.º 18
0
   // PD_TRACE_DECLARE_FUNCTION ( SDB__RTNCONTEXTLOB_OPEN, "_rtnContextLob::open" )
   INT32 _rtnContextLob::open( const BSONObj &lob,
                               BOOLEAN isLocal, 
                               _pmdEDUCB *cb,
                               SDB_DPSCB *dpsCB )
   {
      INT32 rc = SDB_OK ;
      PD_TRACE_ENTRY( SDB__RTNCONTEXTLOB_OPEN ) ;
      BSONElement mode ;
      BSONElement oid ;
      BSONElement fullName = lob.getField( FIELD_NAME_COLLECTION ) ;
      if ( String != fullName.type() )
      {
         PD_LOG( PDERROR, "can not find collection name in lob[%s]",
                 lob.toString( FALSE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      oid = lob.getField( FIELD_NAME_LOB_OID ) ;
      if ( jstOID != oid.type() )
      {
         PD_LOG( PDERROR, "invalid oid in meta bsonobj:%s",
                 lob.toString( FALSE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      mode = lob.getField( FIELD_NAME_LOB_OPEN_MODE ) ;
      if ( NumberInt != mode.type() )
      {
         PD_LOG( PDERROR, "invalid mode in meta bsonobj:%s",
                 lob.toString( FALSE, TRUE ).c_str() ) ;
         rc = SDB_INVALIDARG ;
         goto error ;
      }

      if ( isLocal )
      {
         _stream = SDB_OSS_NEW _rtnLocalLobStream() ;
      }
      else
      {
         _stream = SDB_OSS_NEW _rtnCoordLobStream() ;
      }

      if ( NULL == _stream )
      {
         PD_LOG( PDERROR, "failed to allocate mem." ) ;
         rc = SDB_OOM ;
         goto error ;
      }

      _stream->setDPSCB( dpsCB ) ;
      rc = _stream->open( fullName.valuestr(),
                          oid.OID(),
                          mode.Int(), cb ) ;
      if ( SDB_OK != rc )
      {
         PD_LOG( PDERROR, "failed to open lob stream:%d", rc ) ;
         goto error ;
      }

      _isOpened = TRUE ;
      _hitEnd = FALSE ;
   done:
      PD_TRACE_EXITRC( SDB__RTNCONTEXTLOB_OPEN, rc ) ;
      return rc ;
   error:
      goto done ;
   }
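
The meta object this open() validates must carry exactly a string collection name, an OID and an int32 open mode. A sketch of such an object, assuming the bundled BSON builder macros; the collection name and mode value are illustrative:

  BSONObj lobMeta = BSON( FIELD_NAME_COLLECTION << "foo.bar" <<
                          FIELD_NAME_LOB_OID << OID::gen() <<
                          FIELD_NAME_LOB_OPEN_MODE << 1 ) ;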
Exemplo n.º 19
0
    bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot) {

		massert( "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );

        string todb = cc().database()->name;
        stringstream a,b;
        a << "localhost:" << cmdLine.port;
        b << "127.0.0.1:" << cmdLine.port;
        bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
        if ( masterSameProcess ) {
            if ( fromdb == todb && cc().database()->path == dbpath ) {
                // guard against an "infinite" loop
                /* if you are replicating, the local.sources config may be wrong if you get this */
                errmsg = "can't clone from self (localhost).";
                return false;
            }
        }
        /* todo: we can put these releases inside dbclient or a dbclient specialization.
           or just wait until we get rid of global lock anyway.
           */
        string ns = fromdb + ".system.namespaces";
        list<BSONObj> toClone;
        {  
            dbtemprelease r;
		
            auto_ptr<DBClientCursor> c;
            {
                if ( !masterSameProcess ) {
                    auto_ptr< DBClientConnection > c( new DBClientConnection() );
                    if ( !c->connect( masterHost, errmsg ) )
                        return false;
                    if( !replAuthenticate(c.get()) )
                        return false;
                    
                    conn = c;
                } else {
                    conn.reset( new DBDirectClient() );
                }
                c = conn->query( ns.c_str(), BSONObj(), 0, 0, 0, slaveOk ? Option_SlaveOk : 0 );
            }

            if ( c.get() == 0 ) {
                errmsg = "query failed " + ns;
                return false;
            }
            
            while ( c->more() ){
                BSONObj collection = c->next();

                log(2) << "\t cloner got " << collection << endl;

                BSONElement e = collection.findElement("name");
                if ( e.eoo() ) {
                    string s = "bad system.namespaces object " + collection.toString();
                    massert(s.c_str(), false);
                }
                assert( !e.eoo() );
                assert( e.type() == String );
                const char *from_name = e.valuestr();

                if( strstr(from_name, ".system.") ) { 
                    /* system.users is cloned -- but nothing else from system. */
                    if( legalClientSystemNS( from_name , true ) == 0 ){
                        log(2) << "\t\t not cloning because system collection" << endl;
                        continue;
                    }
                }
                else if( strchr(from_name, '$') ) {
                    // don't clone index namespaces -- we take care of those separately below.
                    log(2) << "\t\t not cloning because has $ " << endl;
                    continue;
                }            
                
                toClone.push_back( collection.getOwned() );
            }
        }

        for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ){
            {
                dbtemprelease r;
            }
            BSONObj collection = *i;
            log(2) << "  really will clone: " << collection << endl;
            const char * from_name = collection["name"].valuestr();
            BSONObj options = collection.getObjectField("options");
            
            /* change name "<fromdb>.collection" -> <todb>.collection */
            const char *p = strchr(from_name, '.');
            assert(p);
            string to_name = todb + p;

            {
                string err;
                const char *toname = to_name.c_str();
                userCreateNS(toname, options, err, logForRepl);
            }
            log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
            Query q;
            if( snapshot ) 
                q.snapshot();
            copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, q);
        }

        // now build the indexes
        string system_indexes_from = fromdb + ".system.indexes";
        string system_indexes_to = todb + ".system.indexes";
        /* [dm]: is the ID index sometimes not called "_id_"?  There is other code in the system that looks for a "_id" prefix 
                 rather than this exact value.  we should standardize.  OR, remove names - which is in the bugdb.  Anyway, this 
                 is dubious here at the moment.
        */
        copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk, BSON( "name" << NE << "_id_" ) );

        return true;
    }
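
The namespace rewriting in the clone loop keeps everything from the first '.' onwards and prepends the target database, so "fromdb.coll" becomes "todb.coll". The same two lines in isolation:

  const char *from_name = "fromdb.coll";
  const char *p = strchr(from_name, '.');   // p points at ".coll"
  string to_name = string("todb") + p;      // "todb.coll"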
Exemplo n.º 20
0
    // Fill _results with all of the results in the annulus defined by _innerRadius and
    // _outerRadius.  If no results are found, grow the annulus and repeat until success (or
    // until the edge of the world).
    void S2NearIndexCursor::fillResults() {
        verify(_results.empty());
        if (_innerRadius >= _outerRadius) { return; }
        if (_innerRadius > _maxDistance) { return; }

        // We iterate until 1. our search radius is too big or 2. we find results.
        do {
            // Some of these arguments are opaque, look at the definitions of the involved classes.
            FieldRangeSet frs(_descriptor->parentNS().c_str(), makeFRSObject(), false, false);
            shared_ptr<FieldRangeVector> frv(new FieldRangeVector(frs, _specForFRV, 1));
            scoped_ptr<BtreeCursor> cursor(BtreeCursor::make(nsdetails(_descriptor->parentNS()),
                        _descriptor->getOnDisk(), frv, 0, 1));

            // The cursor may return the same obj more than once for a given
            // FRS, so we make sure to only consider it once in any given annulus.
            //
            // We don't want this outside of the 'do' loop because the covering
            // for an annulus may return an object whose distance to the query
            // point is actually contained in a subsequent annulus.  If we
            // didn't consider every object in a given annulus we might miss
            // the point.
            //
            // We don't use a global 'seen' because we get that by requiring
            // the distance from the query point to the indexed geo to be
            // within our 'current' annulus, and I want to dodge all yield
            // issues if possible.
            unordered_set<DiskLoc, DiskLoc::Hasher> seen;

            LOG(1) << "looking at annulus from " << _innerRadius << " to " << _outerRadius << endl;
            LOG(1) << "Total # returned: " << _stats._numReturned << endl;
            // Do the actual search through this annulus.
            for (; cursor->ok(); cursor->advance()) {
                // Don't bother to look at anything we've returned.
                if (_returned.end() != _returned.find(cursor->currLoc())) {
                    ++_stats._returnSkip;
                    continue;
                }

                ++_stats._nscanned;
                if (seen.end() != seen.find(cursor->currLoc())) {
                    ++_stats._btreeDups;
                    continue;
                }

                // Get distance interval from our query point to the cell.
                // If it doesn't overlap with our current shell, toss.
                BSONObj currKey(cursor->currKey());
                BSONObjIterator it(currKey);
                BSONElement geoKey;
                for (int i = 0; i <= _nearFieldIndex; ++i) {
                    geoKey = it.next();
                }

                S2Cell keyCell = S2Cell(S2CellId::FromString(geoKey.String()));
                if (!_annulus.MayIntersect(keyCell)) {
                    ++_stats._keyGeoSkip;
                    continue;
                }

                // We have to add this document to seen *AFTER* the key intersection test.
                // A geometry may have several keys, one of which may be in our search shell and one
                // of which may be outside of it.  We don't want to ignore a document just because
                // one of its covers isn't inside this annulus.
                seen.insert(cursor->currLoc());

                // At this point forward, we will not examine the document again in this annulus.

                const BSONObj& indexedObj = cursor->currLoc().obj();

                // Match against indexed geo fields.
                ++_stats._geoMatchTested;
                size_t geoFieldsMatched = 0;
                // See if the object actually overlaps w/the geo query fields.
                for (size_t i = 0; i < _indexedGeoFields.size(); ++i) {
                    BSONElementSet geoFieldElements;
                    indexedObj.getFieldsDotted(_indexedGeoFields[i].getField(), geoFieldElements,
                            false);
                    if (geoFieldElements.empty()) { continue; }

                    bool match = false;

                    for (BSONElementSet::iterator oi = geoFieldElements.begin();
                            !match && (oi != geoFieldElements.end()); ++oi) {
                        if (!oi->isABSONObj()) { continue; }
                        const BSONObj &geoObj = oi->Obj();
                        GeometryContainer geoContainer;
                        uassert(16762, "ill-formed geometry: " + geoObj.toString(),
                                geoContainer.parseFrom(geoObj));
                        match = _indexedGeoFields[i].satisfiesPredicate(geoContainer);
                    }

                    if (match) { ++geoFieldsMatched; }
                }

                if (geoFieldsMatched != _indexedGeoFields.size()) {
                    continue;
                }

                // Get all the fields with that name from the document.
                BSONElementSet geoFieldElements;
                indexedObj.getFieldsDotted(_nearQuery.field, geoFieldElements, false);
                if (geoFieldElements.empty()) { continue; }

                ++_stats._inAnnulusTested;
                double minDistance = 1e20;
                // Look at each field in the document and take the min. distance.
                for (BSONElementSet::iterator oi = geoFieldElements.begin();
                        oi != geoFieldElements.end(); ++oi) {
                    if (!oi->isABSONObj()) { continue; }
                    BSONObj obj = oi->Obj();
                    double dist;
                    bool ret = S2SearchUtil::distanceBetween(_nearQuery.centroid.point,
                                                             obj, &dist);
                    if (!ret) {
                        warning() << "unknown geometry: " << obj.toString();
                        dist = numeric_limits<double>::max();
                    }

                    minDistance = min(dist, minDistance);
                }

                // We could be in an annulus, yield, add new points closer to
                // query point than the last point we returned, then unyield.
                // This would return points out of order.
                if (minDistance < _returnedDistance) { continue; }

                // If the min. distance satisfies our distance criteria
                if (minDistance >= _innerRadius && minDistance < _outerRadius) {
                    // The result is valid.  We have to de-dup ourselves here.
                    if (_returned.end() == _returned.find(cursor->currLoc())) {
                        _results.push(Result(cursor->currLoc(), cursor->currKey(),
                                    minDistance));
                    }
                }
            }

            if (_results.empty()) {
                LOG(1) << "results empty!\n";
                _radiusIncrement *= 2;
                nextAnnulus();
            } else if (_results.size() < 300) {
                _radiusIncrement *= 2;
            } else if (_results.size() > 600) {
                _radiusIncrement /= 2;
            }
        } while (_results.empty()
                && _innerRadius < _maxDistance
                && _innerRadius < _outerRadius);
        LOG(1) << "Filled shell with " << _results.size() << " results" << endl;
    }
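
The growth policy at the bottom of the loop can be read as a small standalone rule: a sparse shell (under 300 hits, including an empty one) doubles the increment so the next annulus widens faster, while a dense shell (over 600 hits) halves it. A hypothetical restatement of just that rule:

  void adjustRadiusIncrement(size_t numResults, double* radiusIncrement) {
      if (numResults < 300) {
          *radiusIncrement *= 2;   // too sparse: widen the next shell faster
      } else if (numResults > 600) {
          *radiusIncrement /= 2;   // too dense: widen more cautiously
      }
  }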
Exemplo n.º 21
0
        virtual bool run(OperationContext* txn,
                         const string& dbname,
                         BSONObj& cmdObj,
                         int,
                         string& errmsg,
                         BSONObjBuilder& result,
                         bool fromRepl) {

            string fromhost = cmdObj.getStringField("fromhost");
            bool fromSelf = fromhost.empty();
            if ( fromSelf ) {
                /* copy from self */
                stringstream ss;
                ss << "localhost:" << serverGlobalParams.port;
                fromhost = ss.str();
            }

            CloneOptions cloneOptions;
            cloneOptions.fromDB = cmdObj.getStringField("fromdb");
            cloneOptions.logForRepl = !fromRepl;
            cloneOptions.slaveOk = cmdObj["slaveOk"].trueValue();
            cloneOptions.useReplAuth = false;
            cloneOptions.snapshot = true;
            cloneOptions.mayYield = true;
            cloneOptions.mayBeInterrupted = false;

            string todb = cmdObj.getStringField("todb");
            if ( fromhost.empty() || todb.empty() || cloneOptions.fromDB.empty() ) {
                errmsg = "params missing - {copydb: 1, fromhost: <connection string>, "
                         "fromdb: <db>, todb: <db>}";
                return false;
            }

            if ( !NamespaceString::validDBName( todb ) ) {
                errmsg = "invalid todb name: " + todb;
                return false;
            }

            Cloner cloner;

            // Get MONGODB-CR parameters
            string username = cmdObj.getStringField( "username" );
            string nonce = cmdObj.getStringField( "nonce" );
            string key = cmdObj.getStringField( "key" );

            if ( !username.empty() && !nonce.empty() && !key.empty() ) {
                uassert( 13008, "must call copydbgetnonce first", authConn_.get() );
                BSONObj ret;
                {
                    if ( !authConn_->runCommand( cloneOptions.fromDB,
                                                 BSON( "authenticate" << 1 << "user" << username
                                                       << "nonce" << nonce << "key" << key ), ret ) ) {
                        errmsg = "unable to login " + ret.toString();
                        return false;
                    }
                }
                cloner.setConnection( authConn_.release() );
            }
            else if (cmdObj.hasField(saslCommandConversationIdFieldName) &&
                     cmdObj.hasField(saslCommandPayloadFieldName)) {
                uassert( 25487, "must call copydbsaslstart first", authConn_.get() );
                BSONObj ret;
                if ( !authConn_->runCommand( cloneOptions.fromDB,
                                             BSON( "saslContinue" << 1 <<
                                                   cmdObj[saslCommandConversationIdFieldName] <<
                                                   cmdObj[saslCommandPayloadFieldName] ),
                                             ret ) ) {
                    errmsg = "unable to login " + ret.toString();
                    return false;
                }

                if (!ret["done"].Bool()) {
                    result.appendElements( ret );
                    return true;
                }

                result.append("done", true);
                cloner.setConnection( authConn_.release() );
            }
            else if (!fromSelf) {
                // If fromSelf leave the cloner's conn empty, it will use a DBDirectClient instead.

                ConnectionString cs = ConnectionString::parse(fromhost, errmsg);
                if (!cs.isValid()) {
                    return false;
                }

                DBClientBase* conn = cs.connect(errmsg);
                if (!conn) {
                    return false;
                }
                cloner.setConnection(conn);
            }

            if (fromSelf) {
                // SERVER-4328 todo lock just the two db's not everything for the fromself case
                Lock::GlobalWrite lk(txn->lockState());
                return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
            }

            Lock::DBLock lk (txn->lockState(), todb, MODE_X);
            return cloner.go(txn, todb, fromhost, cloneOptions, NULL, errmsg);
        }
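
Per the params check near the top, the handler expects a command document of this shape (host and database names are illustrative):

  BSONObj copydbCmd = BSON( "copydb" << 1 <<
                            "fromhost" << "db1.example.net:27017" <<
                            "fromdb" << "sourcedb" <<
                            "todb" << "targetdb" <<
                            "slaveOk" << true );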
Exemplo n.º 22
0
    void ShardingConnectionHook::onCreate( DBClientBase * conn ) {
        if( !noauth ) {
            bool result;
            string err;
            LOG(2) << "calling onCreate auth for " << conn->toString() << endl;

            if ( conn->type() == ConnectionString::SET && !authOnPrimaryOnly ) {
                DBClientReplicaSet* setConn = dynamic_cast<DBClientReplicaSet*>(conn);
                verify(setConn);
                result = setConn->authAny( "local",
                                           internalSecurity.user,
                                           internalSecurity.pwd,
                                           err,
                                           false );
            }
            else {
                result = conn->auth( "local",
                                     internalSecurity.user,
                                     internalSecurity.pwd,
                                     err,
                                     false );
            }

            uassert( 15847, str::stream() << "can't authenticate to server "
                                          << conn->getServerAddress() << causedBy( err ), result );
        }

        if ( _shardedConnections && versionManager.isVersionableCB( conn ) ) {

            // We must initialize sharding on all connections, so that we get exceptions if sharding is enabled on
            // the collection.
            BSONObj result;
            bool ok = versionManager.initShardVersionCB( conn, result );

            // assert that we actually successfully setup sharding
            uassert( 15907, str::stream() << "could not initialize sharding on connection " << (*conn).toString() <<
                        ( result["errmsg"].type() == String ? causedBy( result["errmsg"].String() ) :
                                                              causedBy( (string)"unknown failure : " + result.toString() ) ), ok );

        }
    }
Exemplo n.º 23
0
// static
Status SyncTail::syncApply(OperationContext* txn,
                           const BSONObj& op,
                           bool convertUpdateToUpsert,
                           ApplyOperationInLockFn applyOperationInLock,
                           ApplyCommandInLockFn applyCommandInLock,
                           IncrementOpsAppliedStatsFn incrementOpsAppliedStats) {
    if (inShutdown()) {
        return Status::OK();
    }

    // Count each log op application as a separate operation, for reporting purposes
    CurOp individualOp(txn);

    const char* ns = op.getStringField("ns");
    verify(ns);

    const char* opType = op["op"].valuestrsafe();

    bool isCommand(opType[0] == 'c');
    bool isNoOp(opType[0] == 'n');

    if ((*ns == '\0') || (*ns == '.')) {
        // this is ugly
        // this is often a no-op
        // but can't be 100% sure
        if (!isNoOp) {
            error() << "skipping bad op in oplog: " << op.toString();
        }
        return Status::OK();
    }

    if (isCommand) {
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            // a command may need a global write lock. so we will conservatively go
            // ahead and grab one here. suboptimal. :-(
            Lock::GlobalWrite globalWriteLock(txn->lockState());

            // special case apply for commands to avoid implicit database creation
            Status status = applyCommandInLock(txn, op);
            incrementOpsAppliedStats();
            return status;
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_command", ns);
    }

    auto applyOp = [&](Database* db) {
        // For non-initial-sync, we convert updates to upserts
        // to suppress errors when replaying oplog entries.
        txn->setReplicatedWrites(false);
        DisableDocumentValidation validationDisabler(txn);

        Status status =
            applyOperationInLock(txn, db, op, convertUpdateToUpsert, incrementOpsAppliedStats);
        if (!status.isOK() && status.code() == ErrorCodes::WriteConflict) {
            throw WriteConflictException();
        }
        return status;
    };

    if (isNoOp || (opType[0] == 'i' && nsToCollectionSubstring(ns) == "system.indexes")) {
        auto opStr = isNoOp ? "syncApply_noop" : "syncApply_indexBuild";
        MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
            Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_X);
            OldClientContext ctx(txn, ns);
            return applyOp(ctx.db());
        }
        MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, opStr, ns);
    }

    // The remaining op types are plain CRUD writes.  The tail of the function is
    // elided in this excerpt; a minimal sketch of what it does, assuming the same
    // retry-loop and locking helpers (the real code takes finer-grained locks):
    MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
        Lock::DBLock dbLock(txn->lockState(), nsToDatabaseSubstring(ns), MODE_IX);
        OldClientContext ctx(txn, ns);
        return applyOp(ctx.db());
    }
    MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "syncApply_CRUD", ns);
}
Exemplo n.º 24
0
/* ****************************************************************************
*
* updateInCache -
*/
static void updateInCache
(
  const BSONObj&             doc,
  const SubscriptionUpdate&  subUp,
  const std::string&         tenant,
  long long                  lastNotification
)
{
  //
  // StringFilter in Scope?
  //
  // Any Scope of type SCOPE_TYPE_SIMPLE_QUERY in subUp.restriction.scopeVector?
  // If so, set it as string filter to the sub-cache item
  //
  StringFilter*  stringFilterP   = NULL;
  StringFilter*  mdStringFilterP = NULL;

  for (unsigned int ix = 0; ix < subUp.restriction.scopeVector.size(); ++ix)
  {
    if (subUp.restriction.scopeVector[ix]->type == SCOPE_TYPE_SIMPLE_QUERY)
    {
      stringFilterP = subUp.restriction.scopeVector[ix]->stringFilterP;
    }

    if (subUp.restriction.scopeVector[ix]->type == SCOPE_TYPE_SIMPLE_QUERY_MD)
    {
      mdStringFilterP = subUp.restriction.scopeVector[ix]->mdStringFilterP;
    }
  }

  //
  // Modification of the subscription cache
  //
  // The subscription "before this update" is looked up in cache and referenced by 'cSubP'.
  // The "updated subscription information" is in 'newSubObject' (mongo BSON object format).
  //
  // All we need to do now for the cache is to:
  //   1. Remove 'cSubP' from sub-cache (if present)
  //   2. Create 'newSubObject' in sub-cache (if applicable)
  //
  // The subscription is already updated in mongo.
  //
  //
  // There are four different scenarios here:
  //   1. Old sub was in cache, new sub enters cache
  //   2. Old sub was NOT in cache, new sub enters cache
  //   3. Old sub was in cache, new sub DOES NOT enter cache
  //   4. Old sub was NOT in cache, new sub DOES NOT enter cache
  //
  // This is resolved by two separate functions, one that removes the old one,
  // if found (subCacheItemLookup+subCacheItemRemove), and the other one that inserts the sub,
  // IF it should be inserted (subCacheItemInsert).
  // If inserted, subCacheUpdateStatisticsIncrement is called to update the statistics counter of insertions.
  //


  // 0. Lookup matching subscription in subscription-cache

  cacheSemTake(__FUNCTION__, "Updating cached subscription");

  //
  // This is a second lookup of the same subscription within the mongo update process.
  // However, we have to do it, as the item in the cache could have changed in the meanwhile.
  //
  LM_T(LmtSubCache, ("update: %s", doc.toString().c_str()));

  CachedSubscription* subCacheP        = subCacheItemLookup(tenant.c_str(), subUp.id.c_str());
  char*               servicePathCache = (char*) ((subCacheP == NULL)? "" : subCacheP->servicePath);
  std::string         q;
  std::string         mq;
  std::string         geom;
  std::string         coords;
  std::string         georel;
  RenderFormat        renderFormat = NGSI_V2_NORMALIZED;  // Default value

  if (doc.hasField(CSUB_FORMAT))
  {
    renderFormat = stringToRenderFormat(getStringFieldF(doc, CSUB_FORMAT));
  }

  if (doc.hasField(CSUB_EXPR))
  {
    BSONObj expr = getObjectFieldF(doc, CSUB_EXPR);

    q      = expr.hasField(CSUB_EXPR_Q)?      getStringFieldF(expr, CSUB_EXPR_Q)      : "";
    mq     = expr.hasField(CSUB_EXPR_MQ)?     getStringFieldF(expr, CSUB_EXPR_MQ)     : "";
    geom   = expr.hasField(CSUB_EXPR_GEOM)?   getStringFieldF(expr, CSUB_EXPR_GEOM)   : "";
    coords = expr.hasField(CSUB_EXPR_COORDS)? getStringFieldF(expr, CSUB_EXPR_COORDS) : "";
    georel = expr.hasField(CSUB_EXPR_GEOREL)? getStringFieldF(expr, CSUB_EXPR_GEOREL) : "";
  }


  int mscInsert = mongoSubCacheItemInsert(tenant.c_str(),
                                          doc,
                                          subUp.id.c_str(),
                                          servicePathCache,
                                          lastNotification,
                                          doc.hasField(CSUB_EXPIRATION)? getLongFieldF(doc, CSUB_EXPIRATION) : 0,
                                          doc.hasField(CSUB_STATUS)? getStringFieldF(doc, CSUB_STATUS) : STATUS_ACTIVE,
                                          q,
                                          mq,
                                          geom,
                                          coords,
                                          georel,
                                          stringFilterP,
                                          mdStringFilterP,
                                          renderFormat);

  if (mscInsert == 0)  // 0: Insertion was really made
  {
    subCacheUpdateStatisticsIncrement();

    if (subCacheP != NULL)
    {
      LM_T(LmtSubCache, ("Calling subCacheItemRemove"));
      subCacheItemRemove(subCacheP);
    }
  }

  cacheSemGive(__FUNCTION__, "Updating cached subscription");
}
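
The CSUB_EXPR sub-document read above carries the NGSIv2 filters as plain strings. A sketch of its shape, with illustrative filter values:

  BSONObj expr = BSON( CSUB_EXPR_Q      << "temperature>40" <<
                       CSUB_EXPR_MQ     << "accuracy<100" <<
                       CSUB_EXPR_GEOM   << "point" <<
                       CSUB_EXPR_COORDS << "40.418889,-3.691944" <<
                       CSUB_EXPR_GEOREL << "near;maxDistance:1000" );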
Exemplo n.º 25
0
        virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            log() << "replSet replSetInitiate admin command received from client" << rsLog;

            if( !replSet ) {
                errmsg = "server is not running with --replSet";
                return false;
            }
            if( theReplSet ) {
                errmsg = "already initialized";
                result.append("info", "try querying " + rsConfigNs + " to see current configuration");
                return false;
            }

            {
                // just make sure we can get a write lock before doing anything else.  we'll reacquire one
                // later.  of course it could be stuck then, but this check lowers the risk if weird things
                // are up.
                time_t t = time(0);
                writelock lk("");
                if( time(0)-t > 10 ) {
                    errmsg = "took a long time to get write lock, so not initiating.  Initiate when server less busy?";
                    return false;
                }

                /* check that we don't already have an oplog.  that could cause issues.
                   it is ok if the initiating member has *other* data than that.
                   */
                BSONObj o;
                if( Helpers::getFirst(rsoplog, o) ) {
                    errmsg = rsoplog + string(" is not empty on the initiating member.  cannot initiate.");
                    return false;
                }
            }

            if( ReplSet::startupStatus == ReplSet::BADCONFIG ) {
                errmsg = "server already in BADCONFIG state (check logs); not initiating";
                result.append("info", ReplSet::startupStatusMsg.get());
                return false;
            }
            if( ReplSet::startupStatus != ReplSet::EMPTYCONFIG ) {
                result.append("startupStatus", ReplSet::startupStatus);
                errmsg = "all members and seeds must be reachable to initiate set";
                result.append("info", cmdLine._replSet);
                return false;
            }

            BSONObj configObj;

            if( cmdObj["replSetInitiate"].type() != Object ) {
                result.append("info2", "no configuration explicitly specified -- making one");
                log() << "replSet info initiate : no configuration specified.  Using a default configuration for the set" << rsLog;

                string name;
                vector<HostAndPort> seeds;
                set<HostAndPort> seedSet;
                parseReplsetCmdLine(cmdLine._replSet, name, seeds, seedSet); // may throw...

                bob b;
                b.append("_id", name);
                bob members;
                members.append("0", BSON( "_id" << 0 << "host" << HostAndPort::Me().toString() ));
                for( unsigned i = 0; i < seeds.size(); i++ )
                    members.append(bob::numStr(i+1), BSON( "_id" << i+1 << "host" << seeds[i].toString()));
                b.appendArray("members", members.obj());
                configObj = b.obj();
                log() << "replSet created this configuration for initiation : " << configObj.toString() << rsLog;
            }
            else {
                configObj = cmdObj["replSetInitiate"].Obj();
            }

            bool parsed = false;
            try {
                ReplSetConfig newConfig(configObj);
                parsed = true;

                if( newConfig.version > 1 ) {
                    errmsg = "can't initiate with a version number greater than 1";
                    return false;
                }

                log() << "replSet replSetInitiate config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;

                checkMembersUpForConfigChange(newConfig, true);

                log() << "replSet replSetInitiate all members seem up" << rsLog;

                createOplog();

                writelock lk("");
                bo comment = BSON( "msg" << "initiating set");
                newConfig.saveConfigLocally(comment);
                log() << "replSet replSetInitiate config now saved locally.  Should come online in about a minute." << rsLog;
                result.append("info", "Config now saved locally.  Should come online in about a minute.");
                ReplSet::startupStatus = ReplSet::SOON;
                ReplSet::startupStatusMsg.set("Received replSetInitiate - should come online shortly.");
            }
            catch( DBException& e ) {
                log() << "replSet replSetInitiate exception: " << e.what() << rsLog;
                if( !parsed )
                    errmsg = string("couldn't parse cfg object ") + e.what();
                else
                    errmsg = string("couldn't initiate : ") + e.what();
                return false;
            }

            return true;
        }
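
A minimal explicit configuration accepted by the parsing above; version must be 1 at initiation time, and the host value is illustrative:

  BSONObj configObj = BSON( "_id" << "rs0" <<
                            "version" << 1 <<
                            "members" << BSON_ARRAY(
                                BSON( "_id" << 0 << "host" << "localhost:27017" ) ) );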
Exemplo n.º 26
0
    bool run(OperationContext* txn,
             const string& dbname,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        if (!cmdObj["start"].eoo()) {
            errmsg = "using deprecated 'start' argument to geoNear";
            return false;
        }

        const NamespaceString nss(parseNs(dbname, cmdObj));
        AutoGetCollectionForRead ctx(txn, nss);

        Collection* collection = ctx.getCollection();
        if (!collection) {
            errmsg = "can't find ns";
            return false;
        }

        IndexCatalog* indexCatalog = collection->getIndexCatalog();

        // cout << "raw cmd " << cmdObj.toString() << endl;

        // We seek to populate this.
        string nearFieldName;
        bool using2DIndex = false;
        if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
            return false;
        }

        PointWithCRS point;
        uassert(17304,
                "'near' field must be point",
                GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

        bool isSpherical = cmdObj["spherical"].trueValue();
        if (!using2DIndex) {
            uassert(17301, "2dsphere index must have spherical: true", isSpherical);
        }

        // Build the $near expression for the query.
        BSONObjBuilder nearBob;
        if (isSpherical) {
            nearBob.append("$nearSphere", cmdObj["near"].Obj());
        } else {
            nearBob.append("$near", cmdObj["near"].Obj());
        }

        if (!cmdObj["maxDistance"].eoo()) {
            uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
            nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
        }

        if (!cmdObj["minDistance"].eoo()) {
            uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
            uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
            nearBob.append("$minDistance", cmdObj["minDistance"].number());
        }

        if (!cmdObj["uniqueDocs"].eoo()) {
            warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
        }

        // And, build the full query expression.
        BSONObjBuilder queryBob;
        queryBob.append(nearFieldName, nearBob.obj());
        if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
            queryBob.appendElements(cmdObj["query"].Obj());
        }
        BSONObj rewritten = queryBob.obj();

        // Extract the collation, if it exists.
        // TODO SERVER-23473: Pass this collation spec object down so that it can be converted into
        // a CollatorInterface.
        BSONObj collation;
        {
            BSONElement collationElt;
            Status collationEltStatus =
                bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElt);
            if (!collationEltStatus.isOK() && (collationEltStatus != ErrorCodes::NoSuchKey)) {
                return appendCommandStatus(result, collationEltStatus);
            }
            if (collationEltStatus.isOK()) {
                collation = collationElt.Obj();
            }
        }

        long long numWanted = 100;
        const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
        BSONElement eNumWanted = cmdObj[limitName];
        if (!eNumWanted.eoo()) {
            uassert(17303, "limit must be number", eNumWanted.isNumber());
            numWanted = eNumWanted.safeNumberLong();
            uassert(17302, "limit must be >=0", numWanted >= 0);
        }

        bool includeLocs = false;
        if (!cmdObj["includeLocs"].eoo()) {
            includeLocs = cmdObj["includeLocs"].trueValue();
        }

        double distanceMultiplier = 1.0;
        BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
        if (!eDistanceMultiplier.eoo()) {
            uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
            distanceMultiplier = eDistanceMultiplier.number();
            uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
        }

        BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
                                     << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

        const ExtensionsCallbackReal extensionsCallback(txn, &nss);
        auto statusWithCQ = CanonicalQuery::canonicalize(
            nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), extensionsCallback);
        if (!statusWithCQ.isOK()) {
            errmsg = "Can't parse filter / create query";
            return false;
        }
        unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

        // Prevent chunks from being cleaned up during yields - this allows us to only check the
        // version on initial entry into geoNear.
        RangePreserver preserver(collection);

        auto statusWithPlanExecutor =
            getExecutor(txn, collection, std::move(cq), PlanExecutor::YIELD_AUTO, 0);
        if (!statusWithPlanExecutor.isOK()) {
            errmsg = "can't get query executor";
            return false;
        }

        unique_ptr<PlanExecutor> exec = std::move(statusWithPlanExecutor.getValue());

        double totalDistance = 0;
        BSONObjBuilder resultBuilder(result.subarrayStart("results"));
        double farthestDist = 0;

        BSONObj currObj;
        long long results = 0;
        PlanExecutor::ExecState state;
        while (PlanExecutor::ADVANCED == (state = exec->getNext(&currObj, NULL))) {
            // Come up with the correct distance.
            double dist = currObj["$dis"].number() * distanceMultiplier;
            totalDistance += dist;
            if (dist > farthestDist) {
                farthestDist = dist;
            }

            // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
            // in the command result.
            BSONObjIterator resIt(currObj);
            BSONObjBuilder resBob;
            while (resIt.more()) {
                BSONElement elt = resIt.next();
                if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
                    !mongoutils::str::equals("$dis", elt.fieldName())) {
                    resBob.append(elt);
                }
            }
            BSONObj resObj = resBob.obj();

            // Don't make a too-big result object.
            if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
                warning() << "Too many geoNear results for query " << rewritten.toString()
                          << ", truncating output.";
                break;
            }

            // Add the next result to the result builder.
            BSONObjBuilder oneResultBuilder(
                resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
            oneResultBuilder.append("dis", dist);
            if (includeLocs) {
                oneResultBuilder.appendAs(currObj["$pt"], "loc");
            }
            oneResultBuilder.append("obj", resObj);
            oneResultBuilder.done();

            ++results;

            // Break if we have the number of requested result documents.
            if (results >= numWanted) {
                break;
            }
        }

        resultBuilder.done();

        // Return an error if execution fails for any reason.
        if (PlanExecutor::FAILURE == state || PlanExecutor::DEAD == state) {
            log() << "Plan executor error during geoNear command: " << PlanExecutor::statestr(state)
                  << ", stats: " << Explain::getWinningPlanStats(exec.get());

            return appendCommandStatus(result,
                                       Status(ErrorCodes::OperationFailed,
                                              str::stream()
                                                  << "Executor error during geoNear command: "
                                                  << WorkingSetCommon::toStatusString(currObj)));
        }

        PlanSummaryStats summary;
        Explain::getSummaryStats(*exec, &summary);

        // Fill out the stats subobj.
        BSONObjBuilder stats(result.subobjStart("stats"));

        stats.appendNumber("nscanned", summary.totalKeysExamined);
        stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

        if (results > 0) {
            stats.append("avgDistance", totalDistance / results);
        }
        stats.append("maxDistance", farthestDist);
        stats.append("time", CurOp::get(txn)->elapsedMillis());
        stats.done();

        collection->infoCache()->notifyOfQuery(txn, summary.indexesUsed);

        CurOp::get(txn)->debug().setPlanSummaryMetrics(summary);

        return true;
    }
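For orientation, a sketch of a command document this handler would accept; the collection name, coordinates, and multiplier are hypothetical, and "near" uses a legacy coordinate pair:

    // Hypothetical geoNear invocation; "num" caps the results and
    // "spherical" selects the $nearSphere rewrite above.
    BSONObj cmdObj = BSON( "geoNear" << "places"    // collection (hypothetical)
                           << "near" << BSON_ARRAY( -73.97 << 40.77 )
                           << "spherical" << true
                           << "num" << 10
                           << "distanceMultiplier" << 6378.1 );  // e.g. radians -> km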
Example #27
    bool dbEval(const string& dbName, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
        BSONElement e = cmd.firstElement();
        uassert( 10046 ,  "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );

        const char *code = 0;
        switch ( e.type() ) {
        case String:
        case Code:
            code = e.valuestr();
            break;
        case CodeWScope:
            code = e.codeWScopeCode();
            break;
        default:
            verify(0);
        }
        verify( code );

        if ( ! globalScriptEngine ) {
            errmsg = "db side execution is disabled";
            return false;
        }

        auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbName, "dbeval" );
        ScriptingFunction f = s->createFunction(code);
        if ( f == 0 ) {
            errmsg = (string)"compile failed: " + s->getError();
            return false;
        }

        if ( e.type() == CodeWScope )
            s->init( e.codeWScopeScopeDataUnsafe() );
        s->localConnect( dbName.c_str() );

        BSONObj args;
        {
            BSONElement argsElement = cmd.getField("args");
            if ( argsElement.type() == Array ) {
                args = argsElement.embeddedObject();
                if ( edebug ) {
                    out() << "args:" << args.toString() << endl;
                    out() << "code:\n" << code << endl;
                }
            }
        }

        int res;
        {
            Timer t;
            // 10-minute timeout when quotas are enabled, otherwise no timeout.
            res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
            int m = t.millis();
            if ( m > cmdLine.slowMS ) {
                out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
                if ( m >= 1000 ) log() << code << endl;
                else OCCASIONALLY log() << code << endl;
            }
        }
        if (res || s->isLastRetNativeCode()) {
            result.append("errno", (double) res);
            errmsg = "invoke failed: ";
            if (s->isLastRetNativeCode())
                errmsg += "cannot return native function";
            else
                errmsg += s->getError();
            return false;
        }

        s->append( result , "retval" , "__returnValue" );

        return true;
    }
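A sketch of the command shape dbEval consumes: the first element carries the code and an optional "args" array is forwarded to the function (the function body here is illustrative):

    // Hypothetical eval command; the return value lands in "retval".
    BSONObj cmd = BSON( "$eval" << "function(x, y) { return x + y; }"
                        << "args" << BSON_ARRAY( 1 << 2 ) );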
Example #28
    bool run(OperationContext* txn,
             const string& dbname,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        if (!cmdObj["start"].eoo()) {
            errmsg = "using deprecated 'start' argument to geoNear";
            return false;
        }

        const NamespaceString nss(parseNs(dbname, cmdObj));
        AutoGetCollectionForRead ctx(txn, nss);

        Collection* collection = ctx.getCollection();
        if (!collection) {
            errmsg = "can't find ns";
            return false;
        }

        IndexCatalog* indexCatalog = collection->getIndexCatalog();

        // cout << "raw cmd " << cmdObj.toString() << endl;

        // We seek to populate this.
        string nearFieldName;
        bool using2DIndex = false;
        if (!getFieldName(txn, collection, indexCatalog, &nearFieldName, &errmsg, &using2DIndex)) {
            return false;
        }

        PointWithCRS point;
        uassert(17304,
                "'near' field must be point",
                GeoParser::parseQueryPoint(cmdObj["near"], &point).isOK());

        bool isSpherical = cmdObj["spherical"].trueValue();
        if (!using2DIndex) {
            uassert(17301, "2dsphere index must have spherical: true", isSpherical);
        }

        // Build the $near expression for the query.
        BSONObjBuilder nearBob;
        if (isSpherical) {
            nearBob.append("$nearSphere", cmdObj["near"].Obj());
        } else {
            nearBob.append("$near", cmdObj["near"].Obj());
        }

        if (!cmdObj["maxDistance"].eoo()) {
            uassert(17299, "maxDistance must be a number", cmdObj["maxDistance"].isNumber());
            nearBob.append("$maxDistance", cmdObj["maxDistance"].number());
        }

        if (!cmdObj["minDistance"].eoo()) {
            uassert(17298, "minDistance doesn't work on 2d index", !using2DIndex);
            uassert(17300, "minDistance must be a number", cmdObj["minDistance"].isNumber());
            nearBob.append("$minDistance", cmdObj["minDistance"].number());
        }

        if (!cmdObj["uniqueDocs"].eoo()) {
            warning() << nss << ": ignoring deprecated uniqueDocs option in geoNear command";
        }

        // And, build the full query expression.
        BSONObjBuilder queryBob;
        queryBob.append(nearFieldName, nearBob.obj());
        if (!cmdObj["query"].eoo() && cmdObj["query"].isABSONObj()) {
            queryBob.appendElements(cmdObj["query"].Obj());
        }
        BSONObj rewritten = queryBob.obj();

        // cout << "rewritten query: " << rewritten.toString() << endl;

        long long numWanted = 100;
        const char* limitName = !cmdObj["num"].eoo() ? "num" : "limit";
        BSONElement eNumWanted = cmdObj[limitName];
        if (!eNumWanted.eoo()) {
            uassert(17303, "limit must be number", eNumWanted.isNumber());
            numWanted = eNumWanted.safeNumberLong();
            uassert(17302, "limit must be >=0", numWanted >= 0);
        }

        bool includeLocs = false;
        if (!cmdObj["includeLocs"].eoo()) {
            includeLocs = cmdObj["includeLocs"].trueValue();
        }

        double distanceMultiplier = 1.0;
        BSONElement eDistanceMultiplier = cmdObj["distanceMultiplier"];
        if (!eDistanceMultiplier.eoo()) {
            uassert(17296, "distanceMultiplier must be a number", eDistanceMultiplier.isNumber());
            distanceMultiplier = eDistanceMultiplier.number();
            uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
        }

        BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
                                     << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));

        const WhereCallbackReal whereCallback(txn, nss.db());
        auto statusWithCQ = CanonicalQuery::canonicalize(
            nss, rewritten, BSONObj(), projObj, 0, numWanted, BSONObj(), whereCallback);
        if (!statusWithCQ.isOK()) {
            errmsg = "Can't parse filter / create query";
            return false;
        }
        unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());

        // Prevent chunks from being cleaned up during yields - this allows us to only check the
        // version on initial entry into geoNear.
        RangePreserver preserver(collection);

        PlanExecutor* rawExec;
        if (!getExecutor(txn, collection, cq.release(), PlanExecutor::YIELD_AUTO, &rawExec, 0)
                 .isOK()) {
            errmsg = "can't get query executor";
            return false;
        }

        unique_ptr<PlanExecutor> exec(rawExec);

        double totalDistance = 0;
        BSONObjBuilder resultBuilder(result.subarrayStart("results"));
        double farthestDist = 0;

        BSONObj currObj;
        long long results = 0;
        while ((results < numWanted) && PlanExecutor::ADVANCED == exec->getNext(&currObj, NULL)) {
            // Come up with the correct distance.
            double dist = currObj["$dis"].number() * distanceMultiplier;
            totalDistance += dist;
            if (dist > farthestDist) {
                farthestDist = dist;
            }

            // Strip out '$dis' and '$pt' from the result obj.  The rest gets added as 'obj'
            // in the command result.
            BSONObjIterator resIt(currObj);
            BSONObjBuilder resBob;
            while (resIt.more()) {
                BSONElement elt = resIt.next();
                if (!mongoutils::str::equals("$pt", elt.fieldName()) &&
                    !mongoutils::str::equals("$dis", elt.fieldName())) {
                    resBob.append(elt);
                }
            }
            BSONObj resObj = resBob.obj();

            // Don't make a too-big result object.
            if (resultBuilder.len() + resObj.objsize() > BSONObjMaxUserSize) {
                warning() << "Too many geoNear results for query " << rewritten.toString()
                          << ", truncating output.";
                break;
            }

            // Add the next result to the result builder.
            BSONObjBuilder oneResultBuilder(
                resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
            oneResultBuilder.append("dis", dist);
            if (includeLocs) {
                oneResultBuilder.appendAs(currObj["$pt"], "loc");
            }
            oneResultBuilder.append("obj", resObj);
            oneResultBuilder.done();
            ++results;
        }

        resultBuilder.done();

        // Fill out the stats subobj.
        BSONObjBuilder stats(result.subobjStart("stats"));

        // Fill in nscanned from the explain.
        PlanSummaryStats summary;
        Explain::getSummaryStats(exec.get(), &summary);
        stats.appendNumber("nscanned", summary.totalKeysExamined);
        stats.appendNumber("objectsLoaded", summary.totalDocsExamined);

        stats.append("avgDistance", totalDistance / results);
        stats.append("maxDistance", farthestDist);
        stats.append("time", CurOp::get(txn)->elapsedMillis());
        stats.done();

        return true;
    }
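As in the previous example, the handler folds the geo predicate and any user filter into one expression before canonicalizing; a sketch of what `rewritten` might look like for a spherical search over a hypothetical `loc` field with a `category` filter:

    // Hypothetical shape of the rewritten query built above:
    BSONObj rewrittenExample = BSON(
        "loc" << BSON( "$nearSphere" << BSON_ARRAY( -73.97 << 40.77 )
                       << "$maxDistance" << 0.01 )
        << "category" << "cafe" );   // appended from cmdObj["query"]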
Example #29
        static bool run2DSphereGeoNear(NamespaceDetails* nsDetails, int idxNo, BSONObj& cmdObj,
                                       const GeoNearArguments &parsedArgs, string& errmsg,
                                       BSONObjBuilder& result) {
            auto_ptr<IndexDescriptor> descriptor(CatalogHack::getDescriptor(nsDetails, idxNo));
            auto_ptr<S2AccessMethod> sam(new S2AccessMethod(descriptor.get()));
            const S2IndexingParams& params = sam->getParams();
            auto_ptr<S2NearIndexCursor> nic(new S2NearIndexCursor(descriptor.get(), params));

            vector<string> geoFieldNames;
            BSONObjIterator i(descriptor->keyPattern());
            while (i.more()) {
                BSONElement e = i.next();
                if (e.type() == String && IndexNames::GEO_2DSPHERE == e.valuestr()) {
                    geoFieldNames.push_back(e.fieldName());
                }
            }

            // NOTE(hk): If we add a new argument to geoNear, we could have a
            // 2dsphere index with multiple indexed geo fields, and the geoNear
            // could pick the one to run over.  Right now, we just require one.
            uassert(16552, "geoNear requires exactly one indexed geo field", 1 == geoFieldNames.size());
            NearQuery nearQuery(geoFieldNames[0]);
            uassert(16679, "Invalid geometry given as arguments to geoNear: " + cmdObj.toString(),
                    nearQuery.parseFromGeoNear(cmdObj, params.radius));
            uassert(16683, "geoNear on 2dsphere index requires spherical",
                    parsedArgs.isSpherical);

            // NOTE(hk): For a speedup, we could look through the query to see if
            // we've geo-indexed any of the fields in it.
            vector<GeoQuery> regions;

            nic->seek(parsedArgs.query, nearQuery, regions);

            // We do pass in the query above, but it's just so we can possibly use it in our index
            // scan.  We have to do our own matching.
            auto_ptr<Matcher> matcher(new Matcher(parsedArgs.query));

            double totalDistance = 0;
            BSONObjBuilder resultBuilder(result.subarrayStart("results"));
            double farthestDist = 0;

            int results;
            for (results = 0; results < parsedArgs.numWanted && !nic->isEOF(); ++results) {
                BSONObj currObj = nic->getValue().obj();
                if (!matcher->matches(currObj)) {
                    --results;
                    nic->next();
                    continue;
                }

                double dist = nic->currentDistance();
                // If we got the distance in radians, output it in radians too.
                if (nearQuery.fromRadians) { dist /= params.radius; }
                dist *= parsedArgs.distanceMultiplier;
                totalDistance += dist;
                if (dist > farthestDist) { farthestDist = dist; }

                BSONObjBuilder oneResultBuilder(
                    resultBuilder.subobjStart(BSONObjBuilder::numStr(results)));
                oneResultBuilder.append("dis", dist);
                if (parsedArgs.includeLocs) {
                    BSONElementSet geoFieldElements;
                    currObj.getFieldsDotted(geoFieldNames[0], geoFieldElements, false);
                    for (BSONElementSet::iterator oi = geoFieldElements.begin();
                            oi != geoFieldElements.end(); ++oi) {
                        if (oi->isABSONObj()) {
                            oneResultBuilder.appendAs(*oi, "loc");
                        }
                    }
                }

                oneResultBuilder.append("obj", currObj);
                oneResultBuilder.done();
                nic->next();
            }

            resultBuilder.done();

            BSONObjBuilder stats(result.subobjStart("stats"));
            stats.appendNumber("nscanned", nic->nscanned());
            stats.append("avgDistance", totalDistance / results);
            stats.append("maxDistance", farthestDist);
            stats.append("time", cc().curop()->elapsedMillis());
            stats.done();

            return true;
        }
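The key-pattern scan above keeps fields whose value is the 2dsphere index name; a standalone sketch of that extraction, assuming IndexNames::GEO_2DSPHERE is the string "2dsphere" and using a hypothetical compound key pattern:

        // Hypothetical key pattern: one geo field plus an ordinary field.
        BSONObj keyPattern = BSON( "location" << "2dsphere" << "name" << 1 );
        vector<string> geoFields;
        BSONObjIterator it( keyPattern );
        while ( it.more() ) {
            BSONElement e = it.next();
            if ( e.type() == String && IndexNames::GEO_2DSPHERE == e.valuestr() ) {
                geoFields.push_back( e.fieldName() );   // yields { "location" }
            }
        }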
Example #30
   INT32 _coordShardKicker::_kickShardingKey( const CoordCataInfoPtr &cataInfo,
                                              const BSONObj &updator,
                                              BSONObj &newUpdator,
                                              BOOLEAN &hasShardingKey )
   {
      INT32 rc = SDB_OK ;
      UINT32 skSiteID = cataInfo->getShardingKeySiteID() ;

      // Reuse the cached answer for this sharding-key site ID, if present.
      if ( skSiteID > 0 )
      {
         map< UINT32, BOOLEAN >::iterator it = _skSiteIDs.find( skSiteID );
         if ( it != _skSiteIDs.end() )
         {
            newUpdator = updator ;
            hasShardingKey = it->second ;
            goto done ;
         }
      }

      try
      {
         BSONObjBuilder bobNewUpdator( updator.objsize() ) ;
         BSONObj boShardingKey ;
         BSONObj subObj ;

         BOOLEAN isReplace = _isUpdateReplace( updator ) ;
         cataInfo->getShardingKey( boShardingKey ) ;

         BSONObjIterator iter( updator ) ;
         while ( iter.more() )
         {
            BSONElement beTmp = iter.next() ;
            if ( beTmp.type() != Object )
            {
               rc = SDB_INVALIDARG;
               PD_LOG( PDERROR, "updator's element must be an Object type:"
                       "updator=%s", updator.toString().c_str() ) ;
               goto error ;
            }

            subObj = beTmp.embeddedObject() ;
            if ( isReplace &&
                 0 == ossStrcmp( beTmp.fieldName(),
                                 CMD_ADMIN_PREFIX FIELD_OP_VALUE_KEEP ) )
            {
               _addKeys( subObj ) ;
               continue ;
            }

            BSONObjBuilder subBuilder( bobNewUpdator.subobjStart(
                                       beTmp.fieldName() ) ) ;
            BSONObjIterator iterField( subObj ) ;
            while( iterField.more() )
            {
               BSONElement beField = iterField.next() ;
               BSONObjIterator iterKey( boShardingKey ) ;
               BOOLEAN isKey = FALSE ;
               while( iterKey.more() )
               {
                  BSONElement beKey = iterKey.next();
                  const CHAR *pKey = beKey.fieldName();
                  const CHAR *pField = beField.fieldName();
                  // Walk both dotted paths in step; they match when equal
                  // or when one is a dotted prefix of the other.
                  while( *pKey == *pField && *pKey != '\0' )
                  {
                     ++pKey ;
                     ++pField ;
                  }

                  if ( *pKey == *pField ||
                       ( '\0' == *pKey && '.' == *pField ) ||
                       ( '.' == *pKey && '\0' == *pField ) )
                  {
                     isKey = TRUE ;
                     break ;
                  }
               }
               if ( isKey )
               {
                  hasShardingKey = TRUE;
               }
               else
               {
                  subBuilder.append( beField ) ;
               }
            } // while( iterField.more() )

            subBuilder.done() ;
         } // while ( iter.more() )

         if ( isReplace )
         {
            UINT32 count = _addKeys( boShardingKey ) ;
            if ( count > 0 )
            {
               hasShardingKey = TRUE ;
            }

            if ( !_setKeys.empty() )
            {
               BSONObjBuilder keepBuilder( bobNewUpdator.subobjStart(
                                           CMD_ADMIN_PREFIX FIELD_OP_VALUE_KEEP ) ) ;
               SET_SHARDINGKEY::iterator itKey = _setKeys.begin() ;
               while( itKey != _setKeys.end() )
               {
                  keepBuilder.append( itKey->_pStr, (INT32)1 ) ;
                  ++itKey ;
               }
               keepBuilder.done() ;
            }
         } // if ( isReplace )
         newUpdator = bobNewUpdator.obj() ;
      }
      catch ( std::exception &e )
      {
         rc = SDB_INVALIDARG;
         PD_LOG ( PDERROR, "Failed to kick sharding key from the record, "
                  "unexpected error occurred: %s", e.what() ) ;

         goto error;
      }

      if ( skSiteID > 0 )
      {
         _skSiteIDs.insert(
                    pair< UINT32, BOOLEAN >( skSiteID, hasShardingKey ) ) ;
      }

   done:
      return rc;
   error:
      goto done;
   }
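The character walk in the inner loop above treats an update field as touching the sharding key when the two dotted paths are equal or one is a dotted prefix of the other at a '.' boundary; a self-contained sketch of that rule (the helper name is hypothetical):

   // Hypothetical helper isolating the dotted-prefix rule used above:
   // "a.b" matches "a.b"; "a.b" matches "a.b.c" (prefix at a '.' boundary);
   // "ab" does not match "a.b".
   static BOOLEAN _matchesShardingKey( const CHAR *pKey, const CHAR *pField )
   {
      while ( *pKey == *pField && *pKey != '\0' )
      {
         ++pKey ;
         ++pField ;
      }
      return ( *pKey == *pField ) ||                  // identical paths
             ( '\0' == *pKey && '.' == *pField ) ||   // key is a prefix path
             ( '.' == *pKey && '\0' == *pField ) ;    // field is a prefix path
   }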