Status ChunkManagerTargeter::targetDoc( const BSONObj& doc, ShardEndpoint** endpoint ) const {

        if ( !_primary && !_manager ) return Status( ErrorCodes::NamespaceNotFound, "" );

        if ( _manager ) {
            if ( !_manager->hasShardKey( doc ) ) {
                return Status( ErrorCodes::ShardKeyNotFound,
                               stream() << "document " << doc
                                        << " does not contain shard key for pattern "
                                        << _manager->getShardKey().key() );
            }
            ChunkPtr chunk = _manager->findChunkForDoc( doc );
            *endpoint = new ShardEndpoint( chunk->getShard().getName(),
                                           _manager->getVersion( chunk->getShard() ),
                                           chunk->getShard().getAddress() );

            _stats->chunkSizeDelta[chunk->getMin()] += doc.objsize();
        }
        else {
            *endpoint = new ShardEndpoint( _primary->getName(),
                                           ChunkVersion::UNSHARDED(),
                                           _primary->getAddress() );
        }

        return Status::OK();
    }
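A minimal call-site sketch for targetDoc above (hypothetical; `targeter` and `doc` are assumed names): the raw out-parameter transfers ownership of the endpoint, so the caller should wrap it immediately.

    // Hypothetical usage; auto_ptr matches the era of this codebase (see example #5).
    ShardEndpoint* rawEndpoint = NULL;
    Status status = targeter.targetDoc( doc, &rawEndpoint );
    if ( !status.isOK() )
        return status;                                 // nothing is allocated on the error paths above
    auto_ptr<ShardEndpoint> endpoint( rawEndpoint );   // take ownership of the allocation
    // ... dispatch the write to the shard named in *endpoint ...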
Example #2
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            
            while ( d.moreJSObjs() ){
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ){

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }
                    
                    if ( bad ){
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }
                    
                }
                
                ChunkPtr c = manager->findChunk( o );
                log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                
                c->splitIfShould( o.objsize() );
            }            
        }
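The `_id`-backfill logic above reappears in most of the insert paths below; a hedged refactoring sketch (the helper name is illustrative, not from the source) isolates the pattern:

    // If the shard key covers _id and the document lacks one, prepend a fresh OID.
    // Returns true if the (possibly rewritten) document now carries the full shard key.
    static bool ensureShardKey( const ChunkManagerPtr& manager, BSONObj& o ) {
        if ( manager->hasShardKey( o ) )
            return true;
        if ( !manager->getShardKey().partOfShardKey( "_id" ) )
            return false;
        BSONObjBuilder b;
        b.appendOID( "_id" , 0 , true ); // generate a new ObjectId
        b.appendElements( o );           // _id must come first, so append the rest after it
        o = b.obj();
        return manager->hasShardKey( o );
    }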
Example #3
        void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags ) {
            ChunkManagerPtr manager = conf->getChunkManager(ns);
            if ( ! manager->hasShardKey( o ) ) {

                bool bad = true;

                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }

                if ( bad ) {
                    log() << "tried to insert object without shard key: " << ns << "  " << o << endl;
                    uasserted( 14842 , "tried to insert object without shard key" );
                }

            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);

            const int maxTries = 30;

            for ( int i=0; i<maxTries; i++ ) {
                try {
                    ChunkPtr c = manager->findChunk( o );
                    log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                    insert( c->getShard() , ns , o , flags);

//                    r.gotInsert();
//                    if ( r.getClientInfo()->autoSplitOk() )
                    c->splitIfShould( o.objsize() ); // runs unconditionally here; the guard above is commented out
                    break;
                }
                catch ( StaleConfigException& e ) {
                    int logLevel = i < ( maxTries / 2 ); // level 1 (verbose-only) for the first half of retries, level 0 (always shown) after
                    LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
//                    r.reset();

                    unsigned long long old = manager->getSequenceNumber();
                    manager = conf->getChunkManager(ns);

                    if (!manager) {
                        uasserted(14843, "collection no longer sharded");
                    }

                    LOG( logLevel ) << "  sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                }
                sleepmillis( i * 20 );
            }
        }
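The retry shape used here (bounded attempts, linear backoff, metadata refresh on StaleConfigException) could be factored into a reusable helper; a minimal sketch, assuming only the exception type and sleepmillis from the surrounding code:

    // Hypothetical helper: run op() up to maxTries times, refreshing routing info
    // whenever the config is stale and backing off linearly between attempts.
    template <typename Op, typename Refresh>
    void retryOnStaleConfig( Op op, Refresh refresh, int maxTries = 30 ) {
        for ( int i = 0; i < maxTries; i++ ) {
            try {
                op();
                return;
            }
            catch ( StaleConfigException& ) {
                refresh();             // e.g. manager = conf->getChunkManager( ns )
            }
            sleepmillis( i * 20 );     // linear backoff, as in the loop above
        }
        // fall through after maxTries: the caller decides how to report exhaustion
    }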
Example #4
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {

            while ( d.moreJSObjs() ) {
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ) {

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }

                    if ( bad ) {
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }

                }

                // Many operations benefit from having the shard key early in the object
                o = manager->getShardKey().moveToFront(o);

                const int maxTries = 10;

                bool gotThrough = false;
                for ( int i=0; i<maxTries; i++ ) {
                    try {
                        ChunkPtr c = manager->findChunk( o );
                        log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                        insert( c->getShard() , r.getns() , o );

                        r.gotInsert();
                        if ( r.getClientInfo()->autoSplitOk() )
                            c->splitIfShould( o.objsize() );
                        gotThrough = true;
                        break;
                    }
                    catch ( StaleConfigException& e ) {
                        log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                        r.reset();
                        manager = r.getChunkManager();
                        uassert(14804, "collection no longer sharded", manager);
                    }
                    sleepmillis( i * 200 );
                }
                
                assert( inShutdown() || gotThrough );
            }
        }
Example #5
    void DistributionStatus::populateShardToChunksMap(const vector<Shard>& allShards,
                                                      const ChunkManager& chunkMgr,
                                                      ShardToChunksMap* shardToChunksMap) {
        // Makes sure there is an entry in shardToChunksMap for every shard.
        for (vector<Shard>::const_iterator it = allShards.begin();
                it != allShards.end(); ++it) {

            OwnedPointerVector<ChunkType>*& chunkList =
                    (*shardToChunksMap)[it->getName()];

            if (chunkList == NULL) {
                chunkList = new OwnedPointerVector<ChunkType>();
            }
        }

        const ChunkMap& chunkMap = chunkMgr.getChunkMap();
        for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
            const ChunkPtr chunkPtr = it->second;

            auto_ptr<ChunkType> chunk(new ChunkType());
            chunk->setNS(chunkPtr->getns());
            chunk->setMin(chunkPtr->getMin().getOwned());
            chunk->setMax(chunkPtr->getMax().getOwned());
            chunk->setJumbo(chunkPtr->isJumbo()); // TODO: is this reliable?
            const string shardName(chunkPtr->getShard().getName());
            chunk->setShard(shardName);

            (*shardToChunksMap)[shardName]->push_back(chunk.release());
        }
    }
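A short caller sketch for the method above (hypothetical names; assumes OwnedPointerVector exposes size(), and that the caller deletes the owned vectors when done):

    ShardToChunksMap shardToChunksMap;
    DistributionStatus::populateShardToChunksMap( allShards, chunkMgr, &shardToChunksMap );
    for ( ShardToChunksMap::const_iterator it = shardToChunksMap.begin();
            it != shardToChunksMap.end(); ++it ) {
        log() << it->first << " owns " << it->second->size() << " chunks";
    }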
Example #6
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            
            while ( d.moreJSObjs() ){
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ){

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }
                    
                    if ( bad ){
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }
                    
                }
                
                bool gotThrough = false;
                for ( int i=0; i<10; i++ ){
                    try {
                        ChunkPtr c = manager->findChunk( o );
                        log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                        insert( c->getShard() , r.getns() , o );
                        
                        r.gotInsert();
                        c->splitIfShould( o.objsize() );
                        gotThrough = true;
                        break;
                    }
                    catch ( StaleConfigException& ){
                        log(1) << "retrying insert because of StaleConfigException: " << o << endl;
                        r.reset();
                        manager = r.getChunkManager();
                    }
                    sleepmillis( i * 200 );
                }

                assert( gotThrough );

            }            
        }
Example #7
            bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
                ShardConnection::sync();

                Timer t;
                string ns = cmdObj.firstElement().valuestrsafe();
                if ( ns.size() == 0 ){
                    errmsg = "no ns";
                    return false;
                }

                DBConfigPtr config = grid.getDBConfig( ns );
                if ( ! config->isSharded( ns ) ){
                    errmsg = "ns not sharded.  have to shard before can move a chunk";
                    return false;
                }

                BSONObj find = cmdObj.getObjectField( "find" );
                if ( find.isEmpty() ){
                    errmsg = "need to specify find.  see help";
                    return false;
                }

                string toString = cmdObj["to"].valuestrsafe();
                if ( ! toString.size()  ){
                    errmsg = "you have to specify where you want to move the chunk";
                    return false;
                }
                
                Shard to = Shard::make( toString );

                // so far, chunk size serves test purposes; it may or may not become a supported parameter
                long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
                if ( maxChunkSizeBytes == 0 ) {
                    maxChunkSizeBytes = Chunk::MaxChunkSize;
                }

                tlog() << "CMD: movechunk: " << cmdObj << endl;

                ChunkManagerPtr info = config->getChunkManager( ns );
                ChunkPtr c = info->findChunk( find );
                const Shard& from = c->getShard();

                if ( from == to ){
                    errmsg = "that chunk is already on that shard";
                    return false;
                }
                
                BSONObj res;
                if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ){
                    errmsg = "move failed";
                    result.append( "cause" , res );
                    return false;
                }

                result.append( "millis" , t.millis() );
                return true;
            }
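For reference, the cmdObj this run() method parses could be built and sent roughly as follows (a hedged sketch; the namespace, key, and shard names are made up, and maxChunkSizeBytes is optional per the default above):

    // Hypothetical client-side invocation against mongos.
    BSONObj cmd = BSON( "movechunk" << "test.users"
                        << "find" << BSON( "userId" << 42 )
                        << "to" << "shard0001" );
    BSONObj res;
    bool ok = conn.runCommand( "admin", cmd, res ); // conn: a DBClient connection to mongos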
Example #8
    void ChunkRangeManager::assertValid() const {
        if (_ranges.empty())
            return;

        try {
            // No Nulls
            for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
                verify(it->second);
            }

            // Check endpoints
            verify(allOfType(MinKey, _ranges.begin()->second->getMin()));
            verify(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));

            // Make sure there are no gaps or overlaps
            for (ChunkRangeMap::const_iterator it=boost::next(_ranges.begin()), end=_ranges.end(); it != end; ++it) {
                ChunkRangeMap::const_iterator last = boost::prior(it);
                verify(it->second->getMin() == last->second->getMax());
            }

            // Check Map keys
            for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
                verify(it->first == it->second->getMax());
            }

            // Make sure we match the original chunks
            const ChunkMap chunks = _ranges.begin()->second->getManager()->_chunkMap;
            for ( ChunkMap::const_iterator i=chunks.begin(); i!=chunks.end(); ++i ) {
                const ChunkPtr chunk = i->second;

                ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
                ChunkRangeMap::const_iterator max = _ranges.lower_bound(chunk->getMax());

                verify(min != _ranges.end());
                verify(max != _ranges.end());
                verify(min == max);
                verify(min->second->getShard() == chunk->getShard());
                verify(min->second->containsKey( chunk->getMin() ));
                verify(min->second->containsKey( chunk->getMax() ) || (min->second->getMax() == chunk->getMax()));
            }

        }
        catch (...) {
            error() << "\t invalid ChunkRangeMap! printing ranges:";

            for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end; ++it) {
                log() << it->first << ": " << it->second->toString();
            }

            throw;
        }
    }
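The gap/overlap check above leans on the sorted iteration order of ChunkRangeMap (keyed by range max). The same adjacency idiom on a self-contained std::map, for clarity:

    #include <cassert>
    #include <map>

    typedef std::map<int, int> RangeMap; // max -> min, so iteration is ordered by max

    void assertNoGapsOrOverlaps( const RangeMap& ranges ) {
        if ( ranges.empty() )
            return;
        RangeMap::const_iterator it = ranges.begin();
        int prevMax = it->first;
        for ( ++it; it != ranges.end(); ++it ) {
            assert( it->second == prevMax ); // each range's min must equal its predecessor's max
            prevMax = it->first;
        }
    }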
Example #9
    Status ChunkManagerTargeter::targetInsert( const BSONObj& doc,
                                               ShardEndpoint** endpoint ) const {

        if ( !_primary && !_manager )  {
            return Status( ErrorCodes::NamespaceNotFound,
                           str::stream() << "could not target insert in collection "
                                         << getNS().ns()
                                         << "; no metadata found" );
        }

        if ( _primary ) {
            *endpoint = new ShardEndpoint( _primary->getName(),
                                           ChunkVersion::UNSHARDED() );
        }
        else {

            //
            // Sharded collections have the following requirements for targeting:
            //
            // Inserts must contain the exact shard key.
            //

            if ( !_manager->hasShardKey( doc ) ) {
                return Status( ErrorCodes::ShardKeyNotFound,
                               stream() << "document " << doc
                                        << " does not contain shard key for pattern "
                                        << _manager->getShardKey().key() );
            }

            ChunkPtr chunk = _manager->findChunkForDoc( doc );
            *endpoint = new ShardEndpoint( chunk->getShard().getName(),
                                           _manager->getVersion( chunk->getShard() ) );

            // Track autosplit stats for sharded collections
            _stats->chunkSizeDelta[chunk->getMin()] += doc.objsize();
        }

        return Status::OK();
    }
Example #10
    Status ChunkManagerTargeter::targetShardKey(const BSONObj& doc,
                                                ShardEndpoint** endpoint) const {

        invariant(NULL != _manager);
        dassert(_manager->hasShardKey(doc));

        ChunkPtr chunk = _manager->findChunkForDoc(doc);

        Shard shard = chunk->getShard();
        *endpoint = new ShardEndpoint(shard.getName(),
                                      _manager->getVersion(StringData(shard.getName())));

        return Status::OK();
    }
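targetShardKey's contract, per the invariant and dassert above, is that the caller has already verified the shard key is present; a hedged call-site sketch (`targeter`, `manager`, and `query` are assumed names):

    // Hypothetical call site: validate first, then target. Caller owns *endpoint.
    if ( !manager->hasShardKey( query ) ) {
        return Status( ErrorCodes::ShardKeyNotFound,
                       "query must contain the full shard key" );
    }
    ShardEndpoint* endpoint = NULL;
    Status status = targeter.targetShardKey( query, &endpoint ); // OK once the key is known present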
Example #11
        // TODO:  Same limitations as other mongos metadata commands, sometimes we'll be stale here
        // and fail.  Need to better integrate targeting with commands.
        ShardPtr guessMergeShard( const NamespaceString& nss, const BSONObj& minKey ) {

            DBConfigPtr config = grid.getDBConfig( nss.ns() );
            if ( !config->isSharded( nss ) ) {
                config->reload();
                if ( !config->isSharded( nss ) ) {
                    return ShardPtr();
                }
            }

            ChunkManagerPtr manager = config->getChunkManager( nss );
            if ( !manager ) return ShardPtr();
            ChunkPtr chunk = manager->findChunkForDoc( minKey );
            if ( !chunk ) return ShardPtr();
            return ShardPtr( new Shard( chunk->getShard() ) );
        }
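Since guessMergeShard deliberately returns an empty ShardPtr when the namespace is unsharded or no chunk matches, callers must check before dereferencing; a hedged usage sketch:

    // Hypothetical usage inside a merge-style command handler.
    ShardPtr mergeShard = guessMergeShard( NamespaceString( "test.users" ), minKey );
    if ( !mergeShard ) {
        errmsg = "could not determine shard for merge range";
        return false;
    }
    // ... forward the request using mergeShard->getAddress() ...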
Example #12
        void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            int flags = d.pullInt();
            
            BSONObj query = d.nextJsObj();
            uassert( 10201 ,  "invalid update" , d.moreJSObjs() );
            BSONObj toupdate = d.nextJsObj();

            BSONObj chunkFinder = query;
            
            bool upsert = flags & UpdateOption_Upsert;
            bool multi = flags & UpdateOption_Multi;

            uassert( 10202 ,  "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );

            if ( upsert && !(manager->hasShardKey(toupdate) ||
                             (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
            {
                throw UserException( 8012 , "can't upsert something without shard key" );
            }

            bool save = false;
            if ( ! manager->hasShardKey( query ) ){
                if ( multi ){
                    // no shard key in the query is fine here: multi-updates broadcast to matching shards below
                }
                else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ){
                    throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
                }
                else {
                    save = true;
                    chunkFinder = toupdate;
                }
            }

            
            if ( ! save ){
                if ( toupdate.firstElement().fieldName()[0] == '$' ){
                    BSONObjIterator ops(toupdate);
                    while(ops.more()){
                        BSONElement op(ops.next());
                        if (op.type() != Object)
                            continue;
                        BSONObjIterator fields(op.embeddedObject());
                        while(fields.more()){
                            const string field = fields.next().fieldName();
                            uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
                        }
                    }
                } else if ( manager->hasShardKey( toupdate ) ){
                    uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
                } else {
                    uasserted(12376, "shard key must be in update object");
                }
            }
            
            if ( multi ){
                set<Shard> shards;
                manager->getShardsForQuery( shards , chunkFinder );
                int * x = (int*)(r.d().afterNS());
                x[0] |= UpdateOption_Broadcast; // set the broadcast bit directly in the wire-message flags
                for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++){
                    doWrite( dbUpdate , r , *i , false );
                }
            }
            else {
                int left = 5;
                while ( true ){
                    try {
                        ChunkPtr c = manager->findChunk( chunkFinder );
                        doWrite( dbUpdate , r , c->getShard() );
                        c->splitIfShould( d.msg().header()->dataLen() );
                        break;
                    }
                    catch ( StaleConfigException& e ){
                        if ( left <= 0 )
                            throw e;
                        left--;
                        log() << "update failed b/c of StaleConfigException, retrying " 
                              << " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
                        r.reset( false );
                        manager = r.getChunkManager();
                    }
                }
            }

        }
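To make the acceptance rules in _update concrete, a few hedged examples against a shard key of { userId: 1 } (values illustrative):

    // $-operator update not touching the key: allowed (passes uassert 13123).
    //     query: { userId: 5 }        update: { $set: { score: 10 } }
    // $-operator update modifying the key: rejected by uassert 13123.
    //     query: { userId: 5 }        update: { $set: { userId: 7 } }
    // Replacement doc with an unchanged key: allowed (passes uassert 8014).
    //     query: { userId: 5 }        update: { userId: 5, score: 10 }
    // Replacement doc omitting the key: rejected by uasserted 12376.
    //     query: { userId: 5 }        update: { score: 10 }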
Example #13
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
            const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
            map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards
            try {
                while ( d.moreJSObjs() ) {
                    BSONObj o = d.nextJsObj();
                    if ( ! manager->hasShardKey( o ) ) {

                        bool bad = true;

                        if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                            BSONObjBuilder b;
                            b.appendOID( "_id" , 0 , true );
                            b.appendElements( o );
                            o = b.obj();
                            bad = ! manager->hasShardKey( o );
                        }

                        if ( bad ) {
                            log() << "tried to insert object with no valid shard key: " << r.getns() << "  " << o << endl;
                            uasserted( 8011 , "tried to insert object with no valid shard key" );
                        }

                    }

                    // Many operations benefit from having the shard key early in the object
                    o = manager->getShardKey().moveToFront(o);
                    insertsForChunk[manager->findChunk(o)].push_back(o);
                }
                for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin(); it != insertsForChunk.end(); ++it) {
                    ChunkPtr c = it->first;
                    vector<BSONObj> objs = it->second;
                    const int maxTries = 30;

                    bool gotThrough = false;
                    for ( int i=0; i<maxTries; i++ ) {
                        try {
                            LOG(4) << "  server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;
                            insert( c->getShard() , r.getns() , objs , flags);

                            int bytesWritten = 0;
                            for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                                r.gotInsert(); // Record the correct number of individual inserts
                                bytesWritten += (*vecIt).objsize();
                            }
                            if ( r.getClientInfo()->autoSplitOk() )
                                c->splitIfShould( bytesWritten );
                            gotThrough = true;
                            break;
                        }
                        catch ( StaleConfigException& e ) {
                            int logLevel = i < ( maxTries / 2 );
                            LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents because of StaleConfigException: " << e << endl;
                            r.reset();

                            unsigned long long old = manager->getSequenceNumber();

                            manager = r.getChunkManager();
                            if( ! manager ) {
                                uasserted(14804, "collection no longer sharded");
                            }

                            LOG( logLevel ) << "  sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                        }
                        sleepmillis( i * 20 );
                    }

                    assert( inShutdown() || gotThrough ); // not caught below
                }
            } catch (const UserException&){
                if (!d.moreJSObjs()){
                    throw;
                }
                // Ignore and keep going. ContinueOnError is implied with sharding.
            }
        }
Example #14
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
            const int flags = d.reservedField();
            bool keepGoing = flags & InsertOption_KeepGoing; // cleared below if we must abort instead of continuing

            while ( d.moreJSObjs() ) {
                try {
                    BSONObj o = d.nextJsObj();
                    if ( ! manager->hasShardKey( o ) ) {

                        bool bad = true;

                        if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                            BSONObjBuilder b;
                            b.appendOID( "_id" , 0 , true );
                            b.appendElements( o );
                            o = b.obj();
                            bad = ! manager->hasShardKey( o );
                        }

                        if ( bad ) {
                            log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                            uasserted( 8011 , "tried to insert object without shard key" );
                        }

                    }

                    // Many operations benefit from having the shard key early in the object
                    o = manager->getShardKey().moveToFront(o);

                    const int maxTries = 30;

                    bool gotThrough = false;
                    for ( int i=0; i<maxTries; i++ ) {
                        try {
                            ChunkPtr c = manager->findChunk( o );
                            log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                            insert( c->getShard() , r.getns() , o , flags);

                            r.gotInsert();
                            if ( r.getClientInfo()->autoSplitOk() )
                                c->splitIfShould( o.objsize() );
                            gotThrough = true;
                            break;
                        }
                        catch ( StaleConfigException& e ) {
                            int logLevel = i < ( maxTries / 2 );
                            LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                            r.reset();
                            
                            unsigned long long old = manager->getSequenceNumber();
                            manager = r.getChunkManager();

                            if (!manager) {
                                keepGoing = false;
                                uasserted(14804, "collection no longer sharded");
                            }

                            LOG( logLevel ) << "  sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                        }
                        sleepmillis( i * 20 );
                    }
                    
                    assert( inShutdown() || gotThrough ); // not caught below
                } catch (const UserException&){
                    if (!keepGoing || !d.moreJSObjs()){
                        throw;
                    }
                    // otherwise ignore and keep going
                }
            }
        }
Example #15
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager, vector<BSONObj>& insertsRemaining, map<ChunkPtr, vector<BSONObj> > insertsForChunks, int retries = 0 ) {

            uassert( 16055, str::stream() << "too many retries during bulk insert, " << insertsRemaining.size() << " inserts remaining", retries < 30 );
            uassert( 16056, str::stream() << "shutting down server during bulk insert, " << insertsRemaining.size() << " inserts remaining", ! inShutdown() );

            const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.

            _groupInserts( manager, insertsRemaining, insertsForChunks );

            while( ! insertsForChunks.empty() ){

                ChunkPtr c = insertsForChunks.begin()->first;
                vector<BSONObj>& objs = insertsForChunks.begin()->second;

                const Shard& shard = c->getShard();
                const string& ns = r.getns();

                ShardConnection dbcon( shard, ns, manager );

                try {

                    LOG(4) << "  server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;

                    // Taken from single-shard bulk insert, should not need multiple methods in future
                    // insert( c->getShard() , r.getns() , objs , flags);

                    // It's okay to set the version here; an exception will be thrown if the version is incompatible
                    dbcon.setVersion();

                    dbcon->insert( ns , objs , flags);
                    // TODO: Option for safe inserts here - can then use this for all inserts

                    dbcon.done();

                    int bytesWritten = 0;
                    for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                        r.gotInsert(); // Record the correct number of individual inserts
                        bytesWritten += (*vecIt).objsize();
                    }

                    // TODO: The only reason we're grouping by chunks here is for auto-split, more efficient
                    // to track this separately and bulk insert to shards
                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( bytesWritten );

                }
                catch ( StaleConfigException& e ) {
                    // Cleanup the connection
                    dbcon.done();

                    // Assume the inserts did *not* succeed, so we don't want to erase them

                    int logLevel = retries < 2;
                    LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents to chunk " << c << " because of StaleConfigException: " << e << endl;

                    if( retries > 2 ){
                        versionManager.forceRemoteCheckShardVersionCB( e.getns() );
                    }

                    // TODO:  Replace with actual chunk handling code, simplify request
                    r.reset();
                    manager = r.getChunkManager();

                    if( ! manager ) {
                        // TODO : We can probably handle this better?
                        uasserted( 14804, "collection no longer sharded" );
                    }
                    // End TODO

                    // We may need to regroup at least some of our inserts since our chunk manager may have changed
                    _insert( r, d, manager, insertsRemaining, insertsForChunks, retries + 1 );
                    return;
                }
                catch( UserException& ){
                    // Unexpected exception, so don't clean up the conn
                    dbcon.kill();

                    // These inserts won't be retried, as something weird happened here
                    insertsForChunks.erase( insertsForChunks.begin() );

                    // Throw if this is the last chunk bulk-inserted to
                    if( insertsForChunks.empty() ){
                        throw;
                    }
                }

                insertsForChunks.erase( insertsForChunks.begin() );
            }
        }
Example #16
        void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            int flags = d.pullInt();
            
            BSONObj query = d.nextJsObj();
            uassert( 10201 ,  "invalid update" , d.moreJSObjs() );
            BSONObj toupdate = d.nextJsObj();

            BSONObj chunkFinder = query;
            
            bool upsert = flags & UpdateOption_Upsert;
            bool multi = flags & UpdateOption_Multi;

            if ( multi )
                uassert( 10202 ,  "can't mix multi and upsert and sharding" , ! upsert );

            if ( upsert && !(manager->hasShardKey(toupdate) ||
                             (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
            {
                throw UserException( 8012 , "can't upsert something without shard key" );
            }

            bool save = false;
            if ( ! manager->hasShardKey( query ) ){
                if ( multi ){
                }
                else if ( query.nFields() != 1 || strcmp( query.firstElement().fieldName() , "_id" ) ){
                    throw UserException( 8013 , "can't do update with query that doesn't have the shard key" );
                }
                else {
                    save = true;
                    chunkFinder = toupdate;
                }
            }

            
            if ( ! save ){
                if ( toupdate.firstElement().fieldName()[0] == '$' ){
                    BSONObjIterator ops(toupdate);
                    while(ops.more()){
                        BSONElement op(ops.next());
                        if (op.type() != Object)
                            continue;
                        BSONObjIterator fields(op.embeddedObject());
                        while(fields.more()){
                            const string field = fields.next().fieldName();
                            uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
                        }
                    }
                } else if ( manager->hasShardKey( toupdate ) ){
                    uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
                } else {
                    uasserted(12376, "shard key must be in update object");
                }
            }
            
            if ( multi ){
                vector<shared_ptr<ChunkRange> > chunks;
                manager->getChunksForQuery( chunks , chunkFinder );
                set<Shard> seen;
                for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
                    shared_ptr<ChunkRange> c = *i;
                    if ( seen.count( c->getShard() ) )
                        continue;
                    doWrite( dbUpdate , r , c->getShard() );
                    seen.insert( c->getShard() );
                }
            }
            else {
                ChunkPtr c = manager->findChunk( chunkFinder );
                doWrite( dbUpdate , r , c->getShard() );
                c->splitIfShould( d.msg().header()->dataLen() );
            }

        }