Example #1
/*
====================
Load
====================
*/
VOID EMap::Load(const GData* data)
{
	GUARD(Map::Load);

	CHECK(data);
	U8* data_ptr = (U8*)data->Ptr();

	// check the map header
	CHECK(*(U32*)data_ptr == (MAKEFOURCC('G','M','A','P')));
	data_ptr += sizeof(U32);

	// get the dimensions of the map
	mWidth = *(U32*)data_ptr;
	data_ptr += sizeof(U32);
	mHeight = *(U32*)data_ptr;
	data_ptr += sizeof(U32);

	// load all of the chunks
	for(U32 j = 0; j < mHeight; j++)
	{
		for(U32 i = 0; i < mWidth; i++)
		{
			ChunkPtr chunk = GNEW(EChunk); CHECK(chunk);
			chunk->Load(data_ptr);
			mChunks.push_back(chunk);
		}
	}

	UNGUARD;
}
Example #2
    Status ChunkManagerTargeter::targetDoc( const BSONObj& doc, ShardEndpoint** endpoint ) const {

        if ( !_primary && !_manager ) return Status( ErrorCodes::NamespaceNotFound, "" );

        if ( _manager ) {
            if ( !_manager->hasShardKey( doc ) ) {
                return Status( ErrorCodes::ShardKeyNotFound,
                               stream() << "document " << doc
                                        << " does not contain shard key for pattern "
                                        << _manager->getShardKey().key() );
            }
            ChunkPtr chunk = _manager->findChunkForDoc( doc );
            *endpoint = new ShardEndpoint( chunk->getShard().getName(),
                                           _manager->getVersion( chunk->getShard() ),
                                           chunk->getShard().getAddress() );

            _stats->chunkSizeDelta[chunk->getMin()] += doc.objsize();
        }
        else {
            *endpoint = new ShardEndpoint( _primary->getName(),
                                           ChunkVersion::UNSHARDED(),
                                           _primary->getAddress() );
        }

        return Status::OK();
    }
Example #3
ChunkPtr ChunkManager::findIntersectingChunk(const BSONObj& shardKey) const {
    {
        BSONObj chunkMin;
        ChunkPtr chunk;
        {
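            // The chunk map is ordered by chunk boundary key; upper_bound finds the first
            // entry whose key compares greater than shardKey, and containsKey() below
            // verifies that the chunk actually covers it.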
            ChunkMap::const_iterator it = _chunkMap.upper_bound(shardKey);
            if (it != _chunkMap.end()) {
                chunkMin = it->first;
                chunk = it->second;
            }
        }

        if (chunk) {
            if (chunk->containsKey(shardKey)) {
                return chunk;
            }

            log() << chunkMin;
            log() << *chunk;
            log() << shardKey;

            reload();
            msgasserted(13141, "Chunk map pointed to incorrect chunk");
        }
    }

    msgasserted(8070,
                str::stream() << "couldn't find a chunk intersecting: " << shardKey
                              << " for ns: " << _ns << " at version: " << _version.toString()
                              << ", number of chunks: " << _chunkMap.size());
}
Example #4
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            
            while ( d.moreJSObjs() ){
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ){

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }
                    
                    if ( bad ){
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }
                    
                }
                
                ChunkPtr c = manager->findChunk( o );
                log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                
                c->splitIfShould( o.objsize() );
            }            
        }
Example #5
void LTTumblingWindowRandomizer::Prefetch() const
{
    size_t position = m_chunkPosition;
    size_t sweepIndex = m_sweepCount;

    // Prefetch does not change any state that cannot be recalculated,
    // only prefetches data.
    int64_t range = m_randomizationRange;
    m_prefetchedChunks.clear();
    m_prefetchedSequences.clear();

    size_t lastSequencePositionInWindow = 0;
    size_t lastWindowPosition = m_chunkPosition;
    while (range > 0)
    {
        auto desc = m_prefetchedChunkDescriptions[position];
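        // Chunks are assigned to workers round-robin by position; only this worker's
        // chunks are actually fetched from the deserializer.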
        if (position % Config().m_numberOfWorkers == Config().m_workerRank) // Need to add to the window
        {
            size_t oldSize = m_prefetchedSequences.size();

            // Query deserializer.
            ChunkPtr data = m_deserializer->GetChunk(desc.m_id);
            data->SequenceInfos(m_prefetchedSequences);
            m_prefetchedChunks.push_back(std::make_tuple(desc, data));

            if (!m_sampleBasedRandomizationWindow)
                --range;
            else
                for (size_t i = oldSize; i < m_prefetchedSequences.size(); ++i)
                    range -= m_prefetchedSequences[i].m_numberOfSamples;
        }
        else
        {
            // Empty placeholder; we do not need the data here, only to track the current chunk.
            m_prefetchedChunks.push_back(std::make_tuple(ChunkInfo{}, nullptr));
        }

        if (position == m_originalChunkDescriptions.size() - 1)
        {
            // Sweep boundary, randomize all sequences in the window from the previous sweep.
            RandomizeWindow(sweepIndex, lastWindowPosition, lastSequencePositionInWindow);

            // Switch to next sweep, randomize chunks.
            sweepIndex++;
            RandomizeChunks(sweepIndex);

            // Put a marker and reset window position to the beginning of the sweep.
            m_prefetchedSequences.push_back(s_endOfSweep);
            lastWindowPosition = 0;
            lastSequencePositionInWindow = m_prefetchedSequences.size();
        }

        position = (position + 1) % m_originalChunkDescriptions.size();
    }

    // Rerandomize the last part of the sequences.
    RandomizeWindow(sweepIndex, lastWindowPosition, lastSequencePositionInWindow);
}
Example #6
            bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){
                ShardConnection::sync();

                Timer t;
                string ns = cmdObj.firstElement().valuestrsafe();
                if ( ns.size() == 0 ){
                    errmsg = "no ns";
                    return false;
                }

                DBConfigPtr config = grid.getDBConfig( ns );
                if ( ! config->isSharded( ns ) ){
                    errmsg = "ns not sharded.  have to shard before can move a chunk";
                    return false;
                }

                BSONObj find = cmdObj.getObjectField( "find" );
                if ( find.isEmpty() ){
                    errmsg = "need to specify find.  see help";
                    return false;
                }

                string toString = cmdObj["to"].valuestrsafe();
                if ( ! toString.size()  ){
                    errmsg = "you have to specify where you want to move the chunk";
                    return false;
                }
                
                Shard to = Shard::make( toString );

                // so far, chunk size serves test purposes; it may or may not become a supported parameter
                long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
                if ( maxChunkSizeBytes == 0 ) {
                    maxChunkSizeBytes = Chunk::MaxChunkSize;
                }

                tlog() << "CMD: movechunk: " << cmdObj << endl;

                ChunkManagerPtr info = config->getChunkManager( ns );
                ChunkPtr c = info->findChunk( find );
                const Shard& from = c->getShard();

                if ( from == to ){
                    errmsg = "that chunk is already on that shard";
                    return false;
                }
                
                BSONObj res;
                if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ){
                    errmsg = "move failed";
                    result.append( "cause" , res );
                    return false;
                }

                result.append( "millis" , t.millis() );
                return true;
            }
Example #7
    int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks , bool secondaryThrottle ) {
        int movedCount = 0;

        for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
            const CandidateChunk& chunkInfo = *it->get();

            DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
            verify( cfg );

            ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
            verify( cm );

            ChunkPtr c = cm->findChunk( chunkInfo.chunk.min );
            if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
                // likely a split happened somewhere
                cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
                verify( cm );

                c = cm->findChunk( chunkInfo.chunk.min );
                if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
                    log() << "chunk mismatch after reload, ignoring will retry issue " << chunkInfo.chunk.toString() << endl;
                    continue;
                }
            }

            BSONObj res;
            if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , secondaryThrottle , res ) ) {
                movedCount++;
                continue;
            }

            // the move requires acquiring the collection metadata's lock, which can fail
            log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
                  << " chunk: " << chunkInfo.chunk << endl;

            if ( res["chunkTooBig"].trueValue() ) {
                // reload just to be safe
                cm = cfg->getChunkManager( chunkInfo.ns );
                verify( cm );
                c = cm->findChunk( chunkInfo.chunk.min );
                
                log() << "forcing a split because migrate failed for size reasons" << endl;
                
                res = BSONObj();
                c->singleSplit( true , res );
                log() << "forced split results: " << res << endl;
                
                if ( ! res["ok"].trueValue() ) {
                    log() << "marking chunk as jumbo: " << c->toString() << endl;
                    c->markAsJumbo();
                    // we increment movedCount so we do another round right away
                    movedCount++;
                }

            }
        }

        return movedCount;
    }
Example #8
        void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags ) {
            ChunkManagerPtr manager = conf->getChunkManager(ns);
            if ( ! manager->hasShardKey( o ) ) {

                bool bad = true;

                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }

                if ( bad ) {
                    log() << "tried to insert object without shard key: " << ns << "  " << o << endl;
                    uasserted( 14842 , "tried to insert object without shard key" );
                }

            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);

            const int maxTries = 30;

            for ( int i=0; i<maxTries; i++ ) {
                try {
                    ChunkPtr c = manager->findChunk( o );
                    log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                    insert( c->getShard() , ns , o , flags);

//                    r.gotInsert();
//                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( o.objsize() );
                    break;
                }
                catch ( StaleConfigException& e ) {
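                    // First half of the retries log at level 1 (debug); later retries
                    // log at level 0 so they are always visible.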
                    int logLevel = i < ( maxTries / 2 );
                    LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
//                    r.reset();

                    unsigned long long old = manager->getSequenceNumber();
                    manager = conf->getChunkManager(ns);

                    LOG( logLevel ) << "  sequenece number - old: " << old << " new: " << manager->getSequenceNumber() << endl;

                    if (!manager) {
                        uasserted(14843, "collection no longer sharded");
                    }
                }
                sleepmillis( i * 20 );
            }
        }
Example #9
void PeerConnection::HandleStringMessage(ChunkPtr chunk, uint16_t sid) {
  auto cur_channel = GetChannel(sid);
  if (!cur_channel) {
    logger->warn("Received msg on unknown channel: {}", sid);
    return;
  }

  std::string cur_msg(reinterpret_cast<char *>(chunk->Data()), chunk->Length());

  cur_channel->OnStringMsg(cur_msg);
}
Example #10
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {

            while ( d.moreJSObjs() ) {
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ) {

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }

                    if ( bad ) {
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }

                }

                // Many operations benefit from having the shard key early in the object
                o = manager->getShardKey().moveToFront(o);

                const int maxTries = 10;

                bool gotThrough = false;
                for ( int i=0; i<maxTries; i++ ) {
                    try {
                        ChunkPtr c = manager->findChunk( o );
                        log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                        insert( c->getShard() , r.getns() , o );

                        r.gotInsert();
                        if ( r.getClientInfo()->autoSplitOk() )
                            c->splitIfShould( o.objsize() );
                        gotThrough = true;
                        break;
                    }
                    catch ( StaleConfigException& e ) {
                        log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                        r.reset();
                        manager = r.getChunkManager();
                        uassert(14804, "collection no longer sharded", manager);
                    }
                    sleepmillis( i * 200 );
                }
                
                assert( inShutdown() || gotThrough );
            }
        }
Example #11
            virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){

                result << "shardinfo" << old->toString();

                result.appendBool( "auto" , middle.isEmpty() );

                if ( middle.isEmpty() )
                    middle = old->pickSplitPoint();

                result.append( "middle" , middle );

                return true;
            }
Example #12
            virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns , ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ){
                assert( old.get() );
                log() << "splitting: " << ns << "  shard: " << old << endl;

                if ( middle.isEmpty() )
                    old->split();
                else {
                    vector<BSONObj> splitPoints;
                    splitPoints.push_back( middle );
                    old->multiSplit( splitPoints );
                }

                return true;
            }
Example #13
    Status ChunkManagerTargeter::targetShardKey(const BSONObj& doc,
                                                ShardEndpoint** endpoint) const {

        invariant(NULL != _manager);
        dassert(_manager->hasShardKey(doc));

        ChunkPtr chunk = _manager->findChunkForDoc(doc);

        Shard shard = chunk->getShard();
        *endpoint = new ShardEndpoint(shard.getName(),
                                      _manager->getVersion(StringData(shard.getName())));

        return Status::OK();
    }
Example #14
            bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
                ShardConnection::sync();

                string ns = cmdObj.firstElement().valuestrsafe();
                if ( ns.size() == 0 ) {
                    errmsg = "no ns";
                    return false;
                }

                DBConfigPtr config = grid.getDBConfig( ns );
                if ( ! config->isSharded( ns ) ) {
                    errmsg = "ns not sharded.  have to shard before can split";
                    return false;
                }

                BSONObj find = cmdObj.getObjectField( "find" );
                if ( find.isEmpty() ) {
                    find = cmdObj.getObjectField( "middle" );

                    if ( find.isEmpty() ) {
                        errmsg = "need to specify find or middle";
                        return false;
                    }
                }

                ChunkManagerPtr info = config->getChunkManager( ns );
                ChunkPtr chunk = info->findChunk( find );
                BSONObj middle = cmdObj.getObjectField( "middle" );

                assert( chunk.get() );
                log() << "splitting: " << ns << "  shard: " << chunk << endl;

                BSONObj res;
                ChunkPtr p;
                if ( middle.isEmpty() ) {
                    p = chunk->singleSplit( true /* force a split even if not enough data */ , res );

                }
                else {
                    // sanity check if the key provided is a valid split point
                    if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) {
                        errmsg = "cannot split on initial or final chunk's key";
                        return false;
                    }

                    vector<BSONObj> splitPoints;
                    splitPoints.push_back( middle );
                    p = chunk->multiSplit( splitPoints , res );
                }

                if ( p.get() == NULL ) {
                    errmsg = "split failed";
                    result.append( "cause" , res );
                    return false;
                }

                return true;
            }
Example #15
        // TODO:  Same limitations as other mongos metadata commands, sometimes we'll be stale here
        // and fail.  Need to better integrate targeting with commands.
        ShardPtr guessMergeShard( const NamespaceString& nss, const BSONObj& minKey ) {

            DBConfigPtr config = grid.getDBConfig( nss.ns() );
            if ( !config->isSharded( nss ) ) {
                config->reload();
                if ( !config->isSharded( nss ) ) {
                    return ShardPtr();
                }
            }

            ChunkManagerPtr manager = config->getChunkManager( nss );
            if ( !manager ) return ShardPtr();
            ChunkPtr chunk = manager->findChunkForDoc( minKey );
            if ( !chunk ) return ShardPtr();
            return ShardPtr( new Shard( chunk->getShard() ) );
        }
Example #16
void PeerConnection::HandleNewDataChannel(ChunkPtr chunk, uint16_t sid) {
  uint8_t *raw_msg = chunk->Data();
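  // Parse the data-channel open message: byte 0 is the message type, followed by
  // big-endian channel type, priority, reliability parameter, and label/protocol lengths.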
  dc_open_msg open_msg;
  open_msg.chan_type = raw_msg[1];
  open_msg.priority = (raw_msg[2] << 8) + raw_msg[3];
  open_msg.reliability = (raw_msg[4] << 24) + (raw_msg[5] << 16) + (raw_msg[6] << 8) + raw_msg[7];
  open_msg.label_len = (raw_msg[8] << 8) + raw_msg[9];
  open_msg.protocol_len = (raw_msg[10] << 8) + raw_msg[11];

  std::string label(reinterpret_cast<char *>(raw_msg + 12), open_msg.label_len);
  std::string protocol(reinterpret_cast<char *>(raw_msg + 12 + open_msg.label_len), open_msg.protocol_len);

  SPDLOG_DEBUG(logger, "Creating channel with sid: {}, chan_type: {}, label: {}, protocol: {}", sid, open_msg.chan_type, label, protocol);

  // TODO: Support overriding an existing channel
  auto new_channel = std::make_shared<DataChannel>(this, sid, open_msg.chan_type, label, protocol);

  data_channels[sid] = new_channel;

  if (this->new_channel_cb) {
    this->new_channel_cb(new_channel);
  } else {
    logger->warn("No new channel callback, ignoring new channel");
  }
}
Example #17
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            
            while ( d.moreJSObjs() ){
                BSONObj o = d.nextJsObj();
                if ( ! manager->hasShardKey( o ) ){

                    bool bad = true;

                    if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                        BSONObjBuilder b;
                        b.appendOID( "_id" , 0 , true );
                        b.appendElements( o );
                        o = b.obj();
                        bad = ! manager->hasShardKey( o );
                    }
                    
                    if ( bad ){
                        log() << "tried to insert object without shard key: " << r.getns() << "  " << o << endl;
                        throw UserException( 8011 , "tried to insert object without shard key" );
                    }
                    
                }
                
                bool gotThrough = false;
                for ( int i=0; i<10; i++ ){
                    try {
                        ChunkPtr c = manager->findChunk( o );
                        log(4) << "  server:" << c->getShard().toString() << " " << o << endl;
                        insert( c->getShard() , r.getns() , o );
                        
                        r.gotInsert();
                        c->splitIfShould( o.objsize() );
                        gotThrough = true;
                        break;
                    }
                    catch ( StaleConfigException& ){
                        log(1) << "retrying insert because of StaleConfigException: " << o << endl;
                        r.reset();
                        manager = r.getChunkManager();
                    }
                    sleepmillis( i * 200 );
                }

                assert( gotThrough );

            }            
        }
Example #18
    int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
        int movedCount = 0;

        for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
            const CandidateChunk& chunkInfo = *it->get();

            DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
            assert( cfg );

            ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
            assert( cm );

            const BSONObj& chunkToMove = chunkInfo.chunk;
            ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
            if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
                // likely a split happened somewhere
                cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
                assert( cm );

                c = cm->findChunk( chunkToMove["min"].Obj() );
                if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
                    log() << "chunk mismatch after reload, ignoring will retry issue cm: "
                          << c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
                    continue;
                }
            }

            BSONObj res;
            if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , res ) ) {
                movedCount++;
                continue;
            }

            // the move requires acquiring the collection metadata's lock, which can fail
            log() << "balacer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
                  << " chunk: " << chunkToMove << endl;

            if ( res["chunkTooBig"].trueValue() ) {
                // reload just to be safe
                cm = cfg->getChunkManager( chunkInfo.ns );
                assert( cm );
                c = cm->findChunk( chunkToMove["min"].Obj() );
                
                log() << "forcing a split because migrate failed for size reasons" << endl;
                
                res = BSONObj();
                c->singleSplit( true , res );
                log() << "forced split results: " << res << endl;

                // TODO: if the split fails, mark as jumbo SERVER-2571
            }
        }

        return movedCount;
    }
Example #19
Status ChunkManagerTargeter::targetShardKey(OperationContext* txn,
                                            const BSONObj& shardKey,
                                            long long estDataSize,
                                            ShardEndpoint** endpoint) const {
    invariant(NULL != _manager);

    ChunkPtr chunk = _manager->findIntersectingChunk(txn, shardKey);

    // Track autosplit stats for sharded collections
    // Note: this is only best effort accounting and is not accurate.
    if (estDataSize > 0) {
        _stats.chunkSizeDelta[chunk->getMin()] += estDataSize;
    }

    *endpoint = new ShardEndpoint(chunk->getShardId(), _manager->getVersion(chunk->getShardId()));

    return Status::OK();
}
Example #20
void ChunkRangeManager::assertValid() const {
    if (_ranges.empty())
        return;

    try {
        // No Nulls
        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end;
             ++it) {
            verify(it->second);
        }

        // Check endpoints
        verify(allOfType(MinKey, _ranges.begin()->second->getMin()));
        verify(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));

        // Make sure there are no gaps or overlaps
        for (ChunkRangeMap::const_iterator it = boost::next(_ranges.begin()), end = _ranges.end();
             it != end;
             ++it) {
            ChunkRangeMap::const_iterator last = boost::prior(it);
            verify(it->second->getMin() == last->second->getMax());
        }

        // Check Map keys
        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end;
             ++it) {
            verify(it->first == it->second->getMax());
        }

        // Make sure we match the original chunks
        const ChunkMap chunks = _ranges.begin()->second->getManager()->_chunkMap;
        for (ChunkMap::const_iterator i = chunks.begin(); i != chunks.end(); ++i) {
            const ChunkPtr chunk = i->second;

            ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
            ChunkRangeMap::const_iterator max = _ranges.lower_bound(chunk->getMax());

            verify(min != _ranges.end());
            verify(max != _ranges.end());
            verify(min == max);
            verify(min->second->getShardId() == chunk->getShardId());
            verify(min->second->containsKey(chunk->getMin()));
            verify(min->second->containsKey(chunk->getMax()) ||
                   (min->second->getMax() == chunk->getMax()));
        }

    } catch (...) {
        error() << "\t invalid ChunkRangeMap! printing ranges:";

        for (ChunkRangeMap::const_iterator it = _ranges.begin(), end = _ranges.end(); it != end;
             ++it) {
            log() << it->first << ": " << it->second->toString();
        }

        throw;
    }
}
Example #21
void BioGenSource::getChunkBiomes(int a_ChunkX, int a_ChunkZ, ChunkPtr a_DestChunk)
{
	cChunkDef::BiomeMap biomes;
	{
		QMutexLocker lock(&m_Mtx);
		m_BiomeGen->GenBiomes(a_ChunkX, a_ChunkZ, biomes);
	}
	Chunk::Image img;
	biomesToImage(biomes, img);
	a_DestChunk->setImage(img);
}
Example #22
    /**
     * Splits the chunks touched, based on the targeter stats, if needed.
     */
    static void splitIfNeeded( const string& ns, const TargeterStats& stats ) {
        if ( !Chunk::ShouldAutoSplit ) {
            return;
        }

        DBConfigPtr config;

        try {
            config = grid.getDBConfig( ns );
        }
        catch ( const DBException& ex ) {
            warning() << "failed to get database config for " << ns
                      << " while checking for auto-split: " << causedBy( ex ) << endl;
            return;
        }

        ChunkManagerPtr chunkManager;
        ShardPtr dummyShard;
        config->getChunkManagerOrPrimary( ns, chunkManager, dummyShard );

        if ( !chunkManager ) {
            return;
        }

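        // For each chunk that received writes, trigger a split check using the
        // accumulated size delta recorded by the targeter.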
        for ( map<BSONObj, int>::const_iterator it = stats.chunkSizeDelta.begin();
            it != stats.chunkSizeDelta.end(); ++it ) {

            ChunkPtr chunk;
            try {
                chunk = chunkManager->findIntersectingChunk( it->first );
            }
            catch ( const AssertionException& ex ) {
                warning() << "could not find chunk while checking for auto-split: "
                          << causedBy( ex ) << endl;
                return;
            }

            chunk->splitIfShould( it->second );
        }
    }
Example #23
    void DistributionStatus::populateShardToChunksMap(const vector<Shard>& allShards,
                                                      const ChunkManager& chunkMgr,
                                                      ShardToChunksMap* shardToChunksMap) {
        // Makes sure there is an entry in shardToChunksMap for every shard.
        for (vector<Shard>::const_iterator it = allShards.begin();
                it != allShards.end(); ++it) {

            OwnedPointerVector<ChunkType>*& chunkList =
                    (*shardToChunksMap)[it->getName()];

            if (chunkList == NULL) {
                chunkList = new OwnedPointerVector<ChunkType>();
            }
        }

        const ChunkMap& chunkMap = chunkMgr.getChunkMap();
        for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
            const ChunkPtr chunkPtr = it->second;

            auto_ptr<ChunkType> chunk(new ChunkType());
            chunk->setNS(chunkPtr->getns());
            chunk->setMin(chunkPtr->getMin().getOwned());
            chunk->setMax(chunkPtr->getMax().getOwned());
            chunk->setJumbo(chunkPtr->isJumbo()); // TODO: is this reliable?
            const string shardName(chunkPtr->getShard().getName());
            chunk->setShard(shardName);

            (*shardToChunksMap)[shardName]->push_back(chunk.release());
        }
    }
Example #24
    Status ChunkManagerTargeter::targetInsert( const BSONObj& doc,
                                               ShardEndpoint** endpoint ) const {

        if ( !_primary && !_manager )  {
            return Status( ErrorCodes::NamespaceNotFound,
                           str::stream() << "could not target insert in collection "
                                         << getNS().ns()
                                         << "; no metadata found" );
        }

        if ( _primary ) {
            *endpoint = new ShardEndpoint( _primary->getName(),
                                           ChunkVersion::UNSHARDED() );
        }
        else {

            //
            // Sharded collections have the following requirements for targeting:
            //
            // Inserts must contain the exact shard key.
            //

            if ( !_manager->hasShardKey( doc ) ) {
                return Status( ErrorCodes::ShardKeyNotFound,
                               stream() << "document " << doc
                                        << " does not contain shard key for pattern "
                                        << _manager->getShardKey().key() );
            }

            ChunkPtr chunk = _manager->findChunkForDoc( doc );
            *endpoint = new ShardEndpoint( chunk->getShard().getName(),
                                           _manager->getVersion( chunk->getShard() ) );

            // Track autosplit stats for sharded collections
            _stats->chunkSizeDelta[chunk->getMin()] += doc.objsize();
        }

        return Status::OK();
    }
Example #25
    int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
        int movedCount = 0;

        for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ){
            const CandidateChunk& chunkInfo = *it->get();

            DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
            assert( cfg );
        
            ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
            assert( cm );
        
            const BSONObj& chunkToMove = chunkInfo.chunk;
            ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
            if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
                // likely a split happened somewhere
                cm = cfg->getChunkManager( chunkInfo.ns , true );
                assert( cm );

                c = cm->findChunk( chunkToMove["min"].Obj() );
                if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) ){
                    log() << "balancer: chunk mismatch after reload, ignoring will retry issue cm: " 
                          << c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
                    continue;
                }
            }
        
            string errmsg;
            if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , errmsg ) ){
                movedCount++;
                continue;
            }

            log() << "balancer: MOVE FAILED **** " << errmsg << "\n"
                  << "  from: " << chunkInfo.from << " to: " << " chunk: " << chunkToMove << endl;
        }

        return movedCount;
    }
Example #26
// Matches DataChannel onmessage
void PeerConnection::OnSCTPMsgReceived(ChunkPtr chunk, uint16_t sid, uint32_t ppid) {
  SPDLOG_TRACE(logger, "OnSCTPMsgReceived(): Handling an sctp message");
  if (ppid == PPID_CONTROL) {
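    // Control messages carry the data-channel establishment protocol; the first
    // payload byte identifies the message type (open or ack).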
    SPDLOG_TRACE(logger, "Control PPID");
    if (chunk->Data()[0] == DC_TYPE_OPEN) {
      SPDLOG_TRACE(logger, "New channel time!");
      HandleNewDataChannel(chunk, sid);
    } else if (chunk->Data()[0] == DC_TYPE_ACK) {
      SPDLOG_TRACE(logger, "DC ACK");
      // HandleDataChannelAck(chunk, sid); XXX: Don't care right now
    } else {
      SPDLOG_TRACE(logger, "Unknown msg_type for ppid control: {}", chunk->Data()[0]);
    }
  } else if ((ppid == PPID_STRING) || (ppid == PPID_STRING_EMPTY)) {
    SPDLOG_TRACE(logger, "String msg");
    HandleStringMessage(chunk, sid);
  } else if ((ppid == PPID_BINARY) || (ppid == PPID_BINARY_EMPTY)) {
    SPDLOG_TRACE(logger, "Binary msg");
    HandleBinaryMessage(chunk, sid);
  } else {
    logger->error("Unknown ppid={}", ppid);
  }
}
Example #27
void DistributionStatus::populateShardToChunksMap(const ShardStatisticsVector& allShards,
                                                  const ChunkManager& chunkMgr,
                                                  ShardToChunksMap* shardToChunksMap) {
    // Makes sure there is an entry in shardToChunksMap for every shard.
    for (const auto& stat : allShards) {
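        // operator[] default-constructs an empty chunk list for shards that own no chunks yet.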
        (*shardToChunksMap)[stat.shardId];
    }

    const ChunkMap& chunkMap = chunkMgr.getChunkMap();
    for (ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it) {
        const ChunkPtr chunkPtr = it->second;

        ChunkType chunk;
        chunk.setNS(chunkMgr.getns());
        chunk.setMin(chunkPtr->getMin().getOwned());
        chunk.setMax(chunkPtr->getMax().getOwned());
        chunk.setJumbo(chunkPtr->isJumbo());  // TODO: is this reliable?

        const string shardName(chunkPtr->getShardId());
        chunk.setShard(shardName);

        (*shardToChunksMap)[shardName].push_back(chunk);
    }
}
Example #28
        void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
            int flags = d.pullInt();
            
            BSONObj query = d.nextJsObj();
            uassert( 10201 ,  "invalid update" , d.moreJSObjs() );
            BSONObj toupdate = d.nextJsObj();

            BSONObj chunkFinder = query;
            
            bool upsert = flags & UpdateOption_Upsert;
            bool multi = flags & UpdateOption_Multi;

            uassert( 10202 ,  "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );

            if ( upsert && !(manager->hasShardKey(toupdate) ||
                             (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query))))
            {
                throw UserException( 8012 , "can't upsert something without shard key" );
            }

            bool save = false;
            if ( ! manager->hasShardKey( query ) ){
                if ( multi ){
                }
                else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ){
                    throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
                }
                else {
                    save = true;
                    chunkFinder = toupdate;
                }
            }

            
            if ( ! save ){
                if ( toupdate.firstElement().fieldName()[0] == '$' ){
                    BSONObjIterator ops(toupdate);
                    while(ops.more()){
                        BSONElement op(ops.next());
                        if (op.type() != Object)
                            continue;
                        BSONObjIterator fields(op.embeddedObject());
                        while(fields.more()){
                            const string field = fields.next().fieldName();
                            uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
                        }
                    }
                } else if ( manager->hasShardKey( toupdate ) ){
                    uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
                } else {
                    uasserted(12376, "shard key must be in update object");
                }
            }
            
            if ( multi ){
                set<Shard> shards;
                manager->getShardsForQuery( shards , chunkFinder );
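                // Set the broadcast flag directly in the raw message buffer so the
                // multi-update is applied on every targeted shard.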
                int * x = (int*)(r.d().afterNS());
                x[0] |= UpdateOption_Broadcast;
                for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++){
                    doWrite( dbUpdate , r , *i , false );
                }
            }
            else {
                int left = 5;
                while ( true ){
                    try {
                        ChunkPtr c = manager->findChunk( chunkFinder );
                        doWrite( dbUpdate , r , c->getShard() );
                        c->splitIfShould( d.msg().header()->dataLen() );
                        break;
                    }
                    catch ( StaleConfigException& e ){
                        if ( left <= 0 )
                            throw e;
                        left--;
                        log() << "update failed b/c of StaleConfigException, retrying " 
                              << " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
                        r.reset( false );
                        manager = r.getChunkManager();
                    }
                }
            }

        }
Example #29
    int Balancer::_moveChunks(const vector<CandidateChunkPtr>* candidateChunks,
                              bool secondaryThrottle,
                              bool waitForDelete)
    {
        int movedCount = 0;

        for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
            const CandidateChunk& chunkInfo = *it->get();

            // Changes to metadata, borked metadata, and connectivity problems should cause us to
            // abort this chunk move, but shouldn't cause us to abort the entire round of chunks.
            // TODO: Handle all these things more cleanly, since they're expected problems
            try {

                DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
                verify( cfg );

                // NOTE: We purposely do not reload metadata here, since _doBalanceRound already
                // tried to do so once.
                ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
                verify( cm );

                ChunkPtr c = cm->findIntersectingChunk( chunkInfo.chunk.min );
                if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
                    // likely a split happened somewhere
                    cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
                    verify( cm );

                    c = cm->findIntersectingChunk( chunkInfo.chunk.min );
                    if ( c->getMin().woCompare( chunkInfo.chunk.min ) || c->getMax().woCompare( chunkInfo.chunk.max ) ) {
                        log() << "chunk mismatch after reload, ignoring will retry issue " << chunkInfo.chunk.toString() << endl;
                        continue;
                    }
                }

                BSONObj res;
                if (c->moveAndCommit(Shard::make(chunkInfo.to),
                                     Chunk::MaxChunkSize,
                                     secondaryThrottle,
                                     waitForDelete,
                                     0, /* maxTimeMS */
                                     res)) {
                    movedCount++;
                    continue;
                }

                // the move requires acquiring the collection metadata's lock, which can fail
                log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
                      << " chunk: " << chunkInfo.chunk << endl;

                if ( res["chunkTooBig"].trueValue() ) {
                    // reload just to be safe
                    cm = cfg->getChunkManager( chunkInfo.ns );
                    verify( cm );
                    c = cm->findIntersectingChunk( chunkInfo.chunk.min );

                    log() << "forcing a split because migrate failed for size reasons" << endl;

                    res = BSONObj();
                    c->singleSplit( true , res );
                    log() << "forced split results: " << res << endl;

                    if ( ! res["ok"].trueValue() ) {
                        log() << "marking chunk as jumbo: " << c->toString() << endl;
                        c->markAsJumbo();
                        // we increment movedCount so we do another round right away
                        movedCount++;
                    }

                }
            }
            catch( const DBException& ex ) {
                warning() << "could not move chunk " << chunkInfo.chunk.toString()
                          << ", continuing balancing round" << causedBy( ex ) << endl;
            }
        }

        return movedCount;
    }
Example #30
        void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
            const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
            map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards
            try {
                while ( d.moreJSObjs() ) {
                    BSONObj o = d.nextJsObj();
                    if ( ! manager->hasShardKey( o ) ) {

                        bool bad = true;

                        if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                            BSONObjBuilder b;
                            b.appendOID( "_id" , 0 , true );
                            b.appendElements( o );
                            o = b.obj();
                            bad = ! manager->hasShardKey( o );
                        }

                        if ( bad ) {
                            log() << "tried to insert object with no valid shard key: " << r.getns() << "  " << o << endl;
                            uasserted( 8011 , "tried to insert object with no valid shard key" );
                        }

                    }

                    // Many operations benefit from having the shard key early in the object
                    o = manager->getShardKey().moveToFront(o);
                    insertsForChunk[manager->findChunk(o)].push_back(o);
                }
                for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin(); it != insertsForChunk.end(); ++it) {
                    ChunkPtr c = it->first;
                    vector<BSONObj> objs = it->second;
                    const int maxTries = 30;

                    bool gotThrough = false;
                    for ( int i=0; i<maxTries; i++ ) {
                        try {
                            LOG(4) << "  server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;
                            insert( c->getShard() , r.getns() , objs , flags);

                            int bytesWritten = 0;
                            for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                                r.gotInsert(); // Record the correct number of individual inserts
                                bytesWritten += (*vecIt).objsize();
                            }
                            if ( r.getClientInfo()->autoSplitOk() )
                                c->splitIfShould( bytesWritten );
                            gotThrough = true;
                            break;
                        }
                        catch ( StaleConfigException& e ) {
                            int logLevel = i < ( maxTries / 2 );
                            LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents because of StaleConfigException: " << e << endl;
                            r.reset();

                            manager = r.getChunkManager();
                            if( ! manager ) {
                                uasserted(14804, "collection no longer sharded");
                            }

                            unsigned long long old = manager->getSequenceNumber();
                            
                            LOG( logLevel ) << "  sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                        }
                        sleepmillis( i * 20 );
                    }

                    assert( inShutdown() || gotThrough ); // not caught below
                }
            } catch (const UserException&){
                if (!d.moreJSObjs()){
                    throw;
                }
                // Ignore and keep going. ContinueOnError is implied with sharding.
            }
        }