Example no. 1
void PartCorresponder::match1DGridChunk( QVector< QVector<SliceChunk> > sortedChunk, const QVector<ParticleMesh *> & input, QVector<Particles> & particles )
{
	QVector< QVector<SliceChunk> > readyChunkPairs;

	auto & sortedChunkFront = sortedChunk.front();
	auto & sortedChunkBack = sortedChunk.back();

	// Different number of chunks
	if( sortedChunkFront.size() != sortedChunkBack.size() )
	{
		int targetSize = std::min( sortedChunkFront.size(), sortedChunkBack.size() );

		if(targetSize == 1)
		{
			QVector<SliceChunk> chunkPairs;
			if( sortedChunkFront.size() == 1 ) chunkPairs.push_back( sortedChunkFront.front() );
			else chunkPairs.push_back( mergeChunks( sortedChunkFront, input.front(), particles.front() ) );

			if( sortedChunkBack.size() == 1) chunkPairs.push_back( sortedChunkBack.front() );
			else chunkPairs.push_back( mergeChunks( sortedChunkBack, input.back(), particles.back() ) );

			readyChunkPairs.push_back( chunkPairs );
		}
		else
		{
			// For now we use basic matching; later we should either split or merge
			for(auto v : distributeVectors(sortedChunkFront.size(), sortedChunkBack.size()))
			{
				QVector<SliceChunk> p;
				p << sortedChunkFront[v.first] << sortedChunkBack[v.second];
				readyChunkPairs.push_back( p );
			}
		}
	}
	else
	{
		// Same number of elements, simply match them up
		for(int i = 0; i < sortedChunkFront.size(); i++)
		{
			readyChunkPairs.push_back( QVector<SliceChunk>() << sortedChunkFront.at(i) << sortedChunkBack.at(i) );
		}
	}

	// Match each pair of chunks
	for(auto & pairChunk : readyChunkPairs)
		matchChunk(pairChunk, input, particles);
}
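
The unequal-count branch above delegates the actual index pairing to distributeVectors(), whose implementation is not shown here. The following is a minimal, self-contained sketch of one way such a pairing could be produced; the name distributeVectorsSketch, the proportional spreading strategy, and the use of STL containers instead of Qt's are assumptions for illustration, not the project's actual helper.

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Hypothetical sketch: pair every index of the larger range [0, max(n, m))
// with a proportionally scaled index of the smaller range, so the result
// contains max(n, m) (front, back) index pairs covering both sides.
static std::vector<std::pair<int, int>> distributeVectorsSketch(int n, int m)
{
    std::vector<std::pair<int, int>> pairs;
    int larger = std::max(n, m), smaller = std::min(n, m);
    for (int i = 0; i < larger; ++i)
    {
        int j = (smaller * i) / larger;          // map i in [0, larger) onto [0, smaller)
        if (n >= m) pairs.push_back({ i, j });   // .first indexes the front list, .second the back list
        else        pairs.push_back({ j, i });
    }
    return pairs;
}

int main()
{
    for (const auto& p : distributeVectorsSketch(5, 2))
        std::printf("(%d, %d)\n", p.first, p.second);
    return 0;
}

Under this assumed strategy every chunk on the larger side gets matched to some chunk on the smaller side, which is consistent with the "basic matching" comment in the branch above.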
Example no. 2
std::shared_ptr<MemChunk>
SyntheticDimChunkMerger::getMergedChunk(AttributeID attId,
                                        const std::shared_ptr<Query>& query)
{
    std::shared_ptr<MemChunk> result;
    std::shared_ptr<ChunkIterator> dstIterator;

    for (std::vector<std::shared_ptr<MemChunk> >::iterator chunkIt = _partialChunks.begin();
         chunkIt != _partialChunks.end(); ++chunkIt) {
        std::shared_ptr<MemChunk>& chunk = *chunkIt;
        if (!chunk) {
            continue;
        }
        if (!result) {
            result = chunk;
            // During redim, there is always an empty tag, and the chunk can't be sparse.
            assert(result->getArrayDesc().getEmptyBitmapAttribute());
            continue;
        }
        result->setCount(0); // element count becomes unknown once we start merging

        if (!dstIterator) {
            _syntheticDimHelper.updateMapCoordToCount(result.get());
            dstIterator = result->getIterator(query,
                                              ChunkIterator::APPEND_CHUNK|
                                              ChunkIterator::APPEND_EMPTY_BITMAP|
                                              ChunkIterator::NO_EMPTY_CHECK);
        }
        mergeChunks(dstIterator, chunk);
        chunk.reset();
    }
    if (dstIterator) {
        dstIterator->flush();
        dstIterator.reset();
    }
    clear();
    checkChunkMagic(*result);
    return result;
}
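
The structure of getMergedChunk() — treat the first non-null partial chunk as the destination, lazily open a single append iterator over it, fold every later partial into it, and flush once at the end — can be modelled outside SciDB. The sketch below mirrors that control flow with plain STL types; the Chunk struct and mergeInto() helper are inventions for illustration, not the SciDB API.

#include <cstdio>
#include <memory>
#include <vector>

// Toy stand-in for a partial chunk: just a bag of values.
struct Chunk { std::vector<int> values; };

// Hypothetical helper: append the source chunk's payload to the destination.
static void mergeInto(Chunk& dst, const Chunk& src)
{
    dst.values.insert(dst.values.end(), src.values.begin(), src.values.end());
}

// Same shape as getMergedChunk(): the first non-null partial becomes the
// result, the rest are merged into it and released as we go.
static std::shared_ptr<Chunk> mergePartials(std::vector<std::shared_ptr<Chunk>>& partials)
{
    std::shared_ptr<Chunk> result;
    for (auto& chunk : partials)
    {
        if (!chunk) continue;                       // skip slots that never received data
        if (!result) { result = chunk; continue; }  // first non-null chunk is the destination
        mergeInto(*result, *chunk);
        chunk.reset();                              // free the partial once consumed
    }
    partials.clear();
    return result;
}

int main()
{
    std::vector<std::shared_ptr<Chunk>> partials = {
        nullptr,
        std::make_shared<Chunk>(Chunk{ { 1, 2 } }),
        std::make_shared<Chunk>(Chunk{ { 3 } })
    };
    auto merged = mergePartials(partials);
    for (int v : merged->values) std::printf("%d ", v);
    std::printf("\n");
    return 0;
}

The sketch deliberately omits the empty-bitmap and element-count bookkeeping that the real code handles through the APPEND_* iterator flags and the setCount(0) call above.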
Example no. 3
        bool run( const string& dbname,
                  BSONObj& cmdObj,
                  int,
                  string& errmsg,
                  BSONObjBuilder& result,
                  bool ) {

            string ns;
            if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
                return false;
            }

            if ( ns.size() == 0 ) {
                errmsg = "no namespace specified";
                return false;
            }

            vector<BSONObj> bounds;
            if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
                return false;
            }

            if ( bounds.size() == 0 ) {
                errmsg = "no bounds were specified";
                return false;
            }

            if ( bounds.size() != 2 ) {
                errmsg = "only a min and max bound may be specified";
                return false;
            }

            BSONObj minKey = bounds[0];
            BSONObj maxKey = bounds[1];

            if ( minKey.isEmpty() ) {
                errmsg = "no min key specified";
                return false;
            }

            if ( maxKey.isEmpty() ) {
                errmsg = "no max key specified";
                return false;
            }

            //
            // This might be the first call from mongos, so we may need to pass the config and shard
            // information to initialize the shardingState.
            //

            string config;
            FieldParser::FieldState extracted = FieldParser::extract( cmdObj,
                                                                      configField,
                                                                      &config,
                                                                      &errmsg );
            if ( !extracted ) return false;
            if ( extracted != FieldParser::FIELD_NONE ) {
                ShardingState::initialize( config );
            }
            else if ( !shardingState.enabled() ) {
                errmsg =
                    "sharding state must be enabled or config server specified to merge chunks";
                return false;
            }

            // ShardName is optional, but might not be set yet
            string shardName;
            extracted = FieldParser::extract( cmdObj, shardNameField, &shardName, &errmsg );

            if ( !extracted ) return false;
            if ( extracted != FieldParser::FIELD_NONE ) {
                shardingState.gotShardName( shardName );
            }

            //
            // Epoch is optional, and if not set indicates we should use the latest epoch
            //

            OID epoch;
            if ( !FieldParser::extract( cmdObj, epochField, &epoch, &errmsg ) ) {
                return false;
            }

            return mergeChunks( NamespaceString( ns ), minKey, maxKey, epoch, true, &errmsg );
        }
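
The config and shardName handling above depends on FieldParser::extract() returning a tri-state result, so that "field absent" (FIELD_NONE) can be told apart from "field present but malformed". The sketch below reproduces that required-versus-optional pattern with a hypothetical ExtractState enum and a std::map standing in for the BSON command object; none of it is the real FieldParser or BSON API.

#include <cstdio>
#include <map>
#include <string>

// Hypothetical tri-state result, mirroring FieldParser::FIELD_NONE versus a
// successful or failed extraction in the command above.
enum class ExtractState { Invalid, Absent, Set };

// Toy extractor: a std::map stands in for the BSON command object.
static ExtractState extractString(const std::map<std::string, std::string>& cmd,
                                  const std::string& field,
                                  std::string* out,
                                  std::string* errmsg)
{
    auto it = cmd.find(field);
    if (it == cmd.end()) return ExtractState::Absent;  // optional fields may simply be missing
    if (it->second.empty())                            // stand-in for a type/parse error
    {
        *errmsg = "field '" + field + "' is malformed";
        return ExtractState::Invalid;
    }
    *out = it->second;
    return ExtractState::Set;
}

static bool runSketch(const std::map<std::string, std::string>& cmd, std::string& errmsg)
{
    // Required field: absent or invalid are both fatal.
    std::string ns;
    if (extractString(cmd, "ns", &ns, &errmsg) != ExtractState::Set)
    {
        if (errmsg.empty()) errmsg = "no namespace specified";
        return false;
    }

    // Optional field: only an invalid value is fatal, absence falls through.
    std::string config;
    ExtractState extracted = extractString(cmd, "config", &config, &errmsg);
    if (extracted == ExtractState::Invalid) return false;
    if (extracted == ExtractState::Set)
        std::printf("would initialize sharding state from '%s'\n", config.c_str());

    std::printf("would merge chunks of '%s'\n", ns.c_str());
    return true;
}

int main()
{
    std::string errmsg;
    if (!runSketch({ { "ns", "db.coll" } }, errmsg))
        std::printf("error: %s\n", errmsg.c_str());
    return 0;
}

As in the command above, a missing optional field simply falls through, while a malformed one fails the whole call and reports an error message.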