BSONObj BalanceChunkRequest::serializeToRebalanceCommandForConfig(const ChunkType& chunk) {
    invariantOK(chunk.validate());

    BSONObjBuilder cmdBuilder;
    cmdBuilder.append(kConfigSvrMoveChunk, 1);
    cmdBuilder.appendElements(chunk.toBSON());

    return cmdBuilder.obj();
}
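// Hedged usage sketch (not from the source; the chunk values and the ShardId
// construction are illustrative): populate a ChunkType with the setters used
// elsewhere in this section, then serialize the config server rebalance command.
namespace {
BSONObj exampleRebalanceCommand() {
    ChunkType chunk;
    chunk.setNS("test.foo");
    chunk.setMin(BSON("x" << MINKEY));
    chunk.setMax(BSON("x" << 100));
    chunk.setShard(ShardId("shard0000"));
    chunk.setVersion(ChunkVersion(1, 0, OID::gen()));
    chunk.setName(Chunk::genID("test.foo", chunk.getMin()));

    // Result is { _configsvrMoveChunk: 1, <fields from chunk.toBSON()> },
    // assuming kConfigSvrMoveChunk carries the "_configsvrMoveChunk" field name.
    return BalanceChunkRequest::serializeToRebalanceCommandForConfig(chunk);
}
}  // namespace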
BSONObj buildOpMergeChunk(const ChunkType& mergedChunk) {
    BSONObjBuilder opB;

    // Op basics
    opB.append("op", "u");
    opB.appendBool("b", false);  // no upserting
    opB.append("ns", ChunkType::ConfigNS);

    // New object
    opB.append("o", mergedChunk.toBSON());

    // Query object
    opB.append("o2", BSON(ChunkType::name(mergedChunk.getName())));

    return opB.obj();
}
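// Illustrative result (assuming ChunkType::ConfigNS is "config.chunks" and
// ChunkType::name() maps to the chunk document's "_id"): the builder above
// yields an applyOps-style update op such as
//
//   { op: "u", b: false, ns: "config.chunks",
//     o:  { _id: "test.foo-x_MinKey", ns: "test.foo", min: ..., max: ..., ... },
//     o2: { _id: "test.foo-x_MinKey" } }
//
// i.e. a non-upserting replacement of the merged chunk's document, keyed by _id.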
Status ChunkManager::createFirstChunks(OperationContext* txn,
                                       const ShardId& primaryShardId,
                                       const vector<BSONObj>* initPoints,
                                       const set<ShardId>* initShardIds) {
    // TODO distlock?
    // TODO: Race condition if we shard the collection and insert data while we split across
    // the non-primary shard.

    vector<BSONObj> splitPoints;
    vector<ShardId> shardIds;
    calcInitSplitsAndShards(txn, primaryShardId, initPoints, initShardIds, &splitPoints, &shardIds);

    // this is the first chunk; start the versioning from scratch
    ChunkVersion version(1, 0, OID::gen());

    log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << _ns
          << " using new epoch " << version.epoch();

    for (unsigned i = 0; i <= splitPoints.size(); i++) {
        BSONObj min = i == 0 ? _keyPattern.getKeyPattern().globalMin() : splitPoints[i - 1];
        BSONObj max =
            i < splitPoints.size() ? splitPoints[i] : _keyPattern.getKeyPattern().globalMax();

        ChunkType chunk;
        chunk.setName(Chunk::genID(_ns, min));
        chunk.setNS(_ns);
        chunk.setMin(min);
        chunk.setMax(max);
        chunk.setShard(shardIds[i % shardIds.size()]);
        chunk.setVersion(version);

        Status status = grid.catalogManager(txn)->insertConfigDocument(
            txn, ChunkType::ConfigNS, chunk.toBSON());
        if (!status.isOK()) {
            const string errMsg = str::stream() << "Creating first chunks failed: "
                                                << status.reason();
            error() << errMsg;
            return Status(status.code(), errMsg);
        }

        version.incMinor();
    }

    _version = ChunkVersion(0, 0, version.epoch());

    return Status::OK();
}
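// Worked example (illustrative): with two split points [{x: 0}, {x: 100}] the
// loop above runs for i = 0..2 and inserts three chunks covering the entire
// key space:
//
//   i = 0: [{x: MinKey}, {x: 0})        version 1|0
//   i = 1: [{x: 0},      {x: 100})      version 1|1
//   i = 2: [{x: 100},    {x: MaxKey})   version 1|2
//
// Chunks are assigned round-robin across shardIds (i % shardIds.size()), and
// only the minor version is bumped because every chunk shares the new epoch.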
void CommitChunkMigrationRequest::appendAsCommand(
    BSONObjBuilder* builder,
    const NamespaceString& nss,
    const ShardId& fromShard,
    const ShardId& toShard,
    const ChunkType& migratedChunkType,
    const boost::optional<ChunkType>& controlChunkType) {
    invariant(builder->asTempObj().isEmpty());
    invariant(nss.isValid());

    builder->append(kConfigSvrCommitChunkMigration, nss.ns());
    builder->append(kFromShard, fromShard.toString());
    builder->append(kToShard, toShard.toString());
    builder->append(kMigratedChunk, migratedChunkType.toBSON());

    if (controlChunkType) {
        builder->append(kControlChunk, controlChunkType->toBSON());
    }
}
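// Illustrative command document (field names assume the k-constants follow the
// usual convention, e.g. kConfigSvrCommitChunkMigration ==
// "_configsvrCommitChunkMigration"):
//
//   { _configsvrCommitChunkMigration: "test.foo",
//     fromShard: "shard0000",
//     toShard: "shard0001",
//     migratedChunk: { <chunk fields> },
//     controlChunk: { <chunk fields> } }   // absent when controlChunkType is boost::none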
BSONObj buildMergeLogEntry( const OwnedPointerVector<ChunkType>& chunksToMerge,
                            const ChunkVersion& currShardVersion,
                            const ChunkVersion& newMergedVersion ) {
    BSONObjBuilder logDetailB;

    BSONArrayBuilder mergedB( logDetailB.subarrayStart( "merged" ) );

    for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
          it != chunksToMerge.end(); ++it ) {
        ChunkType* chunkToMerge = *it;
        mergedB.append( chunkToMerge->toBSON() );
    }

    mergedB.done();

    currShardVersion.addToBSON( logDetailB, "prevShardVersion" );
    newMergedVersion.addToBSON( logDetailB, "mergedVersion" );

    return logDetailB.obj();
}
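// Illustrative detail document (values elided): the change-log entry records
// every pre-merge chunk plus the shard version before and after the merge,
// assuming ChunkVersion::addToBSON serializes as the usual [Timestamp, epoch]
// pair:
//
//   { merged: [ { min: ..., max: ..., ... }, { min: ..., max: ..., ... } ],
//     prevShardVersion: [ Timestamp(1, 2), ObjectId("...") ],
//     mergedVersion:    [ Timestamp(2, 0), ObjectId("...") ] }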
/**
 * Stores ranges for a particular collection and shard starting from some version
 */
void storeCollectionRanges( const NamespaceString& nss,
                            const string& shardName,
                            const vector<KeyRange>& ranges,
                            const ChunkVersion& startVersion ) {
    // Get key pattern from first range
    ASSERT_GREATER_THAN( ranges.size(), 0u );

    CollectionType coll;
    coll.setNS( nss.ns() );
    coll.setKeyPattern( ranges.begin()->keyPattern );
    coll.setEpoch( startVersion.epoch() );
    coll.setUpdatedAt( 1ULL );

    string errMsg;
    ASSERT( coll.isValid( &errMsg ) );

    DBDirectClient client(&_txn);

    client.update( CollectionType::ConfigNS,
                   BSON( CollectionType::ns( coll.getNS() ) ),
                   coll.toBSON(), true, false );

    ChunkVersion nextVersion = startVersion;
    for ( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
        ChunkType chunk;
        // TODO: We should not rely on the serialized ns, minkey being unique in the future,
        // causes problems since it links string serialization to correctness.
        chunk.setName( Chunk::genID( nss, it->minKey ) );
        chunk.setShard( shardName );
        chunk.setNS( nss.ns() );
        chunk.setVersion( nextVersion );
        chunk.setMin( it->minKey );
        chunk.setMax( it->maxKey );
        nextVersion.incMajor();

        client.insert( ChunkType::ConfigNS, chunk.toBSON() );
    }
}
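// Hedged usage sketch for a test body (the KeyRange constructor shown here is an
// assumption; only its minKey/maxKey/keyPattern members are read above):
//
//   vector<KeyRange> ranges;
//   ranges.push_back(KeyRange("foo.bar", BSON("x" << 0),  BSON("x" << 10), BSON("x" << 1)));
//   ranges.push_back(KeyRange("foo.bar", BSON("x" << 10), BSON("x" << 20), BSON("x" << 1)));
//   storeCollectionRanges(NamespaceString("foo.bar"), "shard0000",
//                         ranges, ChunkVersion(1, 0, OID::gen()));
//
// This upserts one config.collections entry and inserts two config.chunks
// documents with versions 1|0 and 2|0 (the major version is bumped per chunk).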
BSONObj BalanceChunkRequest::serializeToMoveCommandForConfig(
    const ChunkType& chunk,
    const ShardId& newShardId,
    int64_t maxChunkSizeBytes,
    const MigrationSecondaryThrottleOptions& secondaryThrottle,
    bool waitForDelete) {
    invariantOK(chunk.validate());

    BSONObjBuilder cmdBuilder;
    cmdBuilder.append(kConfigSvrMoveChunk, 1);
    cmdBuilder.appendElements(chunk.toBSON());
    cmdBuilder.append(kToShardId, newShardId);
    cmdBuilder.append(kMaxChunkSizeBytes, static_cast<long long>(maxChunkSizeBytes));
    {
        BSONObjBuilder secondaryThrottleBuilder(cmdBuilder.subobjStart(kSecondaryThrottle));
        secondaryThrottle.append(&secondaryThrottleBuilder);
        secondaryThrottleBuilder.doneFast();
    }
    cmdBuilder.append(kWaitForDelete, waitForDelete);

    return cmdBuilder.obj();
}
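// Illustrative command document (field names assume conventional values for the
// k-constants; the secondaryThrottle body depends on what
// MigrationSecondaryThrottleOptions::append writes):
//
//   { _configsvrMoveChunk: 1, <fields from chunk.toBSON()>,
//     toShard: "shard0001",
//     maxChunkSizeBytes: NumberLong(67108864),
//     secondaryThrottle: { <throttle options> },
//     waitForDelete: false }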