void ShardingTestFixture::expectUpdateCollection(const HostAndPort& expectedHost,
                                                 const CollectionType& coll) {
    onCommand([&](const RemoteCommandRequest& request) {
        ASSERT_EQUALS(expectedHost, request.target);
        ASSERT_EQUALS(BSON(rpc::kReplSetMetadataFieldName << 1), request.metadata);
        ASSERT_EQUALS("config", request.dbname);

        // The request must parse as a batched update against config.collections
        // containing exactly one update document.
        BatchedUpdateRequest actualBatchedUpdate;
        std::string errmsg;
        ASSERT_TRUE(actualBatchedUpdate.parseBSON(request.dbname, request.cmdObj, &errmsg));
        ASSERT_EQUALS(CollectionType::ConfigNS, actualBatchedUpdate.getNS().ns());
        auto updates = actualBatchedUpdate.getUpdates();
        ASSERT_EQUALS(1U, updates.size());
        auto update = updates.front();

        // Expect a single-document upsert of the full collection entry, keyed by
        // the serialized namespace.
        ASSERT_TRUE(update->getUpsert());
        ASSERT_FALSE(update->getMulti());
        ASSERT_EQUALS(update->getQuery(), BSON(CollectionType::fullNs(coll.getNs().toString())));
        ASSERT_EQUALS(update->getUpdateExpr(), coll.toBSON());

        BatchedCommandResponse response;
        response.setOk(true);
        response.setNModified(1);

        return response.toBSON();
    });
}
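// A usage sketch for the expectation above, assuming a test built on
// ShardingTestFixture; the test name, collection contents, and target host are
// hypothetical. The catalog write has to run on a separate thread via
// launchAsync() so the main thread is free to answer the mocked network
// request through expectUpdateCollection().
TEST_F(ShardingTestFixture, UpdateCollectionSketch) {
    CollectionType coll;
    coll.setNs(NamespaceString("db.coll"));
    coll.setKeyPattern(KeyPattern(BSON("_id" << 1)));
    coll.setEpoch(OID::gen());
    coll.setUpdatedAt(Date_t::now());
    ASSERT_OK(coll.validate());

    auto future = launchAsync([&] {
        ASSERT_OK(catalogClient()->updateShardingCatalogEntryForCollection(
            operationContext(), coll.getNs(), coll, true /* upsert */));
    });

    expectUpdateCollection(HostAndPort("TestHost1"), coll);

    future.default_timed_get();
}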
ChunkManager::ChunkManager(const CollectionType& coll)
    : _ns(coll.getNs().ns()),
      _keyPattern(coll.getKeyPattern()),
      _unique(coll.getUnique()),
      _sequenceNumber(NextSequenceNumber.addAndFetch(1)),
      _chunkRanges() {
    // Sets up a chunk manager from an existing sharded collection document; the
    // starting version (with its epoch) is parsed back out of the serialized entry.
    _version = ChunkVersion::fromBSON(coll.toBSON());
}
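// A construction sketch (not from the original source): builds a minimal
// config.collections entry by hand and derives a ChunkManager from it. The
// namespace and shard key are hypothetical; setter names follow the newer
// CollectionType API used in the surrounding snippets.
void buildChunkManagerExample() {
    CollectionType coll;
    coll.setNs(NamespaceString("test.foo"));
    coll.setKeyPattern(KeyPattern(BSON("x" << 1)));
    coll.setUnique(false);
    coll.setEpoch(OID::gen());
    coll.setUpdatedAt(Date_t::now());

    // The constructor copies ns, key pattern, and uniqueness, and re-parses the
    // epoch-bearing version from the serialized entry.
    ChunkManager manager(coll);
}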
Status ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
    OperationContext* opCtx,
    const NamespaceString& nss,
    const CollectionType& coll,
    const bool upsert) {
    fassert(28634, coll.validate());

    // Write the full collection entry to config.collections with majority write
    // concern, keyed by the serialized namespace.
    auto status = _updateConfigDocument(opCtx,
                                        CollectionType::ConfigNS,
                                        BSON(CollectionType::fullNs(nss.ns())),
                                        coll.toBSON(),
                                        upsert,
                                        ShardingCatalogClient::kMajorityWriteConcern);
    return status.getStatus().withContext(str::stream() << "Collection metadata write failed");
}
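// A hypothetical call-site sketch: bumps the entry's updatedAt timestamp and
// persists it through the catalog client interface. The helper name is
// illustrative; opCtx and catalogClient are assumed to come from the caller.
// Upsert is enabled so a missing config.collections document is created rather
// than rejected.
Status touchCollectionEntry(OperationContext* opCtx,
                            ShardingCatalogClient* catalogClient,
                            CollectionType coll) {
    coll.setUpdatedAt(Date_t::now());
    return catalogClient->updateShardingCatalogEntryForCollection(
        opCtx, coll.getNs(), coll, true /* upsert */);
}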
Status CatalogManager::updateCollection(const std::string& collNs, const CollectionType& coll) {
    fassert(28634, coll.validate());

    BatchedCommandResponse response;
    Status status = update(CollectionType::ConfigNS,
                           BSON(CollectionType::fullNs(collNs)),
                           coll.toBSON(),
                           true,   // upsert
                           false,  // multi
                           &response);
    if (!status.isOK()) {
        return Status(status.code(),
                      str::stream() << "collection metadata write failed: " << response.toBSON()
                                    << "; status: " << status.toString());
    }

    return Status::OK();
}
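// A hypothetical call-site sketch for the legacy API above: in this era the
// namespace travels as a plain string and the helper always upserts, so saving
// a brand-new collection entry and rewriting an existing one look identical.
// The wrapper name is illustrative only.
Status saveCollectionEntry(CatalogManager* catalogManager, const CollectionType& coll) {
    return catalogManager->updateCollection(coll.getNS(), coll);
}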
/**
 * Stores ranges for a particular collection and shard starting from some version
 */
void storeCollectionRanges( const NamespaceString& nss,
                            const string& shardName,
                            const vector<KeyRange>& ranges,
                            const ChunkVersion& startVersion ) {
    // Get key pattern from first range
    ASSERT_GREATER_THAN( ranges.size(), 0u );

    CollectionType coll;
    coll.setNS( nss.ns() );
    coll.setKeyPattern( ranges.begin()->keyPattern );
    coll.setEpoch( startVersion.epoch() );
    coll.setUpdatedAt( 1ULL );
    string errMsg;
    ASSERT( coll.isValid( &errMsg ) );

    // Upsert the collection entry, then insert one chunk document per range,
    // bumping the major version for each successive chunk.
    DBDirectClient client(&_txn);

    client.update( CollectionType::ConfigNS,
                   BSON( CollectionType::ns( coll.getNS() ) ),
                   coll.toBSON(),
                   true,
                   false );

    ChunkVersion nextVersion = startVersion;
    for ( vector<KeyRange>::const_iterator it = ranges.begin(); it != ranges.end(); ++it ) {
        ChunkType chunk;
        // TODO: We should not rely on the serialized ns, minkey being unique in the future,
        // causes problems since it links string serialization to correctness.
        chunk.setName( Chunk::genID( nss, it->minKey ) );
        chunk.setShard( shardName );
        chunk.setNS( nss.ns() );
        chunk.setVersion( nextVersion );
        chunk.setMin( it->minKey );
        chunk.setMax( it->maxKey );
        nextVersion.incMajor();

        client.insert( ChunkType::ConfigNS, chunk.toBSON() );
    }
}
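// A hypothetical usage sketch for the helper above, as it might appear inside a
// test body: seeds two contiguous chunks for "foo.bar" on a single shard,
// producing versions startVersion and startVersion + 1 major. The KeyRange
// constructor argument order ( ns, minKey, maxKey, keyPattern ) is assumed from
// the fields the helper reads.
ChunkVersion startVersion( 1, 0, OID::gen() );
vector<KeyRange> ranges;
ranges.push_back( KeyRange( "foo.bar",
                            BSON( "x" << MINKEY ),
                            BSON( "x" << 0 ),
                            BSON( "x" << 1 ) ) );
ranges.push_back( KeyRange( "foo.bar",
                            BSON( "x" << 0 ),
                            BSON( "x" << MAXKEY ),
                            BSON( "x" << 1 ) ) );
storeCollectionRanges( NamespaceString( "foo.bar" ), "shard0000", ranges, startVersion );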