MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
    OperationContext* txn,
    const vector<MigrateInfo>& migrateInfos,
    uint64_t maxChunkSizeBytes,
    const MigrationSecondaryThrottleOptions& secondaryThrottle,
    bool waitForDelete) {
    vector<std::pair<shared_ptr<Notification<Status>>, MigrateInfo>> responses;

    for (const auto& migrateInfo : migrateInfos) {
        responses.emplace_back(_schedule(txn,
                                         migrateInfo,
                                         false,  // Config server takes the collection dist lock
                                         maxChunkSizeBytes,
                                         secondaryThrottle,
                                         waitForDelete),
                               migrateInfo);
    }

    MigrationStatuses migrationStatuses;

    vector<MigrateInfo> rescheduledMigrations;

    // Wait for all the scheduled migrations to complete and note the ones that failed with a
    // LockBusy error code. These need to be executed serially, without the distributed lock being
    // held by the config server, for backwards compatibility with 3.2 shards.
    for (auto& response : responses) {
        auto notification = std::move(response.first);
        auto migrateInfo = std::move(response.second);

        Status responseStatus = notification->get();

        if (responseStatus == ErrorCodes::LockBusy) {
            rescheduledMigrations.emplace_back(std::move(migrateInfo));
        } else {
            migrationStatuses.emplace(migrateInfo.getName(), std::move(responseStatus));
        }
    }

    // Schedule all 3.2 compatibility migrations sequentially
    for (const auto& migrateInfo : rescheduledMigrations) {
        Status responseStatus = _schedule(txn,
                                          migrateInfo,
                                          true,  // Shard takes the collection dist lock
                                          maxChunkSizeBytes,
                                          secondaryThrottle,
                                          waitForDelete)
                                    ->get();

        migrationStatuses.emplace(migrateInfo.getName(), std::move(responseStatus));
    }

    invariant(migrationStatuses.size() == migrateInfos.size());

    return migrationStatuses;
}
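The scatter/gather in this first version hinges on _schedule returning immediately with a shared_ptr<Notification<Status>>: all migrations are kicked off up front, and only then does the loop block on each result, so independent moves run in parallel while LockBusy failures are retried serially. For readers unfamiliar with the primitive, below is a minimal sketch of such a one-shot Notification built only on standard library types. This is an illustration of the idea, not MongoDB's implementation, which lives in its own utility headers and differs in detail.

#include <condition_variable>
#include <mutex>
#include <optional>

// One-shot notification: set() publishes a value exactly once; get() blocks
// the calling thread until that value is available, then returns it.
template <typename T>
class OneShotNotification {
public:
    void set(T value) {
        std::lock_guard<std::mutex> lk(_mutex);
        _value.emplace(std::move(value));
        _condVar.notify_all();
    }

    const T& get() {
        std::unique_lock<std::mutex> lk(_mutex);
        _condVar.wait(lk, [this] { return _value.has_value(); });
        return *_value;
    }

private:
    std::mutex _mutex;
    std::condition_variable _condVar;
    std::optional<T> _value;
};

Because get() on an already-set notification returns without blocking, the order in which the gather loop waits on the responses does not matter; total wall-clock time is bounded by the slowest migration rather than the sum of all of them.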
MigrationStatuses MigrationManager::executeMigrationsForAutoBalance(
    OperationContext* txn,
    const vector<MigrateInfo>& migrateInfos,
    uint64_t maxChunkSizeBytes,
    const MigrationSecondaryThrottleOptions& secondaryThrottle,
    bool waitForDelete) {
    MigrationStatuses migrationStatuses;

    {
        std::map<MigrationIdentifier, ScopedMigrationRequest> scopedMigrationRequests;
        vector<std::pair<shared_ptr<Notification<RemoteCommandResponse>>, MigrateInfo>> responses;

        for (const auto& migrateInfo : migrateInfos) {
            // Write a document to the config.migrations collection, in case this migration must be
            // recovered by the Balancer. Fail if the chunk is already moving.
            auto statusWithScopedMigrationRequest =
                ScopedMigrationRequest::writeMigration(txn, migrateInfo, waitForDelete);
            if (!statusWithScopedMigrationRequest.isOK()) {
                migrationStatuses.emplace(migrateInfo.getName(),
                                          std::move(statusWithScopedMigrationRequest.getStatus()));
                continue;
            }
            scopedMigrationRequests.emplace(migrateInfo.getName(),
                                            std::move(statusWithScopedMigrationRequest.getValue()));

            responses.emplace_back(
                _schedule(txn, migrateInfo, maxChunkSizeBytes, secondaryThrottle, waitForDelete),
                migrateInfo);
        }

        // Wait for all the scheduled migrations to complete.
        for (auto& response : responses) {
            auto notification = std::move(response.first);
            auto migrateInfo = std::move(response.second);

            const auto& remoteCommandResponse = notification->get();

            auto it = scopedMigrationRequests.find(migrateInfo.getName());
            invariant(it != scopedMigrationRequests.end());
            Status commandStatus =
                _processRemoteCommandResponse(remoteCommandResponse, &it->second);
            migrationStatuses.emplace(migrateInfo.getName(), std::move(commandStatus));
        }
    }

    invariant(migrationStatuses.size() == migrateInfos.size());

    return migrationStatuses;
}
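The second version drops the LockBusy/3.2 rescheduling path and instead persists each migration to config.migrations before scheduling it, so a restarted Balancer can recover in-flight migrations. The ScopedMigrationRequest class itself is not shown above, but the ownership pattern its use implies is plain RAII: the guard represents a persisted document and removes it when the attempt goes out of scope. Below is a minimal sketch of that pattern; the class name, RemoveMigrationDocFn callback, and constructor shape are hypothetical stand-ins, and the real class talks to the config.migrations collection and also supports deliberately keeping the document alive for recovery.

#include <functional>
#include <string>
#include <utility>

// Hypothetical callback type standing in for "delete this chunk's document
// from config.migrations".
using RemoveMigrationDocFn = std::function<void(const std::string& chunkName)>;

// RAII sketch: the guard owns a persisted migration document and removes it
// when the migration attempt ends, whether it succeeded or failed.
class ScopedMigrationRequestSketch {
public:
    ScopedMigrationRequestSketch(std::string chunkName, RemoveMigrationDocFn removeFn)
        : _chunkName(std::move(chunkName)), _removeFn(std::move(removeFn)) {}

    // Movable but not copyable, so exactly one owner cleans up the document.
    ScopedMigrationRequestSketch(ScopedMigrationRequestSketch&& other) noexcept
        : _chunkName(std::move(other._chunkName)), _removeFn(std::move(other._removeFn)) {
        other._removeFn = nullptr;  // Moved-from guard no longer owns the document.
    }
    ScopedMigrationRequestSketch(const ScopedMigrationRequestSketch&) = delete;
    ScopedMigrationRequestSketch& operator=(const ScopedMigrationRequestSketch&) = delete;

    ~ScopedMigrationRequestSketch() {
        if (_removeFn) {
            _removeFn(_chunkName);  // Clean up the persisted migration document.
        }
    }

private:
    std::string _chunkName;
    RemoveMigrationDocFn _removeFn;
};

This is why the guards are collected in the scopedMigrationRequests map inside the inner block: they outlive the gather loop that consumes each RemoteCommandResponse, and every persisted document is released exactly once when the block closes.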