bool WriteBatchExecutor::applyUpdate( const string& ns,
                                      const BatchedUpdateDocument& updateOp,
                                      CurOp* currentOp,
                                      WriteStats* stats,
                                      BatchedErrorDetail* error ) {
    OpDebug& opDebug = currentOp->debug();

    _opCounters->gotUpdate();

    BSONObj queryObj = updateOp.getQuery();
    BSONObj updateObj = updateOp.getUpdateExpr();
    bool multi = updateOp.isMultiSet() ? updateOp.getMulti() : false;
    bool upsert = updateOp.isUpsertSet() ? updateOp.getUpsert() : false;

    currentOp->setQuery( queryObj );
    opDebug.op = dbUpdate;
    opDebug.query = queryObj;

    bool resExisting = false;
    long long resNum = 0;
    OID resUpserted = OID();
    try {
        const NamespaceString requestNs( ns );
        UpdateRequest request( requestNs );

        request.setQuery( queryObj );
        request.setUpdates( updateObj );
        request.setUpsert( upsert );
        request.setMulti( multi );
        request.setUpdateOpLog();

        UpdateResult res = update( request, &opDebug );

        resExisting = res.existing;
        resNum = res.numMatched;
        resUpserted = res.upserted;

        // An unset OID means no document was upserted; count matched updates instead.
        stats->numUpdated += !resUpserted.isSet() ? resNum : 0;
        stats->numUpserted += resUpserted.isSet() ? 1 : 0;
    }
    catch ( const UserException& ex ) {
        opDebug.exceptionInfo = ex.getInfo();
        toBatchedError( ex, error );
        return false;
    }

    _le->recordUpdate( resExisting, resNum, resUpserted );

    return true;
}
void ClientCursor::updateSlaveLocation(OperationContext* txn, CurOp& curop) {
    if (_slaveReadTill.isNull())
        return;

    verify(str::startsWith(_ns.c_str(), "local.oplog."));

    Client* c = curop.getClient();
    verify(c);

    OID rid = c->getRemoteID();
    if (!rid.isSet())
        return;

    repl::getGlobalReplicationCoordinator()->setLastOptimeForSlave(rid, _slaveReadTill);
}
void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) {
    static mongo::mutex xxx( "WriteBackManager::queueWriteBack tmp" );
    static OID lastOID;

    scoped_lock lk( xxx );
    const BSONElement& e = o["id"];

    // Writeback ids are expected to arrive in increasing OID order; flag
    // (and stack-trace) any regression.
    if ( lastOID.isSet() ) {
        if ( e.OID() < lastOID ) {
            log() << "this could fail" << endl;
            printStackTrace();
        }
    }
    lastOID = e.OID();

    getWritebackQueue( remote )->queue.push( o );
}
void CollectionType::parseBSON(BSONObj source) {
    clear();

    bool ok = true;
    ok &= FieldParser::extract(source, ns, "", &_ns);
    ok &= FieldParser::extract(source, primary, "", &_primary);
    ok &= FieldParser::extract(source, keyPattern, BSONObj(), &_keyPattern);
    ok &= FieldParser::extract(source, unique, false, &_unique);
    ok &= FieldParser::extract(source, createdAt, 0ULL, &_createdAt);
    ok &= FieldParser::extract(source, noBalance, false, &_noBalance);
    ok &= FieldParser::extract(source, epoch, OID(), &_epoch);
    if (! ok) {
        clear();
        return;
    }

    //
    // backward compatibility
    //

    // 'createdAt' used to be called 'lastmod' up to 2.2.

    Date_t lastmod;
    if (! FieldParser::extract(source, DEPRECATED_lastmod, 0ULL, &lastmod)) {
        clear();
        return;
    }
    else if (lastmod != 0ULL) {
        _createdAt = lastmod;
    }

    // There was a flag to mark a collection as logically dropped, up to 2.2.

    bool dropped;
    if (! FieldParser::extract(source, DEPRECATED_dropped, false, &dropped) || dropped) {
        clear();
        return;
    }

    // 'lastmodEpoch' was a transition format to 'epoch', up to 2.2.

    OID lastmodEpoch;
    if (! FieldParser::extract(source, DEPRECATED_lastmodEpoch, OID(), &lastmodEpoch)) {
        clear();
        return;
    }
    else if (lastmodEpoch.isSet()) {
        // A set (non-zero) epoch from the old field wins over the default.
        _epoch = lastmodEpoch;
    }
}
bool mergeChunks( OperationContext* txn,
                  const NamespaceString& nss,
                  const BSONObj& minKey,
                  const BSONObj& maxKey,
                  const OID& epoch,
                  string* errMsg ) {

    //
    // Get sharding state up-to-date
    //

    ConnectionString configLoc = ConnectionString::parse( shardingState.getConfigServer(),
                                                          *errMsg );
    if ( !configLoc.isValid() ) {
        warning() << *errMsg << endl;
        return false;
    }

    //
    // Get the distributed lock
    //

    ScopedDistributedLock collLock( configLoc, nss.ns() );
    collLock.setLockMessage( stream() << "merging chunks in " << nss.ns()
                                      << " from " << minKey << " to " << maxKey );

    Status acquisitionStatus = collLock.tryAcquire();
    if ( !acquisitionStatus.isOK() ) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy( acquisitionStatus );

        warning() << *errMsg << endl;
        return false;
    }

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState.refreshMetadataNow( txn, nss.ns(), &shardVersion );

    if ( !status.isOK() ) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy( status.reason() );

        warning() << *errMsg << endl;
        return false;
    }

    if ( epoch.isSet() && shardVersion.epoch() != epoch ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " has changed since merge was sent"
                           << " (sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";

        warning() << *errMsg << endl;
        return false;
    }

    CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( nss.ns() );

    if ( !metadata || metadata->getKeyPattern().isEmpty() ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";

        warning() << *errMsg << endl;
        return false;
    }

    dassert( metadata->getShardVersion().equals( shardVersion ) );

    if ( !metadata->isValidKey( minKey ) || !metadata->isValidKey( maxKey ) ) {
        *errMsg = stream() << "could not merge chunks, the range "
                           << rangeToString( minKey, maxKey ) << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();

        warning() << *errMsg << endl;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    OwnedPointerVector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    // Seed both min and max with minKey on purpose: iteration starts from minKey.
    itChunk.setMin( minKey );
    itChunk.setMax( minKey );
    itChunk.setNS( nss.ns() );
    itChunk.setShard( shardingState.getShardName() );

    while ( itChunk.getMax().woCompare( maxKey ) < 0 &&
            metadata->getNextChunk( itChunk.getMax(), &itChunk ) ) {
        auto_ptr<ChunkType> saved( new ChunkType );
        itChunk.cloneTo( saved.get() );
        chunksToMerge.mutableVector().push_back( saved.release() );
    }

    if ( chunksToMerge.empty() ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey
                           << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState.getShardName();

        warning() << *errMsg << endl;
        return false;
    }

    //
    // Validate the range starts and ends at chunks and has no holes, error if not valid
    //

    BSONObj firstDocMin = ( *chunksToMerge.begin() )->getMin();
    BSONObj firstDocMax = ( *chunksToMerge.begin() )->getMax();
    // minKey is inclusive
    bool minKeyInRange = rangeContains( firstDocMin, firstDocMax, minKey );

    if ( !minKeyInRange ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey
                           << " does not belong to shard " << shardingState.getShardName();

        warning() << *errMsg << endl;
        return false;
    }

    BSONObj lastDocMin = ( *chunksToMerge.rbegin() )->getMin();
    BSONObj lastDocMax = ( *chunksToMerge.rbegin() )->getMax();
    // maxKey is exclusive
    bool maxKeyInRange = lastDocMin.woCompare( maxKey ) < 0 &&
                         lastDocMax.woCompare( maxKey ) >= 0;

    if ( !maxKeyInRange ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range ending at " << maxKey
                           << " does not belong to shard " << shardingState.getShardName();

        warning() << *errMsg << endl;
        return false;
    }

    bool validRangeStartKey = firstDocMin.woCompare( minKey ) == 0;
    bool validRangeEndKey = lastDocMax.woCompare( maxKey ) == 0;

    if ( !validRangeStartKey || !validRangeEndKey ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " does not contain a chunk "
                           << ( !validRangeStartKey ? "starting at " + minKey.toString() : "" )
                           << ( !validRangeStartKey && !validRangeEndKey ? " or " : "" )
                           << ( !validRangeEndKey ? "ending at " + maxKey.toString() : "" );

        warning() << *errMsg << endl;
        return false;
    }

    if ( chunksToMerge.size() == 1 ) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " already contains chunk for "
                           << rangeToString( minKey, maxKey );

        warning() << *errMsg << endl;
        return false;
    }

    bool holeInRange = false;

    // Look for a hole in the range
    ChunkType* prevChunk = *chunksToMerge.begin();
    ChunkType* nextChunk = NULL;
    for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
          it != chunksToMerge.end(); ++it ) {
        if ( it == chunksToMerge.begin() ) continue;

        nextChunk = *it;
        if ( prevChunk->getMax().woCompare( nextChunk->getMin() ) != 0 ) {
            holeInRange = true;
            break;
        }
        prevChunk = nextChunk;
    }

    if ( holeInRange ) {
        dassert( NULL != nextChunk );
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " has a hole in the range " << rangeToString( minKey, maxKey )
                           << " at " << rangeToString( prevChunk->getMax(),
                                                       nextChunk->getMin() );

        warning() << *errMsg << endl;
        return false;
    }

    //
    // Run apply ops command
    //

    BSONObj applyOpsCmd = buildApplyOpsCmd( chunksToMerge, shardVersion, mergeVersion );

    bool ok;
    BSONObj result;
    try {
        ScopedDbConnection conn( configLoc, 30.0 );
        ok = conn->runCommand( "config", applyOpsCmd, result );
        if ( !ok ) *errMsg = result.toString();
        conn.done();
    }
    catch ( const DBException& ex ) {
        ok = false;
        *errMsg = ex.toString();
    }

    if ( !ok ) {
        *errMsg = stream() << "could not merge chunks for " << nss.ns()
                           << ", writing to config failed" << causedBy( errMsg );

        warning() << *errMsg << endl;
        return false;
    }

    //
    // Install merged chunk metadata
    //

    {
        Lock::DBLock writeLk( txn->lockState(), nss.db(), newlm::MODE_X );
        shardingState.mergeChunks( txn, nss.ns(), minKey, maxKey, mergeVersion );
    }

    //
    // Log change
    //

    BSONObj mergeLogEntry = buildMergeLogEntry( chunksToMerge, shardVersion, mergeVersion );

    configServer.logChange( "merge", nss.ns(), mergeLogEntry );

    return true;
}
/**
 * Upgrade v3 to v4 described here.
 *
 * This upgrade takes a config server (potentially) without collection epochs and adds
 * epochs for all mongo processes.
 */
bool doUpgradeV3ToV4(const ConnectionString& configLoc,
                     const VersionType& lastVersionInfo,
                     string* errMsg)
{
    string dummy;
    if (!errMsg) errMsg = &dummy;

    verify(lastVersionInfo.getCurrentVersion() == UpgradeHistory_NoEpochVersion);

    if (lastVersionInfo.isUpgradeIdSet() && lastVersionInfo.getUpgradeId().isSet()) {

        //
        // Another upgrade failed, so cleanup may be necessary
        //

        BSONObj lastUpgradeState = lastVersionInfo.getUpgradeState();

        bool inCriticalSection;
        if (!FieldParser::extract(lastUpgradeState,
                                  inCriticalSectionField,
                                  &inCriticalSection,
                                  errMsg))
        {
            *errMsg = stream() << "problem reading previous upgrade state"
                               << causedBy(errMsg);
            return false;
        }

        if (inCriticalSection) {
            // Manual intervention is needed here. Somehow our upgrade didn't get applied
            // consistently across config servers.
            *errMsg = cannotCleanupMessage;
            return false;
        }

        if (!_cleanupUpgradeState(configLoc, lastVersionInfo.getUpgradeId(), errMsg)) {
            // If we can't cleanup the old upgrade state, the user might have done it for
            // us, not a fatal problem (we'll just end up with extra collections).
            warning() << "could not cleanup previous upgrade state"
                      << causedBy(errMsg) << endl;
            *errMsg = "";
        }
    }

    //
    // Check the versions of other mongo processes in the cluster before upgrade.
    // We can't upgrade if there are active pre-v2.2 processes in the cluster.
    //

    Status mongoVersionStatus = checkClusterMongoVersions(configLoc,
                                                          string(minMongoProcessVersion));

    if (!mongoVersionStatus.isOK()) {
        *errMsg = stream() << "cannot upgrade with pre-v" << minMongoProcessVersion
                           << " mongo processes active in the cluster"
                           << causedBy(mongoVersionStatus);
        return false;
    }

    VersionType newVersionInfo;
    lastVersionInfo.cloneTo(&newVersionInfo);

    // Set our upgrade id and state
    OID upgradeId = OID::gen();
    newVersionInfo.setUpgradeId(upgradeId);
    newVersionInfo.setUpgradeState(BSONObj());

    // Write our upgrade id and state
    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            *errMsg = stream() << "could not initialize version info for upgrade"
                               << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // First lock all collection namespaces that exist
    //

    OwnedPointerMap<string, CollectionType> ownedCollections;
    const map<string, CollectionType*>& collections = ownedCollections.map();

    Status findCollectionsStatus = findAllCollectionsV3(configLoc, &ownedCollections);

    if (!findCollectionsStatus.isOK()) {
        *errMsg = stream() << "could not read collections from config server"
                           << causedBy(findCollectionsStatus);
        return false;
    }

    //
    // Acquire locks for all sharded collections.
    // Something that didn't involve getting thousands of locks would be better.
    //

    OwnedPointerVector<ScopedDistributedLock> collectionLocks;

    log() << "acquiring locks for " << collections.size()
          << " sharded collections..." << endl;

    // WARNING - this string is used programmatically when forcing locks, be careful when
    // changing!
    // TODO: Add programmatic "why" field to lock collection
    string lockMessage = str::stream() << "ensuring epochs for config upgrade"
                                       << " (" << upgradeId.toString() << ")";

    if (!_acquireAllCollectionLocks(configLoc,
                                    collections,
                                    lockMessage,
                                    20 * 60 * 1000,
                                    &collectionLocks,
                                    errMsg))
    {
        *errMsg = stream() << "could not acquire all namespace locks for upgrade"
                           << " (" << upgradeId.toString() << ")"
                           << causedBy(errMsg);
        return false;
    }

    // We are now preventing all splits and migrates for all sharded collections

    // Get working and backup suffixes
    string workingSuffix = genWorkingSuffix(upgradeId);
    string backupSuffix = genBackupSuffix(upgradeId);

    log() << "copying collection and chunk metadata to working and backup collections..."
          << endl;

    // Get a backup and working copy of the config.collections and config.chunks collections

    Status copyStatus = copyFrozenCollection(configLoc,
                                             CollectionType::ConfigNS,
                                             CollectionType::ConfigNS + workingSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                           << (CollectionType::ConfigNS + workingSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      CollectionType::ConfigNS,
                                      CollectionType::ConfigNS + backupSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                           << (CollectionType::ConfigNS + backupSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      ChunkType::ConfigNS,
                                      ChunkType::ConfigNS + workingSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                           << (ChunkType::ConfigNS + workingSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      ChunkType::ConfigNS,
                                      ChunkType::ConfigNS + backupSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                           << (ChunkType::ConfigNS + backupSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    //
    // Go through sharded collections one-by-one and add epochs where missing
    //

    for (map<string, CollectionType*>::const_iterator it = collections.begin();
         it != collections.end(); ++it)
    {
        // Create a copy so that we can change the epoch later
        CollectionType collection;
        it->second->cloneTo(&collection);

        log() << "checking epochs for " << collection.getNS() << " collection..." << endl;

        OID epoch = collection.getEpoch();

        //
        // Go through chunks to find epoch if we haven't found it or to verify epoch
        // is the same
        //

        OwnedPointerVector<ChunkType> ownedChunks;
        const vector<ChunkType*>& chunks = ownedChunks.vector();

        Status findChunksStatus = findAllChunks(configLoc, collection.getNS(), &ownedChunks);

        if (!findChunksStatus.isOK()) {
            *errMsg = stream() << "could not read chunks from config server"
                               << causedBy(findChunksStatus);
            return false;
        }

        for (vector<ChunkType*>::const_iterator chunkIt = chunks.begin();
             chunkIt != chunks.end(); ++chunkIt)
        {
            const ChunkType& chunk = *(*chunkIt);

            // If our chunk epoch is set and doesn't match
            if (epoch.isSet() && chunk.getVersion().epoch().isSet()
                && chunk.getVersion().epoch() != epoch)
            {
                *errMsg = stream() << "chunk epoch for " << chunk.toString() << " in "
                                   << collection.getNS() << " does not match found epoch "
                                   << epoch;
                return false;
            }
            else if (!epoch.isSet() && chunk.getVersion().epoch().isSet()) {
                epoch = chunk.getVersion().epoch();
            }
        }

        //
        // Write collection epoch if needed
        //

        if (!collection.getEpoch().isSet()) {
            OID newEpoch = OID::gen();

            log() << "writing new epoch " << newEpoch << " for " << collection.getNS()
                  << " collection..." << endl;

            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc,
                                                                                30));
                ScopedDbConnection& conn = *connPtr;

                conn->update(CollectionType::ConfigNS + workingSuffix,
                             BSON(CollectionType::ns(collection.getNS())),
                             BSON("$set" << BSON(
                                 CollectionType::DEPRECATED_lastmodEpoch(newEpoch))));
                _checkGLE(conn);
            }
            catch (const DBException& e) {
                *errMsg = stream() << "could not write a new epoch for "
                                   << collection.getNS() << causedBy(e);
                return false;
            }

            connPtr->done();
            collection.setEpoch(newEpoch);
        }

        epoch = collection.getEpoch();
        verify(epoch.isSet());

        //
        // Now write verified epoch to all chunks
        //

        log() << "writing epoch " << epoch << " for " << chunks.size() << " chunks in "
              << collection.getNS() << " collection..." << endl;

        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc,
                                                                                30));
                ScopedDbConnection& conn = *connPtr;

                // Multi-update of all chunks
                conn->update(ChunkType::ConfigNS + workingSuffix,
                             BSON(ChunkType::ns(collection.getNS())),
                             BSON("$set" << BSON(ChunkType::DEPRECATED_epoch(epoch))),
                             false, true); // multi
                _checkGLE(conn);
            }
            catch (const DBException& e) {
                *errMsg = stream() << "could not write a new epoch " << epoch.toString()
                                   << " for chunks in " << collection.getNS()
                                   << causedBy(e);
                return false;
            }

            connPtr->done();
        }
    }

    //
    // Paranoid verify the collection writes
    //

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            // Find collections with no epochs
            BSONObj emptyDoc =
                conn->findOne(CollectionType::ConfigNS + workingSuffix,
                              BSON("$unset"
                                   << BSON(CollectionType::DEPRECATED_lastmodEpoch() << 1)));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "collection " << emptyDoc
                                   << " is still missing epoch after config upgrade";

                connPtr->done();
                return false;
            }

            // Find collections with empty epochs
            emptyDoc = conn->findOne(CollectionType::ConfigNS + workingSuffix,
                                     BSON(CollectionType::DEPRECATED_lastmodEpoch(OID())));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "collection " << emptyDoc
                                   << " still has empty epoch after config upgrade";

                connPtr->done();
                return false;
            }

            // Find chunks with no epochs
            emptyDoc =
                conn->findOne(ChunkType::ConfigNS + workingSuffix,
                              BSON("$unset" << BSON(ChunkType::DEPRECATED_epoch() << 1)));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "chunk " << emptyDoc
                                   << " is still missing epoch after config upgrade";

                connPtr->done();
                return false;
            }

            // Find chunks with empty epochs
            emptyDoc = conn->findOne(ChunkType::ConfigNS + workingSuffix,
                                     BSON(ChunkType::DEPRECATED_epoch(OID())));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "chunk " << emptyDoc
                                   << " still has empty epoch after config upgrade";

                connPtr->done();
                return false;
            }
        }
        catch (const DBException& e) {
            *errMsg = stream() << "could not verify epoch writes" << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // Double check that our collections haven't changed
    //

    Status idCheckStatus = checkIdsTheSame(configLoc,
                                           CollectionType::ConfigNS,
                                           CollectionType::ConfigNS + workingSuffix);

    if (!idCheckStatus.isOK()) {
        *errMsg = stream() << CollectionType::ConfigNS
                           << " was modified while working on upgrade"
                           << causedBy(idCheckStatus);
        return false;
    }

    idCheckStatus = checkIdsTheSame(configLoc,
                                    ChunkType::ConfigNS,
                                    ChunkType::ConfigNS + workingSuffix);

    if (!idCheckStatus.isOK()) {
        *errMsg = stream() << ChunkType::ConfigNS
                           << " was modified while working on upgrade"
                           << causedBy(idCheckStatus);
        return false;
    }

    //
    // ENTER CRITICAL SECTION
    //

    newVersionInfo.setUpgradeState(BSON(inCriticalSectionField(true)));

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            // No cleanup message here since we're not sure if we wrote or not, and
            // not dangerous either way except to prevent further updates (at which point
            // the message is printed)
            *errMsg = stream() << "could not update version info to enter critical update"
                               << " section" << causedBy(e);
            return false;
        }

        // AT THIS POINT ANY FAILURE REQUIRES MANUAL INTERVENTION!
        connPtr->done();
    }

    log() << "entered critical section for config upgrade" << endl;

    Status overwriteStatus = overwriteCollection(configLoc,
                                                 CollectionType::ConfigNS + workingSuffix,
                                                 CollectionType::ConfigNS);

    if (!overwriteStatus.isOK()) {
        error() << cleanupMessage << endl;
        *errMsg = stream() << "could not overwrite collection " << CollectionType::ConfigNS
                           << " with working collection "
                           << (CollectionType::ConfigNS + workingSuffix)
                           << causedBy(overwriteStatus);
        return false;
    }

    overwriteStatus = overwriteCollection(configLoc,
                                          ChunkType::ConfigNS + workingSuffix,
                                          ChunkType::ConfigNS);

    if (!overwriteStatus.isOK()) {
        error() << cleanupMessage << endl;
        *errMsg = stream() << "could not overwrite collection " << ChunkType::ConfigNS
                           << " with working collection "
                           << (ChunkType::ConfigNS + workingSuffix)
                           << causedBy(overwriteStatus);
        return false;
    }

    //
    // Finally update the version to latest and add clusterId to version
    //

    OID newClusterId = OID::gen();

    // Note: hardcoded versions, since this is a very particular upgrade
    // Note: DO NOT CLEAR the config version unless bumping the minCompatibleVersion,
    // we want to save the excludes that were set.

    newVersionInfo.setMinCompatibleVersion(UpgradeHistory_NoEpochVersion);
    newVersionInfo.setCurrentVersion(UpgradeHistory_MandatoryEpochVersion);
    newVersionInfo.setClusterId(newClusterId);

    // Leave critical section
    newVersionInfo.unsetUpgradeId();
    newVersionInfo.unsetUpgradeState();

    log() << "writing new version info and clusterId " << newClusterId << "..." << endl;

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1
                              << VersionType::version_DEPRECATED(
                                     UpgradeHistory_NoEpochVersion)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            error() << cleanupMessage << endl;
            *errMsg = stream() << "could not write new version info "
                               << "and exit critical upgrade section" << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // END CRITICAL SECTION
    //

    return true;
}
void LocksType::setLockID(const OID& lockID) {
    invariant(lockID.isSet());
    _lockID = lockID;
}
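A pattern running through all of these snippets is the unset OID as a "no value" sentinel: a default-constructed OID is all zero bytes, isSet() reports whether it has ever been assigned or generated, and clear() returns it to the sentinel state. Below is a minimal standalone sketch of that idiom, not MongoDB source; observe() and reset() are hypothetical helpers, and only OID operations already used in the snippets above (isSet(), clear(), operator<, assignment) are assumed:

#include "mongo/bson/oid.h"

// Sketch only: an unset OID doubles as "nothing recorded yet", mirroring how
// resUpserted, rid, lastOID, and the collection epochs are tested above.
static mongo::OID lastSeen;      // default-constructed => all zeroes, isSet() == false

void observe(const mongo::OID& id) {
    if (!lastSeen.isSet()) {     // first observation, nothing recorded yet
        lastSeen = id;
        return;
    }
    if (id < lastSeen) {
        // OIDs lead with a timestamp, so ordering is roughly temporal; an
        // out-of-order id is suspicious, as in WriteBackManager::queueWriteBack.
    }
    lastSeen = id;
}

void reset() {
    lastSeen.clear();            // back to the unset sentinel state
}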
bool mergeChunks(OperationContext* txn,
                 const NamespaceString& nss,
                 const BSONObj& minKey,
                 const BSONObj& maxKey,
                 const OID& epoch,
                 string* errMsg) {
    // Get the distributed lock
    string whyMessage = stream() << "merging chunks in " << nss.ns() << " from " << minKey
                                 << " to " << maxKey;
    auto scopedDistLock = grid.catalogManager(txn)->distLock(
        txn, nss.ns(), whyMessage, DistLockManager::kSingleLockAttemptTimeout);

    if (!scopedDistLock.isOK()) {
        *errMsg = stream() << "could not acquire collection lock for " << nss.ns()
                           << " to merge chunks in [" << minKey << "," << maxKey << ")"
                           << causedBy(scopedDistLock.getStatus());

        warning() << *errMsg;
        return false;
    }

    ShardingState* shardingState = ShardingState::get(txn);

    //
    // We now have the collection lock, refresh metadata to latest version and sanity check
    //

    ChunkVersion shardVersion;
    Status status = shardingState->refreshMetadataNow(txn, nss.ns(), &shardVersion);

    if (!status.isOK()) {
        *errMsg = str::stream() << "could not merge chunks, failed to refresh metadata for "
                                << nss.ns() << causedBy(status.reason());

        warning() << *errMsg;
        return false;
    }

    if (epoch.isSet() && shardVersion.epoch() != epoch) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " has changed since merge was sent"
                           << " (sent epoch : " << epoch.toString()
                           << ", current epoch : " << shardVersion.epoch().toString() << ")";

        warning() << *errMsg;
        return false;
    }

    shared_ptr<CollectionMetadata> metadata = shardingState->getCollectionMetadata(nss.ns());

    if (!metadata || metadata->getKeyPattern().isEmpty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " is not sharded";

        warning() << *errMsg;
        return false;
    }

    dassert(metadata->getShardVersion().equals(shardVersion));

    if (!metadata->isValidKey(minKey) || !metadata->isValidKey(maxKey)) {
        *errMsg = stream() << "could not merge chunks, the range "
                           << rangeToString(minKey, maxKey) << " is not valid"
                           << " for collection " << nss.ns() << " with key pattern "
                           << metadata->getKeyPattern();

        warning() << *errMsg;
        return false;
    }

    //
    // Get merged chunk information
    //

    ChunkVersion mergeVersion = metadata->getCollVersion();
    mergeVersion.incMinor();

    std::vector<ChunkType> chunksToMerge;

    ChunkType itChunk;
    // Seed both min and max with minKey on purpose: iteration starts from minKey.
    itChunk.setMin(minKey);
    itChunk.setMax(minKey);
    itChunk.setNS(nss.ns());
    itChunk.setShard(shardingState->getShardName());

    while (itChunk.getMax().woCompare(maxKey) < 0 &&
           metadata->getNextChunk(itChunk.getMax(), &itChunk)) {
        chunksToMerge.push_back(itChunk);
    }

    if (chunksToMerge.empty()) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey << " and ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    //
    // Validate the range starts and ends at chunks and has no holes, error if not valid
    //

    BSONObj firstDocMin = chunksToMerge.front().getMin();
    BSONObj firstDocMax = chunksToMerge.front().getMax();
    // minKey is inclusive
    bool minKeyInRange = rangeContains(firstDocMin, firstDocMax, minKey);

    if (!minKeyInRange) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range starting at " << minKey
                           << " does not belong to shard " << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    BSONObj lastDocMin = chunksToMerge.back().getMin();
    BSONObj lastDocMax = chunksToMerge.back().getMax();
    // maxKey is exclusive
    bool maxKeyInRange = lastDocMin.woCompare(maxKey) < 0 &&
                         lastDocMax.woCompare(maxKey) >= 0;

    if (!maxKeyInRange) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " range ending at " << maxKey
                           << " does not belong to shard " << shardingState->getShardName();

        warning() << *errMsg;
        return false;
    }

    bool validRangeStartKey = firstDocMin.woCompare(minKey) == 0;
    bool validRangeEndKey = lastDocMax.woCompare(maxKey) == 0;

    if (!validRangeStartKey || !validRangeEndKey) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " does not contain a chunk "
                           << (!validRangeStartKey ? "starting at " + minKey.toString() : "")
                           << (!validRangeStartKey && !validRangeEndKey ? " or " : "")
                           << (!validRangeEndKey ? "ending at " + maxKey.toString() : "");

        warning() << *errMsg;
        return false;
    }

    if (chunksToMerge.size() == 1) {
        *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                           << " already contains chunk for " << rangeToString(minKey, maxKey);

        warning() << *errMsg;
        return false;
    }

    // Look for hole in range
    for (size_t i = 1; i < chunksToMerge.size(); ++i) {
        if (chunksToMerge[i - 1].getMax().woCompare(chunksToMerge[i].getMin()) != 0) {
            *errMsg = stream() << "could not merge chunks, collection " << nss.ns()
                               << " has a hole in the range " << rangeToString(minKey, maxKey)
                               << " at " << rangeToString(chunksToMerge[i - 1].getMax(),
                                                          chunksToMerge[i].getMin());

            warning() << *errMsg;
            return false;
        }
    }

    //
    // Run apply ops command
    //

    Status applyOpsStatus = runApplyOpsCmd(txn, chunksToMerge, shardVersion, mergeVersion);
    if (!applyOpsStatus.isOK()) {
        warning() << applyOpsStatus;
        return false;
    }

    //
    // Install merged chunk metadata
    //

    {
        ScopedTransaction transaction(txn, MODE_IX);
        Lock::DBLock writeLk(txn->lockState(), nss.db(), MODE_IX);
        Lock::CollectionLock collLock(txn->lockState(), nss.ns(), MODE_X);

        shardingState->mergeChunks(txn, nss.ns(), minKey, maxKey, mergeVersion);
    }

    //
    // Log change
    //

    BSONObj mergeLogEntry = buildMergeLogEntry(chunksToMerge, shardVersion, mergeVersion);

    grid.catalogManager(txn)->logChange(txn, "merge", nss.ns(), mergeLogEntry);

    return true;
}
void ShardCollectionType::setEpoch(const OID& epoch) {
    invariant(epoch.isSet());
    _epoch = epoch;
}
void WriteBackListener::run() {
    OID lastID;
    lastID.clear();
    int secsToSleep = 0;
    while ( ! inShutdown() && Shard::isMember( _addr ) ) {

        if ( lastID.isSet() ) {
            scoped_lock lk( _seenWritebacksLock );
            _seenWritebacks.insert( lastID );
            lastID.clear();
        }

        try {
            ScopedDbConnection conn( _addr );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
                    log() << "writebacklisten command failed! " << result << endl;
                    conn.done();
                    continue;
                }
            }

            log(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );
            if ( data.getBoolField( "writeBack" ) ) {
                string ns = data["ns"].valuestrsafe();

                {
                    BSONElement e = data["id"];
                    if ( e.type() == jstOID )
                        lastID = e.OID();
                }

                int len;
                Message m( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , m.header()->valid() );

                DBConfigPtr db = grid.getDBConfig( ns );
                ShardChunkVersion needVersion( data["version"] );

                // TODO: change to log(3)
                log(1) << "writeback id: " << lastID
                       << " needVersion : " << needVersion.toString()
                       << " mine : " << db->getChunkManager( ns )->getVersion().toString()
                       << endl;

                if ( logLevel ) log(1) << debugString( m ) << endl;

                if ( needVersion.isSet() &&
                     needVersion <= db->getChunkManager( ns )->getVersion() ) {
                    // this means when the write went originally, the version was old
                    // if we're here, it means we've already updated the config, so we
                    // don't need to do it again
                    //db->getChunkManager( ns , true ); // SERVER-1349
                }
                else {
                    // we received a writeback object that was sent to a previous version
                    // of a shard. the actual shard may not have the object the writeback
                    // operation is for. we need to reload the chunk manager and get the
                    // new shard versions
                    db->getChunkManager( ns , true );
                }

                Request r( m , 0 );
                r.init();
                r.process();
            }
            else if ( result["noop"].trueValue() ) {
                // no-op
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            conn.done();
            secsToSleep = 0;
            continue;
        }
        catch ( std::exception& e ) {

            if ( inShutdown() ) {
                // we're shutting down, so just clean up
                return;
            }

            log() << "WriteBackListener exception : " << e.what() << endl;

            // It's possible this shard was removed
            Shard::reloadShardInfo();
        }
        catch ( ... ) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }

        // back off a little longer on each consecutive failure, wrapping after 10s
        secsToSleep++;
        sleepsecs( secsToSleep );
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }

    log() << "WriteBackListener exiting : address no longer in cluster " << _addr << endl;
}
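The run() loop above also encodes the listener's failure cadence: secsToSleep resets to 0 after any successful pass, each consecutive failure sleeps one second longer than the last, and the delay wraps back to short sleeps once it passes 10 seconds. A standalone sketch of that capped linear backoff, not MongoDB source; doWork() is a hypothetical stand-in for one pass of the listener:

#include <unistd.h>  // sleep()

bool doWork() { return false; }  // hypothetical unit of work; false means failure

void retryLoop() {
    int secsToSleep = 0;
    for (;;) {
        if (doWork()) {
            secsToSleep = 0;     // success: the backoff ramp starts over
            continue;            // retry immediately, no sleep
        }
        secsToSleep++;           // consecutive failures sleep 1s, 2s, 3s, ...
        sleep(secsToSleep);
        if (secsToSleep > 10)
            secsToSleep = 0;     // wrap around so no single sleep exceeds 11s
    }
}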