ChunkManager::ChunkManager(const CollectionType& coll)
    : _ns(coll.getNs().ns()),
      _keyPattern(coll.getKeyPattern()),
      _unique(coll.getUnique()),
      _sequenceNumber(NextSequenceNumber.addAndFetch(1)),
      _chunkRanges() {
    // coll does not have the correct version. Use the same initial version as _load and
    // createFirstChunks.
    _version = ChunkVersion(0, 0, coll.getEpoch());
}
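// A minimal, self-contained sketch of why ChunkVersion(0, 0, epoch) is used as the
// initial version above: a major/minor of zero means "no chunk data loaded yet", while
// the epoch still identifies the collection incarnation, so later loads can be checked
// for compatibility. ToyChunkVersion is a hypothetical simplification for illustration
// only, not the real mongo::ChunkVersion.
#include <iostream>
#include <string>

struct ToyChunkVersion {
    unsigned majorVer;
    unsigned minorVer;
    std::string epoch;  // stands in for the real OID epoch

    bool isSet() const { return majorVer > 0 || minorVer > 0; }
    bool hasCompatibleEpoch(const std::string& other) const { return epoch == other; }
};

int main() {
    ToyChunkVersion initial{0, 0, "abc123"};  // like ChunkVersion(0, 0, coll.getEpoch())
    ToyChunkVersion loaded{3, 1, "abc123"};   // version after chunks are loaded

    std::cout << std::boolalpha
              << initial.isSet() << "\n"                            // false: nothing loaded yet
              << loaded.hasCompatibleEpoch(initial.epoch) << "\n";  // true: same incarnation
    return 0;
}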
Status _findAllCollections(const ConnectionString& configLoc,
                           bool optionalEpochs,
                           OwnedPointerMap<string, CollectionType>* collections) {
    scoped_ptr<ScopedDbConnection> connPtr;

    try {
        connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
        ScopedDbConnection& conn = *connPtr;

        scoped_ptr<DBClientCursor> cursor(_safeCursor(conn->query(CollectionType::ConfigNS,
                                                                  Query())));

        while (cursor->more()) {
            BSONObj collDoc = cursor->nextSafe();

            // Owned via auto_ptr until handed off to the map, so error paths don't leak
            auto_ptr<CollectionType> coll(new CollectionType());
            string errMsg;
            coll->parseBSON(collDoc, &errMsg);

            // Needed for the v3 to v4 upgrade
            bool epochNotSet = !coll->isEpochSet() || !coll->getEpoch().isSet();
            if (optionalEpochs && epochNotSet) {
                // Set our epoch to something here, just to allow the validation below to pass
                coll->setEpoch(OID::gen());
            }

            if (errMsg != "" || !coll->isValid(&errMsg)) {
                return Status(ErrorCodes::UnsupportedFormat,
                              stream() << "invalid collection " << collDoc
                                       << " read from the config server"
                                       << causedBy(errMsg));
            }

            if (coll->isDroppedSet() && coll->getDropped()) {
                continue;
            }

            if (optionalEpochs && epochNotSet) {
                // Restore the unset epoch now that validation has passed
                coll->setEpoch(OID());
            }

            collections->mutableMap().insert(make_pair(coll->getNS(), coll.release()));
        }
    }
    catch (const DBException& e) {
        return e.toStatus();
    }

    connPtr->done();
    return Status::OK();
}
Status MetadataLoader::_initCollection(CatalogManager* catalogManager,
                                       const string& ns,
                                       const string& shard,
                                       CollectionMetadata* metadata) const {
    auto coll = catalogManager->getCollection(ns);
    if (!coll.isOK()) {
        return coll.getStatus();
    }

    CollectionType collInfo = coll.getValue();
    if (collInfo.getDropped()) {
        return Status(ErrorCodes::NamespaceNotFound,
                      str::stream() << "could not load metadata, collection " << ns
                                    << " was dropped");
    }

    metadata->_keyPattern = collInfo.getKeyPattern().toBSON();
    metadata->fillKeyPatternFields();
    metadata->_shardVersion = ChunkVersion(0, 0, collInfo.getEpoch());
    metadata->_collVersion = ChunkVersion(0, 0, collInfo.getEpoch());

    return Status::OK();
}
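// The getCollection() call above returns a StatusWith-style result: either an error
// status or a value, never both. A minimal self-contained sketch of that pattern
// follows; ToyStatusWith is a hypothetical simplification, not the real
// mongo::StatusWith.
#include <iostream>
#include <string>

template <typename T>
class ToyStatusWith {
public:
    explicit ToyStatusWith(T value) : _ok(true), _value(std::move(value)) {}
    explicit ToyStatusWith(std::string error) : _ok(false), _error(std::move(error)) {}

    bool isOK() const { return _ok; }
    const T& getValue() const { return _value; }  // only meaningful when isOK()
    const std::string& getStatus() const { return _error; }

private:
    bool _ok;
    T _value{};
    std::string _error;
};

int main() {
    ToyStatusWith<int> good(42);
    ToyStatusWith<int> bad(std::string("NamespaceNotFound"));
    if (good.isOK()) std::cout << good.getValue() << "\n";  // prints 42
    if (!bad.isOK()) std::cout << bad.getStatus() << "\n";  // prints the error
    return 0;
}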
/**
 * Upgrade from v3 to v4 of the config metadata.
 *
 * This upgrade takes a config server (potentially) without collection epochs and adds
 * epochs to all collection and chunk metadata, so that all mongo processes see them.
 */
bool doUpgradeV3ToV4(const ConnectionString& configLoc,
                     const VersionType& lastVersionInfo,
                     string* errMsg) {
    string dummy;
    if (!errMsg) errMsg = &dummy;

    verify(lastVersionInfo.getCurrentVersion() == UpgradeHistory_NoEpochVersion);

    if (lastVersionInfo.isUpgradeIdSet() && lastVersionInfo.getUpgradeId().isSet()) {
        //
        // Another upgrade failed, so cleanup may be necessary
        //

        BSONObj lastUpgradeState = lastVersionInfo.getUpgradeState();

        bool inCriticalSection;
        if (!FieldParser::extract(lastUpgradeState,
                                  inCriticalSectionField,
                                  &inCriticalSection,
                                  errMsg)) {
            *errMsg = stream() << "problem reading previous upgrade state"
                               << causedBy(errMsg);
            return false;
        }

        if (inCriticalSection) {
            // Manual intervention is needed here. Somehow our upgrade didn't get applied
            // consistently across config servers.
            *errMsg = cannotCleanupMessage;
            return false;
        }

        if (!_cleanupUpgradeState(configLoc, lastVersionInfo.getUpgradeId(), errMsg)) {
            // If we can't cleanup the old upgrade state, the user might have done it for us,
            // not a fatal problem (we'll just end up with extra collections).
            warning() << "could not cleanup previous upgrade state" << causedBy(errMsg)
                      << endl;
            *errMsg = "";
        }
    }

    //
    // Check the versions of other mongo processes in the cluster before upgrade.
    // We can't upgrade if there are active pre-v2.2 processes in the cluster.
    //

    Status mongoVersionStatus = checkClusterMongoVersions(configLoc,
                                                          string(minMongoProcessVersion));

    if (!mongoVersionStatus.isOK()) {
        *errMsg = stream() << "cannot upgrade with pre-v" << minMongoProcessVersion
                           << " mongo processes active in the cluster"
                           << causedBy(mongoVersionStatus);
        return false;
    }

    VersionType newVersionInfo;
    lastVersionInfo.cloneTo(&newVersionInfo);

    // Set our upgrade id and state
    OID upgradeId = OID::gen();
    newVersionInfo.setUpgradeId(upgradeId);
    newVersionInfo.setUpgradeState(BSONObj());

    // Write our upgrade id and state
    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            *errMsg = stream() << "could not initialize version info for upgrade"
                               << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // First lock all collection namespaces that exist
    //

    OwnedPointerMap<string, CollectionType> ownedCollections;
    const map<string, CollectionType*>& collections = ownedCollections.map();

    Status findCollectionsStatus = findAllCollectionsV3(configLoc, &ownedCollections);

    if (!findCollectionsStatus.isOK()) {
        *errMsg = stream() << "could not read collections from config server"
                           << causedBy(findCollectionsStatus);
        return false;
    }

    //
    // Acquire locks for all sharded collections.
    // Something that didn't involve getting thousands of locks would be better.
    //

    OwnedPointerVector<ScopedDistributedLock> collectionLocks;

    log() << "acquiring locks for " << collections.size() << " sharded collections..."
          << endl;

    // WARNING - this string is used programmatically when forcing locks, be careful when
    // changing!
    // TODO: Add programmatic "why" field to lock collection
    string lockMessage = str::stream() << "ensuring epochs for config upgrade"
                                       << " (" << upgradeId.toString() << ")";

    if (!_acquireAllCollectionLocks(configLoc,
                                    collections,
                                    lockMessage,
                                    20 * 60 * 1000,
                                    &collectionLocks,
                                    errMsg)) {
        *errMsg = stream() << "could not acquire all namespace locks for upgrade"
                           << " (" << upgradeId.toString() << ")"
                           << causedBy(errMsg);
        return false;
    }

    // We are now preventing all splits and migrates for all sharded collections

    // Get working and backup suffixes
    string workingSuffix = genWorkingSuffix(upgradeId);
    string backupSuffix = genBackupSuffix(upgradeId);

    log() << "copying collection and chunk metadata to working and backup collections..."
          << endl;

    // Get a backup and working copy of the config.collections and config.chunks collections

    Status copyStatus = copyFrozenCollection(configLoc,
                                             CollectionType::ConfigNS,
                                             CollectionType::ConfigNS + workingSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                           << (CollectionType::ConfigNS + workingSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      CollectionType::ConfigNS,
                                      CollectionType::ConfigNS + backupSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << CollectionType::ConfigNS << " to "
                           << (CollectionType::ConfigNS + backupSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      ChunkType::ConfigNS,
                                      ChunkType::ConfigNS + workingSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                           << (ChunkType::ConfigNS + workingSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    copyStatus = copyFrozenCollection(configLoc,
                                      ChunkType::ConfigNS,
                                      ChunkType::ConfigNS + backupSuffix);

    if (!copyStatus.isOK()) {
        *errMsg = stream() << "could not copy " << ChunkType::ConfigNS << " to "
                           << (ChunkType::ConfigNS + backupSuffix)
                           << causedBy(copyStatus);
        return false;
    }

    //
    // Go through sharded collections one-by-one and add epochs where missing
    //

    for (map<string, CollectionType*>::const_iterator it = collections.begin();
         it != collections.end();
         ++it) {
        // Create a copy so that we can change the epoch later
        CollectionType collection;
        it->second->cloneTo(&collection);

        log() << "checking epochs for " << collection.getNS() << " collection..."
              << endl;

        OID epoch = collection.getEpoch();

        //
        // Go through chunks to find the epoch if we haven't found it yet, or to verify
        // that the epoch is the same
        //

        OwnedPointerVector<ChunkType> ownedChunks;
        const vector<ChunkType*>& chunks = ownedChunks.vector();

        Status findChunksStatus = findAllChunks(configLoc, collection.getNS(), &ownedChunks);

        if (!findChunksStatus.isOK()) {
            *errMsg = stream() << "could not read chunks from config server"
                               << causedBy(findChunksStatus);
            return false;
        }

        for (vector<ChunkType*>::const_iterator chunkIt = chunks.begin();
             chunkIt != chunks.end();
             ++chunkIt) {
            const ChunkType& chunk = *(*chunkIt);

            // If our chunk epoch is set and doesn't match
            if (epoch.isSet() && chunk.getVersion().epoch().isSet()
                && chunk.getVersion().epoch() != epoch) {
                *errMsg = stream() << "chunk epoch for " << chunk.toString() << " in "
                                   << collection.getNS() << " does not match found epoch "
                                   << epoch;
                return false;
            }
            else if (!epoch.isSet() && chunk.getVersion().epoch().isSet()) {
                epoch = chunk.getVersion().epoch();
            }
        }

        //
        // Write collection epoch if needed
        //

        if (!collection.getEpoch().isSet()) {
            OID newEpoch = OID::gen();

            log() << "writing new epoch " << newEpoch << " for " << collection.getNS()
                  << " collection..." << endl;

            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc,
                                                                                30));
                ScopedDbConnection& conn = *connPtr;

                conn->update(CollectionType::ConfigNS + workingSuffix,
                             BSON(CollectionType::ns(collection.getNS())),
                             BSON("$set"
                                  << BSON(CollectionType::DEPRECATED_lastmodEpoch(newEpoch))));
                _checkGLE(conn);
            }
            catch (const DBException& e) {
                *errMsg = stream() << "could not write a new epoch for "
                                   << collection.getNS() << causedBy(e);
                return false;
            }

            connPtr->done();
            collection.setEpoch(newEpoch);
        }

        epoch = collection.getEpoch();
        verify(epoch.isSet());

        //
        // Now write verified epoch to all chunks
        //

        log() << "writing epoch " << epoch << " for " << chunks.size() << " chunks in "
              << collection.getNS() << " collection..."
              << endl;

        {
            scoped_ptr<ScopedDbConnection> connPtr;

            try {
                connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc,
                                                                                30));
                ScopedDbConnection& conn = *connPtr;

                // Multi-update of all chunks
                conn->update(ChunkType::ConfigNS + workingSuffix,
                             BSON(ChunkType::ns(collection.getNS())),
                             BSON("$set" << BSON(ChunkType::DEPRECATED_epoch(epoch))),
                             false,
                             true);  // multi
                _checkGLE(conn);
            }
            catch (const DBException& e) {
                *errMsg = stream() << "could not write a new epoch " << epoch.toString()
                                   << " for chunks in " << collection.getNS()
                                   << causedBy(e);
                return false;
            }

            connPtr->done();
        }
    }

    //
    // Paranoid verify the collection writes
    //

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            // Find collections with no epochs ($exists: false matches missing fields)
            BSONObj emptyDoc =
                conn->findOne(CollectionType::ConfigNS + workingSuffix,
                              BSON(CollectionType::DEPRECATED_lastmodEpoch()
                                   << BSON("$exists" << false)));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "collection " << emptyDoc
                                   << " is still missing epoch after config upgrade";
                connPtr->done();
                return false;
            }

            // Find collections with empty epochs
            emptyDoc = conn->findOne(CollectionType::ConfigNS + workingSuffix,
                                     BSON(CollectionType::DEPRECATED_lastmodEpoch(OID())));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "collection " << emptyDoc
                                   << " still has empty epoch after config upgrade";
                connPtr->done();
                return false;
            }

            // Find chunks with no epochs
            emptyDoc = conn->findOne(ChunkType::ConfigNS + workingSuffix,
                                     BSON(ChunkType::DEPRECATED_epoch()
                                          << BSON("$exists" << false)));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "chunk " << emptyDoc
                                   << " is still missing epoch after config upgrade";
                connPtr->done();
                return false;
            }

            // Find chunks with empty epochs
            emptyDoc = conn->findOne(ChunkType::ConfigNS + workingSuffix,
                                     BSON(ChunkType::DEPRECATED_epoch(OID())));

            if (!emptyDoc.isEmpty()) {
                *errMsg = stream() << "chunk " << emptyDoc
                                   << " still has empty epoch after config upgrade";
                connPtr->done();
                return false;
            }
        }
        catch (const DBException& e) {
            *errMsg = stream() << "could not verify epoch writes" << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // Double check that our collections haven't changed
    //

    Status idCheckStatus = checkIdsTheSame(configLoc,
                                           CollectionType::ConfigNS,
                                           CollectionType::ConfigNS + workingSuffix);

    if (!idCheckStatus.isOK()) {
        *errMsg = stream() << CollectionType::ConfigNS
                           << " was modified while working on upgrade"
                           << causedBy(idCheckStatus);
        return false;
    }

    idCheckStatus = checkIdsTheSame(configLoc,
                                    ChunkType::ConfigNS,
                                    ChunkType::ConfigNS + workingSuffix);

    if (!idCheckStatus.isOK()) {
        *errMsg = stream() << ChunkType::ConfigNS
                           << " was modified while working on upgrade"
                           << causedBy(idCheckStatus);
        return false;
    }

    //
    // ENTER CRITICAL SECTION
    //

    newVersionInfo.setUpgradeState(BSON(inCriticalSectionField(true)));

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1 << VersionType::version_DEPRECATED(3)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            // No cleanup message here since we're not sure if we wrote or not, and
            // not dangerous either way except to prevent further updates (at which point
            // the message is printed)
            *errMsg = stream() << "could not update version info to enter critical update section"
                               << causedBy(e);
            return false;
        }

        // AT THIS POINT ANY FAILURE REQUIRES MANUAL INTERVENTION!
        connPtr->done();
    }

    log() << "entered critical section for config upgrade" << endl;

    Status overwriteStatus = overwriteCollection(configLoc,
                                                 CollectionType::ConfigNS + workingSuffix,
                                                 CollectionType::ConfigNS);

    if (!overwriteStatus.isOK()) {
        error() << cleanupMessage << endl;
        *errMsg = stream() << "could not overwrite collection " << CollectionType::ConfigNS
                           << " with working collection "
                           << (CollectionType::ConfigNS + workingSuffix)
                           << causedBy(overwriteStatus);
        return false;
    }

    overwriteStatus = overwriteCollection(configLoc,
                                          ChunkType::ConfigNS + workingSuffix,
                                          ChunkType::ConfigNS);

    if (!overwriteStatus.isOK()) {
        error() << cleanupMessage << endl;
        *errMsg = stream() << "could not overwrite collection " << ChunkType::ConfigNS
                           << " with working collection "
                           << (ChunkType::ConfigNS + workingSuffix)
                           << causedBy(overwriteStatus);
        return false;
    }

    //
    // Finally update the version to latest and add clusterId to version
    //

    OID newClusterId = OID::gen();

    // Note: hardcoded versions, since this is a very particular upgrade
    // Note: DO NOT CLEAR the config version unless bumping the minCompatibleVersion,
    // we want to save the excludes that were set.

    newVersionInfo.setMinCompatibleVersion(UpgradeHistory_NoEpochVersion);
    newVersionInfo.setCurrentVersion(UpgradeHistory_MandatoryEpochVersion);
    newVersionInfo.setClusterId(newClusterId);

    // Leave critical section
    newVersionInfo.unsetUpgradeId();
    newVersionInfo.unsetUpgradeState();

    log() << "writing new version info and clusterId " << newClusterId << "..." << endl;

    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
            ScopedDbConnection& conn = *connPtr;

            verify(newVersionInfo.isValid(NULL));

            conn->update(VersionType::ConfigNS,
                         BSON("_id" << 1
                                    << VersionType::version_DEPRECATED(UpgradeHistory_NoEpochVersion)),
                         newVersionInfo.toBSON());
            _checkGLE(conn);
        }
        catch (const DBException& e) {
            error() << cleanupMessage << endl;
            *errMsg = stream() << "could not write new version info "
                               << "and exit critical upgrade section" << causedBy(e);
            return false;
        }

        connPtr->done();
    }

    //
    // END CRITICAL SECTION
    //

    return true;
}
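// The upgrade above brackets its destructive overwrite with a persisted "in critical
// section" flag: the flag is written to the version document before the overwrite and
// cleared by the final version write, so a crashed upgrade can tell on restart whether
// manual intervention is required. Below is a minimal self-contained sketch of that
// recovery rule; the names (UpgradeMarker, needsManualRecovery) are hypothetical and
// for illustration only.
#include <iostream>

struct UpgradeMarker {
    bool upgradeIdSet;       // a previous upgrade attempt left its id behind
    bool inCriticalSection;  // the attempt died between overwrite and final write
};

// A fresh upgrade may proceed when no marker exists, or when the previous attempt
// failed outside the critical section (its leftover collections can be cleaned up).
bool needsManualRecovery(const UpgradeMarker& marker) {
    return marker.upgradeIdSet && marker.inCriticalSection;
}

int main() {
    UpgradeMarker cleanFailure = {true, false};
    UpgradeMarker dirtyFailure = {true, true};
    std::cout << std::boolalpha
              << needsManualRecovery(cleanFailure) << "\n"   // false: cleanup and retry
              << needsManualRecovery(dirtyFailure) << "\n";  // true: stop, ask an operator
    return 0;
}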
Status MetadataLoader::initCollection( const string& ns,
                                       const string& shard,
                                       CollectionMetadata* metadata ) {
    //
    // Bring collection entry from the config server.
    //

    BSONObj collObj;
    {
        try {
            ScopedDbConnection conn( _configLoc.toString(), 30 );
            collObj = conn->findOne( CollectionType::ConfigNS, QUERY(CollectionType::ns()<<ns) );
            conn.done();
        }
        catch ( const DBException& e ) {
            string errMsg = str::stream() << "could not query collection metadata"
                                          << causedBy( e );

            // We deliberately do not return conn to the pool, since it was involved
            // with the error here.

            return Status( ErrorCodes::HostUnreachable, errMsg );
        }
    }

    CollectionType collDoc;
    string errMsg;
    if ( !collDoc.parseBSON( collObj, &errMsg ) || !collDoc.isValid( &errMsg ) ) {
        return Status( ErrorCodes::FailedToParse, errMsg );
    }

    //
    // Load or generate default chunks for collection config.
    //

    if ( collDoc.isKeyPatternSet() && !collDoc.getKeyPattern().isEmpty() ) {
        metadata->_keyPattern = collDoc.getKeyPattern();
        metadata->_shardVersion = ChunkVersion( 0, 0, collDoc.getEpoch() );
        metadata->_collVersion = ChunkVersion( 0, 0, collDoc.getEpoch() );

        return Status::OK();
    }
    else if ( collDoc.isPrimarySet() && collDoc.getPrimary() == shard ) {
        if ( shard == "" ) {
            warning() << "shard not verified, assuming collection " << ns
                      << " is unsharded on this shard" << endl;
        }

        metadata->_keyPattern = BSONObj();
        metadata->_shardVersion = ChunkVersion( 1, 0, collDoc.getEpoch() );
        metadata->_collVersion = metadata->_shardVersion;

        return Status::OK();
    }
    else {
        errMsg = str::stream() << "collection " << ns << " does not have a shard key "
                               << "and primary "
                               << ( collDoc.isPrimarySet() ? collDoc.getPrimary() : "" )
                               << " does not match this shard " << shard;
        return Status( ErrorCodes::RemoteChangeDetected, errMsg );
    }
}
bool MetadataLoader::initCollection(const string& ns,
                                    const string& shard,
                                    const CollectionManager* oldManager,
                                    CollectionManager* manager,
                                    string* errMsg) {
    //
    // Bring collection entry from the config server.
    //

    BSONObj collObj;
    {
        scoped_ptr<ScopedDbConnection> connPtr;

        try {
            connPtr.reset(
                ScopedDbConnection::getInternalScopedDbConnection(_configLoc.toString(),
                                                                  30));
            ScopedDbConnection& conn = *connPtr;

            collObj = conn->findOne(CollectionType::ConfigNS, QUERY(CollectionType::ns()<<ns));
        }
        catch (const DBException& e) {
            *errMsg = str::stream() << "caught exception accessing the config servers"
                                    << causedBy(e);

            // We deliberately do not return connPtr to the pool, since it was involved
            // with the error here.

            return false;
        }

        connPtr->done();
    }

    CollectionType collDoc;
    if (!collDoc.parseBSON(collObj, errMsg) || !collDoc.isValid(errMsg)) {
        return false;
    }

    //
    // Load or generate default chunks for collection config.
    //

    if (!collDoc.getKeyPattern().isEmpty()) {
        manager->_key = collDoc.getKeyPattern();

        if (!initChunks(collDoc, ns, shard, oldManager, manager, errMsg)) {
            return false;
        }
    }
    else if (collDoc.getPrimary() == shard) {
        if (shard == "") {
            warning() << "shard not verified, assuming collection " << ns
                      << " is unsharded on this shard" << endl;
        }

        manager->_key = BSONObj();
        manager->_maxShardVersion = ChunkVersion(1, 0, collDoc.getEpoch());
        manager->_maxCollVersion = manager->_maxShardVersion;
    }
    else {
        *errMsg = str::stream() << "collection " << ns << " does not have a shard key "
                                << "and primary " << collDoc.getPrimary()
                                << " does not match this shard " << shard;
        return false;
    }

    return true;
}
bool MetadataLoader::initChunks(const CollectionType& collDoc,
                                const string& ns,
                                const string& shard,
                                const CollectionManager* oldManager,
                                CollectionManager* manager,
                                string* errMsg) {
    map<string, ChunkVersion> versionMap;
    manager->_maxCollVersion = ChunkVersion(0, 0, collDoc.getEpoch());

    // Check to see if we should use the old version or not.
    if (oldManager) {
        ChunkVersion oldVersion = oldManager->getMaxShardVersion();

        if (oldVersion.isSet() && oldVersion.hasCompatibleEpoch(collDoc.getEpoch())) {
            // Our epoch for coll version and shard version should be the same.
            verify(oldManager->getMaxCollVersion().hasCompatibleEpoch(collDoc.getEpoch()));

            versionMap[shard] = oldManager->_maxShardVersion;
            manager->_maxCollVersion = oldManager->_maxCollVersion;

            // TODO: This could be made more efficient if copying were not required, but
            // it is not reloaded as frequently as in mongos.
            manager->_chunksMap = oldManager->_chunksMap;

            LOG(2) << "loading new chunks for collection " << ns
                   << " using old chunk manager w/ version "
                   << oldManager->getMaxShardVersion() << " and "
                   << manager->_chunksMap.size() << " chunks" << endl;
        }
    }

    // Expose the new manager's range map and version to the differ, which is
    // ultimately responsible for filling them in.
    SCMConfigDiffTracker differ(shard);
    differ.attach(ns, manager->_chunksMap, manager->_maxCollVersion, versionMap);

    try {
        scoped_ptr<ScopedDbConnection> connPtr(
            ScopedDbConnection::getInternalScopedDbConnection(_configLoc.toString(), 30));
        ScopedDbConnection& conn = *connPtr;

        auto_ptr<DBClientCursor> cursor = conn->query(ChunkType::ConfigNS,
                                                      differ.configDiffQuery());

        if (!cursor.get()) {
            // The query gives us no more detail here, so report the error ourselves.
            *errMsg = str::stream() << "could not obtain chunk cursor for " << ns
                                    << " while loading metadata";

            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            connPtr->done();
            return false;
        }

        // Diff tracker should *always* find at least one chunk if the collection exists.
        int diffsApplied = differ.calculateConfigDiff(*cursor);

        if (diffsApplied > 0) {
            LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << ns
                   << " with version " << manager->_maxCollVersion << endl;

            manager->_maxShardVersion = versionMap[shard];
            manager->fillRanges();
            connPtr->done();
            return true;
        }
        else if (diffsApplied == 0) {
            *errMsg = str::stream() << "no chunks found when reloading " << ns
                                    << ", previous version was "
                                    << manager->_maxCollVersion.toString();
            warning() << *errMsg << endl;

            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            connPtr->done();
            return false;
        }
        else {
            // TODO: make this impossible by making sure we don't migrate / split on this
            // shard during the reload.
            // No chunks were found for the ns.
            *errMsg = str::stream() << "invalid chunks found when reloading " << ns
                                    << ", previous version was "
                                    << manager->_maxCollVersion.toString()
                                    << ", this should be rare";
            warning() << *errMsg << endl;

            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            connPtr->done();
            return false;
        }
    }
    catch (const DBException& e) {
        *errMsg = str::stream() << "caught exception accessing the config servers"
                                << causedBy(e);

        // We deliberately do not return connPtr to the pool, since it was involved
        // with the error here.

        return false;
    }
}
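// The diff tracker above avoids re-reading every chunk on reload: it queries only for
// chunks whose version is newer than the highest version already cached, then applies
// them over the existing map. A minimal self-contained sketch of that incremental
// reload idea follows; the toy types are illustrative only, and the real
// SCMConfigDiffTracker is considerably more involved.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct ToyChunk {
    std::string minKey;          // chunk range lower bound, also the map key
    unsigned long long version;  // stands in for the combined major/minor version
};

// Apply all chunks newer than 'maxVersion' to 'chunkMap', advancing 'maxVersion'.
int applyDiffs(const std::vector<ToyChunk>& configChunks,
               std::map<std::string, ToyChunk>& chunkMap,
               unsigned long long& maxVersion) {
    int applied = 0;
    for (const ToyChunk& chunk : configChunks) {
        if (chunk.version <= maxVersion) continue;  // already reflected locally
        chunkMap[chunk.minKey] = chunk;             // newer chunk overwrites the stale entry
        maxVersion = chunk.version;
        ++applied;
    }
    return applied;
}

int main() {
    std::map<std::string, ToyChunk> chunkMap = {{"a", {"a", 1}}, {"m", {"m", 2}}};
    unsigned long long maxVersion = 2;

    // A split of chunk "m" produced two newer chunks on the config server.
    std::vector<ToyChunk> fromConfig = {{"a", 1}, {"m", 3}, {"s", 4}};

    std::cout << applyDiffs(fromConfig, chunkMap, maxVersion) << " diffs applied, "
              << chunkMap.size() << " chunks, version " << maxVersion << "\n";
    // prints: 2 diffs applied, 3 chunks, version 4
    return 0;
}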
Status MetadataLoader::initCollection( const string& ns,
                                       const string& shard,
                                       CollectionMetadata* metadata ) const {
    //
    // Bring collection entry from the config server.
    //

    BSONObj collDoc;
    {
        try {
            ScopedDbConnection conn( _configLoc.toString(), 30 );
            collDoc = conn->findOne( CollectionType::ConfigNS, QUERY(CollectionType::ns()<<ns) );
            conn.done();
        }
        catch ( const DBException& e ) {
            string errMsg = str::stream() << "could not query collection metadata"
                                          << causedBy( e );

            // We deliberately do not return conn to the pool, since it was involved
            // with the error here.

            return Status( ErrorCodes::HostUnreachable, errMsg );
        }
    }

    string errMsg;

    if ( collDoc.isEmpty() ) {
        errMsg = str::stream() << "could not load metadata, collection " << ns
                               << " not found";
        warning() << errMsg << endl;
        return Status( ErrorCodes::NamespaceNotFound, errMsg );
    }

    CollectionType collInfo;
    if ( !collInfo.parseBSON( collDoc, &errMsg ) || !collInfo.isValid( &errMsg ) ) {
        errMsg = str::stream() << "could not parse metadata for collection " << ns
                               << causedBy( errMsg );
        warning() << errMsg << endl;
        return Status( ErrorCodes::FailedToParse, errMsg );
    }

    if ( collInfo.isDroppedSet() && collInfo.getDropped() ) {
        errMsg = str::stream() << "could not load metadata, collection " << ns
                               << " was dropped";
        warning() << errMsg << endl;
        return Status( ErrorCodes::NamespaceNotFound, errMsg );
    }

    if ( collInfo.isKeyPatternSet() && !collInfo.getKeyPattern().isEmpty() ) {
        // Sharded collection, need to load chunks

        metadata->_keyPattern = collInfo.getKeyPattern();
        metadata->_shardVersion = ChunkVersion( 0, 0, collInfo.getEpoch() );
        metadata->_collVersion = ChunkVersion( 0, 0, collInfo.getEpoch() );

        return Status::OK();
    }
    else if ( collInfo.isPrimarySet() && collInfo.getPrimary() == shard ) {
        // A collection with a non-default primary

        // Empty primary field not allowed if set
        dassert( collInfo.getPrimary() != "" );

        metadata->_keyPattern = BSONObj();
        metadata->_shardVersion = ChunkVersion( 1, 0, collInfo.getEpoch() );
        metadata->_collVersion = metadata->_shardVersion;

        return Status::OK();
    }
    else {
        // A collection with a primary that doesn't match this shard, or is empty; the
        // primary may have changed before we loaded.

        errMsg = str::stream() << "collection " << ns << " does not have a shard key "
                               << "and primary "
                               << ( collInfo.isPrimarySet() ? collInfo.getPrimary() : "" )
                               << " does not match this shard " << shard;

        warning() << errMsg << endl;

        metadata->_collVersion = ChunkVersion( 0, 0, OID() );

        return Status( ErrorCodes::RemoteChangeDetected, errMsg );
    }
}
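// A caller of initCollection() above would typically dispatch on the returned error
// code: NamespaceNotFound means the collection is gone (discard local metadata), while
// RemoteChangeDetected means config state moved underneath us (reload and retry). The
// sketch below is a hypothetical illustration of that dispatch, with a toy status type
// standing in for mongo::Status; it is not taken from the source.
#include <iostream>
#include <string>

enum class ToyErrorCode { OK, NamespaceNotFound, RemoteChangeDetected, HostUnreachable };

struct ToyStatus {
    ToyErrorCode code;
    std::string reason;
    bool isOK() const { return code == ToyErrorCode::OK; }
};

// Returns true if the caller should retry the metadata load.
bool handleLoadStatus(const ToyStatus& status) {
    if (status.isOK()) return false;  // metadata is usable as-is
    switch (status.code) {
        case ToyErrorCode::NamespaceNotFound:
            std::cout << "collection dropped: " << status.reason << "\n";
            return false;  // discard metadata, nothing to retry
        case ToyErrorCode::RemoteChangeDetected:
        case ToyErrorCode::HostUnreachable:
            std::cout << "transient: " << status.reason << "\n";
            return true;   // reload from the config server and retry
        default:
            return false;
    }
}

int main() {
    ToyStatus moved = {ToyErrorCode::RemoteChangeDetected, "primary moved"};
    std::cout << handleLoadStatus(moved) << "\n";  // prints the transient message, then 1
    return 0;
}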