void ChunkManager::drop(){
    uassert( 10174 , "config servers not all up" , configServer.allUp() );

    map<string,ShardChunkVersion> seen;

    log(1) << "ChunkManager::drop : " << _ns << endl;

    // lock all shards so no one can do a split/migrate
    for ( vector<Chunk*>::const_iterator i=_chunks.begin(); i!=_chunks.end(); i++ ){
        Chunk * c = *i;

        ShardChunkVersion& version = seen[ c->getShard() ];
        if ( version )
            continue;
        version = lockNamespaceOnServer( c->getShard() , _ns );
        if ( version )
            continue;

        // rollback
        uassert( 10175 , "don't know how to rollback locks b/c drop can't lock all shards" , 0 );
    }

    log(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;

    // wipe my meta-data
    _chunks.clear();

    // delete data from mongod
    for ( map<string,ShardChunkVersion>::iterator i=seen.begin(); i!=seen.end(); i++ ){
        string shard = i->first;
        ScopedDbConnection conn( shard );
        conn->dropCollection( _ns );
        conn.done();
    }

    log(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;

    // clean up database meta-data
    uassert( 10176 , "no sharding data?" , _config->removeSharding( _ns ) );
    _config->save();

    // remove chunk data
    Chunk temp(0);
    ScopedDbConnection conn( temp.modelServer() );
    conn->remove( temp.getNS() , BSON( "ns" << _ns ) );
    conn.done();

    log(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;

    // reset the shard version on each shard that held data for this namespace
    for ( map<string,ShardChunkVersion>::iterator i=seen.begin(); i!=seen.end(); i++ ){
        ScopedDbConnection conn( i->first );
        BSONObj res;
        if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) )
            throw UserException( 8071 , (string)"OH KNOW, cleaning up after drop failed: " + res.toString() );
        conn.done();
    }

    log(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
}
Shard * Shard::split( const BSONObj& m ){
    uassert( "can't split as shard that doesn't have a manager" , _manager );

    log(1) << " before split on: " << m << "\n"
           << "\t self : " << toString() << endl;

    uassert( "locking namespace on server failed" , lockNamespaceOnServer( getServer() , _ns ) );

    Shard * s = new Shard( _manager );
    s->_ns = _ns;
    s->_server = _server;
    s->_min = m.getOwned();
    s->_max = _max;

    s->_markModified();
    _markModified();

    _manager->_shards.push_back( s );

    _max = m.getOwned();

    log(1) << " after split:\n"
           << "\t left : " << toString() << "\n"
           << "\t right: " << s->toString() << endl;

    _manager->save();

    return s;
}
Chunk * Chunk::split( const BSONObj& m ){
    uassert( 10165 , "can't split as shard that doesn't have a manager" , _manager );

    log(1) << " before split on: " << m << "\n"
           << "\t self : " << toString() << endl;

    uassert( 10166 , "locking namespace on server failed" , lockNamespaceOnServer( getShard() , _ns ) );

    Chunk * s = new Chunk( _manager );
    s->_ns = _ns;
    s->_shard = _shard;
    s->setMin(m.getOwned());
    s->setMax(_max);

    s->_markModified();
    _markModified();

    _manager->_chunks.push_back( s );

    setMax(m.getOwned());

    log(1) << " after split:\n"
           << "\t left : " << toString() << "\n"
           << "\t right: " << s->toString() << endl;

    _manager->save();

    return s;
}
bool lockNamespaceOnServer( const Shard& shard, const string& ns ){
    ScopedDbConnection conn( shard.getConnString() );
    bool res = lockNamespaceOnServer( conn.conn() , ns );
    conn.done();
    return res;
}
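// Illustrative sketch only, not part of the original source: one way a caller
// might use the Shard overload above to lock a namespace on every shard before
// a metadata change, in the spirit of the locking loop at the top of
// ChunkManager::drop(). The helper name, the `shards` parameter, and the
// placeholder error code 0 are assumptions, not names from the real codebase.
static void lockNamespaceOnAllShards( const vector<Shard>& shards , const string& ns ){
    for ( vector<Shard>::const_iterator i = shards.begin(); i != shards.end(); i++ ){
        // abort before touching any metadata if even one shard can't be locked
        uassert( 0 /* placeholder error code */ , "locking namespace on server failed" ,
                 lockNamespaceOnServer( *i , ns ) );
    }
}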