void ShardChunkManager::_init( const string& configServer,
                               const string& ns,
                               const string& shardName,
                               ShardChunkManagerPtr oldManager ) {

    // have to get a connection to the config db
    // special case if I'm the configdb since I'm locked and if I connect to myself
    // its a deadlock
    scoped_ptr<ScopedDbConnection> scoped;
    scoped_ptr<DBDirectClient> direct;
    DBClientBase * conn;
    if ( configServer.empty() ) {
        direct.reset( new DBDirectClient() );
        conn = direct.get();
    }
    else {
        scoped.reset( ScopedDbConnection::getInternalScopedDbConnection( configServer, 30.0 ) );
        conn = scoped->get();
    }

    // get this collection's sharding key
    BSONObj collectionDoc = conn->findOne( "config.collections", BSON( "_id" << ns ) );

    if( collectionDoc.isEmpty() ){
        warning() << ns << " does not exist as a sharded collection" << endl;
        return;
    }

    if( collectionDoc["dropped"].Bool() ){
        warning() << ns << " was dropped. Re-shard collection first." << endl;
        return;
    }

    _fillCollectionKey( collectionDoc );

    map<string,ShardChunkVersion> versionMap;
    versionMap[ shardName ] = _version;
    _collVersion = ShardChunkVersion( 0, OID() );

    // Check to see if we have an old ShardChunkManager to use
    if( oldManager && oldManager->_collVersion.isSet() ){

        versionMap[ shardName ] = oldManager->_version;
        _collVersion = oldManager->_collVersion;
        // TODO: This could be made more efficient if copying not required, but not as
        // frequently reloaded as in mongos.
        _chunksMap = oldManager->_chunksMap;

        LOG(2) << "loading new chunks for collection " << ns
               << " using old chunk manager w/ version " << _collVersion
               << " and " << _chunksMap.size() << " chunks" << endl;
    }

    // Attach our config diff tracker to our range map and versions
    SCMConfigDiffTracker differ( shardName );
    differ.attach( ns, _chunksMap, _collVersion, versionMap );

    // Need to do the query ourselves, since we may use direct conns to the db
    Query query = differ.configDiffQuery();
    auto_ptr<DBClientCursor> cursor = conn->query( "config.chunks", query );

    uassert( 16181,
             str::stream() << "could not initialize cursor to config server chunks collection for ns " << ns,
             cursor.get() );

    // Diff tracker should *always* find at least one chunk if collection exists
    int diffsApplied = differ.calculateConfigDiff( *cursor );
    if( diffsApplied > 0 ){

        LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << ns
               << " with version " << _collVersion << endl;

        // Save the new version of this shard
        _version = versionMap[ shardName ];
        _fillRanges();
    }
    else if( diffsApplied == 0 ){

        // No chunks were found for the ns
        warning() << "no chunks found when reloading " << ns
                  << ", previous version was " << _collVersion << endl;

        _version = ShardChunkVersion( 0, OID() );
        _collVersion = ShardChunkVersion( 0, OID() );
        _chunksMap.clear();
    }
    else{

        // TODO: make this impossible by making sure we don't migrate / split on this shard
        // during the reload
        // No chunks were found for the ns
        warning() << "invalid chunks found when reloading " << ns
                  << ", previous version was " << _collVersion
                  << ", this should be rare" << endl;

        // Handle the same way as a connectivity error, for now
        // TODO: handle inline
        uassert( 16229,
                 str::stream() << "could not initialize cursor to config server chunks collection for ns " << ns,
                 cursor.get() );
    }

    if ( scoped.get() ) scoped->done();

    if ( _chunksMap.empty() )
        log() << "no chunk for collection " << ns << " on shard " << shardName << endl;
}
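// The loaders in this collection all follow the same incremental-reload pattern: keep a range
// map keyed by each chunk's max bound plus the highest chunk version seen so far, ask the
// config store only for chunks whose version is newer, and splice those diffs into the map.
// Below is a minimal, self-contained sketch of that idea in plain C++. It is NOT MongoDB's
// SCMConfigDiffTracker API; ChunkDoc, applyConfigDiff, and the integer key/version encoding
// are illustrative assumptions only.
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Hypothetical, simplified stand-in for a chunk document.
struct ChunkDoc {
    int minKey;        // inclusive lower bound of the chunk's key range
    int maxKey;        // exclusive upper bound, used as the map key
    uint64_t version;  // "lastmod": increases with every split/migrate
};

// Range map keyed by each chunk's max bound, mirroring _chunksMap above.
using RangeMap = std::map<int, ChunkDoc>;

// Apply a "config diff": chunks whose version is strictly newer than lastVersion.
// Stale overlapping entries are erased, the new chunk is inserted, and the highest
// version seen is returned so the next reload can query only newer changes.
uint64_t applyConfigDiff(RangeMap& ranges, uint64_t lastVersion,
                         const std::vector<ChunkDoc>& diff) {
    uint64_t maxSeen = lastVersion;
    for (const ChunkDoc& c : diff) {
        if (c.version <= lastVersion) continue;  // already reflected in the map
        // Erase every existing range overlapping [minKey, maxKey).
        auto it = ranges.upper_bound(c.minKey);
        while (it != ranges.end() && it->second.minKey < c.maxKey) {
            it = ranges.erase(it);
        }
        ranges[c.maxKey] = c;
        if (c.version > maxSeen) maxSeen = c.version;
    }
    return maxSeen;
}

int main() {
    RangeMap ranges;
    uint64_t version = 0;
    // Initial load: two chunks covering [0, 100).
    version = applyConfigDiff(ranges, version, {{0, 50, 1}, {50, 100, 2}});
    // Later reload: only the split of [50, 100) comes back as a diff.
    version = applyConfigDiff(ranges, version, {{50, 75, 3}, {75, 100, 4}});
    std::cout << ranges.size() << " chunks, version " << version << "\n";  // 3 chunks, version 4
    return 0;
}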
bool compareImage_v2(MyImage &img_logo, MyImage &img_pic) {
    int row_n = img_pic.getHeight() / BLOCK_SIZE;
    int col_n = img_pic.getWidth() / BLOCK_SIZE;

    int *his_logo = getHistogram(img_logo, 0, 0, img_logo.getHeight(), img_logo.getWidth());
    //int *his_logo = getHistogram_H(img_logo, 0, 0, img_logo.getHeight(), img_logo.getWidth());
    //int *his_pic = getHistogram_H(img_pic, 0, 0, img_pic.getHeight(), img_pic.getWidth());
    //print_arr(his_logo, histo_size);
    //print_arr(his_logo, histo_size_h);
    //delete his_pic;

    double *norm_logo = normalize(his_logo, histo_size);
    //double *norm_logo = normalize(his_logo, histo_size_h);
    //print_arr_d(norm_logo, histo_size);
    //print_arr_d(norm_logo, histo_size_h);

    int **block_histos = new int*[row_n * col_n];
    int total_blc = 0;
    for(int i = 0; i < img_pic.getHeight(); i += BLOCK_SIZE) {
        for(int j = 0; j < img_pic.getWidth(); j += BLOCK_SIZE) {
            //TRACE("i: %d, j: %d\n", i, j);
            block_histos[total_blc++] = getHistogram(img_pic, i, j, i + BLOCK_SIZE, j + BLOCK_SIZE);
            // print_arr(block_histos[total_blc - 1], H_N);
        }
    }

    int window_size = min(row_n, col_n);
    //double min_diff = 1000.0;
    //int min_x = -1, min_y = -1, min_size = -1;
    std::priority_queue<Box, std::vector<Box>, CompareBox> best_boxes;
    int max_heap_size = 5;

    while(window_size > 0) {
        for(int row = 0; row <= row_n - window_size; row++) {
            for(int col = 0; col <= col_n - window_size; col++) {
                int *local_histo = new int[histo_size];
                //int *local_histo = new int[histo_size_h];
                for(int x = 0; x < histo_size; x++) local_histo[x] = 0;
                //for(int x = 0; x < histo_size_h; x++) local_histo[x] = 0;

                // Sum the precomputed block histograms inside the current window.
                for(int x = row; x < row + window_size; x++) {
                    for(int y = col; y < col + window_size; y++) {
                        int block_index = x * col_n + y;
                        for(int z = 0; z < histo_size; z++)
                        //for(int z = 0; z < histo_size_h; z++)
                            local_histo[z] += block_histos[block_index][z];
                    }
                }

                double *norm_local = normalize(local_histo, histo_size);
                //double *norm_local = normalize(local_histo, histo_size_h);
                //print_arr_d(norm_local, H_N);
                //print_arr_d(norm_logo, H_N);
                double diff = differ(norm_local, norm_logo, histo_size);
                //double diff = differ(norm_local, norm_logo, histo_size_h);
                //TRACE("row: %d, col: %d, size: %d, diff: %lf\n", row, col, window_size, diff);
                /*
                if(row == 3 && col == 6 && window_size == 2) {
                    print_arr_d(norm_local, histo_size);
                    TRACE("diff: %lf\n", diff);
                }
                */

                // Keep the max_heap_size best (smallest-diff) windows; the heap's top is the worst.
                // Arrays are released with delete[] (the helpers are assumed to allocate with new[]).
                if(best_boxes.size() == max_heap_size && best_boxes.top().diff > diff) {
                    delete[] best_boxes.top().histogram;
                    best_boxes.pop();
                }
                if(best_boxes.size() < max_heap_size) {
                    Box new_box = {row, col, window_size, diff, local_histo};
                    //TRACE("r: %d, c: %d, size: %d, diff: %lf\n", new_box.row, new_box.col, new_box.len, new_box.diff);
                    best_boxes.push(new_box);
                } else
                    delete[] local_histo;
                delete[] norm_local;
            }
        }
        window_size--;
    }

    //TRACE("row: %d, col: %d, size: %d, diff: %lf\n", min_y, min_x, min_size, min_diff);
    //int length = min_size * BLOCK_SIZE;

    img_pic.RGBtoGray();
    img_logo.RGBtoGray();
    unsigned char **pyramid = create_img_pyr(img_logo, 0);
    int min_err = INT_MAX;  // was (1 << 31) - 1, which overflows a signed int; requires <climits>
    double min_diff = 10.0;
    //printf("here\n");

    while(!best_boxes.empty()) {
        Box best_box = best_boxes.top();
        TRACE("row: %d, col: %d, size: %d, diff: %lf\n", best_box.row, best_box.col, best_box.len, best_box.diff);
        //printf("row: %d, col: %d, size: %d, diff: %lf\n", best_box.row, best_box.col, best_box.len, best_box.diff);
        //print_arr(best_box.histogram, histo_size);
        //print_arr(best_box.histogram, histo_size_h);
        img_pic.DrawBox(best_box.row * BLOCK_SIZE, best_box.col * BLOCK_SIZE,
                        (best_box.row + best_box.len) * BLOCK_SIZE,
                        (best_box.col + best_box.len) * BLOCK_SIZE);
        best_boxes.pop();

        // The last box popped has the smallest diff; compare it pixel-wise against the logo pyramid.
        if(best_boxes.empty())
            min_err = diff_pic(pyramid[best_box.len - 1], best_box.len, img_pic,
                               best_box.row * BLOCK_SIZE, best_box.col * BLOCK_SIZE,
                               (best_box.row + best_box.len) * BLOCK_SIZE,
                               (best_box.col + best_box.len) * BLOCK_SIZE);
        //TRACE("cur_err: %d\n", cur_err);
        //printf("cur_err: %d\n", cur_err);
        //if(cur_err < min_err) min_err = cur_err;
        if(best_box.diff < min_diff) min_diff = best_box.diff;
        delete[] best_box.histogram;
    }

    TRACE("min_square_error: %d\n", min_err);
    TRACE("min_diff: %f\n", min_diff);

    delete[] his_logo;
    delete[] norm_logo;
    for(int i = 0; i < total_blc; i++) {
        delete[] block_histos[i];
    }
    for(int i = 0; i < 9; i++) {
        delete[] pyramid[i];
    }
    delete[] pyramid;
    delete[] block_histos;

    if(min_diff <= 0.02) return true;
    if(min_diff >= 0.33) return false;
    return min_err < 400000;
}
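// compareImage_v2 relies on several helpers that are not shown here (Box, CompareBox,
// normalize, differ). The following is a plausible minimal sketch of those pieces, under
// the assumption that the priority queue keeps the five windows with the smallest histogram
// distance (so the comparator puts the worst candidate on top) and that differ is an L1
// distance between normalized histograms. The real project may define them differently.
#include <cmath>
#include <queue>
#include <vector>

// Assumed candidate-window record matching the brace initialization
// {row, col, window_size, diff, local_histo} used above.
struct Box {
    int row;
    int col;
    int len;          // window size in blocks
    double diff;      // histogram distance to the logo
    int* histogram;   // owned raw histogram of the window
};

// Max-heap on diff: top() is the *worst* of the kept candidates, so it can be
// evicted when a better window shows up, exactly as the loop above does.
struct CompareBox {
    bool operator()(const Box& a, const Box& b) const { return a.diff < b.diff; }
};

// Assumed normalization: convert raw counts to frequencies (caller frees with delete[]).
inline double* normalize(const int* histo, int n) {
    long long total = 0;
    for (int i = 0; i < n; i++) total += histo[i];
    double* out = new double[n];
    for (int i = 0; i < n; i++) out[i] = total ? static_cast<double>(histo[i]) / total : 0.0;
    return out;
}

// Assumed distance: L1 difference between two normalized histograms.
inline double differ(const double* a, const double* b, int n) {
    double d = 0.0;
    for (int i = 0; i < n; i++) d += std::fabs(a[i] - b[i]);
    return d;
}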
bool ChunkManager::_load(OperationContext* txn,
                         ChunkMap& chunkMap,
                         set<ShardId>& shardIds,
                         ShardVersionMap* shardVersions,
                         const ChunkManager* oldManager) {
    // Reset the max version, but not the epoch, when we aren't loading from the oldManager
    _version = ChunkVersion(0, 0, _version.epoch());

    // If we have a previous version of the ChunkManager to work from, use that info to reduce
    // our config query
    if (oldManager && oldManager->getVersion().isSet()) {
        // Get the old max version
        _version = oldManager->getVersion();

        // Load a copy of the old versions
        *shardVersions = oldManager->_shardVersions;

        // Load a copy of the chunk map, replacing the chunk manager with our own
        const ChunkMap& oldChunkMap = oldManager->getChunkMap();

        // Could be v.expensive
        // TODO: If chunks were immutable and didn't reference the manager, we could do more
        // interesting things here
        for (const auto& oldChunkMapEntry : oldChunkMap) {
            shared_ptr<Chunk> oldC = oldChunkMapEntry.second;
            shared_ptr<Chunk> newC(new Chunk(
                this, oldC->getMin(), oldC->getMax(), oldC->getShardId(), oldC->getLastmod()));

            newC->setBytesWritten(oldC->getBytesWritten());

            chunkMap.insert(make_pair(oldC->getMax(), newC));
        }

        LOG(2) << "loading chunk manager for collection " << _ns
               << " using old chunk manager w/ version " << _version.toString() << " and "
               << oldChunkMap.size() << " chunks";
    }

    // Attach a diff tracker for the versioned chunk data
    CMConfigDiffTracker differ(this);
    differ.attach(_ns, chunkMap, _version, *shardVersions);

    // Diff tracker should *always* find at least one chunk if collection exists

    // Get the diff query required
    auto diffQuery = differ.configDiffQuery();

    repl::OpTime opTime;
    std::vector<ChunkType> chunks;
    uassertStatusOK(grid.catalogManager(txn)->getChunks(
        txn, diffQuery.query, diffQuery.sort, boost::none, &chunks, &opTime));

    invariant(opTime >= _configOpTime);
    _configOpTime = opTime;

    int diffsApplied = differ.calculateConfigDiff(txn, chunks);
    if (diffsApplied > 0) {
        LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << _ns
               << " with version " << _version;

        // Add all existing shards we find to the shards set
        for (ShardVersionMap::iterator it = shardVersions->begin(); it != shardVersions->end();) {
            shared_ptr<Shard> shard = grid.shardRegistry()->getShard(txn, it->first);
            if (shard) {
                shardIds.insert(it->first);
                ++it;
            } else {
                shardVersions->erase(it++);
            }
        }

        _configOpTime = opTime;

        return true;
    } else if (diffsApplied == 0) {
        // No chunks were found for the ns
        warning() << "no chunks found when reloading " << _ns << ", previous version was "
                  << _version;

        // Set all our data to empty
        chunkMap.clear();
        shardVersions->clear();

        _version = ChunkVersion(0, 0, OID());
        _configOpTime = opTime;

        return true;
    } else {  // diffsApplied < 0
        bool allInconsistent = (differ.numValidDiffs() == 0);
        if (allInconsistent) {
            // All versions are different, this can be normal
            warning() << "major change in chunk information found when reloading " << _ns
                      << ", previous version was " << _version;
        } else {
            // Inconsistent load halfway through (due to yielding cursor during load)
            // should be rare
            warning() << "inconsistent chunks found when reloading " << _ns
                      << ", previous version was " << _version << ", this should be rare";
        }

        // Set all our data to empty to be extra safe
        chunkMap.clear();
        shardVersions->clear();

        _version = ChunkVersion(0, 0, OID());

        return allInconsistent;
    }
}
vector<vector<string>> findLadders(string beginWord, string endWord, unordered_set<string> &wordList) {
    // Start typing your C/C++ solution below
    // DO NOT write int main() function
    vector<vector<string>> res;
    vector<string> temp;
    if (differ(beginWord, endWord) <= 1) {
        temp.push_back(beginWord);
        temp.push_back(endWord);
        res.push_back(temp);
        return res;
    }

    unordered_map<string, vector<string>> path;  // word -> its predecessors on shortest paths
    unordered_set<string> last_word;
    unordered_set<string> last_use;
    bool flag = false;
    string none;
    path[beginWord].push_back(none);  // empty-string sentinel marks the start word

    queue<pair<string, int>> q;
    q.push(pair<string, int>(beginWord, 1));
    wordList.erase(beginWord);

    string str, nxt;
    int cntStep;
    int lastStep = 0;
    int minStep;

    while (!q.empty()) {
        str = q.front().first;
        cntStep = q.front().second;
        if (flag && cntStep >= minStep)
            break;
        if (lastStep != cntStep) {
            // A new BFS level begins: words reached on the previous level can no longer
            // start a shorter ladder, so remove them from the dictionary now.
            lastStep = cntStep;
            for (auto it = last_use.begin(); it != last_use.end(); it++)
                wordList.erase(*it);
            last_use.clear();
        }
        q.pop();
        for (int i = 0; i < str.length(); i++)
            for (char j = 'a'; j <= 'z'; j++) {
                if (str[i] == j)
                    continue;
                nxt = str;
                nxt[i] = j;
                if (nxt == endWord) {
                    flag = true;
                    minStep = cntStep + 1;
                    if (last_word.count(str))
                        break;
                    last_word.insert(str);
                    deque<string> strpath;
                    strpath.push_front(endWord);
                    strpath.push_front(str);
                    dfs_insert(res, strpath, path, str);
                } else if (wordList.count(nxt)) {
                    last_use.insert(nxt);
                    path[nxt].push_back(str);
                    q.push(pair<string, int>(nxt, cntStep + 1));
                }
            }
    }
    return res;
}
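// findLadders calls two helpers that are not shown: differ, which here would count the
// positions at which two equal-length words disagree, and dfs_insert, which walks the
// predecessor map backwards from a word adjacent to endWord and records every completed
// ladder. A hedged sketch of both follows, matching how they are invoked above; the
// original implementations may differ.
#include <deque>
#include <string>
#include <unordered_map>
#include <vector>

// Assumed helper: number of positions at which two equal-length words differ.
static int differ(const std::string& a, const std::string& b) {
    if (a.size() != b.size()) return static_cast<int>(a.size() + b.size());  // treat as "far apart"
    int d = 0;
    for (size_t i = 0; i < a.size(); i++)
        if (a[i] != b[i]) d++;
    return d;
}

// Assumed helper: extend the partial ladder 'strpath' backwards through the predecessor
// map until the empty-string sentinel before beginWord is reached, then record the
// completed ladder in 'res'.
static void dfs_insert(std::vector<std::vector<std::string>>& res,
                       std::deque<std::string>& strpath,
                       std::unordered_map<std::string, std::vector<std::string>>& path,
                       const std::string& word) {
    for (const std::string& prev : path[word]) {
        if (prev.empty()) {  // sentinel pushed for beginWord: the ladder is complete
            res.emplace_back(strpath.begin(), strpath.end());
            continue;
        }
        strpath.push_front(prev);
        dfs_insert(res, strpath, path, prev);
        strpath.pop_front();
    }
}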
Status MetadataLoader::initChunks( const string& ns,
                                   const string& shard,
                                   const CollectionMetadata* oldMetadata,
                                   CollectionMetadata* metadata ) const
{
    map<string, ChunkVersion> versionMap;

    // Preserve the epoch
    versionMap[shard] = metadata->_shardVersion;
    OID epoch = metadata->getCollVersion().epoch();
    bool fullReload = true;

    // Check to see if we should use the old version or not.
    if ( oldMetadata ) {

        // If our epochs are compatible, it's useful to use the old metadata for diffs
        if ( oldMetadata->getCollVersion().hasCompatibleEpoch( epoch ) ) {

            fullReload = false;
            dassert( oldMetadata->isValid() );

            versionMap[shard] = oldMetadata->_shardVersion;
            metadata->_collVersion = oldMetadata->_collVersion;

            // TODO: This could be made more efficient if copying not required, but
            // not as frequently reloaded as in mongos.
            metadata->_chunksMap = oldMetadata->_chunksMap;

            LOG( 2 ) << "loading new chunks for collection " << ns
                     << " using old metadata w/ version " << oldMetadata->getShardVersion()
                     << " and " << metadata->_chunksMap.size() << " chunks" << endl;
        }
        else {
            warning() << "reloading collection metadata for " << ns << " with new epoch "
                      << epoch.toString() << ", the current epoch is "
                      << oldMetadata->getCollVersion().epoch().toString() << endl;
        }
    }

    // Exposes the new metadata's range map and version to the "differ," who
    // would ultimately be responsible of filling them up.
    SCMConfigDiffTracker differ( shard );
    differ.attach( ns, metadata->_chunksMap, metadata->_collVersion, versionMap );

    try {

        ScopedDbConnection conn( _configLoc.toString(), 30 );

        auto_ptr<DBClientCursor> cursor = conn->query( ChunkType::ConfigNS,
                                                       differ.configDiffQuery() );

        if ( !cursor.get() ) {

            // Make our metadata invalid
            metadata->_collVersion = ChunkVersion( 0, 0, OID() );
            metadata->_chunksMap.clear();
            conn.done();

            return Status( ErrorCodes::HostUnreachable,
                           "problem opening chunk metadata cursor" );
        }

        //
        // The diff tracker should always find at least one chunk (the highest chunk we saw
        // last time). If not, something has changed on the config server (potentially between
        // when we read the collection data and when we read the chunks data).
        //

        int diffsApplied = differ.calculateConfigDiff( *cursor );
        if ( diffsApplied > 0 ) {

            // Chunks found, return ok
            LOG(2) << "loaded " << diffsApplied << " chunks into new metadata for " << ns
                   << " with version " << metadata->_collVersion << endl;

            metadata->_shardVersion = versionMap[shard];
            metadata->fillRanges();
            conn.done();

            dassert( metadata->isValid() );
            return Status::OK();
        }
        else if ( diffsApplied == 0 ) {

            // No chunks found, the collection is dropping or we're confused
            // If this is a full reload, assume it is a drop for backwards compatibility
            // TODO: drop the config.collections entry *before* the chunks and eliminate this
            // ambiguity

            string errMsg =
                str::stream() << "no chunks found when reloading " << ns
                              << ", previous version was "
                              << metadata->_collVersion.toString()
                              << ( fullReload ? ", this is a drop" : "" );

            warning() << errMsg << endl;

            metadata->_collVersion = ChunkVersion( 0, 0, OID() );
            metadata->_chunksMap.clear();
            conn.done();

            return fullReload ? Status( ErrorCodes::NamespaceNotFound, errMsg ) :
                                Status( ErrorCodes::RemoteChangeDetected, errMsg );
        }
        else {

            // Invalid chunks found, our epoch may have changed because we dropped/recreated
            // the collection.

            string errMsg = // br
                str::stream() << "invalid chunks found when reloading " << ns
                              << ", previous version was "
                              << metadata->_collVersion.toString()
                              << ", this should be rare";

            warning() << errMsg << endl;

            metadata->_collVersion = ChunkVersion( 0, 0, OID() );
            metadata->_chunksMap.clear();
            conn.done();

            return Status( ErrorCodes::RemoteChangeDetected, errMsg );
        }
    }
    catch ( const DBException& e ) {
        string errMsg = str::stream() << "problem querying chunks metadata" << causedBy( e );

        // We deliberately do not return connPtr to the pool, since it was involved
        // with the error here.

        return Status( ErrorCodes::HostUnreachable, errMsg );
    }
}
int main(int argc, char *argv[]) {
    printf("Debut initialisation\n");

    /// Loading the objects
    // Meshes are no longer stored individually; they go into an array of loadable meshes.
    // The mesh id a pattern asks to load is simply its index in this array.
    mesh.push_back(new MeshObj("Others\\legoTexture.obj", NULL));
    mesh.push_back(new MeshObj("Others\\brique_lego.obj", NULL));
    printf("Chargement des objets réussi\n");

    /// GLUT initialization
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glClearColor(0, 0, 0, 0);
    glEnable(GL_CULL_FACE);
    glCullFace(GL_BACK);
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);
    glShadeModel(GL_SMOOTH);

    /// ARToolKit and window initialization + entering the main loop
    arInit();
    arVideoCapStart();
    glutPositionWindow((glutGet(GLUT_SCREEN_WIDTH) - cparam.xsize) / 2,
                       (glutGet(GLUT_SCREEN_HEIGHT) - cparam.ysize) / 2);
    glutReshapeFunc(resize);
    glutMotionFunc(mouseMove);

    // Init of the help menu (movement help)
    menu.addBoutton("img\\delete.png", true, 0, cparam.ysize - 75, 75, cparam.ysize);
    menu.addBoutton("img\\move.png", true, 75, cparam.ysize - 75, 150, cparam.ysize);
    menu.addBoutton("img\\resize.png", true, 150, cparam.ysize - 75, 225, cparam.ysize);

    // Init of the help/scan buttons
    // quit
    quit.addBoutton("img\\quit.png", true, cparam.xsize - 120, 30, cparam.xsize - 16, 54 + 30, true);
    quit.addBoutton("img\\quit1.png", true, cparam.xsize - 120, 30, cparam.xsize - 16, 54 + 30, false);
    difQuit = differ(2000);

    // help
    help.addBoutton("img\\aide1.png", true, cparam.xsize - 120, 54 + 35, cparam.xsize - 16, 54 + 54 + 35, true);
    help.addBoutton("img\\aide2.png", true, cparam.xsize - 120, 54 + 35, cparam.xsize - 16, 54 + 54 + 35, false); // enabled
    help.addBoutton("img\\aide3.png", true, cparam.xsize - 120, 54 + 35, cparam.xsize - 16, 54 + 54 + 35, false); // selected
    difAide = differ(2000);
    menuShow = false;

    // scan
    scan.addBoutton("img\\scan5.png", true, cparam.xsize - 120, 54 + 40 + 54, cparam.xsize - 16, 54 + 54 + 54 + 40, true);
    scan.addBoutton("img\\scan6.png", true, cparam.xsize - 120, 54 + 40 + 54, cparam.xsize - 16, 54 + 54 + 54 + 40, false);
    scan.addBoutton("img\\scan7.png", true, cparam.xsize - 120, 54 + 40 + 54, cparam.xsize - 16, 54 + 54 + 54 + 40, false);
    difScan = differ(2000);

    /*
    FMOD_System_Create(&systemSon);
    FMOD_System_Init(systemSon, 2, FMOD_INIT_NORMAL, NULL);
    if (!FMOD_System_CreateSound(systemSon, "Data\\mouseclickDown.wav", FMOD_CREATESAMPLE, 0, &clickDown))
        printf("chargement son: ok\n");
    else
        printf("chargement son: echec\n");
    if (!FMOD_System_CreateSound(systemSon, "Data\\mouseclickUp.wav", FMOD_CREATESAMPLE, 0, &clickUP))
        printf("chargement son: ok\n");
    else
        printf("chargement son: echec\n");
    */

    difIndex = differ(2000);
    difMajeur = differ(2000);
    printf("Fin initialisation\n");

    argMainLoop(mouseClick, key, mainLoop);
    return EXIT_SUCCESS;
}
void run() {

    int numShards = 10;
    int numInitialChunks = 5;
    int maxChunks = 100000; // Needed to not overflow the BSONArray's max bytes
    int keySize = 2;

    BSONArrayBuilder chunksB;

    BSONObj lastSplitPt;
    ShardChunkVersion version( 1, 0, OID() );

    //
    // Generate numChunks with a given key size over numShards
    // All chunks have double key values, so we can split them a bunch
    //

    for( int i = -1; i < numInitialChunks; i++ ){

        BSONObjBuilder splitPtB;
        for( int k = 0; k < keySize; k++ ){
            string field = string( "k" ) + string( 1, (char)('0' + k) );
            if( i < 0 ) splitPtB.appendMinKey( field );
            else if( i < numInitialChunks - 1 ) splitPtB.append( field, (double)i );
            else splitPtB.appendMaxKey( field );
        }
        BSONObj splitPt = splitPtB.obj();

        if( i >= 0 ){
            BSONObjBuilder chunkB;

            chunkB.append( "min", lastSplitPt );
            chunkB.append( "max", splitPt );

            int shardNum = rand( numShards );
            chunkB.append( "shard", "shard" + string( 1, (char)('A' + shardNum) ) );

            rand( 2 ) ? version.incMajor() : version.incMinor();
            version.addToBSON( chunkB, "lastmod" );

            chunksB.append( chunkB.obj() );
        }

        lastSplitPt = splitPt;
    }

    BSONArray chunks = chunksB.arr();

    // log() << "Chunks generated : " << chunks << endl;

    DBClientMockCursor chunksCursor( chunks );

    // Setup the empty ranges and versions first
    RangeMap ranges;
    ShardChunkVersion maxVersion = ShardChunkVersion( 0, 0, OID() );
    VersionMap maxShardVersions;

    // Create a differ which will track our progress
    boost::shared_ptr< DefaultDiffAdapter > differ( _inverse ? new InverseDiffAdapter() : new DefaultDiffAdapter() );
    differ->attach( "test", ranges, maxVersion, maxShardVersions );

    // Validate initial load
    differ->calculateConfigDiff( chunksCursor );
    validate( chunks, ranges, maxVersion, maxShardVersions );

    // Generate a lot of diffs, and keep validating that updating from the diffs always
    // gives us the right ranges and versions

    int numDiffs = 135; // Makes about 100000 chunks overall
    int numChunks = numInitialChunks;
    for( int i = 0; i < numDiffs; i++ ){

        // log() << "Generating new diff... " << i << endl;

        BSONArrayBuilder diffsB;
        BSONArrayBuilder newChunksB;

        BSONObjIterator chunksIt( chunks );

        while( chunksIt.more() ){

            BSONObj chunk = chunksIt.next().Obj();

            int randChoice = rand( 10 );

            if( randChoice < 2 && numChunks < maxChunks ){
                // Simulate a split

                // log() << " ...starting a split with chunk " << chunk << endl;

                BSONObjBuilder leftB;
                BSONObjBuilder rightB;
                BSONObjBuilder midB;

                for( int k = 0; k < keySize; k++ ){
                    string field = string( "k" ) + string( 1, (char)('0' + k) );

                    BSONType maxType = chunk["max"].Obj()[field].type();
                    double max = maxType == NumberDouble ? chunk["max"].Obj()[field].Number() : 0.0;
                    BSONType minType = chunk["min"].Obj()[field].type();
                    double min = minType == NumberDouble ? chunk["min"].Obj()[field].Number() : 0.0;

                    if( minType == MinKey ){
                        midB.append( field, max - 1.0 );
                    }
                    else if( maxType == MaxKey ){
                        midB.append( field, min + 1.0 );
                    }
                    else {
                        midB.append( field, ( max + min ) / 2.0 );
                    }
                }

                BSONObj midPt = midB.obj();
                // Only happens if we can't split the min chunk
                if( midPt.isEmpty() ) continue;

                leftB.append( chunk["min"] );
                leftB.append( "max", midPt );
                rightB.append( "min", midPt );
                rightB.append( chunk["max"] );

                leftB.append( chunk["shard"] );
                rightB.append( chunk["shard"] );

                version.incMajor();
                version._minor = 0;
                version.addToBSON( leftB, "lastmod" );
                version.incMinor();
                version.addToBSON( rightB, "lastmod" );

                BSONObj left = leftB.obj();
                BSONObj right = rightB.obj();

                // log() << " ... split into " << left << " and " << right << endl;

                newChunksB.append( left );
                newChunksB.append( right );

                diffsB.append( right );
                diffsB.append( left );

                numChunks++;
            }
            else if( randChoice < 4 && chunksIt.more() ){
                // Simulate a migrate

                // log() << " ...starting a migrate with chunk " << chunk << endl;

                BSONObj prevShardChunk;
                while( chunksIt.more() ){
                    prevShardChunk = chunksIt.next().Obj();
                    if( prevShardChunk["shard"].String() == chunk["shard"].String() ) break;

                    // log() << "... appending chunk from diff shard: " << prevShardChunk << endl;
                    newChunksB.append( prevShardChunk );

                    prevShardChunk = BSONObj();
                }

                // We need to move between different shards, hence the weirdness in logic here
                if( ! prevShardChunk.isEmpty() ){

                    BSONObjBuilder newShardB;
                    BSONObjBuilder prevShardB;

                    newShardB.append( chunk["min"] );
                    newShardB.append( chunk["max"] );
                    prevShardB.append( prevShardChunk["min"] );
                    prevShardB.append( prevShardChunk["max"] );

                    int shardNum = rand( numShards );
                    newShardB.append( "shard", "shard" + string( 1, (char)('A' + shardNum) ) );
                    prevShardB.append( prevShardChunk["shard"] );

                    version.incMajor();
                    version._minor = 0;
                    version.addToBSON( newShardB, "lastmod" );
                    version.incMinor();
                    version.addToBSON( prevShardB, "lastmod" );

                    BSONObj newShard = newShardB.obj();
                    BSONObj prevShard = prevShardB.obj();

                    // log() << " ... migrated to " << newShard << " and updated " << prevShard << endl;

                    newChunksB.append( newShard );
                    newChunksB.append( prevShard );

                    diffsB.append( newShard );
                    diffsB.append( prevShard );
                }
                else{
                    // log() << "... appending chunk, no more left: " << chunk << endl;
                    newChunksB.append( chunk );
                }
            }
            else{
                // log() << "Appending chunk : " << chunk << endl;
                newChunksB.append( chunk );
            }
        }

        BSONArray diffs = diffsB.arr();
        chunks = newChunksB.arr();

        // log() << "Diffs generated : " << diffs << endl;
        // log() << "All chunks : " << chunks << endl;

        // Rarely entirely clear out our data
        if( rand( 10 ) < 1 ){
            diffs = chunks;
            ranges.clear();
            maxVersion = ShardChunkVersion( 0, 0, OID() );
            maxShardVersions.clear();
        }

        // log() << "Total number of chunks : " << numChunks << " iteration " << i << endl;

        DBClientMockCursor diffCursor( diffs );

        differ->calculateConfigDiff( diffCursor );

        validate( chunks, ranges, maxVersion, maxShardVersions );
    }
}
bool MetadataLoader::initChunks(const CollectionType& collDoc,
                                const string& ns,
                                const string& shard,
                                const CollectionManager* oldManager,
                                CollectionManager* manager,
                                string* errMsg) {

    map<string, ChunkVersion> versionMap;
    manager->_maxCollVersion = ChunkVersion(0, 0, collDoc.getEpoch());

    // Check to see if we should use the old version or not.
    if (oldManager) {
        ChunkVersion oldVersion = oldManager->getMaxShardVersion();

        if (oldVersion.isSet() && oldVersion.hasCompatibleEpoch(collDoc.getEpoch())) {

            // Our epoch for coll version and shard version should be the same.
            verify(oldManager->getMaxCollVersion().hasCompatibleEpoch(collDoc.getEpoch()));

            versionMap[shard] = oldManager->_maxShardVersion;
            manager->_maxCollVersion = oldManager->_maxCollVersion;

            // TODO: This could be made more efficient if copying not required, but
            // not as frequently reloaded as in mongos.
            manager->_chunksMap = oldManager->_chunksMap;

            LOG(2) << "loading new chunks for collection " << ns
                   << " using old chunk manager w/ version "
                   << oldManager->getMaxShardVersion()
                   << " and " << manager->_chunksMap.size() << " chunks" << endl;
        }
    }

    // Exposes the new 'manager's range map and version to the "differ," who
    // would ultimately be responsible of filling them up.
    SCMConfigDiffTracker differ(shard);
    differ.attach(ns, manager->_chunksMap, manager->_maxCollVersion, versionMap);

    try {
        ScopedDbConnection conn(_configLoc.toString(), 30);

        auto_ptr<DBClientCursor> cursor = conn->query(ChunkType::ConfigNS,
                                                      differ.configDiffQuery());

        if (!cursor.get()) {
            // 'errMsg' was filled by the getChunkCursor() call.
            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            conn.done();
            return false;
        }

        // Diff tracker should *always* find at least one chunk if this shard owns a chunk.
        int diffsApplied = differ.calculateConfigDiff(*cursor);
        if (diffsApplied > 0) {
            LOG(2) << "loaded " << diffsApplied
                   << " chunks into new chunk manager for " << ns
                   << " with version " << manager->_maxCollVersion << endl;

            manager->_maxShardVersion = versionMap[shard];
            manager->fillRanges();
            conn.done();
            return true;
        }
        else if (diffsApplied == 0) {
            warning() << "no chunks found when reloading " << ns
                      << ", previous version was "
                      << manager->_maxCollVersion.toString() << endl;

            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            conn.done();
            return true;
        }
        else {
            // TODO: make this impossible by making sure we don't migrate / split on this
            // shard during the reload. No chunks were found for the ns.

            *errMsg = str::stream() << "invalid chunks found when reloading " << ns
                                    << ", previous version was "
                                    << manager->_maxCollVersion.toString()
                                    << ", this should be rare";

            // Dereference errMsg: streaming the string* itself would only log an address.
            warning() << *errMsg << endl;

            manager->_maxCollVersion = ChunkVersion();
            manager->_chunksMap.clear();
            conn.done();
            return false;
        }
    }
    catch (const DBException& e) {
        *errMsg = str::stream() << "caught exception accessing the config servers"
                                << causedBy(e);

        // We deliberately do not return connPtr to the pool, since it was involved
        // with the error here.
        return false;
    }
}