// Service the queue of piece requests from this peer, sending each requested
// chunk that is available on disk.
// Returns the number of bytes uploaded since the previous call (the counter
// is reset on every invocation).
Uint32 PeerUploader::handleRequests(ChunkManager & cman)
{
	const Uint32 bytes_sent = uploaded;
	uploaded = 0;

	// A peer we have choked gets nothing; its requests stay queued.
	if (peer->areWeChoked())
		return bytes_sent;

	while (!requests.empty())
	{
		Request req = requests.front();
		Chunk* chunk = cman.getChunk(req.getIndex());

		bool sent = false;
		if (chunk && chunk->getStatus() == Chunk::ON_DISK)
			sent = peer->sendChunk(req.getIndex(), req.getOffset(), req.getLength(), chunk);
		else
			// We cannot satisfy this request, drop it.
			Out(SYS_CON|LOG_DEBUG) << "Cannot satisfy request" << endl;

		// Fast-extension peers expect an explicit reject for any request we drop.
		if (!sent && peer->getStats().fast_extensions)
			peer->sendReject(req);

		requests.pop_front();
	}
	return bytes_sent;
}
void handleIndexWrite( int op , Request& r ) { DbMessage& d = r.d(); if ( op == dbInsert ) { while( d.moreJSObjs() ) { BSONObj o = d.nextJsObj(); const char * ns = o["ns"].valuestr(); if ( r.getConfig()->isSharded( ns ) ) { BSONObj newIndexKey = o["key"].embeddedObjectUserCheck(); uassert( 10205 , (string)"can't use unique indexes with sharding ns:" + ns + " key: " + o["key"].embeddedObjectUserCheck().toString() , IndexDetails::isIdIndexPattern( newIndexKey ) || ! o["unique"].trueValue() || r.getConfig()->getChunkManager( ns )->getShardKey().uniqueAllowd( newIndexKey ) ); ChunkManager * cm = r.getConfig()->getChunkManager( ns ); assert( cm ); for ( int i=0; i<cm->numChunks(); i++) doWrite( op , r , cm->getChunk(i)->getShard() ); } else { doWrite( op , r , r.primaryShard() ); } r.gotInsert(); } } else if ( op == dbUpdate ) { throw UserException( 8050 , "can't update system.indexes" ); } else if ( op == dbDelete ) { // TODO throw UserException( 8051 , "can't delete indexes on sharded collection yet" ); } else { log() << "handleIndexWrite invalid write op: " << op << endl; throw UserException( 8052 , "handleIndexWrite invalid write op" ); } }