void WriteBatchExecutor::execUpdate( const BatchItemRef& updateItem, BSONObj* upsertedId, WriteErrorDetail** error ) { // BEGIN CURRENT OP scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, updateItem ) ); incOpStats( updateItem ); WriteOpResult result; WriteUnitOfWork wunit(_txn->recoveryUnit()); multiUpdate( _txn, updateItem, &result ); wunit.commit(); if ( !result.getStats().upsertedID.isEmpty() ) { *upsertedId = result.getStats().upsertedID; } // END CURRENT OP incWriteStats( updateItem, result.getStats(), result.getError(), currentOp.get() ); finishCurrentOp( _txn, _client, currentOp.get(), result.getError() ); if ( result.getError() ) { result.getError()->setIndex( updateItem.getItemIndex() ); *error = result.releaseError(); } }
void WriteBatchExecutor::execOneInsert(ExecInsertsState* state, WriteErrorDetail** error) { BatchItemRef currInsertItem(state->request, state->currIndex); scoped_ptr<CurOp> currentOp(beginCurrentOp(_client, currInsertItem)); incOpStats(currInsertItem); WriteOpResult result; insertOne(state, &result); if (state->hasLock()) { // Normally, unlocking records lock time stats on the active CurOp. However, // insertOne() may not release the lock. In that case, record time by hand. state->getLock().recordTime(); // If we deschedule here, there could be substantial unaccounted locked time. // Any time from here will be attributed to the next insert in the batch, or // not attributed to any operation if this is the last op in the batch. state->getLock().resetTime(); } incWriteStats(currInsertItem, result.getStats(), result.getError(), currentOp.get()); finishCurrentOp(_txn, _client, currentOp.get(), result.getError()); if (result.getError()) { *error = result.releaseError(); } }
void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem, WriteErrorDetail** error ) { // Removes are similar to updates, but page faults are handled externally // BEGIN CURRENT OP scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, removeItem ) ); incOpStats( removeItem ); WriteOpResult result; // NOTE: Deletes will not fault outside the lock once any data has been written PageFaultRetryableSection pageFaultSection; while ( true ) { try { multiRemove( removeItem, &result ); break; } catch (PageFaultException& pfe) { pfe.touch(); invariant(!result.getError()); continue; } fassertFailed(17429); } // END CURRENT OP incWriteStats( removeItem, result.getStats(), result.getError(), currentOp.get() ); finishCurrentOp( _client, currentOp.get(), result.getError() ); if ( result.getError() ) { result.getError()->setIndex( removeItem.getItemIndex() ); *error = result.releaseError(); } }
void WriteBatchExecutor::execUpdate( const BatchItemRef& updateItem,
                                     BSONObj* upsertedId,
                                     WriteErrorDetail** error ) {

    // Executes one update item from a batch under the DB write lock.  Any
    // upserted _id is reported via 'upsertedId'; on failure, ownership of the
    // error transfers to the caller through 'error'.

    // Updates currently do a lot of the lock management internally

    const BatchedCommandRequest& request = *updateItem.getRequest();
    const NamespaceString nss( updateItem.getRequest()->getNS() );

    // BEGIN CURRENT OP
    scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, updateItem ) );
    incOpStats( updateItem );

    WriteOpResult result;

    {
        ///////////////////////////////////////////
        Lock::DBWrite writeLock( nss.ns() );
        ///////////////////////////////////////////

        // Check version once we're locked

        if ( checkShardVersion( &shardingState, request, &result.error ) ) {

            // Context once we're locked, to set more details in currentOp()
            // TODO: better constructor?
            Client::Context writeContext( nss.ns(),
                                          storageGlobalParams.dbpath,
                                          false /* don't check version */);

            multiUpdate( updateItem, &result );
            incWriteStats( updateItem, result.stats, result.error, currentOp.get() );

            if ( !result.stats.upsertedID.isEmpty() ) {
                // getOwned(): copy the id out so it remains valid after
                // 'result' goes out of scope
                *upsertedId = result.stats.upsertedID.getOwned();
            }
        }
    } // DB write lock released here

    // END CURRENT OP
    finishCurrentOp( _client, currentOp.get(), result.error );

    if ( result.error ) {
        result.error->setIndex( updateItem.getItemIndex() );
        *error = result.releaseError();
    }
}
void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem, WriteErrorDetail** error ) { // Removes are similar to updates, but page faults are handled externally // BEGIN CURRENT OP scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, removeItem ) ); incOpStats( removeItem ); WriteOpResult result; multiRemove( _txn, removeItem, &result ); // END CURRENT OP incWriteStats( removeItem, result.getStats(), result.getError(), currentOp.get() ); finishCurrentOp( _txn, _client, currentOp.get(), result.getError() ); if ( result.getError() ) { result.getError()->setIndex( removeItem.getItemIndex() ); *error = result.releaseError(); } }
void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem,
                                     WriteErrorDetail** error ) {

    // Executes one remove item from a batch, retrying on page faults.  On
    // failure, ownership of the error transfers to the caller through 'error'.

    // Removes are similar to updates, but page faults are handled externally

    // BEGIN CURRENT OP
    scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, removeItem ) );
    incOpStats( removeItem );

    WriteOpResult result;

    // Retry loop: multiRemove() may report a page fault instead of completing;
    // when that happens we touch the faulting page (below) and run it again.
    while ( true ) {
        multiRemove( removeItem, &result );

        if ( !result.fault ) {
            incWriteStats( removeItem, result.stats, result.error, currentOp.get() );
            break;
        }

        //
        // Check page fault out of lock
        //

        dassert( result.fault );
        result.fault->touch();
        // Clear partial stats/error before retrying the remove.
        result.reset();
    }

    // END CURRENT OP
    finishCurrentOp( _client, currentOp.get(), result.error );

    if ( result.error ) {
        result.error->setIndex( removeItem.getItemIndex() );
        *error = result.releaseError();
    }
}
void WriteBatchExecutor::execUpdate( const BatchItemRef& updateItem, BSONObj* upsertedId, WriteErrorDetail** error ) { // BEGIN CURRENT OP scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, updateItem ) ); incOpStats( updateItem ); WriteOpResult result; multiUpdate( updateItem, &result ); incWriteStats( updateItem, result.stats, result.error, currentOp.get() ); if ( !result.stats.upsertedID.isEmpty() ) { *upsertedId = result.stats.upsertedID; } // END CURRENT OP finishCurrentOp( _client, currentOp.get(), result.error ); if ( result.error ) { result.error->setIndex( updateItem.getItemIndex() ); *error = result.releaseError(); } }
void WriteBatchExecutor::execInserts( const BatchedCommandRequest& request,
                                      std::vector<WriteErrorDetail*>* errors ) {

    // Executes all insert items in 'request'.  Per-item failures are appended
    // to 'errors' (ownership transferred); ordered batches stop at the first
    // error.

    // Bulk insert is a bit different from other bulk operations in that multiple request docs
    // can be processed at once inside the write lock.

    const NamespaceString nss( request.getTargetingNS() );
    scoped_ptr<BatchItemRef> currInsertItem( new BatchItemRef( &request, 0 ) );

    // Go through our request and do some preprocessing on insert documents outside the lock to
    // validate and put them in a normalized form - i.e. put _id in front and fill in
    // timestamps.  The insert document may also be invalid.
    // TODO: Might be more efficient to do in batches.
    vector<StatusWith<BSONObj> > normalInserts;
    normalizeInserts( request, &normalInserts );

    // Outer loop: one iteration per lock acquisition.  The inner loop below
    // processes as many consecutive inserts as possible while the lock is held;
    // faults and errors break out of the inner loop to release the lock.
    while ( currInsertItem->getItemIndex() < static_cast<int>( request.sizeWriteOps() ) ) {

        WriteOpResult currResult;

        // Don't (re-)acquire locks and create database until it's necessary
        if ( !normalInserts[currInsertItem->getItemIndex()].isOK() ) {
            currResult.error =
                toWriteError( normalInserts[currInsertItem->getItemIndex()].getStatus() );
        }
        else {

            PageFaultRetryableSection pFaultSection;

            ////////////////////////////////////
            Lock::DBWrite writeLock( nss.ns() );
            ////////////////////////////////////

            // Check version inside of write lock
            if ( checkIsMasterForCollection( nss, &currResult.error )
                 && checkShardVersion( &shardingState, request, &currResult.error )
                 && checkIndexConstraints( &shardingState, request, &currResult.error ) ) {

                //
                // Get the collection for the insert
                //

                scoped_ptr<Client::Context> writeContext;
                Collection* collection = NULL;

                try {
                    // Context once we're locked, to set more details in currentOp()
                    // TODO: better constructor?
                    writeContext.reset( new Client::Context( request.getNS(),
                                                             storageGlobalParams.dbpath,
                                                             false /* don't check version */) );

                    Database* database = writeContext->db();
                    dassert( database );
                    collection = database->getCollection( nss.ns() );

                    if ( !collection ) {
                        // Implicitly create if it doesn't exist
                        collection = database->createCollection( nss.ns() );
                        if ( !collection ) {
                            currResult.error =
                                toWriteError( Status( ErrorCodes::InternalError,
                                                      "could not create collection" ) );
                        }
                    }
                }
                catch ( const DBException& ex ) {
                    Status status(ex.toStatus());
                    if (ErrorCodes::isInterruption(status.code())) {
                        // Interruptions (e.g. killOp/shutdown) propagate out
                        throw;
                    }
                    currResult.error = toWriteError(status);
                }

                //
                // Perform writes inside write lock
                //

                while ( collection
                        && currInsertItem->getItemIndex()
                           < static_cast<int>( request.sizeWriteOps() ) ) {

                    //
                    // BEGIN CURRENT OP
                    //

                    scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, *currInsertItem ) );
                    incOpStats( *currInsertItem );

                    // Get the actual document we want to write, assuming it's valid
                    const StatusWith<BSONObj>& normalInsert = //
                        normalInserts[currInsertItem->getItemIndex()];

                    // An empty normalized doc means normalization left the
                    // original untouched; use the raw request document then.
                    const BSONObj& normalInsertDoc =
                        normalInsert.getValue().isEmpty() ?
                            currInsertItem->getDocument() : normalInsert.getValue();

                    if ( !normalInsert.isOK() ) {
                        // This insert failed on preprocessing
                        currResult.error = toWriteError( normalInsert.getStatus() );
                    }
                    else if ( !request.isInsertIndexRequest() ) {
                        // Try the insert
                        singleInsert( *currInsertItem, normalInsertDoc, collection, &currResult );
                    }
                    else {
                        // Try the create index
                        singleCreateIndex( *currInsertItem, normalInsertDoc, collection,
                                           &currResult );
                    }

                    //
                    // END CURRENT OP
                    //

                    finishCurrentOp( _client, currentOp.get(), currResult.error );

                    // Faults release the write lock
                    if ( currResult.fault )
                        break;

                    // In general, we might have stats and errors
                    incWriteStats( *currInsertItem, currResult.stats, currResult.error,
                                   currentOp.get() );

                    // Errors release the write lock
                    if ( currResult.error )
                        break;

                    // Increment in the write lock and reset the stats for next time
                    currInsertItem.reset( new BatchItemRef( &request,
                                                            currInsertItem->getItemIndex() + 1 ) );
                    currResult.reset();

                    // Destruct curop so that our parent curop is restored, so that we
                    // record the yield count in the parent.
                    currentOp.reset(NULL);

                    // yield sometimes
                    int micros = ClientCursor::suggestYieldMicros();
                    if (micros > 0) {
                        ClientCursor::staticYield(micros, "", NULL);
                    }
                }
            }

        } // END WRITE LOCK

        //
        // Store the current error if it exists
        //

        if ( currResult.error ) {

            errors->push_back( currResult.releaseError() );
            errors->back()->setIndex( currInsertItem->getItemIndex() );

            // Break early for ordered batches
            if ( request.getOrdered() )
                break;
        }

        //
        // Fault or increment
        //

        if ( currResult.fault ) {
            // Check page fault out of lock
            currResult.fault->touch();
        }
        else {
            // Increment if not a fault
            currInsertItem.reset( new BatchItemRef( &request,
                                                    currInsertItem->getItemIndex() + 1 ) );
        }
    }
}
void WriteBatchExecutor::execRemove( const BatchItemRef& removeItem,
                                     WriteErrorDetail** error ) {

    // Executes one remove item from a batch under the DB write lock, retrying
    // on page faults.  On failure, ownership of the error transfers to the
    // caller through 'error'.

    // Removes are similar to updates, but page faults are handled externally

    const BatchedCommandRequest& request = *removeItem.getRequest();
    const NamespaceString nss( removeItem.getRequest()->getNS() );

    // BEGIN CURRENT OP
    scoped_ptr<CurOp> currentOp( beginCurrentOp( _client, removeItem ) );
    incOpStats( removeItem );

    WriteOpResult result;

    // Retry loop: on a page fault we release the lock (inner scope ends),
    // touch the faulting page, and run the remove again.
    while ( true ) {
        {
            // NOTE: Deletes will not fault outside the lock once any data has been written
            PageFaultRetryableSection pFaultSection;

            ///////////////////////////////////////////
            Lock::DBWrite writeLock( nss.ns() );
            ///////////////////////////////////////////

            // Check version once we're locked

            if ( !checkShardVersion( &shardingState, request, &result.error ) ) {
                // Version error
                break;
            }

            // Context once we're locked, to set more details in currentOp()
            // TODO: better constructor?
            Client::Context writeContext( nss.ns(),
                                          storageGlobalParams.dbpath,
                                          false /* don't check version */);

            multiRemove( removeItem, &result );

            if ( !result.fault ) {
                incWriteStats( removeItem, result.stats, result.error, currentOp.get() );
                break;
            }
        } // lock released here, before touching the faulted page

        //
        // Check page fault out of lock
        //

        dassert( result.fault );
        result.fault->touch();
        // Clear partial stats/error before retrying the remove.
        result.reset();
    }

    // END CURRENT OP
    finishCurrentOp( _client, currentOp.get(), result.error );

    if ( result.error ) {
        result.error->setIndex( removeItem.getItemIndex() );
        *error = result.releaseError();
    }
}