void BatchSafeWriter::safeWriteBatch( DBClientBase* conn, const BatchedCommandRequest& request, BatchedCommandResponse* response ) { // N starts at zero, and we add to it for each item response->setN( 0 ); for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) { BatchItemRef itemRef( &request, static_cast<int>( i ) ); LastError lastError; _safeWriter->safeWrite( conn, itemRef, &lastError ); // Register the error if we need to BatchedErrorDetail* batchError = lastErrorToBatchError( lastError ); if ( batchError ) { batchError->setIndex( i ); response->addToErrDetails( batchError ); } response->setN( response->getN() + lastError.nObjects ); if ( !lastError.upsertedId.isEmpty() ) { BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail; upsertedId->setIndex( i ); upsertedId->setUpsertedID( lastError.upsertedId ); response->addToUpsertDetails( upsertedId ); } // Break on first error if we're ordered if ( request.getOrdered() && BatchSafeWriter::isFailedOp( lastError ) ) break; } if ( request.sizeWriteOps() == 1 && response->isErrDetailsSet() && !response->isErrCodeSet() ) { // Promote single error to batch error const BatchedErrorDetail* error = response->getErrDetailsAt( 0 ); response->setErrCode( error->getErrCode() ); if ( error->isErrInfoSet() ) response->setErrInfo( error->getErrInfo() ); response->setErrMessage( error->getErrMessage() ); response->unsetErrDetails(); } if ( request.sizeWriteOps() == 1 && response->isUpsertDetailsSet() ) { // Promote single upsert to batch upsert const BatchedUpsertDetail* upsertedId = response->getUpsertDetailsAt( 0 ); response->setSingleUpserted( upsertedId->getUpsertedID() ); response->unsetUpsertDetails(); } response->setOk( !response->isErrCodeSet() ); dassert( response->isValid( NULL ) ); }
void WriteBatchExecutor::bulkExecute( const BatchedCommandRequest& request, std::vector<BatchedUpsertDetail*>* upsertedIds, std::vector<WriteErrorDetail*>* errors ) { if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert ) { execInserts( request, errors ); } else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) { for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) { WriteErrorDetail* error = NULL; BSONObj upsertedId; execUpdate( BatchItemRef( &request, i ), &upsertedId, &error ); if ( !upsertedId.isEmpty() ) { BatchedUpsertDetail* batchUpsertedId = new BatchedUpsertDetail; batchUpsertedId->setIndex( i ); batchUpsertedId->setUpsertedID( upsertedId ); upsertedIds->push_back( batchUpsertedId ); } if ( error ) { errors->push_back( error ); if ( request.getOrdered() ) break; } } } else { dassert( request.getBatchType() == BatchedCommandRequest::BatchType_Delete ); for ( size_t i = 0; i < request.sizeWriteOps(); i++ ) { WriteErrorDetail* error = NULL; execRemove( BatchItemRef( &request, i ), &error ); if ( error ) { errors->push_back( error ); if ( request.getOrdered() ) break; } } } // Fill in stale version errors for unordered batches (update/delete can't do this on own) if ( !errors->empty() && !request.getOrdered() ) { const WriteErrorDetail* finalError = errors->back(); if ( finalError->getErrCode() == ErrorCodes::StaleShardVersion ) { for ( size_t i = finalError->getIndex() + 1; i < request.sizeWriteOps(); i++ ) { WriteErrorDetail* dupStaleError = new WriteErrorDetail; finalError->cloneTo( dupStaleError ); errors->push_back( dupStaleError ); } } } }
/**
 * Records the response of one targeted child batch against the parent batch state.
 *
 * - A command-level failure (ok:0) is converted into a WriteErrorDetail and applied to
 *   every write in the child batch via noteBatchError (no per-shard error tracking).
 * - Otherwise: per-item errors are matched to the child batch's writes by index;
 *   writes without an error are completed, except that in an ordered batch every
 *   write after the first error is cancelled so it can be retargeted.
 * - Write concern errors are stashed in _wcErrors for later aggregation.
 * - Upserted ids are remapped from child-batch indexes to parent-batch indexes and
 *   stored in _upsertedIds.
 *
 * 'trackedErrors' may be NULL; if set, per-item errors of interest are recorded
 * against the shard endpoint for the caller (e.g. stale-version handling).
 */
void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
                                     const BatchedCommandResponse& response,
                                     TrackedErrors* trackedErrors) {
    if (!response.getOk()) {
        WriteErrorDetail error;
        cloneCommandErrorTo(response, &error);

        // Treat command errors exactly like other failures of the batch
        // Note that no errors will be tracked from these failures - as-designed
        noteBatchError(targetedBatch, error);
        return;
    }

    dassert(response.getOk());

    // Stop tracking targeted batch
    _targeted.erase(&targetedBatch);

    // Increment stats for this batch
    incBatchStats(_clientRequest->getBatchType(), response, _stats.get());

    //
    // Assign errors to particular items.
    // Write Concern errors are stored and handled later.
    //

    // Special handling for write concern errors, save for later
    if (response.isWriteConcernErrorSet()) {
        unique_ptr<ShardWCError> wcError(
            new ShardWCError(targetedBatch.getEndpoint(), *response.getWriteConcernError()));
        // _wcErrors takes ownership of the released pointer.
        _wcErrors.mutableVector().push_back(wcError.release());
    }

    // Non-owning views into the response's error details.
    vector<WriteErrorDetail*> itemErrors;

    // Handle batch and per-item errors
    if (response.isErrDetailsSet()) {
        // Per-item errors were set
        itemErrors.insert(
            itemErrors.begin(), response.getErrDetails().begin(), response.getErrDetails().end());

        // Sort per-item errors by index so they can be consumed in one forward pass below.
        std::sort(itemErrors.begin(), itemErrors.end(), WriteErrorDetailComp());
    }

    //
    // Go through all pending responses of the op and sorted remote responses, populate errors
    // This will either set all errors to the batch error or apply per-item errors as-needed
    //
    // If the batch is ordered, cancel all writes after the first error for retargeting.
    //

    bool ordered = _clientRequest->getOrdered();

    // Cursor into the sorted per-item errors; advanced in lock-step with 'index'.
    vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
    int index = 0;
    WriteErrorDetail* lastError = NULL;
    for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
         it != targetedBatch.getWrites().end();
         ++it, ++index) {
        const TargetedWrite* write = *it;
        // writeOpRef.first is the parent-batch index of this write.
        WriteOp& writeOp = _writeOps[write->writeOpRef.first];

        dassert(writeOp.getWriteState() == WriteOpState_Pending);

        // See if we have an error for the write
        WriteErrorDetail* writeError = NULL;

        if (itemErrorIt != itemErrors.end() && (*itemErrorIt)->getIndex() == index) {
            // We have a per-item error for this write op's index
            writeError = *itemErrorIt;
            ++itemErrorIt;
        }

        // Finish the response (with error, if needed)
        if (NULL == writeError) {
            if (!ordered || !lastError) {
                writeOp.noteWriteComplete(*write);
            } else {
                // We didn't actually apply this write - cancel so we can retarget
                dassert(writeOp.getNumTargeted() == 1u);
                writeOp.cancelWrites(lastError);
            }
        } else {
            writeOp.noteWriteError(*write, *writeError);
            lastError = writeError;
        }
    }

    // Track errors we care about, whether batch or individual errors
    if (NULL != trackedErrors) {
        trackErrors(targetedBatch.getEndpoint(), itemErrors, trackedErrors);
    }

    // Track upserted ids if we need to
    if (response.isUpsertDetailsSet()) {
        const vector<BatchedUpsertDetail*>& upsertedIds = response.getUpsertDetails();

        for (vector<BatchedUpsertDetail*>::const_iterator it = upsertedIds.begin();
             it != upsertedIds.end();
             ++it) {
            // The child upserted details don't have the correct index for the full batch
            const BatchedUpsertDetail* childUpsertedId = *it;

            // Work backward from the child batch item index to the batch item index
            int childBatchIndex = childUpsertedId->getIndex();
            int batchIndex = targetedBatch.getWrites()[childBatchIndex]->writeOpRef.first;

            // Push the upserted id with the correct index into the batch upserted ids
            // (owned by _upsertedIds once pushed).
            BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
            upsertedId->setIndex(batchIndex);
            upsertedId->setUpsertedID(childUpsertedId->getUpsertedID());
            _upsertedIds.mutableVector().push_back(upsertedId);
        }
    }
}
/**
 * Executes each write op of 'request' via the GLE (getLastError) protocol, building
 * a write-command-style 'response'.
 *
 * Write concern handling is piggybacked on the *last* item's GLE when possible: the
 * request's write concern is only attached to the final safeWrite.  If that last GLE
 * also reported a write error (other than stale config), the write concern was not
 * actually enforced by it, so an extra enforceWriteConcern round-trip is issued.
 *
 * A transport/parse failure at any point clears the response and returns a
 * command-level (ok:0) error.
 */
void BatchSafeWriter::safeWriteBatch( DBClientBase* conn,
                                      const BatchedCommandRequest& request,
                                      BatchedCommandResponse* response ) {

    const NamespaceString nss( request.getNS() );

    // N starts at zero, and we add to it for each item
    response->setN( 0 );

    for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) {

        // Break on first error if we're ordered
        if ( request.getOrdered() && response->isErrDetailsSet() )
            break;

        BatchItemRef itemRef( &request, static_cast<int>( i ) );
        bool isLastItem = ( i == request.sizeWriteOps() - 1 );

        // Only the last item carries the request's write concern (see header comment).
        BSONObj writeConcern;
        if ( isLastItem && request.isWriteConcernSet() ) {
            writeConcern = request.getWriteConcern();
            // Pre-2.4.2 mongods react badly to 'w' being set on config servers
            if ( nss.db() == "config" )
                writeConcern = fixWCForConfig( writeConcern );
        }

        BSONObj gleResult;
        GLEErrors errors;
        Status status = _safeWriter->safeWrite( conn, itemRef, writeConcern, &gleResult );
        if ( status.isOK() ) {
            status = extractGLEErrors( gleResult, &errors );
        }

        if ( !status.isOK() ) {
            // Could not even get a GLE result - report a command-level failure.
            response->clear();
            response->setOk( false );
            response->setErrCode( status.code() );
            response->setErrMessage( status.reason() );
            return;
        }

        //
        // STATS HANDLING
        //

        GLEStats stats;
        extractGLEStats( gleResult, &stats );

        // Special case for making legacy "n" field result for insert match the write
        // command result.
        if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert
             && !errors.writeError.get() ) {
            // n is always 0 for legacy inserts.
            dassert( stats.n == 0 );
            stats.n = 1;
        }

        response->setN( response->getN() + stats.n );

        if ( !stats.upsertedId.isEmpty() ) {
            // Ownership transfers to the response via addToUpsertDetails.
            BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
            upsertedId->setIndex( i );
            upsertedId->setUpsertedID( stats.upsertedId );
            response->addToUpsertDetails( upsertedId );
        }

        // Last opTime wins; each iteration overwrites with the most recent op.
        response->setLastOp( stats.lastOp );

        //
        // WRITE ERROR HANDLING
        //

        // If any error occurs (except stale config) the previous GLE was not enforced
        bool enforcedWC = !errors.writeError.get()
                          || errors.writeError->getErrCode() == ErrorCodes::StaleShardVersion;

        // Save write error
        if ( errors.writeError.get() ) {
            errors.writeError->setIndex( i );
            response->addToErrDetails( errors.writeError.release() );
        }

        //
        // WRITE CONCERN ERROR HANDLING
        //

        // The last write is weird, since we enforce write concern and check the error
        // through the same GLE if possible.  If the last GLE was an error, the write
        // concern may not have been enforced in that same GLE, so we need to send
        // another after resetting the error.
        if ( isLastItem ) {

            // Try to enforce the write concern if everything succeeded (unordered or
            // ordered) OR if something succeeded and we're unordered.
            bool needToEnforceWC =
                !response->isErrDetailsSet()
                || ( !request.getOrdered()
                     && response->sizeErrDetails() < request.sizeWriteOps() );

            if ( !enforcedWC && needToEnforceWC ) {
                // writeError was released into the response above, so it must be NULL.
                dassert( !errors.writeError.get() ); // emptied above

                // Might have gotten a write concern validity error earlier, these are
                // enforced even if the wc isn't applied, so we ignore.
                errors.wcError.reset();

                // NOTE: shadows the outer 'status' intentionally - a fresh status for
                // the enforcement round-trip.
                Status status = _safeWriter->enforceWriteConcern( conn,
                                                                  nss.db().toString(),
                                                                  writeConcern,
                                                                  &gleResult );

                if ( status.isOK() ) {
                    status = extractGLEErrors( gleResult, &errors );
                }

                if ( !status.isOK() ) {
                    response->clear();
                    response->setOk( false );
                    response->setErrCode( status.code() );
                    response->setErrMessage( status.reason() );
                    return;
                }
            } // END Write concern retry

            if ( errors.wcError.get() ) {
                response->setWriteConcernError( errors.wcError.release() );
            }
        }
    }

    // ok:1 even with per-item errors; only transport failures above return ok:0.
    response->setOk( true );
    dassert( response->isValid( NULL ) );
}
void BatchSafeWriter::safeWriteBatch( DBClientBase* conn, const BatchedCommandRequest& request, BatchedCommandResponse* response ) { const NamespaceString nss( request.getNS() ); // N starts at zero, and we add to it for each item response->setN( 0 ); // GLE path always sets nModified to -1 (sentinel) to indicate we should omit it later. response->setNModified(-1); for ( size_t i = 0; i < request.sizeWriteOps(); ++i ) { // Break on first error if we're ordered if ( request.getOrdered() && response->isErrDetailsSet() ) break; BatchItemRef itemRef( &request, static_cast<int>( i ) ); BSONObj gleResult; GLEErrors errors; Status status = _safeWriter->safeWrite( conn, itemRef, WriteConcernOptions::Acknowledged, &gleResult ); if ( status.isOK() ) { status = extractGLEErrors( gleResult, &errors ); } if ( !status.isOK() ) { response->clear(); response->setOk( false ); response->setErrCode( ErrorCodes::RemoteResultsUnavailable ); StringBuilder builder; builder << "could not get write error from safe write"; builder << causedBy( status.toString() ); response->setErrMessage( builder.str() ); return; } if ( errors.wcError.get() ) { response->setWriteConcernError( errors.wcError.release() ); } // // STATS HANDLING // GLEStats stats; extractGLEStats( gleResult, &stats ); // Special case for making legacy "n" field result for insert match the write // command result. if ( request.getBatchType() == BatchedCommandRequest::BatchType_Insert && !errors.writeError.get() ) { // n is always 0 for legacy inserts. 
dassert( stats.n == 0 ); stats.n = 1; } response->setN( response->getN() + stats.n ); if ( !stats.upsertedId.isEmpty() ) { BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail; upsertedId->setIndex( i ); upsertedId->setUpsertedID( stats.upsertedId ); response->addToUpsertDetails( upsertedId ); } response->setLastOp( stats.lastOp ); // Save write error if ( errors.writeError.get() ) { errors.writeError->setIndex( i ); response->addToErrDetails( errors.writeError.release() ); } } // // WRITE CONCERN ERROR HANDLING // // The last write is weird, since we enforce write concern and check the error through // the same GLE if possible. If the last GLE was an error, the write concern may not // have been enforced in that same GLE, so we need to send another after resetting the // error. BSONObj writeConcern; if ( request.isWriteConcernSet() ) { writeConcern = request.getWriteConcern(); // Pre-2.4.2 mongods react badly to 'w' being set on config servers if ( nss.db() == "config" ) writeConcern = fixWCForConfig( writeConcern ); } bool needToEnforceWC = WriteConcernOptions::Acknowledged.woCompare(writeConcern) != 0 && WriteConcernOptions::Unacknowledged.woCompare(writeConcern) != 0; if ( needToEnforceWC && ( !response->isErrDetailsSet() || ( !request.getOrdered() && // Not all errored. Note: implicit response->isErrDetailsSet(). response->sizeErrDetails() < request.sizeWriteOps() ))) { // Might have gotten a write concern validity error earlier, these are // enforced even if the wc isn't applied, so we ignore. response->unsetWriteConcernError(); const string dbName( nss.db().toString() ); Status status( Status::OK() ); if ( response->isErrDetailsSet() ) { const WriteErrorDetail* lastError = response->getErrDetails().back(); // If last write op was an error. if ( lastError->getIndex() == static_cast<int>( request.sizeWriteOps() - 1 )) { // Reset previous errors so we can apply the write concern no matter what // as long as it is valid. 
status = _safeWriter->clearErrors( conn, dbName ); } } BSONObj gleResult; if ( status.isOK() ) { status = _safeWriter->enforceWriteConcern( conn, dbName, writeConcern, &gleResult ); } GLEErrors errors; if ( status.isOK() ) { status = extractGLEErrors( gleResult, &errors ); } if ( !status.isOK() ) { auto_ptr<WCErrorDetail> wcError( new WCErrorDetail ); wcError->setErrCode( status.code() ); wcError->setErrMessage( status.reason() ); response->setWriteConcernError( wcError.release() ); } else if ( errors.wcError.get() ) { response->setWriteConcernError( errors.wcError.release() ); } } response->setOk( true ); dassert( response->isValid( NULL ) ); }