bool batchErrorToLastError(const BatchedCommandRequest& request, const BatchedCommandResponse& response, LastError* error) { unique_ptr<WriteErrorDetail> commandError; WriteErrorDetail* lastBatchError = NULL; if (!response.getOk()) { // Command-level error, all writes failed commandError.reset(new WriteErrorDetail); buildErrorFromResponse(response, commandError.get()); lastBatchError = commandError.get(); } else if (response.isErrDetailsSet()) { // The last error in the batch is always reported - this matches expected COE // semantics for insert batches. For updates and deletes, error is only reported // if the error was on the last item. const bool lastOpErrored = response.getErrDetails().back()->getIndex() == static_cast<int>(request.sizeWriteOps() - 1); if (request.getBatchType() == BatchedCommandRequest::BatchType_Insert || lastOpErrored) { lastBatchError = response.getErrDetails().back(); } } else { // We don't care about write concern errors, these happen in legacy mode in GLE. } // Record an error if one exists if (lastBatchError) { string errMsg = lastBatchError->getErrMessage(); error->setLastError(lastBatchError->getErrCode(), errMsg.empty() ? "see code for details" : errMsg.c_str()); return true; } // Record write stats otherwise // NOTE: For multi-write batches, our semantics change a little because we don't have // un-aggregated "n" stats. 
if (request.getBatchType() == BatchedCommandRequest::BatchType_Update) { BSONObj upsertedId; if (response.isUpsertDetailsSet()) { // Only report the very last item's upserted id if applicable if (response.getUpsertDetails().back()->getIndex() + 1 == static_cast<int>(request.sizeWriteOps())) { upsertedId = response.getUpsertDetails().back()->getUpsertedID(); } } int numUpserted = 0; if (response.isUpsertDetailsSet()) numUpserted = response.sizeUpsertDetails(); int numMatched = response.getN() - numUpserted; dassert(numMatched >= 0); // Wrap upserted id in "upserted" field BSONObj leUpsertedId; if (!upsertedId.isEmpty()) leUpsertedId = upsertedId.firstElement().wrap(kUpsertedFieldName); error->recordUpdate(numMatched > 0, response.getN(), leUpsertedId); } else if (request.getBatchType() == BatchedCommandRequest::BatchType_Delete) { error->recordDelete(response.getN()); } return false; }
/**
 * Processes the response for one targeted child batch, updating the state of
 * every WriteOp that was sent in it.
 *
 * - A command-level failure is converted to a batch error and delegated to
 *   noteBatchError() (no per-item error tracking in that case - as-designed).
 * - Otherwise, per-item errors from the response are matched (by sorted index)
 *   against the batch's writes; unmatched writes are completed. For ordered
 *   batches, writes positioned after the first error are cancelled so they can
 *   be retargeted.
 * - Write concern errors are saved in _wcErrors for later handling; upserted
 *   ids are re-indexed from child-batch indexes to full-batch indexes and
 *   stored in _upsertedIds.
 */
void BatchWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch,
                                     const BatchedCommandResponse& response,
                                     TrackedErrors* trackedErrors) {
    if (!response.getOk()) {
        WriteErrorDetail error;
        cloneCommandErrorTo(response, &error);

        // Treat command errors exactly like other failures of the batch
        // Note that no errors will be tracked from these failures - as-designed
        noteBatchError(targetedBatch, error);
        return;
    }

    dassert(response.getOk());

    // Stop tracking targeted batch
    _targeted.erase(&targetedBatch);

    // Increment stats for this batch
    incBatchStats(_clientRequest->getBatchType(), response, _stats.get());

    //
    // Assign errors to particular items.
    // Write Concern errors are stored and handled later.
    //

    // Special handling for write concern errors, save for later
    if (response.isWriteConcernErrorSet()) {
        unique_ptr<ShardWCError> wcError(
            new ShardWCError(targetedBatch.getEndpoint(), *response.getWriteConcernError()));
        // _wcErrors owns the raw pointer after release()
        _wcErrors.mutableVector().push_back(wcError.release());
    }

    vector<WriteErrorDetail*> itemErrors;

    // Handle batch and per-item errors
    if (response.isErrDetailsSet()) {
        // Per-item errors were set
        itemErrors.insert(
            itemErrors.begin(), response.getErrDetails().begin(), response.getErrDetails().end());

        // Sort per-item errors by index so they can be matched against the
        // writes in a single forward pass below
        std::sort(itemErrors.begin(), itemErrors.end(), WriteErrorDetailComp());
    }

    //
    // Go through all pending responses of the op and sorted remote responses, populate errors
    // This will either set all errors to the batch error or apply per-item errors as-needed
    //
    // If the batch is ordered, cancel all writes after the first error for retargeting.
    //

    bool ordered = _clientRequest->getOrdered();

    vector<WriteErrorDetail*>::iterator itemErrorIt = itemErrors.begin();
    int index = 0;
    WriteErrorDetail* lastError = NULL;
    for (vector<TargetedWrite*>::const_iterator it = targetedBatch.getWrites().begin();
         it != targetedBatch.getWrites().end();
         ++it, ++index) {
        const TargetedWrite* write = *it;
        WriteOp& writeOp = _writeOps[write->writeOpRef.first];

        dassert(writeOp.getWriteState() == WriteOpState_Pending);

        // See if we have an error for the write
        WriteErrorDetail* writeError = NULL;

        if (itemErrorIt != itemErrors.end() && (*itemErrorIt)->getIndex() == index) {
            // We have an per-item error for this write op's index
            writeError = *itemErrorIt;
            ++itemErrorIt;
        }

        // Finish the response (with error, if needed)
        if (NULL == writeError) {
            if (!ordered || !lastError) {
                writeOp.noteWriteComplete(*write);
            } else {
                // We didn't actually apply this write - cancel so we can retarget
                dassert(writeOp.getNumTargeted() == 1u);
                writeOp.cancelWrites(lastError);
            }
        } else {
            writeOp.noteWriteError(*write, *writeError);
            lastError = writeError;
        }
    }

    // Track errors we care about, whether batch or individual errors
    if (NULL != trackedErrors) {
        trackErrors(targetedBatch.getEndpoint(), itemErrors, trackedErrors);
    }

    // Track upserted ids if we need to
    if (response.isUpsertDetailsSet()) {
        const vector<BatchedUpsertDetail*>& upsertedIds = response.getUpsertDetails();
        for (vector<BatchedUpsertDetail*>::const_iterator it = upsertedIds.begin();
             it != upsertedIds.end();
             ++it) {
            // The child upserted details don't have the correct index for the full batch
            const BatchedUpsertDetail* childUpsertedId = *it;

            // Work backward from the child batch item index to the batch item index
            int childBatchIndex = childUpsertedId->getIndex();
            int batchIndex = targetedBatch.getWrites()[childBatchIndex]->writeOpRef.first;

            // Push the upserted id with the correct index into the batch upserted ids
            BatchedUpsertDetail* upsertedId = new BatchedUpsertDetail;
            upsertedId->setIndex(batchIndex);
            upsertedId->setUpsertedID(childUpsertedId->getUpsertedID());
            _upsertedIds.mutableVector().push_back(upsertedId);
        }
    }
}
void batchErrorToLastError( const BatchedCommandRequest& request, const BatchedCommandResponse& response, LastError* error ) { scoped_ptr<BatchedErrorDetail> topLevelError; BatchedErrorDetail* lastBatchError = NULL; if ( !response.getOk() ) { int code = response.getErrCode(); // Check for batch error // We don't care about write concern errors, these happen in legacy mode in GLE if ( code != ErrorCodes::WriteConcernFailed && !response.isErrDetailsSet() ) { // Top-level error, all writes failed topLevelError.reset( new BatchedErrorDetail ); buildErrorFromResponse( response, topLevelError.get() ); lastBatchError = topLevelError.get(); } else if ( response.isErrDetailsSet() ) { // The last error in the batch is always reported - this matches expected COE // semantics for insert batches and works for single writes lastBatchError = response.getErrDetails().back(); } } // Record an error if one exists if ( lastBatchError ) { error->raiseError( lastBatchError->getErrCode(), lastBatchError->getErrMessage().c_str() ); return; } // Record write stats otherwise // NOTE: For multi-write batches, our semantics change a little because we don't have // un-aggregated "n" stats. 
if ( request.getBatchType() == BatchedCommandRequest::BatchType_Update ) { BSONObj upsertedId; if ( response.isSingleUpsertedSet() ) upsertedId = response.getSingleUpserted(); else if( response.isUpsertDetailsSet() ) { // Only report the very last item's upserted id if applicable if ( response.getUpsertDetails().back()->getIndex() + 1 == static_cast<int>( request.sizeWriteOps() ) ) { upsertedId = response.getUpsertDetails().back()->getUpsertedID(); } } int numUpserted = 0; if ( response.isSingleUpsertedSet() ) ++numUpserted; else if ( response.isUpsertDetailsSet() ) numUpserted += response.sizeUpsertDetails(); int numUpdated = response.getN() - numUpserted; dassert( numUpdated >= 0 ); error->recordUpdate( numUpdated > 0, response.getN(), upsertedId ); } else if ( request.getBatchType() == BatchedCommandRequest::BatchType_Delete ) { error->recordDelete( response.getN() ); } }