short ex_tcb::handleError(ex_queue_pair *qparent, ComDiagsArea *inDiagsArea)
{
  if (qparent->up->isFull())
    return 1;

  // Return the error row.
  ex_queue_entry * up_entry = qparent->up->getTailEntry();
  ex_queue_entry * pentry_down = qparent->down->getHeadEntry();
  up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
  up_entry->upState.setMatchNo(0);
  up_entry->upState.status = ex_queue::Q_SQLERROR;

  ComDiagsArea *diagsArea = up_entry->getDiagsArea();
  if (diagsArea == NULL)
    diagsArea = ComDiagsArea::allocate(this->getGlobals()->getDefaultHeap());
  else
    diagsArea->incrRefCount(); // the setDiagsArea below will decr the ref count

  if (inDiagsArea)
    diagsArea->mergeAfter(*inDiagsArea);

  up_entry->setDiagsArea(diagsArea);

  // insert into parent
  qparent->up->insert();

  return 0;
}
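// updateWorkATPDiagsArea: merge (or move) the diags area of a child queue
// entry into the work ATP, so diagnostics accumulate in one place.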
void ExFastExtractTcb::updateWorkATPDiagsArea(ex_queue_entry * centry)
{
  if (centry->getDiagsArea())
  {
    if (workAtp_->getDiagsArea())
    {
      workAtp_->getDiagsArea()->mergeAfter(*centry->getDiagsArea());
    }
    else
    {
      ComDiagsArea * da = centry->getDiagsArea();
      workAtp_->setDiagsArea(da);
      da->incrRefCount();
      centry->setDiagsArea(0);
    }
  }
}
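// getDiagAreaFromUpQueueTail: return a diags area attached to the tail entry
// of the parent up queue, allocating one if necessary and merging in this
// TCB's own diags area as a side effect.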
ComDiagsArea *ExExeUtilLongRunningTcb::getDiagAreaFromUpQueueTail()
{
  ex_queue_entry * up_entry = qparent_.up->getTailEntry();

  ComDiagsArea *diagsArea = up_entry->getDiagsArea();
  if (diagsArea == NULL)
    diagsArea = ComDiagsArea::allocate(this->getGlobals()->getDefaultHeap());
  else
    diagsArea->incrRefCount(); // setDiagsArea call below will decr ref count

  // This is the side effect of this function: merge in this object's
  // diags area.
  if (getDiagsArea())
    diagsArea->mergeAfter(*getDiagsArea());

  up_entry->setDiagsArea(diagsArea);

  return diagsArea;
}
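// updateDiagsArea: merge (or move) the diags area of a child queue entry
// into the work ATP of this sequence operator.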
void ExSequenceTcb::updateDiagsArea(ex_queue_entry * centry)
{
  if (centry->getDiagsArea())
  {
    if (workAtp_->getDiagsArea())
    {
      // LCOV_EXCL_START
      workAtp_->getDiagsArea()->mergeAfter(*centry->getDiagsArea());
      // LCOV_EXCL_STOP
    }
    else
    {
      ComDiagsArea * da = centry->getDiagsArea();
      workAtp_->setDiagsArea(da);
      da->incrRefCount();
      centry->setDiagsArea(0);
    }
  }
}
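// moveReplyToCache: copy an inner (child) reply row, along with any diags
// area, into the probe cache entry pcEntry so that later probes with the
// same key can be answered from the cache.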
ExProbeCacheTcb::MoveStatus ExProbeCacheTcb::moveReplyToCache(ex_queue_entry &reply,
                                                              ExPCE &pcEntry)
{
  if (moveInnerExpr())
  {
    ex_assert(!pcEntry.innerRowTupp_.isAllocated(),
              "reusing an allocated innerRowTupp");

    if (pool_->getFreeTuple(pcEntry.innerRowTupp_))
      return MOVE_BLOCKED;

    workAtp_->getTupp(probeCacheTdb().innerRowDataIdx_) = pcEntry.innerRowTupp_;

    // Evaluate the move expression on the reply.
    ex_expr::exp_return_type innerMoveRtn =
      moveInnerExpr()->eval(reply.getAtp(), workAtp_);

    if (innerMoveRtn == ex_expr::EXPR_ERROR)
      return MOVE_ERROR;
  }
  else
  {
    ex_assert(pcEntry.innerRowTupp_.isAllocated() == FALSE,
              "Incorrectly initialized innerRowTupp_");
  }

  // Initialize ExPCE members
  pcEntry.upstateStatus_ = ex_queue::Q_OK_MMORE;

  ComDiagsArea *da = reply.getAtp()->getDiagsArea();
  if (da)
  {
    pcEntry.diagsArea_ = da;
    da->incrRefCount();
  }
  return MOVE_OK;
}
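// handleDone: reply Q_NO_DATA to the current parent request, forwarding any
// warnings from inDiagsArea, and remove the request from the down queue.
// Returns 1 if the parent up queue is full (caller should retry later).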
short ex_tcb::handleDone(ex_queue_pair *qparent, ComDiagsArea *inDiagsArea)
{
  if (qparent->up->isFull())
    return 1;

  // Return EOF.
  ex_queue_entry * up_entry = qparent->up->getTailEntry();
  ex_queue_entry * pentry_down = qparent->down->getHeadEntry();

  if (inDiagsArea && inDiagsArea->getNumber(DgSqlCode::WARNING_) > 0)
  {
    ComDiagsArea *diagsArea = up_entry->getDiagsArea();
    if (diagsArea == NULL)
      diagsArea = ComDiagsArea::allocate(this->getGlobals()->getDefaultHeap());
    else
      diagsArea->incrRefCount(); // the setDiagsArea below will decr the ref count

    if (inDiagsArea)
      diagsArea->mergeAfter(*inDiagsArea);

    up_entry->setDiagsArea(diagsArea);
  }

  up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
  up_entry->upState.setMatchNo(0);
  up_entry->upState.status = ex_queue::Q_NO_DATA;

  // insert into parent
  qparent->up->insert();

  // pstate.matches_ = 0;
  qparent->down->removeHead();

  return 0;
}
//////////////////////////////////////////////////////
// work() for ExExeUtilLongRunningTcb
//////////////////////////////////////////////////////
short ExExeUtilLongRunningTcb::work()
{
  short rc = 0;
  Lng32 cliRC = 0;
  Int64 rowsDeleted = 0;
  Int64 transactions = 0;

  // if no parent request, return
  if (qparent_.down->isEmpty())
    return WORK_OK;

  // if no room in up queue, won't be able to return data/status.
  // Come back later.
  if (qparent_.up->isFull())
    return WORK_OK;

  ex_queue_entry * pentry_down = qparent_.down->getHeadEntry();
  ExExeUtilPrivateState & pstate =
    *((ExExeUtilPrivateState*) pentry_down->pstate);

  // Get the globals structure of the ESP, if this is an ESP.
  ExExeStmtGlobals *exeGlob = getGlobals()->castToExExeStmtGlobals();
  ExEspStmtGlobals *espGlob = exeGlob->castToExEspStmtGlobals();

  Int32 espNum = 1;

  // is this an ESP?
  if (espGlob != NULL)
  {
    espNum = (Int32) espGlob->getMyInstanceNumber();
  }

  while (1)
  {
    switch (step_)
    {
      case INITIAL_:
      {
        step_ = LONG_RUNNING_;
      }
      break;

      case LONG_RUNNING_:
      {
        rc = doLongRunning();

        if ((rc < 0) || (rc == 100))
        {
          finalizeDoLongRunning();
          if (rc < 0)
            step_ = ERROR_;
          else // rc == 100 - done with all the transactions.
            step_ = DONE_;
        }

        // continue in LONG_RUNNING_ state if (rc >= 0) - success and warning.
      }
      break;

      case DONE_:
      {
        if (qparent_.up->isFull())
          return WORK_OK;

        // Return EOF.
        ex_queue_entry * up_entry = qparent_.up->getTailEntry();

        up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
        up_entry->upState.setMatchNo(0);
        up_entry->upState.status = ex_queue::Q_NO_DATA;

        // before sending the Q_NO_DATA, send the rowcount as well through
        // the diagsArea.
        getDiagsArea()->setRowCount(getRowsDeleted());

        ComDiagsArea *diagsArea = getDiagAreaFromUpQueueTail();

        if (lrTdb().longRunningQueryPlan())
        {
          (*diagsArea) << DgSqlCode(8450)
                       << DgString0((char*)exeUtilTdb().getTableName())
                       << DgInt0(espNum)
                       << DgInt1((Lng32)getTransactionCount());
        }

        // insert into parent
        qparent_.up->insert();

        //pstate.matches_ = 0;

        // reset the parameters.
        step_ = INITIAL_;
        transactions_ = 0;
        rowsDeleted_ = 0;
        initial_ = 1;

        // clear diags if any
        if (getDiagsArea())
        {
          getDiagsArea()->clear();
        }

        qparent_.down->removeHead();

        return WORK_OK;
      }
      break;

      case ERROR_:
      {
        if (qparent_.up->isFull())
          return WORK_OK;

        // Return the error row.
        ex_queue_entry * up_entry = qparent_.up->getTailEntry();

        up_entry->upState.parentIndex = pentry_down->downState.parentIndex;
        up_entry->upState.setMatchNo(0);
        up_entry->upState.status = ex_queue::Q_SQLERROR;

        // get rows deleted so far.
        getDiagsArea()->setRowCount(getRowsDeleted());

        ComDiagsArea *diagsArea = up_entry->getDiagsArea();
        if (diagsArea == NULL)
          diagsArea = ComDiagsArea::allocate(this->getGlobals()->getDefaultHeap());
        else
          diagsArea->incrRefCount(); // setDiagsArea call below will decr ref count

        if (getDiagsArea())
          diagsArea->mergeAfter(*getDiagsArea());

        up_entry->setDiagsArea(diagsArea);

        // insert into parent
        qparent_.up->insert();

        // clear diags if any, since we already sent the information
        // up and don't want to send it again as part of DONE_
        if (getDiagsArea())
        {
          rowsDeleted_ = 0;
          getDiagsArea()->clear();
        }

        step_ = DONE_;
      }
      break;

    } // switch
  } // while
}
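//////////////////////////////////////////////////////
// work() for ExSequenceTcb
//
// Drives the sequence (OLAP window) operator as a state machine kept in
// each request's private state (pstate->step_): read child rows into the
// history buffer (ExSeq_WORKING_READ), evaluate the sequence/return/cancel
// expressions and return rows to the parent (ExSeq_WORKING_RETURN), spill
// and re-read OLAP buffers when memory pressure is detected
// (ExSeq_OVERFLOW_WRITE / ExSeq_OVERFLOW_READ), and handle end of
// partition, errors, cancel, and EOD.
//////////////////////////////////////////////////////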
// work - doit...
//
short ExSequenceTcb::work()
{
  // If there are no parent requests on the queue, then there cannot
  // be anything to do here.
  //
  if (qparent_.down->isEmpty())
    return WORK_OK;

  ex_queue_entry * pentry_down;
  ExSequencePrivateState * pstate;
  ex_queue::down_request request;

  // Take any new parent requests and pass them on to the child as long
  // as the child's queue is not full. processedInputs_ maintains the
  // queue index of the last request that was passed on.
  //
  for (queue_index tail = qparent_.down->getTailIndex();
       (processedInputs_ != tail) && (!qchild_.down->isFull());
       processedInputs_++)
  {
    pentry_down = qparent_.down->getQueueEntry(processedInputs_);
    pstate = (ExSequencePrivateState*) pentry_down->pstate;
    request = pentry_down->downState.request;

    // If the request has already been cancelled, don't pass it to the
    // child. Instead, just mark the request as done. This will trigger
    // an EOD reply when this request gets worked on.
    //
    if (request == ex_queue::GET_NOMORE)
    {
      // LCOV_EXCL_START
      pstate->step_ = ExSeq_DONE;
      // LCOV_EXCL_STOP
    }
    else
    {
      pstate->step_ = ExSeq_WORKING_READ;

      // Pass the request to the child.
      //
      ex_queue_entry * centry = qchild_.down->getTailEntry();
      centry->downState.request = ex_queue::GET_ALL;
      centry->downState.requestValue = 11;
      centry->downState.parentIndex = processedInputs_;
      centry->passAtp(pentry_down);
      qchild_.down->insert();
    }
  } // end for processedInputs_

  pentry_down = qparent_.down->getHeadEntry();
  pstate = (ExSequencePrivateState*) pentry_down->pstate;
  request = pentry_down->downState.request;

  // Take any child replies and process them. Return the processed
  // rows as long as the parent queue has room.
  //
  while (1)
  {
    // If we have satisfied the parent request (or it was cancelled),
    // then stop processing rows, cancel any outstanding child
    // requests, and set this request to the CANCELLED state.
    //
    if ((pstate->step_ == ExSeq_WORKING_READ) ||
        (pstate->step_ == ExSeq_WORKING_RETURN))
    {
      if ((request == ex_queue::GET_NOMORE) ||
          ((request == ex_queue::GET_N) &&
           (pentry_down->downState.requestValue <= (Lng32)pstate->matchCount_)))
      {
        qchild_.down->cancelRequestWithParentIndex(qparent_.down->getHeadIndex());
        pstate->step_ = ExSeq_CANCELLED;
      }
    }

    switch (pstate->step_)
    {
      // ExSeq_CANCELLED
      //
      // Transition to this state from ...
      // 1. ExSeq_Error - After the error has been processed.
      // 2. ExSeq_Working - If enough rows have been returned.
      // 3. ExSeq_Working - If the request was cancelled.
      //
      // Remain in this state until ...
      // 1. All rows from the child, including EOD, are consumed.
      //
      // Transition from this state to ...
      // 1. ExSeq_DONE - In all cases.
      //
      case ExSeq_CANCELLED:
      {
        // There are no extra rows to process from the child yet,
        // so try again later.
        //
        if (qchild_.up->isEmpty())
        {
          return WORK_OK;
        }

        ex_queue_entry * centry = qchild_.up->getHeadEntry();
        ex_queue::up_status child_status = centry->upState.status;

        // If this is the EOD, transition to the ExSeq_DONE state.
        //
        if (child_status == ex_queue::Q_NO_DATA)
          pstate->step_ = ExSeq_DONE;

        // Discard the child row.
        qchild_.up->removeHead();
        break;
      }

      // ExSeq_ERROR
      //
      // Transition to this state from ...
      // 1. ExSeq_WORKING_READ - a child reply with the type SQLERROR.
      // 2. ExSeq_WORKING_RETURN
      // 3. ExSeq_OVERFLOW_READ
      // 4. ExSeq_OVERFLOW_WRITE
      //
      // Remain in this state until ...
      // 1. The error row has been returned to the parent.
      //
      // Transition from this state to ...
      // 1. ExSeq_CANCELLED - In all cases.
      //
      case ExSeq_ERROR:
      {
        // If there is no room in the parent queue for the reply,
        // try again later.
        //
        if (qparent_.up->isFull())
          // LCOV_EXCL_START
          return WORK_OK;
          // LCOV_EXCL_STOP

        ex_queue_entry *pentry_up = qparent_.up->getTailEntry();

        // Cancel the child request - there must be a child request in
        // progress to get to the ExSeq_ERROR state.
        //
        qchild_.down->cancelRequestWithParentIndex(qparent_.down->getHeadIndex());

        // Construct and return the error row.
        //
        if (workAtp_->getDiagsArea())
        {
          ComDiagsArea * da = workAtp_->getDiagsArea();
          pentry_up->setDiagsArea(da);
          da->incrRefCount();
          workAtp_->setDiagsArea(0);
        }
        pentry_up->upState.status = ex_queue::Q_SQLERROR;
        pentry_up->upState.parentIndex = pentry_down->downState.parentIndex;
        pentry_up->upState.downIndex = qparent_.down->getHeadIndex();
        pentry_up->upState.setMatchNo(pstate->matchCount_);
        qparent_.up->insert();

        // Transition to the ExSeq_CANCELLED state.
        //
        pstate->step_ = ExSeq_CANCELLED;
        break;
      }

      // ExSeq_WORKING_READ
      //
      // Transition to this state from ...
      // 1. ExSeq_EMPTY - If a request is started.
      // 2. ExSeq_WORKING_RETURN -
      // 3. ExSeq_OVERFLOW_WRITE -
      //
      // Remain in this state until ...
      // 1. All child replies including EOD have been processed.
      // 2. A SQLERROR row is received.
      // 3. Enough rows have been returned.
      // 4. The request is cancelled.
      // 5. End of partition is reached.
      //
      // Transition from this state to ...
      // 2. ExSeq_ERROR - If a SQLERROR row is received.
      // 3. ExSeq_CANCELLED - If the request is cancelled.
      // 4. ExSeq_WORKING_RETURN
      // 5. ExSeq_OVERFLOW_WRITE -
      //
      case ExSeq_WORKING_READ:
      {
        if (!isUnboundedFollowing() && isHistoryFull())
        {
          pstate->step_ = ExSeq_WORKING_RETURN;
          break;
        }

        // If there are no replies, try again later.
        //
        if (qchild_.up->isEmpty())
          return WORK_OK;

        ex_queue_entry * centry = qchild_.up->getHeadEntry();

        switch (centry->upState.status)
        {
          // A data row from the child.
          //
          case ex_queue::Q_OK_MMORE:
          {
            tupp_descriptor histTupp;
            workAtp_->copyAtp(pentry_down->getAtp());
            workAtp_->getTupp(myTdb().tuppIndex_) = &histTupp;

            if (checkPartitionChangeExpr() && currentHistRowPtr_)
            {
              workAtp_->getTupp(myTdb().tuppIndex_).setDataPointer(currentHistRowPtr_);

              // Check whether the partition changed
              ex_expr::exp_return_type retCode =
                checkPartitionChangeExpr()->eval(workAtp_, centry->getAtp());

              if (retCode == ex_expr::EXPR_ERROR)
              {
                // LCOV_EXCL_START
                updateDiagsArea(centry);
                pstate->step_ = ExSeq_ERROR;
                break;
                // LCOV_EXCL_STOP
              }

              if (retCode == ex_expr::EXPR_FALSE)
              {
                setPartitionEnd(TRUE);
                pstate->step_ = ExSeq_END_OF_PARTITION;
                break;
              }
            }

            if (isUnboundedFollowing())
            {
              if (OLAPBuffersFlushed_)
              {
                OLAPBuffersFlushed_ = FALSE; // current row is the first one in first buffer already
              }
              else
              {
                NABoolean noMemory =
                  advanceHistoryRow(TRUE /* checkMemoryPressure */);
                if (noMemory)
                {
                  pstate->step_ = ExSeq_OVERFLOW_WRITE;
                  cluster_->nextBufferToFlush_ = firstOLAPBuffer_;
                  cluster_->afterLastBufferToFlush_ = NULL; // flush them all

                  // If it is the first overflow, for this partition
                  if (!memoryPressureDetected_)
                  {
                    memoryPressureDetected_ = TRUE;
                  } // memory pressure detected

                  break;
                }
              }
            }
            else
            {
              advanceHistoryRow();
            }

            workAtp_->getTupp(myTdb().tuppIndex_).setDataPointer(currentHistRowPtr_);

            ex_expr::exp_return_type retCode = ex_expr::EXPR_OK;

            // Apply the read phase sequence function expression to compute
            // the values of the sequence functions.
            if (sequenceExpr())
            {
              retCode = sequenceExpr()->eval(workAtp_, centry->getAtp());
              if (retCode == ex_expr::EXPR_ERROR)
              {
                updateDiagsArea(centry);
                pstate->step_ = ExSeq_ERROR;
                break;
              }
            }

            // merge the child's diags area into the work atp
            updateDiagsArea(centry);

            qchild_.up->removeHead();
            break;
          }

          // The EOD from the child. Transition to ExSeq_DONE.
          //
          case ex_queue::Q_NO_DATA:
          {
            setPartitionEnd(TRUE);
            if (isHistoryEmpty())
            {
              pstate->step_ = ExSeq_DONE;
              qchild_.up->removeHead();
            }
            else
            {
              pstate->step_ = ExSeq_END_OF_PARTITION;
            }
          }
          break;

          // A SQLERROR from the child. Transition to ExSeq_ERROR.
          //
          case ex_queue::Q_SQLERROR:
            updateDiagsArea(centry);
            pstate->step_ = ExSeq_ERROR;
            break;
        }
      }
      break;

      // ExSeq_WORKING_RETURN
      //
      // Transition to this state from ...
      // 1. ExSeq_WORKING_READ -
      // 2. ExSeq_OVERFLOW_READ -
      // 3. ExSeq_END_OF_PARTITION -
      //
      // Remain in this state until ...
      // 1. All rows are returned.
      // 2. A SQLERROR row is received.
      // 3. Enough rows have been returned.
      //
      // Transition from this state to ...
      // 1. ExSeq_DONE - If all the child rows including EOD have
      //    been processed.
      // 2. ExSeq_ERROR - If a SQLERROR row is received.
      // 3. ExSeq_CANCELLED - If enough rows have been returned.
      // 4. ExSeq_CANCELLED - If the request is cancelled.
      // 5. ExSeq_WORKING_RETURN
      // 6. ExSeq_DONE
      // 7. ExSeq_OVERFLOW_READ
      //
      case ExSeq_WORKING_RETURN:
      {
        // If there is no room in the parent queue for the reply,
        // try again later.
        //
        if (qparent_.up->isFull())
          return WORK_OK;

        if (isHistoryEmpty())
        {
          ex_queue_entry * centry = NULL;
          if (!qchild_.up->isEmpty())
          {
            centry = qchild_.up->getHeadEntry();
          }

          if (centry && (centry->upState.status == ex_queue::Q_NO_DATA))
          {
            pstate->step_ = ExSeq_DONE;
            qchild_.up->removeHead();
          }
          else
          {
            pstate->step_ = ExSeq_WORKING_READ;
            if (getPartitionEnd())
            {
              initializeHistory();
            }
          }
          break;
        }

        if (!canReturnRows() &&
            !getPartitionEnd() &&
            !isUnboundedFollowing() &&
            !isOverflowStarted()) // redundant? because not unbounded ...
        {
          pstate->step_ = ExSeq_WORKING_READ;
          break;
        }

        ex_queue_entry * pentry_up = qparent_.up->getTailEntry();
        pentry_up->copyAtp(pentry_down);

        // Try to allocate a tupp.
        //
        if (pool_->get_free_tuple(pentry_up->getTupp(myTdb().tuppIndex_), recLen()))
          // LCOV_EXCL_START
          return WORK_POOL_BLOCKED;
          // LCOV_EXCL_STOP

        char *tuppData = pentry_up->getTupp(myTdb().tuppIndex_).getDataPointer();

        advanceReturnHistoryRow();

        char *histData = currentRetHistRowPtr_;
        pentry_up->getTupp(myTdb().tuppIndex_).setDataPointer(histData);

        ex_expr::exp_return_type retCode = ex_expr::EXPR_OK;

        // Apply the return phase expression
        if (returnExpr())
        {
          retCode = returnExpr()->eval(pentry_up->getAtp(), workAtp_);
          if (retCode == ex_expr::EXPR_ERROR)
          {
            // LCOV_EXCL_START
            pstate->step_ = ExSeq_ERROR;
            break;
            // LCOV_EXCL_STOP
          }
        }

        retCode = ex_expr::EXPR_OK;

        // Apply post predicate expression
        if (postPred())
        {
          retCode = postPred()->eval(pentry_up->getAtp(), pentry_up->getAtp());
          if (retCode == ex_expr::EXPR_ERROR)
          {
            // LCOV_EXCL_START
            pstate->step_ = ExSeq_ERROR;
            break;
            // LCOV_EXCL_STOP
          }
        }

        //pentry_up->getAtp()->display("return eval result", myTdb().getCriDescUp());

        // Case-10-030724-7963: we are done pointing the tupp at the
        // history buffer, so point it back to the SQL buffer.
        //
        pentry_up->getTupp(myTdb().tuppIndex_).setDataPointer(tuppData);

        switch (retCode)
        {
          case ex_expr::EXPR_OK:
          case ex_expr::EXPR_TRUE:
          case ex_expr::EXPR_NULL:

            // Copy the row that was computed in the history buffer
            // to the space previously allocated in the SQL buffer.
            str_cpy_all(tuppData, histData, recLen());

            // Return the processed row.
            //
            // Finalize the queue entry, then insert it.
            //
            pentry_up->upState.status = ex_queue::Q_OK_MMORE;
            pentry_up->upState.parentIndex = pentry_down->downState.parentIndex;
            pentry_up->upState.downIndex = qparent_.down->getHeadIndex();
            pstate->matchCount_++;
            pentry_up->upState.setMatchNo(pstate->matchCount_);
            qparent_.up->insert();
            break;

          // If the selection predicate returns FALSE,
          // do not return the child row.
          //
          case ex_expr::EXPR_FALSE:
            break;

          // If the selection predicate returns an ERROR,
          // go to the error processing state.
          //
          case ex_expr::EXPR_ERROR:
            // LCOV_EXCL_START
            pstate->step_ = ExSeq_ERROR;
            // LCOV_EXCL_STOP
            break;
        }

        // MV --
        // Now, if there are no errors so far, evaluate the
        // cancel expression.
        if ((pstate->step_ != ExSeq_ERROR) && cancelExpr())
        {
          // Temporarily point the tupp to the tail of the
          // history buffer for evaluating the
          // expressions.
          //
          pentry_up->getTupp(myTdb().tuppIndex_).setDataPointer(histData);

          retCode = cancelExpr()->eval(pentry_up->getAtp(), pentry_up->getAtp());

          // We are done pointing the tupp at the history
          // buffer, so point it back to the SQL buffer.
          //
          pentry_up->getTupp(myTdb().tuppIndex_).setDataPointer(tuppData);

          if (retCode == ex_expr::EXPR_TRUE)
          {
            qchild_.down->cancelRequestWithParentIndex(qparent_.down->getHeadIndex());
            pstate->step_ = ExSeq_CANCELLED;
          }
        }

        updateHistRowsToReturn();

        if (isOverflowStarted())
        {
          numberOfRowsReturnedBeforeReadOF_++;
          if (numberOfRowsReturnedBeforeReadOF_ == maxNumberOfRowsReturnedBeforeReadOF_)
          {
            firstOLAPBufferFromOF_ = currentRetOLAPBuffer_->getNext();
            if (firstOLAPBufferFromOF_ == NULL)
            {
              firstOLAPBufferFromOF_ = firstOLAPBuffer_;
            }
            for (Int32 i = 0; i < numberOfWinOLAPBuffers_; i++)
            {
              firstOLAPBufferFromOF_ = firstOLAPBufferFromOF_->getNext();
              if (firstOLAPBufferFromOF_ == NULL)
              {
                firstOLAPBufferFromOF_ = firstOLAPBuffer_;
              }
            }
            numberOfOLAPBuffersFromOF_ = numberOfOLAPBuffers_ - numberOfWinOLAPBuffers_;

            cluster_->nextBufferToRead_ = firstOLAPBufferFromOF_;
            HashBuffer * afterLast = firstOLAPBufferFromOF_;
            // last buffer to read into is the current buffer - maybe ?
            for (Lng32 bufcount = numberOfOLAPBuffersFromOF_; bufcount; bufcount--)
            {
              afterLast = afterLast->getNext();
              // Don't cycle back if bufcount == 1 because the logic in
              // Cluster::read relies on the NULL ptr to stop reading
              if (bufcount > 1 && !afterLast)
                afterLast = firstOLAPBuffer_; // cycle back
            }
            // The last buffer to read to is always the current buffer
            // ex_assert ( afterLast == currentRetOLAPBuffer_->getNext(),
            //             "Miscalculated the last buffer to read into");
            cluster_->afterLastBufferToRead_ = afterLast;

            pstate->step_ = ExSeq_OVERFLOW_READ;
          }
        }
      }
      break;

      // ExSeq_END_OF_PARTITION
      //
      // Transition to this state from ...
      // 1. ExSeq_WORKING_READ -
      //
      // Transition from this state to ...
      // 1. ExSeq_OVERFLOW_WRITE
      // 2. ExSeq_WORKING_RETURN
      //
      case ExSeq_END_OF_PARTITION:
      {
        setPartitionEnd(TRUE);
        if (lastRow_ && isUnboundedFollowing())
        {
          ex_assert(currentHistRowPtr_ != NULL,
                    "ExSequenceTcb::work() - currentHistRowPtr_ is a NULL pointer");
          str_cpy_all(lastRow_, currentHistRowPtr_, recLen());
        }

        if (isOverflowStarted()) // we are overflowing
        {
          cluster_->nextBufferToFlush_ = firstOLAPBuffer_;
          // do not flush beyond the current buffer
          cluster_->afterLastBufferToFlush_ = currentOLAPBuffer_->getNext();

          pstate->step_ = ExSeq_OVERFLOW_WRITE;
        }
        else
        {
          pstate->step_ = ExSeq_WORKING_RETURN;
        }
      }
      break;

      // ExSeq_OVERFLOW_WRITE
      //
      // Transition to this state from ...
      // 1. ExSeq_WORKING_READ -
      // 2. ExSeq_END_OF_PARTITION -
      //
      // Remain in this state until ...
      // 1. OLAP buffers are written to overflow space.
      // 2. An error occurs.
      //
      // Transition from this state to ...
      // 1. ExSeq_OVERFLOW_READ
      // 2. ExSeq_ERROR - If an error occurs.
      //
      case ExSeq_OVERFLOW_WRITE:
      {
        if (!overflowEnabled_)
        {
          // LCOV_EXCL_START
          // used for debugging when CmpCommon::getDefault(EXE_BMO_DISABLE_OVERFLOW) is set to off
          updateDiagsArea(EXE_OLAP_OVERFLOW_NOT_SUPPORTED);
          pstate->step_ = ExSeq_ERROR;
          break;
          // LCOV_EXCL_STOP
        }
        ex_assert(isUnboundedFollowing(), "");

        if (!cluster_->flush(&rc_))
        { // flush the buffers
          // LCOV_EXCL_START
          // if no errors this code path is not visited
          if (rc_)
          { // some error
            updateDiagsArea(rc_);
            pstate->step_ = ExSeq_ERROR;
            break;
          }
          // LCOV_EXCL_STOP

          // Not all the buffers are completely flushed. An I/O is pending.
          // LCOV_EXCL_START
          // maybe we can remove this in the future
          return WORK_OK;
          // LCOV_EXCL_STOP
        }

        // At this point -- all the buffers were completely flushed
        OLAPBuffersFlushed_ = TRUE;

        if (getPartitionEnd())
        {
          firstOLAPBufferFromOF_ = firstOLAPBuffer_;
          numberOfOLAPBuffersFromOF_ = numberOfOLAPBuffers_;

          cluster_->nextBufferToRead_ = firstOLAPBufferFromOF_;
          // First time we read and fill all the buffers
          cluster_->afterLastBufferToRead_ = NULL;

          pstate->step_ = ExSeq_OVERFLOW_READ;
        }
        else
        {
          pstate->step_ = ExSeq_WORKING_READ;
        }
      }
      break;

      // ExSeq_OVERFLOW_READ
      //
      // Transition to this state from ...
      // 1. ExSeq_OVERFLOW_WRITE
      // 2. ExSeq_WORKING_RETURN
      //
      // Remain in this state until ...
      // 1. OLAP buffers are read from overflow space.
      // 2. An error occurs.
      //
      // Transition from this state to ...
      // 1. ExSeq_WORKING_RETURN
      // 2. ExSeq_ERROR - If an error occurs.
      //
      case ExSeq_OVERFLOW_READ:
      {
        assert(firstOLAPBufferFromOF_ && isUnboundedFollowing());

        if (!cluster_->read(&rc_))
        {
          // LCOV_EXCL_START
          if (rc_)
          { // some error
            updateDiagsArea(rc_);
            pstate->step_ = ExSeq_ERROR;
            break;
          }
          // LCOV_EXCL_STOP

          // Not all the buffers are completely read. An I/O is pending.
          // LCOV_EXCL_START
          return WORK_OK;
          // LCOV_EXCL_STOP
        }

        numberOfRowsReturnedBeforeReadOF_ = 0;
        pstate->step_ = ExSeq_WORKING_RETURN;
      }
      break;

      // ExSeq_DONE
      //
      // Transition to this state from ...
      // 1. ExSeq_WORKING_RETURN - if all child rows have been processed.
      // 2. ExSeq_CANCELLED - if all child rows have been consumed.
      // 3. ExSeq_EMPTY - if the request was DOA.
      //
      // Remain in this state until ...
      // 1. The EOD is returned to the parent.
      //
      // Transition from this state to ...
      // 1. ExSeq_EMPTY - In all cases.
      //
      case ExSeq_DONE:
      {
        // If there is no room in the parent's queue,
        // try again later.
        //
        if (qparent_.up->isFull())
          // LCOV_EXCL_START
          return WORK_OK;
          // LCOV_EXCL_STOP

        ex_queue_entry * pentry_up = qparent_.up->getTailEntry();
        pentry_up->upState.status = ex_queue::Q_NO_DATA;
        pentry_up->upState.parentIndex = pentry_down->downState.parentIndex;
        pentry_up->upState.downIndex = qparent_.down->getHeadIndex();
        pentry_up->upState.setMatchNo(pstate->matchCount_);

        qparent_.down->removeHead();
        qparent_.up->insert();

        // Re-initialize pstate
        //
        pstate->step_ = ExSeq_EMPTY;
        pstate->matchCount_ = 0;
        workAtp_->release();

        // Initialize the history buffer in preparation for the
        // next request.
        //
        initializeHistory();

        // If there are no more requests, simply return.
        //
        if (qparent_.down->isEmpty())
          return WORK_OK;

        // LCOV_EXCL_START
        // If we haven't given the new head request to our child yet,
        // return and ask to be called again.
        //
        if (qparent_.down->getHeadIndex() == processedInputs_)
          return WORK_CALL_AGAIN;

        // Position at the new head of the request queue.
        //
        pentry_down = qparent_.down->getHeadEntry();
        pstate = (ExSequencePrivateState*) pentry_down->pstate;
        request = pentry_down->downState.request;
        // LCOV_EXCL_STOP
      }
      break;

    } // switch pstate->step_
  } // while
}
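//////////////////////////////////////////////////////
// work() for ExTupleFlowTcb
//
// Moves rows from the source (left) child to the target (right) child,
// forwards EOD when needed, accumulates diagnostics (including the
// non-atomic rowset "non-fatal error" handling), and replies to the
// parent when both children are drained. The state machine is kept in
// pstate.step_ (EMPTY_, MOVE_SRC_TO_TGT_, MOVE_EOD_TO_TGT_, PROCESS_TGT_,
// HANDLE_ERROR_, CANCELLED_, DONE_).
//////////////////////////////////////////////////////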
short ExTupleFlowTcb::work()
{
  // This is some sort of a hack to fix the problems with the number of rows
  // inserted returned to the user for packed tables. For these tables, rows
  // are packed (by the Pack node which is the left child of this tuple flow)
  // before they are sent off to DP2. DP2 has no idea that it's actually
  // inserting multiple logical rows (as perceived by the user). However,
  // there is actually a hidden count of logical rows stored as the first 4
  // bytes of the packed row. This counter is supposed to keep track of a sum
  // of this number in each packed row it gets from the left. When all
  // insertions are done, this sum is used to override the number of rows
  // inserted that the PA node stored in the executor global area. This is
  // not a very good place to have this fix, but since this is a low-priority
  // problem at this time, here we are.
  //
  // NB: The code introduced for this fix could be safely removed if
  // desired. Also, all changes are within this file.
  //
  if (qParent_.down->isEmpty())
    return WORK_OK;

  ex_queue_entry * pentry_down = qParent_.down->getHeadEntry();
  ExTupleFlowPrivateState & pstate =
    *((ExTupleFlowPrivateState*) pentry_down->pstate);

  if ((tflowTdb().userSidetreeInsert()) &&
      (pentry_down->downState.request == ex_queue::GET_EOD) &&
      (NOT pstate.parentEOD_))
  {
    pstate.step_ = MOVE_EOD_TO_TGT_;
  }
  else if ((pstate.step_ != DONE_) &&
           (pstate.step_ != CANCELLED_) &&
           (pentry_down->downState.request == ex_queue::GET_NOMORE))
  {
    if (pstate.step_ == EMPTY_)
      pstate.step_ = DONE_;
    else
      pstate.step_ = CANCELLED_;
  }

  while (1)
  {
    switch (pstate.step_)
    {
      case EMPTY_:
      {
        if (qSrc_.down->isFull())
          return WORK_OK;

        ex_queue_entry * src_entry = qSrc_.down->getTailEntry();

        src_entry->downState.request = pentry_down->downState.request;
        src_entry->downState.requestValue = pentry_down->downState.requestValue;

        if ((tflowTdb().firstNRows() >= 0) &&
            (pentry_down->downState.request != ex_queue::GET_N))
        {
          src_entry->downState.request = ex_queue::GET_N;
          src_entry->downState.requestValue = tflowTdb().firstNRows();
        }

        src_entry->downState.parentIndex = qParent_.down->getHeadIndex();
        src_entry->passAtp(pentry_down);
        qSrc_.down->insert();

        // Just checking to make sure we got a diags area from the CLI if we
        // are executing a non-atomic insert. This is done now so that we
        // don't have to do it in multiple places later.
        if (tflowTdb().isNonFatalErrorTolerated())
        {
          ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
          ex_assert(cliDiagsArea,
                    "In Tupleflow : Non-Atomic insert received no diags area from the CLI");
        }

        pstate.parentEOD_ = FALSE;
        pstate.srcEOD_ = FALSE;
        pstate.matchCount_ = 0;
        pstate.tgtRequests_ = 0;
        pstate.tgtRowsSent_ = FALSE;
        pstate.noOfUnPackedRows_ = 0;
        pstate.srcRequestCount_ = -1;
        pstate.nonFatalErrorSeen_ = FALSE;

        // Set startRightIndex_ so that the cancel request doesn't do anything.
        pstate.startRightIndex_ = pstate.srcRequestCount_;

        pstate.step_ = MOVE_SRC_TO_TGT_;
      }
      break;

      case MOVE_SRC_TO_TGT_:
      {
        // if there are some rows in source up queue, move them to target.
        while ((!qSrc_.up->isEmpty()) &&
               (!qTgt_.down->isFull()) &&
               (pstate.step_ != HANDLE_ERROR_))
        {
          ex_queue_entry * src_entry = qSrc_.up->getHeadEntry();
          ex_queue_entry * tgt_entry = qTgt_.down->getTailEntry();

          switch (src_entry->upState.status)
          {
            case ex_queue::Q_OK_MMORE:
            {
              // move this source row to target.

              // LCOV_EXCL_START
              // BEGIN: - Read note at beginning of work().
              //
              if (tcbSrc_->getNodeType() == ComTdb::ex_PACKROWS)
              {
                char* packTuppPtr =
                  src_entry->getTupp(src_entry->numTuples()-1).getDataPointer();
                Int32 noOfRows = *((Int32 *)packTuppPtr);
                pstate.noOfUnPackedRows_ += (noOfRows - 1);
              }
              //
              // END: - Read note at beginning of work().
              // LCOV_EXCL_STOP

              pstate.srcRequestCount_++;

              tgt_entry->downState.request = pentry_down->downState.request;
              tgt_entry->downState.requestValue = pentry_down->downState.requestValue;
              tgt_entry->downState.parentIndex = (Lng32) pstate.srcRequestCount_;
              tgt_entry->copyAtp(src_entry);

              qTgt_.down->insert();
              pstate.tgtRequests_++;
              pstate.tgtRowsSent_ = TRUE;

              qSrc_.up->removeHead();
            }
            break;

            case ex_queue::Q_NO_DATA:
            {
              if ((tflowTdb().vsbbInsertOn()) &&
                  (pstate.tgtRowsSent_ == TRUE))
              {
                if (tflowTdb().userSidetreeInsert())
                {
                  tgt_entry->downState.request = ex_queue::GET_EOD_NO_ST_COMMIT;
                }
                else
                {
                  tgt_entry->downState.request = ex_queue::GET_EOD;
                }
                tgt_entry->downState.requestValue = pentry_down->downState.requestValue;
                tgt_entry->downState.parentIndex = (Lng32) pstate.srcRequestCount_;
                tgt_entry->copyAtp(src_entry);

                qTgt_.down->insert();
                pstate.tgtRequests_++;
              }

              // LCOV_EXCL_START
              if ((pstate.tgtRowsSent_ == FALSE) &&
                  (src_entry->getDiagsArea()))
              {
                // a warning is returned with EOD and
                // nothing else was returned from source.
                // Move warning to parent's up queue.
                if (qParent_.up->isFull())
                  return WORK_OK;

                ex_queue_entry * up_entry = qParent_.up->getTailEntry();
                up_entry->setDiagsArea(src_entry->getDiagsArea());
              }
              // LCOV_EXCL_STOP

              qSrc_.up->removeHead();
              pstate.srcEOD_ = TRUE;

              // LCOV_EXCL_START
              if (tflowTdb().sendEODtoTgt())
                pstate.step_ = MOVE_EOD_TO_TGT_;
              // LCOV_EXCL_STOP
            }
            break;

            case ex_queue::Q_SQLERROR:
            {
              if (qParent_.up->isFull())
                return WORK_OK;

              ex_queue_entry * pentry = qParent_.up->getTailEntry();

              ComDiagsArea * da = src_entry->getDiagsArea();
              ex_assert(da, "We have a Q_SQLERROR in Tupleflow but no diags area");

              if (tflowTdb().isNonFatalErrorTolerated() &&
                  (da->getNextRowNumber(ComCondition::NONFATAL_ERROR) ==
                   ComCondition::NONFATAL_ERROR))
              {
                pstate.nonFatalErrorSeen_ = TRUE;
              }
              else
              {
                pstate.step_ = HANDLE_ERROR_;
                pstate.nonFatalErrorSeen_ = FALSE;
              }

              pstate.srcRequestCount_++;
              if (tflowTdb().isRowsetIterator())
                da->setAllRowNumber((Lng32) pstate.srcRequestCount_);

              ComDiagsArea *accumulatedDiagsArea = pentry->getDiagsArea();
              if (accumulatedDiagsArea)
              {
                accumulatedDiagsArea->mergeAfter(*da);
                if (!(accumulatedDiagsArea->canAcceptMoreErrors()) &&
                    tflowTdb().isNonFatalErrorTolerated())
                {
                  pstate.nonFatalErrorSeen_ = FALSE;
                  pstate.step_ = HANDLE_ERROR_;
                }
              }
              else
              {
                pentry->setDiagsArea(da);
                da->incrRefCount();
                accumulatedDiagsArea = da;
                if (tflowTdb().isNonFatalErrorTolerated())
                {
                  ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
                  da->setLengthLimit(cliDiagsArea->getLengthLimit());
                }
              }

              // For non-fatal errors we will remove this Q_SQLERROR reply from
              // the left child right below, as we will continue to stay in this
              // state (MOVE_SRC_TO_TGT_). For fatal errors this Q_SQLERROR reply
              // is removed in the HANDLE_ERROR_ step to which we will transition
              // immediately.
              if (pstate.nonFatalErrorSeen_ == TRUE)
                qSrc_.up->removeHead();
            }
            break;

            case ex_queue::Q_REC_SKIPPED:
            {
              pstate.srcRequestCount_++;
              ComDiagsArea * da = src_entry->getDiagsArea();
              if (da)
                pstate.nonFatalErrorSeen_ = TRUE;
              qSrc_.up->removeHead();
            }
            break;

            default:
            {
              ex_assert(0, "ExTupleFlowTcb::work() Error returned from src"); // LCOV_EXCL_LINE
            }
            break;
          } // switch
        } // while

        // if the child reply is not a Q_SQLERROR, then process target
        if ((pstate.step_ != HANDLE_ERROR_) &&
            (pstate.step_ != MOVE_EOD_TO_TGT_))
          pstate.step_ = PROCESS_TGT_;
      } // MOVE_SRC_TO_TGT
      break;

      case MOVE_EOD_TO_TGT_:
      {
        pstate.parentEOD_ = TRUE;

        if (qTgt_.down->isFull())
          return WORK_OK;

        ex_queue_entry * tgt_entry = qTgt_.down->getTailEntry();

        tgt_entry->downState.request = ex_queue::GET_EOD;
        tgt_entry->downState.requestValue = pentry_down->downState.requestValue;
        tgt_entry->downState.parentIndex = qParent_.down->getHeadIndex();
        //tgt_entry->passAtp(pentry_down);

        qTgt_.down->insert();
        pstate.tgtRequests_++;

        // LCOV_EXCL_START
        if (tflowTdb().sendEODtoTgt())
          pstate.srcEOD_ = TRUE;
        // LCOV_EXCL_STOP
        else
          pstate.srcEOD_ = FALSE;

        pstate.step_ = PROCESS_TGT_;
      }
      break;

      case PROCESS_TGT_:
      {
        while (!qTgt_.up->isEmpty() && pstate.step_ != HANDLE_ERROR_)
        {
          ex_queue_entry * tgt_entry = qTgt_.up->getHeadEntry();

          switch (tgt_entry->upState.status)
          {
            case ex_queue::Q_OK_MMORE:
            {
              if (!tflowTdb().isNonFatalErrorTolerated())
              {
                // ex_assert(0, "ExTupleFlowTcb::work() OK_MMORE from tgt");
                if (qParent_.up->isFull())
                  return WORK_OK;

                ex_queue_entry * pentry = qParent_.up->getTailEntry();

                pentry->upState.status = ex_queue::Q_OK_MMORE;
                pentry->upState.downIndex = qParent_.down->getHeadIndex();
                pentry->upState.parentIndex = pentry_down->downState.parentIndex;
                pentry->upState.setMatchNo(pstate.matchCount_);

                // copy input tupps from parent request
                pentry->copyAtp(pentry_down);

                // copy child's atp to
                // the output atp (to parent's up queue)
                pentry->copyAtp(tgt_entry);

                // insert into parent up queue
                qParent_.up->insert();
              }
              else
              {
                ComDiagsArea * da = tgt_entry->getDiagsArea();
                ex_assert(da, "We have a Q_OK_MMORE in Tupleflow but no diags area");

                if (da->mainSQLCODE() != 0)
                {
                  // Non-atomic rowsets send OK_MMORE with non-empty diags from
                  // the child. Empty diags (mainSQLCODE == 0) implies OK_MMORE
                  // sent by the ignoreDupKey code when NAR is on, for the -8102
                  // error. Just consume the OK_MMORE.
                  if (tflowTdb().isRowsetIterator())
                  {
                    da->setAllRowNumber(Lng32 (tgt_entry->upState.parentIndex));
                  }

                  pstate.nonFatalErrorSeen_ = TRUE;

                  ex_queue_entry * pentry = qParent_.up->getTailEntry();
                  ComDiagsArea *accumulatedDiagsArea = pentry->getDiagsArea();
                  if (accumulatedDiagsArea)
                  {
                    accumulatedDiagsArea->mergeAfter(*da);
                    if (!(accumulatedDiagsArea->canAcceptMoreErrors()) &&
                        tflowTdb().isNonFatalErrorTolerated())
                    {
                      pstate.nonFatalErrorSeen_ = FALSE;
                      pstate.step_ = HANDLE_ERROR_;
                    }
                  }
                  else
                  {
                    pentry->setDiagsArea(da);
                    da->incrRefCount();
                    if (tflowTdb().isNonFatalErrorTolerated())
                    {
                      ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
                      da->setLengthLimit(cliDiagsArea->getLengthLimit());
                    }
                  }
                }
              }

              qTgt_.up->removeHead();
            }
            break;

            case ex_queue::Q_NO_DATA:
            {
              ComDiagsArea * da = tgt_entry->getDiagsArea();
              if (da)
              {
                ex_queue_entry * pentry = qParent_.up->getTailEntry();
                ComDiagsArea *accumulatedDiagsArea = pentry->getDiagsArea();
                if (accumulatedDiagsArea)
                  accumulatedDiagsArea->mergeAfter(*da);
                else
                {
                  pentry->setDiagsArea(da);
                  da->incrRefCount();
                  if (tflowTdb().isNonFatalErrorTolerated())
                  {
                    ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
                    da->setLengthLimit(cliDiagsArea->getLengthLimit());
                  }
                }
              }
              pstate.matchCount_ += tgt_entry->upState.getMatchNo();
              qTgt_.up->removeHead();
              pstate.tgtRequests_--;
              pstate.startRightIndex_++;
            }
            break;

            case ex_queue::Q_SQLERROR:
            {
              if (qParent_.up->isFull())
                return WORK_OK;

              ex_queue_entry * pentry = qParent_.up->getTailEntry();

              pentry->copyAtp(tgt_entry);
              pstate.nonFatalErrorSeen_ = FALSE;
              pstate.step_ = HANDLE_ERROR_;

              if (tflowTdb().isRowsetIterator())
              {
                ex_queue_entry * pentry = qParent_.up->getTailEntry();
                ComDiagsArea *da = pentry->getDiagsArea();
                ex_assert(da,
                          "To set RowNumber, an error condition must be present in the diags area");
                da->setAllRowNumber(Lng32 (tgt_entry->upState.parentIndex));
              }
            }
            break;

            default:
            {
              ex_assert(0, "ExTupleFlowTcb::work() Error returned from tgt"); // LCOV_EXCL_LINE
            }
            break;
          } // switch
        } // while

        if (pstate.step_ == HANDLE_ERROR_)
          break;

        // if source has returned EOD,
        // and there are no pending requests in target's down
        // queue, then we are done with this parent request.
        if (((pstate.srcEOD_ == TRUE) || (pstate.parentEOD_ == TRUE)) &&
            (qTgt_.down->isEmpty()))
          pstate.step_ = DONE_;
        else
        {
          if (NOT pstate.parentEOD_)
            pstate.step_ = MOVE_SRC_TO_TGT_;

          if (qSrc_.up->isEmpty() || qTgt_.down->isFull())
            return WORK_OK;
          else
            return WORK_CALL_AGAIN;
        }
      }
      break;

      case HANDLE_ERROR_:
      {
        ex_queue_entry * pentry = qParent_.up->getTailEntry();

        pentry->upState.status = ex_queue::Q_SQLERROR;
        pentry->upState.downIndex = qParent_.down->getHeadIndex();
        pentry->upState.parentIndex = pentry_down->downState.parentIndex;
        pentry->upState.setMatchNo(pstate.matchCount_);

        ComDiagsArea *da = pentry->getDiagsArea();
        if (tflowTdb().isNonFatalErrorTolerated() &&
            !(da->canAcceptMoreErrors()))
        {
          ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
          da->removeLastErrorCondition();
          *da << DgSqlCode(-EXE_NONATOMIC_FAILURE_LIMIT_EXCEEDED)
              << DgInt0(cliDiagsArea->getLengthLimit());
        }

        // insert into parent up queue
        qParent_.up->insert();

        pstate.step_ = CANCELLED_;
      }
      break;

      case CANCELLED_:
      {
        qSrc_.down->cancelRequestWithParentIndex(qParent_.down->getHeadIndex());

        // Cancel all the outstanding requests that have been sent to the
        // target. Cancel all requests within the given range (inclusive).
        qTgt_.down->cancelRequestWithParentIndexRange(
             (queue_index)pstate.startRightIndex_+1,
             (queue_index)pstate.srcRequestCount_);
        pstate.startRightIndex_ = pstate.srcRequestCount_;

        // ignore all rows from source child, till Q_NO_DATA is reached
        while ((pstate.srcEOD_ != TRUE) && (!qSrc_.up->isEmpty()))
        {
          ex_queue_entry * src_entry = qSrc_.up->getHeadEntry();
          switch (src_entry->upState.status)
          {
            case ex_queue::Q_OK_MMORE:
            case ex_queue::Q_SQLERROR:
            case ex_queue::Q_REC_SKIPPED:
            {
              qSrc_.up->removeHead();
            }
            break;

            case ex_queue::Q_NO_DATA:
            {
              pstate.srcEOD_ = TRUE;
              qSrc_.up->removeHead();
            }
            break;

            default:
            {
              ex_assert(0, "ExTupleFlowTcb::work() Error returned from src"); // LCOV_EXCL_LINE
            }
            break;
          }
        }

        // ignore all rows from target child, till Q_NO_DATA is reached
        while (pstate.tgtRequests_ && !qTgt_.up->isEmpty())
        {
          ex_queue_entry * tgt_entry = qTgt_.up->getHeadEntry();
          switch (tgt_entry->upState.status)
          {
            case ex_queue::Q_OK_MMORE:
            case ex_queue::Q_SQLERROR:
            {
              qTgt_.up->removeHead();
            }
            break;

            case ex_queue::Q_NO_DATA:
            {
              qTgt_.up->removeHead();
              pstate.tgtRequests_--;
            }
            break;

            default:
            {
              ex_assert(0, "ExTupleFlowTcb::work() Error returned from tgt"); // LCOV_EXCL_LINE
            }
            break;
          }
        }

        // if both source and target have returned all their rows,
        // move on to the DONE_ step.
        if ((pstate.srcEOD_ == TRUE) && !pstate.tgtRequests_)
        {
          pstate.step_ = DONE_;
        }
        else
          return WORK_OK;
      }
      break;

      case DONE_:
      {
        if (qParent_.up->isFull())
          return WORK_OK;

        ex_queue_entry * pentry = qParent_.up->getTailEntry();

        if (pstate.nonFatalErrorSeen_)
        {
          ComDiagsArea *da = pentry->getDiagsArea();
          ComDiagsArea *cliDiagsArea = pentry_down->getDiagsArea();
          ex_assert((da || cliDiagsArea),
                    "We have non-fatal errors in Tupleflow but no diags area");
          if (cliDiagsArea)
          {
            if (da)
              da->mergeAfter(*cliDiagsArea);
            else
            {
              pentry->setDiagsArea(cliDiagsArea);
              cliDiagsArea->incrRefCount();
            }
          }

          if (cliDiagsArea->canAcceptMoreErrors())
          {
            ComDiagsArea *mergedDiagsArea = pentry->getDiagsArea();
            // used to make mainSQLCODE() return 30022 or 30035.
            mergedDiagsArea->setNonFatalErrorSeen(TRUE);
            NABoolean anyRowsAffected = FALSE;

            // This tupleflow should be in the master for
            // non-atomic rowsets.
            ExMasterStmtGlobals *g = getGlobals()->
              castToExExeStmtGlobals()->castToExMasterStmtGlobals();
            ex_assert(g,
                      "Rowset insert has a flow node that is not in the master executor");
            if (g->getRowsAffected() > 0)
              anyRowsAffected = TRUE;

            if (anyRowsAffected)
              *mergedDiagsArea << DgSqlCode(EXE_NONFATAL_ERROR_SEEN);
            else
              *mergedDiagsArea << DgSqlCode(EXE_NONFATAL_ERROR_ON_ALL_ROWS);
          }
          // we exceeded the non-fatal error limit when merging with the CLI diags area
          else
          {
            pstate.step_ = HANDLE_ERROR_;
            // will prevent us from merging the diags areas again
            pstate.nonFatalErrorSeen_ = FALSE;
            break;
          }
        }

        pentry->upState.status = ex_queue::Q_NO_DATA;
        pentry->upState.downIndex = qParent_.down->getHeadIndex();
        pentry->upState.parentIndex = pentry_down->downState.parentIndex;
        pentry->upState.setMatchNo(pstate.matchCount_);

        // LCOV_EXCL_START
        // BEGIN: Read note at beginning of work().
        //
        if (pstate.noOfUnPackedRows_ != 0)
        {
          ComDiagsArea *da = pentry->getDiagsArea();
          if (da == NULL)
          {
            da = ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
            pentry->setDiagsArea(da);
          }
          da->addRowCount(pstate.noOfUnPackedRows_);
          pstate.noOfUnPackedRows_ = 0;
        }
        //
        // END: - Read note at beginning of work().
        // LCOV_EXCL_STOP

        // if stats are to be collected, collect them.
        if (getStatsEntry())
        {
          // nothing yet returned from right child or returned
          // to parent.
          getStatsEntry()->setActualRowsReturned(0);
        }

        // insert into parent up queue
        qParent_.up->insert();

        pstate.step_ = EMPTY_;
        qParent_.down->removeHead();

        return WORK_CALL_AGAIN; // check for more down requests
      }
      break;

    } // switch pstate.step_
  } // while

#pragma nowarn(203)  // warning elimination
  return 0;
#pragma warn(203)  // warning elimination
}
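//////////////////////////////////////////////////////
// workUp() for ExProbeCacheTcb
//
// Works on parent requests from the head of the down queue up to
// nextRequest_: on a cache miss it moves the child's reply into the probe
// cache and replies to the parent, on a cache hit it replies directly from
// the cached ExPCE, and canceled requests update the cache (if other
// requests still reference the entry) without replying with data.
//////////////////////////////////////////////////////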
ExWorkProcRetcode ExProbeCacheTcb::workUp()
{
  ExProbeCacheStats *stats = getProbeCacheStats();

  // Work on requests from the head of parent down, until
  // either we reach nextRequest_ (which workDown() hasn't seen yet)
  // or until there is no room in the up queue. Note that
  // there are "return" statements coded in this loop.
  while ((qparent_.down->getHeadIndex() != nextRequest_) &&
         !qparent_.up->isFull())
  {
    ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
    ExProbeCachePrivateState & pstate =
      *((ExProbeCachePrivateState *) pentry_down->pstate);

    switch (pstate.step_)
    {
      case CACHE_MISS:
      {
        if (qchild_.up->isEmpty())
          return WORK_OK;

        ex_queue_entry *reply = qchild_.up->getHeadEntry();

        switch (reply->upState.status)
        {
          case ex_queue::Q_OK_MMORE:
          {
            MoveStatus moveRetCode = moveReplyToCache(*reply, *pstate.pcEntry_);
            if (moveRetCode == MOVE_BLOCKED)
            {
              return WORK_POOL_BLOCKED;
            }
            else if (moveRetCode == MOVE_OK)
            {
              pstate.matchCount_ = 1;
              makeReplyToParentUp(pentry_down, pstate, ex_queue::Q_OK_MMORE);

              // Cancel here, b/c semi-join and anti-semi-join
              // will return more than one Q_OK_MMORE. Tbd -
              // perhaps the tdb should pass a flag for this.
              qchild_.down->cancelRequestWithParentIndex(
                   pstate.pcEntry_->probeQueueIndex_);
              break;
            }
            else
            {
              ex_assert(moveRetCode == MOVE_ERROR,
                        "bad retcode from moveReplyToCache");
              // Don't break from this Q_OK_MMORE case, but
              // instead flow down as if Q_SQLERROR. The
              // diagsArea should have been init'd in the
              // moveInnerExpr()->eval.
            }
          }
          case ex_queue::Q_SQLERROR:
          {
            // Initialize ExPCE members
            pstate.pcEntry_->upstateStatus_ = ex_queue::Q_SQLERROR;
            ComDiagsArea *da = reply->getAtp()->getDiagsArea();
            ex_assert(da, "Q_SQLERROR without a diags area");
            pstate.pcEntry_->diagsArea_ = da;
            da->incrRefCount();

            makeReplyToParentUp(pentry_down, pstate, ex_queue::Q_SQLERROR);

            // No need to cancel, since we expect no more than
            // one reply from our child.
            break;
          }
          case ex_queue::Q_NO_DATA:
          {
            // Initialize ExPCE members
            pstate.pcEntry_->upstateStatus_ = ex_queue::Q_NO_DATA;
            ComDiagsArea *da = reply->getAtp()->getDiagsArea();
            if (da)
            {
              pstate.pcEntry_->diagsArea_ = da;
              da->incrRefCount();
            }
            break;
            // A Q_NO_DATA will be inserted into the parent up queue
            // in the DONE step_.
          }
          default:
          {
            ex_assert(0, "Unknown upstate.status in child up queue");
            break;
          }
        }

        if (stats)
          stats->incMiss();

        pstate.pcEntry_->release(); // Request no longer references PCE.
        pstate.step_ = DONE_MISS;
        break;
      }
      case CACHE_HIT:
      {
        switch (pstate.pcEntry_->upstateStatus_)
        {
          case ex_queue::Q_OK_MMORE:
          {
            pstate.matchCount_ = 1;
            makeReplyToParentUp(pentry_down, pstate, pstate.pcEntry_->upstateStatus_);
            break;
          }
          case ex_queue::Q_SQLERROR:
          {
            makeReplyToParentUp(pentry_down, pstate, pstate.pcEntry_->upstateStatus_);
            // No need to cancel, since we expect no more than
            // one reply from our child.
            break;
          }
          case ex_queue::Q_NO_DATA:
          {
            // The DONE step will handle this.
            break;
          }
          case ex_queue::Q_INVALID:
          {
            // Should not happen.
            ex_assert(0, "CACHE_HIT saw Q_INVALID");
          }
          default:
          {
            // Should not happen.
            ex_assert(0, "CACHE_HIT saw unknown upstateStatus");
          }
        }

        if (stats)
          stats->incHit();

        pstate.step_ = DONE;
        pstate.pcEntry_->release(); // Request no longer references PCE.
        break;
      }
      case CANCELED_MISS:
      {
        if (qchild_.up->isEmpty())
          return WORK_OK;

        if (pstate.pcEntry_->refCnt_ != 0)
        {
          // There are other requests that are interested in this
          // reply, so put it into the Probe Cache, according to
          // its upState.status. However, do not reply with Q_OK_MMORE
          // or Q_SQLERROR to this request.

          ex_queue_entry *reply = qchild_.up->getHeadEntry();

          switch (reply->upState.status)
          {
            case ex_queue::Q_OK_MMORE:
            {
              MoveStatus moveRetCode2 = moveReplyToCache(*reply, *pstate.pcEntry_);
              if (moveRetCode2 == MOVE_BLOCKED)
              {
                return WORK_POOL_BLOCKED;
              }
              else if (moveRetCode2 == MOVE_OK)
              {
                // Now that we have the reply, we can propagate
                // the cancel. We do this b/c (anti-)semi-join
                // will return more than one Q_OK_MMORE. Tbd -
                // perhaps the tdb should pass a flag for this.
                qchild_.down->cancelRequestWithParentIndex(
                     pstate.pcEntry_->probeQueueIndex_);
                break;
              }
              else
              {
                ex_assert(moveRetCode2 == MOVE_ERROR,
                          "bad retcode from moveReplyToCache");
                // Don't break from this Q_OK_MMORE case, but
                // instead flow down as if Q_SQLERROR. The
                // diagsArea should have been init'd in the
                // moveInnerExpr()->eval.
              }
            }
            case ex_queue::Q_SQLERROR:
            {
              // Initialize ExPCE members
              pstate.pcEntry_->upstateStatus_ = ex_queue::Q_SQLERROR;
              ComDiagsArea *da = reply->getAtp()->getDiagsArea();
              ex_assert(da, "Q_SQLERROR without a diags area");
              pstate.pcEntry_->diagsArea_ = da;
              da->incrRefCount();
              break;
            }
            case ex_queue::Q_NO_DATA:
            {
              // Initialize ExPCE members
              pstate.pcEntry_->upstateStatus_ = ex_queue::Q_NO_DATA;
              ComDiagsArea *da = reply->getAtp()->getDiagsArea();
              if (da)
              {
                pstate.pcEntry_->diagsArea_ = da;
                da->incrRefCount();
              }
              break;
            }
            default:
            {
              ex_assert(0, "Unknown upstate.status in child up queue");
              break;
            }
          }
        }
        else
        {
          // There are no uncanceled requests interested
          // in this PCE.
        }

        if (stats)
          stats->incCanceledMiss();

        pstate.step_ = DONE_MISS;
        break;
      }
      case CANCELED_HIT:
      {
        if (stats)
          stats->incCanceledHit();

        pstate.step_ = DONE;
        break;
      }
      case CANCELED_NOT_STARTED:
      {
        if (stats)
          stats->incCanceledNotStarted();

        pstate.step_ = DONE;
        break;
      }
      case DONE_MISS:
      {
        // In this step we discard the original 1st reply to the
        // CACHE_MISS or CANCELED_MISS, as well as any other replies.
        // We can get multiple replies for a semi-join or
        // anti-semi-join.
        // We also discard the Q_NO_DATA.
        NABoolean finishedDoneMiss = FALSE;
        while (!finishedDoneMiss)
        {
          if (qchild_.up->isEmpty())
            return WORK_OK;

          ex_queue_entry *reply2 = qchild_.up->getHeadEntry();

          if (reply2->upState.status == ex_queue::Q_NO_DATA)
            finishedDoneMiss = TRUE;

          qchild_.up->removeHead();
        }
        pstate.step_ = DONE;
        break;
      }
      case DONE:
      {
        makeReplyToParentUp(pentry_down, pstate, ex_queue::Q_NO_DATA);

        pstate.init();
        qparent_.down->removeHead();
        break;
      }
      case NOT_STARTED:
      default:
      {
        ex_assert(0, "workUp saw unexpected pstate.step_");
        break;
      }
    }
  }
  return WORK_OK;
}