void Dbtup::lcpFlushLogLab(Signal* signal, CheckpointInfoPtr ciPtr)
{
  DiskBufferSegmentInfoPtr oldDbsiPtr;
  LocalLogInfoPtr lliPtr;
  UndoPagePtr oldUndoPagePtr;
  UndoPagePtr newUndoPagePtr;

  lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
  ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
  oldDbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
  ptrCheckGuard(oldDbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
  oldDbsiPtr.p->pdxNumDataPages++;
  if (clblPageCounter > 0) {
    ljam();
    clblPageCounter--;
  }//if
  oldUndoPagePtr.i = lliPtr.p->lliUndoPage;
  ptrCheckGuard(oldUndoPagePtr, cnoOfUndoPage, undoPage);
  lcpWriteUndoSegment(signal, lliPtr.p, true);
  oldDbsiPtr.p->pdxOperation = CHECKPOINT_UNDO_WRITE_FLUSH;
  oldDbsiPtr.p->pdxCheckpointInfoP = ciPtr.i;
  /* ---------------------------------------------------------------- */
  /* SINCE LAST PAGE SENT TO DISK WAS NOT FULL YET WE COPY IT          */
  /* TO THE NEW LAST PAGE.                                             */
  /* ---------------------------------------------------------------- */
  newUndoPagePtr.i = lliPtr.p->lliUndoPage;
  ptrCheckGuard(newUndoPagePtr, cnoOfUndoPage, undoPage);
  ndbrequire(lliPtr.p->lliUndoWord < ZWORDS_ON_PAGE);
  MEMCOPY_NO_WORDS(&newUndoPagePtr.p->undoPageWord[0],
                   &oldUndoPagePtr.p->undoPageWord[0],
                   lliPtr.p->lliUndoWord);
}//Dbtup::lcpFlushLogLab()
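/*
 * TUX interface: free an index node that was allocated as a fixed-size
 * tuple in the fragment. The node pointer passed in is cross-checked
 * against the attribute data offset of the tuple before the record is
 * released with free_fix_rec().
 */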
void
Dbtup::tuxFreeNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  Local_key key;
  key.m_page_no = pageId;
  key.m_page_idx = pageOffset;
  PagePtr pagePtr;
  Tuple_header* ptr = (Tuple_header*)get_ptr(&pagePtr, &key, tablePtr.p);

  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(
                           tableDescriptor[attrDescIndex + 1].tabDescr);
  ndbrequire(node == (Uint32*)ptr + attrDataOffset);

  free_fix_rec(fragPtr.p, tablePtr.p, &key, (Fix_page*)pagePtr.p);
}
int
Dbtup::tuxAllocNode(Signal* signal, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  terrorCode= 0;

  Local_key key;
  Uint32* ptr, frag_page_id;
  if ((ptr= alloc_fix_rec(fragPtr.p, tablePtr.p, &key, &frag_page_id)) == 0)
  {
    jam();
    terrorCode = ZMEM_NOMEM_ERROR; // caller sets error
    return terrorCode;
  }
  pageId= key.m_page_no;
  pageOffset= key.m_page_idx;
  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(
                           tableDescriptor[attrDescIndex + 1].tabDescr);
  node= ptr + attrDataOffset;
  return 0;
}
int
Dbtup::tuxAllocNode(EmulatedJamBuffer * jamBuf, Uint32 fragPtrI, Uint32& pageId, Uint32& pageOffset, Uint32*& node)
{
  thrjamEntry(jamBuf);
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  Local_key key;
  Uint32* ptr, frag_page_id, err;
  if ((ptr= alloc_fix_rec(jamBuf, &err, fragPtr.p, tablePtr.p, &key,
                          &frag_page_id)) == 0)
  {
    thrjam(jamBuf);
    return err;
  }
  pageId= key.m_page_no;
  pageOffset= key.m_page_idx;
  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(
                           tableDescriptor[attrDescIndex + 1].tabDescr);
  node= ptr + attrDataOffset;
  return 0;
}
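/*
 * Multi-threaded scan helper: advance the scan position to the next
 * in-use fixed-size tuple. Free tuples are skipped and, when a page is
 * exhausted, the scan moves on to the next mapped fragment page.
 * Returns 0 when a tuple is found (its version is stashed in
 * pos->m_file_no) and 1 when the fragment is exhausted.
 */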
int
Dbtup::mt_scan_next(Uint32 tableId, Uint32 fragPtrI, Local_key* pos, bool moveNext)
{
  TablerecPtr tablePtr;
  tablePtr.i = tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  FragrecordPtr fragPtr;
  fragPtr.i = fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

  Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;
  if (moveNext)
  {
    pos->m_page_idx += tupheadsize;
  }

  PagePtr pagePtr;
  c_page_pool.getPtr(pagePtr, pos->m_page_no);

  while (1)
  {
    Tuple_header* tuple_ptr;
    while (pos->m_page_idx + tupheadsize <= Fix_page::DATA_WORDS)
    {
      tuple_ptr = (Tuple_header*)(pagePtr.p->m_data + pos->m_page_idx);
      // skip over free tuple
      if (tuple_ptr->m_header_bits & Tuple_header::FREE)
      {
        pos->m_page_idx += tupheadsize;
        continue;
      }
      pos->m_file_no = tuple_ptr->get_tuple_version();
      return 0; // Found
    }

    // End of page...move to next
    Uint32 fragPageId = pagePtr.p->frag_page_id + 1;
    while (fragPageId < fragPtr.p->m_max_page_cnt)
    {
      Uint32 realPageId = getRealpidCheck(fragPtr.p, fragPageId);
      if (realPageId != RNIL)
      {
        pos->m_page_no = realPageId;
        break;
      }
      fragPageId++;
    }

    if (fragPageId == fragPtr.p->m_max_page_cnt)
      break;

    pos->m_page_idx = 0;
    c_page_pool.getPtr(pagePtr, pos->m_page_no);
  }
  return 1;
}
/* ---------------------------------------------------------------- */
void Dbtup::execTUP_LCPREQ(Signal* signal)
{
  CheckpointInfoPtr ciPtr;
  DiskBufferSegmentInfoPtr dbsiPtr;
  FragrecordPtr regFragPtr;
  LocalLogInfoPtr lliPtr;

  ljamEntry();
  // Uint32 userptr = signal->theData[0];
  // BlockReference userblockref = signal->theData[1];
  ciPtr.i = signal->theData[2];
  ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
  regFragPtr.i = ciPtr.p->lcpFragmentP;
  ptrCheckGuard(regFragPtr, cnoOfFragrec, fragrecord);
  /* ---------------------------------------------------------------- */
  /* ASSIGNING A VALUE DIFFERENT FROM RNIL TO CHECKPOINT VERSION       */
  /* TRIGGERS THAT UNDO LOGGING WILL START FOR THIS FRAGMENT.          */
  /* WE ASSIGN IT THE POINTER TO THE CHECKPOINT RECORD FOR             */
  /* OPTIMISATION OF THE WRITING OF THE UNDO LOG.                      */
  /* ---------------------------------------------------------------- */
  regFragPtr.p->checkpointVersion = ciPtr.p->lcpLocalLogInfoP; /* MARK START OF UNDO LOGGING */
  regFragPtr.p->maxPageWrittenInCheckpoint = getNoOfPages(regFragPtr.p);
  regFragPtr.p->minPageNotWrittenInCheckpoint = 0;
  ndbrequire(getNoOfPages(regFragPtr.p) > 0);
  allocDataBufferSegment(signal, dbsiPtr);
  dbsiPtr.p->pdxNumDataPages = 0;
  dbsiPtr.p->pdxFilePage = 1;
  ciPtr.p->lcpDataBufferSegmentP = dbsiPtr.i;
  dbsiPtr.p->pdxCheckpointInfoP = ciPtr.i;
  ciPtr.p->lcpNoOfPages = getNoOfPages(regFragPtr.p);
  ciPtr.p->lcpNoCopyPagesAlloc = regFragPtr.p->noCopyPagesAlloc;
  ciPtr.p->lcpEmptyPrimPage = regFragPtr.p->emptyPrimPage;
  ciPtr.p->lcpThFreeFirst = regFragPtr.p->thFreeFirst;
  ciPtr.p->lcpThFreeCopyFirst = regFragPtr.p->thFreeCopyFirst;
  lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
  ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
  /* ---------------------------------------------------------------- */
  /* --- PERFORM A COPY OF THE TABLE DESCRIPTOR FOR THIS FRAGMENT --- */
  /* ---------------------------------------------------------------- */
  cprAddLogHeader(signal,
                  lliPtr.p,
                  ZTABLE_DESCRIPTOR,
                  ciPtr.p->lcpTabPtr,
                  ciPtr.p->lcpFragmentId);
  /* ---------------------------------------------------------------- */
  /* CONTINUE WITH SAVING ACTIVE OPERATIONS AFTER A REAL-TIME          */
  /* BREAK.                                                            */
  /* ---------------------------------------------------------------- */
  ciPtr.p->lcpTmpOperPtr = regFragPtr.p->firstusedOprec;
  lcpSaveCopyListLab(signal, ciPtr);
  return;
}//Dbtup::execTUP_LCPREQ()
/* ---------------------------------------------------------------- */
void Dbtup::execSTORED_PROCREQ(Signal* signal)
{
  OperationrecPtr regOperPtr;
  TablerecPtr regTabPtr;
  jamEntry();
  regOperPtr.i = signal->theData[0];
  c_operation_pool.getPtr(regOperPtr);
  regTabPtr.i = signal->theData[1];
  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  Uint32 requestInfo = signal->theData[3];
  TransState trans_state= get_trans_state(regOperPtr.p);
  ndbrequire(trans_state == TRANS_IDLE ||
             ((trans_state == TRANS_ERROR_WAIT_STORED_PROCREQ) &&
              (requestInfo == ZSTORED_PROCEDURE_DELETE)));
  ndbrequire(regTabPtr.p->tableStatus == DEFINED);
  switch (requestInfo) {
  case ZSCAN_PROCEDURE:
    jam();
    scanProcedure(signal, regOperPtr.p, signal->theData[4]);
    break;
  case ZCOPY_PROCEDURE:
    jam();
    copyProcedure(signal, regTabPtr, regOperPtr.p);
    break;
  case ZSTORED_PROCEDURE_DELETE:
    jam();
    deleteScanProcedure(signal, regOperPtr.p);
    break;
  default:
    ndbrequire(false);
  }//switch
}//Dbtup::execSTORED_PROCREQ()
/* ---------------------------------------------------------------------- */
void Dbtup::execEND_LCPREQ(Signal* signal)
{
  DiskBufferSegmentInfoPtr dbsiPtr;
  LocalLogInfoPtr lliPtr;
  PendingFileOpenInfoPtr pfoiPtr;

  ljamEntry();
  clqhUserpointer = signal->theData[0];
  clqhBlockref = signal->theData[1];
  for (lliPtr.i = 0; lliPtr.i < 16; lliPtr.i++) {
    ljam();
    ptrAss(lliPtr, localLogInfo);
    if (lliPtr.p->lliActiveLcp > 0) {
      ljam();
      dbsiPtr.i = lliPtr.p->lliUndoBufferSegmentP;
      ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
      freeDiskBufferSegmentRecord(signal, dbsiPtr);
      seizePendingFileOpenInfoRecord(pfoiPtr); /* SEIZE A NEW FILE OPEN INFO */
      pfoiPtr.p->pfoOpenType = LCP_UNDO_FILE_CLOSE;
      pfoiPtr.p->pfoCheckpointInfoP = lliPtr.i;
      signal->theData[0] = lliPtr.p->lliUndoFileHandle;
      signal->theData[1] = cownref;
      signal->theData[2] = pfoiPtr.i;
      signal->theData[3] = 0;
      sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
      lliPtr.p->lliActiveLcp = 0;
    }//if
  }//for
  return;
}//Dbtup::execEND_LCPREQ()
void
Dbtup::execLCP_FRAG_ORD(Signal* signal)
{
  LcpFragOrd* req= (LcpFragOrd*)signal->getDataPtr();

  TablerecPtr tablePtr;
  tablePtr.i = req->tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  if (tablePtr.p->m_no_of_disk_attributes)
  {
    jam();
    FragrecordPtr fragPtr;
    Uint32 fragId = req->fragmentId;
    fragPtr.i = RNIL;
    getFragmentrec(fragPtr, fragId, tablePtr.p);
    ndbrequire(fragPtr.i != RNIL);
    Fragrecord& frag = *fragPtr.p;

    ndbrequire(frag.m_lcp_scan_op == RNIL && c_lcp_scan_op != RNIL);
    frag.m_lcp_scan_op = c_lcp_scan_op;
    ScanOpPtr scanPtr;
    c_scanOpPool.getPtr(scanPtr, frag.m_lcp_scan_op);
    ndbrequire(scanPtr.p->m_fragPtrI == RNIL);
    scanPtr.p->m_fragPtrI = fragPtr.i;

    scanFirst(signal, scanPtr);
    scanPtr.p->m_state = ScanOp::First;
  }
}
/* ---------------------------------------------------------------- */
void Dbtup::execSTORED_PROCREQ(Signal* signal)
{
  OperationrecPtr regOperPtr;
  TablerecPtr regTabPtr;
  jamEntry();
  regOperPtr.i = signal->theData[0];
  c_operation_pool.getPtr(regOperPtr);
  regTabPtr.i = signal->theData[1];
  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  Uint32 requestInfo = signal->theData[3];
  TransState trans_state= get_trans_state(regOperPtr.p);
  ndbrequire(trans_state == TRANS_IDLE ||
             ((trans_state == TRANS_ERROR_WAIT_STORED_PROCREQ) &&
              (requestInfo == ZSTORED_PROCEDURE_DELETE)));
  ndbrequire(regTabPtr.p->tableStatus == DEFINED);

  /*
   * Also store count of procs called from non-API scans.
   * It can be done here since seize/release always succeeds.
   * The count is only used under -DERROR_INSERT via DUMP.
   */
  BlockReference apiBlockref = signal->theData[5];
  switch (requestInfo) {
  case ZSCAN_PROCEDURE:
  {
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, +1);
#endif
    SectionHandle handle(this, signal);
    ndbrequire(handle.m_cnt == 1);

    scanProcedure(signal,
                  regOperPtr.p,
                  &handle,
                  false); // Not copy
    break;
  }
  case ZCOPY_PROCEDURE:
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, +1);
#endif
    copyProcedure(signal, regTabPtr, regOperPtr.p);
    break;
  case ZSTORED_PROCEDURE_DELETE:
    jam();
#if defined VM_TRACE || defined ERROR_INSERT
    storedProcCountNonAPI(apiBlockref, -1);
#endif
    deleteScanProcedure(signal, regOperPtr.p);
    break;
  default:
    ndbrequire(false);
  }//switch
}//Dbtup::execSTORED_PROCREQ()
// Trigger signals
void
Dbtup::execCREATE_TRIG_REQ(Signal* signal)
{
  jamEntry();
  BlockReference senderRef = signal->getSendersBlockRef();
  const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
  const CreateTrigReq* const req = &reqCopy;
  CreateTrigRef::ErrorCode error= CreateTrigRef::NoError;

  // Find table
  TablerecPtr tabPtr;
  tabPtr.i = req->getTableId();
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  if (tabPtr.p->tableStatus != DEFINED )
  {
    jam();
    error= CreateTrigRef::InvalidTable;
  }
  // Create trigger and associate it with the table
  else if (createTrigger(tabPtr.p, req))
  {
    jam();
    // Send conf
    CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
    conf->setUserRef(reference());
    conf->setConnectionPtr(req->getConnectionPtr());
    conf->setRequestType(req->getRequestType());
    conf->setTableId(req->getTableId());
    conf->setIndexId(req->getIndexId());
    conf->setTriggerId(req->getTriggerId());
    conf->setTriggerInfo(req->getTriggerInfo());
    sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
               signal, CreateTrigConf::SignalLength, JBB);
    return;
  }
  else
  {
    jam();
    error= CreateTrigRef::TooManyTriggers;
  }
  ndbassert(error != CreateTrigRef::NoError);
  // Send ref
  CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
  ref->setUserRef(reference());
  ref->setConnectionPtr(req->getConnectionPtr());
  ref->setRequestType(req->getRequestType());
  ref->setTableId(req->getTableId());
  ref->setIndexId(req->getIndexId());
  ref->setTriggerId(req->getTriggerId());
  ref->setTriggerInfo(req->getTriggerInfo());
  ref->setErrorCode(error);
  sendSignal(senderRef, GSN_CREATE_TRIG_REF,
             signal, CreateTrigRef::SignalLength, JBB);
}//Dbtup::execCREATE_TRIG_REQ()
int
Dbtup::mt_scan_init(Uint32 tableId, Uint32 fragId, Local_key* pos, Uint32 * fragPtrI)
{
  TablerecPtr tablePtr;
  tablePtr.i = tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  FragrecordPtr fragPtr;
  fragPtr.i = RNIL;
  for (Uint32 i = 0; i<NDB_ARRAY_SIZE(tablePtr.p->fragid); i++)
  {
    if (tablePtr.p->fragid[i] == fragId)
    {
      fragPtr.i = tablePtr.p->fragrec[i];
      break;
    }
  }

  if (fragPtr.i == RNIL)
    return -1;

  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

  Uint32 fragPageId = 0;
  while (fragPageId < fragPtr.p->m_max_page_cnt)
  {
    Uint32 realPageId= getRealpidCheck(fragPtr.p, fragPageId);
    if (realPageId != RNIL)
    {
      * fragPtrI = fragPtr.i;
      pos->m_page_no = realPageId;
      pos->m_page_idx = 0;
      pos->m_file_no = 0;
      return 0;
    }
    fragPageId++;
  }

  return 1;
}
void Dbtup::lcpFlushRestartInfoLab(Signal* signal, Uint32 ciIndex)
{
  CheckpointInfoPtr ciPtr;
  DiskBufferSegmentInfoPtr dbsiPtr;
  LocalLogInfoPtr lliPtr;
  UndoPagePtr undoCopyPagePtr;

  ciPtr.i = ciIndex;
  ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
  lliPtr.i = ciPtr.p->lcpLocalLogInfoP;
  ptrCheckGuard(lliPtr, cnoOfParallellUndoFiles, localLogInfo);
  dbsiPtr.i = ciPtr.p->lcpDataBufferSegmentP;
  ptrCheckGuard(dbsiPtr, cnoOfConcurrentWriteOp, diskBufferSegmentInfo);
  undoCopyPagePtr.i = dbsiPtr.p->pdxDataPage[0]; /* UNDO INFO STORED AT PAGE 0 */
  ptrCheckGuard(undoCopyPagePtr, cnoOfUndoPage, undoPage);
  ndbrequire(ciPtr.p->lcpNoOfPages > 0);
  undoCopyPagePtr.p->undoPageWord[ZSRI_NO_OF_FRAG_PAGES_POS] = ciPtr.p->lcpNoOfPages;
  undoCopyPagePtr.p->undoPageWord[ZSRI_NO_COPY_PAGES_ALLOC] = ciPtr.p->lcpNoCopyPagesAlloc;
  undoCopyPagePtr.p->undoPageWord[ZSRI_EMPTY_PRIM_PAGE] = ciPtr.p->lcpEmptyPrimPage;
  undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_FIRST] = ciPtr.p->lcpThFreeFirst;
  undoCopyPagePtr.p->undoPageWord[ZSRI_TH_FREE_COPY_FIRST] = ciPtr.p->lcpThFreeCopyFirst;
  undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_REC_ID] = lliPtr.p->lliPrevRecordId;
  undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_FILE_VER] = cundoFileVersion;
  if (lliPtr.p->lliUndoWord == ZUNDO_PAGE_HEADER_SIZE) {
    ljam();
    undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage - 1;
  } else {
    ljam();
    undoCopyPagePtr.p->undoPageWord[ZSRI_UNDO_LOG_END_PAGE_ID] = lliPtr.p->lliLogFilePage;
  }//if
  dbsiPtr.p->pdxNumDataPages = 1;
  dbsiPtr.p->pdxFilePage = 0;
  if (clblPageCounter > 0) {
    ljam();
    clblPageCounter--;
  }//if
  lcpWriteListDataPageSegment(signal, dbsiPtr, ciPtr, true);
  dbsiPtr.p->pdxOperation = CHECKPOINT_DATA_WRITE_FLUSH;
  return;
}//Dbtup::lcpFlushRestartInfoLab()
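/*
 * TUX interface: compute a pointer to the index node stored inside a
 * fixed-size tuple. The node begins at the data offset of the first
 * attribute, taken from the table descriptor.
 */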
void
Dbtup::tuxGetNode(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32*& node)
{
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  PagePtr pagePtr;
  c_page_pool.getPtr(pagePtr, pageId);
  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(
                           tableDescriptor[attrDescIndex + 1].tabDescr);
  node= ((Fix_page*)pagePtr.p)->
    get_ptr(pageOffset, tablePtr.p->m_offsets[MM].m_fix_header_size) +
    attrDataOffset;
}
void
Dbtup::tuxFreeNode(Signal* signal, Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* node)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  PagePtr pagePtr;
  pagePtr.i= pageId;
  ptrCheckGuard(pagePtr, cnoOfPage, cpage);
  Uint32 attrDescIndex= tablePtr.p->tabDescriptor + (0 << ZAD_LOG_SIZE);
  Uint32 attrDataOffset= AttributeOffset::getOffset(
                           tableDescriptor[attrDescIndex + 1].tabDescr);
  ndbrequire(node == &pagePtr.p->pageWord[pageOffset] + attrDataOffset);
  freeTh(fragPtr.p, tablePtr.p, signal, pagePtr.p, pageOffset);
}
void
Dbtup::execACC_CHECK_SCAN(Signal* signal)
{
  jamEntry();
  const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr();
  const AccCheckScan* const req = &reqCopy;
  ScanOpPtr scanPtr;
  c_scanOpPool.getPtr(scanPtr, req->accPtr);
  ScanOp& scan = *scanPtr.p;
  // fragment
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;

  if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
    jam();
    signal->theData[0] = scan.m_userPtr;
    signal->theData[1] = true;
    EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
    jamEntry();
    return;
  }
  if (scan.m_bits & ScanOp::SCAN_LOCK_WAIT) {
    jam();
    // LQH asks if we are waiting for lock and we tell it to ask again
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    conf->accOperationPtr = RNIL; // no tuple returned
    conf->fragId = frag.fragmentId;
    unsigned signalLength = 3;
    // if TC has ordered scan close, it will be detected here
    sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
               signal, signalLength, JBB);
    return; // stop
  }
  if (scan.m_state == ScanOp::First) {
    jam();
    scanFirst(signal, scanPtr);
  }
  if (scan.m_state == ScanOp::Next) {
    jam();
    bool immediate = scanNext(signal, scanPtr);
    if (! immediate) {
      jam();
      // time-slicing via TUP or PGMAN
      return;
    }
  }
  scanReply(signal, scanPtr);
}
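/*
 * Read the primary key of the tuple identified by fragment page id and
 * page index. The logical page id is translated to a real page id and
 * the actual read is delegated to the TUX routine tuxReadPk().
 */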
int
Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
{
  jamEntry();
  // get table
  TablerecPtr tablePtr;
  tablePtr.i = tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  // get fragment
  FragrecordPtr fragPtr;
  getFragmentrec(fragPtr, fragId, tablePtr.p);
  // get real page id and tuple offset
  Uint32 pageId = getRealpid(fragPtr.p, fragPageId);
  // use TUX routine - optimize later
  int ret = tuxReadPk(fragPtr.i, pageId, pageIndex, dataOut, xfrmFlag);
  return ret;
}
void
Dbtup::scanFirst(Signal*, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  ScanPos& pos = scan.m_scanPos;
  Local_key& key = pos.m_key;
  const Uint32 bits = scan.m_bits;
  // fragment
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  // in the future should not pre-allocate pages
  if (frag.noOfPages == 0 && ((bits & ScanOp::SCAN_NR) == 0)) {
    jam();
    scan.m_state = ScanOp::Last;
    return;
  }

  if (! (bits & ScanOp::SCAN_DD)) {
    key.m_file_no = ZNIL;
    key.m_page_no = 0;
    pos.m_get = ScanPos::Get_page_mm;
    // for MM scan real page id is cached for efficiency
    pos.m_realpid_mm = RNIL;
  } else {
    Disk_alloc_info& alloc = frag.m_disk_alloc_info;
    // for now must check disk part explicitly
    if (alloc.m_extent_list.firstItem == RNIL) {
      jam();
      scan.m_state = ScanOp::Last;
      return;
    }
    pos.m_extent_info_ptr_i = alloc.m_extent_list.firstItem;
    Extent_info* ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
    key.m_file_no = ext->m_key.m_file_no;
    key.m_page_no = ext->m_first_page_no;
    pos.m_get = ScanPos::Get_page_dd;
  }
  key.m_page_idx = 0;
  // let scanNext() do the work
  scan.m_state = ScanOp::Next;
}
void
Dbtup::execDROP_TRIG_REQ(Signal* signal)
{
  jamEntry();
  BlockReference senderRef = signal->getSendersBlockRef();
  const DropTrigReq reqCopy = *(const DropTrigReq*)signal->getDataPtr();
  const DropTrigReq* const req = &reqCopy;

  // Find table
  TablerecPtr tabPtr;
  tabPtr.i = req->getTableId();
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  // Drop trigger
  Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef));
  if (r == 0){
    // Send conf
    DropTrigConf* const conf = (DropTrigConf*)signal->getDataPtrSend();
    conf->setUserRef(senderRef);
    conf->setConnectionPtr(req->getConnectionPtr());
    conf->setRequestType(req->getRequestType());
    conf->setTableId(req->getTableId());
    conf->setIndexId(req->getIndexId());
    conf->setTriggerId(req->getTriggerId());
    sendSignal(senderRef, GSN_DROP_TRIG_CONF,
               signal, DropTrigConf::SignalLength, JBB);
  } else {
    // Send ref
    DropTrigRef* const ref = (DropTrigRef*)signal->getDataPtrSend();
    ref->setUserRef(senderRef);
    ref->setConnectionPtr(req->getConnectionPtr());
    ref->setRequestType(req->getRequestType());
    ref->setTableId(req->getTableId());
    ref->setIndexId(req->getIndexId());
    ref->setTriggerId(req->getTriggerId());
    ref->setErrorCode((DropTrigRef::ErrorCode)r);
    ref->setErrorLine(__LINE__);
    ref->setErrorNode(refToNode(reference()));
    sendSignal(senderRef, GSN_DROP_TRIG_REF,
               signal, DropTrigRef::SignalLength, JBB);
  }
}//Dbtup::DROP_TRIG_REQ()
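/*
 * Release a scan operation: an LCP scan keeps its dedicated scan record
 * and is only detached from the fragment, while ordinary scans are
 * returned to the per-fragment scan list.
 */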
void
Dbtup::releaseScanOp(ScanOpPtr& scanPtr)
{
  FragrecordPtr fragPtr;
  fragPtr.i = scanPtr.p->m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

  if(scanPtr.p->m_bits & ScanOp::SCAN_LCP)
  {
    jam();
    fragPtr.p->m_lcp_scan_op = RNIL;
    scanPtr.p->m_fragPtrI = RNIL;
  }
  else
  {
    jam();
    LocalDLList<ScanOp> list(c_scanOpPool, fragPtr.p->m_scanList);
    list.release(scanPtr);
  }
}
void Dbtup::lcpCompletedLab(Signal* signal, Uint32 ciIndex)
{
  CheckpointInfoPtr ciPtr;
  PendingFileOpenInfoPtr pfoiPtr;

  /* ---------------------------------------------------------------------- */
  /* INSERT CODE TO CLOSE DATA FILE HERE. DO THIS BEFORE SEND CONF           */
  /* ---------------------------------------------------------------------- */
  ciPtr.i = ciIndex;
  ptrCheckGuard(ciPtr, cnoOfLcpRec, checkpointInfo);
  seizePendingFileOpenInfoRecord(pfoiPtr);
  pfoiPtr.p->pfoOpenType = LCP_DATA_FILE_CLOSE;
  pfoiPtr.p->pfoCheckpointInfoP = ciPtr.i;
  signal->theData[0] = ciPtr.p->lcpDataFileHandle;
  signal->theData[1] = cownref;
  signal->theData[2] = pfoiPtr.i;
  signal->theData[3] = 0;
  sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, 4, JBA);
  return;
}//Dbtup::lcpCompletedLab()
void Dbtup::allocDataBufferSegment(Signal* signal, DiskBufferSegmentInfoPtr& dbsiPtr)
{
  UndoPagePtr regUndoPagePtr;

  seizeDiskBufferSegmentRecord(dbsiPtr);
  dbsiPtr.p->pdxBuffertype = COMMON_AREA_PAGES;
  ndbrequire(cfirstfreeUndoSeg != RNIL);
  if (cnoFreeUndoSeg == ZMIN_PAGE_LIMIT_TUP_COMMITREQ) {
    EXECUTE_DIRECT(DBLQH, GSN_TUP_COM_BLOCK, signal, 1);
    ljamEntry();
  }//if
  cnoFreeUndoSeg--;
  ndbrequire(cnoFreeUndoSeg >= 0);

  regUndoPagePtr.i = cfirstfreeUndoSeg;
  ptrCheckGuard(regUndoPagePtr, cnoOfUndoPage, undoPage);
  cfirstfreeUndoSeg = regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS];
  regUndoPagePtr.p->undoPageWord[ZPAGE_NEXT_POS] = RNIL;
  for (Uint32 i = 0; i < ZUB_SEGMENT_SIZE; i++) {
    dbsiPtr.p->pdxDataPage[i] = regUndoPagePtr.i + i;
  }//for
}//Dbtup::allocDataBufferSegment()
void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
{
  TablerecPtr regTabPtr;
  FragrecordPtr regFragPtr;
  Uint32 frag_page_id, frag_id;

  jamEntry();

  frag_id= signal->theData[0];
  regTabPtr.i= signal->theData[1];
  frag_page_id= signal->theData[2];
  Uint32 page_index= signal->theData[3];

  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  getFragmentrec(regFragPtr, frag_id, regTabPtr.p);
  ndbassert(regFragPtr.p != NULL);

  if (! Local_key::isInvalid(frag_page_id, page_index))
  {
    Local_key tmp;
    tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id);
    tmp.m_page_idx= page_index;

    PagePtr pagePtr;
    Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);

    ndbrequire(ptr->m_header_bits & Tuple_header::FREED);

    if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
        regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
    {
      jam();
      free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr);
    } else {
      free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p);
    }
  }
}
bool Dbtup::storedProcedureAttrInfo(Signal* signal,
                                    Operationrec* regOperPtr,
                                    const Uint32 *data,
                                    Uint32 length,
                                    bool copyProcedure)
{
  AttrbufrecPtr regAttrPtr;
  Uint32 RnoFree = cnoFreeAttrbufrec;

  if (ERROR_INSERTED(4004) && !copyProcedure) {
    CLEAR_ERROR_INSERT_VALUE;
    storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR);
    return false;
  }//if
  regOperPtr->currentAttrinbufLen += length;
  ndbrequire(regOperPtr->currentAttrinbufLen <= regOperPtr->attrinbufLen);
  if ((RnoFree > MIN_ATTRBUF) ||
      (copyProcedure)) {
    jam();
    regAttrPtr.i = cfirstfreeAttrbufrec;
    ptrCheckGuard(regAttrPtr, cnoOfAttrbufrec, attrbufrec);
    regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = 0;
    cfirstfreeAttrbufrec = regAttrPtr.p->attrbuf[ZBUF_NEXT];
    cnoFreeAttrbufrec = RnoFree - 1;
    regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
  } else {
    jam();
    storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR);
    return false;
  }//if
  if (regOperPtr->firstAttrinbufrec == RNIL) {
    jam();
    regOperPtr->firstAttrinbufrec = regAttrPtr.i;
  }//if
  regAttrPtr.p->attrbuf[ZBUF_NEXT] = RNIL;
  if (regOperPtr->lastAttrinbufrec != RNIL) {
    AttrbufrecPtr tempAttrinbufptr;
    jam();
    tempAttrinbufptr.i = regOperPtr->lastAttrinbufrec;
    ptrCheckGuard(tempAttrinbufptr, cnoOfAttrbufrec, attrbufrec);
    tempAttrinbufptr.p->attrbuf[ZBUF_NEXT] = regAttrPtr.i;
  }//if
  regOperPtr->lastAttrinbufrec = regAttrPtr.i;

  regAttrPtr.p->attrbuf[ZBUF_DATA_LEN] = length;
  MEMCOPY_NO_WORDS(&regAttrPtr.p->attrbuf[0],
                   data,
                   length);

  if (regOperPtr->currentAttrinbufLen < regOperPtr->attrinbufLen) {
    jam();
    return true;
  }//if
  if (ERROR_INSERTED(4005) && !copyProcedure) {
    CLEAR_ERROR_INSERT_VALUE;
    storedSeizeAttrinbufrecErrorLab(signal, regOperPtr, ZSTORED_SEIZE_ATTRINBUFREC_ERROR);
    return false;
  }//if

  StoredProcPtr storedPtr;
  c_storedProcPool.getPtr(storedPtr, (Uint32)regOperPtr->storedProcPtr);
  ndbrequire(storedPtr.p->storedCode == ZSCAN_PROCEDURE);

  regOperPtr->currentAttrinbufLen = 0;
  storedPtr.p->storedLinkFirst = regOperPtr->firstAttrinbufrec;
  storedPtr.p->storedLinkLast = regOperPtr->lastAttrinbufrec;
  regOperPtr->firstAttrinbufrec = RNIL;
  regOperPtr->lastAttrinbufrec = RNIL;
  regOperPtr->m_any_value = 0;
  set_trans_state(regOperPtr, TRANS_IDLE);
  signal->theData[0] = regOperPtr->userpointer;
  signal->theData[1] = storedPtr.i;
  sendSignal(DBLQH_REF, GSN_STORED_PROCCONF, signal, 2, JBB);
  return true;
}//Dbtup::storedProcedureAttrInfo()
void
Dbtup::buildIndexOffline_table_readonly(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxImplReq* buildReq =
    (const BuildIndxImplReq*)&buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  for (;buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec);
       buildPtr.p->m_fragNo++)
  {
    jam();
    FragrecordPtr fragPtr;
    fragPtr.i = tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL)
    {
      jam();
      continue;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);

    mt_BuildIndxReq req;
    bzero(&req, sizeof(req));
    req.senderRef = reference();
    req.senderData = buildPtr.i;
    req.tableId = buildReq->tableId;
    req.indexId = buildPtr.p->m_indexId;
    req.fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];

    SimulatedBlock * tux = globalData.getBlock(DBTUX);
    if (instance() != 0)
    {
      tux = tux->getInstance(instance());
      ndbrequire(tux != 0);
    }
    req.tux_ptr = tux;
    req.tup_ptr = this;
    req.func_ptr = Dbtux_mt_buildIndexFragment_wrapper_C;
    req.buffer_size = 16*32768; // thread-local-buffer

    Uint32 * req_ptr = signal->getDataPtrSend();
    memcpy(req_ptr, &req, sizeof(req));

    sendSignal(NDBFS_REF, GSN_BUILD_INDX_IMPL_REQ, signal,
               (sizeof(req) + 15) / 4, JBB);

    buildPtr.p->m_outstanding++;
    if (buildPtr.p->m_outstanding >= m_max_parallel_index_build)
    {
      jam();
      return;
    }
  }

  if (buildPtr.p->m_outstanding == 0)
  {
    jam();
    AlterTabReq* req = (AlterTabReq*)signal->getDataPtrSend();
    /**
     * Note: before 7.3.4, 7.2.15, 7.1.30 fifth word and
     * up was undefined.
     */
    bzero(req, sizeof(*req));
    req->senderRef = reference();
    req->senderData = buildPtrI;
    req->tableId = buildReq->tableId;
    req->requestType = AlterTabReq::AlterTableReadWrite;
    sendSignal(calcInstanceBlockRef(DBLQH), GSN_ALTER_TAB_REQ, signal,
               AlterTabReq::SignalLength, JBB);
    return;
  }
  else
  {
    jam();
    // wait for replies
    return;
  }
}
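/*
 * Advance a TUP scan to the next tuple. The routine is a state machine
 * over ScanPos::m_get: it walks logical pages (main memory) or extents
 * and disk pages (disk data), skips free and empty pages, and returns
 * true when a tuple is found or the scan ends. It returns false when it
 * must wait or has already replied itself: a queued PGMAN page request,
 * a real-time break via CONTINUEB, or a row handed directly to LQH in
 * the node-recovery (SCAN_NR) and LCP-keep cases.
 */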
bool
Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  ScanPos& pos = scan.m_scanPos;
  Local_key& key = pos.m_key;
  const Uint32 bits = scan.m_bits;
  // table
  TablerecPtr tablePtr;
  tablePtr.i = scan.m_tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  Tablerec& table = *tablePtr.p;
  // fragment
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  // tuple found
  Tuple_header* th = 0;
  Uint32 thbits = 0;
  Uint32 loop_count = 0;
  Uint32 scanGCI = scanPtr.p->m_scanGCI;
  Uint32 foundGCI;

  const bool mm = (bits & ScanOp::SCAN_DD);
  const bool lcp = (bits & ScanOp::SCAN_LCP);

  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
  Uint32 size = table.m_offsets[mm].m_fix_header_size;

  if (lcp && lcp_list != RNIL)
    goto found_lcp_keep;

  switch(pos.m_get){
  case ScanPos::Get_next_tuple:
  case ScanPos::Get_next_tuple_fs:
    jam();
    key.m_page_idx += size;
    // fall through
  case ScanPos::Get_tuple:
  case ScanPos::Get_tuple_fs:
    jam();
    /**
     * We need to refetch page after timeslice
     */
    pos.m_get = ScanPos::Get_page;
    break;
  default:
    break;
  }

  while (true) {
    switch (pos.m_get) {
    case ScanPos::Get_next_page:
      // move to next page
      jam();
      {
        if (! (bits & ScanOp::SCAN_DD))
          pos.m_get = ScanPos::Get_next_page_mm;
        else
          pos.m_get = ScanPos::Get_next_page_dd;
      }
      continue;
    case ScanPos::Get_page:
      // get real page
      jam();
      {
        if (! (bits & ScanOp::SCAN_DD))
          pos.m_get = ScanPos::Get_page_mm;
        else
          pos.m_get = ScanPos::Get_page_dd;
      }
      continue;
    case ScanPos::Get_next_page_mm:
      // move to next logical TUP page
      jam();
      {
        key.m_page_no++;
        if (key.m_page_no >= frag.noOfPages) {
          jam();

          if ((bits & ScanOp::SCAN_NR) && (scan.m_endPage != RNIL))
          {
            jam();
            if (key.m_page_no < scan.m_endPage)
            {
              jam();
              ndbout_c("scanning page %u", key.m_page_no);
              goto cont;
            }
          }
          // no more pages, scan ends
          pos.m_get = ScanPos::Get_undef;
          scan.m_state = ScanOp::Last;
          return true;
        }
    cont:
        key.m_page_idx = 0;
        pos.m_get = ScanPos::Get_page_mm;
        // clear cached value
        pos.m_realpid_mm = RNIL;
      }
      /*FALLTHRU*/
    case ScanPos::Get_page_mm:
      // get TUP real page
      jam();
      {
        if (pos.m_realpid_mm == RNIL) {
          jam();
          if (key.m_page_no < frag.noOfPages)
            pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
          else
          {
            ndbassert(bits & ScanOp::SCAN_NR);
            goto nopage;
          }
        }
        PagePtr pagePtr;
        c_page_pool.getPtr(pagePtr, pos.m_realpid_mm);

        if (pagePtr.p->page_state == ZEMPTY_MM) {
          // skip empty page
          jam();
          if (! (bits & ScanOp::SCAN_NR))
          {
            pos.m_get = ScanPos::Get_next_page_mm;
            break; // incr loop count
          }
          else
          {
            jam();
            pos.m_realpid_mm = RNIL;
          }
        }
    nopage:
        pos.m_page = pagePtr.p;
        pos.m_get = ScanPos::Get_tuple;
      }
      continue;
    case ScanPos::Get_next_page_dd:
      // move to next disk page
      jam();
      {
        Disk_alloc_info& alloc = frag.m_disk_alloc_info;
        Local_fragment_extent_list list(c_extent_pool, alloc.m_extent_list);
        Ptr<Extent_info> ext_ptr;
        c_extent_pool.getPtr(ext_ptr, pos.m_extent_info_ptr_i);
        Extent_info* ext = ext_ptr.p;
        key.m_page_no++;
        if (key.m_page_no >= ext->m_first_page_no + alloc.m_extent_size) {
          // no more pages in this extent
          jam();
          if (! list.next(ext_ptr)) {
            // no more extents, scan ends
            jam();
            pos.m_get = ScanPos::Get_undef;
            scan.m_state = ScanOp::Last;
            return true;
          } else {
            // move to next extent
            jam();
            pos.m_extent_info_ptr_i = ext_ptr.i;
            ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
            key.m_file_no = ext->m_key.m_file_no;
            key.m_page_no = ext->m_first_page_no;
          }
        }
        key.m_page_idx = 0;
        pos.m_get = ScanPos::Get_page_dd;
        /*
          read ahead for scan in disk order
          do read ahead every 8:th page
        */
        if ((bits & ScanOp::SCAN_DD) &&
            (((key.m_page_no - ext->m_first_page_no) & 7) == 0))
        {
          jam();
          // initialize PGMAN request
          Page_cache_client::Request preq;
          preq.m_page = pos.m_key;
          preq.m_callback = TheNULLCallback;

          // set maximum read ahead
          Uint32 read_ahead = m_max_page_read_ahead;
          while (true)
          {
            // prepare page read ahead in current extent
            Uint32 page_no = preq.m_page.m_page_no;
            Uint32 page_no_limit = page_no + read_ahead;
            Uint32 limit = ext->m_first_page_no + alloc.m_extent_size;
            if (page_no_limit > limit)
            {
              jam();
              // read ahead crosses extent, set limit for this extent
              read_ahead = page_no_limit - limit;
              page_no_limit = limit;
              // and make sure we only read one extra extent next time around
              if (read_ahead > alloc.m_extent_size)
                read_ahead = alloc.m_extent_size;
            }
            else
            {
              jam();
              read_ahead = 0; // no more to read ahead after this
            }
            // do read ahead pages for this extent
            while (page_no < page_no_limit)
            {
              // page request to PGMAN
              jam();
              preq.m_page.m_page_no = page_no;
              int flags = 0;
              // ignore result
              m_pgman.get_page(signal, preq, flags);
              jamEntry();
              page_no++;
            }
            if (!read_ahead || !list.next(ext_ptr))
            {
              // no more extents after this or read ahead done
              jam();
              break;
            }
            // move to next extent and initialize PGMAN request accordingly
            Extent_info* ext = c_extent_pool.getPtr(ext_ptr.i);
            preq.m_page.m_file_no = ext->m_key.m_file_no;
            preq.m_page.m_page_no = ext->m_first_page_no;
          }
        } // if ScanOp::SCAN_DD read ahead
      }
      /*FALLTHRU*/
    case ScanPos::Get_page_dd:
      // get global page in PGMAN cache
      jam();
      {
        // check if page is un-allocated or empty
        if (likely(! (bits & ScanOp::SCAN_NR)))
        {
          Tablespace_client tsman(signal, c_tsman,
                                  frag.fragTableId,
                                  frag.fragmentId,
                                  frag.m_tablespace_id);
          unsigned uncommitted, committed;
          uncommitted = committed = ~(unsigned)0;
          int ret = tsman.get_page_free_bits(&key, &uncommitted, &committed);
          ndbrequire(ret == 0);
          if (committed == 0 && uncommitted == 0) {
            // skip empty page
            jam();
            pos.m_get = ScanPos::Get_next_page_dd;
            break; // incr loop count
          }
        }
        // page request to PGMAN
        Page_cache_client::Request preq;
        preq.m_page = pos.m_key;
        preq.m_callback.m_callbackData = scanPtr.i;
        preq.m_callback.m_callbackFunction =
          safe_cast(&Dbtup::disk_page_tup_scan_callback);
        int flags = 0;
        int res = m_pgman.get_page(signal, preq, flags);
        jamEntry();
        if (res == 0) {
          jam();
          // request queued
          pos.m_get = ScanPos::Get_tuple;
          return false;
        }
        ndbrequire(res > 0);
        pos.m_page = (Page*)m_pgman.m_ptr.p;
      }
      pos.m_get = ScanPos::Get_tuple;
      continue;
      // get tuple
      // move to next tuple
    case ScanPos::Get_next_tuple:
    case ScanPos::Get_next_tuple_fs:
      // move to next fixed size tuple
      jam();
      {
        key.m_page_idx += size;
        pos.m_get = ScanPos::Get_tuple_fs;
      }
      /*FALLTHRU*/
    case ScanPos::Get_tuple:
    case ScanPos::Get_tuple_fs:
      // get fixed size tuple
      jam();
      {
        Fix_page* page = (Fix_page*)pos.m_page;
        if (key.m_page_idx + size <= Fix_page::DATA_WORDS)
        {
          pos.m_get = ScanPos::Get_next_tuple_fs;
          th = (Tuple_header*)&page->m_data[key.m_page_idx];

          if (likely(! (bits & ScanOp::SCAN_NR)))
          {
            jam();
            thbits = th->m_header_bits;
            if (! (thbits & Tuple_header::FREE))
            {
              goto found_tuple;
            }
          }
          else
          {
            if (pos.m_realpid_mm == RNIL)
            {
              jam();
              foundGCI = 0;
              goto found_deleted_rowid;
            }
            thbits = th->m_header_bits;
            if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
                foundGCI == 0)
            {
              if (! (thbits & Tuple_header::FREE))
              {
                jam();
                goto found_tuple;
              }
              else
              {
                goto found_deleted_rowid;
              }
            }
            else if (thbits != Fix_page::FREE_RECORD &&
                     th->m_operation_ptr_i != RNIL)
            {
              jam();
              goto found_tuple; // Locked tuple...
              // skip free tuple
            }
          }
        }
        else
        {
          jam();
          // no more tuples on this page
          pos.m_get = ScanPos::Get_next_page;
        }
      }
      break; // incr loop count
  found_tuple:
      // found possible tuple to return
      jam();
      {
        // caller has already set pos.m_get to next tuple
        if (! (bits & ScanOp::SCAN_LCP && thbits & Tuple_header::LCP_SKIP))
        {
          Local_key& key_mm = pos.m_key_mm;
          if (! (bits & ScanOp::SCAN_DD)) {
            key_mm = pos.m_key;
            // real page id is already set
          } else {
            key_mm.assref(th->m_base_record_ref);
            // recompute for each disk tuple
            pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no);
          }
          // TUPKEYREQ handles savepoint stuff
          scan.m_state = ScanOp::Current;
          return true;
        }
        else
        {
          jam();
          // clear it so that it will show up in next LCP
          th->m_header_bits = thbits & ~(Uint32)Tuple_header::LCP_SKIP;
          if (tablePtr.p->m_bits & Tablerec::TR_Checksum) {
            jam();
            setChecksum(th, tablePtr.p);
          }
        }
      }
      break;
  found_deleted_rowid:
      jam();
      {
        ndbassert(bits & ScanOp::SCAN_NR);
        Local_key& key_mm = pos.m_key_mm;
        if (! (bits & ScanOp::SCAN_DD)) {
          key_mm = pos.m_key;
          // caller has already set pos.m_get to next tuple
          // real page id is already set
        } else {
          key_mm.assref(th->m_base_record_ref);
          // recompute for each disk tuple
          pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no);

          Fix_page *mmpage = (Fix_page*)c_page_pool.getPtr(pos.m_realpid_mm);
          th = (Tuple_header*)(mmpage->m_data + key_mm.m_page_idx);
          if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
              foundGCI == 0)
          {
            if (! (thbits & Tuple_header::FREE))
              break;
          }
        }

        NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
        conf->scanPtr = scan.m_userPtr;
        conf->accOperationPtr = RNIL;
        conf->fragId = frag.fragmentId;
        conf->localKey[0] = pos.m_key_mm.ref();
        conf->localKey[1] = 0;
        conf->localKeyLength = 1;
        conf->gci = foundGCI;
        Uint32 blockNo = refToBlock(scan.m_userRef);
        EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7);
        jamEntry();

        // TUPKEYREQ handles savepoint stuff
        loop_count = 32;
        scan.m_state = ScanOp::Next;
        return false;
      }
      break; // incr loop count
    default:
      ndbrequire(false);
      break;
    }
    if (++loop_count >= 32)
      break;
  }
  // TODO: at drop table we have to flush and terminate these
  jam();
  signal->theData[0] = ZTUP_SCAN;
  signal->theData[1] = scanPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
  return false;

found_lcp_keep:
  Local_key tmp;
  tmp.assref(lcp_list);
  tmp.m_page_no = getRealpid(fragPtr.p, tmp.m_page_no);

  Ptr<Page> pagePtr;
  c_page_pool.getPtr(pagePtr, tmp.m_page_no);
  Tuple_header* ptr =
    (Tuple_header*)((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0);
  Uint32 headerbits = ptr->m_header_bits;
  ndbrequire(headerbits & Tuple_header::LCP_KEEP);

  Uint32 next = ptr->m_operation_ptr_i;
  ptr->m_operation_ptr_i = RNIL;
  ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE;
  if (tablePtr.p->m_bits & Tablerec::TR_Checksum) {
    jam();
    setChecksum(ptr, tablePtr.p);
  }

  NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
  conf->scanPtr = scan.m_userPtr;
  conf->accOperationPtr = (Uint32)-1;
  conf->fragId = frag.fragmentId;
  conf->localKey[0] = lcp_list;
  conf->localKey[1] = 0;
  conf->localKeyLength = 1;
  conf->gci = 0;
  Uint32 blockNo = refToBlock(scan.m_userRef);
  EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7);

  fragPtr.p->m_lcp_keep_list = next;
  ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag
  if (headerbits & Tuple_header::FREED)
  {
    if (tablePtr.p->m_attributes[MM].m_no_of_varsize)
    {
      jam();
      free_var_rec(fragPtr.p, tablePtr.p, &tmp, pagePtr);
    } else {
      jam();
      free_fix_rec(fragPtr.p, tablePtr.p, &tmp, (Fix_page*)pagePtr.p);
    }
  }
  return false;
}
void
Dbtup::execBUILD_INDX_IMPL_REQ(Signal* signal)
{
  jamEntry();
#ifdef TIME_MEASUREMENT
  time_events= 0;
  tot_time_passed= 0;
  number_events= 1;
#endif
  const BuildIndxImplReq* const req =
    (const BuildIndxImplReq*)signal->getDataPtr();

  // get new operation
  BuildIndexPtr buildPtr;
  if (ERROR_INSERTED(4031) || ! c_buildIndexList.seizeFirst(buildPtr)) {
    jam();
    BuildIndexRec buildRec;
    buildRec.m_request = *req;
    buildRec.m_errorCode = BuildIndxImplRef::Busy;
    if (ERROR_INSERTED(4031))
    {
      CLEAR_ERROR_INSERT_VALUE;
    }
    buildIndexReply(signal, &buildRec);
    return;
  }
  buildPtr.p->m_request = *req;
  const BuildIndxImplReq* buildReq = &buildPtr.p->m_request;
  // check
  buildPtr.p->m_errorCode= BuildIndxImplRef::NoError;
  buildPtr.p->m_outstanding = 0;
  do {
    if (buildReq->tableId >= cnoOfTablerec) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    TablerecPtr tablePtr;
    tablePtr.i= buildReq->tableId;
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    if (tablePtr.p->tableStatus != DEFINED) {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    // memory page format
    buildPtr.p->m_build_vs =
      (tablePtr.p->m_attributes[MM].m_no_of_varsize +
       tablePtr.p->m_attributes[MM].m_no_of_dynamic) > 0;
    if (DictTabInfo::isOrderedIndex(buildReq->indexType)) {
      jam();
      const DLList<TupTriggerData>& triggerList =
        tablePtr.p->tuxCustomTriggers;

      TriggerPtr triggerPtr;
      triggerList.first(triggerPtr);
      while (triggerPtr.i != RNIL) {
        if (triggerPtr.p->indexId == buildReq->indexId) {
          jam();
          break;
        }
        triggerList.next(triggerPtr);
      }
      if (triggerPtr.i == RNIL) {
        jam();
        // trigger was not created
        ndbassert(false);
        buildPtr.p->m_errorCode = BuildIndxImplRef::InternalError;
        break;
      }
      buildPtr.p->m_indexId = buildReq->indexId;
      buildPtr.p->m_buildRef = DBTUX;
      AlterIndxImplReq* req = (AlterIndxImplReq*)signal->getDataPtrSend();
      req->indexId = buildReq->indexId;
      req->senderRef = 0;
      req->requestType = AlterIndxImplReq::AlterIndexBuilding;
      EXECUTE_DIRECT(DBTUX, GSN_ALTER_INDX_IMPL_REQ, signal,
                     AlterIndxImplReq::SignalLength);
    } else if(buildReq->indexId == RNIL) {
      jam();
      // REBUILD of acc
      buildPtr.p->m_indexId = RNIL;
      buildPtr.p->m_buildRef = DBACC;
    } else {
      jam();
      buildPtr.p->m_errorCode = BuildIndxImplRef::InvalidIndexType;
      break;
    }

    // set to first tuple position
    const Uint32 firstTupleNo = 0;
    buildPtr.p->m_fragNo= 0;
    buildPtr.p->m_pageId= 0;
    buildPtr.p->m_tupleNo= firstTupleNo;
    // start build

    bool offline = !!(buildReq->requestType&BuildIndxImplReq::RF_BUILD_OFFLINE);
    if (offline && m_max_parallel_index_build > 1)
    {
      jam();
      buildIndexOffline(signal, buildPtr.i);
    }
    else
    {
      jam();
      buildIndex(signal, buildPtr.i);
    }
    return;
  } while (0);
  // check failed
  buildIndexReply(signal, buildPtr.p);
  c_buildIndexList.release(buildPtr);
}
void
Dbtup::execACC_SCANREQ(Signal* signal)
{
  jamEntry();
  const AccScanReq reqCopy = *(const AccScanReq*)signal->getDataPtr();
  const AccScanReq* const req = &reqCopy;
  ScanOpPtr scanPtr;
  scanPtr.i = RNIL;
  do {
    // find table and fragment
    TablerecPtr tablePtr;
    tablePtr.i = req->tableId;
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    FragrecordPtr fragPtr;
    Uint32 fragId = req->fragmentNo;
    fragPtr.i = RNIL;
    getFragmentrec(fragPtr, fragId, tablePtr.p);
    ndbrequire(fragPtr.i != RNIL);
    Fragrecord& frag = *fragPtr.p;
    // flags
    Uint32 bits = 0;

    if (AccScanReq::getLcpScanFlag(req->requestInfo))
    {
      jam();
      bits |= ScanOp::SCAN_LCP;
      c_scanOpPool.getPtr(scanPtr, c_lcp_scan_op);
    }
    else
    {
      // seize from pool and link to per-fragment list
      LocalDLList<ScanOp> list(c_scanOpPool, frag.m_scanList);
      if (! list.seize(scanPtr)) {
        jam();
        break;
      }
    }

    if (!AccScanReq::getNoDiskScanFlag(req->requestInfo)
        && tablePtr.p->m_no_of_disk_attributes)
    {
      bits |= ScanOp::SCAN_DD;
    }

    bool mm = (bits & ScanOp::SCAN_DD);
    if (tablePtr.p->m_attributes[mm].m_no_of_varsize > 0) {
      bits |= ScanOp::SCAN_VS;

      // disk pages have fixed page format
      ndbrequire(! (bits & ScanOp::SCAN_DD));
    }
    if (! AccScanReq::getReadCommittedFlag(req->requestInfo)) {
      if (AccScanReq::getLockMode(req->requestInfo) == 0)
        bits |= ScanOp::SCAN_LOCK_SH;
      else
        bits |= ScanOp::SCAN_LOCK_EX;
    }

    if (AccScanReq::getNRScanFlag(req->requestInfo))
    {
      jam();
      bits |= ScanOp::SCAN_NR;
      scanPtr.p->m_endPage = req->maxPage;
      if (req->maxPage != RNIL && req->maxPage > frag.noOfPages)
      {
        ndbout_c("%u %u endPage: %u (noOfPages: %u)",
                 tablePtr.i, fragId,
                 req->maxPage, fragPtr.p->noOfPages);
      }
    }
    else
    {
      jam();
      scanPtr.p->m_endPage = RNIL;
    }

    if (AccScanReq::getLcpScanFlag(req->requestInfo))
    {
      jam();
      ndbrequire((bits & ScanOp::SCAN_DD) == 0);
      ndbrequire((bits & ScanOp::SCAN_LOCK) == 0);
    }

    // set up scan op
    new (scanPtr.p) ScanOp();
    ScanOp& scan = *scanPtr.p;
    scan.m_state = ScanOp::First;
    scan.m_bits = bits;
    scan.m_userPtr = req->senderData;
    scan.m_userRef = req->senderRef;
    scan.m_tableId = tablePtr.i;
    scan.m_fragId = frag.fragmentId;
    scan.m_fragPtrI = fragPtr.i;
    scan.m_transId1 = req->transId1;
    scan.m_transId2 = req->transId2;
    scan.m_savePointId = req->savePointId;

    // conf
    AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend();
    conf->scanPtr = req->senderData;
    conf->accPtr = scanPtr.i;
    conf->flag = AccScanConf::ZNOT_EMPTY_FRAGMENT;
    sendSignal(req->senderRef, GSN_ACC_SCANCONF,
               signal, AccScanConf::SignalLength, JBB);
    return;
  } while (0);
  if (scanPtr.i != RNIL) {
    jam();
    releaseScanOp(scanPtr);
  }
  // LQH does not handle REF
  signal->theData[0] = 0x313;
  sendSignal(req->senderRef, GSN_ACC_SCANREF, signal, 1, JBB);
}
void
Dbtup::scanReply(Signal* signal, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  // for reading tuple key in Current state
  Uint32* pkData = (Uint32*)c_dataBuffer;
  unsigned pkSize = 0;
  if (scan.m_state == ScanOp::Current) {
    // found an entry to return
    jam();
    ndbrequire(scan.m_accLockOp == RNIL);
    if (scan.m_bits & ScanOp::SCAN_LOCK) {
      jam();
      // read tuple key - use TUX routine
      const ScanPos& pos = scan.m_scanPos;
      const Local_key& key_mm = pos.m_key_mm;
      int ret = tuxReadPk(fragPtr.i, pos.m_realpid_mm, key_mm.m_page_idx,
                          pkData, true);
      ndbrequire(ret > 0);
      pkSize = ret;
      dbg((DBTUP, "PK size=%d data=%08x", pkSize, pkData[0]));
      // get read lock or exclusive lock
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = (scan.m_bits & ScanOp::SCAN_LOCK_SH) ?
        AccLockReq::LockShared : AccLockReq::LockExclusive;
      lockReq->accOpPtr = RNIL;
      lockReq->userPtr = scanPtr.i;
      lockReq->userRef = reference();
      lockReq->tableId = scan.m_tableId;
      lockReq->fragId = frag.fragmentId;
      lockReq->fragPtrI = RNIL; // no cached frag ptr yet
      lockReq->hashValue = md5_hash((Uint64*)pkData, pkSize);
      lockReq->tupAddr = key_mm.ref();
      lockReq->transId1 = scan.m_transId1;
      lockReq->transId2 = scan.m_transId2;
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal,
                     AccLockReq::LockSignalLength);
      jamEntry();
      switch (lockReq->returnCode) {
      case AccLockReq::Success:
        jam();
        scan.m_state = ScanOp::Locked;
        scan.m_accLockOp = lockReq->accOpPtr;
        break;
      case AccLockReq::IsBlocked:
        jam();
        // normal lock wait
        scan.m_state = ScanOp::Blocked;
        scan.m_bits |= ScanOp::SCAN_LOCK_WAIT;
        scan.m_accLockOp = lockReq->accOpPtr;
        // LQH will wake us up
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      case AccLockReq::Refused:
        jam();
        // we cannot see deleted tuple (assert only)
        ndbassert(false);
        // skip it
        scan.m_state = ScanOp::Next;
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      case AccLockReq::NoFreeOp:
        jam();
        // max ops should depend on max scans (assert only)
        ndbassert(false);
        // stay in Current state
        scan.m_state = ScanOp::Current;
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      default:
        ndbrequire(false);
        break;
      }
    } else {
      scan.m_state = ScanOp::Locked;
    }
  }

  if (scan.m_state == ScanOp::Locked) {
    // we have lock or do not need one
    jam();
    // conf signal
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    // the lock is passed to LQH
    Uint32 accLockOp = scan.m_accLockOp;
    if (accLockOp != RNIL) {
      scan.m_accLockOp = RNIL;
      // remember it until LQH unlocks it
      addAccLockOp(scan, accLockOp);
    } else {
      ndbrequire(! (scan.m_bits & ScanOp::SCAN_LOCK));
      // operation RNIL in LQH would signal no tuple returned
      accLockOp = (Uint32)-1;
    }
    const ScanPos& pos = scan.m_scanPos;
    conf->accOperationPtr = accLockOp;
    conf->fragId = frag.fragmentId;
    conf->localKey[0] = pos.m_key_mm.ref();
    conf->localKey[1] = 0;
    conf->localKeyLength = 1;
    unsigned signalLength = 6;
    if (scan.m_bits & ScanOp::SCAN_LOCK) {
      sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
                 signal, signalLength, JBB);
    } else {
      Uint32 blockNo = refToBlock(scan.m_userRef);
      EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
      jamEntry();
    }
    // next time look for next entry
    scan.m_state = ScanOp::Next;
    return;
  }
  if (scan.m_state == ScanOp::Last ||
      scan.m_state == ScanOp::Invalid) {
    jam();
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    conf->accOperationPtr = RNIL;
    conf->fragId = RNIL;
    unsigned signalLength = 3;
    sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
               signal, signalLength, JBB);
    return;
  }
  ndbrequire(false);
}
void
Dbtup::buildIndex(Signal* signal, Uint32 buildPtrI)
{
  // get build record
  BuildIndexPtr buildPtr;
  buildPtr.i= buildPtrI;
  c_buildIndexList.getPtr(buildPtr);
  const BuildIndxImplReq* buildReq= &buildPtr.p->m_request;
  // get table
  TablerecPtr tablePtr;
  tablePtr.i= buildReq->tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  const Uint32 firstTupleNo = 0;
  const Uint32 tupheadsize = tablePtr.p->m_offsets[MM].m_fix_header_size;

#ifdef TIME_MEASUREMENT
  NDB_TICKS start;
  NDB_TICKS stop;
  Uint64 time_passed;
#endif
  do {
    // get fragment
    FragrecordPtr fragPtr;
    if (buildPtr.p->m_fragNo == NDB_ARRAY_SIZE(tablePtr.p->fragrec)) {
      jam();
      // build ready
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
    ndbrequire(buildPtr.p->m_fragNo < NDB_ARRAY_SIZE(tablePtr.p->fragrec));
    fragPtr.i= tablePtr.p->fragrec[buildPtr.p->m_fragNo];
    if (fragPtr.i == RNIL) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
    // get page
    PagePtr pagePtr;
    if (buildPtr.p->m_pageId >= fragPtr.p->m_max_page_cnt) {
      jam();
      buildPtr.p->m_fragNo++;
      buildPtr.p->m_pageId= 0;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }
    Uint32 realPageId= getRealpidCheck(fragPtr.p, buildPtr.p->m_pageId);
    // skip empty page
    if (realPageId == RNIL)
    {
      jam();
      goto next_tuple;
    }

    c_page_pool.getPtr(pagePtr, realPageId);

next_tuple:
    // get tuple
    Uint32 pageIndex = ~0;
    const Tuple_header* tuple_ptr = 0;
    pageIndex = buildPtr.p->m_tupleNo * tupheadsize;
    if (pageIndex + tupheadsize > Fix_page::DATA_WORDS) {
      jam();
      buildPtr.p->m_pageId++;
      buildPtr.p->m_tupleNo= firstTupleNo;
      break;
    }

    if (realPageId == RNIL)
    {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }

    tuple_ptr = (Tuple_header*)&pagePtr.p->m_data[pageIndex];
    // skip over free tuple
    if (tuple_ptr->m_header_bits & Tuple_header::FREE) {
      jam();
      buildPtr.p->m_tupleNo++;
      break;
    }
    Uint32 tupVersion= tuple_ptr->get_tuple_version();
    OperationrecPtr pageOperPtr;
    pageOperPtr.i= tuple_ptr->m_operation_ptr_i;
#ifdef TIME_MEASUREMENT
    start = NdbTick_getCurrentTicks();
#endif
    // add to index
    TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
    req->errorCode = RNIL;
    req->tableId = tablePtr.i;
    req->indexId = buildPtr.p->m_indexId;
    req->fragId = tablePtr.p->fragid[buildPtr.p->m_fragNo];
    req->pageId = realPageId;
    req->tupVersion = tupVersion;
    req->opInfo = TuxMaintReq::OpAdd;
    req->tupFragPtrI = fragPtr.i;
    req->fragPageId = buildPtr.p->m_pageId;
    req->pageIndex = pageIndex;

    if (pageOperPtr.i == RNIL)
    {
      EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                     signal, TuxMaintReq::SignalLength+2);
    }
    else
    {
      /*
      If there is an ongoing operation on the tuple then it is either a
      copy tuple or an original tuple with an ongoing transaction. In
      both cases realPageId and pageOffset refer to the original tuple.
      The tuple address stored in TUX will always be the original tuple
      but with the tuple version of the tuple we found.

      This is necessary to avoid having to update TUX at abort of
      update. If an update aborts then the copy tuple is copied to
      the original tuple. The build will however have found that
      tuple as a copy tuple. The original tuple is stable and is thus
      preferrable to store in TUX.
      */
      jam();

      /**
       * Since copy tuples now can't be found on real pages.
       *   we will here build all copies of the tuple
       *
       * Note only "real" tupVersion's should be added
       *      i.e delete's shouldnt be added
       *      (unless it's the first op, when "original" should be added)
       */

      /*
       * Start from first operation. This is only to make things more
       * clear. It is not required by ordered index implementation.
       */
      c_operation_pool.getPtr(pageOperPtr);
      while (pageOperPtr.p->prevActiveOp != RNIL)
      {
        jam();
        pageOperPtr.i = pageOperPtr.p->prevActiveOp;
        c_operation_pool.getPtr(pageOperPtr);
      }
      /*
       * Do not use req->errorCode as global control.
       */
      bool ok = true;
      /*
       * If first operation is an update, add previous version.
       * This version does not appear as the version of any operation.
       * At commit this version is removed by executeTuxCommitTriggers.
       * At abort it is preserved by executeTuxAbortTriggers.
       */
      if (pageOperPtr.p->op_type == ZUPDATE)
      {
        jam();
        req->errorCode = RNIL;
        req->tupVersion =
          decr_tup_version(pageOperPtr.p->op_struct.bit_field.tupVersion);
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        ok = (req->errorCode == 0);
      }
      /*
       * Add versions from all operations.
       *
       * Each operation has a tuple version. For insert and update it
       * is the newly created version. For delete it is the version
       * deleted. The existence of operation tuple version implies that
       * a corresponding tuple version exists for TUX to read.
       *
       * We could be in the middle of a commit. The process here makes
       * no assumptions about operation commit order. (It should be
       * first to last but this is not the place to assert it).
       *
       * Duplicate versions are possible e.g. a delete in the middle
       * may have same version as the previous operation. TUX ignores
       * duplicate version errors during index build.
       */
      while (pageOperPtr.i != RNIL && ok)
      {
        jam();
        c_operation_pool.getPtr(pageOperPtr);
        req->errorCode = RNIL;
        req->tupVersion = pageOperPtr.p->op_struct.bit_field.tupVersion;
        EXECUTE_DIRECT(buildPtr.p->m_buildRef, GSN_TUX_MAINT_REQ,
                       signal, TuxMaintReq::SignalLength+2);
        pageOperPtr.i = pageOperPtr.p->nextActiveOp;
        ok = (req->errorCode == 0);
      }
    }

    jamEntry();
    if (req->errorCode != 0) {
      switch (req->errorCode) {
      case TuxMaintReq::NoMemError:
        jam();
        buildPtr.p->m_errorCode= BuildIndxImplRef::AllocationFailure;
        break;
      default:
        ndbrequire(false);
        break;
      }
      buildIndexReply(signal, buildPtr.p);
      c_buildIndexList.release(buildPtr);
      return;
    }
#ifdef TIME_MEASUREMENT
    stop = NdbTick_getCurrentTicks();
    time_passed= NdbTick_Elapsed(start, stop).microSec();
    if (time_passed < 1000) {
      time_events++;
      tot_time_passed += time_passed;
      if (time_events == number_events) {
        Uint64 mean_time_passed= tot_time_passed / (Uint64)number_events;
        ndbout << "Number of events= " << number_events;
        ndbout << " Mean time passed= " << mean_time_passed << endl;
        number_events <<= 1;
        tot_time_passed= 0;
        time_events= 0;
      }
    }
#endif
    // next tuple
    buildPtr.p->m_tupleNo++;
    break;
  } while (0);
  signal->theData[0]= ZBUILD_INDEX;
  signal->theData[1]= buildPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
}