void Dbtup::releaseCopyProcedure()
{
  /* Return Copy Procedure section to original length */
  ndbassert(cCopyProcedure != RNIL);
  ndbassert(cCopyLastSeg != RNIL);

  Ptr<SectionSegment> first;
  g_sectionSegmentPool.getPtr(first, cCopyProcedure);

  ndbassert(first.p->m_sz <= MAX_COPY_PROC_LEN);
  first.p->m_sz= MAX_COPY_PROC_LEN;
  first.p->m_lastSegment= cCopyLastSeg;

  if (cCopyOverwriteLen)
  {
    ndbassert(cCopyOverwriteLen <= EXTRA_COPY_PROC_WORDS);
    Uint32 attrids[EXTRA_COPY_PROC_WORDS];
    for (Uint32 i=0; i < cCopyOverwriteLen; i++)
    {
      AttributeHeader ah(cCopyOverwrite + i, 0);
      attrids[i] = ah.m_value;
    }
    ndbrequire(writeToSection(first.i, cCopyOverwrite, attrids,
                              cCopyOverwriteLen));
    cCopyOverwriteLen= 0;
    cCopyOverwrite= 0;
  }

  cCopyLastSeg= RNIL;
}
void
Restore::execFSREADCONF(Signal * signal)
{
  jamEntry();
  FilePtr file_ptr;
  FsConf* conf= (FsConf*)signal->getDataPtr();
  m_file_pool.getPtr(file_ptr, conf->userPointer);

  file_ptr.p->m_bytes_left += conf->bytes_read;

  ndbassert(file_ptr.p->m_outstanding_reads);
  file_ptr.p->m_outstanding_reads--;

  if (file_ptr.p->m_outstanding_reads == 0)
  {
    ndbassert(conf->bytes_read <= GLOBAL_PAGE_SIZE);
    if (conf->bytes_read == GLOBAL_PAGE_SIZE)
    {
      read_file(signal, file_ptr);
    }
    else
    {
      file_ptr.p->m_status |= File::FILE_EOF;
      file_ptr.p->m_status &= ~(Uint32)File::FILE_THREAD_RUNNING;
    }
  }
}
void Dbtup::prepareCopyProcedure(Uint32 numAttrs, Uint16 tableBits)
{
  /* Set length of copy procedure section to the
   * number of attributes supplied
   */
  ndbassert(numAttrs <= MAX_ATTRIBUTES_IN_TABLE);
  ndbassert(cCopyProcedure != RNIL);
  ndbassert(cCopyLastSeg == RNIL);
  ndbassert(cCopyOverwrite == 0);
  ndbassert(cCopyOverwriteLen == 0);
  Ptr<SectionSegment> first;
  g_sectionSegmentPool.getPtr(first, cCopyProcedure);

  /* Record original 'last segment' of section */
  cCopyLastSeg= first.p->m_lastSegment;

  /* Check table bits to see if we need to do extra reads */
  Uint32 extraAttrIds[EXTRA_COPY_PROC_WORDS];
  Uint32 extraReads = 0;

  if (tableBits & Tablerec::TR_ExtraRowGCIBits)
  {
    AttributeHeader ah(AttributeHeader::ROW_GCI64, 0);
    extraAttrIds[extraReads++] = ah.m_value;
  }
  if (tableBits & Tablerec::TR_ExtraRowAuthorBits)
  {
    AttributeHeader ah(AttributeHeader::ROW_AUTHOR, 0);
    extraAttrIds[extraReads++] = ah.m_value;
  }

  /* Modify section to represent relevant prefix
   * of code by modifying size and lastSegment
   */
  Uint32 newSize = numAttrs + extraReads;
  first.p->m_sz= newSize;

  if (extraReads)
  {
    cCopyOverwrite= numAttrs;
    cCopyOverwriteLen = extraReads;

    ndbrequire(writeToSection(first.i, numAttrs, extraAttrIds, extraReads));
  }

  /* Trim section size and lastSegment */
  Ptr<SectionSegment> curr= first;
  while (newSize > SectionSegment::DataLength)
  {
    g_sectionSegmentPool.getPtr(curr, curr.p->m_nextSegment);
    newSize-= SectionSegment::DataLength;
  }
  first.p->m_lastSegment= curr.i;
}
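/**
 * Usage sketch (an assumption based on the two methods above, not a
 * verbatim call site): the pre-built copy procedure section is trimmed
 * to a per-table prefix for one copy scan and restored afterwards:
 *
 *   prepareCopyProcedure(numAttrs, tableBits); // shrink to numAttrs+extras
 *   ... run the scan using cCopyProcedure as its stored procedure ...
 *   releaseCopyProcedure();                    // undo trim and overwrites
 */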
int Dbtup::realloc_var_part(Fragrecord* fragPtr, Tablerec* tabPtr,
                            PagePtr pagePtr, Var_part_ref* refptr,
                            Uint32 oldsz, Uint32 newsz)
{
  Uint32 add = newsz - oldsz;
  Var_page* pageP = (Var_page*)pagePtr.p;
  Local_key oldref;
  refptr->copyout(&oldref);

  if (pageP->free_space >= add)
  {
    jam();
    if (!pageP->is_space_behind_entry(oldref.m_page_idx, add))
    {
      if (0) printf("extra reorg");
      jam();
      /**
       * In this case we need to reorganise the page to fit. To ensure we
       * don't complicate matters we make a little trick here where we
       * fool the reorg_page to avoid copying the entry at hand and copy
       * that separately at the end. This means we need to copy it out of
       * the page before reorg_page to save the entry contents.
       */
      Uint32* copyBuffer= cinBuffer;
      memcpy(copyBuffer, pageP->get_ptr(oldref.m_page_idx), 4*oldsz);
      pageP->set_entry_len(oldref.m_page_idx, 0);
      pageP->free_space += oldsz;
      pageP->reorg((Var_page*)ctemp_page);
      memcpy(pageP->get_free_space_ptr(), copyBuffer, 4*oldsz);
      pageP->set_entry_offset(oldref.m_page_idx, pageP->insert_pos);
      add += oldsz;
    }
    pageP->grow_entry(oldref.m_page_idx, add);
    update_free_page_list(fragPtr, pagePtr);
  }
  else
  {
    Local_key newref;
    Uint32 *src = pageP->get_ptr(oldref.m_page_idx);
    Uint32 *dst = alloc_var_part(fragPtr, tabPtr, newsz, &newref);
    if (unlikely(dst == 0))
      return -1;

    ndbassert(oldref.m_page_no != newref.m_page_no);
    ndbassert(pageP->get_entry_len(oldref.m_page_idx) == oldsz);
    memcpy(dst, src, 4*oldsz);
    refptr->assign(&newref);
    pageP->free_record(oldref.m_page_idx, Var_page::CHAIN);
    update_free_page_list(fragPtr, pagePtr);
  }
  return 0;
}
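/**
 * Summary of the three paths above (descriptive comment):
 *   - enough free space directly behind the entry: grow_entry() in place
 *   - enough free space, but fragmented: reorg the page, then grow
 *   - not enough free space on the page: alloc_var_part() elsewhere,
 *     copy the data, re-point the Var_part_ref and free the old entry
 * All paths end with update_free_page_list() to re-bucket the page by
 * its new free space.
 */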
/*
  Allocator for variable sized segments
  Part of the external interface for variable sized segments

  This method is used to allocate and free variable sized tuples and
  parts of tuples. This part can be used to implement variable sized
  attributes without wasting memory. It can be used to support small
  BLOB's attached to the record. It can also be used to support adding
  and dropping attributes without the need to copy the entire table.

  SYNOPSIS
    fragPtr          A pointer to the fragment description
    tabPtr           A pointer to the table description
    alloc_size       Size of the allocated record
    key              Local key of the allocated record (out)
    out_frag_page_id Fragment page id of the allocated record (out)

  RETURN VALUES
    Returns a pointer to the allocated record, or NULL on failure
*/
Uint32*
Dbtup::alloc_var_rec(Fragrecord* fragPtr, Tablerec* tabPtr,
                     Uint32 alloc_size, Local_key* key,
                     Uint32 * out_frag_page_id)
{
  /**
   * TODO alloc fix+var part
   */
  Uint32 *ptr = alloc_fix_rec(fragPtr, tabPtr, key, out_frag_page_id);
  if (unlikely(ptr == 0))
  {
    return 0;
  }

  ndbassert(alloc_size >= tabPtr->m_offsets[MM].m_fix_header_size);
  alloc_size -= tabPtr->m_offsets[MM].m_fix_header_size;

  Local_key varref;
  if (likely(alloc_var_part(fragPtr, tabPtr, alloc_size, &varref) != 0))
  {
    Tuple_header* tuple = (Tuple_header*)ptr;
    Var_part_ref* dst = tuple->get_var_part_ref_ptr(tabPtr);
    dst->assign(&varref);
    return ptr;
  }

  PagePtr pagePtr;
  c_page_pool.getPtr(pagePtr, key->m_page_no);
  free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p);
  return 0;
}
// Trigger signals
void
Dbtup::execCREATE_TRIG_REQ(Signal* signal)
{
  jamEntry();
  BlockReference senderRef = signal->getSendersBlockRef();
  const CreateTrigReq reqCopy = *(const CreateTrigReq*)signal->getDataPtr();
  const CreateTrigReq* const req = &reqCopy;
  CreateTrigRef::ErrorCode error= CreateTrigRef::NoError;

  // Find table
  TablerecPtr tabPtr;
  tabPtr.i = req->getTableId();
  ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec);

  if (tabPtr.p->tableStatus != DEFINED)
  {
    jam();
    error= CreateTrigRef::InvalidTable;
  }
  // Create trigger and associate it with the table
  else if (createTrigger(tabPtr.p, req))
  {
    jam();
    // Send conf
    CreateTrigConf* const conf = (CreateTrigConf*)signal->getDataPtrSend();
    conf->setUserRef(reference());
    conf->setConnectionPtr(req->getConnectionPtr());
    conf->setRequestType(req->getRequestType());
    conf->setTableId(req->getTableId());
    conf->setIndexId(req->getIndexId());
    conf->setTriggerId(req->getTriggerId());
    conf->setTriggerInfo(req->getTriggerInfo());
    sendSignal(senderRef, GSN_CREATE_TRIG_CONF,
               signal, CreateTrigConf::SignalLength, JBB);
    return;
  }
  else
  {
    jam();
    error= CreateTrigRef::TooManyTriggers;
  }
  ndbassert(error != CreateTrigRef::NoError);
  // Send ref
  CreateTrigRef* const ref = (CreateTrigRef*)signal->getDataPtrSend();
  ref->setUserRef(reference());
  ref->setConnectionPtr(req->getConnectionPtr());
  ref->setRequestType(req->getRequestType());
  ref->setTableId(req->getTableId());
  ref->setIndexId(req->getIndexId());
  ref->setTriggerId(req->getTriggerId());
  ref->setTriggerInfo(req->getTriggerInfo());
  ref->setErrorCode(error);
  sendSignal(senderRef, GSN_CREATE_TRIG_REF,
             signal, CreateTrigRef::SignalLength, JBB);
}//Dbtup::execCREATE_TRIG_REQ()
void Dbtup::storedProcCountNonAPI(BlockReference apiBlockref, int add_del)
{
  BlockNumber apiBlockno = refToBlock(apiBlockref);
  if (apiBlockno < MIN_API_BLOCK_NO)
  {
    ndbassert(blockToMain(apiBlockno) == BACKUP ||
              blockToMain(apiBlockno) == SUMA ||
              blockToMain(apiBlockno) == DBLQH ||
              blockToMain(apiBlockno) == DBSPJ);
    if (add_del == +1)
    {
      jam();
      c_storedProcCountNonAPI++;
    }
    else if (add_del == -1)
    {
      jam();
      ndbassert(c_storedProcCountNonAPI > 0);
      c_storedProcCountNonAPI--;
    }
    else
    {
      ndbassert(false);
    }
  }
}
/**
 * Move to the first operation performed on this tuple
 */
void
Dbtup::findFirstOp(OperationrecPtr & firstPtr)
{
  jam();
  printf("Detect out-of-order commit(%u) -> ", firstPtr.i);
  ndbassert(!firstPtr.p->is_first_operation());
  while (firstPtr.p->prevActiveOp != RNIL)
  {
    firstPtr.i = firstPtr.p->prevActiveOp;
    c_operation_pool.getPtr(firstPtr);
  }
  ndbout_c("%u", firstPtr.i);
}
void
Trpman::execCLOSE_COMREQ(Signal* signal)
{
  // Close communication with the node and halt input/output from
  // other blocks than QMGR
  CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];

  const BlockReference userRef = closeCom->xxxBlockRef;
  Uint32 requestType = closeCom->requestType;
  Uint32 failNo = closeCom->failNo;
  //  Uint32 noOfNodes = closeCom->noOfNodes;
  jamEntry();

  for (unsigned i = 1; i < MAX_NODES; i++)
  {
    if (NodeBitmask::get(closeCom->theNodes, i) &&
        handles_this_node(i))
    {
      jam();
      //-----------------------------------------------------
      // Report that the connection to the node is closed
      //-----------------------------------------------------
      signal->theData[0] = NDB_LE_CommunicationClosed;
      signal->theData[1] = i;
      sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);

      globalTransporterRegistry.setIOState(i, HaltIO);
      globalTransporterRegistry.do_disconnect(i);
    }
  }

  if (requestType != CloseComReqConf::RT_NO_REPLY)
  {
    ndbassert((requestType == CloseComReqConf::RT_API_FAILURE) ||
              ((requestType == CloseComReqConf::RT_NODE_FAILURE) &&
               (failNo != 0)));
    jam();
    CloseComReqConf* closeComConf = (CloseComReqConf *)signal->getDataPtrSend();
    closeComConf->xxxBlockRef = userRef;
    closeComConf->requestType = requestType;
    closeComConf->failNo = failNo;

    /* Note assumption that the noOfNodes and theNodes bitmap
     * are not trampled above by signals received from the
     * remote node.
     */
    sendSignal(TRPMAN_REF, GSN_CLOSE_COMCONF, signal, 19, JBA);
  }
}
void
Dbtup::disk_page_log_buffer_callback(Signal* signal,
                                     Uint32 opPtrI,
                                     Uint32 unused)
{
  Uint32 hash_value;
  Uint32 gci_hi, gci_lo;
  Uint32 transId1, transId2;
  OperationrecPtr regOperPtr;

  jamEntry();

  c_operation_pool.getPtr(regOperPtr, opPtrI);
  c_lqh->get_op_info(regOperPtr.p->userpointer, &hash_value, &gci_hi, &gci_lo,
                     &transId1, &transId2);
  Uint32 page= regOperPtr.p->m_commit_disk_callback_page;

  TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr();

  tupCommitReq->opPtr= opPtrI;
  tupCommitReq->hashValue= hash_value;
  tupCommitReq->gci_hi= gci_hi;
  tupCommitReq->gci_lo= gci_lo;
  tupCommitReq->diskpage = page;
  tupCommitReq->transId1 = transId1;
  tupCommitReq->transId2 = transId2;

  ndbassert(regOperPtr.p->op_struct.bit_field.m_load_diskpage_on_commit == 0);
  regOperPtr.p->op_struct.bit_field.m_wait_log_buffer= 0;
  m_global_page_pool.getPtr(m_pgman_ptr, page);

  execTUP_COMMITREQ(signal);
  ndbassert(signal->theData[0] == 0);

  c_lqh->tupcommit_conf_callback(signal, regOperPtr.p->userpointer);
}
void
Restore::reorder_key(const KeyDescriptor* desc,
                     Uint32 *data, Uint32 len)
{
  Uint32 i;
  Uint32 *var= data;
  Uint32 Tmp[MAX_KEY_SIZE_IN_WORDS];
  for (i = 0; i < desc->noOfKeyAttr; i++)
  {
    Uint32 attr = desc->keyAttr[i].attributeDescriptor;
    switch(AttributeDescriptor::getArrayType(attr)){
    case NDB_ARRAYTYPE_FIXED:
      var += AttributeDescriptor::getSizeInWords(attr);
    }
  }

  Uint32 *dst = Tmp;
  Uint32 *src = data;
  for (i = 0; i < desc->noOfKeyAttr; i++)
  {
    Uint32 sz;
    Uint32 attr = desc->keyAttr[i].attributeDescriptor;
    switch(AttributeDescriptor::getArrayType(attr)){
    case NDB_ARRAYTYPE_FIXED:
      sz = AttributeDescriptor::getSizeInWords(attr);
      memcpy(dst, src, 4 * sz);
      src += sz;
      break;
    case NDB_ARRAYTYPE_SHORT_VAR:
      sz = (1 + ((Uint8*)var)[0] + 3) >> 2;
      memcpy(dst, var, 4 * sz);
      var += sz;
      break;
    case NDB_ARRAYTYPE_MEDIUM_VAR:
      sz = (2 + ((Uint8*)var)[0] + 256*((Uint8*)var)[1] + 3) >> 2;
      memcpy(dst, var, 4 * sz);
      var += sz;
      break;
    default:
      ndbrequire(false);
      sz = 0;
    }
    dst += sz;
  }
  ndbassert((Uint32) (dst - Tmp) == len);
  memcpy(data, Tmp, 4*len);
}
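/**
 * Worked example (descriptive comment): for key attributes declared as
 * (fixed 1 word, short-var, fixed 2 words), the stored layout is all
 * fixed parts first, then the var parts:
 *
 *   [F0][F2a][F2b][len|v...]
 *
 * while the key is needed in declaration order:
 *
 *   [F0][len|v...][F2a][F2b]
 *
 * The first loop advances 'var' past all fixed words; the second merges
 * fixed words (from 'src') and var parts (from 'var') into Tmp in
 * declaration order. Var sizes round up to whole words, e.g. a short var
 * of n bytes occupies (1 + n + 3) >> 2 words including its length byte.
 */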
void
TrpmanProxy::execROUTE_ORD(Signal* signal)
{
  RouteOrd* ord = (RouteOrd*)signal->getDataPtr();
  Uint32 nodeId = ord->from;
  jamEntry();

  ndbassert(nodeId != 0);

#ifndef NDBD_MULTITHREADED
  Uint32 workerId = 0;
#else
  Uint32 workerId = mt_get_recv_thread_idx(nodeId);
#endif

  SectionHandle handle(this, signal);
  sendSignal(workerRef(workerId), GSN_ROUTE_ORD, signal,
             signal->getLength(), JBB, &handle);
}
void
Restore::execLQHKEYCONF(Signal* signal)
{
  FilePtr file_ptr;
  LqhKeyConf * conf = (LqhKeyConf *)signal->getDataPtr();
  m_file_pool.getPtr(file_ptr, conf->opPtr);

  ndbassert(file_ptr.p->m_outstanding_operations);
  file_ptr.p->m_outstanding_operations--;
  file_ptr.p->m_rows_restored++;
  if (file_ptr.p->m_outstanding_operations == 0 && file_ptr.p->m_fd == RNIL)
  {
    jam();
    restore_lcp_conf(signal, file_ptr);
    return;
  }
}
void Dbinfo::execNODE_FAILREP(Signal* signal)
{
  jamEntry();

  NodeFailRep * rep = (NodeFailRep*)signal->getDataPtr();

  Uint32 theFailedNodes[NdbNodeBitmask::Size];
  for (Uint32 i = 0; i < NdbNodeBitmask::Size; i++)
    theFailedNodes[i] = rep->theNodes[i];

  for (Uint32 i = 0; i < MAX_NDB_NODES; i++)
  {
    if (NdbNodeBitmask::get(theFailedNodes, i))
    {
      Uint32 elementsCleaned = simBlockNodeFailure(signal, i); // No callback
      ndbassert(elementsCleaned == 0); // DbInfo should have no distributed frag signals
      (void) elementsCleaned; // Remove compiler warning
    }
  }
}
void
Restore::parse_file_header(Signal* signal,
                           FilePtr file_ptr,
                           const Uint32* data, Uint32 len)
{
  const BackupFormat::FileHeader* fh= (BackupFormat::FileHeader*)data;
  if (memcmp(fh->Magic, "NDBBCKUP", 8) != 0)
  {
    parse_error(signal, file_ptr, __LINE__, *data);
    return;
  }

  file_ptr.p->m_lcp_version = ntohl(fh->BackupVersion);
  if (check_file_version(signal, ntohl(fh->BackupVersion)))
  {
    parse_error(signal, file_ptr, __LINE__, ntohl(fh->NdbVersion));
    return;
  }
  ndbassert(ntohl(fh->SectionType) == BackupFormat::FILE_HEADER);

  if (ntohl(fh->SectionLength) != len-3)
  {
    parse_error(signal, file_ptr, __LINE__, ntohl(fh->SectionLength));
    return;
  }

  if (ntohl(fh->FileType) != BackupFormat::LCP_FILE)
  {
    parse_error(signal, file_ptr, __LINE__, ntohl(fh->FileType));
    return;
  }

  if (fh->ByteOrder != 0x12345678)
  {
    parse_error(signal, file_ptr, __LINE__, fh->ByteOrder);
    return;
  }
}
void Dbtup::execTUP_DEALLOCREQ(Signal* signal)
{
  TablerecPtr regTabPtr;
  FragrecordPtr regFragPtr;
  Uint32 frag_page_id, frag_id;

  jamEntry();

  frag_id= signal->theData[0];
  regTabPtr.i= signal->theData[1];
  frag_page_id= signal->theData[2];
  Uint32 page_index= signal->theData[3];

  ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec);

  getFragmentrec(regFragPtr, frag_id, regTabPtr.p);
  ndbassert(regFragPtr.p != NULL);

  if (! Local_key::isInvalid(frag_page_id, page_index))
  {
    Local_key tmp;
    tmp.m_page_no= getRealpid(regFragPtr.p, frag_page_id);
    tmp.m_page_idx= page_index;

    PagePtr pagePtr;
    Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);

    ndbrequire(ptr->m_header_bits & Tuple_header::FREED);

    if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
        regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
    {
      jam();
      free_var_rec(regFragPtr.p, regTabPtr.p, &tmp, pagePtr);
    }
    else
    {
      free_fix_rec(regFragPtr.p, regTabPtr.p, &tmp, (Fix_page*)pagePtr.p);
    }
  }
}
/*
  Deallocator for variable sized segments
  Part of the external interface for variable sized segments

  SYNOPSIS
    fragPtr          A pointer to the fragment description
    tabPtr           A pointer to the table description
    key              Local key of the fixed part of the record
    pagePtr          A reference to the page holding the fixed part

  RETURN VALUES
    None
*/
void Dbtup::free_var_rec(Fragrecord* fragPtr,
                         Tablerec* tabPtr,
                         Local_key* key,
                         Ptr<Page> pagePtr)
{
  /**
   * TODO free fix + var part
   */
  Uint32 *ptr = ((Fix_page*)pagePtr.p)->get_ptr(key->m_page_idx, 0);
  Tuple_header* tuple = (Tuple_header*)ptr;

  Local_key ref;
  Var_part_ref * varref = tuple->get_var_part_ref_ptr(tabPtr);
  varref->copyout(&ref);

  free_fix_rec(fragPtr, tabPtr, key, (Fix_page*)pagePtr.p);

  c_page_pool.getPtr(pagePtr, ref.m_page_no);
  ((Var_page*)pagePtr.p)->free_record(ref.m_page_idx, Var_page::CHAIN);

  ndbassert(pagePtr.p->free_space <= Var_page::DATA_WORDS);
  if (pagePtr.p->free_space == Var_page::DATA_WORDS - 1)
  {
    jam();
    /*
      This code could be used when we release pages.
      remove_free_page(signal,fragPtr,page_header,page_header->list_index);
      return_empty_page(fragPtr, page_header);
    */
    update_free_page_list(fragPtr, pagePtr);
  }
  else
  {
    jam();
    update_free_page_list(fragPtr, pagePtr);
  }
  return;
}
void
Dbtup::scanReply(Signal* signal, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  // for reading tuple key in Current state
  Uint32* pkData = (Uint32*)c_dataBuffer;
  unsigned pkSize = 0;

  if (scan.m_state == ScanOp::Current)
  {
    // found an entry to return
    jam();
    ndbrequire(scan.m_accLockOp == RNIL);
    if (scan.m_bits & ScanOp::SCAN_LOCK)
    {
      jam();
      // read tuple key - use TUX routine
      const ScanPos& pos = scan.m_scanPos;
      const Local_key& key_mm = pos.m_key_mm;
      int ret = tuxReadPk(fragPtr.i, pos.m_realpid_mm, key_mm.m_page_idx,
                          pkData, true);
      ndbrequire(ret > 0);
      pkSize = ret;
      dbg((DBTUP, "PK size=%d data=%08x", pkSize, pkData[0]));
      // get read lock or exclusive lock
      AccLockReq* const lockReq = (AccLockReq*)signal->getDataPtrSend();
      lockReq->returnCode = RNIL;
      lockReq->requestInfo = (scan.m_bits & ScanOp::SCAN_LOCK_SH) ?
        AccLockReq::LockShared : AccLockReq::LockExclusive;
      lockReq->accOpPtr = RNIL;
      lockReq->userPtr = scanPtr.i;
      lockReq->userRef = reference();
      lockReq->tableId = scan.m_tableId;
      lockReq->fragId = frag.fragmentId;
      lockReq->fragPtrI = RNIL; // no cached frag ptr yet
      lockReq->hashValue = md5_hash((Uint64*)pkData, pkSize);
      lockReq->tupAddr = key_mm.ref();
      lockReq->transId1 = scan.m_transId1;
      lockReq->transId2 = scan.m_transId2;
      EXECUTE_DIRECT(DBACC, GSN_ACC_LOCKREQ, signal,
                     AccLockReq::LockSignalLength);
      jamEntry();
      switch (lockReq->returnCode) {
      case AccLockReq::Success:
        jam();
        scan.m_state = ScanOp::Locked;
        scan.m_accLockOp = lockReq->accOpPtr;
        break;
      case AccLockReq::IsBlocked:
        jam();
        // normal lock wait
        scan.m_state = ScanOp::Blocked;
        scan.m_bits |= ScanOp::SCAN_LOCK_WAIT;
        scan.m_accLockOp = lockReq->accOpPtr;
        // LQH will wake us up
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      case AccLockReq::Refused:
        jam();
        // we cannot see deleted tuple (assert only)
        ndbassert(false);
        // skip it
        scan.m_state = ScanOp::Next;
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      case AccLockReq::NoFreeOp:
        jam();
        // max ops should depend on max scans (assert only)
        ndbassert(false);
        // stay in Current state
        scan.m_state = ScanOp::Current;
        signal->theData[0] = scan.m_userPtr;
        signal->theData[1] = true;
        EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
        jamEntry();
        return;
        break;
      default:
        ndbrequire(false);
        break;
      }
    }
    else
    {
      scan.m_state = ScanOp::Locked;
    }
  }

  if (scan.m_state == ScanOp::Locked)
  {
    // we have lock or do not need one
    jam();
    // conf signal
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    // the lock is passed to LQH
    Uint32 accLockOp = scan.m_accLockOp;
    if (accLockOp != RNIL)
    {
      scan.m_accLockOp = RNIL;
      // remember it until LQH unlocks it
      addAccLockOp(scan, accLockOp);
    }
    else
    {
      ndbrequire(! (scan.m_bits & ScanOp::SCAN_LOCK));
      // operation RNIL in LQH would signal no tuple returned
      accLockOp = (Uint32)-1;
    }
    const ScanPos& pos = scan.m_scanPos;
    conf->accOperationPtr = accLockOp;
    conf->fragId = frag.fragmentId;
    conf->localKey[0] = pos.m_key_mm.ref();
    conf->localKey[1] = 0;
    conf->localKeyLength = 1;
    unsigned signalLength = 6;
    if (scan.m_bits & ScanOp::SCAN_LOCK)
    {
      sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
                 signal, signalLength, JBB);
    }
    else
    {
      Uint32 blockNo = refToBlock(scan.m_userRef);
      EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
      jamEntry();
    }
    // next time look for next entry
    scan.m_state = ScanOp::Next;
    return;
  }
  if (scan.m_state == ScanOp::Last ||
      scan.m_state == ScanOp::Invalid)
  {
    jam();
    NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
    conf->scanPtr = scan.m_userPtr;
    conf->accOperationPtr = RNIL;
    conf->fragId = RNIL;
    unsigned signalLength = 3;
    sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
               signal, signalLength, JBB);
    return;
  }
  ndbrequire(false);
}
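/**
 * Lock acquisition summary (descriptive comment): ACC_LOCKREQ runs
 * synchronously via EXECUTE_DIRECT and its returnCode drives the scan
 * state machine above:
 *
 *   Success   -> Locked  (lock is later handed to LQH with the row)
 *   IsBlocked -> Blocked (SCAN_LOCK_WAIT set; LQH wakes the scan up)
 *   Refused   -> Next    (row vanished under us; skip it)
 *   NoFreeOp  -> Current (retry the same row when op records free up)
 *
 * In the non-Success cases CHECK_LCP_STOP gives LQH a chance to pause
 * the scan instead of letting it spin.
 */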
void
Dbtup::execBUILD_INDX_IMPL_REQ(Signal* signal)
{
  jamEntry();
#ifdef TIME_MEASUREMENT
  time_events= 0;
  tot_time_passed= 0;
  number_events= 1;
#endif
  const BuildIndxImplReq* const req =
    (const BuildIndxImplReq*)signal->getDataPtr();

  // get new operation
  BuildIndexPtr buildPtr;
  if (ERROR_INSERTED(4031) || ! c_buildIndexList.seizeFirst(buildPtr))
  {
    jam();
    BuildIndexRec buildRec;
    buildRec.m_request = *req;
    buildRec.m_errorCode = BuildIndxImplRef::Busy;
    if (ERROR_INSERTED(4031))
    {
      CLEAR_ERROR_INSERT_VALUE;
    }
    buildIndexReply(signal, &buildRec);
    return;
  }
  buildPtr.p->m_request = *req;
  const BuildIndxImplReq* buildReq = &buildPtr.p->m_request;
  // check
  buildPtr.p->m_errorCode= BuildIndxImplRef::NoError;
  buildPtr.p->m_outstanding = 0;
  do {
    if (buildReq->tableId >= cnoOfTablerec)
    {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    TablerecPtr tablePtr;
    tablePtr.i= buildReq->tableId;
    ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
    if (tablePtr.p->tableStatus != DEFINED)
    {
      jam();
      buildPtr.p->m_errorCode= BuildIndxImplRef::InvalidPrimaryTable;
      break;
    }
    // memory page format
    buildPtr.p->m_build_vs =
      (tablePtr.p->m_attributes[MM].m_no_of_varsize +
       tablePtr.p->m_attributes[MM].m_no_of_dynamic) > 0;
    if (DictTabInfo::isOrderedIndex(buildReq->indexType))
    {
      jam();
      const DLList<TupTriggerData>& triggerList =
        tablePtr.p->tuxCustomTriggers;

      TriggerPtr triggerPtr;
      triggerList.first(triggerPtr);
      while (triggerPtr.i != RNIL)
      {
        if (triggerPtr.p->indexId == buildReq->indexId)
        {
          jam();
          break;
        }
        triggerList.next(triggerPtr);
      }
      if (triggerPtr.i == RNIL)
      {
        jam();
        // trigger was not created
        ndbassert(false);
        buildPtr.p->m_errorCode = BuildIndxImplRef::InternalError;
        break;
      }
      buildPtr.p->m_indexId = buildReq->indexId;
      buildPtr.p->m_buildRef = DBTUX;
      AlterIndxImplReq* req = (AlterIndxImplReq*)signal->getDataPtrSend();
      req->indexId = buildReq->indexId;
      req->senderRef = 0;
      req->requestType = AlterIndxImplReq::AlterIndexBuilding;
      EXECUTE_DIRECT(DBTUX, GSN_ALTER_INDX_IMPL_REQ, signal,
                     AlterIndxImplReq::SignalLength);
    }
    else if (buildReq->indexId == RNIL)
    {
      jam();
      // REBUILD of acc
      buildPtr.p->m_indexId = RNIL;
      buildPtr.p->m_buildRef = DBACC;
    }
    else
    {
      jam();
      buildPtr.p->m_errorCode = BuildIndxImplRef::InvalidIndexType;
      break;
    }

    // set to first tuple position
    const Uint32 firstTupleNo = 0;
    buildPtr.p->m_fragNo= 0;
    buildPtr.p->m_pageId= 0;
    buildPtr.p->m_tupleNo= firstTupleNo;
    // start build

    bool offline = !!(buildReq->requestType&BuildIndxImplReq::RF_BUILD_OFFLINE);
    if (offline && m_max_parallel_index_build > 1)
    {
      jam();
      buildIndexOffline(signal, buildPtr.i);
    }
    else
    {
      jam();
      buildIndex(signal, buildPtr.i);
    }
    return;
  } while (0);
  // check failed
  buildIndexReply(signal, buildPtr.p);
  c_buildIndexList.release(buildPtr);
}
int
Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageIndex,
                 Uint32* dataOut, bool xfrmFlag)
{
  jamEntry();
  // use own variables instead of globals
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  Operationrec tmpOp;
  tmpOp.m_tuple_location.m_page_no= pageId;
  tmpOp.m_tuple_location.m_page_idx= pageIndex;

  KeyReqStruct req_struct(this);
  req_struct.tablePtrP = tablePtr.p;
  req_struct.fragPtrP = fragPtr.p;

  PagePtr page_ptr;
  Uint32* ptr= get_ptr(&page_ptr, &tmpOp.m_tuple_location, tablePtr.p);
  req_struct.m_page_ptr = page_ptr;
  req_struct.m_tuple_ptr = (Tuple_header*)ptr;

  int ret = 0;
  if (! (req_struct.m_tuple_ptr->m_header_bits & Tuple_header::FREE))
  {
    req_struct.check_offset[MM]= tablePtr.p->get_check_offset(MM);
    req_struct.check_offset[DD]= tablePtr.p->get_check_offset(DD);

    Uint32 num_attr= tablePtr.p->m_no_of_attributes;
    Uint32 descr_start= tablePtr.p->tabDescriptor;
    TableDescriptor *tab_descr= &tableDescriptor[descr_start];
    ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
    req_struct.attr_descr= tab_descr;

    if (req_struct.m_tuple_ptr->m_header_bits & Tuple_header::ALLOC)
    {
      Uint32 opPtrI= req_struct.m_tuple_ptr->m_operation_ptr_i;
      Operationrec* opPtrP= c_operation_pool.getPtr(opPtrI);
      ndbassert(!opPtrP->m_copy_tuple_location.isNull());
      req_struct.m_tuple_ptr=
        get_copy_tuple(&opPtrP->m_copy_tuple_location);
    }
    prepare_read(&req_struct, tablePtr.p, false);

    const Uint32* attrIds= &tableDescriptor[tablePtr.p->readKeyArray].tabDescr;
    const Uint32 numAttrs= tablePtr.p->noOfKeyAttr;
    // read pk attributes from original tuple

    // do it
    ret = readAttributes(&req_struct,
                         attrIds,
                         numAttrs,
                         dataOut,
                         ZNIL,
                         xfrmFlag);
    // done
    if (ret >= 0)
    {
      // remove headers
      Uint32 n= 0;
      Uint32 i= 0;
      while (n < numAttrs)
      {
        const AttributeHeader ah(dataOut[i]);
        Uint32 size= ah.getDataSize();
        ndbrequire(size != 0);
        for (Uint32 j= 0; j < size; j++)
        {
          dataOut[i + j - n]= dataOut[i + j + 1];
        }
        n+= 1;
        i+= 1 + size;
      }
      ndbrequire((int)i == ret);
      ret -= numAttrs;
    }
    else
    {
      return ret;
    }
  }
  if (tablePtr.p->m_bits & Tablerec::TR_RowGCI)
  {
    dataOut[ret] = *req_struct.m_tuple_ptr->get_mm_gci(tablePtr.p);
  }
  else
  {
    dataOut[ret] = 0;
  }
  return ret;
}
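/**
 * Example of the header-stripping loop above (descriptive comment):
 * readAttributes() produces [AttributeHeader][data...] per key attribute,
 * e.g. for keys of 1 and 2 words:
 *
 *   dataOut: [AH0][k0][AH1][k1a][k1b]     ret = 5
 *
 * The inner loop shifts each data word left over the headers consumed so
 * far (dataOut[i + j - n] = dataOut[i + j + 1]), leaving the packed keys:
 *
 *   dataOut: [k0][k1a][k1b]               ret = 5 - numAttrs = 3
 */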
void
Dbtup::commit_operation(Signal* signal,
                        Uint32 gci_hi,
                        Uint32 gci_lo,
                        Tuple_header* tuple_ptr,
                        PagePtr pagePtr,
                        Operationrec* regOperPtr,
                        Fragrecord* regFragPtr,
                        Tablerec* regTabPtr)
{
  ndbassert(regOperPtr->op_type != ZDELETE);

  Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op;
  Uint32 save= tuple_ptr->m_operation_ptr_i;
  Uint32 bits= tuple_ptr->m_header_bits;

  Tuple_header *disk_ptr= 0;
  Tuple_header *copy= get_copy_tuple(&regOperPtr->m_copy_tuple_location);

  Uint32 copy_bits= copy->m_header_bits;

  Uint32 fixsize= regTabPtr->m_offsets[MM].m_fix_header_size;
  Uint32 mm_vars= regTabPtr->m_attributes[MM].m_no_of_varsize;
  Uint32 mm_dyns= regTabPtr->m_attributes[MM].m_no_of_dynamic;
  bool update_gci_at_commit = ! regOperPtr->op_struct.bit_field.m_gci_written;
  if ((mm_vars+mm_dyns) == 0)
  {
    jam();
    memcpy(tuple_ptr, copy, 4*fixsize);
    disk_ptr= (Tuple_header*)(((Uint32*)copy)+fixsize);
  }
  else
  {
    jam();
    /**
     * Var_part_ref is only stored in *allocated* tuple
     * so memcpy from copy, will over write it...
     * hence subtle copyout/assign...
     */
    Local_key tmp;
    Var_part_ref *ref= tuple_ptr->get_var_part_ref_ptr(regTabPtr);
    ref->copyout(&tmp);

    memcpy(tuple_ptr, copy, 4*fixsize);
    ref->assign(&tmp);

    PagePtr vpagePtr;
    if (copy_bits & Tuple_header::VAR_PART)
    {
      jam();
      ndbassert(bits & Tuple_header::VAR_PART);
      ndbassert(tmp.m_page_no != RNIL);
      ndbassert(copy_bits & Tuple_header::COPY_TUPLE);

      Uint32 *dst= get_ptr(&vpagePtr, *ref);
      Var_page* vpagePtrP = (Var_page*)vpagePtr.p;
      Varpart_copy*vp =(Varpart_copy*)copy->get_end_of_fix_part_ptr(regTabPtr);
      /* The first word of the shrunken tuple holds the length in words. */
      Uint32 len = vp->m_len;
      memcpy(dst, vp->m_data, 4*len);

      if (copy_bits & Tuple_header::MM_SHRINK)
      {
        jam();
        ndbassert(vpagePtrP->get_entry_len(tmp.m_page_idx) >= len);
        if (len)
        {
          jam();
          ndbassert(regFragPtr->m_varWordsFree >= vpagePtrP->free_space);
          regFragPtr->m_varWordsFree -= vpagePtrP->free_space;
          vpagePtrP->shrink_entry(tmp.m_page_idx, len);
          // Adds the new free space value for the page to the fragment total.
          update_free_page_list(regFragPtr, vpagePtr);
        }
        else
        {
          jam();
          free_var_part(regFragPtr, vpagePtr, tmp.m_page_idx);
          tmp.m_page_no = RNIL;
          ref->assign(&tmp);
          copy_bits &= ~(Uint32)Tuple_header::VAR_PART;
        }
      }
      else
      {
        jam();
        ndbassert(vpagePtrP->get_entry_len(tmp.m_page_idx) == len);
      }

      /**
       * Find disk part after
       * header + fixed MM part + length word + varsize part.
       */
      disk_ptr = (Tuple_header*)(vp->m_data + len);
    }
    else
    {
      jam();
      ndbassert(tmp.m_page_no == RNIL);
      disk_ptr = (Tuple_header*)copy->get_end_of_fix_part_ptr(regTabPtr);
    }
  }

  if (regTabPtr->m_no_of_disk_attributes &&
      (copy_bits & Tuple_header::DISK_INLINE))
  {
    jam();
    Local_key key;
    memcpy(&key, copy->get_disk_ref_ptr(regTabPtr), sizeof(Local_key));
    Uint32 logfile_group_id= regFragPtr->m_logfile_group_id;

    PagePtr diskPagePtr((Tup_page*)m_pgman_ptr.p, m_pgman_ptr.i);
    ndbassert(diskPagePtr.p->m_page_no == key.m_page_no);
    ndbassert(diskPagePtr.p->m_file_no == key.m_file_no);
    Uint32 sz, *dst;
    if (copy_bits & Tuple_header::DISK_ALLOC)
    {
      jam();
      disk_page_alloc(signal, regTabPtr, regFragPtr, &key, diskPagePtr,
                      gci_hi);
    }

    if (regTabPtr->m_attributes[DD].m_no_of_varsize == 0)
    {
      jam();
      sz= regTabPtr->m_offsets[DD].m_fix_header_size;
      dst= ((Fix_page*)diskPagePtr.p)->get_ptr(key.m_page_idx, sz);
    }
    else
    {
      jam();
      dst= ((Var_page*)diskPagePtr.p)->get_ptr(key.m_page_idx);
      sz= ((Var_page*)diskPagePtr.p)->get_entry_len(key.m_page_idx);
    }

    if (! (copy_bits & Tuple_header::DISK_ALLOC))
    {
      jam();
      disk_page_undo_update(diskPagePtr.p,
                            &key, dst, sz, gci_hi, logfile_group_id);
    }

    memcpy(dst, disk_ptr, 4*sz);
    memcpy(tuple_ptr->get_disk_ref_ptr(regTabPtr), &key, sizeof(Local_key));

    ndbassert(! (disk_ptr->m_header_bits & Tuple_header::FREE));
    copy_bits |= Tuple_header::DISK_PART;
  }

  if (lcpScan_ptr_i != RNIL && (bits & Tuple_header::ALLOC))
  {
    jam();
    ScanOpPtr scanOp;
    c_scanOpPool.getPtr(scanOp, lcpScan_ptr_i);
    Local_key rowid = regOperPtr->m_tuple_location;
    rowid.m_page_no = pagePtr.p->frag_page_id;
    if (!is_rowid_lcp_scanned(rowid, *scanOp.p))
    {
      jam();
      copy_bits |= Tuple_header::LCP_SKIP;
    }
  }

  Uint32 clear=
    Tuple_header::ALLOC | Tuple_header::FREE | Tuple_header::COPY_TUPLE |
    Tuple_header::DISK_ALLOC | Tuple_header::DISK_INLINE |
    Tuple_header::MM_SHRINK | Tuple_header::MM_GROWN;
  copy_bits &= ~(Uint32)clear;

  tuple_ptr->m_header_bits= copy_bits;
  tuple_ptr->m_operation_ptr_i= save;

  if (regTabPtr->m_bits & Tablerec::TR_RowGCI && update_gci_at_commit)
  {
    jam();
    * tuple_ptr->get_mm_gci(regTabPtr) = gci_hi;
    if (regTabPtr->m_bits & Tablerec::TR_ExtraRowGCIBits)
    {
      Uint32 attrId =
        regTabPtr->getExtraAttrId<Tablerec::TR_ExtraRowGCIBits>();
      store_extra_row_bits(attrId, regTabPtr, tuple_ptr, gci_lo,
                           /* truncate */ true);
    }
  }

  if (regTabPtr->m_bits & Tablerec::TR_Checksum)
  {
    jam();
    setChecksum(tuple_ptr, regTabPtr);
  }
}
/* ----------------------------------------------------------------- */
void Dbtup::execTUP_COMMITREQ(Signal* signal)
{
  FragrecordPtr regFragPtr;
  OperationrecPtr regOperPtr;
  TablerecPtr regTabPtr;
  KeyReqStruct req_struct(this, KRS_COMMIT);
  TransState trans_state;
  Uint32 no_of_fragrec, no_of_tablerec;

  TupCommitReq * const tupCommitReq= (TupCommitReq *)signal->getDataPtr();

  regOperPtr.i= tupCommitReq->opPtr;
  Uint32 hash_value= tupCommitReq->hashValue;
  Uint32 gci_hi = tupCommitReq->gci_hi;
  Uint32 gci_lo = tupCommitReq->gci_lo;
  Uint32 transId1 = tupCommitReq->transId1;
  Uint32 transId2 = tupCommitReq->transId2;

  jamEntry();

  c_operation_pool.getPtr(regOperPtr);

  regFragPtr.i= regOperPtr.p->fragmentPtr;
  trans_state= get_trans_state(regOperPtr.p);

  no_of_fragrec= cnoOfFragrec;

  ndbrequire(trans_state == TRANS_STARTED);
  ptrCheckGuard(regFragPtr, no_of_fragrec, fragrecord);

  no_of_tablerec= cnoOfTablerec;
  regTabPtr.i= regFragPtr.p->fragTableId;

  req_struct.signal= signal;
  req_struct.hash_value= hash_value;
  req_struct.gci_hi = gci_hi;
  req_struct.gci_lo = gci_lo;
  /* Put transid in req_struct, so detached triggers can access it */
  req_struct.trans_id1 = transId1;
  req_struct.trans_id2 = transId2;
  req_struct.m_reorg = regOperPtr.p->op_struct.bit_field.m_reorg;
  regOperPtr.p->m_commit_disk_callback_page = tupCommitReq->diskpage;

#ifdef VM_TRACE
  if (tupCommitReq->diskpage == RNIL)
  {
    m_pgman_ptr.i = RNIL;
    m_pgman_ptr.p = 0;
    req_struct.m_disk_page_ptr.i = RNIL;
    req_struct.m_disk_page_ptr.p = 0;
  }
#endif

  ptrCheckGuard(regTabPtr, no_of_tablerec, tablerec);

  PagePtr page;
  Tuple_header* tuple_ptr= (Tuple_header*)
    get_ptr(&page, &regOperPtr.p->m_tuple_location, regTabPtr.p);

  /**
   * NOTE: This has to be run before a potential time-slice when
   *       waiting for disk, as otherwise the "other-ops" in a multi-op
   *       commit might run while we're waiting for disk
   */
  if (!regTabPtr.p->tuxCustomTriggers.isEmpty())
  {
    if (get_tuple_state(regOperPtr.p) == TUPLE_PREPARED)
    {
      jam();

      OperationrecPtr loopPtr = regOperPtr;
      if (unlikely(!regOperPtr.p->is_first_operation()))
      {
        findFirstOp(loopPtr);
      }

      /**
       * Execute all tux triggers at first commit
       *   since previous tuple is otherwise removed...
       */
      jam();
      goto first;
      while (loopPtr.i != RNIL)
      {
        c_operation_pool.getPtr(loopPtr);
    first:
        executeTuxCommitTriggers(signal,
                                 loopPtr.p,
                                 regFragPtr.p,
                                 regTabPtr.p);
        set_tuple_state(loopPtr.p, TUPLE_TO_BE_COMMITTED);
        loopPtr.i = loopPtr.p->nextActiveOp;
      }
    }
  }

  bool get_page = false;
  if (regOperPtr.p->op_struct.bit_field.m_load_diskpage_on_commit)
  {
    jam();
    Page_cache_client::Request req;

    /**
     * Only the last op on the tuple needs a "real" commit,
     * hence only that one should have m_load_diskpage_on_commit
     */
    ndbassert(tuple_ptr->m_operation_ptr_i == regOperPtr.i);

    /**
     * Check for page
     */
    if (!regOperPtr.p->m_copy_tuple_location.isNull())
    {
      jam();
      Tuple_header* tmp= get_copy_tuple(&regOperPtr.p->m_copy_tuple_location);
      memcpy(&req.m_page,
             tmp->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key));

      if (unlikely(regOperPtr.p->op_type == ZDELETE &&
                   tmp->m_header_bits & Tuple_header::DISK_ALLOC))
      {
        jam();
        /**
         * Insert+Delete
         */
        regOperPtr.p->op_struct.bit_field.m_load_diskpage_on_commit = 0;
        regOperPtr.p->op_struct.bit_field.m_wait_log_buffer = 0;
        disk_page_abort_prealloc(signal, regFragPtr.p,
                                 &req.m_page, req.m_page.m_page_idx);

        D("Logfile_client - execTUP_COMMITREQ");
        Logfile_client lgman(this, c_lgman, regFragPtr.p->m_logfile_group_id);
        lgman.free_log_space(regOperPtr.p->m_undo_buffer_space);
        goto skip_disk;
      }
    }
    else
    {
      jam();
      // initial delete
      ndbassert(regOperPtr.p->op_type == ZDELETE);
      memcpy(&req.m_page,
             tuple_ptr->get_disk_ref_ptr(regTabPtr.p), sizeof(Local_key));

      ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
    }

    if (retrieve_data_page(signal, req, regOperPtr) == 0)
    {
      return; // Data page has not been retrieved yet.
    }
    get_page = true;
  }

  if (regOperPtr.p->op_struct.bit_field.m_wait_log_buffer)
  {
    jam();
    /**
     * Only the last op on the tuple needs a "real" commit,
     * hence only that one should have m_wait_log_buffer
     */
    ndbassert(tuple_ptr->m_operation_ptr_i == regOperPtr.i);

    if (retrieve_log_page(signal, regFragPtr, regOperPtr) == 0)
    {
      return; // Log page has not been retrieved yet.
    }
  }

  assert(tuple_ptr);
skip_disk:
  req_struct.m_tuple_ptr = tuple_ptr;

  Uint32 nextOp = regOperPtr.p->nextActiveOp;
  Uint32 prevOp = regOperPtr.p->prevActiveOp;
  /**
   * The trigger code (which is shared between detached/immediate)
   * checks the op-list to know where to read before-values from.
   * Detached triggers should always read the original tuple value
   * from before the transaction start, not from any intermediate update.
   *
   * Clearing the op-list here has this effect.
   */
  regOperPtr.p->nextActiveOp = RNIL;
  regOperPtr.p->prevActiveOp = RNIL;
  if (tuple_ptr->m_operation_ptr_i == regOperPtr.i)
  {
    jam();
    /**
     * Perform "real" commit
     */
    Uint32 disk = regOperPtr.p->m_commit_disk_callback_page;
    set_commit_change_mask_info(regTabPtr.p, &req_struct, regOperPtr.p);
    checkDetachedTriggers(&req_struct, regOperPtr.p, regTabPtr.p,
                          disk != RNIL);

    tuple_ptr->m_operation_ptr_i = RNIL;

    if (regOperPtr.p->op_type == ZDELETE)
    {
      jam();
      if (get_page)
      {
        ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
      }
      dealloc_tuple(signal, gci_hi, gci_lo, page.p, tuple_ptr,
                    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
    }
    else if (regOperPtr.p->op_type != ZREFRESH)
    {
      jam();
      commit_operation(signal, gci_hi, gci_lo, tuple_ptr, page,
                       regOperPtr.p, regFragPtr.p, regTabPtr.p);
    }
    else
    {
      jam();
      commit_refresh(signal, gci_hi, gci_lo, tuple_ptr, page,
                     &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
    }
  }

  if (nextOp != RNIL)
  {
    c_operation_pool.getPtr(nextOp)->prevActiveOp = prevOp;
  }
  if (prevOp != RNIL)
  {
    c_operation_pool.getPtr(prevOp)->nextActiveOp = nextOp;
  }
  if (!regOperPtr.p->m_copy_tuple_location.isNull())
  {
    jam();
    c_undo_buffer.free_copy_tuple(&regOperPtr.p->m_copy_tuple_location);
  }
  initOpConnection(regOperPtr.p);
  signal->theData[0] = 0;
}
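/**
 * Commit ordering note (descriptive comment): only the last operation on
 * a tuple carries m_load_diskpage_on_commit / m_wait_log_buffer, and only
 * the operation recorded in tuple_ptr->m_operation_ptr_i performs the
 * "real" commit (dealloc_tuple / commit_operation / commit_refresh).
 * Earlier operations of a multi-operation transaction merely unlink
 * themselves from the per-tuple operation list before initOpConnection().
 */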
void Dbinfo::execDBINFO_SCANREQ(Signal *signal)
{
  jamEntry();
  DbinfoScanReq* req_ptr = (DbinfoScanReq*)signal->getDataPtrSend();
  const Uint32 senderRef = signal->header.theSendersBlockRef;

  // Copy signal on stack
  DbinfoScanReq req = *req_ptr;

  const Uint32 resultData = req.resultData;
  const Uint32 transId0 = req.transId[0];
  const Uint32 transId1 = req.transId[1];
  const Uint32 resultRef = req.resultRef;

  // Validate tableId
  const Uint32 tableId = req.tableId;
  if (tableId >= (Uint32)Ndbinfo::getNumTables())
  {
    jam();
    DbinfoScanRef *ref= (DbinfoScanRef*)signal->getDataPtrSend();
    ref->resultData = resultData;
    ref->transId[0] = transId0;
    ref->transId[1] = transId1;
    ref->resultRef = resultRef;
    ref->errorCode= DbinfoScanRef::NoTable;
    sendSignal(senderRef, GSN_DBINFO_SCANREF, signal,
               DbinfoScanRef::SignalLength, JBB);
    return;
  }

  // TODO Check all scan parameters
  Ndbinfo::ScanCursor* cursor =
    CAST_PTR(Ndbinfo::ScanCursor, DbinfoScan::getCursorPtrSend(&req));

  Uint32 signal_length = signal->getLength();
  if (signal_length == DbinfoScanReq::SignalLength)
  {
    // Initialize cursor
    jam();
    cursor->senderRef = senderRef;
    cursor->saveSenderRef = 0;
    cursor->currRef = 0;
    cursor->saveCurrRef = 0;
    // Reset all data holders
    memset(cursor->data, 0, sizeof(cursor->data));
    cursor->flags = 0;
    cursor->totalRows = 0;
    cursor->totalBytes = 0;
    req.cursor_sz = Ndbinfo::ScanCursor::Length;
    signal_length += req.cursor_sz;
  }
  ndbrequire(signal_length ==
             DbinfoScanReq::SignalLength + Ndbinfo::ScanCursor::Length);
  ndbrequire(req.cursor_sz == Ndbinfo::ScanCursor::Length);

  switch(tableId)
  {
  case Ndbinfo::TABLES_TABLEID:
  {
    jam();
    Ndbinfo::Ratelimit rl;

    Uint32 tableId = cursor->data[0];
    while (tableId < (Uint32)Ndbinfo::getNumTables())
    {
      jam();
      const Ndbinfo::Table& tab = Ndbinfo::getTable(tableId);
      Ndbinfo::Row row(signal, req);
      row.write_uint32(tableId);
      row.write_string(tab.m.name);
      row.write_string(tab.m.comment);
      ndbinfo_send_row(signal, req, row, rl);

      tableId++;

      if (rl.need_break(req))
      {
        jam();
        ndbinfo_send_scan_break(signal, req, rl, tableId);
        return;
      }
    }

    // All tables sent
    req.cursor_sz = 0; // Close cursor
    ndbinfo_send_scan_conf(signal, req, rl);
    return;
    break;
  }

  case Ndbinfo::COLUMNS_TABLEID:
  {
    jam();
    Ndbinfo::Ratelimit rl;

    Uint32 tableId = cursor->data[0];
    Uint32 columnId = cursor->data[1];

    while (tableId < (Uint32)Ndbinfo::getNumTables())
    {
      jam();
      const Ndbinfo::Table& tab = Ndbinfo::getTable(tableId);
      while (columnId < (Uint32)tab.m.ncols)
      {
        jam();
        Ndbinfo::Row row(signal, req);
        row.write_uint32(tableId);
        row.write_uint32(columnId);
        row.write_string(tab.col[columnId].name);
        row.write_uint32(tab.col[columnId].coltype);
        row.write_string(tab.col[columnId].comment);
        ndbinfo_send_row(signal, req, row, rl);

        assert(columnId < 256);
        columnId++;

        if (rl.need_break(req))
        {
          jam();
          ndbinfo_send_scan_break(signal, req, rl, tableId, columnId);
          return;
        }
      }
      columnId = 0;
      tableId++;
    }

    // All tables and columns sent
    req.cursor_sz = 0; // Close cursor
    ndbinfo_send_scan_conf(signal, req, rl);
    break;
  }

  default:
  {
    jam();
    ndbassert(tableId > 1);

    //printSignalHeader(stdout, signal->header, 99, 98, true);
    //printDBINFO_SCAN(stdout, signal->theData, signal->getLength(), 0);

    if (Ndbinfo::ScanCursor::getHasMoreData(cursor->flags) ||
        find_next(cursor))
    {
      jam();
      ndbrequire(cursor->currRef);

      // CONF or REF should be sent back here
      cursor->senderRef = reference();

      // Send SCANREQ
      MEMCOPY_NO_WORDS(req_ptr, &req, signal_length);
      sendSignal(cursor->currRef,
                 GSN_DBINFO_SCANREQ,
                 signal, signal_length, JBB);
    }
    else
    {
      // Scan is done, send SCANCONF back to caller
      jam();
      DbinfoScanConf *apiconf= (DbinfoScanConf*)signal->getDataPtrSend();
      MEMCOPY_NO_WORDS(apiconf, &req, DbinfoScanConf::SignalLength);
      // Set cursor_sz back to 0 to indicate end of scan
      apiconf->cursor_sz = 0;
      sendSignal(resultRef, GSN_DBINFO_SCANCONF, signal,
                 DbinfoScanConf::SignalLength, JBB);
    }
    break;
  }
  }
}
Uint32
Restore::init_file(const RestoreLcpReq* req, FilePtr file_ptr)
{
  new (file_ptr.p) File();
  file_ptr.p->m_sender_ref = req->senderRef;
  file_ptr.p->m_sender_data = req->senderData;

  file_ptr.p->m_fd = RNIL;
  file_ptr.p->m_file_type = BackupFormat::LCP_FILE;
  file_ptr.p->m_status = File::FIRST_READ;

  file_ptr.p->m_lcp_no = req->lcpNo;
  file_ptr.p->m_table_id = req->tableId;
  file_ptr.p->m_fragment_id = req->fragmentId;
  file_ptr.p->m_table_version = RNIL;

  file_ptr.p->m_bytes_left = 0; // Bytes read from FS
  file_ptr.p->m_current_page_ptr_i = RNIL;
  file_ptr.p->m_current_page_pos = 0;
  file_ptr.p->m_current_page_index = 0;
  file_ptr.p->m_current_file_page = 0;
  file_ptr.p->m_outstanding_reads = 0;
  file_ptr.p->m_outstanding_operations = 0;
  file_ptr.p->m_rows_restored = 0;
  file_ptr.p->m_bytes_restored = 0;
  file_ptr.p->m_restore_start_time = NdbTick_CurrentMillisecond();

  LocalDataBuffer<15> pages(m_databuffer_pool, file_ptr.p->m_pages);
  LocalDataBuffer<15> columns(m_databuffer_pool, file_ptr.p->m_columns);

  ndbassert(columns.isEmpty());
  columns.release();

  ndbassert(pages.isEmpty());
  pages.release();

  Uint32 buf_size= PAGES*GLOBAL_PAGE_SIZE;
  Uint32 page_count= (buf_size+GLOBAL_PAGE_SIZE-1)/GLOBAL_PAGE_SIZE;
  if (!pages.seize(page_count))
  {
    return RestoreLcpRef::OutOfDataBuffer;
  }

  List::Iterator it;
  for (pages.first(it); !it.isNull(); pages.next(it))
  {
    *it.data = RNIL;
  }

  Uint32 err= 0;
  for (pages.first(it); !it.isNull(); pages.next(it))
  {
    Ptr<GlobalPage> page_ptr;
    if (!m_global_page_pool.seize(page_ptr))
    {
      err= RestoreLcpRef::OutOfReadBufferPages;
      break;
    }
    *it.data = page_ptr.i;
  }

  if (err)
  {
    for (pages.first(it); !it.isNull(); pages.next(it))
    {
      if (*it.data == RNIL)
        break;
      m_global_page_pool.release(*it.data);
    }
  }
  else
  {
    pages.first(it);
    file_ptr.p->m_current_page_ptr_i = *it.data;
  }
  return err;
}
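/**
 * Buffer setup pattern above (descriptive comment): every page slot is
 * first initialised to RNIL, then real pages are seized one by one. On
 * OutOfReadBufferPages the cleanup loop releases exactly the pages seized
 * so far, stopping at the first slot still holding RNIL.
 */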
void Dbinfo::execDBINFO_SCANCONF(Signal *signal)
{
  const DbinfoScanConf* conf_ptr= (const DbinfoScanConf*)signal->getDataPtr();
  // Copy signal on stack
  DbinfoScanConf conf= *conf_ptr;

  jamEntry();

  //printDBINFO_SCAN(stdout, signal->theData, signal->getLength(), 0);

  Uint32 signal_length = signal->getLength();
  ndbrequire(signal_length ==
             DbinfoScanReq::SignalLength + Ndbinfo::ScanCursor::Length);
  ndbrequire(conf.cursor_sz == Ndbinfo::ScanCursor::Length);

  // Validate tableId
  const Uint32 tableId= conf.tableId;
  ndbassert(tableId < (Uint32)Ndbinfo::getNumTables());

  const Uint32 resultRef = conf.resultRef;

  // Copy cursor on stack
  ndbrequire(conf.cursor_sz);
  Ndbinfo::ScanCursor* cursor =
    CAST_PTR(Ndbinfo::ScanCursor, DbinfoScan::getCursorPtrSend(&conf));

  if (Ndbinfo::ScanCursor::getHasMoreData(cursor->flags) || conf.returnedRows)
  {
    // Rate limit break, pass through to API
    jam();
    ndbrequire(cursor->currRef);
    DbinfoScanConf *apiconf = (DbinfoScanConf*) signal->getDataPtrSend();
    MEMCOPY_NO_WORDS(apiconf, &conf, signal_length);
    sendSignal(resultRef, GSN_DBINFO_SCANCONF, signal, signal_length, JBB);
    return;
  }

  if (find_next(cursor))
  {
    jam();
    ndbrequire(cursor->currRef);

    // CONF or REF should be sent back here
    cursor->senderRef = reference();

    // Send SCANREQ
    MEMCOPY_NO_WORDS(signal->getDataPtrSend(), &conf, signal_length);
    sendSignal(cursor->currRef,
               GSN_DBINFO_SCANREQ,
               signal, signal_length, JBB);
    return;
  }

  // Scan is done, send SCANCONF back to caller
  jam();
  DbinfoScanConf *apiconf = (DbinfoScanConf*) signal->getDataPtrSend();
  MEMCOPY_NO_WORDS(apiconf, &conf, DbinfoScanConf::SignalLength);

  // Set cursor_sz back to 0 to indicate end of scan
  apiconf->cursor_sz = 0;
  sendSignal(resultRef, GSN_DBINFO_SCANCONF, signal,
             DbinfoScanConf::SignalLength, JBB);
  return;
}
void
Restore::parse_record(Signal* signal, FilePtr file_ptr,
                      const Uint32 *data, Uint32 len)
{
  List::Iterator it;
  LocalDataBuffer<15> columns(m_databuffer_pool, file_ptr.p->m_columns);

  Uint32 * const key_start = signal->getDataPtrSend()+24;
  Uint32 * const attr_start = key_start + MAX_KEY_SIZE_IN_WORDS;

  data += 1;
  const Uint32* const dataStart = data;

  bool disk = false;
  bool rowid = false;
  bool gci = false;
  Uint32 keyLen;
  Uint32 attrLen;
  Local_key rowid_val;
  Uint64 gci_val;
  Uint32 tableId = file_ptr.p->m_table_id;
  const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);

  if (likely(file_ptr.p->m_lcp_version >= NDBD_RAW_LCP))
  {
    rowid = true;
    rowid_val.m_page_no = data[0];
    rowid_val.m_page_idx = data[1];
    keyLen = c_tup->read_lcp_keys(tableId, data+2, len - 3, key_start);

    AttributeHeader::init(attr_start, AttributeHeader::READ_LCP, 4*(len - 3));
    memcpy(attr_start + 1, data + 2, 4 * (len - 3));
    attrLen = 1 + len - 3;
  }
  else
  {
    Uint32 *keyData = key_start;
    Uint32 *attrData = attr_start;
    union {
      Column c;
      Uint32 _align[sizeof(Column)/sizeof(Uint32)];
    };

    columns.first(it);
    while (!it.isNull())
    {
      _align[0] = *it.data;
      ndbrequire(columns.next(it));
      _align[1] = *it.data;
      columns.next(it);

      if (c.m_id == AttributeHeader::ROWID)
      {
        rowid_val.m_page_no = data[0];
        rowid_val.m_page_idx = data[1];
        data += 2;
        rowid = true;
        continue;
      }

      if (c.m_id == AttributeHeader::ROW_GCI)
      {
        memcpy(&gci_val, data, 8);
        data += 2;
        gci = true;
        continue;
      }

      if (! (c.m_flags & (Column::COL_VAR | Column::COL_NULL)))
      {
        ndbrequire(data < dataStart + len);

        if (c.m_flags & Column::COL_KEY)
        {
          memcpy(keyData, data, 4*c.m_size);
          keyData += c.m_size;
        }

        AttributeHeader::init(attrData++, c.m_id, c.m_size << 2);
        memcpy(attrData, data, 4*c.m_size);
        attrData += c.m_size;
        data += c.m_size;
      }

      if (c.m_flags & Column::COL_DISK)
        disk= true;
    }

    // second part is data driven
    while (data + 2 < dataStart + len)
    {
      Uint32 sz= ntohl(*data); data++;
      Uint32 id= ntohl(*data); data++; // column_no

      ndbrequire(columns.position(it, 2 * id));
      _align[0] = *it.data;
      ndbrequire(columns.next(it));
      _align[1] = *it.data;

      Uint32 sz32 = (sz + 3) >> 2;
      ndbassert(c.m_flags & (Column::COL_VAR | Column::COL_NULL));
      if (c.m_flags & Column::COL_KEY)
      {
        memcpy(keyData, data, 4 * sz32);
        keyData += sz32;
      }

      AttributeHeader::init(attrData++, c.m_id, sz);
      memcpy(attrData, data, sz);

      attrData += sz32;
      data += sz32;
    }

    ndbrequire(data == dataStart + len - 1);
    ndbrequire(disk == false); // Not supported...
    ndbrequire(rowid == true);
    keyLen = Uint32(keyData - key_start);
    attrLen = Uint32(attrData - attr_start);
    if (desc->noOfKeyAttr != desc->noOfVarKeys)
    {
      reorder_key(desc, key_start, keyLen);
    }
  }

  LqhKeyReq * req = (LqhKeyReq *)signal->getDataPtrSend();

  Uint32 hashValue;
  if (g_key_descriptor_pool.getPtr(tableId)->hasCharAttr)
    hashValue = calulate_hash(tableId, key_start);
  else
    hashValue = md5_hash((Uint64*)key_start, keyLen);

  Uint32 tmp= 0;
  LqhKeyReq::setAttrLen(tmp, attrLen);
  req->attrLen = tmp;

  tmp= 0;
  LqhKeyReq::setKeyLen(tmp, keyLen);
  LqhKeyReq::setLastReplicaNo(tmp, 0);
  /* ---------------------------------------------------------------------- */
  // Indicate Application Reference is present in bit 15
  /* ---------------------------------------------------------------------- */
  LqhKeyReq::setApplicationAddressFlag(tmp, 0);
  LqhKeyReq::setDirtyFlag(tmp, 1);
  LqhKeyReq::setSimpleFlag(tmp, 1);
  LqhKeyReq::setOperation(tmp, ZINSERT);
  LqhKeyReq::setSameClientAndTcFlag(tmp, 0);
  LqhKeyReq::setAIInLqhKeyReq(tmp, 0);
  LqhKeyReq::setNoDiskFlag(tmp, disk ? 0 : 1);
  LqhKeyReq::setRowidFlag(tmp, 1);
  LqhKeyReq::setGCIFlag(tmp, gci);
  req->clientConnectPtr = file_ptr.i;
  req->hashValue = hashValue;
  req->requestInfo = tmp;
  req->tcBlockref = reference();
  req->savePointId = 0;
  req->tableSchemaVersion =
    file_ptr.p->m_table_id + (file_ptr.p->m_table_version << 16);
  req->fragmentData = file_ptr.p->m_fragment_id;
  req->transId1 = 0;
  req->transId2 = 0;
  req->scanInfo = 0;
  memcpy(req->variableData, key_start, 16);
  Uint32 pos = keyLen > 4 ? 4 : keyLen;
  req->variableData[pos++] = rowid_val.m_page_no;
  req->variableData[pos++] = rowid_val.m_page_idx;
  if (gci)
    req->variableData[pos++] = (Uint32)gci_val;
  file_ptr.p->m_outstanding_operations++;
  EXECUTE_DIRECT(DBLQH, GSN_LQHKEYREQ, signal,
                 LqhKeyReq::FixedSignalLength+pos);

  if (keyLen > 4)
  {
    c_lqh->receive_keyinfo(signal,
                           key_start + 4,
                           keyLen - 4);
  }

  c_lqh->receive_attrinfo(signal, attr_start, attrLen);
}
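/**
 * LQHKEYREQ packing (descriptive comment): the first min(keyLen, 4) key
 * words travel inline in req->variableData, followed by the rowid
 * (page no, page idx) and, when the GCI flag is set, the low GCI word.
 * Key words beyond the first four are delivered via receive_keyinfo()
 * and the attribute info via receive_attrinfo() after the EXECUTE_DIRECT
 * of the main signal.
 */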
bool
Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
{
  ScanOp& scan = *scanPtr.p;
  ScanPos& pos = scan.m_scanPos;
  Local_key& key = pos.m_key;
  const Uint32 bits = scan.m_bits;
  // table
  TablerecPtr tablePtr;
  tablePtr.i = scan.m_tableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  Tablerec& table = *tablePtr.p;
  // fragment
  FragrecordPtr fragPtr;
  fragPtr.i = scan.m_fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  Fragrecord& frag = *fragPtr.p;
  // tuple found
  Tuple_header* th = 0;
  Uint32 thbits = 0;
  Uint32 loop_count = 0;
  Uint32 scanGCI = scanPtr.p->m_scanGCI;
  Uint32 foundGCI;

  const bool mm = (bits & ScanOp::SCAN_DD);
  const bool lcp = (bits & ScanOp::SCAN_LCP);

  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
  Uint32 size = table.m_offsets[mm].m_fix_header_size;

  if (lcp && lcp_list != RNIL)
    goto found_lcp_keep;

  switch(pos.m_get){
  case ScanPos::Get_next_tuple:
  case ScanPos::Get_next_tuple_fs:
    jam();
    key.m_page_idx += size;
    // fall through
  case ScanPos::Get_tuple:
  case ScanPos::Get_tuple_fs:
    jam();
    /**
     * We need to refetch page after timeslice
     */
    pos.m_get = ScanPos::Get_page;
    break;
  default:
    break;
  }

  while (true) {
    switch (pos.m_get) {
    case ScanPos::Get_next_page:
      // move to next page
      jam();
      {
        if (! (bits & ScanOp::SCAN_DD))
          pos.m_get = ScanPos::Get_next_page_mm;
        else
          pos.m_get = ScanPos::Get_next_page_dd;
      }
      continue;
    case ScanPos::Get_page:
      // get real page
      jam();
      {
        if (! (bits & ScanOp::SCAN_DD))
          pos.m_get = ScanPos::Get_page_mm;
        else
          pos.m_get = ScanPos::Get_page_dd;
      }
      continue;
    case ScanPos::Get_next_page_mm:
      // move to next logical TUP page
      jam();
      {
        key.m_page_no++;
        if (key.m_page_no >= frag.noOfPages)
        {
          jam();

          if ((bits & ScanOp::SCAN_NR) && (scan.m_endPage != RNIL))
          {
            jam();
            if (key.m_page_no < scan.m_endPage)
            {
              jam();
              ndbout_c("scanning page %u", key.m_page_no);
              goto cont;
            }
          }
          // no more pages, scan ends
          pos.m_get = ScanPos::Get_undef;
          scan.m_state = ScanOp::Last;
          return true;
        }
    cont:
        key.m_page_idx = 0;
        pos.m_get = ScanPos::Get_page_mm;
        // clear cached value
        pos.m_realpid_mm = RNIL;
      }
      /*FALLTHRU*/
    case ScanPos::Get_page_mm:
      // get TUP real page
      jam();
      {
        if (pos.m_realpid_mm == RNIL)
        {
          jam();
          if (key.m_page_no < frag.noOfPages)
            pos.m_realpid_mm = getRealpid(fragPtr.p, key.m_page_no);
          else
          {
            ndbassert(bits & ScanOp::SCAN_NR);
            goto nopage;
          }
        }
        PagePtr pagePtr;
        c_page_pool.getPtr(pagePtr, pos.m_realpid_mm);

        if (pagePtr.p->page_state == ZEMPTY_MM)
        {
          // skip empty page
          jam();
          if (! (bits & ScanOp::SCAN_NR))
          {
            pos.m_get = ScanPos::Get_next_page_mm;
            break; // incr loop count
          }
          else
          {
            jam();
            pos.m_realpid_mm = RNIL;
          }
        }
    nopage:
        pos.m_page = pagePtr.p;
        pos.m_get = ScanPos::Get_tuple;
      }
      continue;
    case ScanPos::Get_next_page_dd:
      // move to next disk page
      jam();
      {
        Disk_alloc_info& alloc = frag.m_disk_alloc_info;
        Local_fragment_extent_list list(c_extent_pool, alloc.m_extent_list);
        Ptr<Extent_info> ext_ptr;
        c_extent_pool.getPtr(ext_ptr, pos.m_extent_info_ptr_i);
        Extent_info* ext = ext_ptr.p;
        key.m_page_no++;
        if (key.m_page_no >= ext->m_first_page_no + alloc.m_extent_size)
        {
          // no more pages in this extent
          jam();
          if (! list.next(ext_ptr))
          {
            // no more extents, scan ends
            jam();
            pos.m_get = ScanPos::Get_undef;
            scan.m_state = ScanOp::Last;
            return true;
          }
          else
          {
            // move to next extent
            jam();
            pos.m_extent_info_ptr_i = ext_ptr.i;
            ext = c_extent_pool.getPtr(pos.m_extent_info_ptr_i);
            key.m_file_no = ext->m_key.m_file_no;
            key.m_page_no = ext->m_first_page_no;
          }
        }
        key.m_page_idx = 0;
        pos.m_get = ScanPos::Get_page_dd;
        /*
          read ahead for scan in disk order
          do read ahead every 8:th page
        */
        if ((bits & ScanOp::SCAN_DD) &&
            (((key.m_page_no - ext->m_first_page_no) & 7) == 0))
        {
          jam();
          // initialize PGMAN request
          Page_cache_client::Request preq;
          preq.m_page = pos.m_key;
          preq.m_callback = TheNULLCallback;

          // set maximum read ahead
          Uint32 read_ahead = m_max_page_read_ahead;

          while (true)
          {
            // prepare page read ahead in current extent
            Uint32 page_no = preq.m_page.m_page_no;
            Uint32 page_no_limit = page_no + read_ahead;
            Uint32 limit = ext->m_first_page_no + alloc.m_extent_size;
            if (page_no_limit > limit)
            {
              jam();
              // read ahead crosses extent, set limit for this extent
              read_ahead = page_no_limit - limit;
              page_no_limit = limit;
              // and make sure we only read one extra extent next time around
              if (read_ahead > alloc.m_extent_size)
                read_ahead = alloc.m_extent_size;
            }
            else
            {
              jam();
              read_ahead = 0; // no more to read ahead after this
            }
            // do read ahead pages for this extent
            while (page_no < page_no_limit)
            {
              // page request to PGMAN
              jam();
              preq.m_page.m_page_no = page_no;
              int flags = 0;
              // ignore result
              m_pgman.get_page(signal, preq, flags);
              jamEntry();
              page_no++;
            }
            if (!read_ahead || !list.next(ext_ptr))
            {
              // no more extents after this or read ahead done
              jam();
              break;
            }
            // move to next extent and initialize PGMAN request accordingly
            Extent_info* ext = c_extent_pool.getPtr(ext_ptr.i);
            preq.m_page.m_file_no = ext->m_key.m_file_no;
            preq.m_page.m_page_no = ext->m_first_page_no;
          }
        } // if ScanOp::SCAN_DD read ahead
      }
      /*FALLTHRU*/
    case ScanPos::Get_page_dd:
      // get global page in PGMAN cache
      jam();
      {
        // check if page is un-allocated or empty
        if (likely(! (bits & ScanOp::SCAN_NR)))
        {
          Tablespace_client tsman(signal, c_tsman,
                                  frag.fragTableId,
                                  frag.fragmentId,
                                  frag.m_tablespace_id);
          unsigned uncommitted, committed;
          uncommitted = committed = ~(unsigned)0;
          int ret = tsman.get_page_free_bits(&key, &uncommitted, &committed);
          ndbrequire(ret == 0);
          if (committed == 0 && uncommitted == 0)
          {
            // skip empty page
            jam();
            pos.m_get = ScanPos::Get_next_page_dd;
            break; // incr loop count
          }
        }
        // page request to PGMAN
        Page_cache_client::Request preq;
        preq.m_page = pos.m_key;
        preq.m_callback.m_callbackData = scanPtr.i;
        preq.m_callback.m_callbackFunction =
          safe_cast(&Dbtup::disk_page_tup_scan_callback);
        int flags = 0;
        int res = m_pgman.get_page(signal, preq, flags);
        jamEntry();
        if (res == 0)
        {
          jam();
          // request queued
          pos.m_get = ScanPos::Get_tuple;
          return false;
        }
        ndbrequire(res > 0);
        pos.m_page = (Page*)m_pgman.m_ptr.p;
      }
      pos.m_get = ScanPos::Get_tuple;
      continue;
      // get tuple
      // move to next tuple
    case ScanPos::Get_next_tuple:
    case ScanPos::Get_next_tuple_fs:
      // move to next fixed size tuple
      jam();
      {
        key.m_page_idx += size;
        pos.m_get = ScanPos::Get_tuple_fs;
      }
      /*FALLTHRU*/
    case ScanPos::Get_tuple:
    case ScanPos::Get_tuple_fs:
      // get fixed size tuple
      jam();
      {
        Fix_page* page = (Fix_page*)pos.m_page;
        if (key.m_page_idx + size <= Fix_page::DATA_WORDS)
        {
          pos.m_get = ScanPos::Get_next_tuple_fs;
          th = (Tuple_header*)&page->m_data[key.m_page_idx];

          if (likely(! (bits & ScanOp::SCAN_NR)))
          {
            jam();
            thbits = th->m_header_bits;
            if (! (thbits & Tuple_header::FREE))
            {
              goto found_tuple;
            }
          }
          else
          {
            if (pos.m_realpid_mm == RNIL)
            {
              jam();
              foundGCI = 0;
              goto found_deleted_rowid;
            }
            thbits = th->m_header_bits;
            if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
                foundGCI == 0)
            {
              if (! (thbits & Tuple_header::FREE))
              {
                jam();
                goto found_tuple;
              }
              else
              {
                goto found_deleted_rowid;
              }
            }
            else if (thbits != Fix_page::FREE_RECORD &&
                     th->m_operation_ptr_i != RNIL)
            {
              jam();
              goto found_tuple; // Locked tuple...
              // skip free tuple
            }
          }
        }
        else
        {
          jam();
          // no more tuples on this page
          pos.m_get = ScanPos::Get_next_page;
        }
      }
      break; // incr loop count
    found_tuple:
      // found possible tuple to return
      jam();
      {
        // caller has already set pos.m_get to next tuple
        if (! (bits & ScanOp::SCAN_LCP && thbits & Tuple_header::LCP_SKIP))
        {
          Local_key& key_mm = pos.m_key_mm;
          if (! (bits & ScanOp::SCAN_DD))
          {
            key_mm = pos.m_key;
            // real page id is already set
          }
          else
          {
            key_mm.assref(th->m_base_record_ref);
            // recompute for each disk tuple
            pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no);
          }
          // TUPKEYREQ handles savepoint stuff
          scan.m_state = ScanOp::Current;
          return true;
        }
        else
        {
          jam();
          // clear it so that it will show up in next LCP
          th->m_header_bits = thbits & ~(Uint32)Tuple_header::LCP_SKIP;
          if (tablePtr.p->m_bits & Tablerec::TR_Checksum)
          {
            jam();
            setChecksum(th, tablePtr.p);
          }
        }
      }
      break;
    found_deleted_rowid:
      jam();
      {
        ndbassert(bits & ScanOp::SCAN_NR);
        Local_key& key_mm = pos.m_key_mm;
        if (! (bits & ScanOp::SCAN_DD))
        {
          key_mm = pos.m_key;
          // caller has already set pos.m_get to next tuple
          // real page id is already set
        }
        else
        {
          key_mm.assref(th->m_base_record_ref);
          // recompute for each disk tuple
          pos.m_realpid_mm = getRealpid(fragPtr.p, key_mm.m_page_no);

          Fix_page *mmpage = (Fix_page*)c_page_pool.getPtr(pos.m_realpid_mm);
          th = (Tuple_header*)(mmpage->m_data + key_mm.m_page_idx);
          if ((foundGCI = *th->get_mm_gci(tablePtr.p)) > scanGCI ||
              foundGCI == 0)
          {
            if (! (thbits & Tuple_header::FREE))
              break;
          }
        }

        NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
        conf->scanPtr = scan.m_userPtr;
        conf->accOperationPtr = RNIL;
        conf->fragId = frag.fragmentId;
        conf->localKey[0] = pos.m_key_mm.ref();
        conf->localKey[1] = 0;
        conf->localKeyLength = 1;
        conf->gci = foundGCI;
        Uint32 blockNo = refToBlock(scan.m_userRef);
        EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7);
        jamEntry();

        // TUPKEYREQ handles savepoint stuff
        loop_count = 32;
        scan.m_state = ScanOp::Next;
        return false;
      }
      break; // incr loop count
    default:
      ndbrequire(false);
      break;
    }
    if (++loop_count >= 32)
      break;
  }

  // TODO: at drop table we have to flush and terminate these
  jam();
  signal->theData[0] = ZTUP_SCAN;
  signal->theData[1] = scanPtr.i;
  sendSignal(reference(), GSN_CONTINUEB, signal, 2, JBB);
  return false;

found_lcp_keep:
  Local_key tmp;
  tmp.assref(lcp_list);
  tmp.m_page_no = getRealpid(fragPtr.p, tmp.m_page_no);

  Ptr<Page> pagePtr;
  c_page_pool.getPtr(pagePtr, tmp.m_page_no);
  Tuple_header* ptr = (Tuple_header*)
    ((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0);
  Uint32 headerbits = ptr->m_header_bits;
  ndbrequire(headerbits & Tuple_header::LCP_KEEP);

  Uint32 next = ptr->m_operation_ptr_i;
  ptr->m_operation_ptr_i = RNIL;
  ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE;

  if (tablePtr.p->m_bits & Tablerec::TR_Checksum)
  {
    jam();
    setChecksum(ptr, tablePtr.p);
  }

  NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
  conf->scanPtr = scan.m_userPtr;
  conf->accOperationPtr = (Uint32)-1;
  conf->fragId = frag.fragmentId;
  conf->localKey[0] = lcp_list;
  conf->localKey[1] = 0;
  conf->localKeyLength = 1;
  conf->gci = 0;
  Uint32 blockNo = refToBlock(scan.m_userRef);
  EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 7);

  fragPtr.p->m_lcp_keep_list = next;
  ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag

  if (headerbits & Tuple_header::FREED)
  {
    if (tablePtr.p->m_attributes[MM].m_no_of_varsize)
    {
      jam();
      free_var_rec(fragPtr.p, tablePtr.p, &tmp, pagePtr);
    }
    else
    {
      jam();
      free_fix_rec(fragPtr.p, tablePtr.p, &tmp, (Fix_page*)pagePtr.p);
    }
  }
  return false;
}
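/**
 * Real-time break pattern (descriptive comment): scanNext() handles at
 * most 32 positions per invocation (loop_count). When the budget is spent
 * it sends CONTINUEB to itself and returns false, so a long fragment scan
 * never monopolises the block thread's signal scheduler.
 */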
void
Ndbfs::execFSOPENREQ(Signal* signal)
{
  jamEntry();
  const FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
  const BlockReference userRef = fsOpenReq->userReference;
  AsyncFile* file = getIdleFile();
  ndbrequire(file != NULL);
  Filename::NameSpec spec(theFileSystemPath, theBackupFilePath);

  Uint32 userPointer = fsOpenReq->userPointer;

  if (fsOpenReq->fileFlags & FsOpenReq::OM_INIT)
  {
    Ptr<GlobalPage> page_ptr;
    if (m_global_page_pool.seize(page_ptr) == false)
    {
      FsRef * const fsRef = (FsRef *)&signal->theData[0];
      fsRef->userPointer = userPointer;
      fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrOutOfMemory);
      fsRef->osErrorCode = ~0; // Indicate local error
      sendSignal(userRef, GSN_FSOPENREF, signal, 3, JBB);
      return;
    }
    file->m_page_ptr = page_ptr;
  }
  else
  {
    ndbassert(file->m_page_ptr.isNull());
    file->m_page_ptr.setNull();
  }

  if (signal->getNoOfSections() == 0)
  {
    jam();
    file->theFileName.set(spec, userRef, fsOpenReq->fileNumber);
  }
  else
  {
    jam();
    SegmentedSectionPtr ptr;
    signal->getSection(ptr, FsOpenReq::FILENAME);
    file->theFileName.set(spec, ptr, g_sectionSegmentPool);
    releaseSections(signal);
  }
  file->reportTo(&theFromThreads);
  if (getenv("NDB_TRACE_OPEN"))
    ndbout_c("open(%s)", file->theFileName.c_str());

  Request* request = theRequestPool->get();
  request->action = Request::open;
  request->error = 0;
  request->set(userRef, userPointer, newId());
  request->file = file;
  request->theTrace = signal->getTrace();
  request->par.open.flags = fsOpenReq->fileFlags;
  request->par.open.page_size = fsOpenReq->page_size;
  request->par.open.file_size = fsOpenReq->file_size_hi;
  request->par.open.file_size <<= 32;
  request->par.open.file_size |= fsOpenReq->file_size_lo;
  request->par.open.auto_sync_size = fsOpenReq->auto_sync_size;

  ndbrequire(forward(file, request));
}
void
Dbtup::handle_lcp_keep_commit(const Local_key* rowid,
                              KeyReqStruct * req_struct,
                              Operationrec * opPtrP,
                              Fragrecord * regFragPtr,
                              Tablerec * regTabPtr)
{
  bool disk = false;
  Uint32 sizes[4];
  Uint32 * copytuple = get_copy_tuple_raw(&opPtrP->m_copy_tuple_location);
  Tuple_header * dst = get_copy_tuple(copytuple);
  Tuple_header * org = req_struct->m_tuple_ptr;
  if (regTabPtr->need_expand(disk))
  {
    setup_fixed_tuple_ref(req_struct, opPtrP, regTabPtr);
    setup_fixed_part(req_struct, opPtrP, regTabPtr);
    req_struct->m_tuple_ptr = dst;
    expand_tuple(req_struct, sizes, org, regTabPtr, disk);
    shrink_tuple(req_struct, sizes+2, regTabPtr, disk);
  }
  else
  {
    memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
  }
  dst->m_header_bits |= Tuple_header::COPY_TUPLE;

  /**
   * Store original row-id in copytuple[0,1]
   * Store next-ptr in copytuple[2,3] (set to RNIL/RNIL)
   */
  assert(sizeof(Local_key) == 8);
  memcpy(copytuple+0, rowid, sizeof(Local_key));

  Local_key nil;
  nil.setNull();
  memcpy(copytuple+2, &nil, sizeof(nil));

  /**
   * Link it to list
   */
  if (regFragPtr->m_lcp_keep_list_tail.isNull())
  {
    jam();
    regFragPtr->m_lcp_keep_list_head = opPtrP->m_copy_tuple_location;
  }
  else
  {
    jam();
    Uint32 * tail = get_copy_tuple_raw(&regFragPtr->m_lcp_keep_list_tail);
    Local_key nextptr;
    memcpy(&nextptr, tail+2, sizeof(Local_key));
    ndbassert(nextptr.isNull());
    nextptr = opPtrP->m_copy_tuple_location;
    memcpy(tail+2, &nextptr, sizeof(Local_key));
  }
  regFragPtr->m_lcp_keep_list_tail = opPtrP->m_copy_tuple_location;

  /**
   * And finally clear m_copy_tuple_location so that it won't be freed
   */
  opPtrP->m_copy_tuple_location.setNull();
}
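/**
 * LCP keep list layout (descriptive comment): each kept copy tuple is
 * prefixed by two words of original row-id (copytuple[0..1]) and two
 * words of next-pointer (copytuple[2..3], a Local_key that is null for
 * the tail). Head and tail live in regFragPtr->m_lcp_keep_list_head and
 * m_lcp_keep_list_tail; the LCP scan consumes entries from the head of
 * the list before normal scanning resumes.
 */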