/**
 * Read index attribute values for one specific version of a tuple,
 * on behalf of TUX (the ordered index block).
 *
 * The fixed part of the tuple identified by (fragPtrI, pageId, pageIndex)
 * is located first.  If the version currently stored in the fixed part is
 * not the requested tupVersion, the chain of active operations on the
 * tuple is walked backwards (prevActiveOp) until an operation carrying
 * that version is found; its copy tuple is then used as the read source.
 * Finally readAttributes() extracts the requested attributes.
 *
 * @param jamBuf     per-thread jam trace buffer (TUX runs in its own thread)
 * @param fragPtrI   fragment record index
 * @param pageId     fixed-part page number of the tuple
 * @param pageIndex  fixed-part page index of the tuple
 * @param tupVersion tuple version whose attribute values are wanted
 * @param attrIds    attribute ids to read
 * @param numAttrs   number of entries in attrIds
 * @param dataOut    output buffer for the attribute values
 * @param xfrmFlag   if true, values are transformed (charset collation form)
 * @return           result of readAttributes() (words written, or < 0 on error)
 */
int
Dbtup::tuxReadAttrs(EmulatedJamBuffer * jamBuf,
                    Uint32 fragPtrI,
                    Uint32 pageId,
                    Uint32 pageIndex,
                    Uint32 tupVersion,
                    const Uint32* attrIds,
                    Uint32 numAttrs,
                    Uint32* dataOut,
                    bool xfrmFlag)
{
  thrjamEntry(jamBuf);
  // use own variables instead of globals
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);

  // search for tuple version if not original

  // A temporary ZREAD operation record is faked so that the common
  // setup_fixed_* helpers can be reused to locate the tuple.
  Operationrec tmpOp;
  KeyReqStruct req_struct(jamBuf);
  req_struct.tablePtrP = tablePtr.p;
  req_struct.fragPtrP = fragPtr.p;

  tmpOp.m_tuple_location.m_page_no= pageId;
  tmpOp.m_tuple_location.m_page_idx= pageIndex;
  tmpOp.op_type = ZREAD; // valgrind
  setup_fixed_tuple_ref(&req_struct, &tmpOp, tablePtr.p);
  setup_fixed_part(&req_struct, &tmpOp, tablePtr.p);
  Tuple_header *tuple_ptr= req_struct.m_tuple_ptr;
  if (tuple_ptr->get_tuple_version() != tupVersion)
  {
    jam();
    // Requested version is not the one in the fixed part: walk the
    // operation chain backwards looking for the op that made tupVersion.
    OperationrecPtr opPtr;
    opPtr.i= tuple_ptr->m_operation_ptr_i;
    Uint32 loopGuard= 0;
    while (opPtr.i != RNIL) {
      c_operation_pool.getPtr(opPtr);
      if (opPtr.p->op_struct.bit_field.tupVersion == tupVersion) {
        jam();
        if (!opPtr.p->m_copy_tuple_location.isNull()) {
          // Read from that operation's copy tuple instead.
          req_struct.m_tuple_ptr=
            get_copy_tuple(&opPtr.p->m_copy_tuple_location);
        }
        break;
      }
      jam();
      opPtr.i= opPtr.p->prevActiveOp;
      // The chain can hold at most one op per distinct version; a longer
      // walk indicates a corrupt chain.
      ndbrequire(++loopGuard < (1 << ZTUP_VERSION_BITS));
    }
  }
  // read key attributes from found tuple version
  // save globals
  prepare_read(&req_struct, tablePtr.p, false);
  // do it
  int ret = readAttributes(&req_struct,
                           attrIds,
                           numAttrs,
                           dataOut,
                           ZNIL,
                           xfrmFlag);
  // done
  return ret;
}
/*
 * TUX index contains all tuple versions.  A scan in TUX has scanned
 * one of them and asks if it can be returned as scan result.  This
 * depends on trans id, dirty read flag, and savepoint within trans.
 *
 * Previously this faked a ZREAD operation and used getPage().
 * In TUP getPage() is run after ACC locking, but TUX comes here
 * before ACC access.  Instead of modifying getPage() it is more
 * clear to do the full check here.
 */
bool
Dbtup::tuxQueryTh(Uint32 fragPtrI,
                  Uint32 pageId,
                  Uint32 pageIndex,
                  Uint32 tupVersion,
                  Uint32 transId1,
                  Uint32 transId2,
                  bool dirty,
                  Uint32 savepointId)
{
  jamEntry();
  FragrecordPtr fragPtr;
  fragPtr.i= fragPtrI;
  ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
  TablerecPtr tablePtr;
  tablePtr.i= fragPtr.p->fragTableId;
  ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
  PagePtr pagePtr;
  pagePtr.i = pageId;
  c_page_pool.getPtr(pagePtr);

  KeyReqStruct req_struct(this);

  {
    // Fake a ZREAD operation so the common setup helpers can locate
    // the fixed part of the tuple.
    Operationrec tmpOp;
    tmpOp.m_tuple_location.m_page_no = pageId;
    tmpOp.m_tuple_location.m_page_idx = pageIndex;
    tmpOp.op_type = ZREAD; // valgrind
    setup_fixed_tuple_ref(&req_struct, &tmpOp, tablePtr.p);
    setup_fixed_part(&req_struct, &tmpOp, tablePtr.p);
  }

  Tuple_header* tuple_ptr = req_struct.m_tuple_ptr;

  OperationrecPtr currOpPtr;
  currOpPtr.i = tuple_ptr->m_operation_ptr_i;
  if (currOpPtr.i == RNIL)
  {
    jam();
    // tuple has no operation, any scan can see it
    return true;
  }
  c_operation_pool.getPtr(currOpPtr);

  const bool sameTrans =
    c_lqh->is_same_trans(currOpPtr.p->userpointer, transId1, transId2);

  bool res = false;
  OperationrecPtr loopOpPtr = currOpPtr;

  if (!sameTrans)
  {
    // Scanned tuple is being modified by another transaction.
    jam();
    if (!dirty)
    {
      jam();
      if (currOpPtr.p->nextActiveOp == RNIL)
      {
        jam();
        // last op - TUX makes ACC lock request in same timeslice
        res = true;
      }
    }
    else
    {
      // Dirty (committed-read) scan: only the committed version may be
      // returned, and only if the row was not created by an uncommitted
      // insert.
      // loop to first op (returns false)
      find_savepoint(loopOpPtr, 0);
      const Uint32 op_type = loopOpPtr.p->op_type;

      if (op_type != ZINSERT)
      {
        jam();
        // read committed version
        const Uint32 origVersion = tuple_ptr->get_tuple_version();
        if (origVersion == tupVersion)
        {
          jam();
          res = true;
        }
      }
    }
  }
  else
  {
    jam();
    // for own trans, ignore dirty flag
    // Visibility is decided by the latest own operation at or before
    // the scan's savepoint.
    if (find_savepoint(loopOpPtr, savepointId))
    {
      jam();
      const Uint32 op_type = loopOpPtr.p->op_type;

      if (op_type != ZDELETE)
      {
        jam();
        // check if this op has produced the scanned version
        Uint32 loopVersion = loopOpPtr.p->op_struct.bit_field.tupVersion;
        if (loopVersion == tupVersion)
        {
          jam();
          res = true;
        }
      }
    }
  }
  return res;
}
void Dbtup::handle_lcp_keep_commit(const Local_key* rowid, KeyReqStruct * req_struct, Operationrec * opPtrP, Fragrecord * regFragPtr, Tablerec * regTabPtr) { bool disk = false; Uint32 sizes[4]; Uint32 * copytuple = get_copy_tuple_raw(&opPtrP->m_copy_tuple_location); Tuple_header * dst = get_copy_tuple(copytuple); Tuple_header * org = req_struct->m_tuple_ptr; if (regTabPtr->need_expand(disk)) { setup_fixed_tuple_ref(req_struct, opPtrP, regTabPtr); setup_fixed_part(req_struct, opPtrP, regTabPtr); req_struct->m_tuple_ptr = dst; expand_tuple(req_struct, sizes, org, regTabPtr, disk); shrink_tuple(req_struct, sizes+2, regTabPtr, disk); } else { memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size); } dst->m_header_bits |= Tuple_header::COPY_TUPLE; /** * Store original row-id in copytuple[0,1] * Store next-ptr in copytuple[1,2] (set to RNIL/RNIL) * */ assert(sizeof(Local_key) == 8); memcpy(copytuple+0, rowid, sizeof(Local_key)); Local_key nil; nil.setNull(); memcpy(copytuple+2, &nil, sizeof(nil)); /** * Link it to list */ if (regFragPtr->m_lcp_keep_list_tail.isNull()) { jam(); regFragPtr->m_lcp_keep_list_head = opPtrP->m_copy_tuple_location; } else { jam(); Uint32 * tail = get_copy_tuple_raw(®FragPtr->m_lcp_keep_list_tail); Local_key nextptr; memcpy(&nextptr, tail+2, sizeof(Local_key)); ndbassert(nextptr.isNull()); nextptr = opPtrP->m_copy_tuple_location; memcpy(tail+2, &nextptr, sizeof(Local_key)); } regFragPtr->m_lcp_keep_list_tail = opPtrP->m_copy_tuple_location; /** * And finally clear m_copy_tuple_location so that it won't be freed */ opPtrP->m_copy_tuple_location.setNull(); }