/*********************************************************************
 *
 *  scan_index_i::scan_index_i(stid, c1, bound1, c2, bound2,
 *                             include_nulls, cc, mode, bIgnoreLatches)
 *
 *  Create a scan on index "stid" between "bound1" and "bound2".
 *  c1 could be >, >= or ==.  c2 could be <, <= or ==.
 *  include_nulls == false makes the scan skip null (zero-length) keys.
 *  cc is the concurrency control method to use on the index.
 *  mode is the lock mode requested for the scan (SH by the only caller).
 *  bIgnoreLatches suppresses latching in the underlying cursor.
 *
 *  All real work (and error reporting, via _error_occurred) happens
 *  in _init(); the constructor only seeds members and registers the
 *  scan with the transaction.
 *
 *********************************************************************/
scan_index_i::scan_index_i(
    const stid_t&       stid_,          // index store to scan
    cmp_t               c1,             // lower-bound comparison
    const cvec_t&       bound1_,        // lower bound
    cmp_t               c2,             // upper-bound comparison
    const cvec_t&       bound2_,        // upper bound
    bool                include_nulls,
    concurrency_t       cc,
    lock_mode_t         mode,
    const bool          bIgnoreLatches)
: xct_dependent_t(xct()),               // scan dies with its transaction
  _stid(stid_),
  ntype(ss_m::t_bad_ndx_t),             // real index type resolved in _init()
  _eof(false),
  _error_occurred(),
  _btcursor(0),
  _skip_nulls( ! include_nulls ),
  _cc(cc),
  _bIgnoreLatches(bIgnoreLatches)
{
    // Standard SM entry prologue: requires an active transaction,
    // read-only access, 1 page of reserve.
    INIT_SCAN_PROLOGUE_RC(scan_index_i::scan_index_i,
                          prologue_rc_t::read_only, 1);
    _init(c1, bound1_, c2, bound2_, mode, bIgnoreLatches);
    // Register with the xct so the scan is cleaned up on commit/abort.
    register_me();
}
/**
 * Make "parent" adopt the separator keys of its foster children
 * (via _sx_adopt_foster_sweep), optionally recursing into every child
 * to do the same, and finally grow the tree if the root itself still
 * carries a foster chain.
 *
 * Preconditions: running inside a system transaction; "parent" is
 * fixed and EX-latched by the caller.
 */
rc_t btree_impl::_sx_adopt_foster_all_core (
    btree_page_h &parent, bool is_root, bool recursive)
{
    // TODO this should use the improved tree-walk-through
    // See jira ticket:60 "Tree walk-through without more than 2 pages latched"
    // (originally trac ticket:62)
    w_assert1 (xct()->is_sys_xct());
    w_assert1 (parent.is_fixed());
    w_assert1 (parent.latch_mode() == LATCH_EX);
    if (parent.is_node()) {
        // An interior node always has a leftmost (pid0) child.
        w_assert1(parent.pid0());
        // Adopt at this level first, then descend.
        W_DO(_sx_adopt_foster_sweep(parent));
        if (recursive) {
            // also adopt at all children recursively
            // NOTE(review): i == -1 fetches parent's *foster* pointer while
            // i >= 0 fetches real children slots — confirm the -1 slot was
            // not meant to be pid0 (the leftmost child) instead.
            for (int i = -1; i < parent.nrecs(); ++i) {
                btree_page_h child;
                PageID shpid_opaqueptr = i == -1
                    ? parent.get_foster_opaqueptr()
                    : parent.child_opaqueptr(i);
                W_DO(child.fix_nonroot(parent, shpid_opaqueptr, LATCH_EX));
                W_DO(_sx_adopt_foster_all_core(child, false, true));
            }
        }
    }
    // after all adopts, if this parent is the root and has foster,
    // let's grow the tree
    if (is_root && parent.get_foster()) {
        W_DO(_sx_grow_tree(parent));
        W_DO(_sx_adopt_foster_sweep(parent));
    }
    w_assert3(parent.is_consistent(true, true));
    return RCOK;
}
/*********************************************************************
 *
 *  logrec_t::fill(pid, store, tag, len)
 *
 *  Fill the "pid", page tag, store id and "length" fields of the log
 *  record, mark it as a rollback record when the owning transaction
 *  is aborting, zero the alignment padding, and compute the total
 *  8-byte-aligned record length.
 *
 *********************************************************************/
void logrec_t::fill(PageID p, StoreID store, uint16_t tag, smsize_t l)
{
    w_assert9(w_base_t::is_aligned(_data));

    /* adjust _cat */
    xct_t *x = xct();
    // Records generated while rolling back / aborting are flagged so
    // recovery can distinguish them from forward-processing records.
    if(x && (x->rolling_back() || x->state() == smlevel_0::xct_aborting))
    {
        header._cat |= t_rollback;
    }
    set_pid(0);
    if (!is_single_sys_xct()) { // prv does not exist in single-log system transaction
        set_xid_prev(lsn_t::null);
    }
    header._page_tag = tag;
    header._pid = p;
    header._stid = store;
    // SSX records use a smaller header, so their payload starts elsewhere.
    char *dat = is_single_sys_xct() ? data_ssx() : data();
    if (l != ALIGN_BYTE(l)) {
        // zero out extra space to keep purify happy
        memset(dat+l, 0, ALIGN_BYTE(l)-l);
    }
    // Total length = aligned payload + header + trailing LSN.
    unsigned int tmp = ALIGN_BYTE(l)
        + (is_single_sys_xct() ? hdr_single_sys_xct_sz : hdr_non_ssx_sz)
        + sizeof(lsn_t);
    tmp = (tmp + 7) & unsigned(-8); // force 8-byte alignment
    w_assert1(tmp <= sizeof(*this));
    header._len = tmp;
    if(type() != t_skip) {
        DBG( << "Creat log rec: " << *this
            << " size: " << header._len
            << " xid_prevlsn: " << (is_single_sys_xct() ? lsn_t::null : xid_prev()) );
    }
    // NOTE(review): this chunk ends here — the closing brace of fill()
    // (and any trailing statements) lie beyond this view.
/**
 * Allocate and format a brand-new, empty foster child of "page"
 * (a "no-record" split): the child gets the parent's high fence as
 * both of its fences, and the parent's foster chain is spliced onto
 * the child.  Runs as a single-log system transaction; the one log
 * record written (log_btree_norec_alloc) is sufficient for REDO.
 *
 * On exit both pages are EX-latched and consistent; "new_page_id"
 * holds the id of the freshly allocated page.
 */
rc_t btree_impl::_ux_norec_alloc_core(btree_page_h &page, PageID &new_page_id) {
    // This is called only in REDO-only SSX, so no compensation logging. Just apply.
    w_assert1 (xct()->is_single_log_sys_xct());
    w_assert1 (page.latch_mode() == LATCH_EX);

    W_DO(smlevel_0::vol->alloc_a_page(new_page_id));
    btree_page_h new_page;
    w_rc_t rc;
    rc = new_page.fix_nonroot(page, new_page_id, LATCH_EX, false, true);

    if (rc.is_error()) {
        // if failed for any reason, we release the allocated page.
        W_DO(smlevel_0::vol ->deallocate_page(new_page_id));
        return rc;
    }

    // The new page has an empty key range; parent's high to high.
    w_keystr_t fence, chain_high;
    page.copy_fence_high_key(fence);
    // An empty chain-fence-high means this page had no foster chain (or
    // was the right-most page of one).
    bool was_right_most = (page.get_chain_fence_high_length() == 0);
    page.copy_chain_fence_high_key(chain_high);
    if (was_right_most) {
        // this means there was no chain or the page was the right-most of it.
        // (so its high=high of chain)
        // upon the first foster split, we start setting the chain-high.
        page.copy_fence_high_key(chain_high);
    }

#if W_DEBUG_LEVEL >= 3
    lsn_t old_lsn = page.get_page_lsn();
#endif //W_DEBUG_LEVEL

    // Log first, then apply: both pages' changes are covered by this record.
    W_DO(log_btree_norec_alloc(page, new_page, new_page_id, fence, chain_high));
    DBGOUT3(<< "btree_impl::_ux_norec_alloc_core, fence=" << fence
            << ", old-LSN=" << old_lsn << ", new-LSN=" << page.get_page_lsn()
            << ", PID=" << new_page_id);

    // initialize as an empty child: low fence == high fence == parent's high.
    // The child inherits the parent's old foster pointer/emlsn (chain splice).
    new_page.format_steal(page.get_page_lsn(), new_page_id, page.store(),
                          page.root(), page.level(), 0, lsn_t::null,
                          page.get_foster_opaqueptr(), page.get_foster_emlsn(),
                          fence, fence, chain_high, false);
    page.accept_empty_child(page.get_page_lsn(), new_page_id, false /*not from redo*/);

    // in this operation, the log contains everything we need to recover without any
    // write-order-dependency. So, no registration for WOD.
    w_assert3(new_page.is_consistent(true, true));
    w_assert1(new_page.is_fixed());
    w_assert1(new_page.latch_mode() == LATCH_EX);
    w_assert3(page.is_consistent(true, true));
    w_assert1(page.is_fixed());
    return RCOK;
}
/**
 * Append an alloc_file_page log record for "pid" with recovery LSN
 * "rec_lsn".  No-op (RCOK) when logging is unavailable or there is
 * no current transaction.
 */
rc_t log_alloc_file_page(const lpid_t& pid, const lsn_t& rec_lsn)
{
    xct_t* xd = xct();
    // Fix: require a live transaction before dereferencing xd below.
    // Sibling stubs (e.g. log_xct_prepare_stores, log_btree_purge)
    // already include this guard; without it a null xct would crash
    // inside xd->get_logbuf().
    bool should_log = smlevel_1::log && smlevel_0::logging_enabled && xd;
    if (should_log)  {
        logrec_t* logrec; // fudge 2.4
        W_DO(xd->get_logbuf(logrec, t_alloc_file_page));
        new (logrec) alloc_file_page_log(pid, rec_lsn);
        W_DO(xd->give_logbuf(logrec));
    }
    return RCOK;
}
/// Append an xct_prepare_stores record (the stores touched by a
/// preparing transaction); silently succeeds when logging is off.
rc_t log_xct_prepare_stores(int num, const stid_t* stids)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_xct_prepare_stores));
    new (lr) xct_prepare_stores_log(num, stids);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/// Append an xct_prepare_alk record (acquired locks of a preparing
/// transaction); silently succeeds when logging is off.
rc_t log_xct_prepare_alk(int num, lockid_t* lks, lock_mode_t* modes)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_xct_prepare_alk));
    new (lr) xct_prepare_alk_log(num, lks, modes);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/// Append an xct_prepare_st record (global tid + coordinator handle of
/// a preparing transaction); silently succeeds when logging is off.
rc_t log_xct_prepare_st(const gtid_t* g, const server_handle_t& h)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_xct_prepare_st));
    new (lr) xct_prepare_st_log(g, h);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/// Append an xct_prepare_fi record (prepare bookkeeping counters and
/// first LSN); silently succeeds when logging is off.
rc_t log_xct_prepare_fi(int numex, int numix, int numsix, int numextent,
                        const lsn_t& first, int rsvd, int ready, int used)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_xct_prepare_fi));
    new (lr) xct_prepare_fi_log(numex, numix, numsix, numextent,
                                first, rsvd, ready, used);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/**
 * Append a free_ext_list record against "page"; when the update cannot
 * be logged, the page is still marked dirty so the change reaches disk.
 */
rc_t log_free_ext_list(const page_p& page, const stid_t& stid, extnum_t head, extnum_t count)
{
    xct_t* xd = xct();
    // Fix: require a live transaction before dereferencing xd below;
    // the page-logging siblings (e.g. log_btree_purge) already guard
    // on xd and fall back to set_dirty().
    bool should_log = smlevel_1::log && smlevel_0::logging_enabled && xd;
    if (should_log)  {
        logrec_t* logrec; // fudge 0.0
        W_DO(xd->get_logbuf(logrec, t_free_ext_list, &page));
        new (logrec) free_ext_list_log(page, stid, head, count);
        W_DO(xd->give_logbuf(logrec, &page));
    } else {
        // Unlogged path: still mark the frame dirty.
        page.set_dirty();
    }
    return RCOK;
}
/**
 * Append a set_ext_next record against "page"; when the update cannot
 * be logged, the page is still marked dirty so the change reaches disk.
 */
rc_t log_set_ext_next(const page_p& page, extnum_t ext, extnum_t new_next)
{
    xct_t* xd = xct();
    // Fix: require a live transaction before dereferencing xd below;
    // consistent with the guarded page-logging siblings.
    bool should_log = smlevel_1::log && smlevel_0::logging_enabled && xd;
    if (should_log)  {
        logrec_t* logrec; // fudge 0.0
        W_DO(xd->get_logbuf(logrec, t_set_ext_next, &page));
        new (logrec) set_ext_next_log(page, ext, new_next);
        W_DO(xd->give_logbuf(logrec, &page));
    } else {
        // Unlogged path: still mark the frame dirty.
        page.set_dirty();
    }
    return RCOK;
}
/// Append a compensation record pointing at "rec_lsn"; silently
/// succeeds when logging is off.
rc_t log_compensate(const lsn_t& rec_lsn)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_compensate));
    new (lr) compensate_log(rec_lsn);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/// Append a free-form comment record containing "msg"; silently
/// succeeds when logging is off.
rc_t log_comment(const char* msg)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_comment));
    new (lr) comment_log(msg);
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/**
 * Append a store_operation record against "page"; when the update
 * cannot be logged, the page is still marked dirty so the change
 * reaches disk.
 */
rc_t log_store_operation(const page_p& page, const store_operation_param& op)
{
    xct_t* xd = xct();
    // Fix: require a live transaction before dereferencing xd below;
    // consistent with the guarded page-logging siblings.
    bool should_log = smlevel_1::log && smlevel_0::logging_enabled && xd;
    if (should_log)  {
        logrec_t* logrec; // fudge 1.0
        W_DO(xd->get_logbuf(logrec, t_store_operation, &page));
        new (logrec) store_operation_log(page, op);
        W_DO(xd->give_logbuf(logrec, &page));
    } else {
        // Unlogged path: still mark the frame dirty.
        page.set_dirty();
    }
    return RCOK;
}
/// Append the transaction-end record; silently succeeds when logging
/// is off.
rc_t log_xct_end()
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled || !xd || !xd->is_log_on())
        return RCOK;            // nothing to log
    logrec_t* lr;               // fudge 0.0
    W_DO(xd->get_logbuf(lr, t_xct_end));
    new (lr) xct_end_log();
    W_DO(xd->give_logbuf(lr));
    return RCOK;
}
/**
 * Append a free_pages_in_ext record against "page"; when the update
 * cannot be logged, the page is still marked dirty so the change
 * reaches disk.
 */
rc_t log_free_pages_in_ext(const page_p& page, snum_t snum, extnum_t idx, const Pmap& pmap)
{
    xct_t* xd = xct();
    // Fix: require a live transaction before dereferencing xd below;
    // consistent with the guarded page-logging siblings.
    bool should_log = smlevel_1::log && smlevel_0::logging_enabled && xd;
    if (should_log)  {
        logrec_t* logrec; // fudge 1.5
        W_DO(xd->get_logbuf(logrec, t_free_pages_in_ext, &page));
        new (logrec) free_pages_in_ext_log(page, snum, idx, pmap);
        W_DO(xd->give_logbuf(logrec, &page));
    } else {
        // Unlogged path: still mark the frame dirty.
        page.set_dirty();
    }
    return RCOK;
}
/// Return true iff lock id "n" has an entry with active SLI status in
/// the current transaction's lock cache.  Only ids at page granularity
/// or coarser are cached, so anything finer answers false immediately.
bool lock_m::sli_query(lockid_t const &n)
{
    xct_t* const xd = xct();
    if (n.lspace() > lockid_t::t_page || !xd || !xd->lock_cache_enabled())
        return false;

    xct_lock_info_t* const li = xd->lock_info();
    // Probe the cache under the lock-info mutex.
    W_COERCE(li->lock_info_mutex.acquire());
    lock_cache_elem_t* const hit = _core->search_cache(li, n, true);
    const bool active = (hit && hit->req->_sli_status == sli_active);
    W_VOID(li->lock_info_mutex.release());
    return active;
}
/// Append a btree_purge record for "page".  When the change cannot be
/// logged (no log, logging disabled, temporary store, or no logging
/// transaction) the page is just marked dirty instead.
rc_t log_btree_purge(const page_p& page)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 3.29
    W_DO(xd->get_logbuf(lr, t_btree_purge, &page));
    new (lr) btree_purge_log(page);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a page_set_byte record for "page".  When the change cannot
/// be logged, the page is just marked dirty instead.
rc_t log_page_set_byte(const page_p& page, int idx, u_char old, u_char bits, int op)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_page_set_byte, &page));
    new (lr) page_set_byte_log(page, idx, old, bits, op);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a page_splicez record for "page".  When the change cannot
/// be logged, the page is just marked dirty instead.
rc_t log_page_splicez(const page_p& page, int idx, int start, int len,
                      int osave, int nsave, const cvec_t& vec)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_page_splicez, &page));
    new (lr) page_splicez_log(page, idx, start, len, osave, nsave, vec);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a page_shift record for "page".  When the change cannot be
/// logged, the page is just marked dirty instead.
rc_t log_page_shift(const page_p& page, int idx2, page_s::slot_length_t off2,
                    page_s::slot_length_t len2, int idx1,
                    page_s::slot_length_t off1)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_page_shift, &page));
    new (lr) page_shift_log(page, idx2, off2, len2, idx1, off1);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a page_reclaim record for "page".  When the change cannot
/// be logged, the page is just marked dirty instead.
rc_t log_page_reclaim(const page_p& page, int idx, const cvec_t& vec)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.67
    W_DO(xd->get_logbuf(lr, t_page_reclaim, &page));
    new (lr) page_reclaim_log(page, idx, vec);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a btree_insert record for "page".  When the change cannot
/// be logged, the page is just marked dirty instead.
rc_t log_btree_insert(const page_p& page, int idx, const cvec_t& key,
                      const cvec_t& el, bool unique)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.42
    W_DO(xd->get_logbuf(lr, t_btree_insert, &page));
    new (lr) btree_insert_log(page, idx, key, el, unique);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append a page_link record for "page".  When the change cannot be
/// logged, the page is just marked dirty instead.
rc_t log_page_link(const page_p& page, shpid_t new_prev, shpid_t new_next)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_page_link, &page));
    new (lr) page_link_log(page, new_prev, new_next);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/// Append an rtree_remove record for "page".  When the change cannot
/// be logged, the page is just marked dirty instead.
rc_t log_rtree_remove(const page_p& page, int idx, const nbox_t& key, const cvec_t& el)
{
    xct_t* const xd = xct();
    if (!smlevel_1::log || !smlevel_0::logging_enabled
        || (page.get_store_flags() & page.st_tmp) != 0
        || !xd || !xd->is_log_on()) {
        page.set_dirty();       // unlogged update: still reach disk
        return RCOK;
    }
    logrec_t* lr;               // fudge 1.0
    W_DO(xd->get_logbuf(lr, t_rtree_remove, &page));
    new (lr) rtree_remove_log(page, idx, key, el);
    W_DO(xd->give_logbuf(lr, &page));
    return RCOK;
}
/*
 * dsmBlobDmp - dump/verify a blob: fetch the blob's root record,
 * report its type (direct vs. segmented) and total length, then walk
 * every segment table and fetch every data segment, printing
 * diagnostics and flagging length/type mismatches.  "silent"
 * suppresses informational output (errors still print).
 *
 * NOTE(review): this block was reconstructed from a whitespace-mangled
 * extraction; two printf strings/comments split across chunk
 * boundaries were rejoined — verify against the original source.
 */
dsmStatus_t
dsmBlobDmp(
        dsmContext_t *pcontext,  /* IN database context */
        dsmBlob_t    *pBlob,     /* IN blob descriptor */
        GBOOL         silent)    /* IN silent except if an error */
{
    dsmStatus_t   returnCode;
    dbcontext_t  *pdbcontext;
    xDbkey_t      xDbkey;        /* extended dbkey (area & recid) */
    LONG          recordSize, segRC;
    LONG          maxRecordSize;
    dsmBuffer_t  *pRecord;       /* With the 1 byte indicator */
    dsmBuffer_t  *pSegTab;       /* With the 1 byte indicator */
    dsmBuffer_t  *pST;           /* within pSegTab */
    LONG          nent;          /* number seg tab entries */
    LONG          segLen;
    LONG          i;
    LONG          remainder;     /* bytes of totLength not yet seen */
    long          tl;            /* NOTE(review): used uninitialized on the
                                    early (DNE / fetch-error) paths below */
    LONG          n_ds = 0, n_ss = 0;  /* data / segment-table record counts */
    dsmBuffer_t   pName [] = "bozo";
    void printf (...);           /* legacy local declaration of printf */

    TRACE_CB(pcontext, "dsmBlobDmp");

    pdbcontext = pcontext->pdbcontext;
    if (pdbcontext->usertype & SELFSERVE)
    {
        if (pdbcontext->resyncing || lkservcon(pcontext))
            return DSM_S_CTRLC; /* Self-service only */
    }

    xDbkey.dbkey = pBlob->blobId;
    if (!silent)
        printf ("\nBlobId: %10ld ", xDbkey.dbkey);

    /* Map the blob object number to its storage area. */
    returnCode = omIdToArea (pcontext, DSMOBJECT_BLOB,
                             (COUNT)pBlob->blobObjNo, &(xDbkey.area));
    if (returnCode)
    {
        if (!silent)
            printf ("omIdToArea for object Type: %ld blobObjNo: %ld returned %ld <--\n",
                    DSMOBJECT_BLOB, pBlob->blobObjNo, returnCode);
        return returnCode;
    }

    pdbcontext->inservice++;    /* balanced by -- on every exit below */

    maxRecordSize = DSMBLOBMAXLEN + 1;
    pRecord = (dsmBuffer_t *)utmalloc (maxRecordSize);
    if (!pRecord)
    {
        pdbcontext->inservice--;
        returnCode = DSM_S_BLOBNOMEMORY;
        return returnCode;
    }

    /* returns size of record if record is found. Returns
     * a negative number if record is not found.
     */
    recordSize = rmFetchRecord(pcontext, xDbkey.area, xDbkey.dbkey,
                               pRecord, (COUNT)maxRecordSize,
                               0 /* not continuation */);
    if (recordSize == 4 && xlng (pRecord) == 0)
    {
        /* A 4-byte all-zero record marks a deleted/nonexistent blob. */
        pBlob->segLength = 0;
        returnCode = DSM_S_BLOBDNE;
        if (!silent)
            printf ("D.N.E. recordSize: 4, record: 0 0 0 0 <--\n");
    }
    else if (recordSize < 0)
    {
        pBlob->segLength = 0;
        returnCode = (dsmStatus_t) recordSize;
        if (!silent)
            printf ("returnCode: %ld <--\n", returnCode);
    }
    else
    {
        returnCode = DSM_S_BLOBOK;
        /* Set the total length of the blob */
        if (*pRecord == DSMBLOBDATA)
        {
            /* Direct blob: the record itself is the data (minus the
               1-byte type indicator). */
            n_ds++;
            pBlob->totLength = recordSize - 1;
        }
        else if (recordSize > 10)
            pBlob->totLength = xlng (pRecord + 7);
        else
        {
            pBlob->totLength = 0;
            returnCode = DSM_S_BLOBBAD;
        }
        if (!silent)
            printf ("Length: %10ld Type: %s\n", pBlob->totLength,
                    (char *) ((*pRecord == DSMBLOBDATA) ? "Direct" :
                              (*pRecord == DSMBLOBSEG)  ? "Segmented" :
                                                          "Bad <--"));
        tl = remainder = pBlob->totLength;

        if (returnCode == DSM_S_BLOBOK && *pRecord != DSMBLOBDATA)
        {
            /* Segmented blob: walk the chain of segment tables. */
            if (xlng(pRecord+3) == 0) /* Only 1 seg table */
                pSegTab = utmalloc (recordSize);
            else
                pSegTab = utmalloc (BLOBMAXSEGTAB);
            if (!pSegTab)
            {
                utfree (pRecord);
                pdbcontext->inservice--;
                returnCode = DSM_S_BLOBNOMEMORY;
                return returnCode;
            }
            bufcop (pSegTab, pRecord, recordSize);

            while (returnCode == DSM_S_BLOBOK)
            {
                n_ss++;
                pST  = pSegTab + SEGHDR_LEN;
                nent = xct (pSegTab + 1);   /* entry count of this table */
                if (!silent)
                    printf ("  SegTab: %10ld number entries: %d %s\n",
                            xDbkey.dbkey, nent,
                            ((nent < 1 || nent > MAXSEGENT) ? "<--" : ""));
                if (nent < 1 || nent > MAXSEGENT)
                {
                    returnCode = DSM_S_BLOBBAD;
                    break;
                }
                /* Fetch and verify each data segment of this table. */
                for (i = 0; i < nent; i++, pST += SEGENT_LEN)
                {
                    segLen = xct (pST);           /* NOTE(review): unused;
                                                     xct(pST) re-read below */
                    xDbkey.dbkey = xlng (pST + 2);
                    segRC = rmFetchRecord(pcontext, xDbkey.area,
                                          xDbkey.dbkey, pRecord,
                                          (COUNT)maxRecordSize, 0);
                    if (segRC <= 0)
                    {
                        printf ("  Seg%3d: %10ld returnCode: %d <--\n",
                                i, xDbkey.dbkey, segRC);
                        returnCode = (dsmStatus_t) (segRC ? segRC
                                                          : DSM_S_BLOBBAD);
                        break;
                    }
                    /* Segment length and type must match the table entry. */
                    if (segRC - 1 != xct (pST) || *pRecord != DSMBLOBDATA)
                    {
                        printf ("  Seg%3d: %10ld Len: %d ActualLen: %d type: %d <--\n",
                                i, xDbkey.dbkey, xct (pST), segRC - 1, *pRecord);
                        returnCode = DSM_S_BLOBBAD;
                        break;
                    }
                    if (!silent)
                        printf ("  Seg%3d: %10ld Len: %d\n",
                                i, xDbkey.dbkey, xct (pST));
                    n_ds++;
                    remainder -= xct (pST);
                } /* for i ... < nent */

                if (returnCode != DSM_S_BLOBOK)
                    break;

                /* Follow the link to the next segment table, if any. */
                xDbkey.dbkey = xlng (pSegTab + 3);
                if (!silent)
                    printf ("  N Seg:  %10ld\n", xDbkey.dbkey);
                if (!xDbkey.dbkey)
                {
                    break;
                }
                returnCode = dbBlobFetch (pcontext, &xDbkey, pSegTab,
                                          BLOBMAXSEGTAB, DSMBLOBSEG, pName);
            } /* while remainder */

            if (remainder)
                printf ("Length error: remainder: %10ld <--\n", remainder);
            utfree (pSegTab);
        } /* inital blob segemnt ok */
    } /* initial rmfetch ok */

    utfree (pRecord);
    pdbcontext->inservice--;
    if (!silent)
        printf ("tl: %10ld ds: %4d ss: %4d\n", tl, n_ds, n_ss);
    return returnCode;
}
/*********************************************************************
 *
 *  scan_index_i::_fetch(key, klen, el, elen, skip)
 *
 *  Fetch current entry into "key" and "el". If "skip" is true,
 *  advance the scan to the next qualifying entry.
 *
 *********************************************************************/
rc_t
scan_index_i::_fetch(
    vec_t*      key,
    smsize_t*   klen,
    vec_t*      el,
    smsize_t*   elen,
    bool        skip)
{
    // Check if error condition occured.
    if (_error_occurred.is_error()) {
        // eBADCMPOP recorded at init time means the bounds can never
        // be satisfied: report a clean end-of-scan, not an error.
        if(_error_occurred.err_num() == eBADCMPOP) {
            _eof = true;
            return RCOK;
        }
        return w_rc_t(_error_occurred);
    }
    SM_PROLOGUE_RC(scan_index_i::_fetch, in_xct, read_only, 0);
    /*
     *  Check if scan is terminated.
     */
    if (_finished) {
        return RC(eBADSCAN);
    }
    if (_eof) {
        return RC(eEOF);
    }
    // The scan may only be driven by the transaction that opened it.
    w_assert1(xct()->tid() == tid);
    switch (ntype) {
    case t_btree:
    case t_uni_btree:
    case t_mrbtree:
    case t_uni_mrbtree:
    case t_mrbtree_l:
    case t_uni_mrbtree_l:
    case t_mrbtree_p:
    case t_uni_mrbtree_p:
        if (skip) {
            /*
             *  Advance cursor.
             */
            do {
                DBG(<<"");
                W_DO( bt->fetch(*_btcursor, _bIgnoreLatches) );
                if(_btcursor->eof()) break;
                // When nulls are excluded, keep advancing past
                // zero-length keys.
            } while (_skip_nulls && (_btcursor->klen() == 0));
        }
        break;
    case t_bad_ndx_t:
    default:
        W_FATAL(eINTERNAL);
    }
    /*
     *  Copy result to user buffer.
     */
    if (_btcursor->eof()) {
        DBG(<<"eof");
        _eof = true;
    } else {
        // NOTE(review): chunk ends here — the copy-out branch and the
        // rest of _fetch() lie beyond this view.
        // NOTE(review): this chunk begins mid-function — the three lines
        // below are the tail of a lock-cache probe (see lock_m::sli_query)
        // whose head lies outside this view.
        lock_cache_elem_t* e = _core->search_cache(theLockInfo, n, true);
        rval = (e && e->req->_sli_status == sli_active);
        W_VOID(theLockInfo->lock_info_mutex.release());
    }
    return rval;
}

/*
 * Report in "m" the mode in which transaction "tid" holds lock "n".
 * Consults the transaction's lock cache first when possible.
 */
rc_t lock_m::query(
    const lockid_t&     n,
    lmode_t&            m,
    const tid_t&        tid,
    bool                implicit,
    bool                cache_only)
{
    DBGTHRD(<<"lock_m::query for lock " << n);
    xct_t * xd = xct();
    // Implicit-lock queries must name an explicit transaction.
    w_assert9(!implicit || tid != tid_t::null);
    INC_TSTAT(lock_query_cnt);

    if (!implicit) {
        m = NL;    // default answer: no lock held
        // search the cache first, if you can
        if (n.lspace() <= lockid_t::t_page && xd && tid == xd->tid()
            && xd->lock_cache_enabled())
        {
            xct_lock_info_t* const theLockInfo = xd->lock_info();
            W_COERCE(theLockInfo->lock_info_mutex.acquire());
            lock_cache_elem_t* e = _core->search_cache(theLockInfo, n);
            if (e) {
                // NOTE(review): chunk ends here — the remainder of
                // lock_m::query() lies beyond this view.
/********************************************************************* * * scan_index_i::_init(cond, b1, c2, b2) * * Initialize a scan. Called by all constructors. * * Of which there is only 1, and it uses mode=SH * *********************************************************************/ void scan_index_i::_init( cmp_t cond, const cvec_t& bound, cmp_t c2, const cvec_t& b2, lock_mode_t mode, const bool bIgnoreLatches) { _finished = false; /* * Determine index and kvl lock modes. */ lock_mode_t index_lock_mode; concurrency_t key_lock_level; // _cc was passed in on constructor // Disallow certain kinds of scans on certain // kinds of indexes: // switch(_cc) { case t_cc_none: index_lock_mode = lock_m::parent_mode[mode]; // IS if mode == SH key_lock_level = t_cc_none; break; case t_cc_im: case t_cc_kvl: index_lock_mode = lock_m::parent_mode[mode]; // IS if mode==SH key_lock_level = _cc; break; case t_cc_modkvl: index_lock_mode = lock_m::parent_mode[mode]; // IS if mode==SH // GROT: force the checks below to // check scan conditions key_lock_level = t_cc_none; break; case t_cc_file: index_lock_mode = mode; key_lock_level = t_cc_none; break; case t_cc_append: default: _error_occurred = RC(eBADLOCKMODE); return; break; } /* * Save tid */ tid = xct()->tid(); /* * Access directory entry */ sdesc_t* sd = 0; _error_occurred = dir->access(_stid, sd, index_lock_mode); if (_error_occurred.is_error()) { return; } if (sd->sinfo().stype != t_index) { _error_occurred = RC(eBADSTORETYPE); return; } if((concurrency_t)sd->sinfo().cc != key_lock_level) { switch((concurrency_t)sd->sinfo().cc) { case t_cc_none: // allow anything break; case t_cc_modkvl: // certain checks are made in fetch_init if(_cc == t_cc_none || _cc == t_cc_file) { key_lock_level = t_cc_none; } else { key_lock_level = (concurrency_t)sd->sinfo().cc; } break; case t_cc_im: case t_cc_kvl: // allow file if(_cc == t_cc_file) { key_lock_level = t_cc_file; break; } key_lock_level = (concurrency_t)sd->sinfo().cc; break; default: 
_error_occurred = RC(eBADCCLEVEL); return; } } /* * Initialize the fetch */ switch (ntype = (ndx_t) sd->sinfo().ntype) { case t_bad_ndx_t: _error_occurred = RC(eBADNDXTYPE); return; case t_btree: case t_uni_btree: { _btcursor = new bt_cursor_t(!_skip_nulls); if (! _btcursor) { _error_occurred = RC(eOUTOFMEMORY); return; } bool inclusive = (cond == eq || cond == ge || cond == le); cvec_t* elem = 0; if(_btcursor->is_backward()) { elem = &(inclusive ? cvec_t::pos_inf : cvec_t::neg_inf); } else { elem = &(inclusive ? cvec_t::neg_inf : cvec_t::pos_inf); } _error_occurred = bt->fetch_init(*_btcursor, sd->root(), sd->sinfo().nkc, sd->sinfo().kc, ntype == t_uni_btree, key_lock_level, bound, *elem, cond, c2, b2, mode); if (_error_occurred.is_error()) { return; } /* if(_btcursor->is_backward()) { // Not fully supported _error_occurred = RC(eNOTIMPLEMENTED); return; } */ } break; case t_mrbtree: case t_uni_mrbtree: case t_mrbtree_l: case t_uni_mrbtree_l: case t_mrbtree_p: case t_uni_mrbtree_p: { _btcursor = new bt_cursor_t(!_skip_nulls); if (! _btcursor) { _error_occurred = RC(eOUTOFMEMORY); return; } bool inclusive = (cond == eq || cond == ge || cond == le); cvec_t* elem = 0; if(_btcursor->is_backward()) { elem = &(inclusive ? cvec_t::pos_inf : cvec_t::neg_inf); } else { elem = &(inclusive ? 
cvec_t::neg_inf : cvec_t::pos_inf); } // traverse all the subtrees that covers the region [bound,b2] std::vector<lpid_t> roots; cvec_t* bound_key; cvec_t* b2_key; _error_occurred = bt->_scramble_key(bound_key, bound, sd->sinfo().nkc, sd->sinfo().kc); char* bound_sc = (char*) malloc((*bound_key).size()); (*bound_key).copy_to(bound_sc, (*bound_key).size()); cvec_t b1(bound_sc, (*bound_key).size()); _error_occurred = bt->_scramble_key(b2_key, b2, sd->sinfo().nkc, sd->sinfo().kc); if(&bound == &vec_t::neg_inf && &b2 == &vec_t::pos_inf) { _error_occurred = sd->partitions().getAllPartitions(roots); } else { _error_occurred = sd->partitions().getPartitions(b1, *b2_key, roots); } _error_occurred = bt->mr_fetch_init(*_btcursor, roots, sd->sinfo().nkc, sd->sinfo().kc, ntype == t_uni_btree, key_lock_level, b1, *elem, cond, c2, *b2_key, mode, bIgnoreLatches); free(bound_sc); if (_error_occurred.is_error()) { return; } /* if(_btcursor->is_backward()) { // Not fully supported _error_occurred = RC(eNOTIMPLEMENTED); return; } */ } break; default: W_FATAL(eINTERNAL); } }