rc_t btree_impl::_sx_adopt_foster (btree_page_h &parent, btree_page_h &child) {
    // The key under which the foster child is adopted into the parent:
    // the child's high fence key.
    w_keystr_t child_high_fence;
    child.copy_fence_high_key(child_high_fence);

    // Make sure the parent has room for the new separator key *before*
    // entering the single-log system transaction below.
    W_DO(_sx_split_if_needed(parent, child_high_fence));

    // Now move the foster pointer into the parent inside its own SSX.
    sys_xct_section_t sxs(true);
    W_DO(sxs.check_error_on_start());
    const rc_t core_rc = _ux_adopt_foster_core(parent, child, child_high_fence);
    W_DO(sxs.end_sys_xct(core_rc));
    DBG(<< "Adopted " << child.pid() << " into " << parent.pid());
    return core_rc;
}
// Best-effort ("opportunistic") variant of foster adoption: only proceeds
// if the parent's latch can be upgraded to EX without waiting.  In the
// visible portion, a failed upgrade records an EX-need hint and returns
// RCOK without adopting; `pushedup` is initialized to false here and
// presumably set true on some later path — confirm against the rest of
// the function (its body continues beyond this excerpt).
// NOTE(review): `from_recovery` is not used in the visible portion.
rc_t btree_impl::_sx_opportunistic_adopt_foster (btree_page_h &parent, btree_page_h &child,
    bool &pushedup, const bool from_recovery) {
    w_assert1 (parent.is_fixed());
    w_assert1 (parent.is_node());
    w_assert1 (child.is_fixed());
    pushedup = false;

    // let's try upgrading parent to EX latch. This highly likely fails in high-load situation,
    // so let's do it here to avoid system transaction creation cost.
    // we start from parent because EX latch on child is assured to be available in this order
    if (!parent.upgrade_latch_conditional()) {
        // Upgrade would have blocked: give up immediately rather than wait.
        DBGOUT1(<< "opportunistic_adopt gave it up because of parent. " << parent.pid() << ". do nothing.");
        increase_ex_need(parent.pid()); // give a hint to subsequent accesses
        return RCOK;
    }
void bt_cursor_t::_set_current_page(btree_page_h &page) {
    // Drop whatever page the cursor is currently anchored on, if any.
    if (0 != _pid) {
        _release_current_page();
    }
    w_assert1(_pid == 0);
    w_assert1(_pid_bfidx.idx() == 0);

    // Anchor the cursor on the new page and pin it so later refix()
    // calls can re-latch it cheaply.
    _pid = page.pid();
    _pid_bfidx.set(page.pin_for_refix());
    _lsn = page.get_page_lsn();
#ifndef USE_ATOMIC_COMMIT
    // _check_page_update relies on comparing against a valid LSN.
    w_assert1(_lsn.valid());
#endif
}
rc_t bt_cursor_t::_check_page_update(btree_page_h &p) {
    // Fast path: same page, same LSN — nothing changed under us.
    if (_pid == p.pid() && p.get_page_lsn() == _lsn) {
        return RCOK;
    }

    // The page was modified (or is a different page): re-locate our key.
    bool found = false;
    if (p.fence_contains(_key)) {
        // The page's fence keys still cover our key; only the slot
        // position needs to be refreshed.
        p.search(_key, found, _slot);
    } else {
        // The key migrated off this page; traverse again to find the
        // page that now covers it.
        W_DO(btree_impl::_ux_traverse(_store, _key,
            btree_impl::t_fence_contain, LATCH_SH, p));
        p.search(_key, found, _slot);
    }
    // Not finding the key is legal only in specific cursor modes;
    // see _locate_first.
    w_assert1(found || !_needs_lock
        || (!_forward && !_upper_inclusive && !_dont_move_next));
    _set_current_page(p);
    return RCOK;
}
// Splits an overflowing page by creating a foster child inside a single-log
// system transaction (SSX).  On success, `new_page_id` holds the id of the
// freshly allocated foster child and `page` points to it via its foster
// pointer.  `triggering_key` guides where the split key is chosen.
// Requires: `page` is latched in EX mode (asserted below).
rc_t btree_impl::_sx_split_foster(btree_page_h& page, PageID& new_page_id,
    const w_keystr_t& triggering_key) {
    sys_xct_section_t sxs(true);
    W_DO(sxs.check_error_on_start());
    w_assert1 (page.latch_mode() == LATCH_EX);
    // DBG(<< "SPLITTING " << page);

    /*
     * Step 1: Allocate a new page for the foster child
     */
    W_DO(smlevel_0::vol->alloc_a_page(new_page_id));

    /*
     * Step 2: Create new foster child and move records into it, logging its
     * raw contents as a page_img_format operation
     */
    btree_page_h new_page;
    rc_t rc = new_page.fix_nonroot(page, new_page_id, LATCH_EX, false, true);
    if (rc.is_error()) {
        // Fix failed: give the freshly allocated page back before bailing out.
        W_DO(smlevel_0::vol->deallocate_page(new_page_id));
        return rc;
    }

    // assure foster-child page has an entry same as fence-low for locking correctness.
    // See jira ticket:84 "Key Range Locking" (originally trac ticket:86).
    // CS TODO - why is this required
    // There may be a bug, since error happens if we uncomment this
    // W_DO(_ux_assure_fence_low_entry(new_page)); // this might be another SSX

    int move_count = 0;
    w_keystr_t split_key;
    W_DO(new_page.format_foster_child(page, new_page_id, triggering_key,
        split_key, move_count));
    // A split that moves nothing would leave the overflow unresolved.
    w_assert0(move_count > 0);
    // DBG5(<< "NEW FOSTER CHILD " << new_page);

    /*
     * Step 3: Delete moved records and update foster child pointer and high
     * fence on overflowing page. Foster parent is not recompressed after
     * moving records (CS TODO)
     */
    // The moved records are the last `move_count` slots of the old page.
    page.delete_range(page.nrecs() - move_count, page.nrecs());
    // DBG5(<< "AFTER RANGE DELETE " << page);

    w_keystr_t new_chain;
    new_page.copy_chain_fence_high_key(new_chain);
    bool foster_set = page.set_foster_child(new_page_id, split_key, new_chain);
    w_assert0(foster_set);

    /*
     * Step 4: Update parent pointers of the moved records. The new foster
     * child will have the correct parent set by the fix call above. This is
     * only required because of swizzling.
     */
    // set parent pointer on hash table
    // smlevel_0::bf->switch_parent(new_page_id, page.get_generic_page());

    // set parent pointer for children that moved to new page
    int max_slot = new_page.max_child_slot();
    for (general_recordid_t i = GeneralRecordIds::FOSTER_CHILD; i <= max_slot; ++i) {
        // CS TODO: Slot 1 (which is actually 0 in the internal page
        // representation) is not used when inserting into an empty node (see
        // my comment on btree_page_h.cpp::insert_nonghost), so in *some*
        // cases, the slot i=1 will yield and invalid page in switch_parent
        // below. Because of this great design feature, switch_parent has to
        // cope with an invalid page.
        smlevel_0::bf->switch_parent(*new_page.child_slot_address(i),
            new_page.get_generic_page());
    }

    /*
     * Step 5: Log bulk deletion and foster update on parent
     */
    W_DO(log_btree_split(new_page, page, move_count, split_key, new_chain));
    w_assert1(new_page.get_page_lsn() != lsn_t::null);

    // hint for subsequent accesses
    // NOTE(review): "forster" is the existing project identifier; not a typo
    // introduced here.
    increase_forster_child(page.pid());

    W_DO (sxs.end_sys_xct (RCOK));
    DBG1(<< "Split page " << page.pid() << " into " << new_page_id);
    return RCOK;
}