Example #1
rc_t btree_impl::_ux_norec_alloc_core(btree_page_h &page, PageID &new_page_id) {
    // This is called only in a REDO-only SSX (single-log system transaction), so there is no compensation logging; just apply.
    w_assert1 (xct()->is_single_log_sys_xct());
    w_assert1 (page.latch_mode() == LATCH_EX);

    W_DO(smlevel_0::vol->alloc_a_page(new_page_id));
    btree_page_h new_page;
    w_rc_t rc;
    rc = new_page.fix_nonroot(page, new_page_id, LATCH_EX, false, true);

    if (rc.is_error()) {
        // If the fix failed for any reason, release the page we just allocated.
        W_DO(smlevel_0::vol->deallocate_page(new_page_id));
        return rc;
    }

    // The new page starts with an empty key range: [parent's high fence, parent's high fence).
    w_keystr_t fence, chain_high;
    page.copy_fence_high_key(fence);
    bool was_right_most = (page.get_chain_fence_high_length() == 0);
    page.copy_chain_fence_high_key(chain_high);
    if (was_right_most) {
        // This means there was no foster chain, or this page was the right-most page in it
        // (so its high fence equals the chain's high fence).
        // Upon the first foster split, we start setting the chain-high.
        page.copy_fence_high_key(chain_high);
    }

#if W_DEBUG_LEVEL >= 3
    lsn_t old_lsn = page.get_page_lsn();
#endif //W_DEBUG_LEVEL

    W_DO(log_btree_norec_alloc(page, new_page, new_page_id, fence, chain_high));
    DBGOUT3(<< "btree_impl::_ux_norec_alloc_core, fence=" << fence << ", old-LSN="
        << old_lsn << ", new-LSN=" << page.get_page_lsn() << ", PID=" << new_page_id);

    // initialize as an empty child:
    new_page.format_steal(page.get_page_lsn(), new_page_id, page.store(),
                          page.root(), page.level(), 0, lsn_t::null,
                          page.get_foster_opaqueptr(), page.get_foster_emlsn(),
                          fence, fence, chain_high, false);
    page.accept_empty_child(page.get_page_lsn(), new_page_id, false /*not from redo*/);

    // In this operation, the log record contains everything we need to recover without any
    // write-order dependency (WOD), so no WOD registration is needed.
    w_assert3(new_page.is_consistent(true, true));
    w_assert1(new_page.is_fixed());
    w_assert1(new_page.latch_mode() == LATCH_EX);

    w_assert3(page.is_consistent(true, true));
    w_assert1(page.is_fixed());
    return RCOK;
}
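
A minimal caller-side sketch follows, assuming the routine runs inside an already-open single-log system transaction (SSX) with an EX latch held on the source page. The wrapper name allocate_empty_foster_child and the SSX begin/commit placeholders are illustrative assumptions; only btree_impl::_ux_norec_alloc_core itself comes from the example above.

// Hedged sketch: one plausible caller of _ux_norec_alloc_core. The SSX
// begin/commit steps are left as comments because their exact API is not
// shown above; the wrapper name is hypothetical.
rc_t allocate_empty_foster_child(btree_page_h &page, PageID &new_page_id) {
    w_assert1(page.latch_mode() == LATCH_EX);  // same precondition the core routine asserts
    // ... open a single-log system transaction (SSX) here ...
    rc_t rc = btree_impl::_ux_norec_alloc_core(page, new_page_id);
    // On error the core routine has already deallocated new_page_id, so the
    // caller only needs to end the SSX accordingly and propagate the code.
    // ... commit or abort the SSX here depending on rc ...
    return rc;
}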
Example #2
void bt_cursor_t::_set_current_page(btree_page_h &page) {
    if (_pid != 0) {
        _release_current_page();
    }
    w_assert1(_pid == 0);
    w_assert1(_pid_bfidx.idx() == 0);
    _pid = page.pid();
    // pin this page for subsequent refix()
    _pid_bfidx.set(page.pin_for_refix());
    _lsn = page.get_page_lsn();
#ifndef USE_ATOMIC_COMMIT
    w_assert1(_lsn.valid()); // must have a valid LSN for _check_page_update to work
#endif
}
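
For symmetry, here is a hedged sketch of what the matching _release_current_page() might look like. The holder's release() method and the convention that _pid == 0 means "no current page" are assumptions inferred from the assertions above, not details confirmed by the example.

// Hedged sketch of the counterpart to _set_current_page(). release() on the
// refix-pin holder and the "_pid == 0 means no page" convention are assumed.
void bt_cursor_t::_release_current_page() {
    if (_pid != 0) {
        w_assert1(_pid_bfidx.idx() != 0);
        _pid_bfidx.release();  // assumed: unpins the frame pinned by pin_for_refix()
        _pid = 0;              // restores the precondition checked in _set_current_page()
    }
}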
Example #3
rc_t bt_cursor_t::_check_page_update(btree_page_h &p)
{
    // was the page changed?
    if (_pid != p.pid() || p.get_page_lsn() != _lsn) {
        // check if the page still contains the key we are based on
        bool found = false;
        if (p.fence_contains(_key)) {
            // It still contains the key; just re-locate _slot.
            p.search(_key, found, _slot);
        } else {
            // The key is outside this page's fence range; re-traverse to locate the right page.
            W_DO( btree_impl::_ux_traverse(_store, _key, btree_impl::t_fence_contain, LATCH_SH, p));
            p.search(_key, found, _slot);
        }
        w_assert1(found || !_needs_lock
            || (!_forward && !_upper_inclusive && !_dont_move_next)); // see _locate_first
        _set_current_page(p);
    }
    return RCOK;
}
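
A hedged sketch of the fix-then-validate pattern this routine supports: refix the cursor's pinned page, re-validate it with _check_page_update, and only then read the slot. refix_direct(), the member names, and LATCH_SH come from the examples; the helper name _refix_and_validate and the final read step are illustrative only.

// Hedged sketch: how a cursor read might combine _set_current_page's pin,
// refix_direct(), and _check_page_update(). The helper name is hypothetical.
rc_t bt_cursor_t::_refix_and_validate(btree_page_h &p) {
    // Refix the page pinned in _set_current_page() with a shared latch.
    W_DO(p.refix_direct(_pid_bfidx.idx(), LATCH_SH));
    // If the page was updated (or the key moved pages) since _lsn was cached,
    // this re-locates _slot, possibly re-traversing, and re-registers the page.
    W_DO(_check_page_update(p));
    // ... _slot now points at the key's position in p; read the record here ...
    return RCOK;
}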
Example #4
rc_t
btree_impl::_ux_lock_key(
    const StoreID&      store,
    btree_page_h&      leaf,
    const void*        keystr,
    size_t             keylen,
    latch_mode_t       latch_mode,
    const okvl_mode&   lock_mode,
    bool               check_only
    )
{
    // Callers:
    // 1. Top-level _ux_lock_key() - I/U/D and search operations; lock conflict is possible
    // 2. _ux_lock_range() - lock conflict is possible
    //
    // Lock conflict outcomes:
    // 1. Deadlock - the requested lock is currently held by another transaction and the
    //    current transaction already holds other locks: the request fails
    // 2. Timeout - the requested lock is currently held by another transaction, but the
    //    current transaction holds no other locks: it is okay to retry

    // For restart operation using lock re-acquisition:
    // 1. On-demand or mixed UNDO - a lock conflict triggers UNDO transaction rollback.
    //    This is a blocking operation: other concurrent transactions asking for the same
    //    lock are blocked, so there is no deadlock.
    // 2. Traditional UNDO - original behavior: either a deadlock error, or a timeout and retry

    lockid_t lid (store, (const unsigned char*) keystr, keylen);
    // First, try to acquire the lock conditionally. Even if this fails, we reuse the inserted lock entry.
    RawLock* entry = nullptr;

    // The lock request behaves as follows:
    // If lock() fails to acquire the lock (we are trying to acquire it while holding the
    // latch) and the transaction holds no other locks, then, because the request is
    // conditional, lock() returns immediately with eCONDLOCKTIMEOUT. This indicates the
    // acquisition failed, but there is no deadlock concern and the lock entry has already
    // been created. In that case the caller (this function) releases the latch and tries
    // again using retry_lock(), a blocking operation; it is safe to retry indefinitely
    // without risking deadlock.
    // If lock() returns eDEADLOCK, the acquisition failed and the current transaction
    // already holds other locks. It is not safe to retry (it would cause further
    // deadlocks), so the caller must abort the current transaction.
    rc_t lock_rc = lm->lock(lid.hash(), lock_mode, true /* check */, false /* wait */,
            !check_only /* acquire */, smthread_t::xct(), timeout_t::WAIT_IMMEDIATE, &entry);

    if (!lock_rc.is_error()) {
        // lucky! we got it immediately. just return.
        return RCOK;
    } else {
        // If it caused a deadlock and this transaction was chosen as the victim, give up (do not retry).
        if (lock_rc.err_num() == eDEADLOCK)
        {
            // The user transaction aborts and rolls itself back upon deadlock detection.
            // Because Express has no deadlock monitor or policy to decide which transaction
            // to roll back during a deadlock (ideally the cheaper transaction would be
            // aborted), the user transaction that detects the deadlock is the one aborted.
            w_assert1(entry == nullptr);
            return lock_rc;
        }

        // We couldn't get it immediately, so unlatch the page and wait.
        w_assert1(lock_rc.err_num() == eCONDLOCKTIMEOUT);
        w_assert1(entry != nullptr);

        // we release the latch here. However, we increment the pin count before that
        // to prevent the page from being evicted.
        pin_for_refix_holder pin_holder(leaf.pin_for_refix()); // automatically releases the pin
        lsn_t prelsn = leaf.get_page_lsn(); // to check if it's modified after this unlatch
        leaf.unfix();
        // then, we try it unconditionally (this will block)
        W_DO(lm->retry_lock(&entry, !check_only /* acquire */));
        // Now we have the lock, but the page might have changed while we were unlatched.
        w_rc_t refix_rc = leaf.refix_direct(pin_holder._idx, latch_mode);
        if (refix_rc.is_error() || leaf.get_page_lsn() != prelsn)
        {
            // release acquired lock
            if (entry != nullptr) {
                w_assert1(!check_only);
                lm->unlock(entry);
            } else {
                w_assert1(check_only);
            }
            if (refix_rc.is_error())
            {
                return refix_rc;
            }
            else
            {
                w_assert1(leaf.get_page_lsn() != prelsn); // unluckily, the page did change while unlatched
                return RC(eLOCKRETRY); // retry!
            }
        }
        return RCOK;
    }
}
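
Because the routine can return eLOCKRETRY when the leaf changed while the latch was released, callers typically loop: re-locate the leaf and request the lock again. The sketch below shows that retry shape; find_leaf_and_slot() is a hypothetical stand-in for the caller's own traversal step, and the loop itself is an assumption about the call site, not code from the example.

// Hedged sketch: a caller-side retry loop around _ux_lock_key().
// find_leaf_and_slot() is hypothetical; it stands in for whatever traversal
// the real caller uses to re-locate the leaf after the latch was dropped.
rc_t lock_key_with_retry(const StoreID &store, const void *keystr, size_t keylen,
                         const okvl_mode &lock_mode, btree_page_h &leaf) {
    while (true) {
        W_DO(find_leaf_and_slot(store, keystr, keylen, leaf));  // hypothetical re-traversal
        rc_t rc = btree_impl::_ux_lock_key(store, leaf, keystr, keylen,
                                           LATCH_SH, lock_mode, false /* check_only */);
        if (!rc.is_error()) {
            return RCOK;               // lock held; leaf is still latched and unchanged
        }
        if (rc.err_num() != eLOCKRETRY) {
            return rc;                 // e.g. eDEADLOCK: caller must abort the transaction
        }
        // eLOCKRETRY: the page changed while we waited; the lock acquired during the
        // wait was released inside _ux_lock_key(), so search and try again.
    }
}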