Example 1
void TruncateLog::run()
{
    // CS TODO fix this
    // start_base();
    // start_log(logdir);
    // start_buffer();
    // start_other();

    // cout << "Taking checkpoint ... ";
    // ss_m::chkpt->take();
    // cout << "OK" << endl;

    // ss_m::SSM->_truncate_log();

    // CS TODO: temporary code to generate empty log file
    const size_t bufsize = 8192;
    char* buffer = new char[bufsize];
    ::memset(buffer, 0, bufsize);
    size_t pos = 0;

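    // First record: a begin-checkpoint logrec placed at offset 0 of the new partition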
    logrec_t* logrec = (logrec_t*) (buffer + pos);
    logrec->init_header(chkpt_begin_log::TYPE);
    reinterpret_cast<chkpt_begin_log*>(logrec)->construct();
    logrec->set_lsn_ck(lsn_t(partition, 0));
    pos += logrec->length();

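    // Second record: a skip logrec marking the current end of the partition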
    logrec = (logrec_t*) (buffer + pos);
    logrec->init_header(skip_log::TYPE);
    reinterpret_cast<skip_log*>(logrec)->construct();
    logrec->set_lsn_ck(lsn_t(partition, pos));
    pos += logrec->length();

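    // Write the buffer out as log.<partition>, truncating any existing file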
    {
        string fname = logdir + "/log." + std::to_string(partition);
        std::ofstream ofs (fname, std::ofstream::out | std::ofstream::binary
                | std::ofstream::trunc);

        ofs.write(buffer, bufsize);
        ofs.close();
    }

    {
        // now generate empty checkpoint
        string fname = logdir + "/chkpt_" + std::to_string(partition) + ".0";
        std::ofstream ofs (fname, std::ofstream::out | std::ofstream::binary
                | std::ofstream::trunc);

        chkpt_t chkpt;
        chkpt.init();
        chkpt.serialize_binary(ofs);

        ofs.close();
    }

    delete[] buffer;
}
Example 2
rc_t fullPipelineTest(ss_m* ssm, test_volume_t* test_vol)
{
    unsigned howManyToInsert = 1000;
    W_DO(populateBtree(ssm, test_vol, howManyToInsert));

    LogArchiver::LogConsumer cons(lsn_t(1,0), BLOCK_SIZE);
    LogArchiver::ArchiverHeap heap(BLOCK_SIZE);
    LogArchiver::ArchiveDirectory dir(test_env->archive_dir, BLOCK_SIZE);
    LogArchiver::BlockAssembly assemb(&dir);

    LogArchiver la(&dir, &cons, &heap, &assemb);
    la.fork();
    la.activate(lsn_t::null, true /* wait */);

    // The consumer's nextLSN tells us that all logrecs up to (but excluding) that
    // LSN were added to the heap. When shutdown is invoked, the heap is emptied
    // using the selection method, thus also guaranteeing that the archive is
    // persistent up to nextLSN.
    while (cons.getNextLSN() < ssm->log->durable_lsn()) {
        usleep(1000); // 1ms
    }

    la.shutdown();
    la.join();

    // TODO use archive scanner to verify:
    // 1) integrity of the archive
    // 2) that no logrecs are missing (scan the log with the same ignores as the
    // log archiver and check whether each logrec is in the archive -- random
    // access, NL join)

    return RCOK;
}
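A minimal sketch of the verification described in the TODO above, reusing the RunMerger pattern from runMergerFullTest further below (buildRunMergerFromDirectory is assumed to be the same helper used there); it checks only record headers and page-id ordering, not completeness against the recovery log:

LogArchiver::ArchiveScanner::RunMerger* merger =
    buildRunMergerFromDirectory(dir);

PageID prevPID = 0;
logrec_t* lr;
while (merger->next(lr)) {
    EXPECT_TRUE(lr->valid_header(lr->lsn_ck())); // each archived logrec parses
    EXPECT_TRUE(lr->pid() >= prevPID);           // archive output is sorted by page id
    prevPID = lr->pid();
}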
Example 3
// TODO implement heapTestFactory that uses LogFactory
rc_t heapTestReal(ss_m* ssm, test_volume_t* test_vol)
{
    unsigned howManyToInsert = 1000;
    W_DO(populateBtree(ssm, test_vol, howManyToInsert));

    lsn_t lastLSN = ssm->log->durable_lsn();
    lsn_t prevLSN = lsn_t(1,0);
    LogArchiver::LogConsumer cons(prevLSN, BLOCK_SIZE);
    cons.open(lastLSN);

    LogArchiver::ArchiverHeap heap(BLOCK_SIZE);

    logrec_t* lr;
    bool pushed = false;
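    // Push log records until the heap's workspace fills up; when a push fails,
    // drain the heap (verifying sort order) and retry the failed record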
    while (cons.next(lr)) {
        pushed = heap.push(lr, false);
        if (!pushed) {
            emptyHeapAndCheck(heap);
            pushed = heap.push(lr, false);
            EXPECT_TRUE(pushed);
        }
    }

    emptyHeapAndCheck(heap);
    EXPECT_EQ(0, heap.size());

    return RCOK;
}
Example 4
rc_t consumerTest(ss_m* ssm, test_volume_t* test_vol)
{
    unsigned howManyToInsert = 1000;
    W_DO(populateBtree(ssm, test_vol, howManyToInsert));

    lsn_t lastLSN = ssm->log->durable_lsn();
    lsn_t prevLSN = lsn_t(1,0);
    LogArchiver::LogConsumer cons(prevLSN, BLOCK_SIZE);
    cons.open(lastLSN);

    logrec_t* lr;
    unsigned int insertCount = 0;
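    // Scan every log record from prevLSN up to lastLSN, counting B-tree inserts
    // and checking that LSNs are returned in strictly increasing order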
    while (cons.next(lr)) {
        if (lr->type() == logrec_t::t_btree_insert ||
                lr->type() == logrec_t::t_btree_insert_nonghost)
        {
            insertCount++;
        }
        EXPECT_TRUE(lr->lsn_ck() > prevLSN);
        prevLSN = lr->lsn_ck();
    }

    EXPECT_EQ(howManyToInsert, insertCount);

    return RCOK;
}
Example 5
rc_t runMergerFullTest(ss_m* ssm, test_volume_t* test_vol)
{
    unsigned howManyToInsert = 2000;
    W_DO(populateBtree(ssm, test_vol, howManyToInsert));

    LogArchiver::LogConsumer cons(lsn_t(1,0), BLOCK_SIZE);
    LogArchiver::ArchiverHeap heap(BLOCK_SIZE);
    LogArchiver::ArchiveDirectory dir(test_env->archive_dir, BLOCK_SIZE, false);
    LogArchiver::BlockAssembly assemb(&dir);

    LogArchiver la(&dir, &cons, &heap, &assemb);
    la.fork();
    la.activate(lsn_t::null, true /* wait */);
    // wait for the log archiver to consume everything up to the durable LSN
    while (cons.getNextLSN() < ssm->log->durable_lsn()) {
        usleep(1000); // 1ms
    }

    LogArchiver::ArchiveScanner::RunMerger* merger =
        buildRunMergerFromDirectory(dir);

    PageID prevPID = 0;
    lsn_t prevLSN = lsn_t::null;
    logrec_t* lr;
    while (merger->next(lr)) {
        EXPECT_TRUE(lr->valid_header(lr->lsn_ck()));
        EXPECT_TRUE(lr->pid() >= prevPID);
        EXPECT_TRUE(lr->lsn_ck() != prevLSN);
        prevPID = lr->pid();
        prevLSN = lr->lsn_ck();
    }

    la.shutdown();
    la.join();

    return RCOK;
}
Example 6
rc_t
log_core::fetch(lsn_t& ll, void* buf, lsn_t* nxt, const bool forward)
{
    INC_TSTAT(log_fetches);

    lintel::atomic_thread_fence(lintel::memory_order_acquire);
    if (ll < _fetch_buf_end && ll >= _fetch_buf_begin)
    {
        // log record can be found in fetch buffer -- no I/O
        size_t i = ll.hi() - _fetch_buf_first;
        if (_fetch_buffers[i]) {
            logrec_t* rp = (logrec_t*) (_fetch_buffers[i] + ll.lo());
            w_assert1(rp->valid_header(ll));

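            // A skip logrec marks the end of a partition: going forward, jump to
            // the next partition; going backward, step to the previous record via
            // the lsn_t trailer stored immediately before the skip record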
            if (rp->type() == logrec_t::t_skip)
            {
                if (forward) {
                    ll = lsn_t(ll.hi() + 1, 0);
                    return fetch(ll, buf, nxt, forward);
                }
                else { // backward scan
                    ll = *((lsn_t*) (_fetch_buffers[i] + ll.lo() - sizeof(lsn_t)));
                }

                rp = (logrec_t*) (_fetch_buffers[i] + ll.lo());
                w_assert1(rp->valid_header(ll));
            }

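            // If requested, compute the neighboring LSN: forward, advance past this
            // record; backward, read the lsn_t trailer stored just before it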
            if (nxt) {
                if (!forward && ll.lo() == 0) {
                    auto p = _storage->get_partition(ll.hi() - 1);
                    *nxt = p ? lsn_t(p->num(), p->get_size()) : lsn_t::null;
                }
                else {
                    if (forward) {
                        *nxt = ll;
                        nxt->advance(rp->length());
                    }
                    else {
                        memcpy(nxt, (char*) rp - sizeof(lsn_t), sizeof(lsn_t));
                    }
                }
            }

            memcpy(buf, rp, rp->length());
            INC_TSTAT(log_buffer_hit);

            return RCOK;
        }
    }

    if (forward && ll >= durable_lsn()) {
        w_assert1(ll == durable_lsn());
        // reading the durable_lsn during recovery yields a skip log record
        return RC(eEOF);
    }
    if (!forward && ll == lsn_t::null) {
        // for a backward scan, the nxt pointer is set to null
        // when the first log record in the first partition is reached
        return RC(eEOF);
    }

    auto p = _storage->get_partition(ll.hi());
    if(!p) { return RC(eEOF); }
    W_DO(p->open_for_read());

    logrec_t* rp;
    lsn_t prev_lsn = lsn_t::null;
    DBGOUT3(<< "fetch @ lsn: " << ll);
    W_COERCE(p->read(rp, ll, forward ? nullptr : &prev_lsn));
    w_assert1(rp->valid_header(ll));

    // handle skip log record
    if (rp->type() == logrec_t::t_skip)
    {
        p->release_read();
        if (forward) {
            DBGTHRD(<<"seeked to skip" << ll );
            DBGTHRD(<<"getting next partition.");
            ll = lsn_t(ll.hi() + 1, 0);

            p = _storage->get_partition(ll.hi());
            if(!p) { return RC(eEOF); }

            // re-read
            DBGOUT3(<< "fetch @ lsn: " << ll);
            W_DO(p->open_for_read());
            W_COERCE(p->read(rp, ll));
            w_assert1(rp->valid_header(ll));
        }
        else { // backward scan
Example 7
/*********************************************************************
*
*  chkpt_t::scan_log()
*
*  Scans the log backwards, starting from the durable LSN, until it reaches
*  the t_chkpt_begin log record of the latest completed checkpoint.
*
*********************************************************************/
void chkpt_t::scan_log()
{
    init();

    lsn_t scan_start = smlevel_0::log->durable_lsn();
    if (scan_start == lsn_t(1,0)) { return; }

    log_i scan(*smlevel_0::log, scan_start, false); // false == backward scan
    logrec_t r;
    lsn_t lsn = lsn_t::max;   // LSN of the retrieved log record

    // Set when scan finds begin of previous checkpoint
    lsn_t scan_stop = lsn_t(1,0);

    // CS TODO: not needed with file serialization
    // bool insideChkpt = false;
    while (lsn > scan_stop && scan.xct_next(lsn, r))
    {
        if (r.is_skip() || r.type() == logrec_t::t_comment) {
            continue;
        }

        if (!r.tid().is_null()) {
            if (r.tid() > get_highest_tid()) {
                set_highest_tid(r.tid());
            }

            if (r.is_page_update() || r.is_cpsn()) {
                mark_xct_active(r.tid(), lsn, lsn);

                if (is_xct_active(r.tid())) {
                    if (!r.is_cpsn()) { acquire_lock(r); }
                }
                else if (r.xid_prev().is_null()) {
                    // We won't see this xct again -- delete it
                    delete_xct(r.tid());
                }
            }
        }

        if (r.is_page_update()) {
            w_assert0(r.is_redo());
            mark_page_dirty(r.pid(), lsn, lsn);

            if (r.is_multi_page()) {
                w_assert0(r.pid2() != 0);
                mark_page_dirty(r.pid2(), lsn, lsn);
            }
        }

        switch (r.type())
        {
            case logrec_t::t_chkpt_begin:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     // Signal to stop backward log scan loop now
                //     scan_stop = lsn;
                // }
                {
                    fs::path fpath = smlevel_0::log->get_storage()->make_chkpt_path(lsn);
                    if (fs::exists(fpath)) {
                        ifstream ifs(fpath.string(), ios::binary);
                        deserialize_binary(ifs);
                        ifs.close();
                        scan_stop = lsn;
                    }
                }

                break;

            case logrec_t::t_chkpt_bf_tab:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_bf_tab_t* dp = (chkpt_bf_tab_t*) r.data();
                //     for (uint i = 0; i < dp->count; i++) {
                //         mark_page_dirty(dp->brec[i].pid, dp->brec[i].page_lsn,
                //                 dp->brec[i].rec_lsn);
                //     }
                // }
                break;


            case logrec_t::t_chkpt_xct_lock:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_xct_lock_t* dp = (chkpt_xct_lock_t*) r.data();
                //     if (is_xct_active(dp->tid)) {
                //         for (uint i = 0; i < dp->count; i++) {
                //             add_lock(dp->tid, dp->xrec[i].lock_mode,
                //                     dp->xrec[i].lock_hash);
                //         }
                //     }
                // }
                break;

            case logrec_t::t_chkpt_xct_tab:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_xct_tab_t* dp = (chkpt_xct_tab_t*) r.data();
                //     for (size_t i = 0; i < dp->count; ++i) {
                //         tid_t tid = dp->xrec[i].tid;
                //         w_assert1(!tid.is_null());
                //         mark_xct_active(tid, dp->xrec[i].first_lsn,
                //                 dp->xrec[i].last_lsn);
                //     }
                // }
                break;


                // CS TODO: not needed with file serialization
            // case logrec_t::t_chkpt_end:
                // checkpoints should not run concurrently
                // w_assert0(!insideChkpt);
                // insideChkpt = true;
                // break;

            // CS TODO: why do we need this? Isn't it related to 2PC?
            // case logrec_t::t_xct_freeing_space:
            case logrec_t::t_xct_end:
            case logrec_t::t_xct_abort:
                mark_xct_ended(r.tid());
                break;

            case logrec_t::t_xct_end_group:
                {
                    // CS TODO: is this type of group commit still used?
                    w_assert0(false);
                    const xct_list_t* list = (xct_list_t*) r.data();
                    uint listlen = list->count;
                    for(uint i=0; i<listlen; i++) {
                        tid_t tid = list->xrec[i].tid;
                        mark_xct_ended(tid);
                    }
                }
                break;

            case logrec_t::t_page_write:
                {
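                    // Payload layout: PageID of the first page written, the clean
                    // LSN, and the count of consecutive pages covered by this write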
                    char* pos = r.data();

                    PageID pid = *((PageID*) pos);
                    pos += sizeof(PageID);

                    lsn_t clean_lsn = *((lsn_t*) pos);
                    pos += sizeof(lsn_t);

                    uint32_t count = *((uint32_t*) pos);
                    PageID end = pid + count;

                    while (pid < end) {
                        mark_page_clean(pid, clean_lsn);
                        pid++;
                    }
                }
                break;

            case logrec_t::t_add_backup:
                {
                    const char* dev = (const char*)(r.data_ssx());
                    add_backup(dev);
                }
                break;

            case logrec_t::t_chkpt_backup_tab:
                // CS TODO
                break;

            case logrec_t::t_restore_begin:
            case logrec_t::t_restore_end:
            case logrec_t::t_restore_segment:
            case logrec_t::t_chkpt_restore_tab:
                // CS TODO - IMPLEMENT!
                break;

            default:
                break;

        } //switch
    } //while

    w_assert0(lsn == scan_stop);

    cleanup();
}