/*
 * Walk every event queue (EQ) of this vCPU and mark the guest page
 * backing each configured queue as dirty, so that dirty-page logging
 * (live migration) transfers the EQ contents.
 *
 * Returns 0 on success, -ENOENT if the vCPU has no XIVE state attached.
 */
static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	if (!xc)
		return -ENOENT;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *queue = &xc->queues[i];

		/* Priorities without a provisioned EQ page are skipped */
		if (queue->qpage) {
			/* Mark EQ page dirty for migration */
			mark_page_dirty(vcpu->kvm, gpa_to_gfn(queue->guest_qaddr));
		}
	}

	return 0;
}
/*
 * Rebuild in-memory checkpoint state from a binary checkpoint file.
 *
 * Reads, in this exact order (mirroring the serializer):
 *   1. highest_tid
 *   2. dirty-page table: count, then (PageID, buf_tab_entry_t) pairs
 *   3. transaction table: count, then per-xct (tid, state, last_lsn,
 *      first_lsn); for each non-ended xct, its lock table follows
 *      (count, then lock_info_t entries).
 *
 * @param ifs  open binary input stream positioned at the start of the
 *             checkpoint image; W_FATAL if the stream is not open.
 *
 * Fix: removed stray empty statement (";;") after the cerr line.
 * NOTE(review): the ifs.read() calls are unchecked — a truncated or
 * corrupt checkpoint file yields garbage values silently. Consider
 * validating ifs after each read; failure policy is project-specific,
 * so left unchanged here.
 */
void chkpt_t::deserialize_binary(ifstream& ifs)
{
    if(!ifs.is_open()) {
        cerr << "Could not open input stream for chkpt file" << endl;
        W_FATAL(fcINTERNAL);
    }

    ifs.read((char*)&highest_tid, sizeof(tid_t));

    // --- Dirty page table ---
    size_t buf_tab_size;
    ifs.read((char*)&buf_tab_size, sizeof(size_t));
    for(uint i=0; i<buf_tab_size; i++) {
        PageID pid;
        ifs.read((char*)&pid, sizeof(PageID));
        buf_tab_entry_t entry;
        ifs.read((char*)&entry, sizeof(buf_tab_entry_t));
        DBGOUT1(<<"pid[]="<<pid<< " , " <<
                  "rec_lsn[]="<<entry.rec_lsn<< " , " <<
                  "page_lsn[]="<<entry.page_lsn);
        // buf_tab[pid] = entry;
        mark_page_dirty(pid, entry.page_lsn, entry.rec_lsn);
    }

    // --- Active transaction table ---
    size_t xct_tab_size;
    ifs.read((char*)&xct_tab_size, sizeof(size_t));
    for(uint i=0; i<xct_tab_size; i++) {
        tid_t tid;
        ifs.read((char*)&tid, sizeof(tid_t));
        xct_tab_entry_t entry;
        // Fields are read individually, not as one struct, matching the
        // serializer's field-by-field layout.
        ifs.read((char*)&entry.state, sizeof(smlevel_0::xct_state_t));
        ifs.read((char*)&entry.last_lsn, sizeof(lsn_t));
        ifs.read((char*)&entry.first_lsn, sizeof(lsn_t));
        DBGOUT1(<<"tid[]="<<tid<<" , " <<
                  "state[]="<<entry.state<< " , " <<
                  "last_lsn[]="<<entry.last_lsn<<" , " <<
                  "first_lsn[]="<<entry.first_lsn);
        if (entry.state != smlevel_0::xct_ended) {
            mark_xct_active(tid, entry.first_lsn, entry.last_lsn);

            // The lock table is only present in the file for xcts that
            // are still active after the mark above.
            if (is_xct_active(tid)) {
                size_t lock_tab_size;
                ifs.read((char*)&lock_tab_size, sizeof(size_t));
                for(uint j=0; j<lock_tab_size; j++) {
                    lock_info_t lock_entry;
                    ifs.read((char*)&lock_entry, sizeof(lock_info_t));
                    // entry.locks.push_back(lock_entry);
                    add_lock(tid, lock_entry.lock_mode, lock_entry.lock_hash);

                    DBGOUT1(<< " lock_mode[]="<<lock_entry.lock_mode
                            << " , lock_hash[]="<<lock_entry.lock_hash);
                }
            }
            // xct_tab[tid] = entry;
        }
    }
}
/*********************************************************************
 *
 *  chkpt_t::scan_log()
 *
 *  Scans the log backwards, starting from the current durable LSN,
 *  until the t_chkpt_begin log record corresponding to the latest
 *  completed checkpoint (i.e. one whose serialized checkpoint file
 *  exists on disk). Rebuilds the dirty-page table, the active
 *  transaction table, and their lock tables along the way.
 *
 *********************************************************************/
void chkpt_t::scan_log()
{
    // Reset all in-memory checkpoint state before rebuilding it.
    init();

    lsn_t scan_start = smlevel_0::log->durable_lsn();
    // lsn_t(1,0) is the very first LSN: nothing durable yet, nothing to do.
    if (scan_start == lsn_t(1,0)) {
        return;
    }

    log_i scan(*smlevel_0::log, scan_start, false); // false == backward scan
    logrec_t r;
    lsn_t lsn = lsn_t::max;   // LSN of the retrieved log record

    // Set when scan finds begin of previous checkpoint
    lsn_t scan_stop = lsn_t(1,0);

    // CS TODO: not needed with file serialization
    // bool insideChkpt = false;
    while (lsn > scan_stop && scan.xct_next(lsn, r)) {
        // Skip filler and purely informational records.
        if (r.is_skip() || r.type() == logrec_t::t_comment) {
            continue;
        }

        if (!r.tid().is_null()) {
            // Track the highest transaction id seen anywhere in the scan.
            if (r.tid() > get_highest_tid()) {
                set_highest_tid(r.tid());
            }

            if (r.is_page_update() || r.is_cpsn()) {
                // Scanning backwards: first sighting of a tid means it was
                // active at the point this scan covers (unless an end/abort
                // record was already seen, handled in the switch below).
                mark_xct_active(r.tid(), lsn, lsn);

                if (is_xct_active(r.tid())) {
                    // Compensation records (CLRs) do not acquire locks.
                    if (!r.is_cpsn()) { acquire_lock(r); }
                }
                else if (r.xid_prev().is_null()) {
                    // We won't see this xct again -- delete it
                    delete_xct(r.tid());
                }
            }
        }

        if (r.is_page_update()) {
            w_assert0(r.is_redo());
            // Use this record's LSN as both rec_lsn and page_lsn; earlier
            // (backward-scan: later-visited) records refine rec_lsn.
            mark_page_dirty(r.pid(), lsn, lsn);

            if (r.is_multi_page()) {
                w_assert0(r.pid2() != 0);
                mark_page_dirty(r.pid2(), lsn, lsn);
            }
        }

        switch (r.type()) {
            case logrec_t::t_chkpt_begin:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     // Signal to stop backward log scan loop now
                //     scan_stop = lsn;
                // }
                {
                    // If the serialized checkpoint file for this begin-LSN
                    // exists, load it and stop the scan at this record.
                    // If it does not exist (checkpoint never completed),
                    // keep scanning further back.
                    fs::path fpath = smlevel_0::log->get_storage()->make_chkpt_path(lsn);
                    if (fs::exists(fpath)) {
                        ifstream ifs(fpath.string(), ios::binary);
                        deserialize_binary(ifs);
                        ifs.close();
                        scan_stop = lsn;
                    }
                }
                break;

            case logrec_t::t_chkpt_bf_tab:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_bf_tab_t* dp = (chkpt_bf_tab_t*) r.data();
                //     for (uint i = 0; i < dp->count; i++) {
                //         mark_page_dirty(dp->brec[i].pid, dp->brec[i].page_lsn,
                //                 dp->brec[i].rec_lsn);
                //     }
                // }
                break;

            case logrec_t::t_chkpt_xct_lock:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_xct_lock_t* dp = (chkpt_xct_lock_t*) r.data();
                //     if (is_xct_active(dp->tid)) {
                //         for (uint i = 0; i < dp->count; i++) {
                //             add_lock(dp->tid, dp->xrec[i].lock_mode,
                //                     dp->xrec[i].lock_hash);
                //         }
                //     }
                // }
                break;

            case logrec_t::t_chkpt_xct_tab:
                // CS TODO: not needed with file serialization
                // if (insideChkpt) {
                //     const chkpt_xct_tab_t* dp = (chkpt_xct_tab_t*) r.data();
                //     for (size_t i = 0; i < dp->count; ++i) {
                //         tid_t tid = dp->xrec[i].tid;
                //         w_assert1(!tid.is_null());
                //         mark_xct_active(tid, dp->xrec[i].first_lsn,
                //                 dp->xrec[i].last_lsn);
                //     }
                // }
                break;

            // CS TODO: not needed with file serialization
            // case logrec_t::t_chkpt_end:
            //     // checkpoints should not run concurrently
            //     w_assert0(!insideChkpt);
            //     insideChkpt = true;
                // NOTE(review): this break is a live leftover from the
                // commented-out t_chkpt_end case above; it is unreachable
                // (the previous case already broke) but harmless.
                break;

            // CS TODO: why do we need this? Isn't it related to 2PC?
            // case logrec_t::t_xct_freeing_space:
            case logrec_t::t_xct_end:
            case logrec_t::t_xct_abort:
                // Seen before any of the xct's updates (backward scan):
                // remember that this xct is finished.
                mark_xct_ended(r.tid());
                break;

            case logrec_t::t_xct_end_group:
                {
                    // CS TODO: is this type of group commit still used?
                    // Asserts unconditionally: this record type is believed
                    // dead; the code below is kept for reference.
                    w_assert0(false);
                    const xct_list_t* list = (xct_list_t*) r.data();
                    uint listlen = list->count;
                    for(uint i=0; i<listlen; i++) {
                        tid_t tid = list->xrec[i].tid;
                        mark_xct_ended(tid);
                    }
                }
                break;

            case logrec_t::t_page_write:
                {
                    // Payload layout: PageID first_pid, lsn_t clean_lsn,
                    // uint32_t count -- a contiguous run of pages that were
                    // flushed up to clean_lsn.
                    char* pos = r.data();

                    PageID pid = *((PageID*) pos);
                    pos += sizeof(PageID);

                    lsn_t clean_lsn = *((lsn_t*) pos);
                    pos += sizeof(lsn_t);

                    uint32_t count = *((uint32_t*) pos);
                    PageID end = pid + count;

                    while (pid < end) {
                        mark_page_clean(pid, clean_lsn);
                        pid++;
                    }
                }
                break;

            case logrec_t::t_add_backup:
                {
                    // Payload is the backup device path (NUL-terminated,
                    // presumably -- TODO confirm against the serializer).
                    const char* dev = (const char*)(r.data_ssx());
                    add_backup(dev);
                }
                break;

            case logrec_t::t_chkpt_backup_tab:
                // CS TODO
                break;

            case logrec_t::t_restore_begin:
            case logrec_t::t_restore_end:
            case logrec_t::t_restore_segment:
            case logrec_t::t_chkpt_restore_tab:
                // CS TODO - IMPLEMENT!
                break;

            default:
                break;

        } //switch
    } //while

    // The loop must terminate exactly at the checkpoint-begin record whose
    // file we loaded; otherwise recovery state is incomplete.
    w_assert0(lsn == scan_stop);

    cleanup();
}