ibool
flst_validate(
/*==========*/
                                /* out: TRUE if ok */
        flst_base_node_t*       base,   /* in: pointer to base node of list */
        mtr_t*                  mtr1)   /* in: mtr */
{
        ulint           space;
        flst_node_t*    node;
        fil_addr_t      node_addr;
        fil_addr_t      base_addr;
        ulint           len;
        ulint           i;
        mtr_t           mtr2;

        ut_ad(base);
        ut_ad(mtr_memo_contains(mtr1, buf_block_align(base),
                                MTR_MEMO_PAGE_X_FIX));

        /* We use two mini-transaction handles: the first is used to lock
        the base node, and prevent other threads from modifying the list.
        The second is used to traverse the list. We cannot run the second
        mtr without committing it at times, because if the list is long,
        then the x-locked pages could fill the buffer resulting in a
        deadlock. */

        /* Find out the space id */
        buf_ptr_get_fsp_addr(base, &space, &base_addr);

        len = flst_get_len(base, mtr1);
        node_addr = flst_get_first(base, mtr1);

        for (i = 0; i < len; i++) {
                mtr_start(&mtr2);

                node = fut_get_ptr(space, node_addr, RW_X_LATCH, &mtr2);
                node_addr = flst_get_next_addr(node, &mtr2);

                mtr_commit(&mtr2);      /* Commit mtr2 each round to prevent
                                        buffer becoming full */
        }

        ut_a(fil_addr_is_null(node_addr));

        node_addr = flst_get_last(base, mtr1);

        for (i = 0; i < len; i++) {
                mtr_start(&mtr2);

                node = fut_get_ptr(space, node_addr, RW_X_LATCH, &mtr2);
                node_addr = flst_get_prev_addr(node, &mtr2);

                mtr_commit(&mtr2);      /* Commit mtr2 each round to prevent
                                        buffer becoming full */
        }

        ut_a(fil_addr_is_null(node_addr));

        return(TRUE);
}
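/* Illustrative sketch, not InnoDB code: the same two-pass validation pattern
as flst_validate() above, replayed on a plain in-memory doubly linked list.
The ex_latched counter stands in for the pages x-latched by mtr2: a node is
"latched" before it is read and released before the next iteration, just as
the real code commits mtr2 each round so that x-latched pages do not pile up
and fill the buffer pool. All ex_* names are invented for this example. */

#include <assert.h>
#include <stddef.h>

typedef struct ex_node_t {
        struct ex_node_t*       next;
        struct ex_node_t*       prev;
} ex_node_t;

typedef struct ex_base_t {
        size_t          len;    /* stored length of the list */
        ex_node_t*      first;
        ex_node_t*      last;
} ex_base_t;

static int      ex_latched = 0; /* stands in for pages pinned by mtr2 */

static ex_node_t*
ex_get_ptr(ex_node_t* node)     /* "latch" a node before reading it */
{
        ex_latched++;
        return(node);
}

static void
ex_release(void)                /* counterpart of mtr_commit(&mtr2) */
{
        ex_latched--;
}

static int
ex_validate(const ex_base_t* base)
{
        ex_node_t*      node;
        size_t          i;

        /* Forward pass: follow next pointers len times, releasing the
        "latch" each round so at most one node is held at a time. */
        node = base->first;
        for (i = 0; i < base->len; i++) {
                ex_node_t*      cur;

                assert(node != NULL);
                cur = ex_get_ptr(node);
                node = cur->next;
                ex_release();
                assert(ex_latched == 0);
        }
        assert(node == NULL);   /* the chain must end after exactly len nodes */

        /* Backward pass: the same check via the prev pointers. */
        node = base->last;
        for (i = 0; i < base->len; i++) {
                ex_node_t*      cur;

                assert(node != NULL);
                cur = ex_get_ptr(node);
                node = cur->prev;
                ex_release();
                assert(ex_latched == 0);
        }
        assert(node == NULL);

        return(1);
}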
/********************************************************************//**
Adds a node as the last node in a list. */
UNIV_INTERN
void
flst_add_last(
/*==========*/
        flst_base_node_t*       base,   /*!< in: pointer to base node of list */
        flst_node_t*            node,   /*!< in: node to add */
        mtr_t*                  mtr)    /*!< in: mini-transaction handle */
{
        ulint           space;
        fil_addr_t      node_addr;
        ulint           len;
        fil_addr_t      last_addr;
        flst_node_t*    last_node;

        ut_ad(mtr && base && node);
        ut_ad(base != node);
        ut_ad(mtr_memo_contains_page(mtr, base, MTR_MEMO_PAGE_X_FIX));
        ut_ad(mtr_memo_contains_page(mtr, node, MTR_MEMO_PAGE_X_FIX));

        len = flst_get_len(base, mtr);
        last_addr = flst_get_last(base, mtr);

        buf_ptr_get_fsp_addr(node, &space, &node_addr);

        /* If the list is not empty, call flst_insert_after */
        if (len != 0) {
                if (last_addr.page == node_addr.page) {
                        last_node = page_align(node) + last_addr.boffset;
                } else {
                        ulint   zip_size = fil_space_get_zip_size(space);

                        last_node = fut_get_ptr(space, zip_size, last_addr,
                                                RW_X_LATCH, mtr);
                }

                flst_insert_after(base, last_node, node, mtr);
        } else {
                /* else call flst_add_to_empty */
                flst_add_to_empty(base, node, mtr);
        }
}
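/* Illustrative sketch, not InnoDB code: the branch structure of
flst_add_last() above on a plain in-memory list. ex_insert_after() and
ex_add_to_empty() play the roles of flst_insert_after() and
flst_add_to_empty(); the ex_node_t/ex_base_t types are repeated from the
previous sketch so that this one stands alone, and all ex_* names are
invented. The real function additionally resolves the last node's file
address to a pointer first, skipping the page fetch when that node lives on
the same page as the node being added. */

#include <assert.h>
#include <stddef.h>

typedef struct ex_node_t {
        struct ex_node_t*       next;
        struct ex_node_t*       prev;
} ex_node_t;

typedef struct ex_base_t {
        size_t          len;
        ex_node_t*      first;
        ex_node_t*      last;
} ex_base_t;

static void
ex_add_to_empty(ex_base_t* base, ex_node_t* node)
{
        assert(base->len == 0);

        node->prev = node->next = NULL;
        base->first = base->last = node;
        base->len = 1;
}

static void
ex_insert_after(ex_base_t* base, ex_node_t* after, ex_node_t* node)
{
        node->prev = after;
        node->next = after->next;

        if (after->next != NULL) {
                after->next->prev = node;
        } else {
                base->last = node;      /* "after" was the last node */
        }

        after->next = node;
        base->len++;
}

static void
ex_add_last(ex_base_t* base, ex_node_t* node)
{
        if (base->len != 0) {
                /* Non-empty list: link the new node after the current
                last one. */
                ex_insert_after(base, base->last, node);
        } else {
                /* Empty list: the node becomes both first and last. */
                ex_add_to_empty(base, node);
        }
}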
/********************************************************************//**
Removes unnecessary history data from a rollback segment. */
static
void
trx_purge_truncate_rseg_history(
/*============================*/
        trx_rseg_t*     rseg,           /*!< in: rollback segment */
        trx_id_t        limit_trx_no,   /*!< in: remove update undo logs whose
                                        trx number is < limit_trx_no */
        undo_no_t       limit_undo_no)  /*!< in: if transaction number is
                                        equal to limit_trx_no, truncate undo
                                        records with undo number
                                        < limit_undo_no */
{
        fil_addr_t      hdr_addr;
        fil_addr_t      prev_hdr_addr;
        trx_rsegf_t*    rseg_hdr;
        page_t*         undo_page;
        trx_ulogf_t*    log_hdr;
        trx_usegf_t*    seg_hdr;
        ulint           n_removed_logs = 0;
        mtr_t           mtr;
        trx_id_t        undo_trx_no;

        mtr_start(&mtr);
        mutex_enter(&(rseg->mutex));

        rseg_hdr = trx_rsegf_get(rseg->space, rseg->zip_size,
                                 rseg->page_no, &mtr);

        hdr_addr = trx_purge_get_log_from_hist(
                flst_get_last(rseg_hdr + TRX_RSEG_HISTORY, &mtr));
loop:
        if (hdr_addr.page == FIL_NULL) {

                mutex_exit(&(rseg->mutex));

                mtr_commit(&mtr);

                return;
        }

        undo_page = trx_undo_page_get(rseg->space, rseg->zip_size,
                                      hdr_addr.page, &mtr);

        log_hdr = undo_page + hdr_addr.boffset;

        undo_trx_no = mach_read_from_8(log_hdr + TRX_UNDO_TRX_NO);

        if (undo_trx_no >= limit_trx_no) {
                if (undo_trx_no == limit_trx_no) {
                        trx_undo_truncate_start(rseg, rseg->space,
                                                hdr_addr.page,
                                                hdr_addr.boffset,
                                                limit_undo_no);
                }

                mutex_enter(&kernel_mutex);
                ut_a(trx_sys->rseg_history_len >= n_removed_logs);
                trx_sys->rseg_history_len -= n_removed_logs;
                mutex_exit(&kernel_mutex);

                flst_truncate_end(rseg_hdr + TRX_RSEG_HISTORY,
                                  log_hdr + TRX_UNDO_HISTORY_NODE,
                                  n_removed_logs, &mtr);

                mutex_exit(&(rseg->mutex));
                mtr_commit(&mtr);

                return;
        }

        prev_hdr_addr = trx_purge_get_log_from_hist(
                flst_get_prev_addr(log_hdr + TRX_UNDO_HISTORY_NODE, &mtr));
        n_removed_logs++;

        seg_hdr = undo_page + TRX_UNDO_SEG_HDR;

        if ((mach_read_from_2(seg_hdr + TRX_UNDO_STATE) == TRX_UNDO_TO_PURGE)
            && (mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG) == 0)) {

                /* We can free the whole log segment */

                mutex_exit(&(rseg->mutex));
                mtr_commit(&mtr);

                trx_purge_free_segment(rseg, hdr_addr, n_removed_logs);

                n_removed_logs = 0;
        } else {
                mutex_exit(&(rseg->mutex));
                mtr_commit(&mtr);
        }

        mtr_start(&mtr);
        mutex_enter(&(rseg->mutex));

        rseg_hdr = trx_rsegf_get(rseg->space, rseg->zip_size,
                                 rseg->page_no, &mtr);

        hdr_addr = prev_hdr_addr;

        goto loop;
}
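/* Illustrative sketch, not InnoDB code: the shape of the truncation loop
above on a plain in-memory history list. As in the rollback segment history,
the newest log header sits at the front and the oldest at the back; the walk
starts from the oldest entry, moves toward newer ones, counts removable
entries, and then unlinks the counted tail in one step (the counterpart of
flst_truncate_end()). Segment freeing, latching, and the limit_undo_no
refinement are left out. All ex_* names are invented for this example. */

#include <assert.h>
#include <stddef.h>

typedef struct ex_log_t {
        struct ex_log_t*        prev;   /* toward newer entries */
        struct ex_log_t*        next;   /* toward older entries */
        unsigned long           trx_no;
} ex_log_t;

typedef struct ex_hist_t {
        size_t          len;
        ex_log_t*       first;  /* newest log header */
        ex_log_t*       last;   /* oldest log header */
} ex_hist_t;

/* Removes every log whose trx_no is below limit_trx_no, walking from the
oldest end; returns the number of removed logs. The removed nodes are only
unlinked here: in InnoDB they live on undo pages and are reclaimed when the
undo log segment itself is freed. */
static size_t
ex_truncate_history(ex_hist_t* hist, unsigned long limit_trx_no)
{
        ex_log_t*       log = hist->last;
        size_t          n_removed = 0;

        while (log != NULL && log->trx_no < limit_trx_no) {
                log = log->prev;        /* step toward newer entries */
                n_removed++;
        }

        /* "log" is now the oldest surviving entry, or NULL if the whole
        list was removed. */
        hist->last = log;
        if (log != NULL) {
                log->next = NULL;
        } else {
                hist->first = NULL;
        }

        assert(hist->len >= n_removed);
        hist->len -= n_removed;

        return(n_removed);
}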
/***************************************************************************
Creates and initializes a rollback segment object. The values for the
fields are read from the header. The object is inserted into the rseg
list of the trx system object and a pointer is inserted into the rseg
array in the trx system object.
@return own: rollback segment object */
static
trx_rseg_t*
trx_rseg_mem_create(
/*================*/
        ulint   id,             /*!< in: rollback segment id */
        ulint   space,          /*!< in: space where the segment is placed */
        ulint   zip_size,       /*!< in: compressed page size in bytes
                                or 0 for uncompressed pages */
        ulint   page_no,        /*!< in: page number of the segment header */
        mtr_t*  mtr)            /*!< in: mtr */
{
        ulint           len;
        trx_rseg_t*     rseg;
        fil_addr_t      node_addr;
        trx_rsegf_t*    rseg_header;
        trx_ulogf_t*    undo_log_hdr;
        ulint           sum_of_undo_sizes;

        ut_ad(mutex_own(&kernel_mutex));

        rseg = mem_zalloc(sizeof(trx_rseg_t));

        rseg->id = id;
        rseg->space = space;
        rseg->zip_size = zip_size;
        rseg->page_no = page_no;

        mutex_create(rseg_mutex_key, &rseg->mutex, SYNC_RSEG);

        UT_LIST_ADD_LAST(rseg_list, trx_sys->rseg_list, rseg);

        trx_sys_set_nth_rseg(trx_sys, id, rseg);

        rseg_header = trx_rsegf_get_new(space, zip_size, page_no, mtr);

        rseg->max_size = mtr_read_ulint(rseg_header + TRX_RSEG_MAX_SIZE,
                                        MLOG_4BYTES, mtr);

        /* Initialize the undo log lists according to the rseg header */

        sum_of_undo_sizes = trx_undo_lists_init(rseg);

        rseg->curr_size = mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
                                         MLOG_4BYTES, mtr)
                + 1 + sum_of_undo_sizes;

        len = flst_get_len(rseg_header + TRX_RSEG_HISTORY, mtr);

        if (len > 0) {
                trx_sys->rseg_history_len += len;

                node_addr = trx_purge_get_log_from_hist(
                        flst_get_last(rseg_header + TRX_RSEG_HISTORY, mtr));
                rseg->last_page_no = node_addr.page;
                rseg->last_offset = node_addr.boffset;

                undo_log_hdr = trx_undo_page_get(rseg->space, rseg->zip_size,
                                                 node_addr.page, mtr)
                        + node_addr.boffset;

                rseg->last_trx_no = mtr_read_dulint(
                        undo_log_hdr + TRX_UNDO_TRX_NO, mtr);
                rseg->last_del_marks = mtr_read_ulint(
                        undo_log_hdr + TRX_UNDO_DEL_MARKS, MLOG_2BYTES, mtr);
        } else {
                rseg->last_page_no = FIL_NULL;
        }

        return(rseg);
}
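/* Illustrative sketch, not InnoDB code: how fixed-offset, big-endian header
fields such as TRX_RSEG_MAX_SIZE or TRX_RSEG_HISTORY_SIZE are decoded from a
page image, which is what the mtr_read_ulint()/mach_read_from_8() calls above
do under an mtr latch. The offsets and all EX_*/ex_* names here are made up
for the example; the real offsets come from trx0rseg.h and trx0undo.h. */

#include <stdint.h>
#include <stdio.h>

#define EX_RSEG_MAX_SIZE        0       /* example: 4-byte field at offset 0 */
#define EX_RSEG_HISTORY_SIZE    4       /* example: 4-byte field at offset 4 */

static uint32_t
ex_read_from_4(const unsigned char* b)  /* big-endian 32-bit read */
{
        return((uint32_t) b[0] << 24 | (uint32_t) b[1] << 16
               | (uint32_t) b[2] << 8 | (uint32_t) b[3]);
}

int
main(void)
{
        /* A fake 8-byte header: max_size = 0xFFFFFFFF, history_size = 2 */
        const unsigned char     hdr[8] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x02
        };

        uint32_t        max_size = ex_read_from_4(hdr + EX_RSEG_MAX_SIZE);
        uint32_t        hist_size = ex_read_from_4(hdr + EX_RSEG_HISTORY_SIZE);

        printf("max_size=%u history_size=%u\n",
               (unsigned) max_size, (unsigned) hist_size);

        /* rseg->curr_size in the function above is this history size plus 1
        (the segment header page) plus the sizes of the undo logs attached
        to the rollback segment. */
        return(0);
}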