/********************************************************************//**
Test and dump the block metadata to the dump file if necessary. */
UNIV_INTERN
void
fc_flush_test_and_dump_blkmeta(
/*===========================*/
	ulint	last_time)	/*!< in: the last time the block metadata
				was dumped */
{
	ulint	curr_time;

	curr_time = ut_time_ms();

	if ((curr_time - FLASH_CACHE_DUMP_BLOCK_META_PERIOD) > last_time) {
		/* FIXME: seal with a function */
		flash_cache_mutex_enter();

		rw_lock_s_lock(&fc->hash_rwlock);
		fc_dump();
		rw_lock_s_unlock(&fc->hash_rwlock);

		flash_cache_log_mutex_enter();
		fc_log_update(FALSE, FLASH_CACHE_LOG_UPDATE_DUMP);
		fc_log_update_commit_status();

		flash_cache_mutex_exit();

		fc_log_commit();
		flash_cache_log_mutex_exit();

		srv_fc_flush_last_dump = ut_time_ms();
	}
}
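/* The FIXME above asks for the dump-and-log sequence to be sealed in a
helper. A possible shape for such a helper is sketched below; the name
fc_dump_blkmeta_and_log() is hypothetical and not in the source tree, but
the calls inside it are exactly the ones made above, in the same order. */
static
void
fc_dump_blkmeta_and_log(void)
/*=========================*/
{
	flash_cache_mutex_enter();

	/* Dump the block metadata under the hash rw-lock. */
	rw_lock_s_lock(&fc->hash_rwlock);
	fc_dump();
	rw_lock_s_unlock(&fc->hash_rwlock);

	/* Record the dump in the flash cache log and commit it. */
	flash_cache_log_mutex_enter();
	fc_log_update(FALSE, FLASH_CACHE_LOG_UPDATE_DUMP);
	fc_log_update_commit_status();

	flash_cache_mutex_exit();

	fc_log_commit();
	flash_cache_log_mutex_exit();
}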
/*******************************************************************//**
Issue a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_start_read(
/*=====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_s_lock(&cache->rw_lock);
}
/*************************************************************************
Checks if possible foreign key constraints hold after a delete of the record
under pcur. NOTE that this function will temporarily commit mtr and lose
pcur position! */
static
ulint
row_upd_check_references_constraints(
/*=================================*/
				/* out: DB_SUCCESS, DB_LOCK_WAIT, or an
				error code */
	btr_pcur_t*	pcur,	/* in: cursor positioned on a record; NOTE:
				the cursor position is lost in this
				function! */
	dict_table_t*	table,	/* in: table in question */
	dict_index_t*	index,	/* in: index of the cursor */
	que_thr_t*	thr,	/* in: query thread */
	mtr_t*		mtr)	/* in: mtr */
{
	dict_foreign_t*	foreign;
	mem_heap_t*	heap;
	dtuple_t*	entry;
	rec_t*		rec;
	ulint		err;

	rec = btr_pcur_get_rec(pcur);

	heap = mem_heap_create(500);

	entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

	mtr_commit(mtr);

	mtr_start(mtr);

	rw_lock_s_lock(&dict_foreign_key_check_lock);

	foreign = UT_LIST_GET_FIRST(table->referenced_list);

	while (foreign) {
		if (foreign->referenced_index == index) {

			err = row_ins_check_foreign_constraint(
				FALSE, foreign, table, index, entry, thr);

			if (err != DB_SUCCESS) {
				rw_lock_s_unlock(
					&dict_foreign_key_check_lock);
				mem_heap_free(heap);

				return(err);
			}
		}

		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
	}

	rw_lock_s_unlock(&dict_foreign_key_check_lock);

	mem_heap_free(heap);

	return(DB_SUCCESS);
}
/*****************************************************************//**
Returns the value of ref_count. The value is protected by btr_search_latch.
@return ref_count value. */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
	btr_search_t*	info)	/*!< in: search info. */
{
	ulint	ret;

	ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);
	ret = info->ref_count;
	rw_lock_s_unlock(&btr_search_latch);

	return(ret);
}
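/* Example caller (a hypothetical helper, assuming a valid dict_index_t*):
the per-index search info block is reachable through
dict_index_t::search_info, so checking whether the adaptive hash index
still references an index can be written like this. */
static
ibool
my_index_is_referenced_by_ahi(
/*==========================*/
	dict_index_t*	index)	/* in: index */
{
	return(btr_search_info_get_ref_count(index->search_info) > 0);
}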
/********************************************************************//**
Applies linear read-ahead if in the buf_pool the page is a border page of
a linear read-ahead area and all the pages in the area have been accessed.
Does not read any page if the read-ahead mechanism is not activated. Note
that the algorithm looks at the 'natural' adjacent successor and
predecessor of the page, which on the leaf level of a B-tree are the next
and previous page in the chain of leaves. To know these, the page specified
in (space, offset) must already be present in the buf_pool. Thus, the
natural way to use this function is to call it when a page in the buf_pool
is accessed the first time, calling this function just after it has been
bufferfixed.
NOTE 1: as this function looks at the natural predecessor and successor
fields on the page, what happens if these are not initialized to any
sensible value? No problem, before applying read-ahead we check that the
area to read is within the span of the space; if not, read-ahead is not
applied. An uninitialized value may result in a useless read operation, but
only very improbably.
NOTE 2: the calling thread may own latches on pages: to avoid deadlocks this
function must be written such that it cannot end up waiting for these
latches!
NOTE 3: the calling thread must want access to the page given: this rule is
set to prevent unintended read-aheads performed by ibuf routines, a situation
which could result in a deadlock if the OS does not support asynchronous io.
@return number of page read requests issued */
UNIV_INTERN
ulint
buf_read_ahead_linear(
/*==================*/
	ulint	space,		/*!< in: space id */
	ulint	zip_size,	/*!< in: compressed page size in bytes,
				or 0 */
	ulint	offset,		/*!< in: page number; see NOTE 3 above */
	ibool	inside_ibuf,	/*!< in: TRUE if we are inside ibuf routine */
	trx_t*	trx)
{
	buf_pool_t*	buf_pool = buf_pool_get(space, offset);
	ib_int64_t	tablespace_version;
	buf_page_t*	bpage;
	buf_frame_t*	frame;
	buf_page_t*	pred_bpage	= NULL;
	ulint		pred_offset;
	ulint		succ_offset;
	ulint		count;
	int		asc_or_desc;
	ulint		new_offset;
	ulint		fail_count;
	ulint		ibuf_mode;
	ulint		low, high;
	ulint		err;
	ulint		i;
	const ulint	buf_read_ahead_linear_area
		= BUF_READ_AHEAD_AREA(buf_pool);
	ulint		threshold;

	if (!(srv_read_ahead & 2)) {
		return(0);
	}

	if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
		/* No read-ahead to avoid thread deadlocks */
		return(0);
	}

	low  = (offset / buf_read_ahead_linear_area)
		* buf_read_ahead_linear_area;
	high = (offset / buf_read_ahead_linear_area + 1)
		* buf_read_ahead_linear_area;

	if ((offset != low) && (offset != high - 1)) {
		/* This is not a border page of the area: return */

		return(0);
	}

	if (ibuf_bitmap_page(zip_size, offset)
	    || trx_sys_hdr_page(space, offset)) {

		/* If it is an ibuf bitmap page or trx sys hdr, we do
		no read-ahead, as that could break the ibuf page access
		order */

		return(0);
	}

	/* Remember the tablespace version before we ask the tablespace size
	below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
	do not try to read outside the bounds of the tablespace! */

	tablespace_version = fil_space_get_version(space);

	buf_pool_mutex_enter(buf_pool);

	if (high > fil_space_get_size(space)) {
		buf_pool_mutex_exit(buf_pool);
		/* The area is not whole, return */

		return(0);
	}

	if (buf_pool->n_pend_reads
	    > buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
		buf_pool_mutex_exit(buf_pool);

		return(0);
	}
	buf_pool_mutex_exit(buf_pool);

	/* Check that almost all pages in the area have been accessed; if
	offset == low, the accesses must be in a descending order, otherwise,
	in an ascending order. */

	asc_or_desc = 1;

	if (offset == low) {
		asc_or_desc = -1;
	}

	/* How many out of order accessed pages can we ignore
	when working out the access pattern for linear readahead */
	threshold = ut_min((64 - srv_read_ahead_threshold),
			   BUF_READ_AHEAD_AREA(buf_pool));

	fail_count = 0;

	rw_lock_s_lock(&buf_pool->page_hash_latch);

	for (i = low; i < high; i++) {
		bpage = buf_page_hash_get(buf_pool, space, i);

		if (bpage == NULL || !buf_page_is_accessed(bpage)) {
			/* Not accessed */
			fail_count++;

		} else if (pred_bpage) {
			/* Note that buf_page_is_accessed() returns
			the time of the first access.  If some blocks
			of the extent existed in the buffer pool at
			the time of a linear access pattern, the first
			access times may be nonmonotonic, even though
			the latest access times were linear.  The
			threshold (srv_read_ahead_threshold) should
			help a little against this. */
			int res = ut_ulint_cmp(
				buf_page_is_accessed(bpage),
				buf_page_is_accessed(pred_bpage));
			/* Accesses not in the right order */
			if (res != 0 && res != asc_or_desc) {
				fail_count++;
			}
		}

		if (fail_count > threshold) {
			/* Too many failures: return */
			rw_lock_s_unlock(&buf_pool->page_hash_latch);

			return(0);
		}

		if (bpage && buf_page_is_accessed(bpage)) {
			pred_bpage = bpage;
		}
	}

	/* If we got this far, we know that enough pages in the area have
	been accessed in the right order: linear read-ahead can be sensible */

	bpage = buf_page_hash_get(buf_pool, space, offset);

	if (bpage == NULL) {
		rw_lock_s_unlock(&buf_pool->page_hash_latch);

		return(0);
	}

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_ZIP_PAGE:
		frame = bpage->zip.data;
		break;
	case BUF_BLOCK_FILE_PAGE:
		frame = ((buf_block_t*) bpage)->frame;
		break;
	default:
		ut_error;
		break;
	}

	/* Read the natural predecessor and successor page addresses from
	the page; NOTE that because the calling thread may have an x-latch
	on the page, we do not acquire an s-latch on the page, this is to
	prevent deadlocks. Even if we read values which are nonsense, the
	algorithm will work. */

	pred_offset = fil_page_get_prev(frame);
	succ_offset = fil_page_get_next(frame);

	rw_lock_s_unlock(&buf_pool->page_hash_latch);

	if ((offset == low) && (succ_offset == offset + 1)) {

		/* This is ok, we can continue */
		new_offset = pred_offset;

	} else if ((offset == high - 1) && (pred_offset == offset - 1)) {

		/* This is ok, we can continue */
		new_offset = succ_offset;
	} else {
		/* Successor or predecessor not in the right order */

		return(0);
	}

	low  = (new_offset / buf_read_ahead_linear_area)
		* buf_read_ahead_linear_area;
	high = (new_offset / buf_read_ahead_linear_area + 1)
		* buf_read_ahead_linear_area;

	if ((new_offset != low) && (new_offset != high - 1)) {
		/* This is not a border page of the area: return */

		return(0);
	}

	if (high > fil_space_get_size(space)) {
		/* The area is not whole, return */

		return(0);
	}

	/* If we got this far, read-ahead can be sensible: do it */

	ibuf_mode = inside_ibuf
		? BUF_READ_IBUF_PAGES_ONLY | OS_AIO_SIMULATED_WAKE_LATER
		: BUF_READ_ANY_PAGE | OS_AIO_SIMULATED_WAKE_LATER;

	count = 0;

	/* Since Windows XP seems to schedule the i/o handler thread
	very eagerly, and consequently it does not wait for the
	full read batch to be posted, we use special heuristics here */

	os_aio_simulated_put_read_threads_to_sleep();

	for (i = low; i < high; i++) {
		/* It is only sensible to do read-ahead in the non-sync
		aio mode: hence FALSE as the first parameter */

		if (!ibuf_bitmap_page(zip_size, i)) {
			count += buf_read_page_low(
				&err, FALSE, ibuf_mode,
				space, zip_size, FALSE, tablespace_version,
				i, trx);

			if (err == DB_TABLESPACE_DELETED) {
				ut_print_timestamp(stderr);
				fprintf(stderr,
					"  InnoDB: Warning: in"
					" linear readahead trying to access\n"
					"InnoDB: tablespace %lu page %lu,\n"
					"InnoDB: but the tablespace does not"
					" exist or is just being dropped.\n",
					(ulong) space, (ulong) i);
			}
		}
	}

	/* In simulated aio we wake the aio handler threads only after
	queuing all aio requests, in native aio the following call does
	nothing: */

	os_aio_simulated_wake_handler_threads();

	/* Flush pages from the end of the LRU list if necessary */
	buf_flush_free_margin(buf_pool, TRUE);

#ifdef UNIV_DEBUG
	if (buf_debug_prints && (count > 0)) {
		fprintf(stderr,
			"LINEAR read-ahead space %lu offset %lu pages %lu\n",
			(ulong) space, (ulong) offset, (ulong) count);
	}
#endif /* UNIV_DEBUG */

	/* Read ahead is considered one I/O operation for the purpose of
	LRU policy decision. */
	buf_LRU_stat_inc_io();

	buf_pool->stat.n_ra_pages_read += count;

	return(count);
}
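/* Illustrative sketch (not part of the server source): the border-page test
performed twice above. The read-ahead area is an aligned run of
BUF_READ_AHEAD_AREA(buf_pool) pages, and only the first and last page of
that run can trigger linear read-ahead. */
static
ibool
buf_read_ahead_page_is_border(
/*==========================*/
	ulint	offset,	/* in: page number */
	ulint	area)	/* in: read-ahead area size, in pages */
{
	ulint	low	= (offset / area) * area;
	ulint	high	= low + area;

	return(offset == low || offset == high - 1);
}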
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	const rec_t**	old_vers)/*!< out: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= ut_dulint_zero;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from changing.
	Thus, if we have an uncommitted transaction at this point, then
	purge cannot remove its undo log even if the transaction could
	commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);

		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);

		if (version_trx
		    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
			|| version_trx->conc_state == TRX_NOT_STARTED)) {

			version_trx = NULL;
		}
		mutex_exit(&kernel_mutex);

		if (!version_trx) {
			/* We found a version that belongs to a
			committed transaction: return it. */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
			ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;

				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
/*****************************************************************//**
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/*!< out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	rec_t*		prev_version;
	trx_id_t	trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;

		heap = mem_heap_create(1024);

		/* If we have a high-granularity consistent read view and
		the creating transaction of the view is the same as trx_id
		in the record, we see this record only in the case when
		undo_no of the record is < undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
				ut_a(!rec_offs_any_null_extern(
					     version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

				buf = mem_heap_alloc(
					in_heap, rec_offs_size(*offsets));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
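/* Conceptual sketch of the loop above, with hypothetical simplified types:
walk from the newest version of a record towards older ones and stop at the
first version whose creating transaction the read view can see. The real
code does not follow in-memory pointers; it rebuilds each older version
from the undo log with trx_undo_prev_version_build(). */
typedef struct my_rec_version_struct	my_rec_version_t;
struct my_rec_version_struct {
	trx_id_t		trx_id;	/* id of the creating transaction */
	my_rec_version_t*	older;	/* next older version, or NULL */
};

static
const my_rec_version_t*
my_first_visible_version(
/*=====================*/
	const my_rec_version_t*	newest,	/* in: newest version of the record */
	read_view_t*		view)	/* in: consistent read view */
{
	const my_rec_version_t*	version;

	for (version = newest; version != NULL; version = version->older) {

		if (read_view_sees_trx_id(view, version->trx_id)) {

			return(version);
		}
	}

	/* NULL means the record did not exist in the view: it was
	inserted after the view was created. */
	return(NULL);
}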
/*************************************************************************
Checks if index currently is mentioned as a referenced index in a foreign
key constraint. This function also loads into the dictionary cache the
possible referencing table. */
static
ibool
row_upd_index_is_referenced(
/*========================*/
				/* out: TRUE if referenced; NOTE that since
				we do not hold dict_foreign_key_check_lock
				when leaving the function, it may be that the
				referencing table has been dropped when we
				leave this function: this function is only
				for heuristic use! */
	dict_index_t*	index)	/* in: index */
{
	dict_table_t*	table		= index->table;
	dict_foreign_t*	foreign;
	ulint		phase		= 1;

try_again:
	if (!UT_LIST_GET_FIRST(table->referenced_list)) {

		return(FALSE);
	}

	if (phase == 2) {
		mutex_enter(&(dict_sys->mutex));
	}

	rw_lock_s_lock(&dict_foreign_key_check_lock);

	foreign = UT_LIST_GET_FIRST(table->referenced_list);

	while (foreign) {
		if (foreign->referenced_index == index) {

			if (foreign->foreign_table == NULL) {
				if (phase == 2) {
					dict_table_get_low(
						foreign->foreign_table_name);
				} else {
					phase = 2;
					rw_lock_s_unlock(
						&dict_foreign_key_check_lock);

					goto try_again;
				}
			}

			rw_lock_s_unlock(&dict_foreign_key_check_lock);

			if (phase == 2) {
				mutex_exit(&(dict_sys->mutex));
			}

			return(TRUE);
		}

		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
	}

	rw_lock_s_unlock(&dict_foreign_key_check_lock);

	if (phase == 2) {
		mutex_exit(&(dict_sys->mutex));
	}

	return(FALSE);
}
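/* The retry logic above is a two-phase pattern: phase 1 searches while
holding only dict_foreign_key_check_lock; if the referencing table still
has to be loaded into the dictionary cache, the latch is released and the
search restarts in phase 2, now also under dict_sys->mutex so that
dict_table_get_low() may be called. A stripped-down sketch of that control
flow, with a hypothetical helper name and the load condition passed in as
a parameter: */
static
ibool
my_two_phase_check(
/*===============*/
	ibool	need_load)	/* in: TRUE if the referencing table still
				has to be loaded into the cache */
{
	ulint	phase = 1;

try_again:
	if (phase == 2) {
		mutex_enter(&(dict_sys->mutex));
	}

	rw_lock_s_lock(&dict_foreign_key_check_lock);

	if (need_load && phase == 1) {
		/* Escalate: release the latch and retry with
		dict_sys->mutex held, preserving the latching order. */
		phase = 2;
		rw_lock_s_unlock(&dict_foreign_key_check_lock);

		goto try_again;
	}

	/* ... perform the actual check here ... */

	rw_lock_s_unlock(&dict_foreign_key_check_lock);

	if (phase == 2) {
		mutex_exit(&(dict_sys->mutex));
	}

	return(TRUE);
}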