/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check
makes it probable that, when we have reserved the btr search system latch
and need to allocate a new node to the hash table, it will succeed. However,
the check will not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we will not guarantee that there will
	be enough free space in the hash table. */

	if (heap->free_block == NULL) {
		buf_block_t*	block = buf_block_alloc(NULL, 0);

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}

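/* The function above exists so that callers can pre-allocate the frame
before entering the latched region. A minimal caller-side sketch follows;
btr_search_do_something_sketch() is a hypothetical illustration, not a
function from the source tree. */
static void
btr_search_do_something_sketch(void)
{
	/* Step 1: make it likely that a later node allocation from the
	hash heap will find a free block (this may allocate from the
	buffer pool, which is forbidden once the latch is held). */
	btr_search_check_free_space_in_heap();

	/* Step 2: only now reserve the btr search system latch. */
	rw_lock_x_lock(&btr_search_latch);

	/* ... add nodes to btr_search_sys->hash_index here; the insert
	can still fail if another thread consumed the pre-allocated block,
	so its return value must be checked ... */

	rw_lock_x_unlock(&btr_search_latch);
}
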
/*******************************************************************//**
Selects an INFORMATION SCHEMA table cache from the whole cache.
@return table cache */
static
i_s_table_cache_t*
cache_select_table(
/*===============*/
	trx_i_s_cache_t*	cache,	/*!< in: whole cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED)
	     || rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	switch (table) {
	case I_S_INNODB_TRX:
		table_cache = &cache->innodb_trx;
		break;
	case I_S_INNODB_LOCKS:
		table_cache = &cache->innodb_locks;
		break;
	case I_S_INNODB_LOCK_WAITS:
		table_cache = &cache->innodb_lock_waits;
		break;
	default:
		ut_error;
	}

	return(table_cache);
}

/*****************************************************************//**
Finds out if we must preserve a delete marked earlier version of a clustered
index record, because it is >= the purge view.
@return TRUE if earlier version should be preserved */
UNIV_INTERN
ibool
row_vers_must_preserve_del_marked(
/*==============================*/
	trx_id_t	trx_id,	/*!< in: transaction id in the version */
	mtr_t*		mtr)	/*!< in: mtr holding the latch on the
				clustered index record; it will also
				hold the latch on purge_view */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mtr_s_lock(&(purge_sys->latch), mtr);

	if (trx_purge_update_undo_must_exist(trx_id)) {

		/* A purge operation is not yet allowed to remove this
		delete marked record */

		return(TRUE);
	}

	return(FALSE);
}

/***********************************************************//**
Deletes a hash node. */
UNIV_INTERN
void
ha_delete_hash_node(
/*================*/
	hash_table_t*	table,		/*!< in: hash table */
	ha_node_t*	del_node)	/*!< in: node to be deleted */
{
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
	if (table->adaptive) {
		ut_a(del_node->block->frame == page_align(del_node->data));
		ut_a(del_node->block->n_pointers > 0);
		del_node->block->n_pointers--;
	}
# endif /* !UNIV_HOTBACKUP */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
}

/*******************************************************************//**
Checks if the cache can safely be updated.
@return TRUE if can be updated */
static
ibool
can_cache_be_updated(
/*=================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ullint	now;

	/* Here we read cache->last_read without acquiring its mutex
	because last_read is only updated when a shared rw lock on the
	whole cache is being held (see trx_i_s_cache_end_read()) and
	we are currently holding an exclusive rw lock on the cache.
	So it is not possible for last_read to be updated while we are
	reading it. */

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	now = ut_time_us(NULL);
	if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {

		return(TRUE);
	}

	return(FALSE);
}

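/* The test above is plain microsecond arithmetic: the cache may only be
rebuilt if no reader has fetched from it for at least
CACHE_MIN_IDLE_TIME_US. The stand-alone helper below illustrates the same
check; the constant value used here is an assumption for illustration only
and need not match the one defined in trx0i_s.c. */

#define SKETCH_CACHE_MIN_IDLE_TIME_US	100000ULL	/* assumed: 0.1 s */

/* Returns 1 if a cache last read at 'last_read' (us) is idle enough at
time 'now' (us) to be rebuilt, mirroring the comparison in
can_cache_be_updated(). For example, with now = 1000000 and
last_read = 950000 (50 ms ago) the result is 0; with last_read = 800000
(200 ms ago) it is 1. */
static int
cache_idle_long_enough(unsigned long long now, unsigned long long last_read)
{
	return(now - last_read > SKETCH_CACHE_MIN_IDLE_TIME_US);
}
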
/*************************************************************//**
Empties a hash table and frees the memory heaps. */
UNIV_INTERN
void
ha_clear(
/*=====*/
	hash_table_t*	table)	/*!< in, own: hash table */
{
	ulint	i;
	ulint	n;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */

#ifndef UNIV_HOTBACKUP
	/* Free the memory heaps. */
	n = table->n_mutexes;

	for (i = 0; i < n; i++) {
		mem_heap_free(table->heaps[i]);
	}
#endif /* !UNIV_HOTBACKUP */

	/* Clear the hash table. */
	n = hash_get_n_cells(table);

	for (i = 0; i < n; i++) {
		hash_get_nth_cell(table, i)->node = NULL;
	}
}

/*****************************************************************//**
Returns the value of ref_count. The value is protected by
btr_search_latch.
@return ref_count value. */
UNIV_INTERN
ulint
btr_search_info_get_ref_count(
/*==========================*/
	btr_search_t*	info)	/*!< in: search info. */
{
	ulint	ret;

	ut_ad(info);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	rw_lock_s_lock(&btr_search_latch);
	ret = info->ref_count;
	rw_lock_s_unlock(&btr_search_latch);

	return(ret);
}

/*******************************************************************//**
Release an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_write(
/*====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	rw_lock_x_unlock(&cache->rw_lock);
}

/*****************************************************************//**
Removes from the chain determined by fold all nodes whose data pointer
points to the page given. */
UNIV_INTERN
void
ha_remove_all_nodes_to_page(
/*========================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: fold value */
	const page_t*	page)	/*!< in: buffer page */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ASSERT_HASH_MUTEX_OWN(table, fold);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(btr_search_enabled);

	node = ha_chain_get_first(table, fold);

	while (node) {
		if (page_align(ha_node_get_data(node)) == page) {

			/* Remove the hash node */

			ha_delete_hash_node(table, node);

			/* Start again from the first node in the chain
			because the deletion may compact the heap of
			nodes and move other nodes! */

			node = ha_chain_get_first(table, fold);
		} else {
			node = ha_chain_get_next(node);
		}
	}
#ifdef UNIV_DEBUG
	/* Check that all nodes really got deleted */

	node = ha_chain_get_first(table, fold);

	while (node) {
		ut_a(page_align(ha_node_get_data(node)) != page);

		node = ha_chain_get_next(node);
	}
#endif
}

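/* A common caller pattern, sketched here rather than taken verbatim from
btr0sea.c, collects the fold values of the records on a page and then
calls ha_remove_all_nodes_to_page() once per fold while holding
btr_search_latch in exclusive mode. The 'folds' array and its length are
assumed to have been computed from the page's records beforehand. */
static void
drop_page_hash_entries_sketch(
	hash_table_t*	table,		/* adaptive hash index table */
	const page_t*	page,		/* page whose entries to drop */
	const ulint*	folds,		/* fold values found on the page */
	ulint		n_folds)	/* number of fold values */
{
	ulint	i;

	rw_lock_x_lock(&btr_search_latch);

	for (i = 0; i < n_folds; i++) {
		/* Each call walks one hash chain and unlinks every node
		whose data pointer lies within 'page'. */
		ha_remove_all_nodes_to_page(table, folds[i], page);
	}

	rw_lock_x_unlock(&btr_search_latch);
}
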
/*****************************************************************//**
Checks if trx_id is >= purge_view: then it is guaranteed that its update
undo log still exists in the system.
@return TRUE if it is sure that it is preserved; even if the function
returns FALSE, it is possible that the undo log still exists in the
system */
UNIV_INTERN
ibool
trx_purge_update_undo_must_exist(
/*=============================*/
	trx_id_t	trx_id)	/*!< in: transaction id */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	if (!read_view_sees_trx_id(purge_sys->view, trx_id)) {

		return(TRUE);
	}

	return(FALSE);
}

/*********************************************************//**
Looks for an element when we know the pointer to the data, and updates
the pointer to data, if found. */
UNIV_INTERN
void
ha_search_and_update_if_found_func(
/*===============================*/
	hash_table_t*	table,	/*!< in/out: hash table */
	ulint		fold,	/*!< in: folded value of the searched data */
	const rec_t*	data,	/*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	new_data)/*!< in: new pointer to the data */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ASSERT_HASH_MUTEX_OWN(table, fold);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	if (!btr_search_enabled) {
		return;
	}

	node = ha_search_with_data(table, fold, data);

	if (node) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
		if (table->adaptive) {
			ut_a(node->block->n_pointers > 0);
			node->block->n_pointers--;
			new_block->n_pointers++;
		}
# endif /* !UNIV_HOTBACKUP */

		node->block = new_block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
		node->data = new_data;
	}
}

/*******************************************************************//**
Release a shared/read lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_read(
/*===================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ullint	now;

#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_SHARED));
#endif

	/* update cache last read time */
	now = ut_time_us(NULL);
	mutex_enter(&cache->last_read_mutex);
	cache->last_read = now;
	mutex_exit(&cache->last_read_mutex);

	rw_lock_s_unlock(&cache->rw_lock);
}

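/* Readers bracket all row fetches between the start and end calls, so
that last_read reflects the most recent fetch. The sketch below is a
hypothetical reader; it assumes trx_i_s_cache_start_read(),
trx_i_s_cache_get_rows_used() and trx_i_s_cache_get_nth_row() as declared
in trx0i_s.h. */
static void
read_innodb_trx_rows_sketch(trx_i_s_cache_t* cache)
{
	ulint	i;
	ulint	n_rows;

	trx_i_s_cache_start_read(cache);	/* s-locks cache->rw_lock */

	n_rows = trx_i_s_cache_get_rows_used(cache, I_S_INNODB_TRX);

	for (i = 0; i < n_rows; i++) {
		void*	row = trx_i_s_cache_get_nth_row(
			cache, I_S_INNODB_TRX, i);

		/* ... copy the row into the MySQL result set ... */
		(void) row;
	}

	/* Stamps cache->last_read (consulted by can_cache_be_updated())
	and releases the shared lock. */
	trx_i_s_cache_end_read(cache);
}
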
/*****************************************************************//**
Constructs the version of a clustered index record which a consistent read
should see. We assume that the trx id stored in rec is such that the
consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/*!< out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	rec_t*		prev_version;
	trx_id_t	trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;
		heap = mem_heap_create(1024);

		/* If we have a high-granularity consistent read view and
		the creating transaction of the view is the same as trx_id
		in the record, we see this record only in the case when
		undo_no of the record is < undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
				ut_a(!rec_offs_any_null_extern(
					     version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

				buf = mem_heap_alloc(in_heap,
						     rec_offs_size(*offsets));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
in shared or exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
for the lock before suspending the thread. If the same thread has an x-lock
on the rw-lock, locking succeeds, with the following exception: if pass != 0,
only a single x-lock may be taken on the lock. NOTE: If the same thread has
an s-lock, locking does not succeed! */
UNIV_INTERN
void
rw_lock_x_lock_func(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	ulint		line)	/*!< in: line where requested */
{
	ulint	index;	/*!< index of the reserved wait cell */
	ulint	i;	/*!< spin round count */
	ibool	spinning = FALSE;

	ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	i = 0;

lock_loop:

	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
		rw_x_spin_round_count += i;

		return;	/* Locking succeeded */

	} else {

		if (!spinning) {
			spinning = TRUE;
			rw_x_spin_wait_count++;
		}

		/* Spin waiting for the lock_word to become free */
		while (i < SYNC_SPIN_ROUNDS
		       && lock->lock_word <= 0) {
			if (srv_spin_wait_delay) {
				ut_delay(ut_rnd_interval(
						 0, srv_spin_wait_delay));
			}

			i++;
		}
		if (i == SYNC_SPIN_ROUNDS) {
			os_thread_yield();
		} else {
			goto lock_loop;
		}
	}

	rw_x_spin_round_count += i;

	if (srv_print_latch_waits) {
		fprintf(stderr,
			"Thread %lu spin wait rw-x-lock at %p"
			" cfile %s cline %lu rnds %lu\n",
			os_thread_pf(os_thread_get_curr_id()), (void*) lock,
			innobase_basename(lock->cfile_name),
			(ulong) lock->cline, (ulong) i);
	}

	sync_array_reserve_cell(sync_primary_wait_array,
				lock, RW_LOCK_EX,
				file_name, line,
				&index);

	/* Waiters must be set before checking lock_word, to ensure signal
	is sent. This could lead to a few unnecessary wake-up signals. */
	rw_lock_set_waiter_flag(lock);

	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
		sync_array_free_cell(sync_primary_wait_array, index);

		return; /* Locking succeeded */
	}

	if (srv_print_latch_waits) {
		fprintf(stderr,
			"Thread %lu OS wait for rw-x-lock at %p"
			" cfile %s cline %lu\n",
			os_thread_pf(os_thread_get_curr_id()), (void*) lock,
			innobase_basename(lock->cfile_name),
			(ulong) lock->cline);
	}

	/* these stats may not be accurate */
	lock->count_os_wait++;
	rw_x_os_wait_count++;

	sync_array_wait_event(sync_primary_wait_array, index);

	i = 0;
	goto lock_loop;
}

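/* The control flow above is the classic spin-then-block pattern: try an
atomic acquire, busy-wait a bounded number of rounds while the lock word
looks unavailable, then register in a wait array and sleep. The compact
sketch below shows the same shape in isolation; every primitive it calls
is a stand-in declared here for illustration, not an InnoDB function. */

#include <stdbool.h>

extern bool	try_acquire(void);		/* like rw_lock_x_lock_low() */
extern bool	looks_free(void);		/* like lock->lock_word > 0 */
extern void	cpu_relax(void);		/* like ut_delay() */
extern void	yield_thread(void);		/* like os_thread_yield() */
extern void	register_and_sleep(void);	/* like sync_array_reserve_cell()
						plus sync_array_wait_event() */

#define SKETCH_SPIN_ROUNDS	30

static void
spin_then_block_lock_sketch(void)
{
	unsigned	i;

	for (;;) {
		if (try_acquire()) {
			return;			/* fast path */
		}

		/* Bounded busy-wait while the lock still looks taken. */
		for (i = 0; i < SKETCH_SPIN_ROUNDS && !looks_free(); i++) {
			cpu_relax();
		}

		if (i < SKETCH_SPIN_ROUNDS) {
			continue;		/* looks free: retry */
		}

		yield_thread();

		if (try_acquire()) {
			return;			/* last try before sleeping */
		}

		/* Slow path: park the thread until a releasing thread
		wakes it up, then start over. */
		register_and_sleep();
	}
}
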
/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. If btr_search_enabled is set to FALSE, we will only allow
updating existing nodes, but no new node is allowed to be added.
@return TRUE if succeeded, FALSE if no more memory could be allocated */
UNIV_INTERN
ibool
ha_insert_for_fold_func(
/*====================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the same data, and no new
				node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	block,	/*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ha_node_t*	prev_node;
	ulint		hash;

	ut_ad(data);
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ASSERT_HASH_MUTEX_OWN(table, fold);
	ut_ad(btr_search_enabled);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = cell->node;

	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
			if (table->adaptive) {
				buf_block_t* prev_block = prev_node->block;
				ut_a(prev_block->frame
				     == page_align(prev_node->data));
				ut_a(prev_block->n_pointers > 0);
				prev_block->n_pointers--;
				block->n_pointers++;
			}
# endif /* !UNIV_HOTBACKUP */

			prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, block, data);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
	if (table->adaptive) {
		block->n_pointers++;
	}
# endif /* !UNIV_HOTBACKUP */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	node->fold = fold;

	node->next = NULL;

	prev_node = cell->node;

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}

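/* In the adaptive hash index code the usual insertion sequence is:
reserve a free frame for the hash heap, take btr_search_latch in
exclusive mode, then insert. The hedged sketch below illustrates that
sequence using the ha_insert_for_fold() wrapper macro from ha0ha.h; it is
an illustration with simplified error handling, not code from btr0sea.c. */
static void
ahi_insert_sketch(
	buf_block_t*	block,	/* buffer block holding 'rec' */
	const rec_t*	rec,	/* record to point to */
	ulint		fold)	/* fold value of the record prefix */
{
	/* Must happen before the latch is taken (latching order forbids
	buf_block_alloc() while btr_search_latch is held). */
	btr_search_check_free_space_in_heap();

	rw_lock_x_lock(&btr_search_latch);

	if (btr_search_enabled) {
		/* May still return FALSE if the heap ran out of frames;
		the adaptive hash index tolerates missing entries, so the
		failure can simply be ignored here. */
		ha_insert_for_fold(btr_search_sys->hash_index, fold,
				   block, rec);
	}

	rw_lock_x_unlock(&btr_search_latch);
}
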
/*********************************************************************//**
Updates the block search info on hash successes. NOTE that info and
block->n_hash_helps, n_fields, n_bytes, side are NOT protected by any
semaphore, to save CPU time! Do not assume the fields are consistent.
@return TRUE if building a (new) hash index on the block is recommended */
static
ibool
btr_search_update_block_hash_info(
/*==============================*/
	btr_search_t*	info,	/*!< in: search info */
	buf_block_t*	block,	/*!< in: buffer block */
	btr_cur_t*	cursor __attribute__((unused)))
				/*!< in: cursor */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
	ut_ad(rw_lock_own(&block->lock, RW_LOCK_SHARED)
	      || rw_lock_own(&block->lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(cursor);

	info->last_hash_succ = FALSE;

	ut_a(buf_block_state_valid(block));
	ut_ad(info->magic_n == BTR_SEARCH_MAGIC_N);

	if ((block->n_hash_helps > 0)
	    && (info->n_hash_potential > 0)
	    && (block->n_fields == info->n_fields)
	    && (block->n_bytes == info->n_bytes)
/*********************************************************************//**
Updates the search info of an index about hash successes. NOTE that info
is NOT protected by any semaphore, to save CPU time! Do not assume its
fields are consistent. */
static
void
btr_search_info_update_hash(
/*========================*/
	btr_search_t*	info,	/*!< in/out: search info */
	btr_cur_t*	cursor)	/*!< in: cursor which was just positioned */
{
	dict_index_t*	index;
	ulint		n_unique;
	int		cmp;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	index = cursor->index;

	if (dict_index_is_ibuf(index)) {
		/* So many deletes are performed on an insert buffer tree
		that we do not consider a hash index useful on it: */

		return;
	}

	n_unique = dict_index_get_n_unique_in_tree(index);

	if (info->n_hash_potential == 0) {

		goto set_new_recomm;
	}

	/* Test if the search would have succeeded using the recommended
	hash prefix */

	if (info->n_fields >= n_unique && cursor->up_match >= n_unique) {
increment_potential:
		info->n_hash_potential++;

		return;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->low_match, cursor->low_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto set_new_recomm;
	}

	cmp = ut_pair_cmp(info->n_fields, info->n_bytes,
			  cursor->up_match, cursor->up_bytes);

	if (info->left_side ? cmp <= 0 : cmp > 0) {

		goto increment_potential;
	}

set_new_recomm:
	/* We have to set a new recommendation; skip the hash analysis
	for a while to avoid unnecessary CPU time usage when there is no
	chance for success */

	info->hash_analysis = 0;

	cmp = ut_pair_cmp(cursor->up_match, cursor->up_bytes,
			  cursor->low_match, cursor->low_bytes);
	if (cmp == 0) {
		info->n_hash_potential = 0;

		/* For extra safety, we set some sensible values here */

		info->n_fields = 1;
		info->n_bytes = 0;

		info->left_side = TRUE;

	} else if (cmp > 0) {
		info->n_hash_potential = 1;

		if (cursor->up_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;

		} else if (cursor->low_match < cursor->up_match) {

			info->n_fields = cursor->low_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->low_match;
			info->n_bytes = cursor->low_bytes + 1;
		}

		info->left_side = TRUE;
	} else {
		info->n_hash_potential = 1;

		if (cursor->low_match >= n_unique) {

			info->n_fields = n_unique;
			info->n_bytes = 0;
		} else if (cursor->low_match > cursor->up_match) {

			info->n_fields = cursor->up_match + 1;
			info->n_bytes = 0;
		} else {
			info->n_fields = cursor->up_match;
			info->n_bytes = cursor->up_bytes + 1;
		}

		info->left_side = FALSE;
	}
}

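/* The recommended prefix is the pair (n_fields, n_bytes), compared
lexicographically against the cursor's match depth. The helper below
illustrates that ordering; it only mimics the assumed behaviour of
ut_pair_cmp() from ut0ut and is not the InnoDB implementation. */
static int
pair_cmp_sketch(unsigned long a1, unsigned long a2,
		unsigned long b1, unsigned long b2)
{
	if (a1 != b1) {
		return(a1 > b1 ? 1 : -1);
	}

	if (a2 != b2) {
		return(a2 > b2 ? 1 : -1);
	}

	return(0);
}

/* Examples: a recommendation of 2 full fields (2, 0) compares greater
than 1 full field plus 3 bytes of the next field (1, 3):
pair_cmp_sketch(2, 0, 1, 3) == 1. It compares less than 2 full fields
plus 1 byte (2, 1): pair_cmp_sketch(2, 0, 2, 1) == -1. */
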
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	const rec_t**	old_vers)/*!< out: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= ut_dulint_zero;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from changing.
	Thus, if we have an uncommitted transaction at this point, then
	purge cannot remove its undo log even if the transaction could
	commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		if (version_trx
		    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
			|| version_trx->conc_state == TRX_NOT_STARTED)) {

			version_trx = NULL;
		}
		mutex_exit(&kernel_mutex);

		if (!version_trx) {

			/* We found a version that belongs to a
			committed transaction: return it. */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
			ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}

/*****************************************************************//**
Finds out if a version of the record, where the version >= the current
purge view, should have ientry as its secondary index entry. We check
if there is any not delete marked version of the record where the trx
id >= purge view, and the secondary index entry and ientry are identified in
the alphabetical ordering; exactly in this case we return TRUE.
@return TRUE if earlier version should have the entry */
UNIV_INTERN
ibool
row_vers_old_has_index_entry(
/*=========================*/
	ibool		also_curr,/*!< in: TRUE if also rec is included in the
				versions to search; otherwise only versions
				prior to it are searched */
	const rec_t*	rec,	/*!< in: record in the clustered index; the
				caller must have a latch on the page */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec; it will
				also hold the latch on purge_view */
	dict_index_t*	index,	/*!< in: the secondary index */
	const dtuple_t*	ientry)	/*!< in: the secondary index entry */
{
	const rec_t*	version;
	rec_t*		prev_version;
	dict_index_t*	clust_index;
	ulint*		clust_offsets;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	const dtuple_t*	row;
	const dtuple_t*	entry;
	ulint		err;
	ulint		comp;

	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	mtr_s_lock(&(purge_sys->latch), mtr);

	clust_index = dict_table_get_first_index(index->table);

	comp = page_rec_is_comp(rec);
	ut_ad(!dict_table_is_comp(index->table) == !comp);
	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);

	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
		row_ext_t*	ext;

		/* The stack of versions is locked by mtr.
		Thus, it is safe to fetch the prefixes for
		externally stored columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index,
				rec, clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);

		/* If entry == NULL, the record contains unset BLOB
		pointers. This must be a freshly inserted record. If
		this is called from
		row_purge_remove_sec_if_poss_low(), the thread will
		hold latches on the clustered index and the secondary
		index. Because the insert works in three steps:

			(1) insert the record to clustered index
			(2) store the BLOBs and update BLOB pointers
			(3) insert records to secondary indexes

		the purge thread can safely ignore freshly inserted
		records and delete the secondary index record. The
		thread that inserted the new record will be inserting
		the secondary index records. */

		/* NOTE that we cannot do the comparison as binary
		fields because the row is maybe being modified so that
		the clustered index record has already been updated to
		a different binary value in a char field, but the
		collation identifies the old and new value anyway! */
		if (entry && !dtuple_coll_cmp(ientry, entry)) {

			mem_heap_free(heap);

			return(TRUE);
		}
	}

	version = rec;

	for (;;) {
		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			/* Versions end here */

			mem_heap_free(heap);

			return(FALSE);
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(prev_version, comp)) {
			row_ext_t*	ext;

			/* The stack of versions is locked by mtr.
			Thus, it is safe to fetch the prefixes for
			externally stored columns. */
			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					NULL, &ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);

			/* If entry == NULL, the record contains unset
			BLOB pointers. This must be a freshly
			inserted record that we can safely ignore.
			For the justification, see the comments after
			the previous row_build_index_entry() call. */

			/* NOTE that we cannot do the comparison as binary
			fields because maybe the secondary index record has
			already been updated to a different binary value in
			a char field, but the collation identifies the old
			and new value anyway! */

			if (entry && !dtuple_coll_cmp(ientry, entry)) {

				mem_heap_free(heap);

				return(TRUE);
			}
		}

		version = prev_version;
	}
}

/*****************************************************************//**
Finds out if an active transaction has inserted or modified a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function!
@return NULL if committed, else the active transaction */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: the secondary index */
	const ulint*	offsets)/*!< in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;
	rec_t*		clust_rec;
	ulint*		clust_offsets;
	rec_t*		version;
	trx_id_t	trx_id;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		rec_del;
#ifdef UNIV_DEBUG
	ulint		err;
#endif /* UNIV_DEBUG */
	mtr_t		mtr;
	ulint		comp;

	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record: this is a time-consuming
	operation: therefore we release the kernel mutex; also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed the
		clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */
		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */
		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We look up if some earlier version, which was modified by the
	trx_id transaction, of the clustered index record would require rec
	to be in a different state (delete marked or unmarked, or have
	different field values, or not existing). If there is such a version,
	then rec was modified by the trx_id transaction, and it has an
	implicit x-lock on rec. Note that if clust_rec itself would require
	rec to be in a different state, then the trx_id transaction has not
	yet had time to modify rec, and does not necessarily have an implicit
	x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		rec_t*		prev_version;
		ulint		vers_del;
		row_ext_t*	ext;
		trx_id_t	prev_trx_id;

		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
#ifdef UNIV_DEBUG
		err =
#endif /* UNIV_DEBUG */
		trx_undo_prev_version_build(clust_rec, &mtr, version,
					    clust_index, clust_offsets,
					    heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version == NULL) {
			mutex_enter(&kernel_mutex);

			if (!trx_is_active(trx_id)) {
				/* Transaction no longer active: no
				implicit x-lock */

				break;
			}

			/* If the transaction is still active,
			clust_rec must be a fresh insert, because no
			previous version was found. */
			ut_ad(err == DB_SUCCESS);

			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		vers_del = rec_get_deleted_flag(prev_version, comp);
		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		/* The stack of versions is locked by mtr. Thus, it
		is safe to fetch the prefixes for externally stored
		columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
				clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);
		/* entry may be NULL if a record was inserted in place
		of a deleted record, and the BLOB pointers of the new
		record were not initialized yet. But in that case,
		prev_version should be NULL. */
		ut_a(entry);

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit
			x-lock */

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		/* The previous version of clust_rec must be
		accessible, because the transaction is still active
		and clust_rec was not a fresh insert. */
		ut_ad(err == DB_SUCCESS);

		/* We check if entry and rec are identified in the
		alphabetical ordering */
		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that the
			secondary index record remained the same in
			alphabetical ordering, but the field values changed
			still. For example, 'abc' -> 'ABC'. Check also that. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		if (0 != ut_dulint_cmp(trx_id, prev_trx_id)) {
			/* The versions modified by the trx_id transaction end
			at prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
