/**********************************************************************
Takes a block out of the LRU list and page hash table and sets the block
state to BUF_BLOCK_REMOVE_HASH. */
static
void
buf_LRU_block_remove_hashed_page(
/*=============================*/
	buf_block_t*	block)	/* in: block, must contain a file page and
				be in a state where it can be freed; there
				may or may not be a hash index to the page */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(mutex_own(&block->mutex));
	ut_ad(block);

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
	ut_a(block->io_fix == 0);
	ut_a(block->buf_fix_count == 0);
	ut_a(ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) == 0);

	buf_LRU_remove_block(block);

	buf_pool->freed_page_clock += 1;

	/* Note that if AWE is enabled the block may not have a frame at all */

	buf_block_modify_clock_inc(block);

	if (block != buf_page_hash_get(block->space, block->offset)) {
		fprintf(stderr,
			"InnoDB: Error: page %lu %lu not found"
			" in the hash table\n",
			(ulong) block->space,
			(ulong) block->offset);

		if (buf_page_hash_get(block->space, block->offset)) {
			fprintf(stderr,
				"InnoDB: In hash table we find block"
				" %p of %lu %lu which is not %p\n",
				(void*) buf_page_hash_get
				(block->space, block->offset),
				(ulong) buf_page_hash_get
				(block->space, block->offset)->space,
				(ulong) buf_page_hash_get
				(block->space, block->offset)->offset,
				(void*) block);
		}

#ifdef UNIV_DEBUG
		buf_print();
		buf_LRU_print();
		buf_validate();
		buf_LRU_validate();
#endif
		ut_a(0);
	}

	HASH_DELETE(buf_block_t, hash, buf_pool->page_hash,
		    buf_page_address_fold(block->space, block->offset),
		    block);

	UNIV_MEM_INVALID(block->frame, UNIV_PAGE_SIZE);

	block->state = BUF_BLOCK_REMOVE_HASH;
}
/************************************************************************
Returns TRUE if the file page block is immediately suitable for replacement,
i.e., the transition FILE_PAGE => NOT_USED is allowed. */
ibool
buf_flush_ready_for_replace(
/*========================*/
				/* out: TRUE if can replace immediately */
	buf_block_t*	block)	/* in: buffer control block, must be in state
				BUF_BLOCK_FILE_PAGE and in the LRU list */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(mutex_own(&block->mutex));

	if (block->state != BUF_BLOCK_FILE_PAGE) {
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: Error: buffer block state %lu"
			" in the LRU list!\n",
			(ulong) block->state);
		ut_print_buf(stderr, block, sizeof(buf_block_t));

		return(FALSE);
	}

	if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
	    || (block->buf_fix_count != 0)
	    || (block->io_fix != 0)) {

		return(FALSE);
	}

	return(TRUE);
}
/**********************************************************************
Puts a block back to the free list. */
void
buf_LRU_block_free_non_file_page(
/*=============================*/
	buf_block_t*	block)	/* in: block, must not contain a file page */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(mutex_own(&block->mutex));
	ut_ad(block);

	ut_a((block->state == BUF_BLOCK_MEMORY)
	     || (block->state == BUF_BLOCK_READY_FOR_USE));

	ut_a(block->n_pointers == 0);
	ut_a(!block->in_free_list);

	block->state = BUF_BLOCK_NOT_USED;

	UNIV_MEM_ALLOC(block->frame, UNIV_PAGE_SIZE);
#ifdef UNIV_DEBUG
	/* Wipe contents of page to reveal possible stale pointers to it */
	memset(block->frame, '\0', UNIV_PAGE_SIZE);
#endif
	UT_LIST_ADD_FIRST(free, buf_pool->free, block);
	block->in_free_list = TRUE;

	UNIV_MEM_ASSERT_AND_FREE(block->frame, UNIV_PAGE_SIZE);

	if (srv_use_awe && block->frame) {
		/* Add to the list of mapped pages */

		UT_LIST_ADD_FIRST(awe_LRU_free_mapped,
				  buf_pool->awe_LRU_free_mapped, block);
	}
}
/************************************************************************
Returns TRUE if the block is modified and ready for flushing. */
UNIV_INLINE
ibool
buf_flush_ready_for_flush(
/*======================*/
				/* out: TRUE if can flush immediately */
	buf_block_t*	block,	/* in: buffer control block, must be in state
				BUF_BLOCK_FILE_PAGE */
	ulint		flush_type)/* in: BUF_FLUSH_LRU or BUF_FLUSH_LIST */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(mutex_own(&(block->mutex)));
	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

	if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
	    && (block->io_fix == 0)) {

		if (flush_type != BUF_FLUSH_LRU) {

			return(TRUE);

		} else if (block->buf_fix_count == 0) {

			/* If we are flushing the LRU list, to avoid deadlocks
			we require the block not to be bufferfixed, and hence
			not latched. */

			return(TRUE);
		}
	}

	return(FALSE);
}
/***********************************************************************
Initializes the old blocks pointer in the LRU list. This function should be
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
static
void
buf_LRU_old_init(void)
/*==================*/
{
	buf_block_t*	block;

	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);

	/* We first initialize all blocks in the LRU list as old and then use
	the adjust function to move the LRU_old pointer to the right
	position */

	block = UT_LIST_GET_FIRST(buf_pool->LRU);

	while (block != NULL) {
		ut_a(block->state == BUF_BLOCK_FILE_PAGE);
		ut_a(block->in_LRU_list);
		block->old = TRUE;
		block = UT_LIST_GET_NEXT(LRU, block);
	}

	buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
	buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);

	buf_LRU_old_adjust_len();
}
/***********************************************************//**
Moves the query threads in the sig reply wait list of trx to the SUSPENDED
state. */
static
void
trx_sig_reply_wait_to_suspended(
/*============================*/
	trx_t*	trx)	/*!< in: transaction */
{
	trx_sig_t*	sig;
	que_thr_t*	thr;

	ut_ad(mutex_own(&kernel_mutex));

	sig = UT_LIST_GET_FIRST(trx->reply_signals);

	while (sig != NULL) {
		thr = sig->receiver;

		ut_ad(thr->state == QUE_THR_SIG_REPLY_WAIT);

		thr->state = QUE_THR_SUSPENDED;

		sig->receiver = NULL;

		UT_LIST_REMOVE(reply_signals, trx->reply_signals, sig);

		sig = UT_LIST_GET_FIRST(trx->reply_signals);
	}
}
/****************************************************************//**
Send the reply message when a signal in the queue of the trx has been
handled. */
UNIV_INTERN
void
trx_sig_reply(
/*==========*/
	trx_sig_t*	sig,		/*!< in: signal */
	que_thr_t**	next_thr)	/*!< in/out: next query thread to run;
					if the value which is passed in is a
					pointer to a NULL pointer, then the
					calling function can start running a
					new query thread */
{
	trx_t*	receiver_trx;

	ut_ad(sig);
	ut_ad(mutex_own(&kernel_mutex));

	if (sig->receiver != NULL) {
		ut_ad((sig->receiver)->state == QUE_THR_SIG_REPLY_WAIT);

		receiver_trx = thr_get_trx(sig->receiver);

		UT_LIST_REMOVE(reply_signals, receiver_trx->reply_signals,
			       sig);
		ut_ad(receiver_trx->sess->state != SESS_ERROR);

		que_thr_end_wait(sig->receiver, next_thr);

		sig->receiver = NULL;
	}
}
/****************************************************************//**
Inserts the trx handle in the trx system trx list in the right position.
The list is sorted on the trx id so that the biggest id is at the list
start. This function is used at the database startup to insert incomplete
transactions to the list. */
static
void
trx_list_insert_ordered(
/*====================*/
	trx_t*	trx)	/*!< in: trx handle */
{
	trx_t*	trx2;

	ut_ad(mutex_own(&kernel_mutex));

	trx2 = UT_LIST_GET_FIRST(trx_sys->trx_list);

	while (trx2 != NULL) {
		if (ut_dulint_cmp(trx->id, trx2->id) >= 0) {

			ut_ad(ut_dulint_cmp(trx->id, trx2->id) == 1);
			break;
		}
		trx2 = UT_LIST_GET_NEXT(trx_list, trx2);
	}

	if (trx2 != NULL) {
		trx2 = UT_LIST_GET_PREV(trx_list, trx2);

		if (trx2 == NULL) {
			UT_LIST_ADD_FIRST(trx_list, trx_sys->trx_list, trx);
		} else {
			UT_LIST_INSERT_AFTER(trx_list, trx_sys->trx_list,
					     trx2, trx);
		}
	} else {
		UT_LIST_ADD_LAST(trx_list, trx_sys->trx_list, trx);
	}
}
/*******************************************************************//**
Drops the index tree associated with a row in SYS_INDEXES table. */
UNIV_INTERN
void
dict_drop_index_tree(
/*=================*/
	rec_t*	rec,	/*!< in/out: record in the clustered index
			of SYS_INDEXES table */
	mtr_t*	mtr)	/*!< in: mtr having the latch on the record page */
{
	ulint		root_page_no;
	ulint		space;
	ulint		zip_size;
	const byte*	ptr;
	ulint		len;

	ut_ad(mutex_own(&(dict_sys->mutex)));
	ut_a(!dict_table_is_comp(dict_sys->sys_indexes));

	ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD,
				    &len);

	ut_ad(len == 4);

	root_page_no = mtr_read_ulint(ptr, MLOG_4BYTES, mtr);

	if (root_page_no == FIL_NULL) {
		/* The tree has already been freed */

		return;
	}

	ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_SPACE_NO_FIELD,
				    &len);

	ut_ad(len == 4);

	space = mtr_read_ulint(ptr, MLOG_4BYTES, mtr);
	zip_size = fil_space_get_zip_size(space);

	if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
		/* It is a single table tablespace and the .ibd file is
		missing: do nothing */

		return;
	}

	/* We free all the pages but the root page first; this operation
	may span several mini-transactions */

	btr_free_but_not_root(space, zip_size, root_page_no);

	/* Then we free the root page in the same mini-transaction where
	we write FIL_NULL to the appropriate field in the SYS_INDEXES
	record: this mini-transaction marks the B-tree totally freed */

	/* printf("Dropping index tree in space %lu root page %lu\n", space,
	root_page_no); */
	btr_free_root(space, zip_size, root_page_no, mtr);

	page_rec_write_index_page_no(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD,
				     FIL_NULL, mtr);
}
/******************************************************************//**
Assigns a rollback segment to a transaction in a round-robin fashion.
Skips the SYSTEM rollback segment if another is available.
@return	assigned rollback segment id */
UNIV_INLINE
ulint
trx_assign_rseg(void)
/*=================*/
{
	trx_rseg_t*	rseg = trx_sys->latest_rseg;

	ut_ad(mutex_own(&kernel_mutex));
loop:
	/* Get next rseg in a round-robin fashion */

	rseg = UT_LIST_GET_NEXT(rseg_list, rseg);

	if (rseg == NULL) {
		rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);
	}

	/* If it is the SYSTEM rollback segment, and there exist others, skip
	it */

	if ((rseg->id == TRX_SYS_SYSTEM_RSEG_ID)
	    && (UT_LIST_GET_LEN(trx_sys->rseg_list) > 1)) {
		goto loop;
	}

	trx_sys->latest_rseg = rseg;

	return(rseg->id);
}
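/* The following is an illustrative standalone sketch, not InnoDB code:
it shows the same round-robin pattern as trx_assign_rseg() above, wrapping
around a fixed pool of segment ids and skipping a reserved id whenever
other segments exist. All names and the pool size are hypothetical. */

#include <stdio.h>

#define N_SEGS		4	/* hypothetical number of segments */
#define RESERVED_ID	0	/* plays the role of TRX_SYS_SYSTEM_RSEG_ID */

static unsigned	latest_seg = 0;	/* plays the role of trx_sys->latest_rseg */

static unsigned
assign_seg_round_robin(void)
{
	unsigned	seg = latest_seg;

	do {
		/* Advance cyclically, as the loop/goto above does */
		seg = (seg + 1) % N_SEGS;
	} while (seg == RESERVED_ID && N_SEGS > 1);

	latest_seg = seg;

	return(seg);
}

int
main(void)
{
	int	i;

	for (i = 0; i < 6; i++) {
		printf("assigned segment %u\n", assign_seg_round_robin());
	}
	/* prints 1 2 3 1 2 3: the reserved id 0 is skipped while
	other segments exist */
	return(0);
}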
/**********************************************************************
Puts a file page which has no hash index to the free list. */
static
void
buf_LRU_block_free_hashed_page(
/*===========================*/
	buf_block_t*	block)	/* in: block, must contain a file page and
				be in a state where it can be freed */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(mutex_own(&block->mutex));

	ut_a(block->state == BUF_BLOCK_REMOVE_HASH);

	block->state = BUF_BLOCK_MEMORY;

	buf_LRU_block_free_non_file_page(block);
}
/**********************************************************************
Same as que_thr_end_wait, but no parameter next_thr available. */
void
que_thr_end_wait_no_next_thr(
/*=========================*/
	que_thr_t*	thr)	/* in: query thread in the QUE_THR_LOCK_WAIT,
				or QUE_THR_PROCEDURE_WAIT, or
				QUE_THR_SIG_REPLY_WAIT state */
{
	ibool	was_active;

	ut_a(thr->state == QUE_THR_LOCK_WAIT);	/* In MySQL this is the
						only possible state here */
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(thr);
	ut_ad((thr->state == QUE_THR_LOCK_WAIT)
	      || (thr->state == QUE_THR_PROCEDURE_WAIT)
	      || (thr->state == QUE_THR_SIG_REPLY_WAIT));

	was_active = thr->is_active;

	que_thr_move_to_run_state(thr);

	if (was_active) {

		return;
	}

	/* In MySQL we let the OS thread (not just the query thread) wait
	for the lock to be released: */

	srv_release_mysql_thread_if_suspended(thr);

	/* srv_que_task_enqueue_low(thr); */
}
/************************************************************************
Inserts a modified block into the flush list in its sorted position. */
void
buf_flush_insert_sorted_into_flush_list(
/*====================================*/
	buf_block_t*	block)	/* in: block which is modified */
{
	buf_block_t*	prev_b;
	buf_block_t*	b;

	ut_ad(mutex_own(&(buf_pool->mutex)));

	prev_b = NULL;
	b = UT_LIST_GET_FIRST(buf_pool->flush_list);

	while (b && (ut_dulint_cmp(b->oldest_modification,
				   block->oldest_modification) > 0)) {
		prev_b = b;
		b = UT_LIST_GET_NEXT(flush_list, b);
	}

	if (prev_b == NULL) {
		UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, block);
	} else {
		UT_LIST_INSERT_AFTER(flush_list, buf_pool->flush_list,
				     prev_b, block);
	}

	ut_ad(buf_flush_validate_low());
}
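/* Illustrative standalone sketch, not InnoDB code: the same sorted-insert
technique as buf_flush_insert_sorted_into_flush_list() above, applied to a
plain singly linked list kept in descending key order. The node type and
names are hypothetical; the key plays the role of oldest_modification. */

#include <stddef.h>

typedef struct node_t {
	unsigned long	key;
	struct node_t*	next;
} node_t;

static void
insert_sorted_desc(node_t** head, node_t* n)
{
	node_t*	prev = NULL;
	node_t*	cur = *head;

	/* Walk past all nodes with a strictly greater key, mirroring
	the while loop over the flush list above */
	while (cur != NULL && cur->key > n->key) {
		prev = cur;
		cur = cur->next;
	}

	n->next = cur;

	if (prev == NULL) {
		*head = n;	/* corresponds to UT_LIST_ADD_FIRST */
	} else {
		prev->next = n;	/* corresponds to UT_LIST_INSERT_AFTER */
	}
}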
/************************************************************************
Frees the global purge system control structure. */
UNIV_INTERN
void
trx_purge_sys_close(void)
/*======================*/
{
	ut_ad(!mutex_own(&kernel_mutex));

	que_graph_free(purge_sys->query);

	ut_a(purge_sys->sess->trx->is_purge);
	purge_sys->sess->trx->conc_state = TRX_NOT_STARTED;
	sess_close(purge_sys->sess);
	purge_sys->sess = NULL;

	if (purge_sys->view != NULL) {
		/* Acquiring the kernel mutex is a pre-condition of
		read_view_close(); we do not otherwise need it here. */
		mutex_enter(&kernel_mutex);

		read_view_close(purge_sys->view);
		purge_sys->view = NULL;

		mutex_exit(&kernel_mutex);
	}

	trx_undo_arr_free(purge_sys->arr);

	rw_lock_free(&purge_sys->latch);
	mutex_free(&purge_sys->mutex);

	mem_heap_free(purge_sys->heap);
	mem_free(purge_sys);

	purge_sys = NULL;
}
/**********************************************************************
Removes a block from the LRU list. */
UNIV_INLINE
void
buf_LRU_remove_block(
/*=================*/
	buf_block_t*	block)	/* in: control block */
{
	ut_ad(buf_pool);
	ut_ad(block);
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
	ut_a(block->in_LRU_list);

	/* If the LRU_old pointer is defined and points to just this block,
	move it backward one step */

	if (block == buf_pool->LRU_old) {

		/* Below: the previous block is guaranteed to exist, because
		the LRU_old pointer is only allowed to differ by the
		tolerance value from strict 3/8 of the LRU list length. */

		buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, block);
		(buf_pool->LRU_old)->old = TRUE;

		buf_pool->LRU_old_len++;
		ut_a(buf_pool->LRU_old);
	}

	/* Remove the block from the LRU list */
	UT_LIST_REMOVE(LRU, buf_pool->LRU, block);
	block->in_LRU_list = FALSE;

	if (srv_use_awe && block->frame) {
		/* Remove from the list of mapped pages */

		UT_LIST_REMOVE(awe_LRU_free_mapped,
			       buf_pool->awe_LRU_free_mapped, block);
	}

	/* If the LRU list is so short that LRU_old is not defined, return */
	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {

		buf_pool->LRU_old = NULL;

		return;
	}

	ut_ad(buf_pool->LRU_old);

	/* Update the LRU_old_len field if necessary */
	if (block->old) {

		buf_pool->LRU_old_len--;
	}

	/* Adjust the length of the old block list if necessary */
	buf_LRU_old_adjust_len();
}
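/* Illustrative standalone sketch, not InnoDB code: the invariant that
buf_LRU_remove_block() relies on is that the old-block boundary stays
within a tolerance of 3/8 of the list length; a buf_LRU_old_adjust_len-
style step restores it after insertions and removals. The names and the
tolerance value here are hypothetical. */

#define OLD_TOLERANCE	2

static unsigned	lru_len = 16;	/* current LRU list length */
static unsigned	old_len = 6;	/* blocks in the "old" tail section */

static void
adjust_old_len(void)
{
	unsigned	target = 3 * lru_len / 8;

	/* Step the boundary one block at a time toward the target,
	but only once it drifts outside the tolerance band */
	while (old_len + OLD_TOLERANCE < target) {
		old_len++;
	}
	while (old_len > target + OLD_TOLERANCE) {
		old_len--;
	}
}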
/*************************************************************************
Closes a read view. */
void
read_view_close(
/*============*/
	read_view_t*	view)	/* in: read view */
{
	ut_ad(mutex_own(&kernel_mutex));

	UT_LIST_REMOVE(view_list, trx_sys->view_list, view);
}
/**********************************************************************
Adds a block to the LRU list end. */
UNIV_INLINE
void
buf_LRU_add_block_to_end_low(
/*=========================*/
	buf_block_t*	block)	/* in: control block */
{
	buf_block_t*	last_block;

	ut_ad(buf_pool);
	ut_ad(block);
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

	block->old = TRUE;

	last_block = UT_LIST_GET_LAST(buf_pool->LRU);

	if (last_block) {
		block->LRU_position = last_block->LRU_position;
	} else {
		block->LRU_position = buf_pool_clock_tic();
	}

	ut_a(!block->in_LRU_list);
	UT_LIST_ADD_LAST(LRU, buf_pool->LRU, block);
	block->in_LRU_list = TRUE;

	if (srv_use_awe && block->frame) {
		/* Add to the list of mapped pages */

		UT_LIST_ADD_LAST(awe_LRU_free_mapped,
				 buf_pool->awe_LRU_free_mapped, block);
	}

	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {

		buf_pool->LRU_old_len++;
	}

	if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {

		ut_ad(buf_pool->LRU_old);

		/* Adjust the length of the old block list if necessary */

		buf_LRU_old_adjust_len();

	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

		/* The LRU list is now long enough for LRU_old to become
		defined: init it */

		buf_LRU_old_init();
	}
}
/***************************************************************//**
Creates an index tree for the index if it is not a member of a cluster.
@return	DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
static
ulint
dict_create_index_tree_step(
/*========================*/
	ind_node_t*	node)	/*!< in: index create node */
{
	dict_index_t*	index;
	dict_table_t*	sys_indexes;
	dict_table_t*	table;
	dtuple_t*	search_tuple;
	ulint		zip_size;
	btr_pcur_t	pcur;
	mtr_t		mtr;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	index = node->index;
	table = node->table;

	sys_indexes = dict_sys->sys_indexes;

	/* Run a mini-transaction in which the index tree is allocated for
	the index and its root address is written to the index entry in
	sys_indexes */

	mtr_start(&mtr);

	search_tuple = dict_create_search_tuple(node->ind_row, node->heap);

	btr_pcur_open(UT_LIST_GET_FIRST(sys_indexes->indexes),
		      search_tuple, PAGE_CUR_L, BTR_MODIFY_LEAF,
		      &pcur, &mtr);

	btr_pcur_move_to_next_user_rec(&pcur, &mtr);

	zip_size = dict_table_zip_size(index->table);

	node->page_no = btr_create(index->type, index->space, zip_size,
				   index->id, index, &mtr);
	/* printf("Created a new index tree in space %lu root page %lu\n",
	index->space, index->page_no); */

	page_rec_write_index_page_no(btr_pcur_get_rec(&pcur),
				     DICT_SYS_INDEXES_PAGE_NO_FIELD,
				     node->page_no, &mtr);

	btr_pcur_close(&pcur);

	mtr_commit(&mtr);

	if (node->page_no == FIL_NULL) {

		return(DB_OUT_OF_FILE_SPACE);
	}

	return(DB_SUCCESS);
}
/***********************************************************************//**
Adds a query graph to the session's list of graphs. */
UNIV_INTERN
void
que_graph_publish(
/*==============*/
	que_t*	graph,	/*!< in: graph */
	sess_t*	sess)	/*!< in: session */
{
	ut_ad(mutex_own(&kernel_mutex));

	UT_LIST_ADD_LAST(graphs, sess->graphs, graph);
}
/**********************************************************************//**
Allocate a block. The thread calling this function must hold
buf_pool->mutex and must not hold buf_pool->zip_mutex or any
block->mutex. The buf_pool->mutex may be released and reacquired.
@return	allocated block, never NULL */
UNIV_INTERN
void*
buf_buddy_alloc_low(
/*================*/
	buf_pool_t*	buf_pool,	/*!< in/out: buffer pool instance */
	ulint		i,		/*!< in: index of
					buf_pool->zip_free[],
					or BUF_BUDDY_SIZES */
	ibool*		lru)		/*!< in: pointer to a variable that
					will be assigned TRUE if storage was
					allocated from the LRU list and
					buf_pool->mutex was temporarily
					released */
{
	buf_block_t*	block;

	ut_ad(lru);
	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));

	if (i < BUF_BUDDY_SIZES) {
		/* Try to allocate from the buddy system. */
		block = buf_buddy_alloc_zip(buf_pool, i);

		if (block) {
			goto func_exit;
		}
	}

	/* Try allocating from the buf_pool->free list. */
	block = buf_LRU_get_free_only(buf_pool);

	if (block) {
		goto alloc_big;
	}

	/* Try replacing an uncompressed page in the buffer pool. */
	buf_pool_mutex_exit(buf_pool);
	block = buf_LRU_get_free_block(buf_pool);
	*lru = TRUE;
	buf_pool_mutex_enter(buf_pool);

alloc_big:
	buf_buddy_block_register(block);

	block = buf_buddy_alloc_from(
		buf_pool, block->frame, i, BUF_BUDDY_SIZES);

func_exit:
	buf_pool->buddy_stat[i].used++;
	return(block);
}
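/* Illustrative standalone sketch, not InnoDB code: the same fallback
chain as buf_buddy_alloc_low() above, trying progressively more expensive
sources and recording when the costly path was taken. All helper names
here are hypothetical stubs. */

#include <stddef.h>
#include <stdlib.h>

static void*
alloc_from_free_list(void)
{
	return(NULL);		/* stub: pretend the free list is empty */
}

static void*
alloc_by_eviction(void)
{
	return(malloc(4096));	/* stub: stand-in for evicting a page */
}

static void*
alloc_with_fallback(int* used_eviction)
{
	void*	p = alloc_from_free_list();	/* cheap path first */

	if (p != NULL) {
		*used_eviction = 0;
		return(p);
	}

	/* Fall back to the expensive path; corresponds to *lru = TRUE
	in the function above */
	*used_eviction = 1;
	return(alloc_by_eviction());
}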
/*******************************************************************//**
Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
table cache buffer. Cache must be locked for write. */
static
void
fetch_data_into_cache(
/*==================*/
	trx_i_s_cache_t*	cache)	/*!< in/out: cache */
{
	trx_t*			trx;
	i_s_trx_row_t*		trx_row;
	i_s_locks_row_t*	requested_lock_row;

	ut_ad(mutex_own(&kernel_mutex));

	trx_i_s_cache_clear(cache);

	/* We iterate over the list of all transactions and add each one
	to innodb_trx's cache. We also add all locks that are relevant
	to each transaction into innodb_locks' and innodb_lock_waits'
	caches. */

	for (trx = UT_LIST_GET_FIRST(trx_sys->trx_list);
	     trx != NULL;
	     trx = UT_LIST_GET_NEXT(trx_list, trx)) {

		if (!add_trx_relevant_locks_to_cache(cache, trx,
						     &requested_lock_row)) {

			cache->is_truncated = TRUE;

			return;
		}

		trx_row = (i_s_trx_row_t*)
			table_cache_create_empty_row(&cache->innodb_trx,
						     cache);

		/* memory could not be allocated */
		if (trx_row == NULL) {

			cache->is_truncated = TRUE;

			return;
		}

		if (!fill_trx_row(trx_row, trx, requested_lock_row, cache)) {

			/* memory could not be allocated */
			cache->innodb_trx.rows_used--;

			cache->is_truncated = TRUE;

			return;
		}
	}

	cache->is_truncated = FALSE;
}
/*************************************************************************
Closes a session, freeing the memory occupied by it. */
static
void
sess_close(
/*=======*/
	sess_t*	sess)	/* in, own: session object */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(sess->trx == NULL);

	mem_free(sess);
}
/**********************************************************************//**
Enqueues a task to server task queue and releases a worker thread, if there
is a suspended one. */
UNIV_INTERN
void
srv_que_task_enqueue_low(
/*=====================*/
	que_thr_t*	thr)	/*!< in: query thread */
{
	ut_ad(thr);
	ut_ad(mutex_own(&kernel_mutex));

	UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr);

	srv_release_threads(SRV_WORKER, 1);
}
/************************************************************************
Loads the index definitions of a system table. */
void
dict_load_sys_table(
/*================*/
	dict_table_t*	table)	/* in: system table */
{
	mem_heap_t*	heap;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	heap = mem_heap_create(1000);

	dict_load_indexes(table, heap);

	mem_heap_free(heap);
}
/***************************************************************//**
Builds an index definition row to insert.
@return	DB_SUCCESS or error code */
static
ulint
dict_build_index_def_step(
/*======================*/
	que_thr_t*	thr,	/*!< in: query thread */
	ind_node_t*	node)	/*!< in: index create node */
{
	dict_table_t*	table;
	dict_index_t*	index;
	dtuple_t*	row;
	trx_t*		trx;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	trx = thr_get_trx(thr);

	index = node->index;

	table = dict_table_get_low(index->table_name);

	if (table == NULL) {
		return(DB_TABLE_NOT_FOUND);
	}

	trx->table_id = table->id;

	node->table = table;

	ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
	      || dict_index_is_clust(index));

	dict_hdr_get_new_id(NULL, &index->id, NULL);

	/* Inherit the space id from the table; we store all indexes of a
	table in the same tablespace */

	index->space = table->space;
	node->page_no = FIL_NULL;
	row = dict_create_sys_indexes_tuple(index, node->heap);
	node->ind_row = row;

	ins_node_set_new_row(node->ind_def, row);

	/* Note that the index was created by this transaction. */
	index->trx_id = trx->id;

	return(DB_SUCCESS);
}
/****************************************************************//**
Starts a new transaction.
@return	TRUE */
UNIV_INTERN
ibool
trx_start_low(
/*==========*/
	trx_t*	trx,	/*!< in: transaction */
	ulint	rseg_id)/*!< in: rollback segment id; if ULINT_UNDEFINED
			is passed, the system chooses the rollback segment
			automatically in a round-robin fashion */
{
	trx_rseg_t*	rseg;

	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->rseg == NULL);

	if (trx->is_purge) {
		trx->id = ut_dulint_zero;
		trx->conc_state = TRX_ACTIVE;
		trx->start_time = time(NULL);

		return(TRUE);
	}

	ut_ad(trx->conc_state != TRX_ACTIVE);

	if (rseg_id == ULINT_UNDEFINED) {

		rseg_id = trx_assign_rseg();
	}

	rseg = trx_sys_get_nth_rseg(trx_sys, rseg_id);

	trx->id = trx_sys_get_new_trx_id();

	/* The initial value for trx->no: ut_dulint_max is used in
	read_view_open_now: */

	trx->no = ut_dulint_max;

	trx->rseg = rseg;

	trx->conc_state = TRX_ACTIVE;
	trx->start_time = time(NULL);

	UT_LIST_ADD_FIRST(trx_list, trx_sys->trx_list, trx);

	return(TRUE);
}
/**********************************************************************
Ends signal handling. If the session is in the error state, passes control
to the error handling routine of the graph. */
void
trx_end_signal_handling(
/*====================*/
	trx_t*	trx)	/* in: trx */
{
	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(trx->handling_signals == TRUE);

	trx->handling_signals = FALSE;

	trx->graph = trx->graph_before_signal_handling;

	if (trx->graph && (trx->sess->state == SESS_ERROR)) {

		que_fork_error_handle(trx, trx->graph);
	}
}
/************************************************************************
Inserts a modified block into the flush list. */
void
buf_flush_insert_into_flush_list(
/*=============================*/
	buf_block_t*	block)	/* in: block which is modified */
{
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

	ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
	      || (ut_dulint_cmp((UT_LIST_GET_FIRST(buf_pool->flush_list))
				->oldest_modification,
				block->oldest_modification) <= 0));

	UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, block);

	ut_ad(buf_flush_validate_low());
}