/**********************************************************************//**
Allocate a block from a bigger object.
@return	allocated block */
static
void*
buf_buddy_alloc_from(
/*=================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	void*		buf,		/*!< in: a block that is free to use */
	ulint		i,		/*!< in: index of buf_pool->zip_free[] */
	ulint		j)		/*!< in: size of buf as an index
					of buf_pool->zip_free[] */
{
	ulint	offs	= BUF_BUDDY_LOW << j;

	ut_ad(j <= BUF_BUDDY_SIZES);
	ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
	ut_ad(j >= i);
	ut_ad(!ut_align_offset(buf, offs));

	/* Add the unused parts of the block to the free lists. */
	while (j > i) {
		buf_page_t*	bpage;

		offs >>= 1;
		j--;

		bpage = (buf_page_t*) ((byte*) buf + offs);
		ut_d(memset(bpage, j, BUF_BUDDY_LOW << j));
		bpage->state = BUF_BLOCK_ZIP_FREE;
		ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));
		buf_buddy_add_to_free(buf_pool, bpage, j);
	}

	return(buf);
}
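/* Illustrative sketch (not part of InnoDB): the split loop above halves a
donor block of size BUF_BUDDY_LOW << j until it reaches the requested size
BUF_BUDDY_LOW << i, pushing the unused upper half of each step onto the free
list of the smaller size class.  The standalone program below reproduces only
the offset arithmetic, with a hypothetical BUDDY_LOW constant, so the
sequence of freed fragments can be printed and checked. */

#include <stdio.h>

#define BUDDY_LOW	1024	/* assumed smallest buddy block size */

int
main(void)
{
	unsigned	i = 0;			/* requested size class */
	unsigned	j = 3;			/* size class of the donor block */
	unsigned long	offs = BUDDY_LOW << j;	/* size of the donor block */

	/* Split until the block has the requested size; each iteration
	frees the upper half of the current block. */
	while (j > i) {
		offs >>= 1;
		j--;

		printf("free fragment at offset %lu, size %u\n",
		       offs, BUDDY_LOW << j);
	}

	printf("caller keeps [0, %u)\n", BUDDY_LOW << i);

	return(0);
}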
/***************************************************************//**
Commits a mini-transaction. */
UNIV_INTERN
void
mtr_commit(
/*=======*/
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
	ut_ad(mtr);
	ut_ad(mtr->magic_n == MTR_MAGIC_N);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad(!mtr->inside_ibuf);
	ut_d(mtr->state = MTR_COMMITTING);

#ifndef UNIV_HOTBACKUP
	/* This is a dirty read, for debugging. */
	ut_ad(!recv_no_log_write);

	if (mtr->modifications && mtr->n_log_recs) {
		mtr_log_reserve_and_write(mtr);
	}

	mtr_memo_pop_all(mtr);
#endif /* !UNIV_HOTBACKUP */

	ut_d(mtr->state = MTR_COMMITTED);
	dyn_array_free(&(mtr->memo));
	dyn_array_free(&(mtr->log));
}
/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the rw-lock is freed. Removes an rw-lock object from the global list. The
rw-lock is checked to be in the non-locked state. */
UNIV_INTERN
void
rw_lock_free_func(
/*==============*/
	rw_lock_t*	lock)	/*!< in: rw-lock */
{
	ut_ad(rw_lock_validate(lock));
	ut_a(lock->lock_word == X_LOCK_DECR);

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	mutex_free(rw_lock_get_mutex(lock));
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	mutex_enter(&rw_lock_list_mutex);
	os_event_free(lock->event);

	os_event_free(lock->wait_ex_event);

	ut_ad(UT_LIST_GET_PREV(list, lock) == NULL
	      || UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
	ut_ad(UT_LIST_GET_NEXT(list, lock) == NULL
	      || UT_LIST_GET_NEXT(list, lock)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_REMOVE(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);

	ut_d(lock->magic_n = 0);
}
/*************************************************************//**
Creates a hash table with >= n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
@return	own: created table */
UNIV_INTERN
hash_table_t*
hash_create(
/*========*/
	ulint	n)	/*!< in: number of array cells */
{
	hash_cell_t*	array;
	ulint		prime;
	hash_table_t*	table;

	prime = ut_find_prime(n);

	table = mem_alloc(sizeof(hash_table_t));

	array = ut_malloc(sizeof(hash_cell_t) * prime);

	table->array = array;
	table->n_cells = prime;
#ifndef UNIV_HOTBACKUP
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	table->adaptive = FALSE;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	table->n_mutexes = 0;
	table->mutexes = NULL;
	table->heaps = NULL;
#endif /* !UNIV_HOTBACKUP */
	table->heap = NULL;
	ut_d(table->magic_n = HASH_TABLE_MAGIC_N);

	/* Initialize the cell array */
	hash_table_clear(table);

	return(table);
}
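/* Illustrative sketch (not InnoDB code): hash_create() sizes the cell array
with ut_find_prime(), i.e. a prime slightly larger than the requested n,
which spreads keys more evenly under modulo-based cell selection.  A minimal
stand-in for that idea is shown below; the real ut_find_prime() is more
elaborate (it also steers clear of primes near powers of 2). */

#include <stdio.h>

/* Return nonzero if m is prime (trial division; fine for table sizing). */
static int
is_prime(unsigned long m)
{
	unsigned long	d;

	if (m < 2) {
		return(0);
	}

	for (d = 2; d * d <= m; d++) {
		if (m % d == 0) {
			return(0);
		}
	}

	return(1);
}

/* Return the smallest prime >= n: a simplified ut_find_prime(). */
static unsigned long
find_prime(unsigned long n)
{
	while (!is_prime(n)) {
		n++;
	}

	return(n);
}

int
main(void)
{
	/* A table asked for 1000 cells would end up with 1009 of them. */
	printf("%lu\n", find_prime(1000));
	return(0);
}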
/**********************************************************************//**
Creates a table memory object.
@return	own: table object */
UNIV_INTERN
dict_table_t*
dict_mem_table_create(
/*==================*/
	const char*	name,	/*!< in: table name */
	ulint		space,	/*!< in: space where the clustered index of
				the table is placed; this parameter is
				ignored if the table is made a member of
				a cluster */
	ulint		n_cols,	/*!< in: number of columns */
	ulint		flags)	/*!< in: table flags */
{
	dict_table_t*	table;
	mem_heap_t*	heap;

	ut_ad(name);
	ut_a(!(flags & (~0 << DICT_TF2_BITS)));

	heap = mem_heap_create(DICT_HEAP_SIZE);

	table = mem_heap_zalloc(heap, sizeof(dict_table_t));

	table->heap = heap;

	table->flags = (unsigned int) flags;
	table->name = mem_heap_strdup(heap, name);
	table->space = (unsigned int) space;
	table->n_cols = (unsigned int) (n_cols + DATA_N_SYS_COLS);

	table->cols = mem_heap_alloc(heap, (n_cols + DATA_N_SYS_COLS)
				     * sizeof(dict_col_t));

	ut_d(table->magic_n = DICT_TABLE_MAGIC_N);
	return(table);
}
/***************************************************************//**
Commits a mini-transaction. */
UNIV_INTERN
void
mtr_commit(
/*=======*/
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
#ifndef UNIV_HOTBACKUP
	ibool		write_log;
#endif /* !UNIV_HOTBACKUP */

	ut_ad(mtr);
	ut_ad(mtr->magic_n == MTR_MAGIC_N);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_d(mtr->state = MTR_COMMITTING);

#ifndef UNIV_HOTBACKUP
	/* This is a dirty read, for debugging. */
	ut_ad(!recv_no_log_write);

	write_log = mtr->modifications && mtr->n_log_recs;

	if (write_log) {
		mtr_log_reserve_and_write(mtr);

		mtr_memo_note_modification_all(mtr);
	}

	/* We first update the modification info to buffer pages, and only
	after that release the log mutex: this guarantees that when the log
	mutex is free, all buffer pages contain an up-to-date info of their
	modifications. This fact is used in making a checkpoint when we look
	at the oldest modification of any page in the buffer pool. It is also
	required when we insert modified buffer pages in to the flush list
	which must be sorted on oldest_modification. */

	if (write_log) {
		log_release();
	}

	/* All unlocking has been moved here, after log_sys mutex
	release. */
	mtr_memo_pop_all(mtr);
#endif /* !UNIV_HOTBACKUP */

	ut_d(mtr->state = MTR_COMMITTED);
	dyn_array_free(&(mtr->memo));
	dyn_array_free(&(mtr->log));
}
/**********************************************************************//**
Try to allocate a block from buf_pool->zip_free[].
@return	allocated block, or NULL if buf_pool->zip_free[] was empty */
static
void*
buf_buddy_alloc_zip(
/*================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint		i)		/*!< in: index of buf_pool->zip_free[] */
{
	buf_page_t*	bpage;

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_a(i < BUF_BUDDY_SIZES);
	ut_a(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));

	ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));

	bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);

	if (bpage) {
		ut_a(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);

		buf_buddy_remove_from_free(buf_pool, bpage, i);
	} else if (i + 1 < BUF_BUDDY_SIZES) {
		/* Attempt to split. */
		bpage = buf_buddy_alloc_zip(buf_pool, i + 1);

		if (bpage) {
			buf_page_t*	buddy = (buf_page_t*)
				(((char*) bpage) + (BUF_BUDDY_LOW << i));

			ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
			ut_d(memset(buddy, i, BUF_BUDDY_LOW << i));
			buddy->state = BUF_BLOCK_ZIP_FREE;
			buf_buddy_add_to_free(buf_pool, buddy, i);
		}
	}

	if (bpage) {
		ut_d(memset(bpage, ~i, BUF_BUDDY_LOW << i));
		UNIV_MEM_ALLOC(bpage, BUF_BUDDY_SIZES << i);
	}

	return(bpage);
}
/****************************************************************//**
Free a table memory object. */
UNIV_INTERN
void
dict_mem_table_free(
/*================*/
	dict_table_t*	table)		/*!< in: table */
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);

	ut_d(table->cached = FALSE);

	mem_heap_free(table->heap);
}
/**********************************************************************//**
Creates a table memory object.
@return	own: table object */
UNIV_INTERN
dict_table_t*
dict_mem_table_create(
/*==================*/
	const char*	name,	/*!< in: table name */
	ulint		space,	/*!< in: space where the clustered index of
				the table is placed; this parameter is
				ignored if the table is made a member of
				a cluster */
	ulint		n_cols,	/*!< in: number of columns */
	ulint		flags)	/*!< in: table flags */
{
	dict_table_t*	table;
	mem_heap_t*	heap;

	ut_ad(name);
	ut_a(!(flags & (~0 << DICT_TF2_BITS)));

	heap = mem_heap_create(DICT_HEAP_SIZE);

	table = mem_heap_zalloc(heap, sizeof(dict_table_t));

	table->heap = heap;

	table->flags = (unsigned int) flags;
	table->name = ut_malloc(strlen(name) + 1);
	memcpy(table->name, name, strlen(name) + 1);
	table->space = (unsigned int) space;
	table->n_cols = (unsigned int) (n_cols + DATA_N_SYS_COLS);

	table->cols = mem_heap_alloc(heap, (n_cols + DATA_N_SYS_COLS)
				     * sizeof(dict_col_t));

#ifndef UNIV_HOTBACKUP
	table->autoinc_lock = mem_heap_alloc(heap, lock_get_size());

	mutex_create(autoinc_mutex_key,
		     &table->autoinc_mutex, SYNC_DICT_AUTOINC_MUTEX);

	table->autoinc = 0;

	/* The number of transactions that are either waiting on the
	AUTOINC lock or have been granted the lock. */
	table->n_waiting_or_granted_auto_inc_locks = 0;

	table->is_corrupt = FALSE;
#endif /* !UNIV_HOTBACKUP */

	ut_d(table->magic_n = DICT_TABLE_MAGIC_N);
	return(table);
}
/***************************************************************//**
Commits a mini-transaction. */
UNIV_INTERN
void
mtr_commit(
/*=======*/
	mtr_t*	mtr)	/*!< in: mini-transaction */
{
	ut_ad(mtr);
	ut_ad(mtr->magic_n == MTR_MAGIC_N);
	ut_ad(mtr->state == MTR_ACTIVE);
	ut_ad(!mtr->inside_ibuf);
	ut_d(mtr->state = MTR_COMMITTING);

#ifndef UNIV_HOTBACKUP
	/* This is a dirty read, for debugging. */
	ut_ad(!recv_no_log_write);

	if (mtr->modifications && mtr->n_log_recs) {
		mtr_log_reserve_and_write(mtr);
	}

	mtr_memo_pop_all(mtr);
#endif /* !UNIV_HOTBACKUP */

	dyn_array_free(&(mtr->memo));
	dyn_array_free(&(mtr->log));

#ifdef UNIV_DEBUG_VALGRIND
	/* Declare everything uninitialized except
	mtr->start_lsn, mtr->end_lsn and mtr->state. */
	{
		ib_uint64_t	start_lsn	= mtr->start_lsn;
		ib_uint64_t	end_lsn		= mtr->end_lsn;

		UNIV_MEM_INVALID(mtr, sizeof *mtr);

		mtr->start_lsn = start_lsn;
		mtr->end_lsn = end_lsn;
	}
#endif /* UNIV_DEBUG_VALGRIND */

	ut_d(mtr->state = MTR_COMMITTED);
}
/**********************************************************************//**
Deallocate a buffer frame of UNIV_PAGE_SIZE. */
static
void
buf_buddy_block_free(
/*=================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	void*		buf)		/*!< in: buffer frame to deallocate */
{
	const ulint	fold	= BUF_POOL_ZIP_FOLD_PTR(buf);
	buf_page_t*	bpage;
	buf_block_t*	block;

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_a(!ut_align_offset(buf, UNIV_PAGE_SIZE));

	HASH_SEARCH(hash, buf_pool->zip_hash, fold, buf_page_t*, bpage,
		    ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY
			  && bpage->in_zip_hash && !bpage->in_page_hash),
		    ((buf_block_t*) bpage)->frame == buf);
	ut_a(bpage);
	ut_a(buf_page_get_state(bpage) == BUF_BLOCK_MEMORY);
	ut_ad(!bpage->in_page_hash);
	ut_ad(bpage->in_zip_hash);
	ut_d(bpage->in_zip_hash = FALSE);
	HASH_DELETE(buf_page_t, hash, buf_pool->zip_hash, fold, bpage);

	ut_d(memset(buf, 0, UNIV_PAGE_SIZE));
	UNIV_MEM_INVALID(buf, UNIV_PAGE_SIZE);

	block = (buf_block_t*) bpage;
	mutex_enter(&block->mutex);
	buf_LRU_block_free_non_file_page(block);
	mutex_exit(&block->mutex);

	ut_ad(buf_pool->buddy_n_frames > 0);
	ut_d(buf_pool->buddy_n_frames--);
}
/**********************************************************************//**
Allocate a buffer block to the buddy allocator. */
static
void
buf_buddy_block_register(
/*=====================*/
	buf_block_t*	block)	/*!< in: buffer frame to allocate */
{
	buf_pool_t*	buf_pool = buf_pool_from_block(block);
	const ulint	fold = BUF_POOL_ZIP_FOLD(block);

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_ad(buf_block_get_state(block) == BUF_BLOCK_READY_FOR_USE);

	buf_block_set_state(block, BUF_BLOCK_MEMORY);

	ut_a(block->frame);
	ut_a(!ut_align_offset(block->frame, UNIV_PAGE_SIZE));

	ut_ad(!block->page.in_page_hash);
	ut_ad(!block->page.in_zip_hash);
	ut_d(block->page.in_zip_hash = TRUE);
	HASH_INSERT(buf_page_t, hash, buf_pool->zip_hash, fold, &block->page);

	ut_d(buf_pool->buddy_n_frames++);
}
/****************************************************************//**
Free a table memory object. */
UNIV_INTERN
void
dict_mem_table_free(
/*================*/
	dict_table_t*	table)		/*!< in: table */
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);

	ut_d(table->cached = FALSE);

#ifndef UNIV_HOTBACKUP
	mutex_free(&(table->autoinc_mutex));
#endif /* !UNIV_HOTBACKUP */

	ut_free(table->name);
	mem_heap_free(table->heap);
}
/********************************************************************//**
Creates the global purge system control structure and inits the history
mutex. */
UNIV_INTERN
void
trx_purge_sys_create(
/*=================*/
	ib_bh_t*	ib_bh)	/*!< in, own: UNDO log min binary heap */
{
	ut_ad(mutex_own(&kernel_mutex));

	purge_sys = mem_zalloc(sizeof(trx_purge_t));

	/* Take ownership of ib_bh, we are responsible for freeing it. */
	purge_sys->ib_bh = ib_bh;
	purge_sys->state = TRX_STOP_PURGE;

	purge_sys->n_pages_handled = 0;

	purge_sys->purge_trx_no = 0;
	purge_sys->purge_undo_no = 0;
	purge_sys->next_stored = FALSE;
	ut_d(purge_sys->done_trx_no = 0);

	rw_lock_create(trx_purge_latch_key,
		       &purge_sys->latch, SYNC_PURGE_LATCH);

	mutex_create(
		purge_sys_bh_mutex_key, &purge_sys->bh_mutex,
		SYNC_PURGE_QUEUE);

	purge_sys->heap = mem_heap_create(256);

	purge_sys->arr = trx_undo_arr_create();

	purge_sys->sess = sess_open();

	purge_sys->trx = purge_sys->sess->trx;

	purge_sys->trx->is_purge = 1;

	ut_a(trx_start_low(purge_sys->trx, ULINT_UNDEFINED));

	purge_sys->query = trx_purge_graph_build();

	purge_sys->prebuilt_view = read_view_oldest_copy_or_open_new(0, NULL);
	purge_sys->view = purge_sys->prebuilt_view;
}
/********************************************************************//**
Does a truncate if the purge array is empty. NOTE that when this function is
called, the caller must not have any latches on undo log pages!
@return	TRUE if array empty */
UNIV_INLINE
ibool
trx_purge_truncate_if_arr_empty(void)
/*=================================*/
{
	ut_ad(mutex_own(&(purge_sys->mutex)));

	if (purge_sys->arr->n_used == 0) {
		ut_d(purge_sys->done_trx_no = purge_sys->purge_trx_no);

		trx_purge_truncate_history();

		return(TRUE);
	}

	return(FALSE);
}
/********************************************************************//**
Creates the global purge system control structure and inits the history
mutex. */
UNIV_INTERN
void
trx_purge_sys_create(void)
/*======================*/
{
	ut_ad(mutex_own(&kernel_mutex));

	purge_sys = mem_alloc(sizeof(trx_purge_t));

	purge_sys->state = TRX_STOP_PURGE;

	purge_sys->n_pages_handled = 0;

	purge_sys->purge_trx_no = ut_dulint_zero;
	purge_sys->purge_undo_no = ut_dulint_zero;
	purge_sys->next_stored = FALSE;
	ut_d(purge_sys->done_trx_no = ut_dulint_zero);

	rw_lock_create(&purge_sys->latch, SYNC_PURGE_LATCH);

	mutex_create(&purge_sys->mutex, SYNC_PURGE_SYS);

	purge_sys->heap = mem_heap_create(256);

	purge_sys->arr = trx_undo_arr_create();

	purge_sys->sess = sess_open();

	purge_sys->trx = purge_sys->sess->trx;

	purge_sys->trx->is_purge = 1;

	ut_a(trx_start_low(purge_sys->trx, ULINT_UNDEFINED));

	purge_sys->query = trx_purge_graph_build();

	purge_sys->view = read_view_oldest_copy_or_open_new(ut_dulint_zero,
							    purge_sys->heap);
}
/************************************************************************
Updates the flush system data structures when a write is completed. */
void
buf_flush_write_complete(
/*=====================*/
	buf_block_t*	block)	/* in: pointer to the block in question */
{
	ut_ad(block);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&(buf_pool->mutex)));
#endif /* UNIV_SYNC_DEBUG */
	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

	block->oldest_modification = ut_dulint_zero;

	UT_LIST_REMOVE(flush_list, buf_pool->flush_list, block);

	ut_d(UT_LIST_VALIDATE(flush_list, buf_block_t, buf_pool->flush_list));

	(buf_pool->n_flush[block->flush_type])--;

	if (block->flush_type == BUF_FLUSH_LRU) {
		/* Put the block to the end of the LRU list to wait to be
		moved to the free list */

		buf_LRU_make_block_old(block);

		buf_pool->LRU_flush_ended++;
	}

	/* fprintf(stderr, "n pending flush %lu\n",
	buf_pool->n_flush[block->flush_type]); */

	if ((buf_pool->n_flush[block->flush_type] == 0)
	    && (buf_pool->init_flush[block->flush_type] == FALSE)) {

		/* The running flush batch has ended */

		os_event_set(buf_pool->no_flush[block->flush_type]);
	}
}
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
UNIV_INTERN
mem_block_t*
mem_heap_create_block(
/*==================*/
	mem_heap_t*	heap,	/*!< in: memory heap or NULL if first block
				should be created */
	ulint		n,	/*!< in: number of bytes needed for user data */
	ulint		type,	/*!< in: type of heap: MEM_HEAP_DYNAMIC or
				MEM_HEAP_BUFFER */
	const char*	file_name,/*!< in: file name where created */
	ulint		line)	/*!< in: line where created */
{
#ifndef UNIV_HOTBACKUP
	buf_block_t*	buf_block = NULL;
#endif /* !UNIV_HOTBACKUP */
	mem_block_t*	block;
	ulint		len;

	ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
	      || (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));

	if (heap && heap->magic_n != MEM_BLOCK_MAGIC_N) {
		mem_analyze_corruption(heap);
	}

	/* In dynamic allocation, calculate the size: block header + data. */
	len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);

#ifndef UNIV_HOTBACKUP
	if (type == MEM_HEAP_DYNAMIC || len < UNIV_PAGE_SIZE / 2) {

		ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);

		block = mem_area_alloc(&len, mem_comm_pool);
	} else {
		len = UNIV_PAGE_SIZE;

		if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
			/* We cannot allocate the block from the
			buffer pool, but must get the free block from
			the heap header free block field */

			buf_block = heap->free_block;
			heap->free_block = NULL;

			if (UNIV_UNLIKELY(!buf_block)) {

				return(NULL);
			}
		} else {
			buf_block = buf_block_alloc(NULL, 0);
		}

		block = (mem_block_t*) buf_block->frame;
	}

	ut_ad(block);
	block->buf_block = buf_block;
	block->free_block = NULL;
#else /* !UNIV_HOTBACKUP */
	len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);
	block = ut_malloc(len);
	ut_ad(block);
#endif /* !UNIV_HOTBACKUP */

	block->magic_n = MEM_BLOCK_MAGIC_N;
	ut_strlcpy_rev(block->file_name, file_name,
		       sizeof(block->file_name));
	block->line = line;

#ifdef MEM_PERIODIC_CHECK
	mem_pool_mutex_enter();

	if (!mem_block_list_inited) {
		mem_block_list_inited = TRUE;
		UT_LIST_INIT(mem_block_list);
	}

	UT_LIST_ADD_LAST(mem_block_list, mem_block_list, block);

	mem_pool_mutex_exit();
#endif
	mem_block_set_len(block, len);
	mem_block_set_type(block, type);
	mem_block_set_free(block, MEM_BLOCK_HEADER_SIZE);
	mem_block_set_start(block, MEM_BLOCK_HEADER_SIZE);

	if (UNIV_UNLIKELY(heap == NULL)) {
		/* This is the first block of the heap. The field
		total_size should be initialized here */
		block->total_size = len;
	} else {
		/* Not the first allocation for the heap. This block's
		total_length field should be set to undefined. */
		ut_d(block->total_size = ULINT_UNDEFINED);
		UNIV_MEM_INVALID(&block->total_size,
				 sizeof block->total_size);

		heap->total_size += len;
	}

	ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len);

	return(block);
}
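/* Illustrative sketch (not InnoDB code): the block length computed above is
"header + aligned payload".  MEM_SPACE_NEEDED() rounds the request up to the
allocator's alignment; the standalone snippet below repeats that arithmetic
with assumed values for the alignment and the header size. */

#include <stdio.h>

#define ALIGNMENT		8	/* assumed allocator alignment */
#define BLOCK_HEADER_SIZE	64	/* assumed, stands in for MEM_BLOCK_HEADER_SIZE */

/* Round n up to the next multiple of ALIGNMENT (cf. MEM_SPACE_NEEDED). */
#define SPACE_NEEDED(n)	(((n) + ALIGNMENT - 1) & ~((unsigned long) ALIGNMENT - 1))

int
main(void)
{
	unsigned long	n = 100;	/* bytes of user data requested */
	unsigned long	len = BLOCK_HEADER_SIZE + SPACE_NEEDED(n);

	/* 100 bytes round up to 104; the block then occupies 64 + 104 = 168. */
	printf("payload %lu -> block len %lu\n", n, len);
	return(0);
}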
/**********************************************************************//**
Deallocate a block. */
UNIV_INTERN
void
buf_buddy_free_low(
/*===============*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	void*		buf,		/*!< in: block to be freed, must not be
					pointed to by the buffer pool */
	ulint		i)		/*!< in: index of buf_pool->zip_free[],
					or BUF_BUDDY_SIZES */
{
	buf_page_t*	bpage;
	buf_page_t*	buddy;

	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(!mutex_own(&buf_pool->zip_mutex));
	ut_ad(i <= BUF_BUDDY_SIZES);
	ut_ad(i >= buf_buddy_get_slot(PAGE_ZIP_MIN_SIZE));
	ut_ad(buf_pool->buddy_stat[i].used > 0);

	buf_pool->buddy_stat[i].used--;
recombine:
	UNIV_MEM_ASSERT_AND_ALLOC(buf, BUF_BUDDY_LOW << i);
	((buf_page_t*) buf)->state = BUF_BLOCK_ZIP_FREE;

	if (i == BUF_BUDDY_SIZES) {
		buf_buddy_block_free(buf_pool, buf);
		return;
	}

	ut_ad(i < BUF_BUDDY_SIZES);
	ut_ad(buf == ut_align_down(buf, BUF_BUDDY_LOW << i));
	ut_ad(!buf_pool_contains_zip(buf_pool, buf));

	/* Do not recombine blocks if there are few free blocks.
	We may waste up to 15360*max_len bytes to free blocks
	(1024 + 2048 + 4096 + 8192 = 15360) */
	if (UT_LIST_GET_LEN(buf_pool->zip_free[i]) < 16) {
		goto func_exit;
	}

	/* Try to combine adjacent blocks. */
	buddy = (buf_page_t*) buf_buddy_get(((byte*) buf), BUF_BUDDY_LOW << i);

#ifndef UNIV_DEBUG_VALGRIND
	/* When Valgrind instrumentation is not enabled, we can read
	buddy->state to quickly determine that a block is not free.
	When the block is not free, buddy->state belongs to a compressed
	page frame that may be flagged uninitialized in our Valgrind
	instrumentation. */

	if (buddy->state != BUF_BLOCK_ZIP_FREE) {

		goto buddy_nonfree;
	}
#endif /* !UNIV_DEBUG_VALGRIND */

	for (bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]); bpage; ) {
		ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_FREE);

		if (bpage == buddy) {
			/* The buddy is free: recombine */
			buf_buddy_remove_from_free(buf_pool, bpage, i);
buddy_is_free:
			ut_ad(buf_page_get_state(buddy) == BUF_BLOCK_ZIP_FREE);
			ut_ad(!buf_pool_contains_zip(buf_pool, buddy));
			i++;
			buf = ut_align_down(buf, BUF_BUDDY_LOW << i);

			goto recombine;
		}

		ut_a(bpage != buf);
		UNIV_MEM_ASSERT_W(bpage, BUF_BUDDY_LOW << i);
		bpage = UT_LIST_GET_NEXT(list, bpage);
	}

#ifndef UNIV_DEBUG_VALGRIND
buddy_nonfree:
#endif /* !UNIV_DEBUG_VALGRIND */

	ut_d(BUF_BUDDY_LIST_VALIDATE(buf_pool, i));

	/* The buddy is not free. Is there a free block of this size? */
	bpage = UT_LIST_GET_FIRST(buf_pool->zip_free[i]);

	if (bpage) {
		/* Remove the block from the free list, because a successful
		buf_buddy_relocate() will overwrite bpage->list. */
		buf_buddy_remove_from_free(buf_pool, bpage, i);

		/* Try to relocate the buddy of buf to the free block. */
		if (buf_buddy_relocate(buf_pool, buddy, bpage, i)) {

			buddy->state = BUF_BLOCK_ZIP_FREE;
			goto buddy_is_free;
		}

		buf_buddy_add_to_free(buf_pool, bpage, i);
	}

func_exit:
	/* Free the block to the buddy list. */
	bpage = buf;

	/* Fill large blocks with a constant pattern. */
	ut_d(memset(bpage, i, BUF_BUDDY_LOW << i));
	UNIV_MEM_INVALID(bpage, BUF_BUDDY_LOW << i);
	bpage->state = BUF_BLOCK_ZIP_FREE;
	buf_buddy_add_to_free(buf_pool, bpage, i);
}
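/* Illustrative sketch (not InnoDB code): buf_buddy_get() above locates the
neighbour a freed block can recombine with.  In a binary buddy system the
buddy of the block at offset o with size s (a power of two) lies at offset
o XOR s, because the pair always shares one naturally aligned region of
size 2*s.  The standalone stand-in below demonstrates only that offset
arithmetic. */

#include <stdio.h>

/* Offset of the buddy of the block at the given offset and size. */
static unsigned long
buddy_offset(unsigned long offset, unsigned long size)
{
	return(offset ^ size);
}

int
main(void)
{
	/* With 1024-byte blocks, 0 and 1024 are buddies, as are 2048 and
	3072; after one merge, 0 and 2048 become buddies of size 2048. */
	printf("%lu\n", buddy_offset(0, 1024));		/* 1024 */
	printf("%lu\n", buddy_offset(3072, 1024));	/* 2048 */
	printf("%lu\n", buddy_offset(0, 2048));		/* 2048 */
	return(0);
}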
/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/*!< in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
	const char*	cmutex_name,	/*!< in: mutex name */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	ulint		cline)		/*!< in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
		     SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

	ut_d(lock->mutex.cmutex_name = cmutex_name);
	ut_d(lock->mutex.mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
	UT_NOT_USED(cmutex_name);
# endif
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	lock->lock_word = X_LOCK_DECR;
	lock->waiters = 0;

	/* We set this value to signify that lock->writer_thread
	contains garbage at initialization and cannot be used for
	recursive x-locking. */
	lock->recursive = FALSE;
	/* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
	memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
	UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);

#ifdef UNIV_SYNC_DEBUG
	UT_LIST_INIT(lock->debug_list);

	lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

	ut_d(lock->magic_n = RW_LOCK_MAGIC_N);

	lock->cfile_name = cfile_name;
	lock->cline = (unsigned int) cline;

	lock->count_os_wait = 0;
	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;
	lock->event = os_event_create(NULL);
	lock->wait_ex_event = os_event_create(NULL);

	mutex_enter(&rw_lock_list_mutex);

	ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
	      || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}
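/* Illustrative sketch (not InnoDB code): the lock_word initialized to
X_LOCK_DECR above encodes the lock state in a single counter.  Unlocked is
X_LOCK_DECR; each shared locker subtracts 1 and an exclusive locker subtracts
X_LOCK_DECR, which is why rw_lock_free_func() can check for the non-locked
state with a single comparison against X_LOCK_DECR.  The toy model below
shows only that bookkeeping: it is single-threaded, uses an assumed value for
X_LOCK_DECR, and leaves out all of the atomics, spinning and event waits of
the real lock. */

#include <assert.h>
#include <stdio.h>

#define X_LOCK_DECR	0x00100000	/* assumed value of the x-lock decrement */

static long	lock_word = X_LOCK_DECR;	/* non-locked state */

static void s_lock(void)	{ assert(lock_word > 0); lock_word -= 1; }
static void s_unlock(void)	{ lock_word += 1; }
static void x_lock(void)	{ assert(lock_word == X_LOCK_DECR); lock_word -= X_LOCK_DECR; }
static void x_unlock(void)	{ lock_word += X_LOCK_DECR; }

int
main(void)
{
	s_lock();
	s_lock();
	printf("two readers: lock_word = %ld\n", lock_word);
	s_unlock();
	s_unlock();

	x_lock();
	printf("one writer:  lock_word = %ld\n", lock_word);
	x_unlock();

	/* Back to the state that rw_lock_free_func() insists on. */
	assert(lock_word == X_LOCK_DECR);
	return(0);
}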