/******************************************************************//**
Initializes the memory system. */
UNIV_INTERN
void
mem_init(
/*=====*/
	ulint	size)	/*!< in: common pool size in bytes */
{
#ifdef UNIV_MEM_DEBUG

	ulint	i;

	/* Initialize the hash table */
	ut_a(FALSE == mem_hash_initialized);

	mutex_create(mem_hash_mutex_key, &mem_hash_mutex, SYNC_MEM_HASH);

	for (i = 0; i < MEM_HASH_SIZE; i++) {
		UT_LIST_INIT(*mem_hash_get_nth_cell(i));
	}

	UT_LIST_INIT(mem_all_list_base);

	mem_hash_initialized = TRUE;
#endif

	if (UNIV_LIKELY(srv_use_sys_malloc)) {
		/* When innodb_use_sys_malloc is set, the
		mem_comm_pool won't be used for any allocations.  We
		create a dummy mem_comm_pool, because some statistics
		and debugging code relies on it being initialized. */
		size = 1;
	}

	mem_comm_pool = mem_pool_create(size);
}
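Every snippet in this collection initializes an intrusive doubly linked list with UT_LIST_INIT before linking nodes into it with UT_LIST_ADD_FIRST / UT_LIST_ADD_LAST. As a rough orientation, here is a minimal standalone sketch of that pattern; the types and helpers (node_t, list_base_t, list_init, list_add_first) are invented for illustration and are not the real ut0lst.h macros.

/* Minimal standalone sketch of the intrusive two-way list pattern that
UT_LIST_INIT / UT_LIST_ADD_FIRST illustrate; names are hypothetical. */

#include <stddef.h>
#include <stdio.h>

typedef struct node_t node_t;
struct node_t {
	int	value;
	node_t*	prev;	/* the node itself embeds the list links; */
	node_t*	next;	/* there are no separately allocated cells */
};

typedef struct {
	size_t	count;
	node_t*	first;
	node_t*	last;
} list_base_t;

/* Corresponds to UT_LIST_INIT(base): reset the base node to empty. */
static void list_init(list_base_t* base)
{
	base->count = 0;
	base->first = NULL;
	base->last = NULL;
}

/* Corresponds to UT_LIST_ADD_FIRST(list, base, node). */
static void list_add_first(list_base_t* base, node_t* node)
{
	node->prev = NULL;
	node->next = base->first;

	if (base->first != NULL) {
		base->first->prev = node;
	} else {
		base->last = node;
	}

	base->first = node;
	base->count++;
}

int main(void)
{
	list_base_t	base;
	node_t		a = {1, NULL, NULL};
	node_t		b = {2, NULL, NULL};

	list_init(&base);		/* like UT_LIST_INIT */
	list_add_first(&base, &a);
	list_add_first(&base, &b);	/* list is now: b, a */

	for (node_t* n = base.first; n != NULL; n = n->next) {
		printf("%d\n", n->value);	/* prints 2 then 1 */
	}
	return 0;
}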
// Initialize internal data structs
OSEventThread::OSEventThread()
	: mIdleTimeout(-1), mMaxfFd(0), mFdWakeupRead(-1), mFdWakeupWrite(-1)
{
	FD_ZERO(&mReadFds);

	UT_LIST_INIT(mTimerList);
	UT_LIST_INIT(mPendingList);
	UT_LIST_INIT(mWatchList);
}
/*********************************************************//**
Initializes global event and OS 'slow' mutex lists. */
UNIV_INTERN
void
os_sync_init(void)
/*==============*/
{
	UT_LIST_INIT(os_event_list);
	UT_LIST_INIT(os_mutex_list);

	os_sync_mutex = NULL;
	os_sync_mutex_inited = FALSE;

	os_sync_mutex = os_mutex_create(NULL);
	os_sync_mutex_inited = TRUE;
}
/************************************************************//**
Adds a new block to a dyn array.
@return	created block */
UNIV_INTERN
dyn_block_t*
dyn_array_add_block(
/*================*/
	dyn_array_t*	arr)	/*!< in: dyn array */
{
	mem_heap_t*	heap;
	dyn_block_t*	block;

	ut_ad(arr);
	ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);

	if (arr->heap == NULL) {
		UT_LIST_INIT(arr->base);
		UT_LIST_ADD_FIRST(list, arr->base, arr);

		arr->heap = mem_heap_create(sizeof(dyn_block_t));
	}

	block = dyn_array_get_last_block(arr);
	block->used = block->used | DYN_BLOCK_FULL_FLAG;

	heap = arr->heap;

	block = mem_heap_alloc(heap, sizeof(dyn_block_t));
	block->used = 0;

	UT_LIST_ADD_LAST(list, arr->base, block);

	return(block);
}
void
trx_rseg_list_and_array_init(
/*=========================*/
	trx_sysf_t*	sys_header,	/* in: trx system header */
	mtr_t*		mtr)		/* in: mtr */
{
	ulint	i;
	ulint	page_no;
	ulint	space;

	UT_LIST_INIT(trx_sys->rseg_list);

	trx_sys->rseg_history_len = 0;

	for (i = 0; i < TRX_SYS_N_RSEGS; i++) {

		page_no = trx_sysf_rseg_get_page_no(sys_header, i, mtr);

		if (page_no == FIL_NULL) {

			trx_sys_set_nth_rseg(trx_sys, i, NULL);
		} else {
			space = trx_sysf_rseg_get_space(sys_header, i, mtr);

			trx_rseg_mem_create(i, space, page_no, mtr);
		}
	}
}
/******************************************************************//**
Creates a symbol table for a single stored procedure or query.
@return	own: symbol table */
UNIV_INTERN
sym_tab_t*
sym_tab_create(
/*===========*/
	mem_heap_t*	heap)	/*!< in: memory heap where to create */
{
	sym_tab_t*	sym_tab;

	sym_tab = mem_heap_alloc(heap, sizeof(sym_tab_t));

	UT_LIST_INIT(sym_tab->sym_list);
	UT_LIST_INIT(sym_tab->func_node_list);

	sym_tab->heap = heap;

	return(sym_tab);
}
/********************************************************************//**
Creates a memory pool.
@return	memory pool */
UNIV_INTERN
mem_pool_t*
mem_pool_create(
/*============*/
	ulint	size)	/*!< in: pool size in bytes */
{
	mem_pool_t*	pool;
	mem_area_t*	area;
	ulint		i;
	ulint		used;

	pool = ut_malloc(sizeof(mem_pool_t));

	pool->buf = ut_malloc_low(size, TRUE);
	pool->size = size;

	mutex_create(&pool->mutex, SYNC_MEM_POOL);

	/* Initialize the free lists */

	for (i = 0; i < 64; i++) {

		UT_LIST_INIT(pool->free_list[i]);
	}

	used = 0;

	while (size - used >= MEM_AREA_MIN_SIZE) {

		i = ut_2_log(size - used);

		if (ut_2_exp(i) > size - used) {

			/* ut_2_log rounds upward */

			i--;
		}

		area = (mem_area_t*)(pool->buf + used);

		mem_area_set_size(area, ut_2_exp(i));
		mem_area_set_free(area, TRUE);
		UNIV_MEM_FREE(MEM_AREA_EXTRA_SIZE + (byte*) area,
			      ut_2_exp(i) - MEM_AREA_EXTRA_SIZE);

		UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area);

		used = used + ut_2_exp(i);
	}

	ut_ad(size >= used);

	pool->reserved = 0;

	return(pool);
}
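The sizing loop in mem_pool_create() carves the pool buffer into free areas whose sizes are descending powers of two, stopping when less than MEM_AREA_MIN_SIZE remains. The following standalone sketch shows only that arithmetic; the helper name and the minimum-area constant are hypothetical stand-ins, not InnoDB code.

/* Sketch of the power-of-two carving done by the loop above. */

#include <stdio.h>

#define MIN_AREA_SIZE	64	/* stand-in for MEM_AREA_MIN_SIZE */

/* Largest power of two that fits in 'rest'.  The original uses
ut_2_log(), which rounds upward, and then decrements the exponent
when the rounded value overshoots. */
static unsigned long largest_pow2_below(unsigned long rest)
{
	unsigned long p = 1;

	while (p * 2 <= rest) {
		p *= 2;
	}
	return p;
}

int main(void)
{
	unsigned long size = 10000;	/* example pool size in bytes */
	unsigned long used = 0;

	while (size - used >= MIN_AREA_SIZE) {
		unsigned long area = largest_pow2_below(size - used);

		printf("area of %lu bytes at offset %lu\n", area, used);
		used += area;
	}
	/* For size = 10000 this prints areas of 8192, 1024, 512 and
	256 bytes, leaving 16 bytes of the buffer unused. */
	return 0;
}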
/**********************************************************************//**
Initializes the mem block list at database startup. */
UNIV_INTERN
void
ut_mem_init(void)
/*=============*/
{
	ut_a(!ut_mem_block_list_inited);
	os_fast_mutex_init(&ut_list_mutex);
	UT_LIST_INIT(ut_mem_block_list);
	ut_mem_block_list_inited = TRUE;
}
/********************************************************************
Initialize the rollback instance list. */
UNIV_INTERN
void
trx_rseg_list_and_array_init(
/*=========================*/
	trx_sysf_t*	sys_header,	/* in: trx system header */
	mtr_t*		mtr)		/* in: mtr */
{
	UT_LIST_INIT(trx_sys->rseg_list);

	trx_sys->rseg_history_len = 0;

	trx_rseg_create_instance(sys_header, mtr);
}
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/* in: pointer to memory */
	char*		cfile_name,	/* in: file name where created */
	ulint		cline)		/* in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

	mutex_create(rw_lock_get_mutex(lock));
	mutex_set_level(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

	rw_lock_set_waiters(lock, 0);
	rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
	lock->writer_count = 0;
	rw_lock_set_reader_count(lock, 0);

	lock->writer_is_wait_ex = FALSE;

	UT_LIST_INIT(lock->debug_list);

	lock->magic_n = RW_LOCK_MAGIC_N;

	lock->level = SYNC_LEVEL_NONE;

	lock->cfile_name = cfile_name;
	lock->cline = cline;

	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;

	mutex_enter(&rw_lock_list_mutex);

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}
/***********************************************************************//**
Creates a query graph fork node.
@return	own: fork node */
UNIV_INTERN
que_fork_t*
que_fork_create(
/*============*/
	que_t*		graph,		/*!< in: graph, if NULL then this
					fork node is assumed to be the
					graph root */
	que_node_t*	parent,		/*!< in: parent node */
	ulint		fork_type,	/*!< in: fork type */
	mem_heap_t*	heap)		/*!< in: memory heap where created */
{
	que_fork_t*	fork;

	ut_ad(heap);

	fork = mem_heap_alloc(heap, sizeof(que_fork_t));

	fork->common.type = QUE_NODE_FORK;
	fork->n_active_thrs = 0;

	fork->state = QUE_FORK_COMMAND_WAIT;

	if (graph != NULL) {
		fork->graph = graph;
	} else {
		fork->graph = fork;
	}

	fork->common.parent = parent;
	fork->fork_type = fork_type;

	fork->caller = NULL;

	UT_LIST_INIT(fork->thrs);

	fork->sym_tab = NULL;
	fork->info = NULL;

	fork->heap = heap;

	return(fork);
}
sess_t*
sess_open(void)
/*===========*/
			/* out, own: session object */
{
	sess_t*	sess;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */

	sess = mem_alloc(sizeof(sess_t));

	sess->state = SESS_ACTIVE;

	sess->trx = trx_create(sess);

	UT_LIST_INIT(sess->graphs);

	return(sess);
}
/****************************************************************//**
Creates and initializes a transaction object.
@return	own: the transaction */
UNIV_INTERN
trx_t*
trx_create(
/*=======*/
	sess_t*	sess)	/*!< in: session */
{
	trx_t*	trx;

	ut_ad(mutex_own(&kernel_mutex));
	ut_ad(sess);

	trx = mem_alloc(sizeof(trx_t));

	trx->magic_n = TRX_MAGIC_N;

	trx->op_info = "";

	trx->is_purge = 0;
	trx->is_recovered = 0;
	trx->conc_state = TRX_NOT_STARTED;
	trx->start_time = time(NULL);

	trx->isolation_level = TRX_ISO_REPEATABLE_READ;

	trx->id = ut_dulint_zero;
	trx->no = ut_dulint_max;

	trx->support_xa = TRUE;

	trx->check_foreigns = TRUE;
	trx->check_unique_secondary = TRUE;

	trx->flush_log_later = FALSE;
	trx->must_flush_log_later = FALSE;

	trx->dict_operation = TRX_DICT_OP_NONE;
	trx->table_id = ut_dulint_zero;

	trx->mysql_thd = NULL;
	trx->active_trans = 0;
	trx->duplicates = 0;

	trx->n_mysql_tables_in_use = 0;
	trx->mysql_n_tables_locked = 0;

	trx->mysql_log_file_name = NULL;
	trx->mysql_log_offset = 0;

	mutex_create(&trx->undo_mutex, SYNC_TRX_UNDO);

	trx->rseg = NULL;

	trx->undo_no = ut_dulint_zero;
	trx->last_sql_stat_start.least_undo_no = ut_dulint_zero;
	trx->insert_undo = NULL;
	trx->update_undo = NULL;
	trx->undo_no_arr = NULL;

	trx->error_state = DB_SUCCESS;
	trx->error_key_num = 0;
	trx->detailed_error[0] = '\0';

	trx->sess = sess;
	trx->que_state = TRX_QUE_RUNNING;
	trx->n_active_thrs = 0;

	trx->handling_signals = FALSE;

	UT_LIST_INIT(trx->signals);
	UT_LIST_INIT(trx->reply_signals);

	trx->graph = NULL;

	trx->wait_lock = NULL;
	trx->was_chosen_as_deadlock_victim = FALSE;
	UT_LIST_INIT(trx->wait_thrs);

	trx->lock_heap = mem_heap_create_in_buffer(256);
	UT_LIST_INIT(trx->trx_locks);

	UT_LIST_INIT(trx->trx_savepoints);

	trx->dict_operation_lock_mode = 0;
	trx->has_search_latch = FALSE;
	trx->search_latch_timeout = BTR_SEA_TIMEOUT;

	trx->declared_to_be_inside_innodb = FALSE;
	trx->n_tickets_to_enter_innodb = 0;

	trx->global_read_view_heap = mem_heap_create(256);
	trx->global_read_view = NULL;
	trx->read_view = NULL;

	/* Set X/Open XA transaction identification to NULL */
	memset(&trx->xid, 0, sizeof(trx->xid));
	trx->xid.formatID = -1;

	trx->n_autoinc_rows = 0;

	/* Remember to free the vector explicitly. */
	trx->autoinc_locks = ib_vector_create(
		mem_heap_create(sizeof(ib_vector_t) + sizeof(void*) * 4), 4);

	return(trx);
}
/****************************************************************//**
Creates trx objects for transactions and initializes the trx list of
trx_sys at database start. Rollback segment and undo log lists must
already exist when this function is called, because the lists of
transactions to be rolled back or cleaned up are built based on the
undo log lists. */
UNIV_INTERN
void
trx_lists_init_at_db_start(void)
/*============================*/
{
	trx_rseg_t*	rseg;
	trx_undo_t*	undo;
	trx_t*		trx;

	ut_ad(mutex_own(&kernel_mutex));
	UT_LIST_INIT(trx_sys->trx_list);

	/* Look from the rollback segments if there exist undo logs for
	transactions */

	rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);

	while (rseg != NULL) {
		undo = UT_LIST_GET_FIRST(rseg->insert_undo_list);

		while (undo != NULL) {

			trx = trx_create(trx_dummy_sess);

			trx->is_recovered = TRUE;
			trx->id = undo->trx_id;
			trx->xid = undo->xid;
			trx->insert_undo = undo;
			trx->rseg = rseg;

			if (undo->state != TRX_UNDO_ACTIVE) {

				/* Prepared transactions are left in
				the prepared state waiting for a
				commit or abort decision from MySQL */

				if (undo->state == TRX_UNDO_PREPARED) {

					fprintf(stderr,
						"InnoDB: Transaction "
						TRX_ID_FMT " was in the"
						" XA prepared state.\n",
						TRX_ID_PREP_PRINTF(trx->id));

					if (srv_force_recovery == 0) {

						trx->conc_state
							= TRX_PREPARED;
					} else {
						fprintf(stderr,
							"InnoDB: Since"
							" innodb_force_recovery"
							" > 0, we will"
							" rollback it"
							" anyway.\n");

						trx->conc_state = TRX_ACTIVE;
					}
				} else {
					trx->conc_state
						= TRX_COMMITTED_IN_MEMORY;
				}

				/* We give a dummy value for the trx no;
				this should have no relevance since purge
				is not interested in committed transaction
				numbers, unless they are in the history list,
				in which case it looks the number from the
				disk based undo log structure */

				trx->no = trx->id;
			} else {
				trx->conc_state = TRX_ACTIVE;

				/* A running transaction always has the number
				field inited to ut_dulint_max */

				trx->no = ut_dulint_max;
			}

			if (undo->dict_operation) {
				trx_set_dict_operation(
					trx, TRX_DICT_OP_TABLE);
				trx->table_id = undo->table_id;
			}

			if (!undo->empty) {
				trx->undo_no = ut_dulint_add(undo->top_undo_no,
							     1);
			}

			trx_list_insert_ordered(trx);

			undo = UT_LIST_GET_NEXT(undo_list, undo);
		}

		undo = UT_LIST_GET_FIRST(rseg->update_undo_list);

		while (undo != NULL) {
			trx = trx_get_on_id(undo->trx_id);

			if (NULL == trx) {
				trx = trx_create(trx_dummy_sess);

				trx->is_recovered = TRUE;
				trx->id = undo->trx_id;
				trx->xid = undo->xid;

				if (undo->state != TRX_UNDO_ACTIVE) {

					/* Prepared transactions are left in
					the prepared state waiting for a
					commit or abort decision from MySQL */

					if (undo->state == TRX_UNDO_PREPARED) {
						fprintf(stderr,
							"InnoDB: Transaction "
							TRX_ID_FMT " was in the"
							" XA prepared state.\n",
							TRX_ID_PREP_PRINTF(
								trx->id));

						if (srv_force_recovery == 0) {

							trx->conc_state
								= TRX_PREPARED;
						} else {
							fprintf(stderr,
								"InnoDB: Since"
								" innodb_force_recovery"
								" > 0, we will"
								" rollback it"
								" anyway.\n");

							trx->conc_state
								= TRX_ACTIVE;
						}
					} else {
						trx->conc_state
							= TRX_COMMITTED_IN_MEMORY;
					}

					/* We give a dummy value for the trx
					number */

					trx->no = trx->id;
				} else {
					trx->conc_state = TRX_ACTIVE;

					/* A running transaction always has
					the number field inited to
					ut_dulint_max */

					trx->no = ut_dulint_max;
				}

				trx->rseg = rseg;
				trx_list_insert_ordered(trx);

				if (undo->dict_operation) {
					trx_set_dict_operation(
						trx, TRX_DICT_OP_TABLE);
					trx->table_id = undo->table_id;
				}
			}

			trx->update_undo = undo;

			if ((!undo->empty)
			    && (ut_dulint_cmp(undo->top_undo_no,
					      trx->undo_no) >= 0)) {

				trx->undo_no = ut_dulint_add(undo->top_undo_no,
							     1);
			}

			undo = UT_LIST_GET_NEXT(undo_list, undo);
		}

		rseg = UT_LIST_GET_NEXT(rseg_list, rseg);
	}
}
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/* in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/* in: level */
# endif /* UNIV_SYNC_DEBUG */
	const char*	cmutex_name,	/* in: mutex name */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/* in: file name where created */
	ulint		cline)		/* in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

	mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

#if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
	lock->mutex.cmutex_name = cmutex_name;
	lock->mutex.mutex_type = 1;
#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */

	rw_lock_set_waiters(lock, 0);
	rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
	lock->writer_count = 0;
	rw_lock_set_reader_count(lock, 0);

	lock->writer_is_wait_ex = FALSE;

#ifdef UNIV_SYNC_DEBUG
	UT_LIST_INIT(lock->debug_list);

	lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

	lock->magic_n = RW_LOCK_MAGIC_N;

	lock->cfile_name = cfile_name;
	lock->cline = (unsigned int) cline;

	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;

	lock->event = os_event_create(NULL);
#ifdef __WIN__
	lock->wait_ex_event = os_event_create(NULL);
#endif

	mutex_enter(&rw_lock_list_mutex);

	if (UT_LIST_GET_LEN(rw_lock_list) > 0) {
		ut_a(UT_LIST_GET_FIRST(rw_lock_list)->magic_n
		     == RW_LOCK_MAGIC_N);
	}

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}
mem_block_t*
mem_heap_create_block(
/*==================*/
				/* out, own: memory heap block, NULL if
				did not succeed */
	mem_heap_t*	heap,	/* in: memory heap or NULL if first block
				should be created */
	ulint		n,	/* in: number of bytes needed for user data,
				or if init_block is not NULL, its size in
				bytes */
	void*		init_block, /* in: init block in fast create,
				type must be MEM_HEAP_DYNAMIC */
	ulint		type,	/* in: type of heap: MEM_HEAP_DYNAMIC or
				MEM_HEAP_BUFFER */
	const char*	file_name,/* in: file name where created */
	ulint		line)	/* in: line where created */
{
	mem_block_t*	block;
	ulint		len;

	ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
	      || (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));

	if (heap && heap->magic_n != MEM_BLOCK_MAGIC_N) {
		mem_analyze_corruption((byte*)heap);
	}

	/* In dynamic allocation, calculate the size: block header + data. */

	if (init_block != NULL) {
		ut_ad(type == MEM_HEAP_DYNAMIC);
		ut_ad(n > MEM_BLOCK_START_SIZE + MEM_BLOCK_HEADER_SIZE);
		len = n;
		block = init_block;

	} else if (type == MEM_HEAP_DYNAMIC) {

		len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);
		block = mem_area_alloc(len, mem_comm_pool);
	} else {
		ut_ad(n <= MEM_MAX_ALLOC_IN_BUF);

		len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);

		if (len < UNIV_PAGE_SIZE / 2) {

			block = mem_area_alloc(len, mem_comm_pool);
		} else {
			len = UNIV_PAGE_SIZE;

			if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
				/* We cannot allocate the block from the
				buffer pool, but must get the free block
				from the heap header free block field */

				block = (mem_block_t*)heap->free_block;
				heap->free_block = NULL;
			} else {
				block = (mem_block_t*)buf_frame_alloc();
			}
		}
	}

	if (block == NULL) {
		return(NULL);
	}

	block->magic_n = MEM_BLOCK_MAGIC_N;
	ut_strlcpy_rev(block->file_name, file_name,
		       sizeof(block->file_name));
	block->line = line;

#ifdef MEM_PERIODIC_CHECK
	mem_pool_mutex_enter();

	if (!mem_block_list_inited) {
		mem_block_list_inited = TRUE;
		UT_LIST_INIT(mem_block_list);
	}

	UT_LIST_ADD_LAST(mem_block_list, mem_block_list, block);

	mem_pool_mutex_exit();
#endif
	mem_block_set_len(block, len);
	mem_block_set_type(block, type);
	mem_block_set_free(block, MEM_BLOCK_HEADER_SIZE);
	mem_block_set_start(block, MEM_BLOCK_HEADER_SIZE);

	block->free_block = NULL;
	block->init_block = (init_block != NULL);

	ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len);

	return(block);
}
/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/*!< in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
	const char*	cmutex_name,	/*!< in: mutex name */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	ulint		cline)		/*!< in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
		     SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

	ut_d(lock->mutex.cmutex_name = cmutex_name);
	ut_d(lock->mutex.mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
	UT_NOT_USED(cmutex_name);
# endif
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	lock->lock_word = X_LOCK_DECR;
	lock->waiters = 0;

	/* We set this value to signify that lock->writer_thread
	contains garbage at initialization and cannot be used for
	recursive x-locking. */
	lock->recursive = FALSE;
	/* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
	memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
	UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);

#ifdef UNIV_SYNC_DEBUG
	UT_LIST_INIT(lock->debug_list);

	lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

	ut_d(lock->magic_n = RW_LOCK_MAGIC_N);

	lock->cfile_name = cfile_name;
	lock->cline = (unsigned int) cline;

	lock->count_os_wait = 0;
	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;
	lock->event = os_event_create(NULL);
	lock->wait_ex_event = os_event_create(NULL);

	mutex_enter(&rw_lock_list_mutex);

	ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
	      || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}
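In the version above the lock state is packed into lock->lock_word, which is created equal to X_LOCK_DECR. Conceptually, each shared holder subtracts 1 and an exclusive holder subtracts X_LOCK_DECR, so the remaining value encodes who currently holds the lock. The sketch below illustrates only that bookkeeping with plain integers, under the assumption that X_LOCK_DECR is 0x00100000; it omits atomics, waiting, and recursion entirely and is not the real rw-lock code.

/* Simplified, single-threaded illustration of the lock_word encoding. */

#include <stdio.h>

#define X_LOCK_DECR	0x00100000	/* assumed value, see lead-in */

static long lock_word = X_LOCK_DECR;	/* created in the unlocked state */

static void describe(void)
{
	if (lock_word == X_LOCK_DECR) {
		printf("unlocked\n");
	} else if (lock_word > 0) {
		printf("%ld shared holder(s)\n",
		       (long) X_LOCK_DECR - lock_word);
	} else {
		printf("exclusively locked\n");
	}
}

int main(void)
{
	describe();			/* unlocked */

	lock_word -= 1;			/* one S holder enters */
	lock_word -= 1;			/* a second S holder enters */
	describe();			/* 2 shared holder(s) */

	lock_word += 2;			/* both S holders release */
	lock_word -= X_LOCK_DECR;	/* an X holder enters */
	describe();			/* exclusively locked */
	return 0;
}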
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return	own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
UNIV_INTERN
mem_block_t*
mem_heap_create_block(
/*==================*/
	mem_heap_t*	heap,	/*!< in: memory heap or NULL if first block
				should be created */
	ulint		n,	/*!< in: number of bytes needed for user
				data */
	ulint		type,	/*!< in: type of heap: MEM_HEAP_DYNAMIC or
				MEM_HEAP_BUFFER */
	const char*	file_name,/*!< in: file name where created */
	ulint		line)	/*!< in: line where created */
{
#ifndef UNIV_HOTBACKUP
	buf_block_t*	buf_block = NULL;
#endif /* !UNIV_HOTBACKUP */
	mem_block_t*	block;
	ulint		len;

	ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
	      || (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));

	if (heap && heap->magic_n != MEM_BLOCK_MAGIC_N) {
		mem_analyze_corruption(heap);
	}

	/* In dynamic allocation, calculate the size: block header + data. */
	len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);

#ifndef UNIV_HOTBACKUP
	if (type == MEM_HEAP_DYNAMIC || len < UNIV_PAGE_SIZE / 2) {

		ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);

		block = mem_area_alloc(&len, mem_comm_pool);
	} else {
		len = UNIV_PAGE_SIZE;

		if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
			/* We cannot allocate the block from the
			buffer pool, but must get the free block from
			the heap header free block field */

			buf_block = heap->free_block;
			heap->free_block = NULL;

			if (UNIV_UNLIKELY(!buf_block)) {

				return(NULL);
			}
		} else {
			buf_block = buf_block_alloc(NULL, 0);
		}

		block = (mem_block_t*) buf_block->frame;
	}

	ut_ad(block);
	block->buf_block = buf_block;
	block->free_block = NULL;
#else /* !UNIV_HOTBACKUP */
	len = MEM_BLOCK_HEADER_SIZE + MEM_SPACE_NEEDED(n);
	block = ut_malloc(len);
	ut_ad(block);
#endif /* !UNIV_HOTBACKUP */

	block->magic_n = MEM_BLOCK_MAGIC_N;
	ut_strlcpy_rev(block->file_name, file_name,
		       sizeof(block->file_name));
	block->line = line;

#ifdef MEM_PERIODIC_CHECK
	mem_pool_mutex_enter();

	if (!mem_block_list_inited) {
		mem_block_list_inited = TRUE;
		UT_LIST_INIT(mem_block_list);
	}

	UT_LIST_ADD_LAST(mem_block_list, mem_block_list, block);

	mem_pool_mutex_exit();
#endif
	mem_block_set_len(block, len);
	mem_block_set_type(block, type);
	mem_block_set_free(block, MEM_BLOCK_HEADER_SIZE);
	mem_block_set_start(block, MEM_BLOCK_HEADER_SIZE);

	if (UNIV_UNLIKELY(heap == NULL)) {
		/* This is the first block of the heap. The field
		total_size should be initialized here */
		block->total_size = len;
	} else {
		/* Not the first allocation for the heap. This block's
		total_length field should be set to undefined. */
		ut_d(block->total_size = ULINT_UNDEFINED);
		UNIV_MEM_INVALID(&block->total_size,
				 sizeof block->total_size);

		heap->total_size += len;
	}

	ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len);

	return(block);
}
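Once mem_heap_create_block() has set the block's free offset to MEM_BLOCK_HEADER_SIZE, handing out memory from the block is essentially a bump of that offset. The following is a rough standalone sketch of that idea, not the actual mem_heap_alloc code; the struct, constant, and function names are invented for illustration.

/* Rough sketch of bump allocation inside a heap block. */

#include <stddef.h>
#include <stdio.h>

#define HEADER_SIZE	64	/* stand-in for MEM_BLOCK_HEADER_SIZE */

typedef struct {
	size_t	len;		/* total length of the block in bytes */
	size_t	free;		/* offset of the first free byte */
	size_t	start;		/* offset where user data begins */
	char	buf[4096];	/* block memory (header + user data) */
} heap_block_t;

/* Return a pointer to n bytes inside the block, or NULL if the block
is full (in which case the heap would create a new block). */
static void* block_alloc(heap_block_t* block, size_t n)
{
	void*	ptr;

	if (block->free + n > block->len) {
		return NULL;
	}

	ptr = block->buf + block->free;
	block->free += n;
	return ptr;
}

int main(void)
{
	heap_block_t	block;
	void*		a;
	void*		b;

	block.len = sizeof(block.buf);
	block.free = HEADER_SIZE;	/* like mem_block_set_free() */
	block.start = HEADER_SIZE;	/* like mem_block_set_start() */

	a = block_alloc(&block, 128);
	b = block_alloc(&block, 256);

	printf("a=%p b=%p free offset now %zu\n", a, b, block.free);
	return 0;
}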
mem_pool_t*
mem_pool_create(
/*============*/
			/* out: memory pool */
	ulint	size)	/* in: pool size in bytes */
{
	mem_pool_t*	pool;
	mem_area_t*	area;
	ulint		i;
	ulint		used;

	ut_a(size > 10000);

	pool = ut_malloc(sizeof(mem_pool_t));

	/* We do not set the memory to zero (FALSE) in the pool,
	but only when allocated at a higher level in mem0mem.c.
	This is to avoid masking useful Purify warnings. */

	pool->buf = ut_malloc_low(size, FALSE, TRUE);
	pool->size = size;

	mutex_create(&pool->mutex, SYNC_MEM_POOL);

	/* Initialize the free lists */

	for (i = 0; i < 64; i++) {

		UT_LIST_INIT(pool->free_list[i]);
	}

	used = 0;

	while (size - used >= MEM_AREA_MIN_SIZE) {

		i = ut_2_log(size - used);

		if (ut_2_exp(i) > size - used) {

			/* ut_2_log rounds upward */

			i--;
		}

		area = (mem_area_t*)(pool->buf + used);

		mem_area_set_size(area, ut_2_exp(i));
		mem_area_set_free(area, TRUE);
		UNIV_MEM_FREE(MEM_AREA_EXTRA_SIZE + (byte*) area,
			      ut_2_exp(i) - MEM_AREA_EXTRA_SIZE);

		UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area);

		used = used + ut_2_exp(i);
	}

	ut_ad(size >= used);

	pool->reserved = 0;

	return(pool);
}