Example No. 1
/********************************************************************//**
Prints info of a memory pool. */
UNIV_INTERN
void
mem_pool_print_info(
/*================*/
	FILE*		outfile,/*!< in: output file to write to */
	mem_pool_t*	pool)	/*!< in: memory pool */
{
	ulint		i;

	mem_pool_validate(pool);

	fprintf(outfile, "INFO OF A MEMORY POOL\n");

	mutex_enter(&(pool->mutex));

	for (i = 0; i < 64; i++) {
		if (UT_LIST_GET_LEN(pool->free_list[i]) > 0) {

			fprintf(outfile,
				"Free list length %lu for"
				" blocks of size %lu\n",
				(ulong) UT_LIST_GET_LEN(pool->free_list[i]),
				(ulong) ut_2_exp(i));
		}
	}

	fprintf(outfile, "Pool size %lu, reserved %lu.\n", (ulong) pool->size,
		(ulong) pool->reserved);
	mutex_exit(&(pool->mutex));
}
Example No. 2
/***********************************************************************
Initializes the old blocks pointer in the LRU list. This function should be
called when the LRU list grows to BUF_LRU_OLD_MIN_LEN length. */
static
void
buf_LRU_old_init(void)
/*==================*/
{
	buf_block_t*	block;

	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_a(UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN);

	/* We first initialize all blocks in the LRU list as old and then use
	the adjust function to move the LRU_old pointer to the right
	position */

	block = UT_LIST_GET_FIRST(buf_pool->LRU);

	while (block != NULL) {
		ut_a(block->state == BUF_BLOCK_FILE_PAGE);
		ut_a(block->in_LRU_list);
		block->old = TRUE;
		block = UT_LIST_GET_NEXT(LRU, block);
	}

	buf_pool->LRU_old = UT_LIST_GET_FIRST(buf_pool->LRU);
	buf_pool->LRU_old_len = UT_LIST_GET_LEN(buf_pool->LRU);

	buf_LRU_old_adjust_len();
}
Example No. 3
/**********************************************************************
Adds a block to the LRU list end. */
UNIV_INLINE
void
buf_LRU_add_block_to_end_low(
/*=========================*/
	buf_block_t*	block)	/* in: control block */
{
	buf_block_t*	last_block;

	ut_ad(buf_pool);
	ut_ad(block);
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

	block->old = TRUE;

	last_block = UT_LIST_GET_LAST(buf_pool->LRU);

	if (last_block) {
		block->LRU_position = last_block->LRU_position;
	} else {
		block->LRU_position = buf_pool_clock_tic();
	}

	ut_a(!block->in_LRU_list);
	UT_LIST_ADD_LAST(LRU, buf_pool->LRU, block);
	block->in_LRU_list = TRUE;

	if (srv_use_awe && block->frame) {
		/* Add to the list of mapped pages */

		UT_LIST_ADD_LAST(awe_LRU_free_mapped,
				 buf_pool->awe_LRU_free_mapped, block);
	}

	if (UT_LIST_GET_LEN(buf_pool->LRU) >= BUF_LRU_OLD_MIN_LEN) {

		buf_pool->LRU_old_len++;
	}

	if (UT_LIST_GET_LEN(buf_pool->LRU) > BUF_LRU_OLD_MIN_LEN) {

		ut_ad(buf_pool->LRU_old);

		/* Adjust the length of the old block list if necessary */

		buf_LRU_old_adjust_len();

	} else if (UT_LIST_GET_LEN(buf_pool->LRU) == BUF_LRU_OLD_MIN_LEN) {

		/* The LRU list is now long enough for LRU_old to become
		defined: init it */

		buf_LRU_old_init();
	}
}
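
The examples above all lean on the same intrusive-list primitives (UT_LIST_GET_LEN, UT_LIST_GET_FIRST, UT_LIST_GET_NEXT, UT_LIST_ADD_LAST). As orientation only, here is a minimal, self-contained sketch of what such a list looks like: the links live inside the element itself and the base node caches the length. The names and layout below are illustrative assumptions, not the real ut0lst.h macros.

/* Hypothetical sketch of an intrusive doubly-linked list with a cached
length, in the spirit of the UT_LIST_* macros used in these examples. */
#include <stddef.h>
#include <stdio.h>

typedef struct node_t node_t;
struct node_t {
	int	id;
	node_t*	prev;	/* the links are embedded in the element itself */
	node_t*	next;
};

typedef struct {
	size_t	count;	/* what UT_LIST_GET_LEN() would report */
	node_t*	first;	/* UT_LIST_GET_FIRST() */
	node_t*	last;	/* UT_LIST_GET_LAST() */
} list_base_t;

static void list_add_last(list_base_t* base, node_t* node)
{
	node->prev = base->last;
	node->next = NULL;

	if (base->last) {
		base->last->next = node;
	} else {
		base->first = node;
	}

	base->last = node;
	base->count++;
}

int main(void)
{
	list_base_t	lru = {0, NULL, NULL};
	node_t		a = {1, NULL, NULL};
	node_t		b = {2, NULL, NULL};

	list_add_last(&lru, &a);
	list_add_last(&lru, &b);

	/* Iterate the way the examples walk buf_pool->LRU. */
	for (node_t* n = lru.first; n != NULL; n = n->next) {
		printf("node %d\n", n->id);
	}

	printf("len = %zu\n", lru.count);
	return 0;
}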
Example No. 4
int
trx_weight_cmp(
/*===========*/
			/* out: <0, 0 or >0; similar to strcmp(3) */
	trx_t*	a,	/* in: the first transaction to be compared */
	trx_t*	b)	/* in: the second transaction to be compared */
{
	ibool	a_notrans_edit;
	ibool	b_notrans_edit;

	/* If mysql_thd is NULL for a transaction we assume that it has
	not edited non-transactional tables. */

	a_notrans_edit = a->mysql_thd != NULL
	    && thd_has_edited_nontrans_tables(a->mysql_thd);

	b_notrans_edit = b->mysql_thd != NULL
	    && thd_has_edited_nontrans_tables(b->mysql_thd);

	if (a_notrans_edit && !b_notrans_edit) {

		return(1);
	}

	if (!a_notrans_edit && b_notrans_edit) {

		return(-1);
	}

	/* Either both have edited non-transactional tables or neither
	has; fall back to comparing the number of altered/locked
	rows. */

#if 0
	fprintf(stderr,
		"%s TRX_WEIGHT(a): %lld+%lu, TRX_WEIGHT(b): %lld+%lu\n",
		__func__,
		ut_conv_dulint_to_longlong(a->undo_no),
		UT_LIST_GET_LEN(a->trx_locks),
		ut_conv_dulint_to_longlong(b->undo_no),
		UT_LIST_GET_LEN(b->trx_locks));
#endif

#define TRX_WEIGHT(t)	\
	ut_dulint_add((t)->undo_no, UT_LIST_GET_LEN((t)->trx_locks))

	return(ut_dulint_cmp(TRX_WEIGHT(a), TRX_WEIGHT(b)));
}
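
The comparator above orders transactions by two criteria: having edited non-transactional tables dominates, and otherwise the weight is undo_no plus the number of held locks (the TRX_WEIGHT macro). A stand-alone sketch of the same rule follows; the weight_t struct is a hypothetical stand-in for the relevant trx_t fields.

#include <stdio.h>

/* Hypothetical stand-in for the relevant parts of trx_t. */
typedef struct {
	int			edited_nontrans;	/* edited non-transactional tables? */
	unsigned long long	undo_no;		/* rows altered */
	unsigned long		n_locks;		/* lock structs held */
} weight_t;

/* Same ordering rule as trx_weight_cmp(): non-transactional edits dominate,
then compare undo_no + number of locks. */
static int weight_cmp(const weight_t* a, const weight_t* b)
{
	unsigned long long	wa;
	unsigned long long	wb;

	if (a->edited_nontrans != b->edited_nontrans) {
		return(a->edited_nontrans ? 1 : -1);
	}

	wa = a->undo_no + a->n_locks;
	wb = b->undo_no + b->n_locks;

	return(wa < wb ? -1 : wa > wb ? 1 : 0);
}

int main(void)
{
	weight_t	x = {0, 100, 5};
	weight_t	y = {1, 1, 0};

	/* y weighs more: it touched non-transactional tables. */
	printf("%d\n", weight_cmp(&x, &y));	/* prints -1 */
	return 0;
}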
Example No. 5
ulint
buf_LRU_get_recent_limit(void)
/*==========================*/
			/* out: the limit; zero if could not determine it */
{
	buf_block_t*	block;
	ulint		len;
	ulint		limit;

	mutex_enter(&(buf_pool->mutex));

	len = UT_LIST_GET_LEN(buf_pool->LRU);

	if (len < BUF_LRU_OLD_MIN_LEN) {
		/* The LRU list is too short to do read-ahead */

		mutex_exit(&(buf_pool->mutex));

		return(0);
	}

	block = UT_LIST_GET_FIRST(buf_pool->LRU);

	limit = block->LRU_position - len / BUF_LRU_INITIAL_RATIO;

	mutex_exit(&(buf_pool->mutex));

	return(limit);
}
Example No. 6
/******************************************************************//**
Assigns a rollback segment to a transaction in a round-robin fashion.
Skips the SYSTEM rollback segment if another is available.
@return	assigned rollback segment id */
UNIV_INLINE
ulint
trx_assign_rseg(void)
/*=================*/
{
	trx_rseg_t*	rseg	= trx_sys->latest_rseg;

	ut_ad(mutex_own(&kernel_mutex));
loop:
	/* Get next rseg in a round-robin fashion */

	rseg = UT_LIST_GET_NEXT(rseg_list, rseg);

	if (rseg == NULL) {
		rseg = UT_LIST_GET_FIRST(trx_sys->rseg_list);
	}

	/* If it is the SYSTEM rollback segment, and there exist others, skip
	it */

	if ((rseg->id == TRX_SYS_SYSTEM_RSEG_ID)
	    && (UT_LIST_GET_LEN(trx_sys->rseg_list) > 1)) {
		goto loop;
	}

	trx_sys->latest_rseg = rseg;

	return(rseg->id);
}
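
The same round-robin policy can be sketched over a plain array of slots: advance from the last assignment, wrap around, and skip slot 0 (the system slot) whenever more than one slot exists. The slot array, constants, and names below are illustrative assumptions, not the real rseg list.

#include <stdio.h>

#define N_SLOTS	4	/* assumed number of rollback-segment slots */

static unsigned	latest_slot = 0;	/* plays the role of trx_sys->latest_rseg */

static unsigned assign_slot(unsigned n_slots)
{
	unsigned	slot = latest_slot;

	do {
		slot = (slot + 1) % n_slots;
	} while (slot == 0 && n_slots > 1);	/* skip the system slot */

	latest_slot = slot;

	return(slot);
}

int main(void)
{
	int	i;

	for (i = 0; i < 6; i++) {
		printf("%u ", assign_slot(N_SLOTS));	/* 1 2 3 1 2 3 */
	}

	putchar('\n');
	return 0;
}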
Example No. 7
/**********************************************************************
Removes a block from the LRU list. */
UNIV_INLINE
void
buf_LRU_remove_block(
/*=================*/
	buf_block_t*	block)	/* in: control block */
{
	ut_ad(buf_pool);
	ut_ad(block);
	ut_ad(mutex_own(&(buf_pool->mutex)));

	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
	ut_a(block->in_LRU_list);

	/* If the LRU_old pointer is defined and points to just this block,
	move it backward one step */

	if (block == buf_pool->LRU_old) {

		/* Below: the previous block is guaranteed to exist, because
		the LRU_old pointer is only allowed to differ by the
		tolerance value from strict 3/8 of the LRU list length. */

		buf_pool->LRU_old = UT_LIST_GET_PREV(LRU, block);
		(buf_pool->LRU_old)->old = TRUE;

		buf_pool->LRU_old_len++;
		ut_a(buf_pool->LRU_old);
	}

	/* Remove the block from the LRU list */
	UT_LIST_REMOVE(LRU, buf_pool->LRU, block);
	block->in_LRU_list = FALSE;

	if (srv_use_awe && block->frame) {
		/* Remove from the list of mapped pages */

		UT_LIST_REMOVE(awe_LRU_free_mapped,
			       buf_pool->awe_LRU_free_mapped, block);
	}

	/* If the LRU list is so short that LRU_old is not defined, return */
	if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN) {

		buf_pool->LRU_old = NULL;

		return;
	}

	ut_ad(buf_pool->LRU_old);

	/* Update the LRU_old_len field if necessary */
	if (block->old) {

		buf_pool->LRU_old_len--;
	}

	/* Adjust the length of the old block list if necessary */
	buf_LRU_old_adjust_len();
}
Example No. 8
/*************************************************************//**
Prints info of a hash table. */
UNIV_INTERN
void
ha_print_info(
/*==========*/
	FILE*		file,	/*!< in: file where to print */
	hash_table_t*	table)	/*!< in: hash table */
{
#ifdef UNIV_DEBUG
/* Some of the code here is disabled for performance reasons in production
builds, see http://bugs.mysql.com/36941 */
#define PRINT_USED_CELLS
#endif /* UNIV_DEBUG */

#ifdef PRINT_USED_CELLS
	hash_cell_t*	cell;
	ulint		cells	= 0;
	ulint		i;
#endif /* PRINT_USED_CELLS */
	ulint		n_bufs;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef PRINT_USED_CELLS
	for (i = 0; i < hash_get_n_cells(table); i++) {

		cell = hash_get_nth_cell(table, i);

		if (cell->node) {

			cells++;
		}
	}
#endif /* PRINT_USED_CELLS */

	fprintf(file, "Hash table size %lu",
		(ulong) hash_get_n_cells(table));

#ifdef PRINT_USED_CELLS
	fprintf(file, ", used cells %lu", (ulong) cells);
#endif /* PRINT_USED_CELLS */

	if (table->heaps == NULL && table->heap != NULL) {

		/* This calculation is intended for the adaptive hash
		index: how many buffer frames have we reserved? */

		n_bufs = UT_LIST_GET_LEN(table->heap->base) - 1;

		if (table->heap->free_block) {
			n_bufs++;
		}

		fprintf(file, ", node heap has %lu buffer(s)\n",
			(ulong) n_bufs);
	}
}
Example No. 9
/***********************************************************************//**
Frees an instance of the rollback segment in memory. */
UNIV_INTERN
void
trx_rseg_mem_free(
/*==============*/
	trx_rseg_t*	rseg)	/* in, own: instance to free */
{
	trx_undo_t*	undo;

	mutex_free(&rseg->mutex);

	if (!srv_apply_log_only) {
		/* There can't be any active transactions. */
		ut_a(UT_LIST_GET_LEN(rseg->update_undo_list) == 0);
		ut_a(UT_LIST_GET_LEN(rseg->insert_undo_list) == 0);
	}

	undo = UT_LIST_GET_FIRST(rseg->update_undo_cached);

	while (undo != NULL) {
		trx_undo_t*	prev_undo = undo;

		undo = UT_LIST_GET_NEXT(undo_list, undo);
		UT_LIST_REMOVE(undo_list, rseg->update_undo_cached, prev_undo);

		trx_undo_mem_free(prev_undo);
	}

	undo = UT_LIST_GET_FIRST(rseg->insert_undo_cached);

	while (undo != NULL) {
		trx_undo_t*	prev_undo = undo;

		undo = UT_LIST_GET_NEXT(undo_list, undo);
		UT_LIST_REMOVE(undo_list, rseg->insert_undo_cached, prev_undo);

		trx_undo_mem_free(prev_undo);
	}

	trx_sys_set_nth_rseg(trx_sys, rseg->id, NULL);

	mem_free(rseg);
}
Example No. 10
ibool
buf_LRU_buf_pool_running_out(void)
/*==============================*/
				/* out: TRUE if less than 25 % of buffer pool
				left */
{
	ibool	ret	= FALSE;

	mutex_enter(&(buf_pool->mutex));

	if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
	    + UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 4) {

		ret = TRUE;
	}

	mutex_exit(&(buf_pool->mutex));

	return(ret);
}
Example No. 11
/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the mutex is freed. Removes a mutex object from the mutex list. The mutex
is checked to be in the reset state. */
UNIV_INTERN
void
mutex_free(
/*=======*/
	mutex_t*	mutex)	/*!< in: mutex */
{
	ut_ad(mutex_validate(mutex));
	ut_a(mutex_get_lock_word(mutex) == 0);
	ut_a(mutex_get_waiters(mutex) == 0);

#ifdef UNIV_MEM_DEBUG
	if (mutex == &mem_hash_mutex) {
		ut_ad(UT_LIST_GET_LEN(mutex_list) == 1);
		ut_ad(UT_LIST_GET_FIRST(mutex_list) == &mem_hash_mutex);
		UT_LIST_REMOVE(list, mutex_list, mutex);
		goto func_exit;
	}
#endif /* UNIV_MEM_DEBUG */

	if (mutex != &mutex_list_mutex
#ifdef UNIV_SYNC_DEBUG
	    && mutex != &sync_thread_mutex
#endif /* UNIV_SYNC_DEBUG */
	    ) {

		mutex_enter(&mutex_list_mutex);

		ut_ad(!UT_LIST_GET_PREV(list, mutex)
		      || UT_LIST_GET_PREV(list, mutex)->magic_n
		      == MUTEX_MAGIC_N);
		ut_ad(!UT_LIST_GET_NEXT(list, mutex)
		      || UT_LIST_GET_NEXT(list, mutex)->magic_n
		      == MUTEX_MAGIC_N);

		UT_LIST_REMOVE(list, mutex_list, mutex);

		mutex_exit(&mutex_list_mutex);
	}

	os_event_free(mutex->event);
#ifdef UNIV_MEM_DEBUG
func_exit:
#endif /* UNIV_MEM_DEBUG */
#if !defined(HAVE_ATOMIC_BUILTINS)
	os_fast_mutex_free(&(mutex->os_fast_mutex));
#endif
	/* If we free the mutex protecting the mutex list (freeing is
	not necessary), we have to reset the magic number AFTER removing
	it from the list. */
#ifdef UNIV_DEBUG
	mutex->magic_n = 0;
#endif /* UNIV_DEBUG */
}
Example No. 12
/*******************************************************************//**
Frees a single savepoint struct. */
UNIV_INTERN
void
trx_roll_savepoint_free(
/*=====================*/
	trx_t*			trx,	/*!< in: transaction handle */
	trx_named_savept_t*	savep)	/*!< in: savepoint to free */
{
	ut_a(savep != NULL);
	ut_a(UT_LIST_GET_LEN(trx->trx_savepoints) > 0);

	UT_LIST_REMOVE(trx_savepoints, trx->trx_savepoints, savep);
	mem_free(savep->name);
	mem_free(savep);
}
Example No. 13
/***************************************************************//**
Builds an index definition row to insert.
@return	DB_SUCCESS or error code */
static
ulint
dict_build_index_def_step(
    /*======================*/
    que_thr_t*	thr,	/*!< in: query thread */
    ind_node_t*	node)	/*!< in: index create node */
{
    dict_table_t*	table;
    dict_index_t*	index;
    dtuple_t*	row;
    trx_t*		trx;

    ut_ad(mutex_own(&(dict_sys->mutex)));

    trx = thr_get_trx(thr);

    index = node->index;

    table = dict_table_get_low(index->table_name);

    if (table == NULL) {
        return(DB_TABLE_NOT_FOUND);
    }

    trx->table_id = table->id;

    node->table = table;

    ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
          || dict_index_is_clust(index));

    dict_hdr_get_new_id(NULL, &index->id, NULL);

    /* Inherit the space id from the table; we store all indexes of a
    table in the same tablespace */

    index->space = table->space;
    node->page_no = FIL_NULL;
    row = dict_create_sys_indexes_tuple(index, node->heap);
    node->ind_row = row;

    ins_node_set_new_row(node->ind_def, row);

    /* Note that the index was created by this transaction. */
    index->trx_id = trx->id;

    return(DB_SUCCESS);
}
Example No. 14
ibool
sess_try_close(
/*===========*/
			/* out: TRUE if closed */
	sess_t*	sess)	/* in, own: session object */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	if (UT_LIST_GET_LEN(sess->graphs) == 0) {
		sess_close(sess);

		return(TRUE);
	}

	return(FALSE);
}
Example No. 15
/*******************************************************************
Builds an index definition row to insert. */
static
ulint
dict_build_index_def_step(
/*======================*/
				/* out: DB_SUCCESS or error code */
	que_thr_t*	thr,	/* in: query thread */
	ind_node_t*	node)	/* in: index create node */
{
	dict_table_t*	table;
	dict_index_t*	index;
	dtuple_t*	row;
	trx_t*		trx;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	trx = thr_get_trx(thr);

	index = node->index;

	table = dict_table_get_low(index->table_name);

	if (table == NULL) {
		return(DB_TABLE_NOT_FOUND);
	}

	trx->table_id = table->id;

	node->table = table;

	ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
	      || (index->type & DICT_CLUSTERED));

	index->id = dict_hdr_get_new_id(DICT_HDR_INDEX_ID);

	/* Inherit the space id from the table; we store all indexes of a
	table in the same tablespace */

	index->space = table->space;
	node->page_no = FIL_NULL;
	row = dict_create_sys_indexes_tuple(index, node->heap);
	node->ind_row = row;

	ins_node_set_new_row(node->ind_def, row);

	return(DB_SUCCESS);
}
Example No. 16
/**********************************************************************
Gives a recommendation of how many blocks should be flushed to establish
a big enough margin of replaceable blocks near the end of the LRU list
and in the free list. */
static
ulint
buf_flush_LRU_recommendation(void)
/*==============================*/
			/* out: number of blocks which should be flushed
			from the end of the LRU list */
{
	buf_block_t*	block;
	ulint		n_replaceable;
	ulint		distance	= 0;

	mutex_enter(&(buf_pool->mutex));

	n_replaceable = UT_LIST_GET_LEN(buf_pool->free);

	block = UT_LIST_GET_LAST(buf_pool->LRU);

	while ((block != NULL)
	       && (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
		   + BUF_FLUSH_EXTRA_MARGIN)
	       && (distance < BUF_LRU_FREE_SEARCH_LEN)) {

		mutex_enter(&block->mutex);

		if (buf_flush_ready_for_replace(block)) {
			n_replaceable++;
		}

		mutex_exit(&block->mutex);

		distance++;

		block = UT_LIST_GET_PREV(LRU, block);
	}

	mutex_exit(&(buf_pool->mutex));

	if (n_replaceable >= BUF_FLUSH_FREE_BLOCK_MARGIN) {

		return(0);
	}

	return(BUF_FLUSH_FREE_BLOCK_MARGIN + BUF_FLUSH_EXTRA_MARGIN
	       - n_replaceable);
}
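
Stripped of the list walk and the mutexes, the recommendation above is a simple margin rule: if fewer than BUF_FLUSH_FREE_BLOCK_MARGIN blocks are replaceable, ask for enough flushes to reach the margin plus the extra margin. A minimal sketch of just that arithmetic; the two constant values are assumptions for illustration.

#include <stdio.h>

#define FREE_MARGIN	64UL	/* stands in for BUF_FLUSH_FREE_BLOCK_MARGIN */
#define EXTRA_MARGIN	20UL	/* stands in for BUF_FLUSH_EXTRA_MARGIN */

static unsigned long flush_recommendation(unsigned long n_replaceable)
{
	if (n_replaceable >= FREE_MARGIN) {

		return(0);
	}

	return(FREE_MARGIN + EXTRA_MARGIN - n_replaceable);
}

int main(void)
{
	printf("%lu %lu\n",
	       flush_recommendation(100),	/* 0: margin already satisfied */
	       flush_recommendation(10));	/* 74: flush this many blocks */
	return 0;
}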
Example No. 17
/****************************************************************//**
Commits a transaction. NOTE that the kernel mutex is temporarily released. */
static
void
trx_handle_commit_sig_off_kernel(
/*=============================*/
	trx_t*		trx,		/*!< in: transaction */
	que_thr_t**	next_thr)	/*!< in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread */
{
	trx_sig_t*	sig;
	trx_sig_t*	next_sig;

	ut_ad(mutex_own(&kernel_mutex));

	trx->que_state = TRX_QUE_COMMITTING;

	trx_commit_off_kernel(trx);

	ut_ad(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	/* Remove all TRX_SIG_COMMIT signals from the signal queue and send
	reply messages to them */

	sig = UT_LIST_GET_FIRST(trx->signals);

	while (sig != NULL) {
		next_sig = UT_LIST_GET_NEXT(signals, sig);

		if (sig->type == TRX_SIG_COMMIT) {

			trx_sig_reply(sig, next_thr);
			trx_sig_remove(trx, sig);
		}

		sig = next_sig;
	}

	trx->que_state = TRX_QUE_RUNNING;
}
Example No. 18
/***********************************************************************
Moves the LRU_old pointer so that the length of the old blocks list
is inside the allowed limits. */
UNIV_INLINE
void
buf_LRU_old_adjust_len(void)
/*========================*/
{
	ulint	old_len;
	ulint	new_len;

	ut_a(buf_pool->LRU_old);
	ut_ad(mutex_own(&(buf_pool->mutex)));
	ut_ad(3 * (BUF_LRU_OLD_MIN_LEN / 8) > BUF_LRU_OLD_TOLERANCE + 5);

	for (;;) {
		old_len = buf_pool->LRU_old_len;
		new_len = 3 * (UT_LIST_GET_LEN(buf_pool->LRU) / 8);

		ut_a(buf_pool->LRU_old->in_LRU_list);

		/* Update the LRU_old pointer if necessary */

		if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {

			buf_pool->LRU_old = UT_LIST_GET_PREV(
				LRU, buf_pool->LRU_old);
			(buf_pool->LRU_old)->old = TRUE;
			buf_pool->LRU_old_len++;

		} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {

			(buf_pool->LRU_old)->old = FALSE;
			buf_pool->LRU_old = UT_LIST_GET_NEXT(
				LRU, buf_pool->LRU_old);
			buf_pool->LRU_old_len--;
		} else {
			ut_a(buf_pool->LRU_old); /* Check that we did not
						 fall out of the LRU list */
			return;
		}
	}
}
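
The adjustment loop above keeps buf_pool->LRU_old_len close to 3/8 of the total LRU length, within BUF_LRU_OLD_TOLERANCE. The decision alone can be sketched as a pure function; the tolerance value below is an assumption, and the addition on the left-hand side avoids the unsigned underflow that writing `target - tolerance` could risk for a very short list.

#include <stdio.h>

#define OLD_TOLERANCE	20UL	/* assumed stand-in for BUF_LRU_OLD_TOLERANCE */

/* Returns +1 to grow the old sublist, -1 to shrink it, 0 if within bounds. */
static int old_len_adjustment(unsigned long old_len, unsigned long lru_len)
{
	unsigned long	target = 3 * (lru_len / 8);

	if (old_len + OLD_TOLERANCE < target) {

		return(1);
	} else if (old_len > target + OLD_TOLERANCE) {

		return(-1);
	}

	return(0);
}

int main(void)
{
	printf("%d\n", old_len_adjustment(100, 1000));	/* target 375: grow */
	printf("%d\n", old_len_adjustment(400, 1000));	/* shrink */
	printf("%d\n", old_len_adjustment(370, 1000));	/* within tolerance */
	return 0;
}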
Example No. 19
/********************************************************************//**
Fills the specified free list.
@return	TRUE if we were able to insert a block to the free list */
static
ibool
mem_pool_fill_free_list(
/*====================*/
	ulint		i,	/*!< in: free list index */
	mem_pool_t*	pool)	/*!< in: memory pool */
{
	mem_area_t*	area;
	mem_area_t*	area2;
	ibool		ret;

	ut_ad(mutex_own(&(pool->mutex)));

	if (UNIV_UNLIKELY(i >= 63)) {
		/* We come here when we have run out of space in the
		memory pool: */

		return(FALSE);
	}

	area = UT_LIST_GET_FIRST(pool->free_list[i + 1]);

	if (area == NULL) {
		if (UT_LIST_GET_LEN(pool->free_list[i + 1]) > 0) {
			ut_print_timestamp(stderr);

			fprintf(stderr,
				"  InnoDB: Error: mem pool free list %lu"
				" length is %lu\n"
				"InnoDB: though the list is empty!\n",
				(ulong) i + 1,
				(ulong)
				UT_LIST_GET_LEN(pool->free_list[i + 1]));
		}

		ret = mem_pool_fill_free_list(i + 1, pool);

		if (ret == FALSE) {

			return(FALSE);
		}

		area = UT_LIST_GET_FIRST(pool->free_list[i + 1]);
	}

	if (UNIV_UNLIKELY(UT_LIST_GET_LEN(pool->free_list[i + 1]) == 0)) {
		mem_analyze_corruption(area);

		ut_error;
	}

	UT_LIST_REMOVE(free_list, pool->free_list[i + 1], area);

	area2 = (mem_area_t*)(((byte*)area) + ut_2_exp(i));
	UNIV_MEM_ALLOC(area2, MEM_AREA_EXTRA_SIZE);

	mem_area_set_size(area2, ut_2_exp(i));
	mem_area_set_free(area2, TRUE);

	UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area2);

	mem_area_set_size(area, ut_2_exp(i));

	UT_LIST_ADD_FIRST(free_list, pool->free_list[i], area);

	return(TRUE);
}
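
The interesting step above is the buddy split: a free area of size 2^(i+1) is cut into two areas of size 2^i by pure pointer arithmetic, and both halves go onto free list i. A toy sketch of that offset computation, with no real pool involved and malloc standing in for the larger free area.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned	i = 10;			/* target block size 2^10 bytes */
	size_t		big = (size_t) 1 << (i + 1);
	unsigned char*	area = malloc(big);	/* stands in for the larger free area */
	unsigned char*	area2;

	if (area == NULL) {
		return 1;
	}

	/* The second buddy starts exactly 2^i bytes into the first. */
	area2 = area + ((size_t) 1 << i);

	printf("buddy offset = %zu bytes\n", (size_t) (area2 - area));	/* 1024 */

	free(area);
	return 0;
}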
Example No. 20
/******************************************************************//**
Creates, or rather, initializes a mutex object in a specified memory
location (which must be appropriately aligned). The mutex is initialized
in the reset state. Explicit freeing of the mutex with mutex_free is
necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
mutex_create_func(
/*==============*/
	mutex_t*	mutex,		/*!< in: pointer to memory */
#ifdef UNIV_DEBUG
	const char*	cmutex_name,	/*!< in: mutex name */
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	ulint		cline)		/*!< in: file line where created */
{
#if defined(HAVE_ATOMIC_BUILTINS)
	mutex_reset_lock_word(mutex);
#else
	os_fast_mutex_init(&(mutex->os_fast_mutex));
	mutex->lock_word = 0;
#endif
	mutex->event = os_event_create(NULL);
	mutex_set_waiters(mutex, 0);
#ifdef UNIV_DEBUG
	mutex->magic_n = MUTEX_MAGIC_N;
#endif /* UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
	mutex->line = 0;
	mutex->file_name = "not yet reserved";
	mutex->level = level;
#endif /* UNIV_SYNC_DEBUG */
	mutex->cfile_name = cfile_name;
	mutex->cline = cline;
	mutex->count_os_wait = 0;
#ifdef UNIV_DEBUG
	mutex->cmutex_name=	  cmutex_name;
	mutex->count_using=	  0;
	mutex->mutex_type=	  0;
	mutex->lspent_time=	  0;
	mutex->lmax_spent_time=     0;
	mutex->count_spin_loop= 0;
	mutex->count_spin_rounds=   0;
	mutex->count_os_yield=  0;
#endif /* UNIV_DEBUG */

	/* Check that lock_word is aligned; this is important on Intel */
	ut_ad(((ulint)(&(mutex->lock_word))) % 4 == 0);

	/* NOTE! The very first mutexes are not put to the mutex list */

	if ((mutex == &mutex_list_mutex)
#ifdef UNIV_SYNC_DEBUG
	    || (mutex == &sync_thread_mutex)
#endif /* UNIV_SYNC_DEBUG */
	    ) {

		return;
	}

	mutex_enter(&mutex_list_mutex);

	ut_ad(UT_LIST_GET_LEN(mutex_list) == 0
	      || UT_LIST_GET_FIRST(mutex_list)->magic_n == MUTEX_MAGIC_N);

	UT_LIST_ADD_FIRST(list, mutex_list, mutex);

	mutex_exit(&mutex_list_mutex);
}
Example No. 21
/****************************************************************//**
Commits a transaction. */
UNIV_INTERN
void
trx_commit_off_kernel(
/*==================*/
	trx_t*	trx)	/*!< in: transaction */
{
	page_t*		update_hdr_page;
	ib_uint64_t	lsn		= 0;
	trx_rseg_t*	rseg;
	trx_undo_t*	undo;
	mtr_t		mtr;

	ut_ad(mutex_own(&kernel_mutex));

	trx->must_flush_log_later = FALSE;

	rseg = trx->rseg;

	if (trx->insert_undo != NULL || trx->update_undo != NULL) {

		mutex_exit(&kernel_mutex);

		mtr_start(&mtr);

		/* Change the undo log segment states from TRX_UNDO_ACTIVE
		to some other state: these modifications to the file data
		structure define the transaction as committed in the file
		based world, at the serialization point of the log sequence
		number lsn obtained below. */

		mutex_enter(&(rseg->mutex));

		if (trx->insert_undo != NULL) {
			trx_undo_set_state_at_finish(
				rseg, trx, trx->insert_undo, &mtr);
		}

		undo = trx->update_undo;

		if (undo) {
			mutex_enter(&kernel_mutex);
			trx->no = trx_sys_get_new_trx_no();

			mutex_exit(&kernel_mutex);

			/* It is not necessary to obtain trx->undo_mutex here
			because only a single OS thread is allowed to do the
			transaction commit for this transaction. */

			update_hdr_page = trx_undo_set_state_at_finish(
				rseg, trx, undo, &mtr);

			/* We have to do the cleanup for the update log while
			holding the rseg mutex because update log headers
			have to be put to the history list in the order of
			the trx number. */

			trx_undo_update_cleanup(trx, update_hdr_page, &mtr);
		}

		mutex_exit(&(rseg->mutex));

		/* Update the latest MySQL binlog name and offset info
		in trx sys header if MySQL binlogging is on or the database
		server is a MySQL replication slave */

		if (trx->mysql_log_file_name
		    && trx->mysql_log_file_name[0] != '\0') {
			trx_sys_update_mysql_binlog_offset(
				trx->mysql_log_file_name,
				trx->mysql_log_offset,
				TRX_SYS_MYSQL_LOG_INFO, &mtr);
			trx->mysql_log_file_name = NULL;
		}

		/* The following call commits the mini-transaction, making the
		whole transaction committed in the file-based world, at this
		log sequence number. The transaction becomes 'durable' when
		we write the log to disk, but in the logical sense the commit
		in the file-based data structures (undo logs etc.) happens
		here.

		NOTE that transaction numbers, which are assigned only to
		transactions with an update undo log, do not necessarily come
		in exactly the same order as commit lsn's, if the transactions
		have different rollback segments. To get exactly the same
		order we should hold the kernel mutex up to this point,
		adding to the contention of the kernel mutex. However, if
		a transaction T2 is able to see modifications made by
		a transaction T1, T2 will always get a bigger transaction
		number and a bigger commit lsn than T1. */

		/*--------------*/
		mtr_commit(&mtr);
		/*--------------*/
		lsn = mtr.end_lsn;

		mutex_enter(&kernel_mutex);
	}

	ut_ad(trx->conc_state == TRX_ACTIVE
	      || trx->conc_state == TRX_PREPARED);
	ut_ad(mutex_own(&kernel_mutex));

	/* The following assignment makes the transaction committed in memory
	and makes its changes to data visible to other transactions.
	NOTE that there is a small discrepancy from the strict formal
	visibility rules here: a human user of the database can see
	modifications made by another transaction T even before the necessary
	log segment has been flushed to the disk. If the database happens to
	crash before the flush, the user has seen modifications from T which
	will never be a committed transaction. However, any transaction T2
	which sees the modifications of the committing transaction T, and
	which also itself makes modifications to the database, will get an lsn
	larger than the committing transaction T. In the case where the log
	flush fails, and T never gets committed, also T2 will never get
	committed. */

	/*--------------------------------------*/
	trx->conc_state = TRX_COMMITTED_IN_MEMORY;
	/*--------------------------------------*/

	/* If we release kernel_mutex below and we are still doing
	recovery, i.e. the background rollback thread is still active,
	then there is a chance that the rollback thread may see
	this trx as COMMITTED_IN_MEMORY and go ahead to clean it
	up by calling trx_cleanup_at_db_startup(). This can happen
	when we are committing a trx here that was left in the
	PREPARED state during the crash. Note that the commit or
	rollback of a PREPARED trx happens in the recovery thread
	while the rollback of other transactions happens in the
	background thread. To avoid this race we unconditionally
	unset the is_recovered flag of the trx. */

	trx->is_recovered = FALSE;

	lock_release_off_kernel(trx);

	if (trx->global_read_view) {
		read_view_close(trx->global_read_view);
		mem_heap_empty(trx->global_read_view_heap);
		trx->global_read_view = NULL;
	}

	trx->read_view = NULL;

	if (lsn) {

		mutex_exit(&kernel_mutex);

		if (trx->insert_undo != NULL) {

			trx_undo_insert_cleanup(trx);
		}

		/* NOTE that we could possibly make a group commit more
		efficient here: call os_thread_yield here to allow also other
		trxs to come to commit! */

		/*-------------------------------------*/

		/* Depending on the my.cnf options, we may now write the log
		buffer to the log files, making the transaction durable if
		the OS does not crash. We may also flush the log files to
		disk, making the transaction durable also at an OS crash or a
		power outage.

		The idea in InnoDB's group commit is that a group of
		transactions gather behind a trx doing a physical disk write
		to log files, and when that physical write has been completed,
		one of those transactions does a write which commits the whole
		group. Note that this group commit will only bring benefit if
		there are > 2 users in the database. Then at least 2 users can
		gather behind one doing the physical log write to disk.

		If we are calling trx_commit() under prepare_commit_mutex, we
		will delay the possible log write and flush to a separate
		function trx_commit_complete_for_mysql(), which is only called
		when the thread has released the mutex. This is to make the
		group commit algorithm work. Otherwise, the prepare_commit
		mutex would serialize all commits and prevent a group of
		transactions from gathering. */

		if (trx->flush_log_later) {
			/* Do nothing yet */
			trx->must_flush_log_later = TRUE;
		} else if (srv_flush_log_at_trx_commit == 0) {
			/* Do nothing */
		} else if (srv_flush_log_at_trx_commit == 1) {
			if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
				/* Write the log but do not flush it to disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
						FALSE);
			} else {
				/* Write the log to the log files AND flush
				them to disk */

				log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
			}
		} else if (srv_flush_log_at_trx_commit == 2) {

			/* Write the log but do not flush it to disk */

			log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
		} else {
			ut_error;
		}

		trx->commit_lsn = lsn;

		/*-------------------------------------*/

		mutex_enter(&kernel_mutex);
	}

	/* Free all savepoints */
	trx_roll_free_all_savepoints(trx);

	trx->conc_state = TRX_NOT_STARTED;
	trx->rseg = NULL;
	trx->undo_no = ut_dulint_zero;
	trx->last_sql_stat_start.least_undo_no = ut_dulint_zero;

	ut_ad(UT_LIST_GET_LEN(trx->wait_thrs) == 0);
	ut_ad(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	UT_LIST_REMOVE(trx_list, trx_sys->trx_list, trx);
}
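
The commit tail above boils down to a small decision on srv_flush_log_at_trx_commit: 0 does nothing now, 1 writes the log and normally also flushes it (unless the file flush method is SRV_UNIX_NOSYNC), 2 writes without flushing, and the flush_log_later case defers everything to trx_commit_complete_for_mysql(). A hedged sketch of just that decision; the function and flag names are hypothetical stand-ins, not the real log API.

#include <stdio.h>

enum { DO_NOTHING, WRITE_ONLY, WRITE_AND_FLUSH };

/* flush_log_at_trx_commit: assumed 0, 1 or 2; nosync stands in for
srv_unix_file_flush_method == SRV_UNIX_NOSYNC. */
static int commit_flush_action(int flush_log_at_trx_commit, int nosync)
{
	switch (flush_log_at_trx_commit) {
	case 0:
		return(DO_NOTHING);
	case 1:
		return(nosync ? WRITE_ONLY : WRITE_AND_FLUSH);
	case 2:
		return(WRITE_ONLY);
	default:
		return(-1);	/* the real code would hit ut_error here */
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       commit_flush_action(1, 0),	/* 2: write and flush */
	       commit_flush_action(2, 0),	/* 1: write only */
	       commit_flush_action(0, 0));	/* 0: nothing now */
	return 0;
}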
Example No. 22
void
trx_free(
/*=====*/
	trx_t*	trx)	/* in, own: trx object */
{
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->declared_to_be_inside_innodb) {
		ut_print_timestamp(stderr);
		fputs("  InnoDB: Error: Freeing a trx which is declared"
		      " to be processing\n"
		      "InnoDB: inside InnoDB.\n", stderr);
		trx_print(stderr, trx, 600);
		putc('\n', stderr);
	}

	if (trx->n_mysql_tables_in_use != 0
	    || trx->mysql_n_tables_locked != 0) {

		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Error: MySQL is freeing a thd\n"
			"InnoDB: though trx->n_mysql_tables_in_use is %lu\n"
			"InnoDB: and trx->mysql_n_tables_locked is %lu.\n",
			(ulong)trx->n_mysql_tables_in_use,
			(ulong)trx->mysql_n_tables_locked);

		trx_print(stderr, trx, 600);

		ut_print_buf(stderr, trx, sizeof(trx_t));
	}

	ut_a(trx->magic_n == TRX_MAGIC_N);

	trx->magic_n = 11112222;

	ut_a(trx->conc_state == TRX_NOT_STARTED);

	mutex_free(&(trx->undo_mutex));

	ut_a(trx->insert_undo == NULL);
	ut_a(trx->update_undo == NULL);

	if (trx->undo_no_arr) {
		trx_undo_arr_free(trx->undo_no_arr);
	}

	ut_a(UT_LIST_GET_LEN(trx->signals) == 0);
	ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0);

	ut_a(trx->wait_lock == NULL);
	ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	ut_a(!trx->has_search_latch);
	ut_a(!trx->auto_inc_lock);

	ut_a(trx->dict_operation_lock_mode == 0);

	if (trx->lock_heap) {
		mem_heap_free(trx->lock_heap);
	}

	ut_a(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	if (trx->global_read_view_heap) {
		mem_heap_free(trx->global_read_view_heap);
	}

	trx->global_read_view = NULL;

	ut_a(trx->read_view == NULL);

	mem_free(trx);
}
Example No. 23
/********************************************************************//**
Frees a transaction object. */
UNIV_INTERN
void
trx_free(
/*=====*/
	trx_t*	trx)	/*!< in, own: trx object */
{
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->declared_to_be_inside_innodb) {
		ut_print_timestamp(stderr);
		fputs("  InnoDB: Error: Freeing a trx which is declared"
		      " to be processing\n"
		      "InnoDB: inside InnoDB.\n", stderr);
		trx_print(stderr, trx, 600);
		putc('\n', stderr);

		/* This is an error but not a fatal error. We must keep
		the counters like srv_conc_n_threads accurate. */
		srv_conc_force_exit_innodb(trx);
	}

	if (trx->n_mysql_tables_in_use != 0
	    || trx->mysql_n_tables_locked != 0) {

		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Error: MySQL is freeing a thd\n"
			"InnoDB: though trx->n_mysql_tables_in_use is %lu\n"
			"InnoDB: and trx->mysql_n_tables_locked is %lu.\n",
			(ulong)trx->n_mysql_tables_in_use,
			(ulong)trx->mysql_n_tables_locked);

		trx_print(stderr, trx, 600);

		ut_print_buf(stderr, trx, sizeof(trx_t));
		putc('\n', stderr);
	}

	ut_a(trx->magic_n == TRX_MAGIC_N);

	trx->magic_n = 11112222;

	ut_a(trx->conc_state == TRX_NOT_STARTED);

	mutex_free(&(trx->undo_mutex));

	ut_a(trx->insert_undo == NULL);
	ut_a(trx->update_undo == NULL);

	if (trx->undo_no_arr) {
		trx_undo_arr_free(trx->undo_no_arr);
	}

	ut_a(UT_LIST_GET_LEN(trx->signals) == 0);
	ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0);

	ut_a(trx->wait_lock == NULL);
	ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	ut_a(!trx->has_search_latch);

	ut_a(trx->dict_operation_lock_mode == 0);

	if (trx->lock_heap) {
		mem_heap_free(trx->lock_heap);
	}

	ut_a(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	if (trx->global_read_view_heap) {
		mem_heap_free(trx->global_read_view_heap);
	}

	trx->global_read_view = NULL;

	ut_a(trx->read_view == NULL);

	ut_a(ib_vector_is_empty(trx->autoinc_locks));
	/* We allocated a dedicated heap for the vector. */
	ib_vector_free(trx->autoinc_locks);

	mem_free(trx);
}
Example No. 24
/**********************************************************************//**
Prints info about a transaction to the given file. The caller must own the
kernel mutex. */
UNIV_INTERN
void
trx_print(
/*======*/
	FILE*	f,		/*!< in: output stream */
	trx_t*	trx,		/*!< in: transaction */
	ulint	max_query_len)	/*!< in: max query length to print, or 0 to
				   use the default max length */
{
	ibool	newline;

	fprintf(f, "TRANSACTION " TRX_ID_FMT, TRX_ID_PREP_PRINTF(trx->id));

	switch (trx->conc_state) {
	case TRX_NOT_STARTED:
		fputs(", not started", f);
		break;
	case TRX_ACTIVE:
		fprintf(f, ", ACTIVE %lu sec",
			(ulong)difftime(time(NULL), trx->start_time));
		break;
	case TRX_PREPARED:
		fprintf(f, ", ACTIVE (PREPARED) %lu sec",
			(ulong)difftime(time(NULL), trx->start_time));
		break;
	case TRX_COMMITTED_IN_MEMORY:
		fputs(", COMMITTED IN MEMORY", f);
		break;
	default:
		fprintf(f, " state %lu", (ulong) trx->conc_state);
	}

#ifdef UNIV_LINUX
	fprintf(f, ", process no %lu", trx->mysql_process_no);
#endif
	fprintf(f, ", OS thread id %lu",
		(ulong) os_thread_pf(trx->mysql_thread_id));

	if (*trx->op_info) {
		putc(' ', f);
		fputs(trx->op_info, f);
	}

	if (trx->is_recovered) {
		fputs(" recovered trx", f);
	}

	if (trx->is_purge) {
		fputs(" purge trx", f);
	}

	if (trx->declared_to_be_inside_innodb) {
		fprintf(f, ", thread declared inside InnoDB %lu",
			(ulong) trx->n_tickets_to_enter_innodb);
	}

	putc('\n', f);

	if (trx->n_mysql_tables_in_use > 0 || trx->mysql_n_tables_locked > 0) {
		fprintf(f, "mysql tables in use %lu, locked %lu\n",
			(ulong) trx->n_mysql_tables_in_use,
			(ulong) trx->mysql_n_tables_locked);
	}

	newline = TRUE;

	switch (trx->que_state) {
	case TRX_QUE_RUNNING:
		newline = FALSE; break;
	case TRX_QUE_LOCK_WAIT:
		fputs("LOCK WAIT ", f); break;
	case TRX_QUE_ROLLING_BACK:
		fputs("ROLLING BACK ", f); break;
	case TRX_QUE_COMMITTING:
		fputs("COMMITTING ", f); break;
	default:
		fprintf(f, "que state %lu ", (ulong) trx->que_state);
	}

	if (0 < UT_LIST_GET_LEN(trx->trx_locks)
	    || mem_heap_get_size(trx->lock_heap) > 400) {
		newline = TRUE;

		fprintf(f, "%lu lock struct(s), heap size %lu,"
			" %lu row lock(s)",
			(ulong) UT_LIST_GET_LEN(trx->trx_locks),
			(ulong) mem_heap_get_size(trx->lock_heap),
			(ulong) lock_number_of_rows_locked(trx));
	}

	if (trx->has_search_latch) {
		newline = TRUE;
		fputs(", holds adaptive hash latch", f);
	}

	if (!ut_dulint_is_zero(trx->undo_no)) {
		newline = TRUE;
		fprintf(f, ", undo log entries %lu",
			(ulong) ut_dulint_get_low(trx->undo_no));
	}

	if (newline) {
		putc('\n', f);
	}

	if (trx->mysql_thd != NULL) {
		innobase_mysql_print_thd(f, trx->mysql_thd, max_query_len);
	}
}
Example No. 25
/****************************************************************//**
Starts handling of a trx signal. */
UNIV_INTERN
void
trx_sig_start_handle(
/*=================*/
	trx_t*		trx,		/*!< in: trx handle */
	que_thr_t**	next_thr)	/*!< in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread; if the parameter
					is NULL, it is ignored */
{
	trx_sig_t*	sig;
	ulint		type;
loop:
	/* We loop in this function body as long as there are queued signals
	we can process immediately */

	ut_ad(trx);
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->handling_signals && (UT_LIST_GET_LEN(trx->signals) == 0)) {

		trx_end_signal_handling(trx);

		return;
	}

	if (trx->conc_state == TRX_NOT_STARTED) {

		trx_start_low(trx, ULINT_UNDEFINED);
	}

	/* If the trx is in a lock wait state, moves the waiting query threads
	to the suspended state */

	if (trx->que_state == TRX_QUE_LOCK_WAIT) {

		trx_lock_wait_to_suspended(trx);
	}

	/* If the session is in the error state and this trx has threads
	waiting for reply from signals, moves these threads to the suspended
	state, canceling wait reservations; note that if the transaction has
	sent a commit or rollback signal to itself, and its session is not in
	the error state, then nothing is done here. */

	if (trx->sess->state == SESS_ERROR) {
		trx_sig_reply_wait_to_suspended(trx);
	}

	/* If there are no running query threads, we can start processing of a
	signal, otherwise we have to wait until all query threads of this
	transaction are aware of the arrival of the signal. */

	if (trx->n_active_thrs > 0) {

		return;
	}

	if (trx->handling_signals == FALSE) {
		trx->graph_before_signal_handling = trx->graph;

		trx->handling_signals = TRUE;
	}

	sig = UT_LIST_GET_FIRST(trx->signals);
	type = sig->type;

	if (type == TRX_SIG_COMMIT) {

		trx_handle_commit_sig_off_kernel(trx, next_thr);

	} else if ((type == TRX_SIG_TOTAL_ROLLBACK)
		   || (type == TRX_SIG_ROLLBACK_TO_SAVEPT)) {

		trx_rollback(trx, sig, next_thr);

		/* No further signals can be handled until the rollback
		completes, therefore we return */

		return;

	} else if (type == TRX_SIG_ERROR_OCCURRED) {

		trx_rollback(trx, sig, next_thr);

		/* No further signals can be handled until the rollback
		completes, therefore we return */

		return;

	} else if (type == TRX_SIG_BREAK_EXECUTION) {

		trx_sig_reply(sig, next_thr);
		trx_sig_remove(trx, sig);
	} else {
		ut_error;
	}

	goto loop;
}
Example No. 26
/****************************************************************//**
Sends a signal to a trx object. */
UNIV_INTERN
void
trx_sig_send(
/*=========*/
	trx_t*		trx,		/*!< in: trx handle */
	ulint		type,		/*!< in: signal type */
	ulint		sender,		/*!< in: TRX_SIG_SELF or
					TRX_SIG_OTHER_SESS */
	que_thr_t*	receiver_thr,	/*!< in: query thread which wants the
					reply, or NULL; if type is
					TRX_SIG_END_WAIT, this must be NULL */
	trx_savept_t*	savept,		/*!< in: possible rollback savepoint, or
					NULL */
	que_thr_t**	next_thr)	/*!< in/out: next query thread to run;
					if the value which is passed in is
					a pointer to a NULL pointer, then the
					calling function can start running
					a new query thread; if the parameter
					is NULL, it is ignored */
{
	trx_sig_t*	sig;
	trx_t*		receiver_trx;

	ut_ad(trx);
	ut_ad(mutex_own(&kernel_mutex));

	if (!trx_sig_is_compatible(trx, type, sender)) {
		/* The signal is not compatible with the other signals in
		the queue: die */

		ut_error;
	}

	/* Queue the signal object */

	if (UT_LIST_GET_LEN(trx->signals) == 0) {

		/* The signal list is empty: the 'sig' slot must be unused
		(we improve performance a bit by avoiding mem_alloc) */
		sig = &(trx->sig);
	} else {
		/* It might be that the 'sig' slot is unused also in this
		case, but we choose the easy way of using mem_alloc */

		sig = mem_alloc(sizeof(trx_sig_t));
	}

	UT_LIST_ADD_LAST(signals, trx->signals, sig);

	sig->type = type;
	sig->sender = sender;
	sig->receiver = receiver_thr;

	if (savept) {
		sig->savept = *savept;
	}

	if (receiver_thr) {
		receiver_trx = thr_get_trx(receiver_thr);

		UT_LIST_ADD_LAST(reply_signals, receiver_trx->reply_signals,
				 sig);
	}

	if (trx->sess->state == SESS_ERROR) {

		trx_sig_reply_wait_to_suspended(trx);
	}

	if ((sender != TRX_SIG_SELF) || (type == TRX_SIG_BREAK_EXECUTION)) {
		ut_error;
	}

	/* If there were no other signals ahead in the queue, try to start
	handling of the signal */

	if (UT_LIST_GET_FIRST(trx->signals) == sig) {

		trx_sig_start_handle(trx, next_thr);
	}
}
Example No. 27
/*****************************************************************//**
Checks the compatibility of a new signal with the other signals in the
queue.
@return	TRUE if the signal can be queued */
static
ibool
trx_sig_is_compatible(
/*==================*/
	trx_t*	trx,	/*!< in: trx handle */
	ulint	type,	/*!< in: signal type */
	ulint	sender)	/*!< in: TRX_SIG_SELF or TRX_SIG_OTHER_SESS */
{
	trx_sig_t*	sig;

	ut_ad(mutex_own(&kernel_mutex));

	if (UT_LIST_GET_LEN(trx->signals) == 0) {

		return(TRUE);
	}

	if (sender == TRX_SIG_SELF) {
		if (type == TRX_SIG_ERROR_OCCURRED) {

			return(TRUE);

		} else if (type == TRX_SIG_BREAK_EXECUTION) {

			return(TRUE);
		} else {
			return(FALSE);
		}
	}

	ut_ad(sender == TRX_SIG_OTHER_SESS);

	sig = UT_LIST_GET_FIRST(trx->signals);

	if (type == TRX_SIG_COMMIT) {
		while (sig != NULL) {

			if (sig->type == TRX_SIG_TOTAL_ROLLBACK) {

				return(FALSE);
			}

			sig = UT_LIST_GET_NEXT(signals, sig);
		}

		return(TRUE);

	} else if (type == TRX_SIG_TOTAL_ROLLBACK) {
		while (sig != NULL) {

			if (sig->type == TRX_SIG_COMMIT) {

				return(FALSE);
			}

			sig = UT_LIST_GET_NEXT(signals, sig);
		}

		return(TRUE);

	} else if (type == TRX_SIG_BREAK_EXECUTION) {

		return(TRUE);
	} else {
		ut_error;

		return(FALSE);
	}
}
Example No. 28
void
rw_lock_create_func(
    /*================*/
    rw_lock_t*	lock,		/* in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
    ulint		level,		/* in: level */
# endif /* UNIV_SYNC_DEBUG */
    const char*	cmutex_name, 	/* in: mutex name */
#endif /* UNIV_DEBUG */
    const char*	cfile_name,	/* in: file name where created */
    ulint 		cline)		/* in: file line where created */
{
    /* If this is the very first time a synchronization object is
    created, then the following call initializes the sync system. */

    mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);

    lock->mutex.cfile_name = cfile_name;
    lock->mutex.cline = cline;

#if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
    lock->mutex.cmutex_name = cmutex_name;
    lock->mutex.mutex_type = 1;
#endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */

    rw_lock_set_waiters(lock, 0);
    rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
    lock->writer_count = 0;
    rw_lock_set_reader_count(lock, 0);

    lock->writer_is_wait_ex = FALSE;

#ifdef UNIV_SYNC_DEBUG
    UT_LIST_INIT(lock->debug_list);

    lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

    lock->magic_n = RW_LOCK_MAGIC_N;

    lock->cfile_name = cfile_name;
    lock->cline = (unsigned int) cline;

    lock->last_s_file_name = "not yet reserved";
    lock->last_x_file_name = "not yet reserved";
    lock->last_s_line = 0;
    lock->last_x_line = 0;
    lock->event = os_event_create(NULL);

#ifdef __WIN__
    lock->wait_ex_event = os_event_create(NULL);
#endif

    mutex_enter(&rw_lock_list_mutex);

    if (UT_LIST_GET_LEN(rw_lock_list) > 0) {
        ut_a(UT_LIST_GET_FIRST(rw_lock_list)->magic_n
             == RW_LOCK_MAGIC_N);
    }

    UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

    mutex_exit(&rw_lock_list_mutex);
}
Example No. 29
/********************************************************************//**
Allocates memory from a pool. NOTE: This low-level function should only be
used in mem0mem.*!
@return	own: allocated memory buffer */
UNIV_INTERN
void*
mem_area_alloc(
/*===========*/
	ulint*		psize,	/*!< in: requested size in bytes; for optimum
				space usage, the size should be a power of 2
				minus MEM_AREA_EXTRA_SIZE;
				out: allocated size in bytes (greater than
				or equal to the requested size) */
	mem_pool_t*	pool)	/*!< in: memory pool */
{
	mem_area_t*	area;
	ulint		size;
	ulint		n;
	ibool		ret;

	/* If we are using the OS allocator, just make a simple call
	to malloc */
	if (UNIV_LIKELY(srv_use_sys_malloc)) {
		return(malloc(*psize));
	}

	size = *psize;
	n = ut_2_log(ut_max(size + MEM_AREA_EXTRA_SIZE, MEM_AREA_MIN_SIZE));

	mutex_enter(&(pool->mutex));
	mem_n_threads_inside++;

	ut_a(mem_n_threads_inside == 1);

	area = UT_LIST_GET_FIRST(pool->free_list[n]);

	if (area == NULL) {
		ret = mem_pool_fill_free_list(n, pool);

		if (ret == FALSE) {
			/* Out of memory in memory pool: we try to allocate
			from the operating system with the regular malloc: */

			mem_n_threads_inside--;
			mutex_exit(&(pool->mutex));

			return(ut_malloc(size));
		}

		area = UT_LIST_GET_FIRST(pool->free_list[n]);
	}

	if (!mem_area_get_free(area)) {
		fprintf(stderr,
			"InnoDB: Error: Removing element from mem pool"
			" free list %lu though the\n"
			"InnoDB: element is not marked free!\n",
			(ulong) n);

		mem_analyze_corruption(area);

		/* Try to analyze a strange assertion failure reported at
		[email protected] where the free bit IS 1 in the
		hex dump above */

		if (mem_area_get_free(area)) {
			fprintf(stderr,
				"InnoDB: Probably a race condition"
				" because now the area is marked free!\n");
		}

		ut_error;
	}

	if (UT_LIST_GET_LEN(pool->free_list[n]) == 0) {
		fprintf(stderr,
			"InnoDB: Error: Removing element from mem pool"
			" free list %lu\n"
			"InnoDB: though the list length is 0!\n",
			(ulong) n);
		mem_analyze_corruption(area);

		ut_error;
	}

	ut_ad(mem_area_get_size(area) == ut_2_exp(n));

	mem_area_set_free(area, FALSE);

	UT_LIST_REMOVE(free_list, pool->free_list[n], area);

	pool->reserved += mem_area_get_size(area);

	mem_n_threads_inside--;
	mutex_exit(&(pool->mutex));

	ut_ad(mem_pool_validate(pool));

	*psize = ut_2_exp(n) - MEM_AREA_EXTRA_SIZE;
	UNIV_MEM_ALLOC(MEM_AREA_EXTRA_SIZE + (byte*)area, *psize);

	return((void*)(MEM_AREA_EXTRA_SIZE + ((byte*)area)));
}
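
mem_area_alloc() maps the requested size to a free-list index with ut_2_log(ut_max(size + MEM_AREA_EXTRA_SIZE, MEM_AREA_MIN_SIZE)), i.e. a ceiling log2 after adding the per-area header overhead. A small sketch of that computation; the constant values below are assumptions, not the real InnoDB constants.

#include <stdio.h>

#define EXTRA_SIZE	32UL	/* stands in for MEM_AREA_EXTRA_SIZE */
#define MIN_SIZE	64UL	/* stands in for MEM_AREA_MIN_SIZE */

/* Smallest n such that 2^n >= x (for x >= 1), i.e. a ceiling log2. */
static unsigned ceil_log2(unsigned long x)
{
	unsigned	n = 0;

	while ((1UL << n) < x) {
		n++;
	}

	return(n);
}

int main(void)
{
	unsigned long	request = 1000;
	unsigned long	needed = request + EXTRA_SIZE;
	unsigned	n = ceil_log2(needed < MIN_SIZE ? MIN_SIZE : needed);

	printf("free list index %u, area size %lu, usable %lu\n",
	       n, 1UL << n, (1UL << n) - EXTRA_SIZE);
	/* prints: free list index 11, area size 2048, usable 2016 */
	return 0;
}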
Example No. 30
ulint
dict_create_or_check_foreign_constraint_tables(void)
/*================================================*/
				/* out: DB_SUCCESS or error code */
{
	dict_table_t*	table1;
	dict_table_t*	table2;
	ulint		error;
	trx_t*		trx;

	mutex_enter(&(dict_sys->mutex));

	table1 = dict_table_get_low("SYS_FOREIGN");
	table2 = dict_table_get_low("SYS_FOREIGN_COLS");

	if (table1 && table2
	    && UT_LIST_GET_LEN(table1->indexes) == 3
	    && UT_LIST_GET_LEN(table2->indexes) == 1) {

		/* Foreign constraint system tables have already been
		created, and they are ok */

		mutex_exit(&(dict_sys->mutex));

		return(DB_SUCCESS);
	}

	mutex_exit(&(dict_sys->mutex));

	trx = trx_allocate_for_mysql();

	trx->op_info = "creating foreign key sys tables";

	row_mysql_lock_data_dictionary(trx);

	if (table1) {
		fprintf(stderr,
			"InnoDB: dropping incompletely created"
			" SYS_FOREIGN table\n");
		row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
	}

	if (table2) {
		fprintf(stderr,
			"InnoDB: dropping incompletely created"
			" SYS_FOREIGN_COLS table\n");
		row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
	}

	fprintf(stderr,
		"InnoDB: Creating foreign key constraint system tables\n");

	/* NOTE: in dict_load_foreigns we use the fact that
	there are 2 secondary indexes on SYS_FOREIGN, and they
	are defined just like below */

	/* NOTE: when designing InnoDB's foreign key support in 2001, we made
	an error and made the table names and the foreign key id of type
	'CHAR' (internally, really a VARCHAR). We should have made the type
	VARBINARY, like in other InnoDB system tables, to get a clean
	design. */

	error = que_eval_sql(NULL,
			     "PROCEDURE CREATE_FOREIGN_SYS_TABLES_PROC () IS\n"
			     "BEGIN\n"
			     "CREATE TABLE\n"
			     "SYS_FOREIGN(ID CHAR, FOR_NAME CHAR,"
			     " REF_NAME CHAR, N_COLS INT);\n"
			     "CREATE UNIQUE CLUSTERED INDEX ID_IND"
			     " ON SYS_FOREIGN (ID);\n"
			     "CREATE INDEX FOR_IND"
			     " ON SYS_FOREIGN (FOR_NAME);\n"
			     "CREATE INDEX REF_IND"
			     " ON SYS_FOREIGN (REF_NAME);\n"
			     "CREATE TABLE\n"
			     "SYS_FOREIGN_COLS(ID CHAR, POS INT,"
			     " FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
			     "CREATE UNIQUE CLUSTERED INDEX ID_IND"
			     " ON SYS_FOREIGN_COLS (ID, POS);\n"
			     "COMMIT WORK;\n"
			     "END;\n"
			     , FALSE, trx);

	if (error != DB_SUCCESS) {
		fprintf(stderr, "InnoDB: error %lu in creation\n",
			(ulong) error);

		ut_a(error == DB_OUT_OF_FILE_SPACE
		     || error == DB_TOO_MANY_CONCURRENT_TRXS);

		fprintf(stderr,
			"InnoDB: creation failed\n"
			"InnoDB: tablespace is full\n"
			"InnoDB: dropping incompletely created"
			" SYS_FOREIGN tables\n");

		row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
		row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);

		error = DB_MUST_GET_MORE_FILE_SPACE;
	}

	trx->op_info = "";

	row_mysql_unlock_data_dictionary(trx);

	trx_free_for_mysql(trx);

	if (error == DB_SUCCESS) {
		fprintf(stderr,
			"InnoDB: Foreign key constraint system tables"
			" created\n");
	}

	return(error);
}