Code example #1
File: mtr0mtr.c  Project: isleon/Jaxer
/*********************************************************************
Releases the item in the slot given. */
UNIV_INLINE
void
mtr_memo_slot_release(
/*==================*/
	mtr_t*			mtr,	/* in: mtr */
	mtr_memo_slot_t*	slot)	/* in: memo slot */
{
	void*	object;
	ulint	type;

	ut_ad(mtr && slot);

	object = slot->object;
	type = slot->type;

	if (UNIV_LIKELY(object != NULL)) {
		if (type <= MTR_MEMO_BUF_FIX) {
			buf_page_release((buf_block_t*)object, type, mtr);
		} else if (type == MTR_MEMO_S_LOCK) {
			rw_lock_s_unlock((rw_lock_t*)object);
#ifdef UNIV_DEBUG
		} else if (type == MTR_MEMO_X_LOCK) {
			rw_lock_x_unlock((rw_lock_t*)object);
		} else {
			ut_ad(type == MTR_MEMO_MODIFY);
			ut_ad(mtr_memo_contains(mtr, object,
						MTR_MEMO_PAGE_X_FIX));
#else
		} else {
			rw_lock_x_unlock((rw_lock_t*)object);
#endif
		}
	}

	slot->object = NULL;
}
Code example #2
File: btr0sea.c  Project: Ihon/mysql-5.5-debian
/*****************************************************************//**
This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for the hash table heap in the btr
search system. If not, allocates a free frame for the heap. This check makes
it probable that, when we have reserved the btr search system latch and need
to allocate a new node for the hash table, the allocation will succeed.
However, the check does not guarantee success. */
static
void
btr_search_check_free_space_in_heap(void)
/*=====================================*/
{
	hash_table_t*	table;
	mem_heap_t*	heap;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
	ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

	table = btr_search_sys->hash_index;

	heap = table->heap;

	/* Note that we peek the value of heap->free_block without reserving
	the latch: this is ok, because we do not guarantee that there will
	be enough free space in the hash table anyway. */

	if (heap->free_block == NULL) {
		buf_block_t*	block = buf_block_alloc(NULL, 0);

		rw_lock_x_lock(&btr_search_latch);

		if (heap->free_block == NULL) {
			heap->free_block = block;
		} else {
			buf_block_free(block);
		}

		rw_lock_x_unlock(&btr_search_latch);
	}
}
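The function above pre-allocates the spare frame before taking btr_search_latch and then double-checks under the latch, so the potentially blocking buffer-pool allocation never happens while the latch is held. Below is a minimal sketch of the same "allocate outside the latch, re-check inside" pattern outside InnoDB; it is not InnoDB code, and cache_t, alloc_block() and free_block() are hypothetical stand-ins.

/* Minimal sketch (not InnoDB code) of the pattern used by
   btr_search_check_free_space_in_heap() above. */
#include <pthread.h>
#include <stdlib.h>

typedef struct {
	pthread_rwlock_t	latch;	/* plays the role of btr_search_latch */
	void*			spare;	/* pre-allocated free block, or NULL */
} cache_t;

static void*	alloc_block(void)   { return(malloc(16384)); }
static void	free_block(void* p) { free(p); }

static void
cache_check_free_space(cache_t* c)
{
	/* Peek without the latch: a stale read only costs an extra
	allocation attempt, it never breaks correctness. */
	if (c->spare == NULL) {
		void*	block = alloc_block();	/* may block; done before latching */

		pthread_rwlock_wrlock(&c->latch);

		if (c->spare == NULL) {
			c->spare = block;	/* we won the race */
		} else {
			free_block(block);	/* another thread refilled it first */
		}

		pthread_rwlock_unlock(&c->latch);
	}
}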
Code example #3
File: btr0sea.c  Project: Ihon/mysql-5.5-debian
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	/* Disable access to the hash index; also tell ha_insert_for_fold()
	to stop adding new nodes to the hash index, but still allow updating
	existing nodes */
	btr_search_enabled = FALSE;

	/* Clear all block->is_hashed flags and remove all entries
	from btr_search_sys->hash_index. */
	buf_pool_drop_hash_index();

	/* The hash index has been cleaned up; disallow any further
	operation on it */
	btr_search_fully_disabled = TRUE;

	/* btr_search_enabled_mutex should guarantee this. */
	ut_ad(!btr_search_enabled);

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
Code example #4
/********************************************************************//**
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;

	rw_lock_x_unlock(&btr_search_latch);
}
Code example #5
File: trx0i_s.c  Project: pombredanne/mysql-1
/*******************************************************************//**
Release an exclusive/write lock on the tables cache. */
UNIV_INTERN
void
trx_i_s_cache_end_write(
/*====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
#ifdef UNIV_SYNC_DEBUG
	ut_a(rw_lock_own(&cache->rw_lock, RW_LOCK_EX));
#endif

	rw_lock_x_unlock(&cache->rw_lock);
}
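trx_i_s_cache_end_write() is the release half of a start/end pair; trx_i_s_cache_start_write() in the same file acquires the X lock on cache->rw_lock. The sketch below shows roughly how a caller brackets an update of the shared cache; refresh_cache_contents() is a hypothetical placeholder, not a function from trx0i_s.c.

/* Illustrative caller only; refresh_cache_contents() is hypothetical. */
static void refresh_cache_contents(trx_i_s_cache_t* cache);	/* hypothetical */

static void
cache_refresh(
	trx_i_s_cache_t*	cache)	/*!< in/out: shared cache */
{
	trx_i_s_cache_start_write(cache);	/* rw_lock_x_lock(&cache->rw_lock) */

	refresh_cache_contents(cache);		/* rebuild the rows under the X lock */

	trx_i_s_cache_end_write(cache);		/* rw_lock_x_unlock(&cache->rw_lock) */
}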
Code example #6
File: btr0sea.c  Project: Suker-Xu/david-mysql-tools
/********************************************************************//**
Enable the adaptive hash search system. */
UNIV_INTERN
void
btr_search_enable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = TRUE;

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
Code example #7
File: btr0sea.c  Project: Suker-Xu/david-mysql-tools
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	mutex_enter(&btr_search_enabled_mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear all block->is_hashed flags and remove all entries
	from btr_search_sys->hash_index. */
	buf_pool_drop_hash_index();

	/* btr_search_enabled_mutex should guarantee this. */
	ut_ad(!btr_search_enabled);

	rw_lock_x_unlock(&btr_search_latch);
	mutex_exit(&btr_search_enabled_mutex);
}
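The btr_search_enable()/btr_search_disable() pair above is typically driven from the server's innodb_adaptive_hash_index setting. A simplified toggle wrapper is sketched below; the real update hook lives in ha_innodb.cc, and the wrapper name and signature here are illustrative only.

/* Simplified toggle; set_adaptive_hash_index() is an illustrative name,
   not the actual handler hook. */
static void
set_adaptive_hash_index(
	ibool	enable)	/*!< in: TRUE to enable the adaptive hash index */
{
	if (enable) {
		btr_search_enable();
	} else {
		btr_search_disable();
	}
}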
Code example #8
File: mtr0mtr.c  Project: Suker-Xu/david-mysql-tools
/*****************************************************************//**
Releases the item in the slot given. */
static
void
mtr_memo_slot_release(
/*==================*/
	mtr_t*			mtr,	/*!< in: mtr */
	mtr_memo_slot_t*	slot)	/*!< in: memo slot */
{
	void*	object;
	ulint	type;

	ut_ad(mtr);
	ut_ad(slot);

#ifndef UNIV_DEBUG
	UT_NOT_USED(mtr);
#endif /* UNIV_DEBUG */

	object = slot->object;
	type = slot->type;

	if (UNIV_LIKELY(object != NULL)) {
		if (type <= MTR_MEMO_BUF_FIX) {
			buf_page_release((buf_block_t*)object, type);
		} else if (type == MTR_MEMO_S_LOCK) {
			rw_lock_s_unlock((rw_lock_t*)object);
#ifdef UNIV_DEBUG
		} else if (type != MTR_MEMO_X_LOCK) {
			ut_ad(type == MTR_MEMO_MODIFY);
			ut_ad(mtr_memo_contains(mtr, object,
						MTR_MEMO_PAGE_X_FIX));
#endif /* UNIV_DEBUG */
		} else {
			rw_lock_x_unlock((rw_lock_t*)object);
		}
	}

	slot->object = NULL;
}
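mtr_memo_slot_release() is driven from the mini-transaction commit path, which walks the memo from the most recently pushed slot backwards so that latches are released in reverse order of acquisition. A simplified sketch of that loop follows; the function name is hypothetical, while dyn_array_get_data_size() and dyn_array_get_element() are the InnoDB dynamic-array accessors.

/* Simplified sketch of the commit-time loop that drives
   mtr_memo_slot_release(); mtr_memo_release_all() is a hypothetical name. */
UNIV_INLINE
void
mtr_memo_release_all(
/*=================*/
	mtr_t*	mtr)	/*!< in: mini-transaction being committed */
{
	dyn_array_t*	memo = &(mtr->memo);
	ulint		offset = dyn_array_get_data_size(memo);

	/* Walk the memo backwards: latches are released in reverse
	order of acquisition. */
	while (offset > 0) {
		offset -= sizeof(mtr_memo_slot_t);

		mtr_memo_slot_release(
			mtr,
			(mtr_memo_slot_t*) dyn_array_get_element(memo, offset));
	}
}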
Code example #9
/********************************************************************//**
Disable the adaptive hash search system and empty the index. */
UNIV_INTERN
void
btr_search_disable(void)
/*====================*/
{
	dict_table_t*	table;

	mutex_enter(&dict_sys->mutex);
	rw_lock_x_lock(&btr_search_latch);

	btr_search_enabled = FALSE;

	/* Clear the index->search_info->ref_count of every index in
	the data dictionary cache. */
	for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
	     table = UT_LIST_GET_NEXT(table_LRU, table)) {

		dict_index_t*	index;

		for (index = dict_table_get_first_index(table); index;
		     index = dict_table_get_next_index(index)) {

			index->search_info->ref_count = 0;
		}
	}

	mutex_exit(&dict_sys->mutex);

	/* Set all block->index = NULL. */
	buf_pool_clear_hash_index();

	/* Clear the adaptive hash index. */
	hash_table_clear(btr_search_sys->hash_index);
	mem_heap_empty(btr_search_sys->hash_index->heap);

	rw_lock_x_unlock(&btr_search_latch);
}
Code example #10
/*******************************************************************//**
This function runs a purge batch.
@return	number of undo log pages handled in the batch */
UNIV_INTERN
ulint
trx_purge(
    /*======*/
    ulint	limit)		/*!< in: the maximum number of records to
				purge in one batch */
{
    que_thr_t*	thr;
    ulint		old_pages_handled;

    if (srv_fake_write)
        return(0);

    ut_a(purge_sys->trx->n_active_thrs == 0);

    rw_lock_x_lock(&purge_sys->latch);

    mutex_enter(&kernel_mutex);

    /* Close and free the old purge view */

    read_view_close(purge_sys->view);
    purge_sys->view = NULL;
    mem_heap_empty(purge_sys->heap);

    /* Determine how long data manipulation language (DML) statements
    need to be delayed in order to reduce the lagging of the purge
    thread. */
    srv_dml_needed_delay = 0; /* in microseconds; default: no delay */

    /* If we cannot advance the 'purge view' because of an old
    'consistent read view', then the DML statements cannot be delayed.
    Also, srv_max_purge_lag <= 0 means 'infinity'. */
    if (srv_max_purge_lag > 0) {
        float	ratio = (float) trx_sys->rseg_history_len
                        / srv_max_purge_lag;
        if (ratio > ULINT_MAX / 10000) {
            /* Avoid overflow: maximum delay is 4295 seconds */
            srv_dml_needed_delay = ULINT_MAX;
        } else if (ratio > 1) {
            /* If the history list length exceeds the
            innodb_max_purge_lag, the
            data manipulation statements are delayed
            by at least 5000 microseconds. */
            srv_dml_needed_delay = (ulint) ((ratio - .5) * 10000);
        }
    }

    purge_sys->view = read_view_oldest_copy_or_open_new(
                          0, purge_sys->heap);

    mutex_exit(&kernel_mutex);

    rw_lock_x_unlock(&(purge_sys->latch));

    purge_sys->state = TRX_PURGE_ON;

    purge_sys->handle_limit = purge_sys->n_pages_handled + limit;

    old_pages_handled = purge_sys->n_pages_handled;


    mutex_enter(&kernel_mutex);

    thr = que_fork_start_command(purge_sys->query);

    ut_ad(thr);

    mutex_exit(&kernel_mutex);

    if (srv_print_thread_releases) {

        fputs("Starting purge\n", stderr);
    }

    que_run_threads(thr);

    if (srv_print_thread_releases) {

        fprintf(stderr,
                "Purge ends; pages handled %lu\n",
                (ulong) purge_sys->n_pages_handled);
    }

    return(purge_sys->n_pages_handled - old_pages_handled);
}
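The srv_dml_needed_delay computation above maps the ratio between the history list length and innodb_max_purge_lag to a per-row delay in microseconds. The standalone worked example below uses invented input values to show the arithmetic.

/* Standalone worked example of the delay formula used in trx_purge();
   the input values are made up for illustration. */
#include <stdio.h>
#include <limits.h>

int
main(void)
{
	unsigned long	rseg_history_len  = 300000;	/* pretend history list length */
	unsigned long	srv_max_purge_lag = 100000;	/* innodb_max_purge_lag */
	unsigned long	delay = 0;			/* microseconds */

	float	ratio = (float) rseg_history_len / srv_max_purge_lag;

	if (ratio > (float) ULONG_MAX / 10000) {
		delay = ULONG_MAX;	/* overflow guard: about 4295 s on 32-bit */
	} else if (ratio > 1) {
		delay = (unsigned long) ((ratio - .5) * 10000);
	}

	/* ratio = 3.0, so delay = (3.0 - 0.5) * 10000 = 25000 us per DML row. */
	printf("dml delay = %lu us\n", delay);

	return(0);
}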
Code example #11
/*******************************************************************//**
This function runs a purge batch.
@return	number of undo log pages handled in the batch */
UNIV_INTERN
ulint
trx_purge(void)
/*===========*/
{
	que_thr_t*	thr;
	/*	que_thr_t*	thr2; */
	ulonglong	old_pages_handled;

	mutex_enter(&(purge_sys->mutex));

	if (purge_sys->trx->n_active_thrs > 0) {

		mutex_exit(&(purge_sys->mutex));

		/* Should not happen */

		ut_error;

		return(0);
	}

	rw_lock_x_lock(&(purge_sys->latch));

	mutex_enter(&kernel_mutex);

	/* Close and free the old purge view */

	read_view_close(purge_sys->view);
	purge_sys->view = NULL;
	mem_heap_empty(purge_sys->heap);

	/* Determine how long data manipulation language (DML) statements
	need to be delayed in order to reduce the lagging of the purge
	thread. */
	srv_dml_needed_delay = 0; /* in microseconds; default: no delay */

	/* If we cannot advance the 'purge view' because of an old
	'consistent read view', then the DML statements cannot be delayed.
	Also, srv_max_purge_lag <= 0 means 'infinity'. */
	if (srv_max_purge_lag > 0
	    && !UT_LIST_GET_LAST(trx_sys->view_list)) {
		float	ratio = (float) trx_sys->rseg_history_len
			/ srv_max_purge_lag;
		if (ratio > ULINT_MAX / 10000) {
			/* Avoid overflow: maximum delay is 4295 seconds */
			srv_dml_needed_delay = ULINT_MAX;
		} else if (ratio > 1) {
			/* If the history list length exceeds the
			innodb_max_purge_lag, the
			data manipulation statements are delayed
			by at least 5000 microseconds. */
			srv_dml_needed_delay = (ulint) ((ratio - .5) * 10000);
		}
	}

	purge_sys->view = read_view_oldest_copy_or_open_new(ut_dulint_zero,
							    purge_sys->heap);
	mutex_exit(&kernel_mutex);

	rw_lock_x_unlock(&(purge_sys->latch));

#ifdef UNIV_DEBUG
	if (srv_purge_view_update_only_debug) {
		mutex_exit(&(purge_sys->mutex));
		return(0);
	}
#endif

	purge_sys->state = TRX_PURGE_ON;

	/* Handle at most 20 undo log pages in one purge batch */

	purge_sys->handle_limit = purge_sys->n_pages_handled + 20;

	old_pages_handled = purge_sys->n_pages_handled;

	mutex_exit(&(purge_sys->mutex));

	mutex_enter(&kernel_mutex);

	thr = que_fork_start_command(purge_sys->query);

	ut_ad(thr);

	/*	thr2 = que_fork_start_command(purge_sys->query);

	ut_ad(thr2); */


	mutex_exit(&kernel_mutex);

	/*	srv_que_task_enqueue(thr2); */

	if (srv_print_thread_releases) {

		fputs("Starting purge\n", stderr);
	}

	que_run_threads(thr);

	if (srv_print_thread_releases) {

		fprintf(stderr,
			"Purge ends; pages handled %lu\n",
			(ulong) purge_sys->n_pages_handled);
	}

	return((ulint) (purge_sys->n_pages_handled - old_pages_handled));
}
Code example #12
File: row0purge.c  Project: NickeyWoo/mysql-3.23.49
/***************************************************************
Fetches an undo log record and does the purge for the recorded operation.
If none left, or the current purge completed, returns the control to the
parent node, which is always a query thread node. */
static
ulint
row_purge(
/*======*/
				/* out: DB_SUCCESS if operation successfully
				completed, else error code */
	purge_node_t*	node,	/* in: row purge node */
	que_thr_t*	thr)	/* in: query thread */
{
	dulint	roll_ptr;
	ibool	purge_needed;
	ibool	updated_extern;
	
	ut_ad(node && thr);

	node->undo_rec = trx_purge_fetch_next_rec(&roll_ptr,
						&(node->reservation),
						node->heap);
	if (!node->undo_rec) {
		/* Purge completed for this query thread */

		thr->run_node = que_node_get_parent(node);

		return(DB_SUCCESS);
	}

	node->roll_ptr = roll_ptr;

	if (node->undo_rec == &trx_purge_dummy_rec) {
		purge_needed = FALSE;
	} else {
		purge_needed = row_purge_parse_undo_rec(node, &updated_extern,
									thr);
	}

	if (purge_needed) {
		node->found_clust = FALSE;
	
		node->index = dict_table_get_next_index(
				dict_table_get_first_index(node->table));

		if (node->rec_type == TRX_UNDO_DEL_MARK_REC) {
			row_purge_del_mark(node, thr);

		} else if (updated_extern
			    || node->rec_type == TRX_UNDO_UPD_EXIST_REC) {

			row_purge_upd_exist_or_extern(node, thr);
		}

		if (node->found_clust) {
			btr_pcur_close(&(node->pcur));
		}

		rw_lock_x_unlock(&(purge_sys->purge_is_running));		
	}

	/* Do some cleanup */
	trx_purge_rec_release(node->reservation);
	mem_heap_empty(node->heap);
	
	thr->run_node = node;

	return(DB_SUCCESS);
}
Code example #13
File: row0purge.c  Project: NickeyWoo/mysql-3.23.49
/***************************************************************
Parses the row reference and other info in a modify undo log record. */
static
ibool
row_purge_parse_undo_rec(
/*=====================*/
				/* out: TRUE if purge operation required */
	purge_node_t*	node,	/* in: row undo node */
	ibool*		updated_extern,
				/* out: TRUE if an externally stored field
				was updated */
	que_thr_t*	thr)	/* in: query thread */
{
	dict_index_t*	clust_index;
	byte*		ptr;
	dulint		undo_no;
	dulint		table_id;
	dulint		trx_id;
	dulint		roll_ptr;
	ulint		info_bits;
	ulint		type;
	ulint		cmpl_info;
	
	ut_ad(node && thr);
	
	ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
					updated_extern, &undo_no, &table_id);
	node->rec_type = type;

	if (type == TRX_UNDO_UPD_DEL_REC && !(*updated_extern)) {

		return(FALSE);
	}	    		

	ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
								&info_bits);
	node->table = NULL;

	if (type == TRX_UNDO_UPD_EXIST_REC
	    && cmpl_info & UPD_NODE_NO_ORD_CHANGE && !(*updated_extern)) {

	    	/* Purge requires no changes to indexes: we may return */

	    	return(FALSE);
	}
	
 	mutex_enter(&(dict_sys->mutex));

	node->table = dict_table_get_on_id_low(table_id, thr_get_trx(thr));

	rw_lock_x_lock(&(purge_sys->purge_is_running));

 	mutex_exit(&(dict_sys->mutex));
	
	if (node->table == NULL) {
		/* The table has been dropped: no need to do purge */

		rw_lock_x_unlock(&(purge_sys->purge_is_running));

		return(FALSE);
	}

	clust_index = dict_table_get_first_index(node->table);

	ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
								node->heap);

	ptr = trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
					roll_ptr, info_bits, node->heap,
					&(node->update));

	/* Read to the partial row the fields that occur in indexes */

	if (!(cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
		ptr = trx_undo_rec_get_partial_row(ptr, clust_index,
						&(node->row), node->heap);
	}
	
	return(TRUE);
}