Example #1
/*******************************************************************//**
Calculates a hash fold for a lock. For a record lock the fold is
calculated from 4 elements, which uniquely identify a lock at a given
point in time: transaction id, space id, page number, record number.
For a table lock the fold is the table's id.
@return	fold */
static
ulint
fold_lock(
/*======*/
	const lock_t*	lock,	/*!< in: lock object to fold */
	ulint		heap_no)/*!< in: lock's record number
				or ULINT_UNDEFINED if the lock
				is a table lock */
{
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
	static ulint	fold = 0;

	return(fold++);
#else
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
					 lock_rec_get_space_id(lock));

		ret = ut_fold_ulint_pair(ret,
					 lock_rec_get_page_no(lock));

		ret = ut_fold_ulint_pair(ret, heap_no);

		break;
	case LOCK_TABLE:
	/* This check is not strictly necessary for correct
	operation, but if it fails then something has gone
	wrong. */
		ut_a(heap_no == ULINT_UNDEFINED);

		ret = (ulint) lock_get_table_id(lock);

		break;
	default:
		ut_error;
	}

	return(ret);
#endif
}
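
The chained ut_fold_ulint_pair() calls above reduce the four identifying elements of a record lock to a single ulint key. Below is a minimal standalone sketch of that chained-folding pattern; toy_fold_pair() is a stand-in for ut_fold_ulint_pair(), and its mixing constant is made up for illustration, not InnoDB's actual one.

#include <stdio.h>

typedef unsigned long	ulint;

/* Stand-in for ut_fold_ulint_pair(); the shift/multiply constants
here are illustrative, not the ones InnoDB uses. */
static ulint
toy_fold_pair(ulint n1, ulint n2)
{
	return((((n1 << 7) ^ n1 ^ n2) * 2654435761UL) + n2);
}

int
main(void)
{
	ulint	fold;

	/* Fold (trx id, space id, page no, heap no) pairwise,
	exactly as fold_lock() chains its folds for a record lock. */
	fold = toy_fold_pair(12345UL, 0UL);	/* trx id, space id */
	fold = toy_fold_pair(fold, 3UL);	/* page number */
	fold = toy_fold_pair(fold, 4UL);	/* heap number */

	printf("fold = %lu\n", fold);

	return(0);
}
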
Example #2
/*******************************************************************//**
Checks whether an i_s_locks_row_t object represents a lock_t object.
@return	TRUE if they match */
static
ibool
locks_row_eq_lock(
/*==============*/
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	const lock_t*		lock,	/*!< in: lock object */
	ulint			heap_no)/*!< in: lock's record number
					or ULINT_UNDEFINED if the lock
					is a table lock */
{
	ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
	return(0);
#else
	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_space == lock_rec_get_space_id(lock)
		       && row->lock_page == lock_rec_get_page_no(lock)
		       && row->lock_rec == heap_no);

	case LOCK_TABLE:
	/* This check is not strictly necessary for correct
	operation, but if it fails then something has gone
	wrong. */
		ut_a(heap_no == ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_table_id == lock_get_table_id(lock));

	default:
		ut_error;
		return(FALSE);
	}
#endif
}
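
fold_lock() and locks_row_eq_lock() are the two halves of a hash lookup: the fold selects a cell, and the equality predicate confirms a match within that cell's chain, since distinct locks may fold to the same value. Here is a hedged sketch of such a lookup; the table and node types are assumptions made for illustration, not the real trx_i_s cache structures.

/* Hypothetical chained-hash lookup pairing fold_lock() (Example #1)
with locks_row_eq_lock() (Example #2); toy_node_t and toy_hash_t are
made up for the sketch. */

typedef struct toy_node_struct {
	i_s_locks_row_t*	value;	/* row stored in this node */
	struct toy_node_struct*	next;	/* next node in the same cell */
} toy_node_t;

typedef struct toy_hash_struct {
	toy_node_t**	cells;		/* array of chain heads */
	ulint		n_cells;	/* number of cells */
} toy_hash_t;

static i_s_locks_row_t*
toy_search(const toy_hash_t* table, const lock_t* lock, ulint heap_no)
{
	/* The fold only picks the cell; within the cell the full
	equality check decides, because folds can collide. */
	toy_node_t*	node
		= table->cells[fold_lock(lock, heap_no) % table->n_cells];

	while (node != NULL) {
		if (locks_row_eq_lock(node->value, lock, heap_no)) {

			return(node->value);
		}

		node = node->next;
	}

	return(NULL);
}
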
Example #3
/*******************************************************************//**
Fills an i_s_locks_row_t object. If memory cannot be allocated then
FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_locks_row(
/*===========*/
	i_s_locks_row_t* row,	/*!< out: result object that's filled */
	const lock_t*	lock,	/*!< in: lock to get data from */
	ulint		heap_no,/*!< in: lock's record number
				or ULINT_UNDEFINED if the lock
				is a table lock */
	trx_i_s_cache_t* cache)	/*!< in/out: cache into which to copy
				volatile strings */
{
	row->lock_trx_id = lock_get_trx_id(lock);
	row->lock_mode = lock_get_mode_str(lock);
	row->lock_type = lock_get_type_str(lock);

	row->lock_table = ha_storage_put_str_memlim(
		cache->storage, lock_get_table_name(lock),
		MAX_ALLOWED_FOR_STORAGE(cache));

	/* memory could not be allocated */
	if (row->lock_table == NULL) {

		return(FALSE);
	}

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		row->lock_index = ha_storage_put_str_memlim(
			cache->storage, lock_rec_get_index_name(lock),
			MAX_ALLOWED_FOR_STORAGE(cache));

		/* memory could not be allocated */
		if (row->lock_index == NULL) {

			return(FALSE);
		}

		row->lock_space = lock_rec_get_space_id(lock);
		row->lock_page = lock_rec_get_page_no(lock);
		row->lock_rec = heap_no;

		if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {

			/* memory could not be allocated */
			return(FALSE);
		}

		break;
	case LOCK_TABLE:
		row->lock_index = NULL;

		row->lock_space = ULINT_UNDEFINED;
		row->lock_page = ULINT_UNDEFINED;
		row->lock_rec = ULINT_UNDEFINED;

		row->lock_data = NULL;

		break;
	default:
		ut_error;
	}

	row->lock_table_id = lock_get_table_id(lock);

	row->hash_chain.value = row;

	return(TRUE);
}
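
A caller must treat FALSE from fill_locks_row() as "discard the row": when the cache runs out of memory for its strings, the row may be only partially filled. An illustrative caller follows; report_lock() is a hypothetical name, the local row stands in for a slot the real cache would allocate from its own memory, and the hash insertion that would normally follow is omitted.

/* Hypothetical caller of fill_locks_row(), error handling only. */
static ibool
report_lock(trx_i_s_cache_t* cache, const lock_t* lock, ulint heap_no)
{
	i_s_locks_row_t	row;

	if (!fill_locks_row(&row, lock, heap_no, cache)) {
		/* Out of cache memory: the row may be partially
		filled and must be discarded unread. */
		return(FALSE);
	}

	/* On TRUE every member is set, including hash_chain.value,
	so the row could now be linked into a lookup hash keyed by
	fold_lock(lock, heap_no). */
	return(TRUE);
}
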
Example #4
/*******************************************************************//**
Fills the "lock_data" member of an i_s_locks_row_t object.
If memory cannot be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: rec num used to find the data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	mtr_t			mtr;

	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;

	ut_a(lock_get_type(lock) == LOCK_REC);

	mtr_start(&mtr);

	block = buf_page_try_get(lock_rec_get_space_id(lock),
				 lock_rec_get_page_no(lock),
				 &mtr);

	if (block == NULL) {

		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = (const page_t*) buf_block_get_frame(block);

	rec = page_find_rec_with_heap_no(page, heap_no);

	if (page_rec_is_infimum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "infimum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else if (page_rec_is_supremum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else {

		const dict_index_t*	index;
		ulint			n_fields;
		mem_heap_t*		heap;
		ulint			offsets_onstack[REC_OFFS_NORMAL_SIZE];
		ulint*			offsets;
		char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
		ulint			buf_used;
		ulint			i;

		rec_offs_init(offsets_onstack);
		offsets = offsets_onstack;

		index = lock_rec_get_index(lock);

		n_fields = dict_index_get_n_unique(index);

		ut_a(n_fields > 0);

		heap = NULL;
		offsets = rec_get_offsets(rec, index, offsets, n_fields,
					  &heap);

		/* format and store the data */

		buf_used = 0;
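		/* put_nth_field() appears to return the number of
		bytes it wrote including a terminating NUL; the "- 1"
		below lets the next field overwrite that NUL so the
		fields concatenate, and the final copy adds 1 back to
		keep the last NUL. */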
		for (i = 0; i < n_fields; i++) {

			buf_used += put_nth_field(
				buf + buf_used, sizeof(buf) - buf_used,
				i, index, rec, offsets) - 1;
		}

		*lock_data = (const char*) ha_storage_put_memlim(
			cache->storage, buf, buf_used + 1,
			MAX_ALLOWED_FOR_STORAGE(cache));

		if (UNIV_UNLIKELY(heap != NULL)) {

			/* this means that rec_get_offsets() has created a new
			heap and has stored offsets in it; check that this is
			really the case and free the heap */
			ut_a(offsets != offsets_onstack);
			mem_heap_free(heap);
		}
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}
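
The offsets_onstack/heap handling above is a common InnoDB pattern: rec_get_offsets() uses the caller's stack array when it is large enough and allocates a mem_heap_t otherwise, which the caller must detect and free. Below is a minimal plain-C sketch of the same stack-first, heap-fallback idea; the buffer size and names are made up.

#include <stdlib.h>

enum { TOY_ONSTACK_SIZE = 16 };

/* Illustrative stack-first/heap-fallback buffer, mirroring the
offsets_onstack/heap dance in fill_lock_data(). */
static void
toy_use_offsets(size_t n_needed)
{
	size_t	onstack[TOY_ONSTACK_SIZE];
	size_t*	offsets = onstack;
	void*	heap = NULL;	/* non-NULL only if we had to allocate */

	if (n_needed > TOY_ONSTACK_SIZE) {
		/* Stack buffer too small: fall back to the heap, as
		rec_get_offsets() does by creating a mem_heap_t. */
		heap = malloc(n_needed * sizeof(*offsets));
		offsets = heap;

		if (offsets == NULL) {
			return;	/* allocation failed */
		}
	}

	/* ... fill and read offsets[0 .. n_needed - 1] ... */

	if (heap != NULL) {
		/* Mirrors the check at the end of fill_lock_data():
		a non-NULL heap implies offsets left the stack array. */
		free(heap);
	}
}
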