Example #1
/*************************************************************************
Checks that the foreign key constraints referencing this table still hold
after a delete of the record under pcur. NOTE that this function will
temporarily commit mtr and lose the pcur position! */
static
ulint
row_upd_check_references_constraints(
/*=================================*/
				/* out: DB_SUCCESS, DB_LOCK_WAIT, or an error
				code */
	btr_pcur_t*	pcur,	/* in: cursor positioned on a record; NOTE: the
				cursor position is lost in this function! */
	dict_table_t*	table,	/* in: table in question */
	dict_index_t*	index,	/* in: index of the cursor */
	que_thr_t*	thr,	/* in: query thread */
	mtr_t*		mtr)	/* in: mtr */
{
	dict_foreign_t*	foreign;
	mem_heap_t*	heap;
	dtuple_t*	entry;
	rec_t*		rec;
	ulint		err;

	rec = btr_pcur_get_rec(pcur);

	heap = mem_heap_create(500);

	entry = row_rec_to_index_entry(ROW_COPY_DATA, index, rec, heap);

	mtr_commit(mtr);	

	mtr_start(mtr);	
	
	rw_lock_s_lock(&dict_foreign_key_check_lock);	

	foreign = UT_LIST_GET_FIRST(table->referenced_list);

	while (foreign) {
		if (foreign->referenced_index == index) {

			err = row_ins_check_foreign_constraint(FALSE, foreign,
						table, index, entry, thr);
			if (err != DB_SUCCESS) {
				rw_lock_s_unlock(&dict_foreign_key_check_lock);	
				mem_heap_free(heap);

				return(err);
			}
		}

		foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
	}

	rw_lock_s_unlock(&dict_foreign_key_check_lock);	
	mem_heap_free(heap);
	
	return(DB_SUCCESS);
}
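A hedged caller sketch (the wrapper name is hypothetical): because the function above commits mtr and invalidates pcur, a caller typically saves the cursor position first and restores it in the restarted mtr after the call.
static
ulint
row_upd_check_refs_caller_sketch(
/*=============================*/
				/* out: DB_SUCCESS or error code */
	btr_pcur_t*	pcur,	/* in: cursor positioned on the record */
	dict_table_t*	table,	/* in: table in question */
	dict_index_t*	index,	/* in: index of the cursor */
	que_thr_t*	thr,	/* in: query thread */
	mtr_t*		mtr)	/* in: mtr */
{
	ulint	err;

	/* remember the cursor position before it is lost */
	btr_pcur_store_position(pcur, mtr);

	err = row_upd_check_references_constraints(pcur, table, index,
						   thr, mtr);
	if (err != DB_SUCCESS) {

		return(err);
	}

	/* the mtr was committed and restarted inside the call:
	reposition the cursor in the restarted mtr */
	btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr);

	return(DB_SUCCESS);
}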
Example #2
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is DEL_MARK.
@return	DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
static
ulint
row_undo_mod_del_mark_sec(
/*======================*/
	undo_node_t*	node,	/*!< in: row undo node */
	que_thr_t*	thr)	/*!< in: query thread */
{
	mem_heap_t*	heap;
	dtuple_t*	entry;
	dict_index_t*	index;
	ulint		err;

	heap = mem_heap_create(1024);

	while (node->index != NULL) {
		/* Skip any corrupted secondary indexes */
		dict_table_skip_corrupt_index(node->index);

		if (!node->index) {
			break;
		}

		index = node->index;

		entry = row_build_index_entry(node->row, node->ext,
					      index, heap);
		ut_a(entry);
		err = row_undo_mod_del_unmark_sec_and_undo_update(
			BTR_MODIFY_LEAF, thr, index, entry);
		if (err == DB_FAIL) {
			err = row_undo_mod_del_unmark_sec_and_undo_update(
				BTR_MODIFY_TREE, thr, index, entry);
		}

		if (err != DB_SUCCESS) {

			mem_heap_free(heap);

			return(err);
		}

		node->index = dict_table_get_next_index(node->index);
	}

	mem_heap_free(heap);

	return(DB_SUCCESS);
}
Example #3
/*******************************************************************//**
Rollback a transaction used in MySQL.
@return	error code or DB_SUCCESS */
UNIV_INTERN
int
trx_general_rollback_for_mysql(
/*===========================*/
	trx_t*		trx,	/*!< in: transaction handle */
	trx_savept_t*	savept)	/*!< in: pointer to savepoint undo number, if
				partial rollback requested, or NULL for
				complete rollback */
{
	mem_heap_t*	heap;
	que_thr_t*	thr;
	roll_node_t*	roll_node;

	/* Tell Innobase server that there might be work for
	utility threads: */

	srv_active_wake_master_thread();

	trx_start_if_not_started(trx);

	heap = mem_heap_create(512);

	roll_node = roll_node_create(heap);

	if (savept) {
		roll_node->partial = TRUE;
		roll_node->savept = *savept;
	}

	trx->error_state = DB_SUCCESS;

	thr = pars_complete_graph_for_exec(roll_node, trx, heap);

	ut_a(thr == que_fork_start_command(que_node_get_parent(thr)));
	que_run_threads(thr);

	mutex_enter(&kernel_mutex);

	while (trx->que_state != TRX_QUE_RUNNING) {

		mutex_exit(&kernel_mutex);

		os_thread_sleep(100000);

		mutex_enter(&kernel_mutex);
	}

	mutex_exit(&kernel_mutex);

	mem_heap_free(heap);

	ut_a(trx->error_state == DB_SUCCESS);

	/* Tell Innobase server that there might be work for
	utility threads: */

	srv_active_wake_master_thread();

	return((int) trx->error_state);
}
Example #4
void
dtuple_free_for_mysql(
/*==================*/
	void*	heap) /* in: memory heap where tuple was created */
{
  	mem_heap_free((mem_heap_t*)heap);
}
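A minimal sketch of the create/free pairing this routine assumes (the helper name is hypothetical): the tuple is built inside its own heap, the heap is handed to the caller as an opaque void*, and dtuple_free_for_mysql() later releases tuple and heap in one call.
dtuple_t*
dtuple_create_sketch(
/*=================*/
				/* out: created tuple */
	void**	heap,		/* out: heap to pass later to
				dtuple_free_for_mysql() */
	ulint	n_fields)	/* in: number of fields */
{
	*heap = (void*) mem_heap_create(500);

	return(dtuple_create((mem_heap_t*) *heap, n_fields));
}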
Example #5
/* Remove a broadcast entry from the list and free it */
static void nwk_brc_free(mem_ptr_t *mem_ptr)
{
	if (mem_ptr) {
		list_remove(brc_list, mem_ptr);
		mem_heap_free(mem_ptr);
	}
}
Example #6
/*************************************************************//**
Empties a hash table and frees the memory heaps. */
UNIV_INTERN
void
ha_clear(
/*=====*/
	hash_table_t*	table)	/*!< in, own: hash table */
{
	ulint	i;
	ulint	n;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */

#ifndef UNIV_HOTBACKUP
	/* Free the memory heaps. */
	n = table->n_mutexes;

	for (i = 0; i < n; i++) {
		mem_heap_free(table->heaps[i]);
	}
#endif /* !UNIV_HOTBACKUP */

	/* Clear the hash table. */
	n = hash_get_n_cells(table);

	for (i = 0; i < n; i++) {
		hash_get_nth_cell(table, i)->node = NULL;
	}
}
Example #7
/***************************************************************
Purges a delete marking of a record. */
static
void
row_purge_del_mark(
/*===============*/
	purge_node_t*	node,	/* in: row purge node */
	que_thr_t*	thr)	/* in: query thread */
{
	mem_heap_t*	heap;
	dtuple_t*	entry;
	dict_index_t*	index;
	
	ut_ad(node && thr);

	heap = mem_heap_create(1024);

	while (node->index != NULL) {
		index = node->index;

		/* Build the index entry */
		entry = row_build_index_entry(node->row, index, heap);

		row_purge_remove_sec_if_poss(node, thr, index, entry);

		node->index = dict_table_get_next_index(node->index);
	}

	mem_heap_free(heap);	

	row_purge_remove_clust_if_poss(node, thr);
}
Example #8
/************************************************************************
Frees the global purge system control structure. */
UNIV_INTERN
void
trx_purge_sys_close(void)
/*======================*/
{
	ut_ad(!mutex_own(&kernel_mutex));

	que_graph_free(purge_sys->query);

	ut_a(purge_sys->sess->trx->is_purge);
	purge_sys->sess->trx->conc_state = TRX_NOT_STARTED;
	sess_close(purge_sys->sess);
	purge_sys->sess = NULL;

	if (purge_sys->view != NULL) {
		/* Acquiring the kernel mutex is a precondition of
		read_view_close(); we do not otherwise need it here. */
		mutex_enter(&kernel_mutex);

		read_view_close(purge_sys->view);
		purge_sys->view = NULL;

		mutex_exit(&kernel_mutex);
	}

	trx_undo_arr_free(purge_sys->arr);

	rw_lock_free(&purge_sys->latch);
	mutex_free(&purge_sys->mutex);

	mem_heap_free(purge_sys->heap);
	mem_free(purge_sys);

	purge_sys = NULL;
}
Example #9
static void nwk_addr_map_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)
    {
        list_remove(addr_map, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #10
static void aps_grp_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)
    {
        list_remove(grp_tbl, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #11
static void zcl_id_tmr_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)
    {
        list_remove(id_tmr_list, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #12
void nwk_rte_tbl_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)    
    {
        list_remove(rte_tbl, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #13
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_DEL.
@return	DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
static
ulint
row_undo_mod_upd_del_sec(
/*=====================*/
	undo_node_t*	node,	/*!< in: row undo node */
	que_thr_t*	thr)	/*!< in: query thread */
{
	mem_heap_t*	heap;
	dtuple_t*	entry;
	dict_index_t*	index;
	ulint		err	= DB_SUCCESS;

	ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
	heap = mem_heap_create(1024);

	while (node->index != NULL) {

		/* Skip any corrupted secondary indexes */
		dict_table_skip_corrupt_index(node->index);

		if (!node->index) {
			break;
		}

		index = node->index;

		entry = row_build_index_entry(node->row, node->ext,
					      index, heap);
		if (UNIV_UNLIKELY(!entry)) {
			/* The database must have crashed after
			inserting a clustered index record but before
			writing all the externally stored columns of
			that record.  Because secondary index entries
			are inserted after the clustered index record,
			we may assume that the secondary index record
			does not exist.  However, this situation may
			only occur during the rollback of incomplete
			transactions. */
			ut_a(thr_is_recv(thr));
		} else {
			err = row_undo_mod_del_mark_or_remove_sec(
				node, thr, index, entry);

			if (err != DB_SUCCESS) {

				break;
			}
		}

		mem_heap_empty(heap);

		node->index = dict_table_get_next_index(node->index);
	}

	mem_heap_free(heap);

	return(err);
}
Example #14
ibool
row_undo_search_clust_to_pcur(
/*==========================*/
				/* out: TRUE if found; NOTE the node->pcur
				must be closed by the caller, regardless of
				the return value */
	undo_node_t*	node)	/* in: row undo node */
{
	dict_index_t*	clust_index;
	ibool		found;
	mtr_t		mtr;
	ibool		ret;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

	mtr_start(&mtr);

	clust_index = dict_table_get_first_index(node->table);

	found = row_search_on_row_ref(&(node->pcur), BTR_MODIFY_LEAF,
				      node->table, node->ref, &mtr);

	rec = btr_pcur_get_rec(&(node->pcur));

	offsets = rec_get_offsets(rec, clust_index, offsets,
				  ULINT_UNDEFINED, &heap);

	if (!found || 0 != ut_dulint_cmp(node->roll_ptr,
					 row_get_rec_roll_ptr(rec, clust_index,
							      offsets))) {

		/* We must remove the reservation on the undo log record
		BEFORE releasing the latch on the clustered index page: this
		is to make sure that some thread will eventually undo the
		modification corresponding to node->roll_ptr. */

		/* fputs("--------------------undoing a previous version\n",
		stderr); */

		ret = FALSE;
	} else {
		node->row = row_build(ROW_COPY_DATA, clust_index, rec,
				      offsets, node->heap);
		btr_pcur_store_position(&(node->pcur), &mtr);

		ret = TRUE;
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(ret);
}
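A hedged caller sketch (hypothetical helper) of the contract stated in the out-comment above: node->pcur must be closed by the caller whether or not the clustered index record was found.
static
ibool
undo_fetch_row_sketch(
/*==================*/
				/* out: TRUE if the row was found */
	undo_node_t*	node)	/* in: row undo node */
{
	ibool	found;

	found = row_undo_search_clust_to_pcur(node);

	if (found) {
		/* node->row has been built in node->heap and the
		cursor position has been stored */
	}

	/* the cursor must be closed in both cases */
	btr_pcur_close(&(node->pcur));

	return(found);
}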
Example #15
static void aps_retry_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)
    {
        buf_free(APS_RETRY_ENTRY(mem_ptr)->buf);
        list_remove(aps_retry, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #16
/***********************************************************//**
Undoes a modify in a clustered index record.
@return	DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
static
ulint
row_undo_mod_clust_low(
/*===================*/
	undo_node_t*	node,	/*!< in: row undo node */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr,	/*!< in: mtr; must be committed before
				latching any further pages */
	ulint		mode)	/*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
	btr_pcur_t*	pcur;
	btr_cur_t*	btr_cur;
	ulint		err;
#ifdef UNIV_DEBUG
	ibool		success;
#endif /* UNIV_DEBUG */

	pcur = &(node->pcur);
	btr_cur = btr_pcur_get_btr_cur(pcur);

#ifdef UNIV_DEBUG
	success =
#endif /* UNIV_DEBUG */
	btr_pcur_restore_position(mode, pcur, mtr);

	ut_ad(success);

	if (mode == BTR_MODIFY_LEAF) {

		err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG
						| BTR_NO_UNDO_LOG_FLAG
						| BTR_KEEP_SYS_FLAG,
						btr_cur, node->update,
						node->cmpl_info, thr, mtr);
	} else {
		mem_heap_t*	heap		= NULL;
		big_rec_t*	dummy_big_rec;

		ut_ad(mode == BTR_MODIFY_TREE);

		err = btr_cur_pessimistic_update(
			BTR_NO_LOCKING_FLAG
			| BTR_NO_UNDO_LOG_FLAG
			| BTR_KEEP_SYS_FLAG,
			btr_cur, &heap, &dummy_big_rec, node->update,
			node->cmpl_info, thr, mtr);

		ut_a(!dummy_big_rec);
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
	}

	return(err);
}
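A hedged sketch of the usual retry idiom around this function (the wrapper name is hypothetical): try the cheap BTR_MODIFY_LEAF path first and fall back to BTR_MODIFY_TREE, which may reorganize pages, only if that attempt fails.
static
ulint
undo_mod_clust_retry_sketch(
/*========================*/
	undo_node_t*	node,	/*!< in: row undo node */
	que_thr_t*	thr)	/*!< in: query thread */
{
	mtr_t	mtr;
	ulint	err;

	mtr_start(&mtr);

	err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_LEAF);

	if (err != DB_SUCCESS) {
		/* release the page latches but keep the stored cursor
		position, then retry with tree latches */
		btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

		mtr_start(&mtr);

		err = row_undo_mod_clust_low(node, thr, &mtr,
					     BTR_MODIFY_TREE);
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	return(err);
}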
Example #17
/**********************************************************************//**
Frees an index memory object. */
UNIV_INTERN
void
dict_mem_index_free(
/*================*/
	dict_index_t*	index)	/*!< in: index */
{
	ut_ad(index);
	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

	mem_heap_free(index->heap);
}
Example #18
/*****************************************************************//**
Frees the adaptive search system at a database shutdown. */
UNIV_INTERN
void
btr_search_sys_free(void)
/*=====================*/
{
	mem_free(btr_search_latch_temp);
	btr_search_latch_temp = NULL;
	mem_heap_free(btr_search_sys->hash_index->heap);
	hash_table_free(btr_search_sys->hash_index);
	mem_free(btr_search_sys);
	btr_search_sys = NULL;
}
Example #19
/****************************************************************//**
Free a table memory object. */
UNIV_INTERN
void
dict_mem_table_free(
/*================*/
	dict_table_t*	table)		/*!< in: table */
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
	ut_d(table->cached = FALSE);

	mem_heap_free(table->heap);
}
Example #20
static void zcl_rpt_free(mem_ptr_t *mem_ptr)
{
    if (mem_ptr)
    {
        // Nullify the report pointer in the attrib
        ZCL_RPT(mem_ptr)->attrib->rpt = NULL;

        // free the mem pointer
        list_remove(zcl_rpt, mem_ptr);
        mem_heap_free(mem_ptr);
    }
}
Example #21
void
dict_load_sys_table(
/*================*/
	dict_table_t*	table)	/* in: system table */
{
	mem_heap_t*	heap;

	ut_ad(mutex_own(&(dict_sys->mutex)));

	heap = mem_heap_create(1000);

	dict_load_indexes(table, heap);
	
	mem_heap_free(heap);
}
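A minimal sketch of the scratch-heap idiom used above (the helper is hypothetical): a short-lived heap absorbs all intermediate allocations and is released with a single mem_heap_free() instead of freeing each buffer individually.
static
void
scratch_heap_sketch(void)
/*=====================*/
{
	mem_heap_t*	heap;
	byte*		buf;

	heap = mem_heap_create(1000);

	buf = mem_heap_alloc(heap, 100);	/* scratch buffer */

	/* ... use buf and any further allocations from heap ... */

	mem_heap_free(heap);	/* frees buf and everything else at once */
}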
Example #22
/****************************************************************//**
Free a table memory object. */
UNIV_INTERN
void
dict_mem_table_free(
/*================*/
	dict_table_t*	table)		/*!< in: table */
{
	ut_ad(table);
	ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
	ut_d(table->cached = FALSE);

#ifndef UNIV_HOTBACKUP
	mutex_free(&(table->autoinc_mutex));
#endif /* !UNIV_HOTBACKUP */
	ut_free(table->name);
	mem_heap_free(table->heap);
}
Example #23
/**********************************************************************//**
Frees an index memory object. */
UNIV_INTERN
void
dict_mem_index_free(
/*================*/
	dict_index_t*	index)	/*!< in: index */
{
	ut_ad(index);
	ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
#ifdef UNIV_BLOB_DEBUG
	if (index->blobs) {
		mutex_free(&index->blobs_mutex);
		rbt_free(index->blobs);
	}
#endif /* UNIV_BLOB_DEBUG */

	mem_heap_free(index->heap);
}
Example #24
rec_t*
row_get_clust_rec(
/*==============*/
				/* out: record or NULL, if no record found */
	ulint		mode,	/* in: BTR_MODIFY_LEAF, ... */
	rec_t*		rec,	/* in: record in a secondary index */
	dict_index_t*	index,	/* in: secondary index */
	dict_index_t**	clust_index,/* out: clustered index */
	mtr_t*		mtr)	/* in: mtr */
{
	mem_heap_t*	heap;
	dtuple_t*	ref;
	dict_table_t*	table;
	btr_pcur_t	pcur;
	ibool		found;
	rec_t*		clust_rec;
	
	ut_ad((index->type & DICT_CLUSTERED) == 0);

	table = index->table;

	heap = mem_heap_create(256);

	ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec, heap);

	found = row_search_on_row_ref(&pcur, mode, table, ref, mtr);

	clust_rec = btr_pcur_get_rec(&pcur);

	mem_heap_free(heap);

	btr_pcur_close(&pcur);

	*clust_index = dict_table_get_first_index(table);

	if (!found) {

		return(NULL);
	}

	return(clust_rec);
}
Example #25
void
read_cursor_view_close_for_mysql(
/*=============================*/
	trx_t*		trx,	/* in: trx */
	cursor_view_t*	curview)/* in: cursor view to be closed */
{
	ut_a(curview);
	ut_a(curview->read_view);
	ut_a(curview->heap);

	/* Add cursor's tables to the global count of active tables that
	belong to this transaction */
	trx->n_mysql_tables_in_use += curview->n_mysql_tables_in_use;

	mutex_enter(&kernel_mutex);

	read_view_close(curview->read_view);
	trx->read_view = trx->global_read_view;

	mutex_exit(&kernel_mutex);

	mem_heap_free(curview->heap);
}
Example #26
/*********************************************************************//**
Fetches the clustered index record for a secondary index record. The latches
on the secondary index record are preserved.
@return	record or NULL, if no record found */
UNIV_INTERN
rec_t*
row_get_clust_rec(
/*==============*/
	ulint		mode,	/*!< in: BTR_MODIFY_LEAF, ... */
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: secondary index */
	dict_index_t**	clust_index,/*!< out: clustered index */
	mtr_t*		mtr)	/*!< in: mtr */
{
	mem_heap_t*	heap;
	dtuple_t*	ref;
	dict_table_t*	table;
	btr_pcur_t	pcur;
	ibool		found;
	rec_t*		clust_rec;

	ut_ad(!dict_index_is_clust(index));

	table = index->table;

	heap = mem_heap_create(256);

	ref = row_build_row_ref(ROW_COPY_POINTERS, index, rec, heap);

	found = row_search_on_row_ref(&pcur, mode, table, ref, mtr);

	clust_rec = found ? btr_pcur_get_rec(&pcur) : NULL;

	mem_heap_free(heap);

	btr_pcur_close(&pcur);

	*clust_index = dict_table_get_first_index(table);

	return(clust_rec);
}
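A hedged usage sketch (hypothetical helper): given a record in a secondary index, fetch the matching clustered index record within the caller's mini-transaction; a NULL return means no clustered index record was found.
static
const rec_t*
clust_rec_for_sec_rec_sketch(
/*=========================*/
	const rec_t*	sec_rec,	/*!< in: secondary index record */
	dict_index_t*	sec_index,	/*!< in: secondary index */
	mtr_t*		mtr)		/*!< in: mtr holding the latches */
{
	dict_index_t*	clust_index;

	return(row_get_clust_rec(BTR_SEARCH_LEAF, sec_rec, sec_index,
				 &clust_index, mtr));
}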
Example #27
/***********************************************************//**
Purges a delete marking of a record. */
static
void
row_purge_del_mark(
    /*===============*/
    purge_node_t*	node)	/*!< in: row purge node */
{
    mem_heap_t*	heap;
    dtuple_t*	entry;
    dict_index_t*	index;

    ut_ad(node);

    heap = mem_heap_create(1024);

    while (node->index != NULL) {
        /* Skip any corrupted secondary indexes */
        dict_table_skip_corrupt_index(node->index);

        if (!node->index) {
            break;
        }

        index = node->index;

        /* Build the index entry */
        entry = row_build_index_entry(node->row, NULL, index, heap);
        ut_a(entry);
        row_purge_remove_sec_if_poss(node, index, entry);

        node->index = dict_table_get_next_index(node->index);
    }

    mem_heap_free(heap);

    row_purge_remove_clust_if_poss(node);
}
Example #28
/********************************************************************//**
Frees a transaction object. */
UNIV_INTERN
void
trx_free(
/*=====*/
	trx_t*	trx)	/*!< in, own: trx object */
{
	ut_ad(mutex_own(&kernel_mutex));

	if (trx->declared_to_be_inside_innodb) {
		ut_print_timestamp(stderr);
		fputs("  InnoDB: Error: Freeing a trx which is declared"
		      " to be processing\n"
		      "InnoDB: inside InnoDB.\n", stderr);
		trx_print(stderr, trx, 600);
		putc('\n', stderr);

		/* This is an error but not a fatal error. We must keep
		the counters like srv_conc_n_threads accurate. */
		srv_conc_force_exit_innodb(trx);
	}

	if (trx->n_mysql_tables_in_use != 0
	    || trx->mysql_n_tables_locked != 0) {

		ut_print_timestamp(stderr);
		fprintf(stderr,
			"  InnoDB: Error: MySQL is freeing a thd\n"
			"InnoDB: though trx->n_mysql_tables_in_use is %lu\n"
			"InnoDB: and trx->mysql_n_tables_locked is %lu.\n",
			(ulong)trx->n_mysql_tables_in_use,
			(ulong)trx->mysql_n_tables_locked);

		trx_print(stderr, trx, 600);

		ut_print_buf(stderr, trx, sizeof(trx_t));
		putc('\n', stderr);
	}

	ut_a(trx->magic_n == TRX_MAGIC_N);

	trx->magic_n = 11112222;

	ut_a(trx->conc_state == TRX_NOT_STARTED);

	mutex_free(&(trx->undo_mutex));

	ut_a(trx->insert_undo == NULL);
	ut_a(trx->update_undo == NULL);

	if (trx->undo_no_arr) {
		trx_undo_arr_free(trx->undo_no_arr);
	}

	ut_a(UT_LIST_GET_LEN(trx->signals) == 0);
	ut_a(UT_LIST_GET_LEN(trx->reply_signals) == 0);

	ut_a(trx->wait_lock == NULL);
	ut_a(UT_LIST_GET_LEN(trx->wait_thrs) == 0);

	ut_a(!trx->has_search_latch);

	ut_a(trx->dict_operation_lock_mode == 0);

	if (trx->lock_heap) {
		mem_heap_free(trx->lock_heap);
	}

	ut_a(UT_LIST_GET_LEN(trx->trx_locks) == 0);

	if (trx->global_read_view_heap) {
		mem_heap_free(trx->global_read_view_heap);
	}

	trx->global_read_view = NULL;

	ut_a(trx->read_view == NULL);

	ut_a(ib_vector_is_empty(trx->autoinc_locks));
	/* We allocated a dedicated heap for the vector. */
	ib_vector_free(trx->autoinc_locks);

	mem_free(trx);
}
Example #29
/*******************************************************************//**
Fills the "lock_data" member of i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return	FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: rec num used to find the data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	mtr_t			mtr;

	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;

	ut_a(lock_get_type(lock) == LOCK_REC);

	mtr_start(&mtr);

	block = buf_page_try_get(lock_rec_get_space_id(lock),
				 lock_rec_get_page_no(lock),
				 &mtr);

	if (block == NULL) {

		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = (const page_t*) buf_block_get_frame(block);

	rec = page_find_rec_with_heap_no(page, heap_no);

	if (page_rec_is_infimum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "infimum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else if (page_rec_is_supremum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else {

		const dict_index_t*	index;
		ulint			n_fields;
		mem_heap_t*		heap;
		ulint			offsets_onstack[REC_OFFS_NORMAL_SIZE];
		ulint*			offsets;
		char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
		ulint			buf_used;
		ulint			i;

		rec_offs_init(offsets_onstack);
		offsets = offsets_onstack;

		index = lock_rec_get_index(lock);

		n_fields = dict_index_get_n_unique(index);

		ut_a(n_fields > 0);

		heap = NULL;
		offsets = rec_get_offsets(rec, index, offsets, n_fields,
					  &heap);

		/* format and store the data */

		buf_used = 0;
		for (i = 0; i < n_fields; i++) {

			buf_used += put_nth_field(
				buf + buf_used, sizeof(buf) - buf_used,
				i, index, rec, offsets) - 1;
		}

		*lock_data = (const char*) ha_storage_put_memlim(
			cache->storage, buf, buf_used + 1,
			MAX_ALLOWED_FOR_STORAGE(cache));

		if (UNIV_UNLIKELY(heap != NULL)) {

			/* this means that rec_get_offsets() has created a new
			heap and has stored offsets in it; check that this is
			really the case and free the heap */
			ut_a(offsets != offsets_onstack);
			mem_heap_free(heap);
		}
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}
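A hedged sketch of the rec_get_offsets() idiom used above and in Example #14: offer a small on-stack offsets array first; rec_get_offsets() allocates a heap only when the record needs more slots, and the caller must then free that heap.
static
void
offsets_idiom_sketch(
/*=================*/
	const rec_t*	rec,	/*!< in: physical record */
	dict_index_t*	index)	/*!< in: index the record belongs to */
{
	mem_heap_t*	heap	= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets	= offsets_;

	rec_offs_init(offsets_);

	offsets = rec_get_offsets(rec, index, offsets,
				  ULINT_UNDEFINED, &heap);

	/* ... use offsets together with rec ... */

	if (UNIV_LIKELY_NULL(heap)) {
		/* rec_get_offsets() had to allocate: release it */
		mem_heap_free(heap);
	}
}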
Example #30
/**************************************************************//**
Restores the stored position of a persistent cursor, buffer-fixing the page and
obtaining the specified latches. If the cursor position was saved when the
(1) cursor was positioned on a user record: this function restores the position
to the last record LESS THAN OR EQUAL to the stored record;
(2) cursor was positioned on a page infimum record: restores the position to
the last record LESS than the user record which was the successor of the page
infimum;
(3) cursor was positioned on the page supremum: restores to the first record
GREATER than the user record which was the predecessor of the supremum.
(4) cursor was positioned before the first or after the last in an empty tree:
restores to before first or after the last in the tree.
@return TRUE if the cursor position was stored when it was on a user
record and it can be restored on a user record whose ordering fields
are identical to the ones of the original user record */
UNIV_INTERN
ibool
btr_pcur_restore_position_func(
/*===========================*/
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ... */
	btr_pcur_t*	cursor,		/*!< in: detached persistent cursor */
	const char*	file,		/*!< in: file name */
	ulint		line,		/*!< in: line where called */
	mtr_t*		mtr)		/*!< in: mtr */
{
	dict_index_t*	index;
	dtuple_t*	tuple;
	ulint		mode;
	ulint		old_mode;
	mem_heap_t*	heap;

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);

	index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));

	if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
	    || UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
			     && cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
		ut_print_buf(stderr, cursor, sizeof(btr_pcur_t));
		putc('\n', stderr);
		if (cursor->trx_if_known) {
			trx_print(stderr, cursor->trx_if_known, 0);
		}

		ut_error;
	}

	if (UNIV_UNLIKELY
	    (cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
	     || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {

		/* In these cases we do not try an optimistic restoration,
		but always do a search */

		btr_cur_open_at_index_side(
			cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
			index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);

		cursor->latch_mode = latch_mode;
		cursor->pos_state = BTR_PCUR_IS_POSITIONED;
		cursor->block_when_stored = btr_pcur_get_block(cursor);

		return(FALSE);
	}

	ut_a(cursor->old_rec);
	ut_a(cursor->old_n_fields);

	if (UNIV_LIKELY(latch_mode == BTR_SEARCH_LEAF)
	    || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
		/* Try optimistic restoration */

		if (UNIV_LIKELY(buf_page_optimistic_get(
					latch_mode,
					cursor->block_when_stored,
					cursor->modify_clock,
					file, line, mtr))) {
			cursor->pos_state = BTR_PCUR_IS_POSITIONED;

			buf_block_dbg_add_level(
				btr_pcur_get_block(cursor),
				dict_index_is_ibuf(index)
				? SYNC_IBUF_TREE_NODE : SYNC_TREE_NODE);

			if (cursor->rel_pos == BTR_PCUR_ON) {
#ifdef UNIV_DEBUG
				const rec_t*	rec;
				const ulint*	offsets1;
				const ulint*	offsets2;
#endif /* UNIV_DEBUG */
				cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
				rec = btr_pcur_get_rec(cursor);

				heap = mem_heap_create(256);
				offsets1 = rec_get_offsets(
					cursor->old_rec, index, NULL,
					cursor->old_n_fields, &heap);
				offsets2 = rec_get_offsets(
					rec, index, NULL,
					cursor->old_n_fields, &heap);

				ut_ad(!cmp_rec_rec(cursor->old_rec,
						   rec, offsets1, offsets2,
						   index));
				mem_heap_free(heap);
#endif /* UNIV_DEBUG */
				return(TRUE);
			}

			return(FALSE);
		}
	}

	/* If optimistic restoration did not succeed, open the cursor anew */

	heap = mem_heap_create(256);

	tuple = dict_index_build_data_tuple(index, cursor->old_rec,
					    cursor->old_n_fields, heap);

	/* Save the old search mode of the cursor */
	old_mode = cursor->search_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		mode = PAGE_CUR_LE;
		break;
	case BTR_PCUR_AFTER:
		mode = PAGE_CUR_G;
		break;
	case BTR_PCUR_BEFORE:
		mode = PAGE_CUR_L;
		break;
	default:
		ut_error;
		mode = 0;
	}

	btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode,
					cursor, 0, file, line, mtr);

	/* Restore the old search mode */
	cursor->search_mode = old_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		if (btr_pcur_is_on_user_rec(cursor)
		    && !cmp_dtuple_rec(
			    tuple, btr_pcur_get_rec(cursor),
			    rec_get_offsets(btr_pcur_get_rec(cursor),
					    index, NULL,
					    ULINT_UNDEFINED, &heap))) {

			/* We have to store the NEW value for
			the modify clock, since the cursor can
			now be on a different page! But we can
			retain the value of old_rec */

			cursor->block_when_stored =
				btr_pcur_get_block(cursor);
			cursor->modify_clock =
				buf_block_get_modify_clock(
					cursor->block_when_stored);
			cursor->old_stored = BTR_PCUR_OLD_STORED;

			mem_heap_free(heap);

			return(TRUE);
		}
#ifdef UNIV_DEBUG
		/* fall through */
	case BTR_PCUR_BEFORE:
	case BTR_PCUR_AFTER:
		break;
	default:
		ut_error;
#endif /* UNIV_DEBUG */
	}

	mem_heap_free(heap);

	/* We have to store new position information, modify_clock etc.,
	to the cursor because it can now be on a different page, the record
	under it may have been removed, etc. */

	btr_pcur_store_position(cursor, mtr);

	return(FALSE);
}
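A hedged sketch of the store/restore pattern that this function serves (the helper is hypothetical): save the position, commit the mtr so all page latches are released, do unrelated work, then restore the cursor in a fresh mtr, possibly through a renewed search. btr_pcur_restore_position() is the usual front end for the _func variant above.
static
void
store_restore_sketch(
/*=================*/
	btr_pcur_t*	pcur,	/*!< in/out: persistent cursor positioned
				on a user record */
	mtr_t*		mtr)	/*!< in/out: mtr */
{
	btr_pcur_store_position(pcur, mtr);

	mtr_commit(mtr);	/* all page latches are released here */

	/* ... work that must not hold index page latches ... */

	mtr_start(mtr);

	/* may buffer-fix the old page again or do a fresh search */
	btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr);
}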