Example #1
/***************************************************************
Restores the stored position of a persistent cursor, buffer-fixing the page
and obtaining the specified latches. If the stored record can no longer be
found, the cursor is positioned as close as possible to the stored position
and FALSE is returned. */
ibool
btr_pcur_restore_position(
/*======================*/
					/* out: TRUE if the cursor position
					was stored when it was on a user record
					and it can be restored on a user record
					whose ordering fields are identical to
					the ones of the original user record */
	ulint		latch_mode,	/* in: BTR_SEARCH_LEAF, ... */
	btr_pcur_t*	cursor,		/* in: detached persistent cursor */
	mtr_t*		mtr)		/* in: mtr */
{
	dict_index_t*	index;
	page_t*		page;
	dtuple_t*	tuple;
	ulint		mode;
	ulint		old_mode;
	mem_heap_t*	heap;

	index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));

	if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
	    || UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
			     && cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
		ut_print_buf(stderr, cursor, sizeof(btr_pcur_t));
		if (cursor->trx_if_known) {
			trx_print(stderr, cursor->trx_if_known, 0);
		}

		ut_error;
	}

	if (UNIV_UNLIKELY(
		    cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
		    || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {

		/* In these cases we do not try an optimistic restoration,
		but always do a search */

		btr_cur_open_at_index_side(
			cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
			index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);

		cursor->block_when_stored
			= buf_block_align(btr_pcur_get_page(cursor));

		return(FALSE);
	}

	ut_a(cursor->old_rec);
	ut_a(cursor->old_n_fields);

	page = btr_cur_get_page(btr_pcur_get_btr_cur(cursor));

	if (UNIV_LIKELY(latch_mode == BTR_SEARCH_LEAF)
	    || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
		/* Try optimistic restoration */

		if (UNIV_LIKELY(buf_page_optimistic_get(
					latch_mode,
					cursor->block_when_stored, page,
					cursor->modify_clock, mtr))) {
			cursor->pos_state = BTR_PCUR_IS_POSITIONED;
#ifdef UNIV_SYNC_DEBUG
			buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
			if (cursor->rel_pos == BTR_PCUR_ON) {
#ifdef UNIV_DEBUG
				rec_t*		rec;
				ulint*		offsets1;
				ulint*		offsets2;
#endif /* UNIV_DEBUG */
				cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
				rec = btr_pcur_get_rec(cursor);

				heap = mem_heap_create(256);
				offsets1 = rec_get_offsets(
					cursor->old_rec, index, NULL,
					cursor->old_n_fields, &heap);
				offsets2 = rec_get_offsets(
					rec, index, NULL,
					cursor->old_n_fields, &heap);

				ut_ad(!cmp_rec_rec(cursor->old_rec,
						   rec, offsets1, offsets2,
						   index));
				mem_heap_free(heap);
#endif /* UNIV_DEBUG */
				return(TRUE);
			}

			return(FALSE);
		}
	}

	/* If optimistic restoration did not succeed, open the cursor anew */

	heap = mem_heap_create(256);

	tuple = dict_index_build_data_tuple(index, cursor->old_rec,
					    cursor->old_n_fields, heap);

	/* Save the old search mode of the cursor */
	old_mode = cursor->search_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		mode = PAGE_CUR_LE;
		break;
	case BTR_PCUR_AFTER:
		mode = PAGE_CUR_G;
		break;
	case BTR_PCUR_BEFORE:
		mode = PAGE_CUR_L;
		break;
	default:
		ut_error;
		mode = 0; /* silence a warning */
	}

	btr_pcur_open_with_no_init(index, tuple, mode, latch_mode,
				   cursor, 0, mtr);

	/* Restore the old search mode */
	cursor->search_mode = old_mode;

	if (btr_pcur_is_on_user_rec(cursor, mtr)) {
		switch (cursor->rel_pos) {
		case BTR_PCUR_ON:
			if (!cmp_dtuple_rec(
				    tuple, btr_pcur_get_rec(cursor),
				    rec_get_offsets(btr_pcur_get_rec(cursor),
						    index, NULL,
						    ULINT_UNDEFINED, &heap))) {

				/* We have to store the NEW value for
				the modify clock, since the cursor can
				now be on a different page! But we can
				retain the value of old_rec */

				cursor->block_when_stored =
					buf_block_align(
						btr_pcur_get_page(cursor));
				cursor->modify_clock =
					buf_block_get_modify_clock(
						cursor->block_when_stored);
				cursor->old_stored = BTR_PCUR_OLD_STORED;

				mem_heap_free(heap);

				return(TRUE);
			}

			break;
		case BTR_PCUR_BEFORE:
			page_cur_move_to_next(btr_pcur_get_page_cur(cursor));
			break;
		case BTR_PCUR_AFTER:
			page_cur_move_to_prev(btr_pcur_get_page_cur(cursor));
			break;
#ifdef UNIV_DEBUG
		default:
			ut_error;
#endif /* UNIV_DEBUG */
		}
	}

	mem_heap_free(heap);

	/* We have to store new position information, modify_clock etc.,
	to the cursor because it can now be on a different page, the record
	under it may have been removed, etc. */

	btr_pcur_store_position(cursor, mtr);

	return(FALSE);
}
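
This function is normally used together with btr_pcur_store_position(): a scan stores its position, commits the mini-transaction to release all latches, and later restores the position inside a fresh mini-transaction. The sketch below illustrates that pattern using only calls visible in this listing; it is an illustration only, and the helper name my_release_and_restore is hypothetical.

/***************************************************************
Illustrative sketch only: stores the position of an already positioned
persistent cursor, releases all latches by committing the mtr, and later
restores the position in a fresh mtr. The helper name is hypothetical. */
static
ibool
my_release_and_restore(
/*===================*/
				/* out: TRUE if the cursor was restored
				exactly onto the stored user record */
	btr_pcur_t*	cursor,	/* in/out: positioned persistent cursor */
	mtr_t*		mtr)	/* in/out: mtr; committed and restarted */
{
	ibool	same_rec;

	/* Remember the current position before giving up all latches */
	btr_pcur_store_position(cursor, mtr);

	mtr_commit(mtr);

	/* ... unrelated work may run here; pages can be split, merged or
	evicted meanwhile, which is why the restore may have to repeat the
	search instead of succeeding optimistically ... */

	mtr_start(mtr);

	same_rec = btr_pcur_restore_position(BTR_SEARCH_LEAF, cursor, mtr);

	return(same_rec);
}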
Example #2
/***************************************************************
Purges an update of an existing record. Also purges an update of a delete
marked record if that record contained an externally stored field. */
static
void
row_purge_upd_exist_or_extern(
/*==========================*/
	purge_node_t*	node,	/* in: row purge node */
	que_thr_t*	thr)	/* in: query thread */
{
	mem_heap_t*	heap;
	dtuple_t*	entry;
	dict_index_t*	index;
	upd_field_t*	ufield;
	ibool		is_insert;
	ulint		rseg_id;
	ulint		page_no;
	ulint		offset;
	ulint		internal_offset;
	byte*		data_field;
	ulint		data_field_len;
	ulint		i;
	mtr_t		mtr;
	
	ut_ad(node && thr);

	if (node->rec_type == TRX_UNDO_UPD_DEL_REC) {

		goto skip_secondaries;
	}

	heap = mem_heap_create(1024);

	while (node->index != NULL) {
		index = node->index;

		if (row_upd_changes_ord_field_binary(NULL, node->index,
							node->update)) {
			/* Build the older version of the index entry */
			entry = row_build_index_entry(node->row, index, heap);

			row_purge_remove_sec_if_poss(node, thr, index, entry);
		}

		node->index = dict_table_get_next_index(node->index);
	}

	mem_heap_free(heap);	

skip_secondaries:
	/* Free possible externally stored fields */
	for (i = 0; i < upd_get_n_fields(node->update); i++) {

		ufield = upd_get_nth_field(node->update, i);

		if (ufield->extern_storage) {
			/* We use the fact that new_val points into
			node->undo_rec to get the offset of the
			dfield data inside the undo record. Then we
			can calculate from node->roll_ptr the file
			address of the new_val data */

			internal_offset = ((byte*)ufield->new_val.data)
						- node->undo_rec;
						
			ut_a(internal_offset < UNIV_PAGE_SIZE);

			trx_undo_decode_roll_ptr(node->roll_ptr,
						&is_insert, &rseg_id,
						&page_no, &offset);
			mtr_start(&mtr);

			/* We have to acquire an X-latch to the clustered
			index tree */

			index = dict_table_get_first_index(node->table);

			mtr_x_lock(dict_tree_get_lock(index->tree), &mtr);
			
			/* We assume in purge of externally stored fields
			that the space id of the undo log record is 0! */

			data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
				     + offset + internal_offset;

			buf_page_dbg_add_level(buf_frame_align(data_field),
						SYNC_TRX_UNDO_PAGE);
				     
			data_field_len = ufield->new_val.len;

			btr_free_externally_stored_field(index, data_field,
						data_field_len, FALSE, &mtr);
			mtr_commit(&mtr);
		}
	}
}
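
The comment inside the loop above explains how the file address of the column data is derived from the undo record copy and the roll pointer. Below is a minimal sketch of just that address computation, using only calls that already appear in the function; the helper name my_locate_extern_data is hypothetical.

/***************************************************************
Illustrative sketch only: computes the address, inside a latched undo log
page, of the externally stored column data that the purge above frees.
The helper name is hypothetical. */
static
byte*
my_locate_extern_data(
/*==================*/
				/* out: pointer into the undo log page,
				valid only while mtr keeps the page latched */
	purge_node_t*	node,	/* in: row purge node */
	upd_field_t*	ufield,	/* in: update field with extern storage */
	mtr_t*		mtr)	/* in: started mtr */
{
	ibool	is_insert;
	ulint	rseg_id;
	ulint	page_no;
	ulint	offset;
	ulint	internal_offset;

	/* new_val.data points into the copy of the undo record held in
	node->undo_rec, so the difference is the offset of the column
	data inside the undo record */
	internal_offset = ((byte*)ufield->new_val.data) - node->undo_rec;

	/* The roll pointer identifies the undo log page and the offset
	of the undo record within that page */
	trx_undo_decode_roll_ptr(node->roll_ptr, &is_insert, &rseg_id,
				 &page_no, &offset);

	/* The undo log record is assumed to reside in space 0 */
	return(buf_page_get(0, page_no, RW_X_LATCH, mtr)
	       + offset + internal_offset);
}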
Example #3
/***************************************************************
Creates a rollback segment header page in the given file space and
registers it in a free slot of the trx system header. */
ulint
trx_rseg_header_create(
/*===================*/
				/* out: page number of the created segment,
				FIL_NULL on failure */
	ulint	space,		/* in: space id */
	ulint	max_size,	/* in: max size in pages */
	ulint*	slot_no,	/* out: rseg id == slot number in trx sys */
	mtr_t*	mtr)		/* in: mtr */
{
	ulint		page_no;
	trx_rsegf_t*	rsegf;
	trx_sysf_t*	sys_header;
	ulint		i;
	page_t*		page;
	
	ut_ad(mtr);
#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&kernel_mutex));
#endif /* UNIV_SYNC_DEBUG */
	ut_ad(mtr_memo_contains(mtr, fil_space_get_latch(space),
							MTR_MEMO_X_LOCK));
	sys_header = trx_sysf_get(mtr);

	*slot_no = trx_sysf_rseg_find_free(mtr);

	if (*slot_no == ULINT_UNDEFINED) {

		return(FIL_NULL);
	}

	/* Allocate a new file segment for the rollback segment */
	page = fseg_create(space, 0, TRX_RSEG + TRX_RSEG_FSEG_HEADER, mtr);

	if (page == NULL) {
		/* No space left */

		return(FIL_NULL);
	}

#ifdef UNIV_SYNC_DEBUG
	buf_page_dbg_add_level(page, SYNC_RSEG_HEADER_NEW);
#endif /* UNIV_SYNC_DEBUG */

	page_no = buf_frame_get_page_no(page);

	/* Get the rollback segment file page */
	rsegf = trx_rsegf_get_new(space, page_no, mtr);
					    
	/* Initialize max size field */
	mlog_write_ulint(rsegf + TRX_RSEG_MAX_SIZE, max_size, MLOG_4BYTES, mtr);
	
	/* Initialize the history list */

	mlog_write_ulint(rsegf + TRX_RSEG_HISTORY_SIZE, 0, MLOG_4BYTES, mtr);
	flst_init(rsegf + TRX_RSEG_HISTORY, mtr);

	/* Reset the undo log slots */
	for (i = 0; i < TRX_RSEG_N_SLOTS; i++) {

		trx_rsegf_set_nth_undo(rsegf, i, FIL_NULL, mtr);
	}

	/* Add the rollback segment info to the free slot in the trx system
	header */

	trx_sysf_rseg_set_space(sys_header, *slot_no, space, mtr);	
	trx_sysf_rseg_set_page_no(sys_header, *slot_no, page_no, mtr);

	return(page_no);
}
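
The assertions at the top of the function state its preconditions: the caller must hold the kernel mutex and must already have X-latched the file space inside the mini-transaction. The sketch below shows how a caller might satisfy them, assuming the file-space latch is taken before the kernel mutex to respect the latching order; the wrapper name my_create_rseg is hypothetical.

/***************************************************************
Illustrative sketch only: a caller satisfying the preconditions asserted by
trx_rseg_header_create. The wrapper name is hypothetical. */
static
ulint
my_create_rseg(
/*===========*/
				/* out: page number of the header page,
				FIL_NULL on failure */
	ulint	space,		/* in: space id */
	ulint	max_size)	/* in: max size in pages */
{
	ulint	slot_no;
	ulint	page_no;
	mtr_t	mtr;

	mtr_start(&mtr);

	/* X-latch the file space inside the mtr, as the function checks
	with mtr_memo_contains(); assumed here to be acquired before the
	kernel mutex to respect the latching order */
	mtr_x_lock(fil_space_get_latch(space), &mtr);

	mutex_enter(&kernel_mutex);

	page_no = trx_rseg_header_create(space, max_size, &slot_no, &mtr);

	mutex_exit(&kernel_mutex);

	mtr_commit(&mtr);

	return(page_no);
}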
Example #4
/***************************************************************
Purges an update of an existing record. Also purges an update of a delete
marked record if that record contained an externally stored field. */
static
void
row_purge_upd_exist_or_extern(
/*==========================*/
	purge_node_t*	node)	/* in: row purge node */
{
	mem_heap_t*	heap;
	dtuple_t*	entry;
	dict_index_t*	index;
	upd_field_t*	ufield;
	ibool		is_insert;
	ulint		rseg_id;
	ulint		page_no;
	ulint		offset;
	ulint		internal_offset;
	byte*		data_field;
	ulint		data_field_len;
	ulint		i;
	mtr_t		mtr;

	ut_ad(node);

	if (node->rec_type == TRX_UNDO_UPD_DEL_REC) {

		goto skip_secondaries;
	}

	heap = mem_heap_create(1024);

	while (node->index != NULL) {
		index = node->index;

		if (row_upd_changes_ord_field_binary(NULL, node->index,
						     node->update)) {
			/* Build the older version of the index entry */
			entry = row_build_index_entry(node->row, index, heap);

			row_purge_remove_sec_if_poss(node, index, entry);
		}

		node->index = dict_table_get_next_index(node->index);
	}

	mem_heap_free(heap);

skip_secondaries:
	/* Free possible externally stored fields */
	for (i = 0; i < upd_get_n_fields(node->update); i++) {

		ufield = upd_get_nth_field(node->update, i);

		if (ufield->extern_storage) {
			/* We use the fact that new_val points into
			node->undo_rec to get the offset of the
			dfield data inside the undo record. Then we
			can calculate from node->roll_ptr the file
			address of the new_val data */

			internal_offset = ((byte*)ufield->new_val.data)
				- node->undo_rec;

			ut_a(internal_offset < UNIV_PAGE_SIZE);

			trx_undo_decode_roll_ptr(node->roll_ptr,
						 &is_insert, &rseg_id,
						 &page_no, &offset);
			mtr_start(&mtr);

			/* We have to acquire an X-latch to the clustered
			index tree */

			index = dict_table_get_first_index(node->table);

			mtr_x_lock(dict_index_get_lock(index), &mtr);

			/* NOTE: we must also acquire an X-latch to the
			root page of the tree. We will need it when we
			free pages from the tree. If the tree is of height 1,
			the tree X-latch does NOT protect the root page,
			because it is also a leaf page. Since we will have a
			latch on an undo log page, we would break the
			latching order if we would only later latch the
			root page of such a tree! */

			btr_root_get(index, &mtr);

			/* We assume in purge of externally stored fields
			that the space id of the undo log record is 0! */

			data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
				+ offset + internal_offset;

#ifdef UNIV_SYNC_DEBUG
			buf_page_dbg_add_level(buf_frame_align(data_field),
					       SYNC_TRX_UNDO_PAGE);
#endif /* UNIV_SYNC_DEBUG */

			data_field_len = ufield->new_val.len;

			btr_free_externally_stored_field(index, data_field,
							 data_field_len,
							 FALSE, &mtr);
			mtr_commit(&mtr);
		}
	}
}
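
Compared with the earlier variant in Example #2, this version also X-latches the index root page through btr_root_get() before touching the undo log page, for the latching-order reason given in the comment above. Below is a condensed sketch of that latch sequence, extracted into a hypothetical helper (my_free_one_extern_field) so the order is easy to follow; the body mirrors the code above and is not a separate implementation.

/***************************************************************
Illustrative sketch only: the latch order used above when freeing one
externally stored field. The helper name is hypothetical. */
static
void
my_free_one_extern_field(
/*=====================*/
	purge_node_t*	node,	/* in: row purge node */
	upd_field_t*	ufield)	/* in: update field with extern storage */
{
	dict_index_t*	index;
	byte*		data_field;
	ibool		is_insert;
	ulint		rseg_id;
	ulint		page_no;
	ulint		offset;
	mtr_t		mtr;

	trx_undo_decode_roll_ptr(node->roll_ptr, &is_insert, &rseg_id,
				 &page_no, &offset);
	mtr_start(&mtr);

	/* 1. X-latch the clustered index tree */
	index = dict_table_get_first_index(node->table);
	mtr_x_lock(dict_index_get_lock(index), &mtr);

	/* 2. X-latch the root page before any undo log page, so that the
	latching order holds even for a tree of height 1 */
	btr_root_get(index, &mtr);

	/* 3. Only then latch the undo log page (space 0) holding the
	column data */
	data_field = buf_page_get(0, page_no, RW_X_LATCH, &mtr)
		+ offset
		+ (((byte*)ufield->new_val.data) - node->undo_rec);

	/* 4. Free the externally stored field and release all latches */
	btr_free_externally_stored_field(index, data_field,
					 ufield->new_val.len, FALSE, &mtr);
	mtr_commit(&mtr);
}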