ibool
row_undo_search_clust_to_pcur(
/*==========================*/
				/* out: TRUE if found; NOTE the node->pcur
				must be closed by the caller, regardless of
				the return value */
	undo_node_t*	node)	/* in: row undo node */
{
	dict_index_t*	clust_index;
	ibool		found;
	mtr_t		mtr;
	ibool		ret;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	*offsets_ = (sizeof offsets_) / sizeof *offsets_;

	mtr_start(&mtr);

	clust_index = dict_table_get_first_index(node->table);

	found = row_search_on_row_ref(&(node->pcur), BTR_MODIFY_LEAF,
				      node->table, node->ref, &mtr);

	rec = btr_pcur_get_rec(&(node->pcur));

	offsets = rec_get_offsets(rec, clust_index, offsets,
				  ULINT_UNDEFINED, &heap);

	if (!found || 0 != ut_dulint_cmp(node->roll_ptr,
					 row_get_rec_roll_ptr(
						 rec, clust_index, offsets))) {

		/* We must remove the reservation on the undo log record
		BEFORE releasing the latch on the clustered index page: this
		is to make sure that some thread will eventually undo the
		modification corresponding to node->roll_ptr. */

		/* fputs("--------------------undoing a previous version\n",
		stderr); */

		ret = FALSE;
	} else {
		node->row = row_build(ROW_COPY_DATA, clust_index, rec,
				      offsets, node->heap);
		btr_pcur_store_position(&(node->pcur), &mtr);

		ret = TRUE;
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(ret);
}
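/* Illustrative sketch, not from the original source: the function above
uses the recurring InnoDB idiom of a stack-allocated offsets array whose
first element records its capacity (rec_offs_init() is the canonical
wrapper for the manual assignment above).  rec_get_offsets() only creates
a heap when the record has more fields than REC_OFFS_NORMAL_SIZE can
describe, so the heap is freed conditionally.  The helper name
print_field_lens() is hypothetical, purely to demonstrate the idiom. */

#include "univ.i"
#include "rem0rec.h"	/* rec_get_offsets(), rec_offs_init() */
#include "mem0mem.h"	/* mem_heap_t, mem_heap_free() */

static
void
print_field_lens(
/*=============*/
	const rec_t*		rec,	/* in: physical record */
	const dict_index_t*	index)	/* in: index of the record */
{
	mem_heap_t*	heap	= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets	= offsets_;
	ulint		i;
	rec_offs_init(offsets_);

	offsets = rec_get_offsets(rec, index, offsets,
				  ULINT_UNDEFINED, &heap);

	for (i = 0; i < rec_offs_n_fields(offsets); i++) {
		ulint	len;

		(void) rec_get_nth_field(rec, offsets, i, &len);

		fprintf(stderr, "field %lu: len %lu\n",
			(ulong) i, (ulong) len);
	}

	/* The heap exists only on the fallback path. */
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}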
/*******************************************************************//**
Fills the "lock_data" member of i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: rec num used to find the
					data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	mtr_t			mtr;
	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;

	ut_a(lock_get_type(lock) == LOCK_REC);

	mtr_start(&mtr);

	block = buf_page_try_get(lock_rec_get_space_id(lock),
				 lock_rec_get_page_no(lock),
				 &mtr);

	if (block == NULL) {

		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = (const page_t*) buf_block_get_frame(block);

	rec = page_find_rec_with_heap_no(page, heap_no);

	if (page_rec_is_infimum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "infimum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else if (page_rec_is_supremum(rec)) {

		*lock_data = ha_storage_put_str_memlim(
			cache->storage, "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
	} else {

		const dict_index_t*	index;
		ulint			n_fields;
		mem_heap_t*		heap;
		ulint			offsets_onstack[REC_OFFS_NORMAL_SIZE];
		ulint*			offsets;
		char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
		ulint			buf_used;
		ulint			i;

		rec_offs_init(offsets_onstack);
		offsets = offsets_onstack;

		index = lock_rec_get_index(lock);

		n_fields = dict_index_get_n_unique(index);

		ut_a(n_fields > 0);

		heap = NULL;
		offsets = rec_get_offsets(rec, index, offsets, n_fields,
					  &heap);

		/* format and store the data */

		buf_used = 0;
		for (i = 0; i < n_fields; i++) {

			buf_used += put_nth_field(
				buf + buf_used, sizeof(buf) - buf_used,
				i, index, rec, offsets) - 1;
		}

		*lock_data = (const char*) ha_storage_put_memlim(
			cache->storage, buf, buf_used + 1,
			MAX_ALLOWED_FOR_STORAGE(cache));

		if (UNIV_UNLIKELY(heap != NULL)) {

			/* this means that rec_get_offsets() has created
			a new heap and has stored offsets in it; check
			that this is really the case and free the heap */
			ut_a(offsets != offsets_onstack);
			mem_heap_free(heap);
		}
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}
/**************************************************************//**
Restores the stored position of a persistent cursor, buffer-fixing the page
and obtaining the specified latches. If the cursor position was saved when
the
(1) cursor was positioned on a user record: this function restores the
position to the last record LESS OR EQUAL to the stored record;
(2) cursor was positioned on a page infimum record: restores the position
to the last record LESS than the user record which was the successor of the
page infimum;
(3) cursor was positioned on the page supremum: restores to the first record
GREATER than the user record which was the predecessor of the supremum.
(4) cursor was positioned before the first or after the last in an empty
tree: restores to before first or after the last in the tree.
@return TRUE if the cursor position was stored when it was on a user
record and it can be restored on a user record whose ordering fields
are identical to the ones of the original user record */
UNIV_INTERN
ibool
btr_pcur_restore_position_func(
/*===========================*/
	ulint		latch_mode,	/*!< in: BTR_SEARCH_LEAF, ... */
	btr_pcur_t*	cursor,		/*!< in: detached persistent cursor */
	const char*	file,		/*!< in: file name */
	ulint		line,		/*!< in: line where called */
	mtr_t*		mtr)		/*!< in: mtr */
{
	dict_index_t*	index;
	dtuple_t*	tuple;
	ulint		mode;
	ulint		old_mode;
	mem_heap_t*	heap;

	ut_ad(mtr);
	ut_ad(mtr->state == MTR_ACTIVE);

	index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));

	if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
	    || UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
			     && cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
		ut_print_buf(stderr, cursor, sizeof(btr_pcur_t));
		putc('\n', stderr);
		if (cursor->trx_if_known) {
			trx_print(stderr, cursor->trx_if_known, 0);
		}

		ut_error;
	}

	if (UNIV_UNLIKELY
	    (cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
	     || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {

		/* In these cases we do not try an optimistic restoration,
		but always do a search */

		btr_cur_open_at_index_side(
			cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
			index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);

		cursor->latch_mode = latch_mode;
		cursor->pos_state = BTR_PCUR_IS_POSITIONED;
		cursor->block_when_stored = btr_pcur_get_block(cursor);

		return(FALSE);
	}

	ut_a(cursor->old_rec);
	ut_a(cursor->old_n_fields);

	if (UNIV_LIKELY(latch_mode == BTR_SEARCH_LEAF)
	    || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
		/* Try optimistic restoration */

		if (UNIV_LIKELY(buf_page_optimistic_get(
					latch_mode,
					cursor->block_when_stored,
					cursor->modify_clock,
					file, line, mtr))) {
			cursor->pos_state = BTR_PCUR_IS_POSITIONED;

			buf_block_dbg_add_level(
				btr_pcur_get_block(cursor),
				dict_index_is_ibuf(index)
				? SYNC_IBUF_TREE_NODE : SYNC_TREE_NODE);

			if (cursor->rel_pos == BTR_PCUR_ON) {
#ifdef UNIV_DEBUG
				const rec_t*	rec;
				const ulint*	offsets1;
				const ulint*	offsets2;
#endif /* UNIV_DEBUG */
				cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
				rec = btr_pcur_get_rec(cursor);

				heap = mem_heap_create(256);
				offsets1 = rec_get_offsets(
					cursor->old_rec, index, NULL,
					cursor->old_n_fields, &heap);
				offsets2 = rec_get_offsets(
					rec, index, NULL,
					cursor->old_n_fields, &heap);

				ut_ad(!cmp_rec_rec(cursor->old_rec,
						   rec, offsets1, offsets2,
						   index));
				mem_heap_free(heap);
#endif /* UNIV_DEBUG */
				return(TRUE);
			}

			return(FALSE);
		}
	}

	/* If optimistic restoration did not succeed, open the cursor anew */

	heap = mem_heap_create(256);

	tuple = dict_index_build_data_tuple(index, cursor->old_rec,
					    cursor->old_n_fields, heap);

	/* Save the old search mode of the cursor */
	old_mode = cursor->search_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		mode = PAGE_CUR_LE;
		break;
	case BTR_PCUR_AFTER:
		mode = PAGE_CUR_G;
		break;
	case BTR_PCUR_BEFORE:
		mode = PAGE_CUR_L;
		break;
	default:
		ut_error;
		mode = 0;
	}

	btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode,
					cursor, 0, file, line, mtr);

	/* Restore the old search mode */
	cursor->search_mode = old_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		if (btr_pcur_is_on_user_rec(cursor)
		    && !cmp_dtuple_rec(
			    tuple, btr_pcur_get_rec(cursor),
			    rec_get_offsets(btr_pcur_get_rec(cursor),
					    index, NULL,
					    ULINT_UNDEFINED, &heap))) {

			/* We have to store the NEW value for the modify
			clock, since the cursor can now be on a different
			page! But we can retain the value of old_rec */

			cursor->block_when_stored
				= btr_pcur_get_block(cursor);
			cursor->modify_clock = buf_block_get_modify_clock(
				cursor->block_when_stored);
			cursor->old_stored = BTR_PCUR_OLD_STORED;

			mem_heap_free(heap);

			return(TRUE);
		}
#ifdef UNIV_DEBUG
		/* fall through */
	case BTR_PCUR_BEFORE:
	case BTR_PCUR_AFTER:
		break;
	default:
		ut_error;
#endif /* UNIV_DEBUG */
	}

	mem_heap_free(heap);

	/* We have to store new position information, modify_clock etc.,
	to the cursor because it can now be on a different page, the record
	under it may have been removed, etc. */

	btr_pcur_store_position(cursor, mtr);

	return(FALSE);
}
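/* Illustrative sketch, not from the original source: the canonical way a
caller uses the store/restore pair above to suspend a scan across
mini-transaction boundaries.  btr_pcur_restore_position() is assumed to
be the usual macro wrapper around btr_pcur_restore_position_func() that
supplies __FILE__ and __LINE__; the helper name is hypothetical. */

#include "univ.i"
#include "btr0pcur.h"	/* btr_pcur_t, btr_pcur_store_position(), ... */
#include "mtr0mtr.h"	/* mtr_t, mtr_start() */

static
void
suspend_and_resume_scan(
/*====================*/
	btr_pcur_t*	pcur,	/* in/out: persistent cursor positioned
				inside an active mtr */
	mtr_t*		mtr)	/* in/out: mtr holding the page latch */
{
	/* Remember the cursor position before releasing page latches. */
	btr_pcur_store_position(pcur, mtr);
	btr_pcur_commit_specify_mtr(pcur, mtr);

	/* ... other work may run here; pages may be evicted or split ... */

	mtr_start(mtr);

	/* Optimistic restoration succeeds if the stored block is still
	in the buffer pool with an unchanged modify clock; otherwise the
	function searches the tree anew from the stored tuple. */
	if (btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, mtr)) {
		/* The cursor is again on a record whose ordering fields
		are identical to the stored one. */
	}
}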
ibool
btr_pcur_restore_position(
/*======================*/
					/* out: TRUE if the cursor position
					was stored when it was on a user
					record and it can be restored on a
					user record whose ordering fields
					are identical to the ones of the
					original user record */
	ulint		latch_mode,	/* in: BTR_SEARCH_LEAF, ... */
	btr_pcur_t*	cursor,		/* in: detached persistent cursor */
	mtr_t*		mtr)		/* in: mtr */
{
	dict_index_t*	index;
	page_t*		page;
	dtuple_t*	tuple;
	ulint		mode;
	ulint		old_mode;
	mem_heap_t*	heap;

	index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));

	if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
	    || UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
			     && cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
		ut_print_buf(stderr, cursor, sizeof(btr_pcur_t));
		if (cursor->trx_if_known) {
			trx_print(stderr, cursor->trx_if_known, 0);
		}

		ut_error;
	}

	if (UNIV_UNLIKELY(
		    cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
		    || cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {

		/* In these cases we do not try an optimistic restoration,
		but always do a search */

		btr_cur_open_at_index_side(
			cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
			index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);

		cursor->block_when_stored
			= buf_block_align(btr_pcur_get_page(cursor));

		return(FALSE);
	}

	ut_a(cursor->old_rec);
	ut_a(cursor->old_n_fields);

	page = btr_cur_get_page(btr_pcur_get_btr_cur(cursor));

	if (UNIV_LIKELY(latch_mode == BTR_SEARCH_LEAF)
	    || UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
		/* Try optimistic restoration */

		if (UNIV_LIKELY(buf_page_optimistic_get(
					latch_mode,
					cursor->block_when_stored, page,
					cursor->modify_clock, mtr))) {
			cursor->pos_state = BTR_PCUR_IS_POSITIONED;
#ifdef UNIV_SYNC_DEBUG
			buf_page_dbg_add_level(page, SYNC_TREE_NODE);
#endif /* UNIV_SYNC_DEBUG */
			if (cursor->rel_pos == BTR_PCUR_ON) {
#ifdef UNIV_DEBUG
				rec_t*	rec;
				ulint*	offsets1;
				ulint*	offsets2;
#endif /* UNIV_DEBUG */
				cursor->latch_mode = latch_mode;
#ifdef UNIV_DEBUG
				rec = btr_pcur_get_rec(cursor);

				heap = mem_heap_create(256);
				offsets1 = rec_get_offsets(
					cursor->old_rec, index, NULL,
					cursor->old_n_fields, &heap);
				offsets2 = rec_get_offsets(
					rec, index, NULL,
					cursor->old_n_fields, &heap);

				ut_ad(!cmp_rec_rec(cursor->old_rec,
						   rec, offsets1, offsets2,
						   index));
				mem_heap_free(heap);
#endif /* UNIV_DEBUG */
				return(TRUE);
			}

			return(FALSE);
		}
	}

	/* If optimistic restoration did not succeed, open the cursor anew */

	heap = mem_heap_create(256);

	tuple = dict_index_build_data_tuple(index, cursor->old_rec,
					    cursor->old_n_fields, heap);

	/* Save the old search mode of the cursor */
	old_mode = cursor->search_mode;

	switch (cursor->rel_pos) {
	case BTR_PCUR_ON:
		mode = PAGE_CUR_LE;
		break;
	case BTR_PCUR_AFTER:
		mode = PAGE_CUR_G;
		break;
	case BTR_PCUR_BEFORE:
		mode = PAGE_CUR_L;
		break;
	default:
		ut_error;
		mode = 0; /* silence a warning */
	}

	btr_pcur_open_with_no_init(index, tuple, mode, latch_mode,
				   cursor, 0, mtr);

	/* Restore the old search mode */
	cursor->search_mode = old_mode;

	if (btr_pcur_is_on_user_rec(cursor, mtr)) {
		switch (cursor->rel_pos) {
		case BTR_PCUR_ON:
			if (!cmp_dtuple_rec(
				    tuple, btr_pcur_get_rec(cursor),
				    rec_get_offsets(btr_pcur_get_rec(cursor),
						    index, NULL,
						    ULINT_UNDEFINED, &heap))) {

				/* We have to store the NEW value for the
				modify clock, since the cursor can now be on
				a different page!
				But we can retain the value of old_rec */

				cursor->block_when_stored = buf_block_align(
					btr_pcur_get_page(cursor));
				cursor->modify_clock
					= buf_block_get_modify_clock(
						cursor->block_when_stored);
				cursor->old_stored = BTR_PCUR_OLD_STORED;

				mem_heap_free(heap);

				return(TRUE);
			}

			break;
		case BTR_PCUR_BEFORE:
			page_cur_move_to_next(btr_pcur_get_page_cur(cursor));
			break;
		case BTR_PCUR_AFTER:
			page_cur_move_to_prev(btr_pcur_get_page_cur(cursor));
			break;
#ifdef UNIV_DEBUG
		default:
			ut_error;
#endif /* UNIV_DEBUG */
		}
	}

	mem_heap_free(heap);

	/* We have to store new position information, modify_clock etc.,
	to the cursor because it can now be on a different page, the record
	under it may have been removed, etc. */

	btr_pcur_store_position(cursor, mtr);

	return(FALSE);
}
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
UNIV_INTERN
void
row_build_row_ref_in_tuple(
/*=======================*/
	dtuple_t*		ref,	/*!< in/out: row reference built;
					see the NOTE below! */
	const rec_t*		rec,	/*!< in: record in the index;
					NOTE: the data fields in ref
					will point directly into this
					record, therefore, the buffer
					page of this record must be at
					least s-latched and the latch
					held as long as the row
					reference is used! */
	const dict_index_t*	index,	/*!< in: secondary index */
	ulint*			offsets,/*!< in: rec_get_offsets(rec, index)
					or NULL */
	trx_t*			trx)	/*!< in: transaction */
{
	const dict_index_t*	clust_index;
	dfield_t*		dfield;
	const byte*		field;
	ulint			len;
	ulint			ref_len;
	ulint			pos;
	ulint			clust_col_prefix_len;
	ulint			i;
	mem_heap_t*		heap		= NULL;
	ulint			offsets_[REC_OFFS_NORMAL_SIZE];
	rec_offs_init(offsets_);

	ut_a(ref);
	ut_a(index);
	ut_a(rec);
	ut_ad(!dict_index_is_clust(index));

	if (UNIV_UNLIKELY(!index->table)) {
		fputs("InnoDB: table ", stderr);
notfound:
		ut_print_name(stderr, trx, TRUE, index->table_name);
		fputs(" for index ", stderr);
		ut_print_name(stderr, trx, FALSE, index->name);
		fputs(" not found\n", stderr);
		ut_error;
	}

	clust_index = dict_table_get_first_index(index->table);

	if (UNIV_UNLIKELY(!clust_index)) {
		fputs("InnoDB: clust index for table ", stderr);
		goto notfound;
	}

	if (!offsets) {
		offsets = rec_get_offsets(rec, index, offsets_,
					  ULINT_UNDEFINED, &heap);
	} else {
		ut_ad(rec_offs_validate(rec, index, offsets));
	}

	/* Secondary indexes must not contain externally stored columns. */
	ut_ad(!rec_offs_any_extern(offsets));

	ref_len = dict_index_get_n_unique(clust_index);

	ut_ad(ref_len == dtuple_get_n_fields(ref));

	dict_index_copy_types(ref, clust_index, ref_len);

	for (i = 0; i < ref_len; i++) {
		dfield = dtuple_get_nth_field(ref, i);

		pos = dict_index_get_nth_field_pos(index, clust_index, i);

		ut_a(pos != ULINT_UNDEFINED);

		field = rec_get_nth_field(rec, offsets, pos, &len);

		dfield_set_data(dfield, field, len);

		/* If the primary key contains a column prefix, then the
		secondary index may contain a longer prefix of the same
		column, or the full column, and we must adjust the length
		accordingly. */

		clust_col_prefix_len = dict_index_get_nth_field(
			clust_index, i)->prefix_len;

		if (clust_col_prefix_len > 0) {
			if (len != UNIV_SQL_NULL) {

				const dtype_t*	dtype
					= dfield_get_type(dfield);

				dfield_set_len(dfield,
					       dtype_get_at_most_n_mbchars(
						       dtype->prtype,
						       dtype->mbminlen,
						       dtype->mbmaxlen,
						       clust_col_prefix_len,
						       len, (char*) field));
			}
		}
	}

	ut_ad(dtuple_check_typed(ref));
	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
@return own: row reference built; see the NOTE below! */
UNIV_INTERN
dtuple_t*
row_build_row_ref(
/*==============*/
	ulint		type,	/*!< in: ROW_COPY_DATA, or ROW_COPY_POINTERS:
				the former copies also the data fields to
				heap, whereas the latter only places pointers
				to data fields on the index page */
	dict_index_t*	index,	/*!< in: secondary index */
	const rec_t*	rec,	/*!< in: record in the index;
				NOTE: in the case ROW_COPY_POINTERS
				the data fields in the row will point
				directly into this record, therefore,
				the buffer page of this record must be
				at least s-latched and the latch held
				as long as the row reference is used! */
	mem_heap_t*	heap)	/*!< in: memory heap from which the memory
				needed is allocated */
{
	dict_table_t*	table;
	dict_index_t*	clust_index;
	dfield_t*	dfield;
	dtuple_t*	ref;
	const byte*	field;
	ulint		len;
	ulint		ref_len;
	ulint		pos;
	byte*		buf;
	ulint		clust_col_prefix_len;
	ulint		i;
	mem_heap_t*	tmp_heap	= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	ut_ad(index && rec && heap);
	ut_ad(!dict_index_is_clust(index));

	offsets = rec_get_offsets(rec, index, offsets,
				  ULINT_UNDEFINED, &tmp_heap);
	/* Secondary indexes must not contain externally stored columns. */
	ut_ad(!rec_offs_any_extern(offsets));

	if (type == ROW_COPY_DATA) {
		/* Take a copy of rec to heap */

		buf = mem_heap_alloc(heap, rec_offs_size(offsets));

		rec = rec_copy(buf, rec, offsets);
		/* Avoid a debug assertion in rec_offs_validate(). */
		rec_offs_make_valid(rec, index, offsets);
	}

	table = index->table;

	clust_index = dict_table_get_first_index(table);

	ref_len = dict_index_get_n_unique(clust_index);

	ref = dtuple_create(heap, ref_len);

	dict_index_copy_types(ref, clust_index, ref_len);

	for (i = 0; i < ref_len; i++) {
		dfield = dtuple_get_nth_field(ref, i);

		pos = dict_index_get_nth_field_pos(index, clust_index, i);

		ut_a(pos != ULINT_UNDEFINED);

		field = rec_get_nth_field(rec, offsets, pos, &len);

		dfield_set_data(dfield, field, len);

		/* If the primary key contains a column prefix, then the
		secondary index may contain a longer prefix of the same
		column, or the full column, and we must adjust the length
		accordingly. */

		clust_col_prefix_len = dict_index_get_nth_field(
			clust_index, i)->prefix_len;

		if (clust_col_prefix_len > 0) {
			if (len != UNIV_SQL_NULL) {

				const dtype_t*	dtype
					= dfield_get_type(dfield);

				dfield_set_len(dfield,
					       dtype_get_at_most_n_mbchars(
						       dtype->prtype,
						       dtype->mbminlen,
						       dtype->mbmaxlen,
						       clust_col_prefix_len,
						       len, (char*) field));
			}
		}
	}

	ut_ad(dtuple_check_typed(ref));
	if (tmp_heap) {
		mem_heap_free(tmp_heap);
	}

	return(ref);
}
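/* Illustrative sketch, not from the original source: a condensed view of
what row_search_on_row_ref() does with a reference built by
row_build_row_ref().  Because the reference contains all unique ordering
fields of the clustered index, a PAGE_CUR_LE search lands on the target
record when it exists (the caller should still verify the match).  The
helper name lookup_clust_rec() is hypothetical. */

#include "univ.i"
#include "row0row.h"	/* row_build_row_ref() */
#include "btr0pcur.h"	/* btr_pcur_open(), btr_pcur_get_rec() */

static
const rec_t*
lookup_clust_rec(
/*=============*/
	dict_index_t*	sec_index,	/* in: secondary index */
	const rec_t*	sec_rec,	/* in: record in sec_index; must
					stay latched during the call */
	mem_heap_t*	heap,		/* in: heap for the row reference */
	btr_pcur_t*	pcur,		/* out: positioned cursor */
	mtr_t*		mtr)		/* in/out: active mtr */
{
	dict_index_t*	clust_index;
	dtuple_t*	ref;

	clust_index = dict_table_get_first_index(sec_index->table);

	/* ROW_COPY_DATA: the reference fields are copied to heap, so
	they remain valid after the secondary index page is released. */
	ref = row_build_row_ref(ROW_COPY_DATA, sec_index, sec_rec, heap);

	btr_pcur_open(clust_index, ref, PAGE_CUR_LE,
		      BTR_SEARCH_LEAF, pcur, mtr);

	return(btr_pcur_get_rec(pcur));
}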
/*******************************************************************//**
An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index.
@return own: row built; see the NOTE below! */
UNIV_INTERN
dtuple_t*
row_build(
/*======*/
	ulint			type,	/*!< in: ROW_COPY_POINTERS or
					ROW_COPY_DATA; the latter
					copies also the data fields to
					heap while the first only
					places pointers to data fields
					on the index page, and thus is
					more efficient */
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_t*		rec,	/*!< in: record in the clustered
					index; NOTE: in the case
					ROW_COPY_POINTERS the data
					fields in the row will point
					directly into this record,
					therefore, the buffer page of
					this record must be at least
					s-latched and the latch held
					as long as the row dtuple is used! */
	const ulint*		offsets,/*!< in: rec_get_offsets(rec,index)
					or NULL, in which case this function
					will invoke rec_get_offsets() */
	const dict_table_t*	col_table,
					/*!< in: table, to check which
					externally stored columns
					occur in the ordering columns
					of an index, or NULL if
					index->table should be
					consulted instead */
	row_ext_t**		ext,	/*!< out, own: cache of
					externally stored column
					prefixes, or NULL */
	mem_heap_t*		heap)	/*!< in: memory heap from which
					the memory needed is allocated */
{
	dtuple_t*		row;
	const dict_table_t*	table;
	ulint			n_fields;
	ulint			n_ext_cols;
	ulint*			ext_cols	= NULL; /* remove warning */
	ulint			len;
	ulint			row_len;
	byte*			buf;
	ulint			i;
	ulint			j;
	mem_heap_t*		tmp_heap	= NULL;
	ulint			offsets_[REC_OFFS_NORMAL_SIZE];
	rec_offs_init(offsets_);

	ut_ad(index && rec && heap);
	ut_ad(dict_index_is_clust(index));
	ut_ad(!mutex_own(&kernel_mutex));

	if (!offsets) {
		offsets = rec_get_offsets(rec, index, offsets_,
					  ULINT_UNDEFINED, &tmp_heap);
	} else {
		ut_ad(rec_offs_validate(rec, index, offsets));
	}

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
	if (rec_offs_any_null_extern(rec, offsets)) {
		/* This condition can occur during crash recovery
		before trx_rollback_active() has completed execution,
		or when a concurrently executing
		row_ins_index_entry_low() has committed the B-tree
		mini-transaction but has not yet managed to restore
		the cursor position for writing the big_rec. */
		ut_a(trx_undo_roll_ptr_is_insert(
			     row_get_rec_roll_ptr(rec, index, offsets)));
	}
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

	if (type != ROW_COPY_POINTERS) {
		/* Take a copy of rec to heap */
		buf = mem_heap_alloc(heap, rec_offs_size(offsets));
		rec = rec_copy(buf, rec, offsets);
		/* Avoid a debug assertion in rec_offs_validate(). */
		rec_offs_make_valid(rec, index, (ulint*) offsets);
	}

	table = index->table;
	row_len = dict_table_get_n_cols(table);

	row = dtuple_create(heap, row_len);

	dict_table_copy_types(row, table);

	dtuple_set_info_bits(row, rec_get_info_bits(
				     rec, dict_table_is_comp(table)));

	n_fields = rec_offs_n_fields(offsets);
	n_ext_cols = rec_offs_n_extern(offsets);
	if (n_ext_cols) {
		ext_cols = mem_heap_alloc(heap, n_ext_cols * sizeof *ext_cols);
	}

	for (i = j = 0; i < n_fields; i++) {
		dict_field_t*		ind_field
			= dict_index_get_nth_field(index, i);
		const dict_col_t*	col
			= dict_field_get_col(ind_field);
		ulint			col_no
			= dict_col_get_no(col);
		dfield_t*		dfield
			= dtuple_get_nth_field(row, col_no);

		if (ind_field->prefix_len == 0) {

			const byte*	field = rec_get_nth_field(
				rec, offsets, i, &len);

			dfield_set_data(dfield, field, len);
		}

		if (rec_offs_nth_extern(offsets, i)) {
			dfield_set_ext(dfield);

			if (UNIV_LIKELY_NULL(col_table)) {
				ut_a(col_no
				     < dict_table_get_n_cols(col_table));
				col = dict_table_get_nth_col(
					col_table, col_no);
			}

			if (col->ord_part) {
				/* We will have to fetch prefixes of
				externally stored columns that are
				referenced by column prefixes. */
				ext_cols[j++] = col_no;
			}
		}
	}

	ut_ad(dtuple_check_typed(row));

	if (!ext) {
		/* REDUNDANT and COMPACT formats store a local
		768-byte prefix of each externally stored
		column. No cache is needed. */
		ut_ad(dict_table_get_format(index->table)
		      < DICT_TF_FORMAT_ZIP);
	} else if (j) {
		*ext = row_ext_create(j, ext_cols, row,
				      dict_table_zip_size(index->table),
				      heap);
	} else {
		*ext = NULL;
	}

	if (tmp_heap) {
		mem_heap_free(tmp_heap);
	}

	return(row);
}
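/* Illustrative sketch, not from the original source: row_build() and its
inverse row_build_index_entry() are typically used together to derive the
secondary index entry corresponding to a clustered index record, as the
version-checking functions below do.  The helper name build_sec_entry()
is hypothetical. */

#include "univ.i"
#include "row0row.h"	/* row_build(), row_build_index_entry() */

static
const dtuple_t*
build_sec_entry(
/*============*/
	dict_index_t*	clust_index,	/* in: clustered index */
	const rec_t*	rec,		/* in: record in clust_index; its
					page must stay latched, since
					ROW_COPY_POINTERS makes the row
					point into the page */
	dict_index_t*	sec_index,	/* in: secondary index */
	mem_heap_t*	heap)		/* in: heap for row and entry */
{
	row_ext_t*	ext;
	dtuple_t*	row;

	/* NULL offsets: row_build() computes them itself.  "ext"
	receives a cache of externally stored column prefixes, which
	row_build_index_entry() needs if an indexed column is stored
	off-page. */
	row = row_build(ROW_COPY_POINTERS, clust_index, rec, NULL,
			NULL, &ext, heap);

	/* NOTE: may return NULL if the record contains unset BLOB
	pointers, i.e. it is a freshly inserted record. */
	return(row_build_index_entry(row, ext, sec_index, heap));
}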
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	const rec_t**	old_vers)/*!< out: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= ut_dulint_zero;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from
	changing.  Thus, if we have an uncommitted transaction at
	this point, then purge cannot remove its undo log even if
	the transaction could commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		if (version_trx
		    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
			|| version_trx->conc_state == TRX_NOT_STARTED)) {

			version_trx = NULL;
		}
		mutex_exit(&kernel_mutex);

		if (!version_trx) {
			/* We found a version that belongs to a
			committed transaction: return it. */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
			ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
/*****************************************************************//**
Finds out if an active transaction has inserted or modified a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function!
@return NULL if committed, else the active transaction */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: the secondary index */
	const ulint*	offsets)/*!< in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;
	rec_t*		clust_rec;
	ulint*		clust_offsets;
	rec_t*		version;
	trx_id_t	trx_id;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		rec_del;
#ifdef UNIV_DEBUG
	ulint		err;
#endif /* UNIV_DEBUG */
	mtr_t		mtr;
	ulint		comp;

	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record: this is a time-consuming
	operation: therefore we release the kernel mutex; also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed the
		clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */
		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */
		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We look up if some earlier version, which was modified by the
	trx_id transaction, of the clustered index record would require rec
	to be in a different state (delete marked or unmarked, or have
	different field values, or not existing). If there is such a version,
	then rec was modified by the trx_id transaction, and it has an
	implicit x-lock on rec. Note that if clust_rec itself would require
	rec to be in a different state, then the trx_id transaction has not
	yet had time to modify rec, and does not necessarily have an implicit
	x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		rec_t*		prev_version;
		ulint		vers_del;
		row_ext_t*	ext;
		trx_id_t	prev_trx_id;

		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
#ifdef UNIV_DEBUG
		err =
#endif /* UNIV_DEBUG */
		trx_undo_prev_version_build(clust_rec, &mtr, version,
					    clust_index, clust_offsets,
					    heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version == NULL) {
			mutex_enter(&kernel_mutex);

			if (!trx_is_active(trx_id)) {
				/* Transaction no longer active: no
				implicit x-lock */

				break;
			}

			/* If the transaction is still active,
			clust_rec must be a fresh insert, because no
			previous version was found. */
			ut_ad(err == DB_SUCCESS);

			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		vers_del = rec_get_deleted_flag(prev_version, comp);
		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		/* The stack of versions is locked by mtr.  Thus, it
		is safe to fetch the prefixes for externally stored
		columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
				clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);
		/* entry may be NULL if a record was inserted in place
		of a deleted record, and the BLOB pointers of the new
		record were not initialized yet.  But in that case,
		prev_version should be NULL. */
		ut_a(entry);

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit
			x-lock */

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		/* The previous version of clust_rec must be
		accessible, because the transaction is still active
		and clust_rec was not a fresh insert. */
		ut_ad(err == DB_SUCCESS);

		/* We check whether entry and rec compare as equal in
		the alphabetical ordering */
		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that the
			secondary index record remained the same in
			alphabetical ordering, but the field values changed
			still. For example, 'abc' -> 'ABC'. Check also that. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		if (0 != ut_dulint_cmp(trx_id, prev_trx_id)) {
			/* The versions modified by the trx_id transaction
			end at prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
/*****************************************************************//**
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/*!< out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	rec_t*		prev_version;
	trx_id_t	trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;
		heap = mem_heap_create(1024);

		/* If we have a high-granularity consistent read view and
		the creating transaction of the view is the same as trx_id
		in the record, we see this record only when the undo_no of
		the record is < the undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
				ut_a(!rec_offs_any_null_extern(
					     version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

				buf = mem_heap_alloc(in_heap,
						     rec_offs_size(*offsets));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
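/* Illustrative sketch, not from the original source: how a caller might
decide between returning the current record and building an older
version with the function above.  The helper name
fetch_visible_version() is hypothetical; real callers (e.g. in row0sel.c)
also handle locking and DB_MISSING_HISTORY explicitly. */

#include "univ.i"
#include "row0vers.h"	/* row_vers_build_for_consistent_read() */
#include "row0row.h"	/* row_get_rec_trx_id() */
#include "read0read.h"	/* read_view_t, read_view_sees_trx_id() */
#include "db0err.h"	/* DB_SUCCESS */

static
const rec_t*
fetch_visible_version(
/*==================*/
	const rec_t*	rec,		/* in: latched clustered index rec */
	mtr_t*		mtr,		/* in: mtr holding the latch on rec */
	dict_index_t*	index,		/* in: clustered index */
	ulint**		offsets,	/* in/out: rec_get_offsets(rec) */
	mem_heap_t**	offset_heap,	/* in/out: heap for offsets */
	read_view_t*	view,		/* in: consistent read view */
	mem_heap_t*	in_heap)	/* in: heap for the old version */
{
	rec_t*	old_vers;
	ulint	err;

	if (read_view_sees_trx_id(
		    view, row_get_rec_trx_id(rec, index, *offsets))) {

		return(rec);	/* the current version is visible */
	}

	err = row_vers_build_for_consistent_read(
		rec, mtr, index, offsets, view, offset_heap,
		in_heap, &old_vers);

	/* old_vers is NULL when the row was inserted after the view
	was opened; NULL is also returned here on DB_MISSING_HISTORY. */
	return(err == DB_SUCCESS ? old_vers : NULL);
}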
/*****************************************************************//**
Finds out if a version of the record, where the version >= the current
purge view, should have ientry as its secondary index entry. We check
if there is any not delete marked version of the record where the trx
id >= purge view, and the secondary index entry and ientry are
identified in the alphabetical ordering; exactly in this case we
return TRUE.
@return TRUE if earlier version should have */
UNIV_INTERN
ibool
row_vers_old_has_index_entry(
/*=========================*/
	ibool		also_curr,/*!< in: TRUE if also rec is included in the
				versions to search; otherwise only versions
				prior to it are searched */
	const rec_t*	rec,	/*!< in: record in the clustered index; the
				caller must have a latch on the page */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec; it will
				also hold the latch on purge_view */
	dict_index_t*	index,	/*!< in: the secondary index */
	const dtuple_t*	ientry)	/*!< in: the secondary index entry */
{
	const rec_t*	version;
	rec_t*		prev_version;
	dict_index_t*	clust_index;
	ulint*		clust_offsets;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	const dtuple_t*	row;
	const dtuple_t*	entry;
	ulint		err;
	ulint		comp;

	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	mtr_s_lock(&(purge_sys->latch), mtr);

	clust_index = dict_table_get_first_index(index->table);

	comp = page_rec_is_comp(rec);
	ut_ad(!dict_table_is_comp(index->table) == !comp);
	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);

	if (also_curr && !rec_get_deleted_flag(rec, comp)) {
		row_ext_t*	ext;

		/* The stack of versions is locked by mtr.
		Thus, it is safe to fetch the prefixes for
		externally stored columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index,
				rec, clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);

		/* If entry == NULL, the record contains unset BLOB
		pointers.  This must be a freshly inserted record.  If
		this is called from
		row_purge_remove_sec_if_poss_low(), the thread will
		hold latches on the clustered index and the secondary
		index.  Because the insert works in three steps:

			(1) insert the record to clustered index
			(2) store the BLOBs and update BLOB pointers
			(3) insert records to secondary indexes

		the purge thread can safely ignore freshly inserted
		records and delete the secondary index record.  The
		thread that inserted the new record will be inserting
		the secondary index records. */

		/* NOTE that we cannot do the comparison as binary
		fields because the row is maybe being modified so that
		the clustered index record has already been updated to
		a different binary value in a char field, but the
		collation identifies the old and new value anyway! */
		if (entry && !dtuple_coll_cmp(ientry, entry)) {

			mem_heap_free(heap);

			return(TRUE);
		}
	}

	version = rec;

	for (;;) {
		heap2 = heap;
		heap = mem_heap_create(1024);
		err = trx_undo_prev_version_build(rec, mtr, version,
						  clust_index, clust_offsets,
						  heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (err != DB_SUCCESS || !prev_version) {
			/* Versions end here */

			mem_heap_free(heap);

			return(FALSE);
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		if (!rec_get_deleted_flag(prev_version, comp)) {
			row_ext_t*	ext;

			/* The stack of versions is locked by mtr.
			Thus, it is safe to fetch the prefixes for
			externally stored columns. */
			row = row_build(ROW_COPY_POINTERS, clust_index,
					prev_version, clust_offsets,
					NULL, &ext, heap);
			entry = row_build_index_entry(row, ext, index, heap);

			/* If entry == NULL, the record contains unset
			BLOB pointers.  This must be a freshly
			inserted record that we can safely ignore.
			For the justification, see the comments after
			the previous row_build_index_entry() call. */

			/* NOTE that we cannot do the comparison as binary
			fields because maybe the secondary index record has
			already been updated to a different binary value in
			a char field, but the collation identifies the old
			and new value anyway! */

			if (entry && !dtuple_coll_cmp(ientry, entry)) {

				mem_heap_free(heap);

				return(TRUE);
			}
		}

		version = prev_version;
	}
}
/***********************************************************//**
Looks for the clustered index record when node has the row reference.
The pcur in node is used in the search. If found, stores the row to node,
and stores the position of pcur, and detaches it. The pcur must be closed
by the caller in any case.
@return TRUE if found; NOTE the node->pcur must be closed by the
caller, regardless of the return value */
UNIV_INTERN
ibool
row_undo_search_clust_to_pcur(
/*==========================*/
	undo_node_t*	node)	/*!< in: row undo node */
{
	dict_index_t*	clust_index;
	ibool		found;
	mtr_t		mtr;
	ibool		ret;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	ulint*		offsets		= offsets_;
	rec_offs_init(offsets_);

	mtr_start(&mtr);

	clust_index = dict_table_get_first_index(node->table);

	found = row_search_on_row_ref(&(node->pcur), BTR_MODIFY_LEAF,
				      node->table, node->ref, &mtr);

	rec = btr_pcur_get_rec(&(node->pcur));

	offsets = rec_get_offsets(rec, clust_index, offsets,
				  ULINT_UNDEFINED, &heap);

	if (!found || 0 != ut_dulint_cmp(node->roll_ptr,
					 row_get_rec_roll_ptr(
						 rec, clust_index, offsets))) {

		/* We must remove the reservation on the undo log record
		BEFORE releasing the latch on the clustered index page: this
		is to make sure that some thread will eventually undo the
		modification corresponding to node->roll_ptr. */

		/* fputs("--------------------undoing a previous version\n",
		stderr); */

		ret = FALSE;
	} else {
		row_ext_t**	ext;

		if (dict_table_get_format(node->table) >= DICT_TF_FORMAT_ZIP) {
			/* In DYNAMIC or COMPRESSED format, there is
			no prefix of externally stored columns in the
			clustered index record. Build a cache of
			column prefixes. */
			ext = &node->ext;
		} else {
			/* REDUNDANT and COMPACT formats store a local
			768-byte prefix of each externally stored
			column. No cache is needed. */
			ext = NULL;
			node->ext = NULL;
		}

		node->row = row_build(ROW_COPY_DATA, clust_index, rec,
				      offsets, NULL, ext, node->heap);
		if (node->update) {
			node->undo_row = dtuple_copy(node->row, node->heap);
			row_upd_replace(node->undo_row, &node->undo_ext,
					clust_index, node->update, node->heap);
		} else {
			node->undo_row = NULL;
			node->undo_ext = NULL;
		}

		btr_pcur_store_position(&(node->pcur), &mtr);

		ret = TRUE;
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
	return(ret);
}
/***********************************************************//**
Removes a delete marked clustered index record if possible.
@return TRUE if success, or if not found, or if modified after the
delete marking */
static
ibool
row_purge_remove_clust_if_poss_low(
/*===============================*/
	purge_node_t*	node,	/*!< in: row purge node */
	ulint		mode)	/*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
	dict_index_t*	index;
	btr_pcur_t*	pcur;
	btr_cur_t*	btr_cur;
	ibool		success;
	ulint		err;
	mtr_t		mtr;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	rec_offs_init(offsets_);

	index = dict_table_get_first_index(node->table);

	pcur = &(node->pcur);
	btr_cur = btr_pcur_get_btr_cur(pcur);

	log_free_check();
	mtr_start(&mtr);

	success = row_purge_reposition_pcur(mode, node, &mtr);

	if (!success) {
		/* The record is already removed */

		btr_pcur_commit_specify_mtr(pcur, &mtr);

		return(TRUE);
	}

	rec = btr_pcur_get_rec(pcur);

	if (node->roll_ptr != row_get_rec_roll_ptr(
		    rec, index, rec_get_offsets(rec, index, offsets_,
						ULINT_UNDEFINED, &heap))) {
		if (UNIV_LIKELY_NULL(heap)) {
			mem_heap_free(heap);
		}
		/* Someone else has modified the record later: do not
		remove */
		btr_pcur_commit_specify_mtr(pcur, &mtr);

		return(TRUE);
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}

	if (mode == BTR_MODIFY_LEAF) {
		success = btr_cur_optimistic_delete(btr_cur, &mtr);
	} else {
		ut_ad(mode == BTR_MODIFY_TREE);
		btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
					   RB_NONE, &mtr);

		if (err == DB_SUCCESS) {
			success = TRUE;
		} else if (err == DB_OUT_OF_FILE_SPACE) {
			success = FALSE;
		} else {
			ut_error;
		}
	}

	btr_pcur_commit_specify_mtr(pcur, &mtr);

	return(success);
}
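/* Illustrative sketch, not from the original source: the two-phase
delete pattern a caller of the function above would use.  First try a
cheap optimistic delete that latches only the leaf page; if that fails
(the page would underflow), escalate to BTR_MODIFY_TREE, which latches
the tree and may merge pages, and retry a bounded number of times on
out-of-file-space errors.  The helper name is hypothetical. */

#include "univ.i"
#include "btr0cur.h"	/* BTR_CUR_RETRY_DELETE_N_TIMES, ... */
#include "os0thread.h"	/* os_thread_sleep() */
#include "row0purge.h"	/* purge_node_t */

static
void
purge_remove_clust_if_poss(
/*=======================*/
	purge_node_t*	node)	/* in: row purge node */
{
	ibool	success;
	ulint	n_tries	= 0;

	if (row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_LEAF)) {

		return;
	}
retry:
	success = row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_TREE);

	/* The pessimistic delete can fail only for lack of file space:
	wait a moment for space to be freed and retry. */
	if (!success && n_tries < BTR_CUR_RETRY_DELETE_N_TIMES) {
		n_tries++;

		os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);

		goto retry;
	}

	ut_a(success);
}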