/*******************************************************************//**
An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index.
@return own: row built; see the NOTE below! */
UNIV_INTERN
dtuple_t*
row_build(
/*======*/
	ulint			type,	/*!< in: ROW_COPY_POINTERS or
					ROW_COPY_DATA; the latter
					copies also the data fields to
					heap while the first only
					places pointers to data fields
					on the index page, and thus is
					more efficient */
	const dict_index_t*	index,	/*!< in: clustered index */
	const rec_t*		rec,	/*!< in: record in the clustered
					index; NOTE: in the case
					ROW_COPY_POINTERS the data
					fields in the row will point
					directly into this record,
					therefore, the buffer page of
					this record must be at least
					s-latched and the latch held
					as long as the row dtuple is used! */
	const ulint*		offsets,/*!< in: rec_get_offsets(rec,index)
					or NULL, in which case this function
					will invoke rec_get_offsets() */
	const dict_table_t*	col_table,
					/*!< in: table, to check which
					externally stored columns
					occur in the ordering columns
					of an index, or NULL if
					index->table should be
					consulted instead */
	row_ext_t**		ext,	/*!< out, own: cache of
					externally stored column
					prefixes, or NULL */
	mem_heap_t*		heap)	/*!< in: memory heap from which
					the memory needed is allocated */
{
	dtuple_t*		row;
	const dict_table_t*	table;
	ulint			n_fields;
	ulint			n_ext_cols;
	ulint*			ext_cols = NULL; /* remove warning */
	ulint			len;
	ulint			row_len;
	byte*			buf;
	ulint			i;
	ulint			j;
	mem_heap_t*		tmp_heap	= NULL;
	ulint			offsets_[REC_OFFS_NORMAL_SIZE];
	rec_offs_init(offsets_);

	ut_ad(index && rec && heap);
	ut_ad(dict_index_is_clust(index));
	ut_ad(!mutex_own(&kernel_mutex));

	if (!offsets) {
		offsets = rec_get_offsets(rec, index, offsets_,
					  ULINT_UNDEFINED, &tmp_heap);
	} else {
		ut_ad(rec_offs_validate(rec, index, offsets));
	}

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
	if (rec_offs_any_null_extern(rec, offsets)) {
		/* This condition can occur during crash recovery
		before trx_rollback_active() has completed execution.

		This condition is possible if the server crashed
		during an insert or update-by-delete-and-insert before
		btr_store_big_rec_extern_fields() did mtr_commit() all
		BLOB pointers to the freshly inserted clustered index
		record. */
		ut_a(trx_assert_recovered(
			     row_get_rec_trx_id(rec, index, offsets)));
		ut_a(trx_undo_roll_ptr_is_insert(
			     row_get_rec_roll_ptr(rec, index, offsets)));
	}
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

	if (type != ROW_COPY_POINTERS) {
		/* Take a copy of rec to heap */
		buf = mem_heap_alloc(heap, rec_offs_size(offsets));
		rec = rec_copy(buf, rec, offsets);
		/* Avoid a debug assertion in rec_offs_validate(). */
		rec_offs_make_valid(rec, index, (ulint*) offsets);
	}

	table = index->table;
	row_len = dict_table_get_n_cols(table);

	row = dtuple_create(heap, row_len);

	dict_table_copy_types(row, table);

	dtuple_set_info_bits(row, rec_get_info_bits(
				     rec, dict_table_is_comp(table)));

	n_fields = rec_offs_n_fields(offsets);
	n_ext_cols = rec_offs_n_extern(offsets);
	if (n_ext_cols) {
		ext_cols = mem_heap_alloc(heap, n_ext_cols * sizeof *ext_cols);
	}

	for (i = j = 0; i < n_fields; i++) {
		dict_field_t*		ind_field
			= dict_index_get_nth_field(index, i);
		const dict_col_t*	col
			= dict_field_get_col(ind_field);
		ulint			col_no
			= dict_col_get_no(col);
		dfield_t*		dfield
			= dtuple_get_nth_field(row, col_no);

		if (ind_field->prefix_len == 0) {

			const byte*	field = rec_get_nth_field(
				rec, offsets, i, &len);

			dfield_set_data(dfield, field, len);
		}

		if (rec_offs_nth_extern(offsets, i)) {
			dfield_set_ext(dfield);

			if (UNIV_LIKELY_NULL(col_table)) {
				ut_a(col_no
				     < dict_table_get_n_cols(col_table));
				col = dict_table_get_nth_col(
					col_table, col_no);
			}

			if (col->ord_part) {
				/* We will have to fetch prefixes of
				externally stored columns that are
				referenced by column prefixes. */
				ext_cols[j++] = col_no;
			}
		}
	}

	ut_ad(dtuple_check_typed(row));

	if (!ext) {
		/* REDUNDANT and COMPACT formats store a local
		768-byte prefix of each externally stored column.
		No cache is needed. */
		ut_ad(dict_table_get_format(index->table)
		      < DICT_TF_FORMAT_ZIP);
	} else if (j) {
		*ext = row_ext_create(j, ext_cols, row,
				      dict_table_zip_size(index->table),
				      heap);
	} else {
		*ext = NULL;
	}

	if (tmp_heap) {
		mem_heap_free(tmp_heap);
	}

	return(row);
}
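
/* Illustration only (not part of the original file): a minimal,
hypothetical caller sketch for row_build() above, showing the copy
modes and the latching contract.  build_row_example() and its
arguments are made up for this sketch. */
#if 0
static
dtuple_t*
build_row_example(
/*==============*/
	const dict_index_t*	clust_index,	/* in: clustered index */
	const rec_t*		rec,		/* in: record on a page
						that the caller keeps
						latched */
	mem_heap_t*		heap)		/* in: caller-owned heap */
{
	row_ext_t*	ext;
	dtuple_t*	row;

	/* Passing NULL for offsets makes row_build() invoke
	rec_get_offsets() internally.  With ROW_COPY_POINTERS the
	returned dtuple aliases the index page, so the page latch must
	be held as long as the row is used; ROW_COPY_DATA would instead
	copy the fields into heap and drop that requirement. */
	row = row_build(ROW_COPY_POINTERS, clust_index, rec,
			NULL,	/* offsets: computed internally */
			NULL,	/* col_table: use clust_index->table */
			&ext, heap);

	return(row);
}
#endif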
/*****************************************************************//**
Finds out if an active transaction has inserted or modified a secondary
index record. NOTE: the kernel mutex is temporarily released in this
function!
@return NULL if committed, else the active transaction */
UNIV_INTERN
trx_t*
row_vers_impl_x_locked_off_kernel(
/*==============================*/
	const rec_t*	rec,	/*!< in: record in a secondary index */
	dict_index_t*	index,	/*!< in: the secondary index */
	const ulint*	offsets)/*!< in: rec_get_offsets(rec, index) */
{
	dict_index_t*	clust_index;
	rec_t*		clust_rec;
	ulint*		clust_offsets;
	rec_t*		version;
	trx_id_t	trx_id;
	mem_heap_t*	heap;
	mem_heap_t*	heap2;
	dtuple_t*	row;
	dtuple_t*	entry	= NULL; /* assignment to eliminate compiler
					warning */
	trx_t*		trx;
	ulint		rec_del;
#ifdef UNIV_DEBUG
	ulint		err;
#endif /* UNIV_DEBUG */
	mtr_t		mtr;
	ulint		comp;

	ut_ad(mutex_own(&kernel_mutex));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	mutex_exit(&kernel_mutex);

	mtr_start(&mtr);

	/* Search for the clustered index record. This is a time-consuming
	operation; therefore we release the kernel mutex. Also, the release
	is required by the latching order convention. The latch on the
	clustered index locks the top of the stack of versions. We also
	reserve purge_latch to lock the bottom of the version stack. */

	clust_rec = row_get_clust_rec(BTR_SEARCH_LEAF, rec, index,
				      &clust_index, &mtr);
	if (!clust_rec) {
		/* In a rare case it is possible that no clust rec is found
		for a secondary index record: if in row0umod.c
		row_undo_mod_remove_clust_low() we have already removed
		the clust rec, while purge is still cleaning and removing
		secondary index records associated with earlier versions of
		the clustered index record. In that case there cannot be
		any implicit lock on the secondary index record, because
		an active transaction which has modified the secondary index
		record has also modified the clustered index record. And in
		a rollback we always undo the modifications to secondary index
		records before the clustered index record. */

		mutex_enter(&kernel_mutex);
		mtr_commit(&mtr);

		return(NULL);
	}

	heap = mem_heap_create(1024);
	clust_offsets = rec_get_offsets(clust_rec, clust_index, NULL,
					ULINT_UNDEFINED, &heap);
	trx_id = row_get_rec_trx_id(clust_rec, clust_index, clust_offsets);

	mtr_s_lock(&(purge_sys->latch), &mtr);

	mutex_enter(&kernel_mutex);

	trx = NULL;
	if (!trx_is_active(trx_id)) {
		/* The transaction that modified or inserted clust_rec is no
		longer active: no implicit lock on rec */
		goto exit_func;
	}

	if (!lock_check_trx_id_sanity(trx_id, clust_rec, clust_index,
				      clust_offsets, TRUE)) {
		/* Corruption noticed: try to avoid a crash by returning */
		goto exit_func;
	}

	comp = page_rec_is_comp(rec);
	ut_ad(index->table == clust_index->table);
	ut_ad(!!comp == dict_table_is_comp(index->table));
	ut_ad(!comp == !page_rec_is_comp(clust_rec));

	/* We look up if some earlier version, which was modified by the
	trx_id transaction, of the clustered index record would require rec
	to be in a different state (delete marked or unmarked, or have
	different field values, or not existing). If there is such a version,
	then rec was modified by the trx_id transaction, and it has an
	implicit x-lock on rec. Note that if clust_rec itself would require
	rec to be in a different state, then the trx_id transaction has not
	yet had time to modify rec, and does not necessarily have an implicit
	x-lock on rec. */

	rec_del = rec_get_deleted_flag(rec, comp);
	trx = NULL;

	version = clust_rec;

	for (;;) {
		rec_t*		prev_version;
		ulint		vers_del;
		row_ext_t*	ext;
		trx_id_t	prev_trx_id;

		mutex_exit(&kernel_mutex);

		/* While we retrieve an earlier version of clust_rec, we
		release the kernel mutex, because it may take time to access
		the disk. After the release, we have to check if the trx_id
		transaction is still active. We keep the semaphore in mtr on
		the clust_rec page, so that no other transaction can update
		it and get an implicit x-lock on rec. */

		heap2 = heap;
		heap = mem_heap_create(1024);
#ifdef UNIV_DEBUG
		err =
#endif /* UNIV_DEBUG */
		trx_undo_prev_version_build(clust_rec, &mtr, version,
					    clust_index, clust_offsets,
					    heap, &prev_version);
		mem_heap_free(heap2); /* free version and clust_offsets */

		if (prev_version == NULL) {
			mutex_enter(&kernel_mutex);

			if (!trx_is_active(trx_id)) {
				/* Transaction no longer active: no
				implicit x-lock */

				break;
			}

			/* If the transaction is still active,
			clust_rec must be a fresh insert, because no
			previous version was found. */
			ut_ad(err == DB_SUCCESS);

			/* It was a freshly inserted version: there is an
			implicit x-lock on rec */

			trx = trx_get_on_id(trx_id);

			break;
		}

		clust_offsets = rec_get_offsets(prev_version, clust_index,
						NULL, ULINT_UNDEFINED, &heap);

		vers_del = rec_get_deleted_flag(prev_version, comp);
		prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
						 clust_offsets);

		/* The stack of versions is locked by mtr. Thus, it
		is safe to fetch the prefixes for externally stored
		columns. */
		row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
				clust_offsets, NULL, &ext, heap);
		entry = row_build_index_entry(row, ext, index, heap);
		/* entry may be NULL if a record was inserted in place
		of a deleted record, and the BLOB pointers of the new
		record were not initialized yet. But in that case,
		prev_version should be NULL. */
		ut_a(entry);

		mutex_enter(&kernel_mutex);

		if (!trx_is_active(trx_id)) {
			/* Transaction no longer active: no implicit
			x-lock */

			break;
		}

		/* If we get here, we know that the trx_id transaction is
		still active and it has modified prev_version. Let us check
		if prev_version would require rec to be in a different
		state. */

		/* The previous version of clust_rec must be
		accessible, because the transaction is still active
		and clust_rec was not a fresh insert. */
		ut_ad(err == DB_SUCCESS);

		/* We check if entry and rec compare equal in the
		alphabetical ordering */
		if (0 == cmp_dtuple_rec(entry, rec, offsets)) {
			/* The delete marks of rec and prev_version should be
			equal for rec to be in the state required by
			prev_version */

			if (rec_del != vers_del) {
				trx = trx_get_on_id(trx_id);

				break;
			}

			/* It is possible that the row was updated so that
			the secondary index record remained the same in
			alphabetical ordering, but the field values still
			changed: for example, 'abc' -> 'ABC'. Check for
			that too. */

			dtuple_set_types_binary(entry,
						dtuple_get_n_fields(entry));
			if (0 != cmp_dtuple_rec(entry, rec, offsets)) {

				trx = trx_get_on_id(trx_id);

				break;
			}
		} else if (!rec_del) {
			/* The delete mark should be set in rec for it to be
			in the state required by prev_version */

			trx = trx_get_on_id(trx_id);

			break;
		}

		if (0 != ut_dulint_cmp(trx_id, prev_trx_id)) {
			/* The versions modified by the trx_id transaction
			end at prev_version: no implicit x-lock */

			break;
		}

		version = prev_version;
	}/* for (;;) */

exit_func:
	mtr_commit(&mtr);
	mem_heap_free(heap);

	return(trx);
}
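
/* Illustration only (not part of the original file): a hypothetical
sketch of how lock code might consult row_vers_impl_x_locked_off_kernel()
above.  rec_has_impl_x_lock_example() is made up for this sketch. */
#if 0
static
ibool
rec_has_impl_x_lock_example(
/*========================*/
	const rec_t*	rec,	/* in: secondary index record */
	dict_index_t*	index,	/* in: secondary index of rec */
	const ulint*	offsets)/* in: rec_get_offsets(rec, index) */
{
	trx_t*	impl_trx;

	/* The function must be entered with the kernel mutex held;
	NOTE that it releases and re-acquires the mutex internally,
	so any trx state read before the call may be stale after it. */
	ut_ad(mutex_own(&kernel_mutex));

	impl_trx = row_vers_impl_x_locked_off_kernel(rec, index, offsets);

	/* NULL means the modifying transaction has committed:
	no implicit x-lock on rec. */
	return(impl_trx != NULL);
}
#endif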
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
which should be seen by a semi-consistent read.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_semi_consistent_read(
/*====================================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	const rec_t**	old_vers)/*!< out: rec, old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;
	trx_id_t	rec_trx_id	= ut_dulint_zero;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	rw_lock_s_lock(&(purge_sys->latch));
	/* The S-latch on purge_sys prevents the purge view from changing.
	Thus, if we have an uncommitted transaction at this point, then
	purge cannot remove its undo log even if the transaction could
	commit now. */

	version = rec;

	for (;;) {
		trx_t*		version_trx;
		mem_heap_t*	heap2;
		rec_t*		prev_version;
		trx_id_t	version_trx_id;

		version_trx_id = row_get_rec_trx_id(version, index, *offsets);
		if (rec == version) {
			rec_trx_id = version_trx_id;
		}

		mutex_enter(&kernel_mutex);
		version_trx = trx_get_on_id(version_trx_id);
		if (version_trx
		    && (version_trx->conc_state == TRX_COMMITTED_IN_MEMORY
			|| version_trx->conc_state == TRX_NOT_STARTED)) {

			version_trx = NULL;
		}
		mutex_exit(&kernel_mutex);

		if (!version_trx) {
			/* We found a version that belongs to a
			committed transaction: return it. */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
			ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

			if (rec == version) {
				*old_vers = rec;
				err = DB_SUCCESS;
				break;
			}

			/* We assume that a rolled-back transaction stays in
			TRX_ACTIVE state until all the changes have been
			rolled back and the transaction is removed from
			the global list of transactions. */

			if (!ut_dulint_cmp(rec_trx_id, version_trx_id)) {
				/* The transaction was committed while
				we searched for earlier versions.
				Return the current version as a
				semi-consistent read. */

				version = rec;
				*offsets = rec_get_offsets(version,
							   index, *offsets,
							   ULINT_UNDEFINED,
							   offset_heap);
			}

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		heap2 = heap;
		heap = mem_heap_create(1024);

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
		*offsets = rec_get_offsets(version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
	}/* for (;;) */

	if (heap) {
		mem_heap_free(heap);
	}
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
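
/* Illustration only (not part of the original file): a hypothetical
sketch of a caller of row_vers_build_for_semi_consistent_read() above,
as a locking read might use it to see past an uncommitted change.
fetch_last_committed_example() is made up for this sketch. */
#if 0
static
ulint
fetch_last_committed_example(
/*=========================*/
	const rec_t*	rec,	/* in: clustered index record; the page
				must be latched by mtr */
	mtr_t*		mtr,	/* in: mtr holding the latch on rec */
	dict_index_t*	index,	/* in: clustered index */
	mem_heap_t*	in_heap,/* in: heap for the returned version */
	const rec_t**	old_vers)/* out: last committed version, rec
				itself, or NULL */
{
	mem_heap_t*	offset_heap	= NULL;
	ulint*		offsets;
	ulint		err;

	offsets = rec_get_offsets(rec, index, NULL,
				  ULINT_UNDEFINED, &offset_heap);

	err = row_vers_build_for_semi_consistent_read(
		rec, mtr, index, &offsets, &offset_heap,
		in_heap, old_vers);

	if (offset_heap) {
		mem_heap_free(offset_heap);
	}

	/* On DB_SUCCESS, *old_vers is a copy in in_heap, rec itself,
	or NULL if the record was freshly inserted by a still-active
	transaction. */
	return(err);
}
#endif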
/*****************************************************************//**
Constructs the version of a clustered index record which a consistent
read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
ulint
row_vers_build_for_consistent_read(
/*===============================*/
	const rec_t*	rec,	/*!< in: record in a clustered index; the
				caller must have a latch on the page; this
				latch locks the top of the stack of versions
				of this record */
	mtr_t*		mtr,	/*!< in: mtr holding the latch on rec */
	dict_index_t*	index,	/*!< in: the clustered index */
	ulint**		offsets,/*!< in/out: offsets returned by
				rec_get_offsets(rec, index) */
	read_view_t*	view,	/*!< in: the consistent read view */
	mem_heap_t**	offset_heap,/*!< in/out: memory heap from which
				the offsets are allocated */
	mem_heap_t*	in_heap,/*!< in: memory heap from which the memory for
				*old_vers is allocated; memory for possible
				intermediate versions is allocated and freed
				locally within the function */
	rec_t**		old_vers)/*!< out, own: old version, or NULL if the
				record does not exist in the view, that is,
				it was freshly inserted afterwards */
{
	const rec_t*	version;
	rec_t*		prev_version;
	trx_id_t	trx_id;
	mem_heap_t*	heap		= NULL;
	byte*		buf;
	ulint		err;

	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
	      || mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_S_FIX));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(&(purge_sys->latch), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	ut_ad(rec_offs_validate(rec, index, *offsets));

	trx_id = row_get_rec_trx_id(rec, index, *offsets);

	ut_ad(!read_view_sees_trx_id(view, trx_id));

	rw_lock_s_lock(&(purge_sys->latch));
	version = rec;

	for (;;) {
		mem_heap_t*	heap2	= heap;
		trx_undo_rec_t* undo_rec;
		roll_ptr_t	roll_ptr;
		undo_no_t	undo_no;
		heap = mem_heap_create(1024);

		/* If we have a high-granularity consistent read view and
		the creating transaction of the view is the same as trx_id
		in the record, we see this record only when the undo_no of
		the record is < the undo_no in the view. */

		if (view->type == VIEW_HIGH_GRANULARITY
		    && ut_dulint_cmp(view->creator_trx_id, trx_id) == 0) {

			roll_ptr = row_get_rec_roll_ptr(version, index,
							*offsets);
			undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
			undo_no = trx_undo_rec_get_undo_no(undo_rec);
			mem_heap_empty(heap);

			if (ut_dulint_cmp(view->undo_no, undo_no) > 0) {
				/* The view already sees this version: we can
				copy it to in_heap and return */

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
				ut_a(!rec_offs_any_null_extern(
					     version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

				buf = mem_heap_alloc(
					in_heap, rec_offs_size(*offsets));
				*old_vers = rec_copy(buf, version, *offsets);
				rec_offs_make_valid(*old_vers, index,
						    *offsets);
				err = DB_SUCCESS;

				break;
			}
		}

		err = trx_undo_prev_version_build(rec, mtr, version, index,
						  *offsets, heap,
						  &prev_version);
		if (heap2) {
			mem_heap_free(heap2); /* free version */
		}

		if (err != DB_SUCCESS) {
			break;
		}

		if (prev_version == NULL) {
			/* It was a freshly inserted version */
			*old_vers = NULL;
			err = DB_SUCCESS;

			break;
		}

		*offsets = rec_get_offsets(prev_version, index, *offsets,
					   ULINT_UNDEFINED, offset_heap);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
		ut_a(!rec_offs_any_null_extern(prev_version, *offsets));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

		trx_id = row_get_rec_trx_id(prev_version, index, *offsets);

		if (read_view_sees_trx_id(view, trx_id)) {

			/* The view already sees this version: we can copy
			it to in_heap and return */

			buf = mem_heap_alloc(in_heap,
					     rec_offs_size(*offsets));
			*old_vers = rec_copy(buf, prev_version, *offsets);
			rec_offs_make_valid(*old_vers, index, *offsets);
			err = DB_SUCCESS;

			break;
		}

		version = prev_version;
	}/* for (;;) */

	mem_heap_free(heap);
	rw_lock_s_unlock(&(purge_sys->latch));

	return(err);
}
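
/* Illustration only (not part of the original file): a hypothetical
sketch of a caller of row_vers_build_for_consistent_read() above, as a
consistent read might use it once it sees that the record's trx id is
not visible in its read view.  fetch_visible_version_example() is made
up for this sketch. */
#if 0
static
ulint
fetch_visible_version_example(
/*==========================*/
	const rec_t*	rec,	/* in: clustered index record; the page
				must be latched by mtr */
	mtr_t*		mtr,	/* in: mtr holding the latch on rec */
	dict_index_t*	index,	/* in: clustered index */
	read_view_t*	view,	/* in: consistent read view */
	mem_heap_t*	in_heap,/* in: heap for the returned version */
	rec_t**		old_vers)/* out: visible version, or NULL */
{
	mem_heap_t*	offset_heap	= NULL;
	ulint*		offsets;
	ulint		err;

	offsets = rec_get_offsets(rec, index, NULL,
				  ULINT_UNDEFINED, &offset_heap);

	/* Precondition of the function: the present version of rec
	must not be visible in view. */
	ut_ad(!read_view_sees_trx_id(
		      view, row_get_rec_trx_id(rec, index, offsets)));

	err = row_vers_build_for_consistent_read(
		rec, mtr, index, &offsets, view, &offset_heap,
		in_heap, old_vers);

	if (offset_heap) {
		mem_heap_free(offset_heap);
	}

	/* DB_MISSING_HISTORY means purge has removed an undo log that
	would have been needed to reconstruct the visible version. */
	return(err);
}
#endif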