ibool
dtuple_check_typed_no_assert(
/*=========================*/
				/* out: TRUE if ok */
	dtuple_t*	tuple)	/* in: tuple */
{
	dfield_t*	field;
	ulint		i;
	char		err_buf[1000];

	if (dtuple_get_n_fields(tuple) > REC_MAX_N_FIELDS) {
		fprintf(stderr,
			"InnoDB: Error: index entry has %lu fields\n",
			dtuple_get_n_fields(tuple));

		dtuple_sprintf(err_buf, 900, tuple);
		fprintf(stderr, "InnoDB: Tuple contents: %s\n", err_buf);

		return(FALSE);
	}

	for (i = 0; i < dtuple_get_n_fields(tuple); i++) {

		field = dtuple_get_nth_field(tuple, i);

		if (!dfield_check_typed_no_assert(field)) {

			dtuple_sprintf(err_buf, 900, tuple);
			fprintf(stderr, "InnoDB: Tuple contents: %s\n",
				err_buf);

			return(FALSE);
		}
	}

	return(TRUE);
}
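/* A minimal usage sketch, not part of the original file: how a caller
could use dtuple_check_typed_no_assert() to reject a corrupt index entry
without crashing the server. The wrapper name my_insert_entry_checked()
and the callee my_insert_entry() are illustrative assumptions; DB_ERROR
is the generic InnoDB error code. */

static
ulint
my_insert_entry_checked(
/*====================*/
				/* out: DB_SUCCESS, DB_ERROR if the tuple
				fails the type check, or the error code
				returned by the insert */
	dtuple_t*	entry,	/* in: index entry to insert */
	que_thr_t*	thr)	/* in: query thread */
{
	if (!dtuple_check_typed_no_assert(entry)) {
		/* The check has already printed the tuple contents to
		stderr; refuse the operation instead of asserting */

		return(DB_ERROR);
	}

	return(my_insert_entry(entry, thr));
}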
/***************************************************************
Updates a secondary index entry of a row. */
static
ulint
row_upd_sec_index_entry(
/*====================*/
				/* out: DB_SUCCESS if operation successfully
				completed, else error code or DB_LOCK_WAIT */
	upd_node_t*	node,	/* in: row update node */
	que_thr_t*	thr)	/* in: query thread */
{
	ibool		check_ref;
	ibool		found;
	dict_index_t*	index;
	dtuple_t*	entry;
	btr_pcur_t	pcur;
	btr_cur_t*	btr_cur;
	mem_heap_t*	heap;
	rec_t*		rec;
	ulint		err	= DB_SUCCESS;
	mtr_t		mtr;
	char		err_buf[1000];

	index = node->index;

	check_ref = row_upd_index_is_referenced(index);

	heap = mem_heap_create(1024);

	/* Build old index entry */
	entry = row_build_index_entry(node->row, index, heap);

	log_free_check();
	mtr_start(&mtr);

	found = row_search_index_entry(index, entry, BTR_MODIFY_LEAF,
							&pcur, &mtr);
	btr_cur = btr_pcur_get_btr_cur(&pcur);

	rec = btr_cur_get_rec(btr_cur);

	if (!found) {
		fprintf(stderr,
			"InnoDB: error in sec index entry update in\n"
			"InnoDB: index %s table %s\n",
			index->name, index->table->name);

		dtuple_sprintf(err_buf, 900, entry);
		fprintf(stderr, "InnoDB: tuple %s\n", err_buf);

		rec_sprintf(err_buf, 900, rec);
		fprintf(stderr, "InnoDB: record %s\n", err_buf);

		fprintf(stderr,
			"InnoDB: Make a detailed bug report and send it\n");
		fprintf(stderr, "InnoDB: to [email protected]\n");

		trx_print(thr_get_trx(thr));
	} else {
		/* Delete mark the old index record; it can already be
		delete marked if we return after a lock wait in
		row_ins_index_entry below */

		if (!rec_get_deleted_flag(rec)) {
			err = btr_cur_del_mark_set_sec_rec(0, btr_cur, TRUE,
								thr, &mtr);
			if (err == DB_SUCCESS && check_ref) {
				/* NOTE that the following call loses
				the position of pcur ! */
				err = row_upd_check_references_constraints(
						&pcur, index->table,
						index, thr, &mtr);
				if (err != DB_SUCCESS) {

					goto close_cur;
				}
			}
		}
	}
close_cur:
	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	if (node->is_delete || err != DB_SUCCESS) {

		mem_heap_free(heap);

		return(err);
	}

	/* Build a new index entry */
	row_upd_index_replace_new_col_vals(entry, index, node->update);

	/* Insert new index entry */
	err = row_ins_index_entry(index, entry, NULL, 0, thr);

	mem_heap_free(heap);

	return(err);
}
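/* Hedged sketch, not part of the original file, of the kind of driver
loop this function is written for: an update node walking its remaining
secondary indexes and delegating each one to row_upd_sec_index_entry().
The helper name my_upd_sec_indexes() is an illustrative assumption; the
actual control flow in row0upd.c may differ. */

static
ulint
my_upd_sec_indexes(
/*===============*/
				/* out: DB_SUCCESS, DB_LOCK_WAIT, or
				error code */
	upd_node_t*	node,	/* in: row update node */
	que_thr_t*	thr)	/* in: query thread */
{
	ulint	err = DB_SUCCESS;

	while (node->index != NULL) {

		err = row_upd_sec_index_entry(node, thr);

		if (err != DB_SUCCESS) {
			/* DB_LOCK_WAIT and genuine errors are both
			returned to the caller, which decides whether
			to suspend and retry the step */

			return(err);
		}

		node->index = dict_table_get_next_index(node->index);
	}

	return(err);
}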
big_rec_t*
dtuple_convert_big_rec(
/*===================*/
				/* out, own: created big record vector,
				NULL if we are not able to shorten the
				entry enough, i.e., if there are too many
				short fields in entry */
	dict_index_t*	index,	/* in: index */
	dtuple_t*	entry,	/* in: index entry */
	ulint*		ext_vec,/* in: array of externally stored fields,
				or NULL: if a field already is externally
				stored, then we cannot move it to the
				vector this function returns */
	ulint		n_ext_vec)/* in: number of elements in ext_vec */
{
	mem_heap_t*	heap;
	big_rec_t*	vector;
	dfield_t*	dfield;
	ulint		size;
	ulint		n_fields;
	ulint		longest;
	ulint		longest_i	= ULINT_MAX;
	ibool		is_externally_stored;
	ulint		i;
	ulint		j;
	char		err_buf[1000];

	ut_a(dtuple_check_typed_no_assert(entry));

	size = rec_get_converted_size(entry);

	if (size > 1000000000) {
		fprintf(stderr,
			"InnoDB: Warning: tuple size very big: %lu\n", size);

		dtuple_sprintf(err_buf, 900, entry);
		fprintf(stderr, "InnoDB: Tuple contents: %s\n", err_buf);
	}

	heap = mem_heap_create(size + dtuple_get_n_fields(entry)
					* sizeof(big_rec_field_t) + 1000);

	vector = mem_heap_alloc(heap, sizeof(big_rec_t));

	vector->heap = heap;
	vector->fields = mem_heap_alloc(heap, dtuple_get_n_fields(entry)
						* sizeof(big_rec_field_t));

	/* Decide which fields to shorten: the algorithm is to look for
	the longest field whose type is DATA_BLOB */

	n_fields = 0;

	while ((rec_get_converted_size(entry)
				>= page_get_free_space_of_empty() / 2)
	       || rec_get_converted_size(entry) >= REC_MAX_DATA_SIZE) {

		longest = 0;

		for (i = dict_index_get_n_unique_in_tree(index);
				i < dtuple_get_n_fields(entry); i++) {

			/* Skip over fields which already are externally
			stored */

			is_externally_stored = FALSE;

			if (ext_vec) {
				for (j = 0; j < n_ext_vec; j++) {
					if (ext_vec[j] == i) {
						is_externally_stored = TRUE;
					}
				}
			}

			if (!is_externally_stored
			    && dict_index_get_nth_type(index, i)->mtype
							== DATA_BLOB) {

				dfield = dtuple_get_nth_field(entry, i);

				if (dfield->len != UNIV_SQL_NULL
				    && dfield->len > longest) {

					longest = dfield->len;

					longest_i = i;
				}
			}
		}

		/* We do not store externally fields which are smaller than
		DICT_MAX_COL_PREFIX_LEN */

		ut_a(DICT_MAX_COL_PREFIX_LEN > REC_1BYTE_OFFS_LIMIT);

		if (longest < BTR_EXTERN_FIELD_REF_SIZE + 10
						+ DICT_MAX_COL_PREFIX_LEN) {
			/* Cannot shorten more */

			mem_heap_free(heap);

			return(NULL);
		}

		/* Move data from field longest_i to big rec vector;
		we do not let data size of the remaining entry
		drop below 128 which is the limit for the 2-byte
		offset storage format in a physical record. This
		we accomplish by storing 128 bytes of data in entry
		itself, and only the remaining part to big rec vec.

		We store the first bytes locally to the record. Then
		we can calculate all ordering fields in all indexes
		from locally stored data. */

		dfield = dtuple_get_nth_field(entry, longest_i);
		vector->fields[n_fields].field_no = longest_i;

		ut_a(dfield->len > DICT_MAX_COL_PREFIX_LEN);

		vector->fields[n_fields].len = dfield->len
						- DICT_MAX_COL_PREFIX_LEN;

		vector->fields[n_fields].data = mem_heap_alloc(heap,
						vector->fields[n_fields].len);

		/* Copy data (from the end of field) to big rec vector */

		ut_memcpy(vector->fields[n_fields].data,
			((byte*)dfield->data) + dfield->len
					- vector->fields[n_fields].len,
			vector->fields[n_fields].len);

		dfield->len = dfield->len - vector->fields[n_fields].len
						+ BTR_EXTERN_FIELD_REF_SIZE;

		/* Set the extern field reference in dfield to zero */
		memset(((byte*)dfield->data) + dfield->len
					- BTR_EXTERN_FIELD_REF_SIZE,
			0, BTR_EXTERN_FIELD_REF_SIZE);

		n_fields++;
	}

	vector->n_fields = n_fields;

	return(vector);
}
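/* Hedged caller-side sketch, not part of the original file: roughly how
an insert path might use dtuple_convert_big_rec() when a converted record
would not fit on a page. The surrounding control flow and the names
my_insert_big_entry() and my_insert_shortened_entry() are illustrative
assumptions; only dtuple_convert_big_rec(), DB_TOO_BIG_RECORD and
dtuple_big_rec_free() are taken from InnoDB itself. */

static
ulint
my_insert_big_entry(
/*================*/
				/* out: DB_SUCCESS, DB_TOO_BIG_RECORD, or
				error code of the insert */
	dict_index_t*	index,	/* in: index */
	dtuple_t*	entry,	/* in/out: index entry; long DATA_BLOB
				fields may be shortened in place */
	que_thr_t*	thr)	/* in: query thread */
{
	big_rec_t*	big_rec_vec	= NULL;
	ulint		err;

	if (rec_get_converted_size(entry)
				>= page_get_free_space_of_empty() / 2) {

		/* Try to move the longest DATA_BLOB fields out of the
		entry; NULL, 0 means no fields are externally stored yet */

		big_rec_vec = dtuple_convert_big_rec(index, entry, NULL, 0);

		if (big_rec_vec == NULL) {
			/* Even after externalizing fields the entry
			would not fit on a page */

			return(DB_TOO_BIG_RECORD);
		}
	}

	err = my_insert_shortened_entry(index, entry, thr);

	if (big_rec_vec != NULL) {
		/* In the real insert path the externally stored parts
		would be written out (e.g. with
		btr_store_big_rec_extern_fields()) before the vector is
		released */

		dtuple_big_rec_free(big_rec_vec);
	}

	return(err);
}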