/******************************************************************//**
Sets a thread priority. */
UNIV_INTERN
void
os_thread_set_priority(
/*===================*/
	os_thread_t	handle,	/*!< in: OS handle to the thread */
	ulint		pri)	/*!< in: priority */
{
#ifdef __WIN__
	int	win_pri;

	/* Translate the InnoDB priority constant to the Windows one;
	any other value is a programming error. */
	switch (pri) {
	case OS_THREAD_PRIORITY_BACKGROUND:
		win_pri = THREAD_PRIORITY_BELOW_NORMAL;
		break;
	case OS_THREAD_PRIORITY_NORMAL:
		win_pri = THREAD_PRIORITY_NORMAL;
		break;
	case OS_THREAD_PRIORITY_ABOVE_NORMAL:
		win_pri = THREAD_PRIORITY_HIGHEST;
		break;
	default:
		ut_error;
	}

	ut_a(SetThreadPriority(handle, win_pri));
#else
	/* Only implemented on Windows; a no-op elsewhere. */
	UT_NOT_USED(handle);
	UT_NOT_USED(pri);
#endif
}
void
os_process_set_priority_boost(
/*==========================*/
	ibool	do_boost)	/* in: TRUE if priority boost should be
				done, FALSE if not */
{
#ifdef __WIN__
	ibool	no_boost;

	/* The Windows API takes a "disable boost" flag, so invert the
	caller's request. */
	no_boost = do_boost ? FALSE : TRUE;

	/* The inversion above relies on TRUE being exactly 1. */
#if TRUE != 1
# error "TRUE != 1"
#endif

	/* Does not do anything currently!
	SetProcessPriorityBoost(GetCurrentProcess(), no_boost);
	*/
	fputs("Warning: process priority boost setting"
	      " currently not functional!\n", stderr);
#else
	/* Only meaningful on Windows; a no-op elsewhere. */
	UT_NOT_USED(do_boost);
#endif
}
void
os_process_set_priority_boost(
/*==========================*/
	ibool	do_boost)	/* in: TRUE if priority boost should be
				done, FALSE if not */
{
#ifdef __WIN__
	ibool	no_boost;

	/* The Windows API takes a "disable boost" flag, so invert the
	caller's request. */
	if (do_boost) {
		no_boost = FALSE;
	} else {
		no_boost = TRUE;
	}

	/* The inversion above relies on TRUE being exactly 1. */
	ut_a(TRUE == 1);

	/* Does not do anything currently!
	SetProcessPriorityBoost(GetCurrentProcess(), no_boost);
	*/
	/* FIX: the warning used to go to stdout via printf(); all other
	diagnostics in this module are written to stderr, and stdout may
	be redirected or fully buffered. Route it to stderr instead. */
	fputs("Warning: process priority boost setting currently not functional!\n",
	      stderr);
#else
	/* Only meaningful on Windows; a no-op elsewhere. */
	UT_NOT_USED(do_boost);
#endif
}
/***************************************************************
Checks if also the previous version of the clustered index record was
modified or inserted by the same transaction, and its undo number is such
that it should be undone in the same rollback. */
UNIV_INLINE
ibool
row_undo_mod_undo_also_prev_vers(
/*=============================*/
				/* out: TRUE if also previous modify or
				insert of this row should be undone */
	undo_node_t*	node,	/* in: row undo node */
	que_thr_t*	thr,	/* in: query thread */
	dulint*		undo_no)/* out: the undo number */
{
	trx_undo_rec_t*	undo_rec;
	trx_t*		trx;

	UT_NOT_USED(thr);

	trx = node->trx;

	if (ut_dulint_cmp(node->new_trx_id, trx->id) != 0) {
		/* The previous version was written by some other
		transaction: it is not part of this rollback. */

		return(FALSE);
	}

	undo_rec = trx_undo_get_undo_rec_low(node->new_roll_ptr, node->heap);

	*undo_no = trx_undo_rec_get_undo_no(undo_rec);

	/* Undo the previous version too only if its undo number has not
	dropped below the rollback limit of the transaction. */

	return(ut_dulint_cmp(trx->roll_limit, *undo_no) <= 0
	       ? TRUE : FALSE);
}
/**********************************************************//**
Waits for an event object until it is in the signaled state. If
srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS this also exits the
waiting thread when the event becomes signaled (or immediately if the
event is already in the signaled state).

Typically, if the event has been signalled after the os_event_reset()
we'll return immediately because event->is_set == TRUE.
There are, however, situations (e.g.: sync_array code) where we may
lose this information. For example:

thread A calls os_event_reset()
thread B calls os_event_set()   [event->is_set == TRUE]
thread C calls os_event_reset() [event->is_set == FALSE]
thread A calls os_event_wait()  [infinite wait!]
thread C calls os_event_wait()  [infinite wait!]

Where such a scenario is possible, to avoid infinite wait, the value
returned by os_event_reset() should be passed in as
reset_sig_count. */
UNIV_INTERN
void
os_event_wait_low(
/*==============*/
	os_event_t	event,		/*!< in: event to wait */
	ib_int64_t	reset_sig_count)/*!< in: zero or the value
					returned by previous call of
					os_event_reset(). */
{
#ifdef __WIN__
	DWORD	err;

	ut_a(event);

	/* On Windows the kernel event object itself remembers the
	signal, so the signal-count machinery is not needed. */
	UT_NOT_USED(reset_sig_count);

	/* Specify an infinite time limit for waiting */
	err = WaitForSingleObject(event->handle, INFINITE);

	ut_a(err == WAIT_OBJECT_0);

	if (srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
		os_thread_exit(NULL);
	}
#else
	ib_int64_t	old_signal_count;

	os_fast_mutex_lock(&(event->os_mutex));

	/* If the caller passed the value from an earlier
	os_event_reset(), use it as the baseline; otherwise take the
	current count. A signal that happened after the baseline makes
	signal_count differ and we return without blocking. */
	if (reset_sig_count) {
		old_signal_count = reset_sig_count;
	} else {
		old_signal_count = event->signal_count;
	}

	for (;;) {
		if (event->is_set == TRUE
		    || event->signal_count != old_signal_count) {

			os_fast_mutex_unlock(&(event->os_mutex));

			if (srv_shutdown_state
			    == SRV_SHUTDOWN_EXIT_THREADS) {

				os_thread_exit(NULL);
			}

			/* Ok, we may return */

			return;
		}

		/* pthread_cond_wait() atomically releases os_mutex while
		sleeping and re-acquires it before returning. */
		pthread_cond_wait(&(event->cond_var), &(event->os_mutex));

		/* Solaris manual said that spurious wakeups may occur: we
		have to check if the event really has been signaled after
		we came here to wait */
	}
#endif
}
/****************************************************************
Reserves the virtual address window into which AWE physical memory
pages are later mapped. */
byte*
os_awe_allocate_virtual_mem_window(
/*===============================*/
			/* out, own: allocated memory, or NULL if did not
			succeed */
	ulint	size)	/* in: virtual memory allocation size in bytes, must
			be < 2 GB */
{
#ifdef UNIV_SIMULATE_AWE
	ulint	i;

	/* Simulated AWE: carve a 4 kB aligned window out of an ordinary
	heap allocation and create an (initially empty) per-page map. */
	os_awe_simulate_window = ut_align(ut_malloc(4096 + size), 4096);

	os_awe_simulate_window_size = size;

	os_awe_simulate_map = ut_malloc(sizeof(byte*) * (size / 4096));

	for (i = 0; i < (size / 4096); i++) {
		*(os_awe_simulate_map + i) = NULL;
	}

	return(os_awe_simulate_window);

#elif defined(__WIN2000__)
	byte*	ptr;

	/* The window must fit in 2 GB - 1 of address space. */
	if (size > (ulint)0x7FFFFFFFUL) {
		fprintf(stderr,
			"InnoDB: AWE: Cannot allocate %lu bytes"
			" of virtual memory\n", size);

		return(NULL);
	}

	/* MEM_PHYSICAL reserves address space for later
	MapUserPhysicalPages() calls; no backing store is committed. */
	ptr = VirtualAlloc(NULL, (SIZE_T)size, MEM_RESERVE | MEM_PHYSICAL,
			   PAGE_READWRITE);

	if (ptr == NULL) {
		fprintf(stderr,
			"InnoDB: AWE: Cannot allocate %lu bytes"
			" of virtual memory, error %lu\n",
			size, (ulint)GetLastError());

		return(NULL);
	}

	/* Remember the window in module globals for later mapping and
	bounds checks. */
	os_awe_window = ptr;
	os_awe_window_size = size;

	ut_total_allocated_memory += size;

	return(ptr);
#else
	UT_NOT_USED(size);

	return(NULL);
#endif
}
/***************************************************************
Looks up the clustered index record corresponding to node->ref, and if
it is found and still carries node->roll_ptr, builds node->row from it
and stores the persistent cursor position. */
ibool
row_undo_search_clust_to_pcur(
/*==========================*/
				/* out: TRUE if found; NOTE the node->pcur
				must be closed by the caller, regardless of
				the return value */
	undo_node_t*	node,	/* in: row undo node */
	que_thr_t*	thr)	/* in: query thread */
{
	dict_index_t*	clust_index;
	ibool		found;
	mtr_t		mtr;
	ibool		ret;
	rec_t*		rec;

	UT_NOT_USED(thr);

	mtr_start(&mtr);

	clust_index = dict_table_get_first_index(node->table);

	found = row_search_on_row_ref(&(node->pcur), BTR_MODIFY_LEAF,
				      node->table, node->ref, &mtr);

	/* NOTE(review): rec is fetched even when !found; it is only
	dereferenced in the roll_ptr comparison below, which the
	short-circuit '||' skips in that case. */
	rec = btr_pcur_get_rec(&(node->pcur));

	if (!found || 0 != ut_dulint_cmp(node->roll_ptr,
					 row_get_rec_roll_ptr(
						 rec, clust_index))) {

		/* We must remove the reservation on the undo log record
		BEFORE releasing the latch on the clustered index page: this
		is to make sure that some thread will eventually undo the
		modification corresponding to node->roll_ptr. */

		/* printf("--------------------undoing a previous version\n");
		*/

		ret = FALSE;
	} else {
		/* The record still carries our roll pointer: copy the row
		and remember the cursor position for later restore. */
		node->row = row_build(ROW_COPY_DATA, clust_index,
				      rec, node->heap);

		btr_pcur_store_position(&(node->pcur), &mtr);

		ret = TRUE;
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	return(ret);
}
/*********************************************************//**
Creates an operating system mutex semaphore. Because these are slow, the
mutex semaphore of InnoDB itself (mutex_t) should be used where possible.
@return the mutex handle */
UNIV_INTERN
os_mutex_t
os_mutex_create(
/*============*/
	const char*	name)	/*!< in: the name of the mutex, if NULL
				the mutex is created without a name */
{
#ifdef __WIN__
	HANDLE		mutex;
	os_mutex_t	mutex_str;

	mutex = CreateMutex(NULL,	/* No security attributes */
			    FALSE,	/* Initial state: no owner */
			    (LPCTSTR) name);
	ut_a(mutex);
#else
	os_fast_mutex_t*	mutex;
	os_mutex_t		mutex_str;

	/* Names are only supported by the Windows implementation. */
	UT_NOT_USED(name);

	mutex = ut_malloc(sizeof(os_fast_mutex_t));

	os_fast_mutex_init(mutex);
#endif
	/* Wrap the native mutex in InnoDB's own struct, which also keeps
	a reservation count and an event for waiters. */
	mutex_str = ut_malloc(sizeof(os_mutex_str_t));

	mutex_str->handle = mutex;
	mutex_str->count = 0;
	mutex_str->event = os_event_create(NULL);

	if (UNIV_LIKELY(os_sync_mutex_inited)) {
		/* When creating os_sync_mutex itself we cannot reserve it */
		os_mutex_enter(os_sync_mutex);
	}

	/* Register the new mutex in the global list, protected by
	os_sync_mutex once that exists. */
	UT_LIST_ADD_FIRST(os_mutex_list, os_mutex_list, mutex_str);

	os_mutex_count++;

	if (UNIV_LIKELY(os_sync_mutex_inited)) {
		os_mutex_exit(os_sync_mutex);
	}

	return(mutex_str);
}
/**********************************************************//**
Waits for an event object until it is in the signaled state or a timeout
is exceeded. In Unix the timeout is always infinite.
@return 0 if success, OS_SYNC_TIME_EXCEEDED if timeout was exceeded */
UNIV_INTERN
ulint
os_event_wait_time(
/*===============*/
	os_event_t	event,	/*!< in: event to wait */
	ulint		time)	/*!< in: timeout in microseconds, or
				OS_SYNC_INFINITE_TIME */
{
#ifdef __WIN__
	DWORD	timeout;
	DWORD	err;

	ut_a(event);

	/* WaitForSingleObject() expects milliseconds, our caller passes
	microseconds. */
	timeout = (time == OS_SYNC_INFINITE_TIME)
		? INFINITE
		: (DWORD) time / 1000;

	err = WaitForSingleObject(event->handle, timeout);

	switch (err) {
	case WAIT_OBJECT_0:
		return(0);
	case WAIT_TIMEOUT:
		return(OS_SYNC_TIME_EXCEEDED);
	default:
		ut_error;
		return(1000000); /* dummy value to eliminate compiler warn. */
	}
#else
	/* A Posix build ignores the timeout and waits forever. */
	UT_NOT_USED(time);

	/* In Posix this is just an ordinary, infinite wait */

	os_event_wait(event);

	return(0);
#endif
}
/*****************************************************************//**
Releases the item in the slot given. */
static
void
mtr_memo_slot_release(
/*==================*/
	mtr_t*			mtr,	/*!< in: mtr */
	mtr_memo_slot_t*	slot)	/*!< in: memo slot */
{
	void*	object;
	ulint	type;

	ut_ad(mtr);
	ut_ad(slot);

	/* In non-debug builds mtr is only needed for the assertions
	below, which compile away. */
#ifndef UNIV_DEBUG
	UT_NOT_USED(mtr);
#endif /* !UNIV_DEBUG */

	object = slot->object;
	type = slot->type;

	if (UNIV_LIKELY(object != NULL)) {
		/* NOTE: the branch structure below deliberately spans the
		#ifdef: in a non-debug build any type that is not a buffer
		fix or an S-lock falls through to the X-lock release; in a
		debug build MTR_MEMO_MODIFY slots are additionally checked
		(they hold no latch, so nothing is released for them). */
		if (type <= MTR_MEMO_BUF_FIX) {
			/* Buffer fixes and page latches */
			buf_page_release((buf_block_t*)object, type);
		} else if (type == MTR_MEMO_S_LOCK) {
			rw_lock_s_unlock((rw_lock_t*)object);
#ifdef UNIV_DEBUG
		} else if (type != MTR_MEMO_X_LOCK) {
			ut_ad(type == MTR_MEMO_MODIFY);
			ut_ad(mtr_memo_contains(mtr, object,
						MTR_MEMO_PAGE_X_FIX));
#endif /* UNIV_DEBUG */
		} else {
			rw_lock_x_unlock((rw_lock_t*)object);
		}
	}

	/* Mark the slot as released. */
	slot->object = NULL;
}
/*********************************************************//**
Creates an event semaphore, i.e., a semaphore which may just have two
states: signaled and nonsignaled. The created event is manual reset: it
must be reset explicitly by calling sync_os_reset_event.
@return the event handle */
UNIV_INTERN
os_event_t
os_event_create(
/*============*/
	const char*	name)	/*!< in: the name of the event, if NULL
				the event is created without a name */
{
#ifdef __WIN__
	os_event_t	event;

	event = ut_malloc(sizeof(struct os_event_struct));

	event->handle = CreateEvent(NULL, /* No security attributes */
				    TRUE, /* Manual reset */
				    FALSE, /* Initial state nonsignaled */
				    (LPCTSTR) name);
	if (!event->handle) {
		/* NOTE: the failure is only reported; the caller still
		receives the event struct with a NULL handle. */
		fprintf(stderr,
			"InnoDB: Could not create a Windows event semaphore;"
			" Windows error %lu\n",
			(ulong) GetLastError());
	}
#else /* Unix */
	os_event_t	event;

	/* Names are only supported by the Windows implementation. */
	UT_NOT_USED(name);

	event = ut_malloc(sizeof(struct os_event_struct));

	os_fast_mutex_init(&(event->os_mutex));

	ut_a(0 == pthread_cond_init(&(event->cond_var), NULL));

	event->is_set = FALSE;

	/* We return this value in os_event_reset(), which can then be
	used to pass to the os_event_wait_low(). The value of zero is
	reserved in os_event_wait_low() for the case when the caller
	does not want to pass any signal_count value. To distinguish
	between the two cases we initialize signal_count to 1 here. */
	event->signal_count = 1;
#endif /* __WIN__ */

	/* The os_sync_mutex can be NULL because during startup an event
	can be created [ because it's embedded in the mutex/rwlock ] before
	this module has been initialized */
	if (os_sync_mutex != NULL) {
		os_mutex_enter(os_sync_mutex);
	}

	/* Put to the list of events */
	UT_LIST_ADD_FIRST(os_event_list, os_event_list, event);

	os_event_count++;

	if (os_sync_mutex != NULL) {
		os_mutex_exit(os_sync_mutex);
	}

	return(event);
}
/************************************************************************
Loads definitions for index fields. */
static
void
dict_load_fields(
/*=============*/
	dict_table_t*	table,	/* in: table */
	dict_index_t*	index,	/* in: index whose fields to load */
	mem_heap_t*	heap)	/* in: memory heap for temporary storage */
{
	dict_table_t*	sys_fields;
	dict_index_t*	sys_index;
	btr_pcur_t	pcur;
	dtuple_t*	tuple;
	dfield_t*	dfield;
	char*		col_name;
	rec_t*		rec;
	byte*		field;
	ulint		len;
	byte*		buf;
	ulint		i;
	mtr_t		mtr;

	/* The caller must hold the dictionary mutex. */
	ut_ad(mutex_own(&(dict_sys->mutex)));

	UT_NOT_USED(table);

	mtr_start(&mtr);

	/* Scan the SYS_FIELDS dictionary table through its clustered
	index, keyed on the 8-byte index id. */
	sys_fields = dict_table_get_low("SYS_FIELDS");
	sys_index = UT_LIST_GET_FIRST(sys_fields->indexes);

	/* Build a one-field search tuple containing index->id. */
	tuple = dtuple_create(heap, 1);
	dfield = dtuple_get_nth_field(tuple, 0);

	buf = mem_heap_alloc(heap, 8);
	mach_write_to_8(buf, index->id);

	dfield_set_data(dfield, buf, 8);
	dict_index_copy_types(tuple, sys_index, 1);

	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
				  BTR_SEARCH_LEAF, &pcur, &mtr);

	/* We expect exactly index->n_fields consecutive records for
	this index id, ordered by position. */
	for (i = 0; i < index->n_fields; i++) {

		rec = btr_pcur_get_rec(&pcur);

		ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
		ut_a(!rec_get_deleted_flag(rec));

		/* Field 0: the index id must match our search key. */
		field = rec_get_nth_field(rec, 0, &len);
		ut_ad(len == 8);
		ut_a(ut_memcmp(buf, field, len) == 0);

		/* Field 1: the position must equal the loop counter. */
		field = rec_get_nth_field(rec, 1, &len);
		ut_ad(len == 4);
		ut_a(i == mach_read_from_4(field));

		/* Sanity check: field 4 of SYS_FIELDS must be the
		COL_NAME column, since we read it positionally below. */
		ut_a(0 == ut_strcmp(
			     "COL_NAME",
			     dict_field_get_col(
				     dict_index_get_nth_field(
					     dict_table_get_first_index(
						     sys_fields),
					     4))->name));

		/* Field 4: the column name; copy it NUL-terminated. */
		field = rec_get_nth_field(rec, 4, &len);

		col_name = mem_heap_alloc(heap, len + 1);
		ut_memcpy(col_name, field, len);
		col_name[len] = '\0';

		dict_mem_index_add_field(index, col_name, 0);

		btr_pcur_move_to_next_user_rec(&pcur, &mtr);
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);
}
/*******************************************************************
Removes a secondary index entry if found. */
static
ulint
row_undo_ins_remove_sec_low(
/*========================*/
				/* out: DB_SUCCESS, DB_FAIL, or
				DB_OUT_OF_FILE_SPACE */
	ulint		mode,	/* in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
				depending on whether we wish optimistic or
				pessimistic descent down the index tree */
	dict_index_t*	index,	/* in: index */
	dtuple_t*	entry,	/* in: index entry to remove */
	que_thr_t*	thr)	/* in: query thread */
{
	btr_pcur_t	pcur;
	btr_cur_t*	cursor;
	ulint		err;
	mtr_t		mtr;

	UT_NOT_USED(thr);

	log_free_check();

	mtr_start(&mtr);

	if (!row_search_index_entry(index, entry, mode, &pcur, &mtr)) {
		/* The entry is not in the index: nothing to remove. */

		err = DB_SUCCESS;
	} else if (mode == BTR_MODIFY_LEAF) {
		/* Optimistic delete: may fail if the page would need
		reorganization, in which case the caller retries with
		BTR_MODIFY_TREE. */
		cursor = btr_pcur_get_btr_cur(&pcur);

		err = btr_cur_optimistic_delete(cursor, &mtr)
			? DB_SUCCESS : DB_FAIL;
	} else {
		ut_ad(mode == BTR_MODIFY_TREE);

		cursor = btr_pcur_get_btr_cur(&pcur);

		btr_cur_pessimistic_delete(&err, FALSE, cursor, TRUE, &mtr);
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	return(err);
}
/****************************************************************
Maps or remaps physical AWE pages into the given location of the
virtual address window. */
ibool
os_awe_map_physical_mem_to_window(
/*==============================*/
				/* out: TRUE if success; the function
				calls exit(1) in case of an error */
	byte*		ptr,		/* in: a page-aligned pointer to
					somewhere in the virtual address
					space window; we map the physical
					mem pages here */
	ulint		n_mem_pages,	/* in: number of 4 kB mem pages to
					map */
	os_awe_t*	page_info)	/* in: array of page infos for
					those pages; each page has one
					slot in the array */
{
#ifdef UNIV_SIMULATE_AWE
	ulint	i;
	byte**	map;
	byte*	page;
	byte*	phys_page;

	/* Bounds checks: ptr must lie inside the simulated window and
	page_info inside the simulated page info array. */
	ut_a(ptr >= os_awe_simulate_window);
	ut_a(ptr < os_awe_simulate_window + os_awe_simulate_window_size);
	ut_a(page_info >= os_awe_simulate_page_info);
	ut_a(page_info < os_awe_simulate_page_info
	     + (os_awe_simulate_mem_size / 4096));

	/* First look if some other 'physical pages' are mapped at ptr,
	and copy them back to where they were if yes */

	map = os_awe_simulate_map
		+ ((ulint)(ptr - os_awe_simulate_window)) / 4096;
	page = ptr;

	for (i = 0; i < n_mem_pages; i++) {
		if (*map != NULL) {
			ut_memcpy(*map, page, 4096);
		}
		map++;
		page += 4096;
	}

	/* Then copy to ptr the 'physical pages' determined by page_info; we
	assume page_info is a segment of the array we created at the start */

	phys_page = os_awe_simulate_mem
		+ (ulint)(page_info - os_awe_simulate_page_info) * 4096;

	ut_memcpy(ptr, phys_page, n_mem_pages * 4096);

	/* Update the map */

	map = os_awe_simulate_map
		+ ((ulint)(ptr - os_awe_simulate_window)) / 4096;

	for (i = 0; i < n_mem_pages; i++) {
		*map = phys_page;

		map++;
		phys_page += 4096;
	}

	return(TRUE);

#elif defined(__WIN2000__)
	BOOL		bResult;
	os_awe_t	n_pages;

	n_pages = (os_awe_t)n_mem_pages;

	/* Validate ptr against the reserved window; any violation is a
	fatal programming error. */
	if (!(ptr >= os_awe_window)) {
		fprintf(stderr,
			"InnoDB: AWE: Error: trying to map to address %lx"
			" but AWE window start %lx\n",
			(ulint)ptr, (ulint)os_awe_window);
		ut_a(0);
	}

	if (!(ptr <= os_awe_window + os_awe_window_size - UNIV_PAGE_SIZE)) {
		fprintf(stderr,
			"InnoDB: AWE: Error: trying to map to address %lx"
			" but AWE window end %lx\n",
			(ulint)ptr,
			(ulint)os_awe_window + os_awe_window_size);
		ut_a(0);
	}

	if (!(page_info >= os_awe_page_info)) {
		fprintf(stderr,
			"InnoDB: AWE: Error: trying to map page info"
			" at %lx but array start %lx\n",
			(ulint)page_info, (ulint)os_awe_page_info);
		ut_a(0);
	}

	/* NOTE(review): the upper bound uses (os_awe_n_pages - 4) while
	the message prints the true array end — presumably a small safety
	margin; confirm the intent of the constant 4. */
	if (!(page_info <= os_awe_page_info + (os_awe_n_pages - 4))) {
		fprintf(stderr,
			"InnoDB: AWE: Error: trying to map page info"
			" at %lx but array end %lx\n",
			(ulint)page_info,
			(ulint)(os_awe_page_info + os_awe_n_pages));
		ut_a(0);
	}

	bResult = MapUserPhysicalPages((PVOID)ptr, n_pages, page_info);

	if (bResult != TRUE) {
		/* A mapping failure leaves the buffer pool inconsistent:
		terminate the process. */
		ut_print_timestamp(stderr);
		fprintf(stderr,
			" InnoDB: AWE: Mapping of %lu physical pages"
			" to address %lx failed,\n"
			"InnoDB: error %lu.\n"
			"InnoDB: Cannot continue operation.\n",
			n_mem_pages, (ulint)ptr, (ulint)GetLastError());
		exit(1);
	}

	return(TRUE);
#else
	UT_NOT_USED(ptr);
	UT_NOT_USED(n_mem_pages);
	UT_NOT_USED(page_info);

	return(FALSE);
#endif
}
/****************************************************************
Allocates physical memory pages for AWE and fills in the page info
array needed for mapping them into the virtual address window. */
ibool
os_awe_allocate_physical_mem(
/*=========================*/
				/* out: TRUE if success */
	os_awe_t** page_info,	/* out, own: array of opaque data containing
				the info for allocated physical memory pages;
				each allocated 4 kB physical memory page has
				one slot of type os_awe_t in the array */
	ulint	n_megabytes)	/* in: number of megabytes to allocate */
{
#ifdef UNIV_SIMULATE_AWE
	/* Simulated AWE: allocate ordinary heap memory to stand in for
	the physical pages and their page info array. */
	os_awe_simulate_page_info = ut_malloc
		(sizeof(os_awe_t) * n_megabytes
		 * ((1024 * 1024) / OS_AWE_X86_PAGE_SIZE));

	os_awe_simulate_mem
		= ut_align(ut_malloc(4096 + 1024 * 1024 * n_megabytes),
			   4096);
	os_awe_simulate_mem_size = n_megabytes * 1024 * 1024;

	*page_info = os_awe_simulate_page_info;

	return(TRUE);

#elif defined(__WIN2000__)
	BOOL		bResult;
	os_awe_t	NumberOfPages;	/* Question: why does Windows
					use the name ULONG_PTR for a scalar
					integer type? Maybe because we may
					also refer to &NumberOfPages? */
	os_awe_t	NumberOfPagesInitial;
	SYSTEM_INFO	sSysInfo;
	int		PFNArraySize;

	/* 64 * 1024 MB = 64 GB is the hard upper limit. */
	if (n_megabytes > 64 * 1024) {

		fprintf(stderr,
			"InnoDB: AWE: Error: tried to allocate %lu MB.\n"
			"InnoDB: AWE cannot allocate more than"
			" 64 GB in any computer.\n", n_megabytes);

		return(FALSE);
	}

	GetSystemInfo(&sSysInfo);	/* fill the system information
					structure */

	/* All the arithmetic here assumes 4 kB pages. */
	if ((ulint)OS_AWE_X86_PAGE_SIZE != (ulint)sSysInfo.dwPageSize) {
		fprintf(stderr,
			"InnoDB: AWE: Error: this computer has a page size"
			" of %lu.\n"
			"InnoDB: Should be 4096 bytes for"
			" InnoDB AWE support to work.\n",
			(ulint)sSysInfo.dwPageSize);

		return(FALSE);
	}

	/* Calculate the number of pages of memory to request */

	NumberOfPages = n_megabytes * ((1024 * 1024) / OS_AWE_X86_PAGE_SIZE);

	/* Calculate the size of page_info for allocated physical pages */

	PFNArraySize = NumberOfPages * sizeof(os_awe_t);

	*page_info = (os_awe_t*)HeapAlloc(GetProcessHeap(), 0, PFNArraySize);

	if (*page_info == NULL) {
		fprintf(stderr,
			"InnoDB: AWE: Failed to allocate page info"
			" array from process heap, error %lu\n",
			(ulint)GetLastError());

		return(FALSE);
	}

	ut_total_allocated_memory += PFNArraySize;

	/* Enable this process' privilege to lock pages to physical
	memory */

	if (!os_awe_enable_lock_pages_in_mem()) {

		return(FALSE);
	}

	/* Allocate the physical memory */

	NumberOfPagesInitial = NumberOfPages;

	/* Remember the array and count in module globals for the later
	bounds checks in the mapping function. */
	os_awe_page_info = *page_info;
	os_awe_n_pages = (ulint)NumberOfPages;

	/* Compilation note: if the compiler complains the function is not
	defined, see the note at the start of this file */

	/* NOTE: AllocateUserPhysicalPages() may grant fewer pages than
	requested; NumberOfPages is updated in place. */
	bResult = AllocateUserPhysicalPages(GetCurrentProcess(),
					    &NumberOfPages, *page_info);
	if (bResult != TRUE) {
		fprintf(stderr,
			"InnoDB: AWE: Cannot allocate physical pages,"
			" error %lu.\n",
			(ulint)GetLastError());

		return(FALSE);
	}

	if (NumberOfPagesInitial != NumberOfPages) {
		fprintf(stderr,
			"InnoDB: AWE: Error: allocated only %lu pages"
			" of %lu requested.\n"
			"InnoDB: Check that you have enough free RAM.\n"
			"InnoDB: In Windows XP Professional and"
			" 2000 Professional\n"
			"InnoDB: Windows PAE size is max 4 GB."
			" In 2000 and .NET\n"
			"InnoDB: Advanced Servers and 2000 Datacenter Server"
			" it is 32 GB,\n"
			"InnoDB: and in .NET Datacenter Server it is 64 GB.\n"
			"InnoDB: A Microsoft web page said that"
			" the processor must be an Intel\n"
			"InnoDB: processor.\n",
			(ulint)NumberOfPages,
			(ulint)NumberOfPagesInitial);

		return(FALSE);
	}

	fprintf(stderr,
		"InnoDB: Using Address Windowing Extensions (AWE);"
		" allocated %lu MB\n",
		n_megabytes);

	return(TRUE);
#else
	UT_NOT_USED(n_megabytes);
	UT_NOT_USED(page_info);

	return(FALSE);
#endif
}
/***************************************************************
Removes a delete marked clustered index record if possible. */
static
ibool
row_purge_remove_clust_if_poss_low(
/*===============================*/
				/* out: TRUE if success, or if not found, or
				if modified after the delete marking */
	purge_node_t*	node,	/* in: row purge node */
	que_thr_t*	thr,	/* in: query thread */
	ulint		mode)	/* in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
	dict_index_t*	clust_index;
	btr_pcur_t*	pcur;
	ibool		removed;
	ulint		err;
	mtr_t		mtr;

	UT_NOT_USED(thr);

	clust_index = dict_table_get_first_index(node->table);

	pcur = &(node->pcur);

	mtr_start(&mtr);

	if (!row_purge_reposition_pcur(mode, node, &mtr)) {
		/* The record is already removed */

		btr_pcur_commit_specify_mtr(pcur, &mtr);

		return(TRUE);
	}

	if (0 != ut_dulint_cmp(node->roll_ptr,
			       row_get_rec_roll_ptr(
				       btr_pcur_get_rec(pcur),
				       clust_index))) {
		/* Someone else has modified the record later:
		do not remove */

		btr_pcur_commit_specify_mtr(pcur, &mtr);

		return(TRUE);
	}

	if (mode == BTR_MODIFY_LEAF) {
		removed = btr_cur_optimistic_delete(
			btr_pcur_get_btr_cur(pcur), &mtr);
	} else {
		ut_ad(mode == BTR_MODIFY_TREE);

		btr_cur_pessimistic_delete(&err, FALSE,
					   btr_pcur_get_btr_cur(pcur),
					   FALSE, &mtr);

		if (err == DB_SUCCESS) {
			removed = TRUE;
		} else if (err == DB_OUT_OF_FILE_SPACE) {
			removed = FALSE;
		} else {
			ut_a(0);
		}
	}

	btr_pcur_commit_specify_mtr(pcur, &mtr);

	return(removed);
}
/*************************************************************** Removes a secondary index entry if possible. */ static ibool row_purge_remove_sec_if_poss_low( /*=============================*/ /* out: TRUE if success or if not found */ purge_node_t* node, /* in: row purge node */ que_thr_t* thr, /* in: query thread */ dict_index_t* index, /* in: index */ dtuple_t* entry, /* in: index entry */ ulint mode) /* in: latch mode BTR_MODIFY_LEAF or BTR_MODIFY_TREE */ { btr_pcur_t pcur; btr_cur_t* btr_cur; ibool success; ibool old_has; ibool found; ulint err; mtr_t mtr; mtr_t* mtr_vers; UT_NOT_USED(thr); log_free_check(); mtr_start(&mtr); found = row_search_index_entry(index, entry, mode, &pcur, &mtr); if (!found) { /* Not found */ /* printf("PURGE:........sec entry not found\n"); */ /* dtuple_print(entry); */ btr_pcur_close(&pcur); mtr_commit(&mtr); return(TRUE); } btr_cur = btr_pcur_get_btr_cur(&pcur); /* We should remove the index record if no later version of the row, which cannot be purged yet, requires its existence. If some requires, we should do nothing. */ mtr_vers = mem_alloc(sizeof(mtr_t)); mtr_start(mtr_vers); success = row_purge_reposition_pcur(BTR_SEARCH_LEAF, node, mtr_vers); if (success) { old_has = row_vers_old_has_index_entry(TRUE, btr_pcur_get_rec(&(node->pcur)), mtr_vers, index, entry); } btr_pcur_commit_specify_mtr(&(node->pcur), mtr_vers); mem_free(mtr_vers); if (!success || !old_has) { /* Remove the index record */ if (mode == BTR_MODIFY_LEAF) { success = btr_cur_optimistic_delete(btr_cur, &mtr); } else { ut_ad(mode == BTR_MODIFY_TREE); btr_cur_pessimistic_delete(&err, FALSE, btr_cur, FALSE, &mtr); if (err == DB_SUCCESS) { success = TRUE; } else if (err == DB_OUT_OF_FILE_SPACE) { success = FALSE; } else { ut_a(0); } } } btr_pcur_close(&pcur); mtr_commit(&mtr); return(success); }
/*******************************************************************
Removes a clustered index record. The pcur in node was positioned on the
record, now it is detached. */
static
ulint
row_undo_ins_remove_clust_rec(
/*==========================*/
				/* out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
	undo_node_t*	node,	/* in: undo node */
	que_thr_t*	thr)	/* in: query thread */
{
	btr_cur_t*	btr_cur;
	ibool		success;
	ulint		err;
	ulint		n_tries		= 0;
	mtr_t		mtr;

	UT_NOT_USED(thr);

	mtr_start(&mtr);

	success = btr_pcur_restore_position(BTR_MODIFY_LEAF, &(node->pcur),
					    &mtr);
	ut_a(success);

	if (ut_dulint_cmp(node->table->id, DICT_INDEXES_ID) == 0) {

		/* Drop the index tree associated with the row in
		SYS_INDEXES table: */

		dict_drop_index_tree(btr_pcur_get_rec(&(node->pcur)), &mtr);

		/* Dropping the tree released our latches: restart the
		mini-transaction and reposition the cursor before the
		delete below. */
		mtr_commit(&mtr);

		mtr_start(&mtr);

		success = btr_pcur_restore_position(BTR_MODIFY_LEAF,
						    &(node->pcur), &mtr);
		ut_a(success);
	}

	btr_cur = btr_pcur_get_btr_cur(&(node->pcur));

	/* First try the cheap optimistic delete within the leaf page. */
	success = btr_cur_optimistic_delete(btr_cur, &mtr);

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	if (success) {
		trx_undo_rec_release(node->trx, node->undo_no);

		return(DB_SUCCESS);
	}
retry:
	/* If did not succeed, try pessimistic descent to tree */
	mtr_start(&mtr);

	success = btr_pcur_restore_position(BTR_MODIFY_TREE,
					    &(node->pcur), &mtr);
	ut_a(success);

	btr_cur_pessimistic_delete(&err, FALSE, btr_cur, TRUE, &mtr);

	/* The delete operation may fail if we have little
	file space left: TODO: easiest to crash the database
	and restart with more file space */

	if (err == DB_OUT_OF_FILE_SPACE
	    && n_tries < BTR_CUR_RETRY_DELETE_N_TIMES) {

		/* Sleep and retry: the purge or other activity may free
		file space in the meantime. */
		btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

		n_tries++;

		os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);

		goto retry;
	}

	btr_pcur_commit_specify_mtr(&(node->pcur), &mtr);

	trx_undo_rec_release(node->trx, node->undo_no);

	return(err);
}
/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
UNIV_INTERN
void
rw_lock_create_func(
/*================*/
	rw_lock_t*	lock,		/*!< in: pointer to memory */
#ifdef UNIV_DEBUG
# ifdef UNIV_SYNC_DEBUG
	ulint		level,		/*!< in: level */
# endif /* UNIV_SYNC_DEBUG */
	const char*	cmutex_name,	/*!< in: mutex name */
#endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	ulint		cline)		/*!< in: file line where created */
{
	/* If this is the very first time a synchronization object is
	created, then the following call initializes the sync system. */

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
	/* Without atomics, the rw-lock state is protected by an
	embedded mutex. */
	mutex_create(rw_lock_mutex_key, rw_lock_get_mutex(lock),
		     SYNC_NO_ORDER_CHECK);

	lock->mutex.cfile_name = cfile_name;
	lock->mutex.cline = cline;

	ut_d(lock->mutex.cmutex_name = cmutex_name);
	ut_d(lock->mutex.mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
	UT_NOT_USED(cmutex_name);
# endif
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

	/* X_LOCK_DECR in lock_word means: unlocked. */
	lock->lock_word = X_LOCK_DECR;
	lock->waiters = 0;

	/* We set this value to signify that lock->writer_thread
	contains garbage at initialization and cannot be used for
	recursive x-locking. */
	lock->recursive = FALSE;
	/* Silence Valgrind when UNIV_DEBUG_VALGRIND is not enabled. */
	memset((void*) &lock->writer_thread, 0, sizeof lock->writer_thread);
	UNIV_MEM_INVALID(&lock->writer_thread, sizeof lock->writer_thread);

#ifdef UNIV_SYNC_DEBUG
	UT_LIST_INIT(lock->debug_list);

	lock->level = level;
#endif /* UNIV_SYNC_DEBUG */

	ut_d(lock->magic_n = RW_LOCK_MAGIC_N);

	/* Record where the lock was created, for diagnostics. */
	lock->cfile_name = cfile_name;
	lock->cline = (unsigned int) cline;

	lock->count_os_wait = 0;
	lock->last_s_file_name = "not yet reserved";
	lock->last_x_file_name = "not yet reserved";
	lock->last_s_line = 0;
	lock->last_x_line = 0;
	lock->event = os_event_create(NULL);
	lock->wait_ex_event = os_event_create(NULL);

	/* Register the lock in the global list, protected by
	rw_lock_list_mutex. */
	mutex_enter(&rw_lock_list_mutex);

	ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
	      || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

	UT_LIST_ADD_FIRST(list, rw_lock_list, lock);

	mutex_exit(&rw_lock_list_mutex);
}