/******************************************************************//**
This function should be called when a thread starts to wait on a wait
array cell. Selects the OS event belonging to the reserved cell's wait
object, flags the cell as waiting, and blocks on the event. In the
debug version it first checks whether the wait would cause a deadlock,
in which case it prints info and asserts. */
void
sync_array_wait_event(
/*==================*/
	sync_array_t*	arr,	/* in: wait array */
	ulint		index)	/* in: index of the reserved cell */
{
	sync_cell_t*	cell;
	os_event_t	event;

	ut_a(arr);

	sync_array_enter(arr);

	cell = sync_array_get_nth_cell(arr, index);

	/* The cell must have been reserved (wait_object set), must not
	already be waiting, and must belong to the calling thread. */
	ut_a(cell->wait_object);
	ut_a(!cell->waiting);
	ut_ad(os_thread_get_curr_id() == cell->thread);

	if (cell->request_type == SYNC_MUTEX) {
		event = ((mutex_t*) cell->wait_object)->event;
#ifdef __WIN__
	/* On windows if the thread about to wait is the one which
	has set the state of the rw_lock to RW_LOCK_WAIT_EX, then
	it waits on a special event i.e.: wait_ex_event. */
	} else if (cell->request_type == RW_LOCK_WAIT_EX) {
		event = ((rw_lock_t*) cell->wait_object)->wait_ex_event;
#endif
	/* NOTE: on non-Windows builds the RW_LOCK_WAIT_EX branch above
	is compiled out, so WAIT_EX waiters fall through to the generic
	rw-lock event here. */
	} else {
		event = ((rw_lock_t*) cell->wait_object)->event;
	}

	/* Set the waiting flag while still holding the array mutex, so
	that a signaller cannot miss this waiter. */
	cell->waiting = TRUE;

#ifdef UNIV_SYNC_DEBUG
	/* We use simple enter to the mutex below, because if
	we cannot acquire it at once, mutex_enter would call
	recursively sync_array routines, leading to trouble.
	rw_lock_debug_mutex freezes the debug lists. */

	rw_lock_debug_mutex_enter();

	if (TRUE == sync_array_detect_deadlock(arr, cell, cell, 0)) {

		fputs("########################################\n", stderr);
		ut_error;
	}

	rw_lock_debug_mutex_exit();
#endif
	sync_array_exit(arr);

	/* signal_count was sampled when the cell was reserved; passing
	it lets os_event_wait_low detect a signal sent in between. */
	os_event_wait_low(event, cell->signal_count);

	sync_array_free_cell(arr, index);
}
/******************************************************************//**
This function should be called when a thread starts to wait on
a wait array cell. In the debug version this function checks
if the wait for a semaphore will result in a deadlock, in which
case prints info and asserts. */
UNIV_INTERN
void
sync_array_wait_event(
/*==================*/
	sync_array_t*	arr,	/*!< in: wait array */
	ulint		index)	/*!< in: index of the reserved cell */
{
	os_event_t	wait_ev;
	sync_cell_t*	wait_cell;

	ut_a(arr);

	sync_array_enter(arr);

	wait_cell = sync_array_get_nth_cell(arr, index);

	/* The cell must be reserved, not yet waiting, and owned by the
	calling thread. */
	ut_a(wait_cell->wait_object);
	ut_a(!wait_cell->waiting);
	ut_ad(os_thread_get_curr_id() == wait_cell->thread);

	wait_ev = sync_cell_get_event(wait_cell);

	/* Flag the cell as waiting while the array mutex is still held,
	so that a concurrent signaller cannot overlook this waiter. */
	wait_cell->waiting = TRUE;

#ifdef UNIV_SYNC_DEBUG
	/* We use simple enter to the mutex below, because if
	we cannot acquire it at once, mutex_enter would call
	recursively sync_array routines, leading to trouble.
	rw_lock_debug_mutex freezes the debug lists. */

	rw_lock_debug_mutex_enter();

	if (TRUE == sync_array_detect_deadlock(arr, wait_cell,
					       wait_cell, 0)) {

		fputs("########################################\n", stderr);
		ut_error;
	}

	rw_lock_debug_mutex_exit();
#endif
	sync_array_exit(arr);

	/* The saved signal_count lets the wait detect a signal that
	arrived after the cell was reserved. */
	os_event_wait_low(wait_ev, wait_cell->signal_count);

	sync_array_free_cell(arr, index);
}
/******************************************************************//**
Function for the next writer to call. Waits for readers to exit.
The caller must have already decremented lock_word by X_LOCK_DECR,
i.e. it holds the WAIT_EX slot and only outstanding s-latches keep
lock_word below zero. Spins first, then sleeps on the wait array. */
UNIV_INLINE
void
rw_lock_x_lock_wait(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
#ifdef UNIV_SYNC_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
#endif
	const char*	file_name,/*!< in: file name where lock requested */
	ulint		line)	/*!< in: line where requested */
{
	ulint	index;		/* index of the reserved wait cell */
	ulint	i = 0;		/* spin round count */

	ut_ad(lock->lock_word <= 0);

	/* lock_word < 0 means readers are still inside; 0 means the
	last reader has left and this writer may proceed. */
	while (lock->lock_word < 0) {
		if (srv_spin_wait_delay) {
			ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
		}
		if(i < SYNC_SPIN_ROUNDS) {
			i++;
			continue;
		}

		/* If there is still a reader, then go to sleep.*/
		rw_x_spin_round_count += i;
		i = 0;
		sync_array_reserve_cell(sync_primary_wait_array,
					lock,
					RW_LOCK_WAIT_EX,
					file_name, line,
					&index);
		/* Check lock_word to ensure wake-up isn't missed: the
		cell must be reserved BEFORE this re-check, otherwise a
		release happening in between could signal nobody. */
		if(lock->lock_word < 0) {

			/* these stats may not be accurate */
			lock->count_os_wait++;
			rw_x_os_wait_count++;

			/* Add debug info as it is needed to detect possible
			deadlock. We must add info for WAIT_EX thread for
			deadlock detection to work properly. */
#ifdef UNIV_SYNC_DEBUG
			rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
					       file_name, line);
#endif

			sync_array_wait_event(sync_primary_wait_array,
					      index);
#ifdef UNIV_SYNC_DEBUG
			rw_lock_remove_debug_info(lock, pass,
						  RW_LOCK_WAIT_EX);
#endif
			/* It is possible to wake when lock_word < 0.
			We must pass the while-loop check to proceed.*/
		} else {
			/* The readers left while the cell was being
			reserved; give the cell back without sleeping. */
			sync_array_free_cell(sync_primary_wait_array,
					     index);
		}
	}
	rw_x_spin_round_count += i;
}
/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
in shared or exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
for the lock before suspending the thread. If the same thread has an x-lock
on the rw-lock, locking succeed, with the following exception: if pass != 0,
only a single x-lock may be taken on the lock. NOTE: If the same thread has
an s-lock, locking does not succeed! */
UNIV_INTERN
void
rw_lock_x_lock_func(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	ulint		line)	/*!< in: line where requested */
{
	ulint	index;	/*!< index of the reserved wait cell */
	ulint	i;	/*!< spin round count */
	ibool	spinning = FALSE;	/* ensures rw_x_spin_wait_count is
					incremented at most once per call */

	ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	i = 0;

lock_loop:

	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
		rw_x_spin_round_count += i;

		return;	/* Locking succeeded */

	} else {

		if (!spinning) {
			spinning = TRUE;
			rw_x_spin_wait_count++;
		}

		/* Spin waiting for the lock_word to become free */
		while (i < SYNC_SPIN_ROUNDS
		       && lock->lock_word <= 0) {
			if (srv_spin_wait_delay) {
				ut_delay(ut_rnd_interval(0,
							 srv_spin_wait_delay));
			}

			i++;
		}
		if (i == SYNC_SPIN_ROUNDS) {
			/* Spin budget exhausted: yield the CPU, then
			fall through to the wait-array path below. */
			os_thread_yield();
		} else {
			/* lock_word turned positive during the spin:
			retry the fast path immediately. */
			goto lock_loop;
		}
	}

	rw_x_spin_round_count += i;

	if (srv_print_latch_waits) {
		fprintf(stderr,
			"Thread %lu spin wait rw-x-lock at %p"
			" cfile %s cline %lu rnds %lu\n",
			os_thread_pf(os_thread_get_curr_id()), (void*) lock,
			innobase_basename(lock->cfile_name),
			(ulong) lock->cline, (ulong) i);
	}

	sync_array_reserve_cell(sync_primary_wait_array,
				lock,
				RW_LOCK_EX,
				file_name, line,
				&index);

	/* Waiters must be set before checking lock_word, to ensure signal
	is sent. This could lead to a few unnecessary wake-up signals. */
	rw_lock_set_waiter_flag(lock);

	/* Last chance before sleeping: the lock may have been released
	after the cell was reserved. */
	if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
		sync_array_free_cell(sync_primary_wait_array, index);
		return; /* Locking succeeded */
	}

	if (srv_print_latch_waits) {
		fprintf(stderr,
			"Thread %lu OS wait for rw-x-lock at %p"
			" cfile %s cline %lu\n",
			os_thread_pf(os_thread_get_curr_id()), (void*) lock,
			innobase_basename(lock->cfile_name),
			(ulong) lock->cline);
	}

	/* these stats may not be accurate */
	lock->count_os_wait++;
	rw_x_os_wait_count++;

	sync_array_wait_event(sync_primary_wait_array, index);

	/* Woken up: restart the spin/acquire cycle from scratch. */
	i = 0;
	goto lock_loop;
}
/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by SYNC_SPIN_ROUNDS), waiting
for the lock, before suspending the thread. */
UNIV_INTERN
void
rw_lock_s_lock_spin(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	ulint		line)	/*!< in: line where requested */
{
	ulint	index;	/* index of the reserved wait cell */
	ulint	i = 0;	/* spin round count */

	ut_ad(rw_lock_validate(lock));

	rw_s_spin_wait_count++;	/*!< Count calls to this function */
lock_loop:

	/* Spin waiting for the writer field to become free */
	while (i < SYNC_SPIN_ROUNDS && lock->lock_word <= 0) {
		if (srv_spin_wait_delay) {
			ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
		}

		i++;
	}

	if (i == SYNC_SPIN_ROUNDS) {
		/* Spin budget exhausted; yield before trying once more
		and possibly sleeping on the wait array. */
		os_thread_yield();
	}

	if (srv_print_latch_waits) {
		fprintf(stderr,
			"Thread %lu spin wait rw-s-lock at %p"
			" cfile %s cline %lu rnds %lu\n",
			(ulong) os_thread_pf(os_thread_get_curr_id()),
			(void*) lock,
			innobase_basename(lock->cfile_name),
			(ulong) lock->cline, (ulong) i);
	}

	/* We try once again to obtain the lock */
	if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
		rw_s_spin_round_count += i;

		return; /* Success */
	} else {

		if (i < SYNC_SPIN_ROUNDS) {
			/* The spin loop exited because lock_word became
			positive, but the acquire raced and lost: keep
			spinning rather than sleeping. */
			goto lock_loop;
		}

		rw_s_spin_round_count += i;

		sync_array_reserve_cell(sync_primary_wait_array,
					lock, RW_LOCK_SHARED,
					file_name, line,
					&index);

		/* Set waiters before checking lock_word to ensure wake-up
		signal is sent. This may lead to some unnecessary signals. */
		rw_lock_set_waiter_flag(lock);

		/* Last chance before sleeping: the writer may have left
		after the cell was reserved. */
		if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
			sync_array_free_cell(sync_primary_wait_array, index);
			return; /* Success */
		}

		if (srv_print_latch_waits) {
			fprintf(stderr,
				"Thread %lu OS wait rw-s-lock at %p"
				" cfile %s cline %lu\n",
				os_thread_pf(os_thread_get_curr_id()),
				(void*) lock,
				innobase_basename(lock->cfile_name),
				(ulong) lock->cline);
		}

		/* these stats may not be accurate */
		lock->count_os_wait++;
		rw_s_os_wait_count++;

		sync_array_wait_event(sync_primary_wait_array, index);

		/* Woken up: restart the spin/acquire cycle from scratch. */
		i = 0;
		goto lock_loop;
	}
}