/* Entry point of the high-priority test thread.  Emits 'e' before
 * blocking on the shared semaphore `s`, 'g' once it gets through, and
 * 'h' after releasing it.  The letters let the harness check the
 * cross-thread scheduling order (presumably a priority/ordering test —
 * the expected sequence is defined by the harness, not here). */
static void *
high_run (void *arg)
{
  (void) arg;			/* unused */
  test_emit ('e');
  semaphore_enter (&s);
  test_emit ('g');
  semaphore_leave (&s);
  test_emit ('h');
  return NULL;
}
/* Entry point of the low-priority test thread.  Emits 'a', takes the
 * shared semaphore `s`, emits 'b', then launches the mid-priority
 * thread while still holding the semaphore; emits 'j' before and 'k'
 * after releasing it.  The emitted letters encode the scheduling order
 * the harness verifies. */
static void *
low_run (void *arg)
{
  (void) arg;			/* unused */
  test_emit ('a');
  semaphore_enter (&s);
  test_emit ('b');
  /* Start the mid-priority thread while the semaphore is held. */
  test_assert_zero (thread_launch (mid));
  test_emit ('j');
  semaphore_leave (&s);
  test_emit ('k');
  return NULL;
}
/* Entry point of the mid-priority test thread.  Emits 'c', enters the
 * shared semaphore `s`, emits 'd', launches the high-priority thread
 * while holding the semaphore, then emits 'f' before and 'i' after
 * releasing it.  Mirrors low_run/high_run; the harness checks the
 * combined letter sequence. */
static void *
mid_run (void *arg)
{
  (void) arg;			/* unused */
  test_emit ('c');
  semaphore_enter (&s);
  test_emit ('d');
  /* Start the high-priority thread while the semaphore is held. */
  test_assert_zero (thread_launch (high));
  test_emit ('f');
  semaphore_leave (&s);
  test_emit ('i');
  return NULL;
}
/* Termination-signal handler: blocks further termination signals,
 * records the shutdown request and wakes the background thread so it
 * can observe the flag and exit.
 *
 * NOTE(review): log_info and semaphore_leave are not on the POSIX
 * async-signal-safe function list; calling them from a handler is
 * formally undefined — confirm this is acceptable for this platform or
 * move the work to the woken thread.
 * NOTE(review): db_shutdown is written from signal context; it should
 * presumably be declared volatile sig_atomic_t — verify its
 * declaration elsewhere in the project. */
void
sig_catcher (int sig)
{
  /* Ignore any further termination signals while we shut down. */
  signal (SIGINT, SIG_IGN);
  signal (SIGTERM, SIG_IGN);
  signal (SIGHUP, SIG_IGN);
  signal (SIGQUIT, SIG_IGN);
  signal (SIGPIPE, SIG_IGN);
  log_info ("Caught signal %d, shutting down", sig);
  db_shutdown = 1;
  /* Wake whatever is blocked on background_sem so it can notice
     db_shutdown and perform the actual shutdown. */
  semaphore_leave (background_sem);
}
/* Resume a thread that was frozen waiting to read on `ses`.  Clears the
 * one-shot read-ready action, drops the session from the served set
 * when no default read handler remains, and releases the reading
 * thread's semaphore so it can continue.  Always returns 0. */
static int
unfreeze_thread_read (dk_session_t * ses)
{
  SESSION_SCH_DATA (ses)->sio_random_read_ready_action = NULL;
  /* With the one-shot action gone, the session stays in the served set
     only if a default read handler is still installed. */
  if (SESSION_SCH_DATA (ses)->sio_default_read_ready_action == NULL)
    {
      remove_from_served_sessions (ses);
    }
  /* Wake the thread parked on this read. */
  semaphore_leave (SESSION_SCH_DATA (ses)->sio_reading_thread->thr_sem);
  return 0;
}
/* Release `mtx`.  In the plain build this is just a semaphore release;
 * in the MTX_DEBUG build it checks ownership and hands the mutex
 * directly to the next waiting fiber, if any.
 * (The #endif after the signature closes a conditional prototype that
 * starts before this block — outside the visible region.) */
void
mutex_leave (dk_mutex_t *mtx)
#endif
{
#ifndef MTX_DEBUG
  /* Production build: the mutex is a plain binary semaphore. */
  semaphore_leave (mtx->mtx_handle);
#else
  semaphore_t *sem = (semaphore_t *) mtx->mtx_handle;
  thread_t *thr;
#ifdef MALLOC_DEBUG
  /* Before the fiber system is up (_current_fiber == NULL), only the
     debug-malloc mutex may be released; fall back to the raw
     semaphore. */
  if (_current_fiber == NULL)
    {
      assert (mtx == _dbgmal_mtx);
      semaphore_leave (sem);
      return;
    }
#endif
  /* Only the owning fiber may release, and the semaphore must be
     taken (entry count 0) at this point. */
  assert (mtx->mtx_owner == _current_fiber);
  assert (sem->sem_entry_count == 0);
  mtx->mtx_owner = NULL;
  /* NOTE(review): with assertions enabled the assert above makes this
     branch unreachable; in NDEBUG builds it is a defensive fallback —
     confirm intent. */
  if (sem->sem_entry_count)
    sem->sem_entry_count++;
  else
    {
      /* Hand the mutex directly to the first waiter instead of
	 incrementing the count, so ownership passes without a race. */
      thr = thread_queue_from (&sem->sem_waiting);
      if (thr)
	{
	  assert (thr->thr_status == WAITSEM);
	  _fiber_status (thr, RUNNABLE);
	}
      else
	/* No waiters: mark the semaphore free. */
	sem->sem_entry_count++;
    }
#endif
}
/* Resume a thread that was frozen waiting to write on `ses`.  Clears
 * the one-shot write-ready action, drops the session from the served
 * set when no read interest remains, and releases the writing thread's
 * semaphore.  Always returns 0. */
static int
unfreeze_thread_write (dk_session_t * ses)
{
  SESSION_SCH_DATA (ses)->sio_random_write_ready_action = NULL;
  /* in a direct io situation the session is not in the served set */
  if (!SESSION_SCH_DATA (ses)->sio_random_read_ready_action
      && !SESSION_SCH_DATA (ses)->sio_default_read_ready_action)
    {
      /* No other action on the session; take it out of the served set. */
      remove_from_served_sessions (ses);
    }
  ss_dprintf_4 (("Write in thread %p resumed.", (void *) (SESSION_SCH_DATA (ses)->sio_writing_thread)));
  /* Wake the thread parked on this write. */
  semaphore_leave (SESSION_SCH_DATA (ses)->sio_writing_thread->thr_sem);
  return 0;
}
/* Schedule `n` buffers for background IO.  Each buffer is assigned to
 * the IO queue of its storage/page, then merged in physical-page order
 * into that queue's doubly linked list; a queue that was empty gets its
 * worker woken via iq_sem.
 * NOTE(review): is_reads ends up reflecting only the last buffer
 * matched — presumably all buffers in one call are the same kind
 * (all reads or all writes); confirm with callers. */
void
iq_schedule (buffer_desc_t ** bufs, int n)
{
  int inx;
  int is_reads = 0;
  /* Sort by physical page so the per-queue merge below is a single
     in-order pass. */
  buf_sort (bufs, n, (sort_key_func_t) bd_phys_page_key);
  for (inx = 0; inx < n; inx++)
    {
      if (bufs[inx]->bd_iq)
	GPF_T1 ("buffer added to iq already has a bd_iq");
      /* Pick the queue responsible for this buffer's storage/page. */
      bufs[inx]->bd_iq = db_io_queue (bufs[inx]->bd_storage, bufs[inx]->bd_physical_page);
    }
  DO_SET (io_queue_t *, iq, &mti_io_queues)
    {
      int n_added = 0;
      buffer_desc_t * ipoint;
      int was_empty;
      IN_IOQ (iq);
      inx = 0;
      ipoint = iq->iq_first;	/* insertion cursor into the sorted queue */
      was_empty = (iq->iq_first == NULL);
      while (inx < n)
	{
	  buffer_desc_t * buf = bufs[inx];
	  /* Skip slots already consumed or destined for another queue. */
	  if (!buf || buf->bd_iq != iq)
	    {
	      inx++;
	      continue;
	    }
	  is_reads = buf->bd_being_read;
	  if (buf->bd_iq_next || buf->bd_iq_prev)
	    GPF_T1 ("can't schedule same buffer twice");
	  bufs[inx] = NULL;	/* mark slot consumed */
	next_ipoint:
	  if (!ipoint)
	    {
	      /* Past the end of the queue: append. */
	      L2_PUSH_LAST (iq->iq_first, iq->iq_last, buf, bd_iq_);
	      n_added++;
	      inx++;
	    }
	  else if (BUF_SORT_DP (ipoint) < BUF_SORT_DP (buf))
	    {
	      /* Advance the cursor until we find the insertion point. */
	      ipoint = ipoint->bd_iq_next;
	      goto next_ipoint;
	    }
	  else if (BUF_SORT_DP (ipoint) == BUF_SORT_DP (buf))
	    GPF_T1 ("the same buffer can't be scheduled twice for io");
	  else
	    {
	      /* Insert before the first queued buffer with a larger page. */
	      L2_INSERT (iq->iq_first, iq->iq_last, ipoint, buf, bd_iq_);
	      n_added++;
	      inx++;
	    }
	  /* Writes release the page once queued; reads keep it held
	     until the IO completes. */
	  if (!buf->bd_being_read)
	    {
	      page_leave_outside_map (buf);
	    }
	}
      LEAVE_IOQ (iq);
      if (n_added && !is_reads)
	{
	  dbg_printf (("IQ %s %d %s added, %s.\n", IQ_NAME (iq), n_added, is_reads ? "reads" : "writes", was_empty ? "starting" : "running"));
	}
      /* Only an empty queue needs its worker woken; a running worker
	 will pick up the new entries on its own. */
      if (n_added && was_empty)
	semaphore_leave (iq->iq_sem);
    }
  END_DO_SET ();
  if (n)
    {
      if (is_reads)
	mti_reads_queued += n;
      else
	mti_writes_queued += n;
    }
}