/** Signal callback for SIGINT.
 * @param[in] ev Signal event descriptor.
 */
static void sigint_callback(struct Event* ev)
{
  assert(0 != ev_signal(ev));
  assert(ET_SIGNAL == ev_type(ev));
  assert(SIGINT == sig_signal(ev_signal(ev)));
  assert(SIGINT == ev_data(ev));

  exit_schedule(1, 0, 0, "Received signal SIGINT");
}

/** Signal callback for SIGTERM.
 * @param[in] ev Signal event descriptor.
 */
static void sigterm_callback(struct Event* ev)
{
  assert(0 != ev_signal(ev));
  assert(ET_SIGNAL == ev_type(ev));
  assert(SIGTERM == sig_signal(ev_signal(ev)));
  assert(SIGTERM == ev_data(ev));

  exit_schedule(0, 0, 0, "Received signal SIGTERM");
}
/** Signal callback for SIGINT; schedules a server restart.
 * @param[in] ev Signal event descriptor.
 */
static void sigint_callback(struct Event* ev)
{
  assert(0 != ev_signal(ev));
  assert(ET_SIGNAL == ev_type(ev));
  assert(SIGINT == sig_signal(ev_signal(ev)));
  assert(SIGINT == ev_data(ev));

  server_restart("caught signal: SIGINT");
}

/** Signal callback for SIGTERM; shuts the server down.
 * @param[in] ev Signal event descriptor.
 */
static void sigterm_callback(struct Event* ev)
{
  assert(0 != ev_signal(ev));
  assert(ET_SIGNAL == ev_type(ev));
  assert(SIGTERM == sig_signal(ev_signal(ev)));
  assert(SIGTERM == ev_data(ev));

  server_die("received signal SIGTERM");
}
/** Signal callback for SIGHUP.
 * @param[in] ev Signal event descriptor.
 */
static void sighup_callback(struct Event* ev)
{
  assert(0 != ev_signal(ev));
  assert(ET_SIGNAL == ev_type(ev));
  assert(SIGHUP == sig_signal(ev_signal(ev)));
  assert(SIGHUP == ev_data(ev));

  ++SignalCounter.hup;
  rehash(&me, 1);
}
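/*
 * The callbacks above run from the event loop, not directly from an
 * asynchronous signal handler; the loop turns raw signals into ET_SIGNAL
 * events before dispatching them.  The sketch below is a generic,
 * self-contained illustration of that pattern (the "self-pipe trick"), not
 * the struct Event / ev_signal() / exit_schedule() API used above:
 * sig_pipe, raw_handler() and signals_init() are hypothetical names
 * introduced only for this example.
 */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int sig_pipe[2];   /* [1] written by the handler, [0] read by the loop */

static void raw_handler(int signo)
{
  unsigned char b = (unsigned char) signo;
  (void) write(sig_pipe[1], &b, 1);   /* async-signal-safe forwarding */
}

static int signals_init(void)
{
  struct sigaction sa;

  if (pipe(sig_pipe) < 0)
    return -1;
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = raw_handler;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGINT,  &sa, NULL);
  sigaction(SIGTERM, &sa, NULL);
  sigaction(SIGHUP,  &sa, NULL);
  return 0;
}

/* The event loop polls sig_pipe[0]; when it becomes readable it would
 * dispatch to sigint_callback()/sigterm_callback()/sighup_callback(). */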
/* Wake the head waiter only if its requested unit count fits in the
 * semaphore's current count. */
static void sem_first_chk(struct k9_sem * const s)
{
    struct k9_task *t;

    if ((t = ev_first(s->base)) && t->u->blocked->sem->n <= s->cnt) {
        ev_signal(s->base, 1);
    }
}

/* Release every task blocked on the event; reschedule if any were made
 * runnable.  Runs with interrupts disabled. */
void k9_ev_signal_all(struct k9_ev * const ev)
{
    uint32 old;

    old = k9_cpu_intr_dis();
    if (ev_signal(ev, 0) != 0)
        _k9_task_resched();
    k9_cpu_intr_restore(old);
}
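/*
 * sem_first_chk() above only signals when the task at the head of the wait
 * queue can be satisfied by the units currently available.  The standalone
 * model below shows that decision in isolation; it uses plain C and
 * hypothetical names (counted_sem, waiter, release_units) with no relation
 * to the k9_* API.
 */
#include <stddef.h>

struct waiter {
  unsigned wanted;          /* units this waiter is blocked for */
  struct waiter *next;
};

struct counted_sem {
  unsigned count;           /* units currently available */
  struct waiter *head;      /* FIFO of blocked waiters */
};

/* Add units, then return the head waiter if it can now be satisfied,
 * or NULL if no waiter (or not enough units yet). */
static struct waiter *release_units(struct counted_sem *s, unsigned units)
{
  s->count += units;
  if (s->head != NULL && s->head->wanted <= s->count) {
    struct waiter *w = s->head;
    s->head = w->next;
    s->count -= w->wanted;
    return w;               /* caller makes this task runnable */
  }
  return NULL;
}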
int dds_cond_wait (cond_t *cv, HANDLE mutex)
{
  int res, last_waiter;

  /* Prevent race conditions on the waiters count. */
  lock_take (cv->waiters_lock);
  cv->waiters++;
  lock_release (cv->waiters_lock);

  res = DDS_RETCODE_OK;

  /* We keep the lock held just long enough to increment the count of
     waiters by one.  Note that we can't keep it held across the call to
     sema_take() since that will deadlock other calls to cond_signal(). */
  if (lock_release (mutex))
    return (DDS_RETCODE_ERROR);

  /* Wait to be awakened by a cond_signal() or cond_signal_all(). */
  res = sema_take (cv->sema);

  /* Reacquire lock to avoid race conditions on the waiters count. */
  lock_take (cv->waiters_lock);

  /* We're ready to return, so there's one less waiter. */
  cv->waiters--;
  last_waiter = cv->was_broadcast && cv->waiters == 0;

  /* Release the lock so that other collaborating threads can make progress. */
  lock_release (cv->waiters_lock);

  /* Even if sema_take() failed we fall through: res carries the error and
     the caller is always handed back the external mutex. */

  /* If we're the last waiter thread during this particular broadcast
     then let all the other threads proceed. */
  if (last_waiter)
    ev_signal (cv->waiters_done);

  /* We must always regain the external mutex, even when errors occur,
     because that's the guarantee that we give to our callers. */
  lock_take (mutex);

  return (res);
}
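/*
 * dds_cond_wait() relies on a signalling side that sets cv->was_broadcast
 * and then blocks on cv->waiters_done until the last waiter leaves.  That
 * side is not shown here; the sketch below reconstructs what a broadcast
 * typically looks like in this semaphore-plus-event scheme.  It is an
 * assumption, not this project's code: dds_cond_broadcast_sketch(),
 * sema_give_n() ("release the semaphore N times") and ev_wait() ("block on
 * the event") are hypothetical names used only for illustration.
 */
static int dds_cond_broadcast_sketch (cond_t *cv)
{
  int have_waiters;

  lock_take (cv->waiters_lock);
  have_waiters = (cv->waiters > 0);

  if (have_waiters)
  {
    /* Mark this as a broadcast, wake every current waiter, then wait until
       the last one to leave dds_cond_wait() signals waiters_done. */
    cv->was_broadcast = 1;
    sema_give_n (cv->sema, cv->waiters);   /* hypothetical helper */
    lock_release (cv->waiters_lock);
    ev_wait (cv->waiters_done);            /* hypothetical helper */
    cv->was_broadcast = 0;
  }
  else
    lock_release (cv->waiters_lock);

  return (DDS_RETCODE_OK);
}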
void k9_mutex_give(struct k9_mutex * const m)
{
    uint32 old;
    unsigned n1 = 0, n2 = 0;

    old = k9_cpu_intr_dis();
    if (m->owner == cur_task) {
        m->owner = 0;
        n1 = ev_signal(m->base, 1);
        /* Drop any inherited priority back to the task's base priority. */
        if (cur_task->effpri != cur_task->pri)
            n2 = task_effpri_set(cur_task, cur_task->pri);
        /* Reschedule if releasing the mutex or restoring the priority made
           another task eligible to run. */
        if (n1 != 0 || n2 != 0)
            _k9_task_resched();
    }
    k9_cpu_intr_restore(old);
}
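/*
 * The effpri/pri handling in k9_mutex_give() is the release half of priority
 * inheritance: while the mutex is held, the owner's effective priority may
 * have been boosted to that of a higher-priority waiter, and giving the
 * mutex back restores the base priority.  The fragment below is a generic
 * illustration of the acquire-side boost, not the k9 API; pi_task, pi_mutex
 * and pi_boost_owner() are hypothetical names, and a larger value is taken
 * to mean higher priority.
 */
#include <stddef.h>

struct pi_task  { int pri; int effpri; };
struct pi_mutex { struct pi_task *owner; };

/* Called when 'waiter' blocks on 'm': propagate the waiter's priority to the
 * current owner so the owner cannot be preempted by medium-priority tasks.
 * The boost is undone when the owner releases the mutex. */
static void pi_boost_owner(struct pi_mutex *m, const struct pi_task *waiter)
{
  if (m->owner != NULL && waiter->effpri > m->owner->effpri)
    m->owner->effpri = waiter->effpri;
}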
int dds_cond_timedwait (cond_t *cv, HANDLE mutex, const struct timespec *time)
{
  int res, msec_timeout, last_waiter;
  struct timespec now;

  /* Handle the easy case first. */
  if (!time)
    return (dds_cond_wait (cv, mutex));

  /* Prevent race conditions on the waiters count. */
  lock_take (cv->waiters_lock);
  cv->waiters++;
  lock_release (cv->waiters_lock);

  res = DDS_RETCODE_OK;

  if (time->tv_sec == 0 && time->tv_nsec == 0)
    msec_timeout = 0;   /* Do a "poll". */
  else
  {
    /* Note that we must convert between absolute time (which is passed as
       a parameter) and relative time (which is what WaitForSingleObject()
       expects). */
    clock_gettime (0, &now);
    if (now.tv_sec > time->tv_sec ||
        (now.tv_sec == time->tv_sec && now.tv_nsec > time->tv_nsec))
      msec_timeout = 0;
    else
      msec_timeout = (int) ((time->tv_sec - now.tv_sec) * 1000 +
                            (time->tv_nsec - now.tv_nsec) / 1000000);
  }

  /* We keep the lock held just long enough to increment the count of
     waiters by one.  Note that we can't keep it held across the call to
     WaitForSingleObject() since that will deadlock other calls to
     cond_signal(). */
  lock_release (mutex);

  /* Wait to be awakened by a cond_signal() or cond_signal_all(). */
  res = WaitForSingleObject (cv->sema, msec_timeout);

  /* Reacquire lock to avoid race conditions. */
  lock_take (cv->waiters_lock);
  cv->waiters--;
  last_waiter = cv->was_broadcast && cv->waiters == 0;
  lock_release (cv->waiters_lock);

  if (res != WAIT_OBJECT_0)
  {
    if (res == WAIT_TIMEOUT)
      res = DDS_RETCODE_TIMEOUT;
    else
      res = DDS_RETCODE_ERROR;
  }
  else
    res = DDS_RETCODE_OK;

  /* Release the signaler/broadcaster if we're the last waiter. */
  if (last_waiter)
    ev_signal (cv->waiters_done);

  /* We must always regain the external mutex, even when errors occur,
     because that's the guarantee that we give to our callers. */
  lock_take (mutex);

  return (res);
}
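/*
 * Typical call site for the two wait functions above: the caller holds the
 * external mutex, rechecks its predicate in a loop (wakeups may be spurious
 * or "stolen" by another thread), and passes an absolute deadline.  The
 * msg_queue type, its lock/cond/len members and queue_take_head() are
 * hypothetical names used only for this illustration.
 */
static int queue_pop_timed (struct msg_queue *q, struct msg **out,
                            const struct timespec *abstime)
{
  int rc = DDS_RETCODE_OK;

  lock_take (q->lock);
  /* Re-test the predicate after every wakeup. */
  while (q->len == 0 && rc == DDS_RETCODE_OK)
    rc = dds_cond_timedwait (&q->cond, q->lock, abstime);  /* drops and reacquires q->lock */
  if (q->len > 0)
  {
    *out = queue_take_head (q);   /* hypothetical helper */
    rc = DDS_RETCODE_OK;          /* data arrived before (or despite) the timeout */
  }
  lock_release (q->lock);
  return rc;
}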