void tmr_run(struct timeval *nowP) { int h; Timer *t; Timer *next; for (h = 0; h < HASH_SIZE; ++h) for (t = timers[h]; t != (Timer *)0; t = next) { next = t->next; /* Since the lists are sorted, as soon as we find a timer ** that isn't ready yet, we can go on to the next list. */ if (t->time.tv_sec > nowP->tv_sec || (t->time.tv_sec == nowP->tv_sec && t->time.tv_usec > nowP->tv_usec)) break; /* Invalidate mstimeout cache, since we're modifying the queue */ mstimeout_cache = -1; (t->timer_proc)(t->client_data, nowP); if (t->periodic) { /* Reschedule. */ t->time.tv_sec += t->msecs / 1000L; t->time.tv_usec += (t->msecs % 1000L) * 1000L; if (t->time.tv_usec >= 1000000L) { t->time.tv_sec += t->time.tv_usec / 1000000L; t->time.tv_usec %= 1000000L; } l_resort(t); } else tmr_cancel(t); } }
void tmr_run(struct timeval *nowP) { int h; Timer *t; Timer *next; for (h = 0; h < HASH_SIZE; ++h) for (t = timers[h]; t; t = next) { next = t->next; /* Since the lists are sorted, as soon as we find a timer ** that isn't ready yet, we can go on to the next list. */ if (t->time.tv_sec > nowP->tv_sec || (t->time.tv_sec == nowP->tv_sec && t->time.tv_usec > nowP->tv_usec)) break; (t->timer_proc) (t->arg, nowP); if (t->periodic) { /* Reschedule. */ t->time.tv_sec += t->msecs / 1000L; t->time.tv_usec += (t->msecs % 1000L) * 1000L; if (t->time.tv_usec >= 1000000L) { t->time.tv_sec += t->time.tv_usec / 1000000L; t->time.tv_usec %= 1000000L; } l_resort(t); } else tmr_cancel(t); } }
/* Restart a timer: set its expiry to *nowP plus its period (msecs),
** normalize tv_usec into [0, 1000000), and re-insert it in sorted order.
** Note: the single `if` suffices because the tv_usec sum is always
** below 2,000,000 here.
*/
void tmr_reset(struct timeval *nowP, Timer *t)
{
    t->time = *nowP;
    t->time.tv_sec += t->msecs / 1000L;
    t->time.tv_usec += (t->msecs % 1000L) * 1000L;
    if (t->time.tv_usec >= 1000000L) {
        t->time.tv_sec += t->time.tv_usec / 1000000L;
        t->time.tv_usec %= 1000000L;
    }
    l_resort(t);
}