/* "main" timer routine * WARNING: it should never be called twice for the same *ticks value * (it could cause too fast expires for long timers), *ticks must be also * always increasing */
static void timer_handler(void)
{
	ticks_t saved_ticks;
#ifdef USE_SLOW_TIMER
	int run_slow_timer;
	int i;

	run_slow_timer=0;
	/* select the slow-timer list bucket for the current tick index */
	i=(slow_idx_t)(*t_idx%SLOW_LISTS_NO);
#endif

	/*LM_DBG("called, ticks=%lu, prev_ticks=%lu\n", (unsigned long)*ticks, (unsigned long)prev_ticks); */
	run_timer=0; /* reset run_timer */
	adjust_ticks();
	LOCK_TIMER_LIST();
	do{
		saved_ticks=*ticks; /* protect against time running backwards */
		if (prev_ticks>=saved_ticks){
			LM_CRIT("backwards or still time\n");
			/* try to continue */
			prev_ticks=saved_ticks-1;
			break;
		}
		/* go through all the "missed" ticks, taking a possible overflow * into account */
		for (prev_ticks=prev_ticks+1; prev_ticks!=saved_ticks; prev_ticks++)
			timer_run(prev_ticks);
		timer_run(prev_ticks); /* do it for saved_ticks too */
	}while(saved_ticks!=*ticks); /* in case *ticks changed */
#ifdef USE_SLOW_TIMER
	timer_list_expire(*ticks, &timer_lst->expired, &slow_timer_lists[i], *t_idx);
#else
	timer_list_expire(*ticks, &timer_lst->expired);
#endif
	/* WARNING: add_timer(...,0) must go directly to expired list, since * otherwise there is a race between timer running and adding it * (it could expire it H0_ENTRIES ticks later instead of 'now')*/
#ifdef USE_SLOW_TIMER
	/* the slow list is circular: a head whose next points back to itself
	 * is empty, so anything else means there is work for the slow timer */
	if (slow_timer_lists[i].next!=(struct timer_ln*)&slow_timer_lists[i]){
		run_slow_timer=1;
		/* advance the fast-side index only if the slow timer process is
		 * not more than SLOW_LISTS_NO-1 buckets behind; otherwise we
		 * would overwrite a bucket it has not consumed yet */
		if ((slow_idx_t)(*t_idx-*s_idx) < (SLOW_LISTS_NO-1U))
			(*t_idx)++;
		else{
			LM_WARN("slow timer too slow: overflow (%d - %d = %d)\n", *t_idx, *s_idx, *t_idx-*s_idx);
			/* trying to continue */
		}
	}
#endif
	UNLOCK_TIMER_LIST();
#ifdef USE_SLOW_TIMER
	/* wake up the "slow" timer */
	if (run_slow_timer)
		kill(slow_timer_pid, SLOW_TIMER_SIG);
#endif
}
/* --------------------------------------------------------------------------
 * Loop around some timer stuff and the i/o multiplexer.
 * Runs until the multiplexer reports a fatal error (negative return).
 * -------------------------------------------------------------------------- */
void servauth_loop(void)
{
	int64_t *next_deadline;
	int64_t time_left = 0LL;
	int status;

	for (status = 0; status >= 0; ) {
		/* ask the timer subsystem how long we may sleep */
		next_deadline = timer_timeout();

		/* wait for i/o activity, or until the deadline expires */
#ifdef USE_POLL
		status = io_poll(&time_left, next_deadline);
#else
		status = io_select(&time_left, next_deadline);
#endif

		/* no time left: at least one timer is due right now */
		if (time_left == 0LL)
			timer_run();

		/* let the timer code account for how long we actually slept */
		if (next_deadline)
			timer_drift(*next_deadline - time_left);

		/* dispatch whatever i/o became ready */
		io_handle();
	}
}
/*
 * Main event loop: alternate between the i/o multiplexer and the timer
 * machinery until the multiplexer reports a fatal error (negative return).
 */
void sploit_loop(void)
{
	int64_t *next_deadline;
	int64_t time_left = 0LL;
	int status;

	for (status = 0; status >= 0; ) {
		/* ask the timer subsystem how long we may sleep */
		next_deadline = timer_timeout();

		/* wait for i/o activity, or until the deadline expires */
#if (defined USE_SELECT)
		status = io_select(&time_left, next_deadline);
#elif (defined USE_POLL)
		status = io_poll(&time_left, next_deadline);
#endif

		/* no time left: at least one timer is due right now */
		if (time_left == 0LL)
			timer_run();

		/* let the timer code account for how long we actually slept */
		if (next_deadline)
			timer_drift(*next_deadline - time_left);

		/* dispatch whatever i/o became ready */
		io_handle();

		/* reap expired timers */
		timer_collect();
		/* ircd_collect();*/
	}
}
// run timer and compute seconds without modifying stats float timer_peek( px_timer_t* stats ) { float seconds; px_timer_t old = *stats; seconds = timer_run( stats ); *stats = old; // restore it return seconds; }
/* Timer device interrupt handler: trace and clock the current cpu,
 * acknowledge the device interrupt, re-arm the period and run the timer. */
void timer_irq_handler (struct irq_action_s *action)
{
	struct cpu_s *cpu = current_cpu;
	struct device_s *dev;

	cpu_trace_write(cpu, timer_irq_handler);
	cpu_clock(cpu);

	dev = action->dev;
	timer_reset_irq(dev);
	timer_set_period(dev, CPU_CLOCK_TICK);
	timer_run(dev, 1);
}
/** Run engine event loop.
 * @param[in] gen Lists of generators of various types.
 */
static void engine_loop(struct Generators* gen)
{
  struct kevent *events;
  int events_count;
  struct Socket* sock;
  struct timespec wait;
  int nevs;
  int i;
  int errcode;
  size_t codesize; /* NOTE(review): getsockopt() takes socklen_t*; size_t
                    * matches only where the two types have the same width
                    * -- confirm for the target platforms */

  /* size the kevent array from the feature, with a floor of 20 entries */
  if ((events_count = feature_int(FEAT_POLLS_PER_LOOP)) < 20)
    events_count = 20;
  events = (struct kevent *)MyMalloc(sizeof(struct kevent) * events_count);

  while (running) {
    /* resize the kevent array if the feature value changed */
    if ((i = feature_int(FEAT_POLLS_PER_LOOP)) >= 20 && i != events_count) {
      events = (struct kevent *)MyRealloc(events, sizeof(struct kevent) * i);
      events_count = i;
    }

    /* set up the sleep time */
    wait.tv_sec = timer_next(gen) ? (timer_next(gen) - CurrentTime) : -1;
    wait.tv_nsec = 0;

    Debug((DEBUG_INFO, "kqueue: delay: %Tu (%Tu) %Tu", timer_next(gen),
           CurrentTime, wait.tv_sec));

    /* check for active events; a negative tv_sec means block indefinitely
     * (NULL timeout) */
    nevs = kevent(kqueue_id, 0, 0, events, events_count,
                  wait.tv_sec < 0 ? 0 : &wait);

    CurrentTime = time(0); /* set current time... */

    if (nevs < 0) {
      if (errno != EINTR) { /* ignore kevent interrupts */
        /* Log the kqueue error */
        log_write(LS_SOCKET, L_ERROR, 0, "kevent() error: %m");
        if (!errors++)
          timer_add(timer_init(&clear_error), error_clear, 0, TT_PERIODIC,
                    ERROR_EXPIRE_TIME);
        else if (errors > KQUEUE_ERROR_THRESHOLD) /* too many errors... */
          exit_schedule(1, 0, 0, "too many kevent errors");
      }
      /* old code did a sleep(1) here; with usage these days,
       * that may be too expensive */
      continue;
    }

    for (i = 0; i < nevs; i++) {
      if (events[i].filter == EVFILT_SIGNAL) {
        /* it's a signal; deal appropriately */
        event_generate(ET_SIGNAL, events[i].udata, events[i].ident);
        continue; /* skip socket processing loop */
      }

      assert(events[i].filter == EVFILT_READ ||
             events[i].filter == EVFILT_WRITE);

      sock = sockList[events[i].ident];
      if (!sock) /* slots may become empty while processing events */
        continue;

      assert(s_fd(sock) == events[i].ident);

      gen_ref_inc(sock); /* can't have it going away on us */

      Debug((DEBUG_ENGINE, "kqueue: Checking socket %p (fd %d) state %s, "
             "events %s", sock, s_fd(sock), state_to_name(s_state(sock)),
             sock_flags(s_events(sock))));

      if (s_state(sock) != SS_NOTSOCK) {
        errcode = 0; /* check for errors on socket */
        codesize = sizeof(errcode);
        if (getsockopt(s_fd(sock), SOL_SOCKET, SO_ERROR, &errcode,
                       &codesize) < 0)
          errcode = errno; /* work around Solaris implementation */

        if (errcode) { /* an error occurred; generate an event */
          Debug((DEBUG_ENGINE, "kqueue: Error %d on fd %d, socket %p",
                 errcode, s_fd(sock), sock));
          event_generate(ET_ERROR, sock, errcode);
          gen_ref_dec(sock); /* careful not to leak reference counts */
          continue;
        }
      }

      /* dispatch the event according to the socket's state */
      switch (s_state(sock)) {
      case SS_CONNECTING:
        if (events[i].filter == EVFILT_WRITE) { /* connection completed */
          Debug((DEBUG_ENGINE, "kqueue: Connection completed"));
          event_generate(ET_CONNECT, sock, 0);
        }
        break;

      case SS_LISTENING:
        if (events[i].filter == EVFILT_READ) { /* connect. to be accept. */
          Debug((DEBUG_ENGINE, "kqueue: Ready for accept"));
          event_generate(ET_ACCEPT, sock, 0);
        }
        break;

      case SS_NOTSOCK: /* doing nothing socket-specific */
      case SS_CONNECTED:
        if (events[i].filter == EVFILT_READ) { /* data on socket */
          Debug((DEBUG_ENGINE, "kqueue: EOF or data to be read"));
          /* EV_EOF distinguishes peer close from ordinary readable data */
          event_generate(events[i].flags & EV_EOF ? ET_EOF : ET_READ,
                         sock, 0);
        }
        if (events[i].filter == EVFILT_WRITE) { /* socket writable */
          Debug((DEBUG_ENGINE, "kqueue: Data can be written"));
          event_generate(ET_WRITE, sock, 0);
        }
        break;

      case SS_DATAGRAM: case SS_CONNECTDG:
        if (events[i].filter == EVFILT_READ) { /* socket readable */
          Debug((DEBUG_ENGINE, "kqueue: Datagram to be read"));
          event_generate(ET_READ, sock, 0);
        }
        if (events[i].filter == EVFILT_WRITE) { /* socket writable */
          Debug((DEBUG_ENGINE, "kqueue: Datagram can be written"));
          event_generate(ET_WRITE, sock, 0);
        }
        break;
      }

      gen_ref_dec(sock); /* we're done with it */
    }

    timer_run(); /* execute any pending timers */
  }
}
/* engine event loop */
static void engine_loop(struct Generators* gen)
{
  struct timeval wait;
  fd_set read_set;
  fd_set write_set;
  int nfds;
  int i;
  int errcode;
  size_t codesize; /* NOTE(review): getsockopt() takes socklen_t*; size_t
                    * matches only where the two types have the same width
                    * -- confirm for the target platforms */
  struct Socket *sock;

  while (running) {
    read_set = global_read_set; /* all hail structure copy!! */
    write_set = global_write_set;

    /* set up the sleep time */
    wait.tv_sec = timer_next(gen) ? (timer_next(gen) - CurrentTime) : -1;
    wait.tv_usec = 0;

    Debug((DEBUG_INFO, "select: delay: %Tu (%Tu) %Tu", timer_next(gen),
           CurrentTime, wait.tv_sec));

    /* check for active files; negative tv_sec -> block indefinitely */
    nfds = select(highest_fd + 1, &read_set, &write_set, 0,
                  wait.tv_sec < 0 ? 0 : &wait);

    CurrentTime = time(0); /* set current time... */

    if (nfds < 0) {
      if (errno != EINTR) { /* ignore select interrupts */
        /* Log the select error */
        log_write(LS_SOCKET, L_ERROR, 0, "select() error: %m");
        if (!errors++)
          timer_add(timer_init(&clear_error), error_clear, 0, TT_PERIODIC,
                    ERROR_EXPIRE_TIME);
        else if (errors > SELECT_ERROR_THRESHOLD) /* too many errors... */
          server_restart("too many select errors");
      }
      /* old code did a sleep(1) here; with usage these days,
       * that may be too expensive */
      continue;
    }

    /* nfds counts down as ready descriptors are consumed, letting the
     * scan stop early once every ready fd has been handled */
    for (i = 0; nfds && i <= highest_fd; i++) {
      if (!(sock = sockList[i])) /* skip empty socket elements */
        continue;

      assert(s_fd(sock) == i);

      gen_ref_inc(sock); /* can't have it going away on us */

      Debug((DEBUG_ENGINE, "select: Checking socket %p (fd %d) state %s, "
             "events %s", sock, i, state_to_name(s_state(sock)),
             sock_flags(s_events(sock))));

      if (s_state(sock) != SS_NOTSOCK) {
        errcode = 0; /* check for errors on socket */
        codesize = sizeof(errcode);
        if (getsockopt(i, SOL_SOCKET, SO_ERROR, &errcode, &codesize) < 0)
          errcode = errno; /* work around Solaris implementation */

        if (errcode) { /* an error occurred; generate an event */
          Debug((DEBUG_ENGINE, "select: Error %d on fd %d, socket %p",
                 errcode, i, sock));
          event_generate(ET_ERROR, sock, errcode);
          gen_ref_dec(sock); /* careful not to leak reference counts */
          continue;
        }
      }

      switch (s_state(sock)) {
      case SS_CONNECTING:
        if (FD_ISSET(i, &write_set)) { /* connection completed */
          Debug((DEBUG_ENGINE, "select: Connection completed"));
          event_generate(ET_CONNECT, sock, 0);
          nfds--;
          /* NOTE(review): this continue skips the gen_ref_dec() at the
           * bottom of the loop -- confirm the reference is released by
           * the ET_CONNECT handler */
          continue;
        }
        break;

      case SS_LISTENING:
        if (FD_ISSET(i, &read_set)) { /* connection to be accepted */
          Debug((DEBUG_ENGINE, "select: Ready for accept"));
          event_generate(ET_ACCEPT, sock, 0);
          nfds--;
        }
        break;

      case SS_NOTSOCK:
        if (FD_ISSET(i, &read_set)) { /* data on socket */
          /* can't peek; it's not a socket */
          Debug((DEBUG_ENGINE, "select: non-socket readable"));
          event_generate(ET_READ, sock, 0);
          nfds--;
        }
        break;

      case SS_CONNECTED:
        if (FD_ISSET(i, &read_set)) { /* data to be read from socket */
          char c;

          /* one-byte MSG_PEEK to distinguish EOF from readable data
           * without consuming anything from the stream */
          switch (recv(i, &c, 1, MSG_PEEK)) { /* check for EOF */
          case -1: /* error occurred?!? */
            if (errno == EAGAIN) {
              Debug((DEBUG_ENGINE, "select: Resource temporarily "
                     "unavailable?"));
              /* NOTE(review): continue here also skips gen_ref_dec()
               * below -- possible reference leak; verify */
              continue;
            }
            Debug((DEBUG_ENGINE, "select: Uncaught error!"));
            event_generate(ET_ERROR, sock, errno);
            break;

          case 0: /* EOF from client */
            Debug((DEBUG_ENGINE, "select: EOF from client"));
            event_generate(ET_EOF, sock, 0);
            break;

          default: /* some data can be read */
            Debug((DEBUG_ENGINE, "select: Data to be read"));
            event_generate(ET_READ, sock, 0);
            break;
          }
        }
        if (FD_ISSET(i, &write_set)) { /* data can be written to socket */
          Debug((DEBUG_ENGINE, "select: Data can be written"));
          event_generate(ET_WRITE, sock, 0);
        }
        if (FD_ISSET(i, &read_set) || FD_ISSET(i, &write_set))
          nfds--;
        break;

      case SS_DATAGRAM: case SS_CONNECTDG:
        if (FD_ISSET(i, &read_set)) { /* data to be read from socket */
          Debug((DEBUG_ENGINE, "select: Datagram to be read"));
          event_generate(ET_READ, sock, 0);
        }
        if (FD_ISSET(i, &write_set)) { /* data can be written to socket */
          Debug((DEBUG_ENGINE, "select: Datagram can be written"));
          event_generate(ET_WRITE, sock, 0);
        }
        if (FD_ISSET(i, &read_set) || FD_ISSET(i, &write_set))
          nfds--;
        break;
      }

      assert(s_fd(sock) == i);

      gen_ref_dec(sock); /* we're done with it */
    }

    timer_run(); /* execute any pending timers */
  }
}
/* Initialize the timer subsystem and immediately run any timers that
 * are already due.
 *
 * Fix: the declarator used an empty parameter list `()`, which in C
 * declares a function with *unspecified* parameters (obsolescent since
 * C99, removed in C23) and disables argument checking at call sites.
 * `(void)` gives a proper prototype; callers are unaffected. */
void timer_start(void)
{
	timer_init();
	timer_run();
}
/* Main daemon loop: multiplex the net, pfkey and carp sockets with
 * select(2), dispatch ready messages, and run pending timers, until
 * daemon_shutdown is raised.  Returns 0 on clean shutdown, -1 on a
 * setup failure (fd-set allocation). */
static int sasyncd_run(pid_t ppid)
{
	struct timeval *timeout, tv;
	fd_set *rfds, *wfds;
	size_t fdsetsize;
	int maxfd, n;

	/* size the fd sets for every descriptor the process may open */
	n = getdtablesize();
	fdsetsize = howmany(n, NFDBITS) * sizeof(fd_mask);

	rfds = (fd_set *)malloc(fdsetsize);
	if (!rfds) {
		log_err("malloc(%lu) failed", (unsigned long)fdsetsize);
		return -1;
	}

	wfds = (fd_set *)malloc(fdsetsize);
	if (!wfds) {
		log_err("malloc(%lu) failed", (unsigned long)fdsetsize);
		free(rfds);
		return -1;
	}

	control_setrun();

	signal(SIGINT, sasyncd_stop);
	signal(SIGTERM, sasyncd_stop);

	timer_add("carp_undemote", CARP_DEMOTE_MAXTIME, monitor_carpundemote,
	    NULL);

	while (!daemon_shutdown) {
		/* rebuild the fd sets from scratch every iteration */
		memset(rfds, 0, fdsetsize);
		memset(wfds, 0, fdsetsize);
		maxfd = net_set_rfds(rfds);
		n = net_set_pending_wfds(wfds);
		if (n > maxfd)
			maxfd = n;

		pfkey_set_rfd(rfds);
		pfkey_set_pending_wfd(wfds);
		if (cfgstate.pfkey_socket + 1 > maxfd)
			maxfd = cfgstate.pfkey_socket + 1;

		carp_set_rfd(rfds);
		if (cfgstate.route_socket + 1 > maxfd)
			maxfd = cfgstate.route_socket + 1;

		/* bound the sleep by the next timer expiry */
		timeout = &tv;
		timer_next_event(&tv);

		n = select(maxfd, rfds, wfds, 0, timeout);
		if (n == -1) {
			if (errno != EINTR) {
				log_err("select()");
				sleep(1);
			}
		} else if (n) {
			net_handle_messages(rfds);
			net_send_messages(wfds);
			pfkey_read_message(rfds);
			pfkey_send_message(wfds);
			carp_read_message(rfds);
		}
		/* run expired timers even after EINTR or timeout */
		timer_run();

		/* Mostly for debugging. */
		if (getppid() != ppid) {
			log_msg(0, "sasyncd: parent died");
			daemon_shutdown++;
		}
	}
	free(rfds);
	free(wfds);
	return 0;
}
/* Engine event loop for the epoll backend: wait for kernel events,
 * translate them into engine events by socket state, then run timers. */
static void engine_loop(struct Generators *gen)
{
  struct epoll_event *events;
  struct Socket *sock;
  size_t codesize; /* NOTE(review): getsockopt() takes socklen_t*; size_t
                    * matches only where the two types have the same width
                    * -- confirm for the target platforms */
  int events_count, i, wait, nevs, errcode;

  /* size the event array from the feature, with a floor of 20 entries */
  if ((events_count = feature_int(FEAT_POLLS_PER_LOOP)) < 20)
    events_count = 20;
  events = MyMalloc(sizeof(events[0]) * events_count);
  while (running) {
    /* resize the event array if the feature value changed */
    if ((i = feature_int(FEAT_POLLS_PER_LOOP)) >= 20 && i != events_count) {
      events = MyRealloc(events, sizeof(events[0]) * i);
      events_count = i;
    }
    /* epoll timeout is in milliseconds; -1 blocks indefinitely */
    wait = timer_next(gen) ? (timer_next(gen) - CurrentTime) * 1000 : -1;
    Debug((DEBUG_INFO, "epoll: delay: %d (%d) %d", timer_next(gen),
           CurrentTime, wait));
    nevs = epoll_wait(epoll_fd, events, events_count, wait);
    CurrentTime = time(0); /* refresh cached current time */
    if (nevs < 0) {
      if (errno != EINTR) { /* ignore epoll interrupts */
        log_write(LS_SOCKET, L_ERROR, 0, "epoll() error: %m");
        if (!errors++)
          timer_add(timer_init(&clear_error), error_clear, 0, TT_PERIODIC,
                    ERROR_EXPIRE_TIME);
        else if (errors > EPOLL_ERROR_THRESHOLD)
          server_restart("too many epoll errors");
      }
      continue;
    }
    for (i = 0; i < nevs; i++) {
      if (!(sock = events[i].data.ptr)) /* skip cleared slots */
        continue;
      gen_ref_inc(sock); /* hold a reference while we process it */
      Debug((DEBUG_ENGINE,
             "epoll: Checking socket %p (fd %d) state %s, events %s",
             sock, s_fd(sock), state_to_name(s_state(sock)),
             sock_flags(s_events(sock))));
      if (events[i].events & EPOLLERR) {
        /* fetch the pending socket error and report it */
        errcode = 0;
        codesize = sizeof(errcode);
        if (getsockopt(s_fd(sock), SOL_SOCKET, SO_ERROR, &errcode,
                       &codesize) < 0)
          errcode = errno;
        if (errcode) {
          event_generate(ET_ERROR, sock, errcode);
          gen_ref_dec(sock); /* release before skipping dispatch */
          continue;
        }
      }
      /* dispatch the event according to the socket's state */
      switch (s_state(sock)) {
      case SS_CONNECTING:
        if (events[i].events & EPOLLOUT) /* connection completed */
          event_generate(ET_CONNECT, sock, 0);
        break;
      case SS_LISTENING:
        if (events[i].events & EPOLLIN) /* incoming connection */
          event_generate(ET_ACCEPT, sock, 0);
        break;
      case SS_NOTSOCK:
      case SS_CONNECTED:
        /* EPOLLHUP alongside EPOLLIN signals the peer closed */
        if (events[i].events & EPOLLIN)
          event_generate((events[i].events & EPOLLHUP) ? ET_EOF : ET_READ,
                         sock, 0);
        if (events[i].events & EPOLLOUT)
          event_generate(ET_WRITE, sock, 0);
        break;
      case SS_DATAGRAM:
      case SS_CONNECTDG:
        if (events[i].events & EPOLLIN)
          event_generate(ET_READ, sock, 0);
        if (events[i].events & EPOLLOUT)
          event_generate(ET_WRITE, sock, 0);
        break;
      }
      gen_ref_dec(sock); /* we're done with it */
    }
    timer_run(); /* execute any pending timers */
  }
}