/* Checks whether the stream interface's expiration timer has fired and, if
 * so, records that fact in si->flags. Must be called once after any wakeup
 * event where a timeout is suspected. Nothing is closed here: the expired
 * flag may serve any purpose the caller chooses.
 *
 * Returns 1 when the timeout fired, 0 otherwise.
 */
int stream_int_check_timeouts(struct stream_interface *si)
{
	if (!tick_is_expired(si->exp, now_ms))
		return 0;

	/* timer fired: flag it for the caller, but do not act on it */
	si->flags |= SI_FL_EXP;
	return 1;
}
/* Cooperative main-loop body for the demo application: each block below is
 * gated by its own software timer and re-arms itself, so the function must be
 * called repeatedly from the main loop. Block order matters only within a
 * single pass (adc_result is sampled before the ADC average is updated).
 */
void app_demo(void)
{
	/* Scale the running ADC average up by 16 (<< 4) to spread it over the
	 * full 16-bit range. NOTE(review): assumes avg_get() returns at most a
	 * 12-bit value, otherwise the shift overflows uint16_t — TODO confirm. */
	uint16_t adc_result = avg_get(&(app.adc_avg)) << 4;

	/* Every 1 ms: feed one new ADC sample into the moving average. */
	if (tick_is_expired(&(app.adc_tick))) {
		app.adc_tick = tick_from_ms(1);
		avg_moving(&(app.adc_avg), adc_sample(ADC_0));
	}

	/* Every 50 ms: push the BGR buffer out to the TLC5971 LED driver and
	 * toggle the red LED as a heartbeat. */
	if (tick_is_expired(&(app.tlc5971_tick))) {
		app.tlc5971_tick = tick_from_ms(50);
		//for (i=0; i<12; i++) {
		//	app.bgr_buf[i] = adc_result;
		//}
		tlc5971_show(&(app.bgr_buf[0]), 4);
		//tlc5971_testpattern(&(app.bgr_buf[0]), 12);
		tlc5971_set_bgr(&(app.tlc), &(app.bgr_buf[0]));
		gpio_toggle(GPIO_LED_R);
		//max31855_read(&app.max31855);
	}

	/* Every 30 ms: map the ADC reading onto the servo PWM frequency. */
	if (tick_is_expired(&(app.servo_tick))) {
		app.servo_tick = tick_from_ms(30);
		/* Small deadband */
		/* TODO: Add deadband to PWM? */
		/* NOTE(review): if servo_last is unsigned and < 16, servo_last-16
		 * wraps around and the deadband misbehaves near zero — verify
		 * servo_last's type and range. */
		if ((adc_result > servo_last+16) || (adc_result < servo_last-16)) {
			/* NOTE(review): (190000-50000)/0xFFFF truncates to 2 in integer
			 * arithmetic, so the reachable top frequency is ~181 kHz, not
			 * 190 kHz; multiplying before dividing would keep precision —
			 * confirm whether the truncated slope is intentional. */
			pwm_set_freq(&(app.pwm_servo), 50000+(((190000-50000)/0xFFFF)*adc_result));
			servo_last = adc_result;
		}
	}

	/* Every 500 ms: refresh the scaled ADC value on the serial LCD. */
	if (tick_is_expired(&(app.lcd_tick))) {
		app.lcd_tick = tick_from_ms(500);
		serlcd_set_cursor(&(app.serlcd), SERLCD_ROW1_POS);
		serlcd_print_decimal(&(app.serlcd), adc_result);
	}
}
/*
 * Task processing function to manage re-connect and peer session
 * tasks wakeup on local update.
 *
 * Runs the resync state machine for one shared table: in normal operation it
 * drives reconnection of remote peer sessions, assigns a peer to "teach" the
 * table when a resync is pending, and wakes sessions that have local updates
 * to push. During soft stop it instead keeps the process alive (via the jobs
 * counter) until the new local process has been resynced. task->expire is
 * rebuilt from scratch on every pass (earliest of the pending deadlines).
 * Always returns the task itself so it gets rescheduled.
 */
static struct task *process_peer_sync(struct task * task)
{
	struct shared_table *st = (struct shared_table *)task->context;
	struct peer_session *ps;

	/* start from "never": deadlines are folded back in below as needed */
	task->expire = TICK_ETERNITY;

	if (!stopping) {
		/* Normal case (not soft stop) */

		/* A resync from the local (old) process is pending, no peer has
		 * been assigned for the lesson, and either no old process exists
		 * or the resync timeout expired: give up on the local source and
		 * fall back to resyncing from remote peers. */
		if (((st->flags & SHTABLE_RESYNC_STATEMASK) == SHTABLE_RESYNC_FROMLOCAL) &&
		    (!nb_oldpids || tick_is_expired(st->resync_timeout, now_ms)) &&
		    !(st->flags & SHTABLE_F_RESYNC_ASSIGN)) {
			/* flag no more resync from local, to try resync from remotes */
			st->flags |= SHTABLE_F_RESYNC_LOCAL;

			/* reschedule a resync */
			st->resync_timeout = tick_add(now_ms, MS_TO_TICKS(5000));
		}

		/* For each session */
		for (ps = st->sessions; ps; ps = ps->next) {
			/* For each remote peer */
			if (!ps->peer->local) {
				if (!ps->session) {
					/* no active session */
					if (ps->statuscode == 0 ||
					    ps->statuscode == PEER_SESSION_SUCCESSCODE ||
					    ((ps->statuscode == PEER_SESSION_CONNECTCODE ||
					      ps->statuscode == PEER_SESSION_CONNECTEDCODE) &&
					     tick_is_expired(ps->reconnect, now_ms))) {
						/* connection never tried,
						 * or previous session established with success,
						 * or previous session failed during connection
						 * and the reconnection timer is expired */

						/* retry a connect */
						ps->session = peer_session_create(ps->peer, ps);
					}
					else if (ps->statuscode == PEER_SESSION_CONNECTCODE ||
						 ps->statuscode == PEER_SESSION_CONNECTEDCODE) {
						/* previous session failed during connection
						 * but the reconnection timer is not expired:
						 * reschedule the task for the reconnect time */
						task->expire = tick_first(task->expire, ps->reconnect);
					}
					/* else do nothing */
				} /* !ps->session */
				else if (ps->statuscode == PEER_SESSION_SUCCESSCODE) {
					/* current session is active and established */
					if (((st->flags & SHTABLE_RESYNC_STATEMASK) == SHTABLE_RESYNC_FROMREMOTE) &&
					    !(st->flags & SHTABLE_F_RESYNC_ASSIGN) &&
					    !(ps->flags & PEER_F_LEARN_NOTUP2DATE)) {
						/* A resync from a remote is needed, no peer was
						 * assigned for the lesson yet, and this peer may
						 * be up to date: assign it for the lesson. */
						ps->flags |= PEER_F_LEARN_ASSIGN;
						st->flags |= SHTABLE_F_RESYNC_ASSIGN;

						/* awake peer session task to handle a request of resync */
						task_wakeup(ps->session->task, TASK_WOKEN_MSG);
					}
					else if ((int)(ps->pushed - ps->table->table->localupdate) < 0) {
						/* signed wrap-around compare: this peer lags behind
						 * the local update counter, so wake its session task
						 * to push local updates */
						task_wakeup(ps->session->task, TASK_WOKEN_MSG);
					}
					/* else do nothing */
				} /* SUCCESSCODE */
			} /* !ps->peer->local */
		} /* for */

		/* Resync from remotes expired: consider resync is finished */
		if (((st->flags & SHTABLE_RESYNC_STATEMASK) == SHTABLE_RESYNC_FROMREMOTE) &&
		    !(st->flags & SHTABLE_F_RESYNC_ASSIGN) &&
		    tick_is_expired(st->resync_timeout, now_ms)) {
			/* Resync from remote peer was needed, no peer was assigned
			 * for the lesson and the resync timeout expired:
			 * flag no more resync from remote, consider resync finished */
			st->flags |= SHTABLE_F_RESYNC_REMOTE;
		}

		if ((st->flags & SHTABLE_RESYNC_STATEMASK) != SHTABLE_RESYNC_FINISHED) {
			/* Resync not finished: reschedule the task for the resync
			 * timeout so the resync can be ended then if needed */
			task->expire = tick_first(task->expire, st->resync_timeout);
		}
	} /* !stopping */
	else {
		/* soft stop case */
		if (task->state & TASK_WOKEN_SIGNAL) {
			/* We've just received the stopping signal */
			if (!(st->flags & SHTABLE_F_DONOTSTOP)) {
				/* add DO NOT STOP flag if not present: hold the process
				 * alive until the new process has been taught */
				jobs++;
				st->flags |= SHTABLE_F_DONOTSTOP;
			}

			/* disconnect all connected peers */
			for (ps = st->sessions; ps; ps = ps->next) {
				if (ps->session) {
					peer_session_forceshutdown(ps->session);
					ps->session = NULL;
				}
			}
		}
		/* from here on, only the session towards the new local process matters */
		ps = st->local_session;

		if (ps->flags & PEER_F_TEACH_COMPLETE) {
			if (st->flags & SHTABLE_F_DONOTSTOP) {
				/* resync of new process was complete, current process can die now */
				jobs--;
				st->flags &= ~SHTABLE_F_DONOTSTOP;
			}
		}
		else if (!ps->session) {
			/* session is not active */
			if (ps->statuscode == 0 ||
			    ps->statuscode == PEER_SESSION_SUCCESSCODE ||
			    ps->statuscode == PEER_SESSION_CONNECTEDCODE ||
			    ps->statuscode == PEER_SESSION_TRYAGAIN) {
				/* connection never tried,
				 * or previous session was successfully established,
				 * or previous session's tcp connect succeeded but init state incomplete,
				 * or during previous connect the peer replied a try-again statuscode */

				/* connect to the peer */
				ps->session = peer_session_create(ps->peer, ps);
			}
			else {
				/* Other error cases */
				if (st->flags & SHTABLE_F_DONOTSTOP) {
					/* unable to resync new process, current process can die now */
					jobs--;
					st->flags &= ~SHTABLE_F_DONOTSTOP;
				}
			}
		}
		else if (ps->statuscode == PEER_SESSION_SUCCESSCODE &&
			 (int)(ps->pushed - ps->table->table->localupdate) < 0) {
			/* current session active and established: awake the session
			 * to push the remaining local updates */
			task_wakeup(ps->session->task, TASK_WOKEN_MSG);
		}
	} /* stopping */
	/* Wakeup for re-connect */
	return task;
}
/*
 * Event-ports poller main loop. First commits pending per-thread and global
 * polling updates, then waits on the port (honouring busy-polling and the
 * earliest timer in 'exp'), and finally dispatches received events. Event
 * ports are one-shot: each fd that reports an event is dissociated by the
 * kernel and must be re-associated after processing to emulate
 * level-triggered behaviour.
 */
REGPRM3 static void _do_poll(struct poller *p, int exp, int wake)
{
	int i;
	int wait_time;
	struct timespec timeout_ts;
	unsigned int nevlist;
	int fd, old_fd;
	int status;

	/*
	 * Scan the list of file descriptors with an updated status:
	 */
	for (i = 0; i < fd_nbupdt; i++) {
		fd = fd_updt[i];

		_HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (fdtab[fd].owner == NULL) {
			/* fd was closed before we got to it */
			activity[tid].poll_drop++;
			continue;
		}

		_update_fd(fd);
	}
	fd_nbupdt = 0;

	/* Scan the global update list. Entries use an encoded linked list:
	 * -1 terminates, -2 means "restart from old_fd", and values <= -3
	 * encode the next fd as -fd-4. */
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit)
			done_update_polling(fd);
		else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd);
	}

	/* mark this thread harmless so others don't wait on it while we sleep */
	thread_harmless_now();

	/*
	 * Determine how long to wait for events to materialise on the port.
	 * A pending wakeup forces a non-blocking pass.
	 */
	wait_time = wake ? 0 : compute_poll_timeout(exp);
	tv_entering_poll();
	activity_count_runtime();

	do {
		/* under busy polling we never block in the kernel */
		int timeout = (global.tune.options & GTUNE_BUSY_POLLING) ? 0 : wait_time;
		int interrupted = 0;

		nevlist = 1; /* desired number of events to be retrieved */
		timeout_ts.tv_sec  = (timeout / 1000);
		timeout_ts.tv_nsec = (timeout % 1000) * 1000000;

		status = port_getn(evports_fd[tid],
				   evports_evlist,
				   evports_evlist_max,
				   &nevlist, /* updated to the number of events retrieved */
				   &timeout_ts);
		if (status != 0) {
			int e = errno;
			switch (e) {
			case ETIME:
				/*
				 * Though the manual page has not historically made it
				 * clear, port_getn() can return -1 with an errno of
				 * ETIME and still have returned some number of events.
				 */
				/* nevlist >= 0 */
				break;
			default:
				/* EINTR or another error: retrieve nothing this pass */
				nevlist = 0;
				interrupted = 1;
				break;
			}
		}
		tv_update_date(timeout, nevlist);

		/* busy-polling loop exit conditions: got events or a signal,
		 * actually slept (or were asked not to sleep), pending signals
		 * or wakeups, or a timer expired */
		if (nevlist || interrupted)
			break;
		if (timeout || !wait_time)
			break;
		if (signal_queue_len || wake)
			break;
		if (tick_isset(exp) && tick_is_expired(exp, now_ms))
			break;
	} while(1);

	tv_leaving_poll(wait_time, nevlist);

	thread_harmless_end();

	/* dispatch the retrieved events */
	for (i = 0; i < nevlist; i++) {
		unsigned int n = 0;
		int events, rebind_events;

		fd = evports_evlist[i].portev_object;
		events = evports_evlist[i].portev_events;

		if (fdtab[fd].owner == NULL) {
			/* fd was closed while we were polling */
			activity[tid].poll_dead++;
			continue;
		}

		if (!(fdtab[fd].thread_mask & tid_bit)) {
			/* fd no longer belongs to this thread */
			activity[tid].poll_skip++;
			continue;
		}

		/*
		 * By virtue of receiving an event for this file descriptor, it
		 * is no longer associated with the port in question. Store
		 * the previous event mask so that we may reassociate after
		 * processing is complete.
		 */
		rebind_events = evports_state_to_events(fdtab[fd].state);
		/* rebind_events != 0 */

		/*
		 * Set bits based on the events we received from the port:
		 */
		if (events & POLLIN)
			n |= FD_POLL_IN;
		if (events & POLLOUT)
			n |= FD_POLL_OUT;
		if (events & POLLERR)
			n |= FD_POLL_ERR;
		if (events & POLLHUP)
			n |= FD_POLL_HUP;

		/*
		 * Call connection processing callbacks. Note that it's
		 * possible for this processing to alter the required event
		 * port assocation; i.e., the "state" member of the "fdtab"
		 * entry. If it changes, the fd will be placed on the updated
		 * list for processing the next time we are called.
		 */
		fd_update_events(fd, n);

		/*
		 * This file descriptor was closed during the processing of
		 * polled events. No need to reassociate.
		 */
		if (fdtab[fd].owner == NULL)
			continue;

		/*
		 * Reassociate with the port, using the same event mask as
		 * before. This call will not result in a dissociation as we
		 * asserted that _some_ events needed to be rebound above.
		 *
		 * Reassociating with the same mask allows us to mimic the
		 * level-triggered behaviour of poll(2). In the event that we
		 * are interested in the same events on the next turn of the
		 * loop, this represents no extra work.
		 *
		 * If this additional port_associate(3C) call becomes a
		 * performance problem, we would need to verify that we can
		 * correctly interact with the file descriptor cache and update
		 * list (see "src/fd.c") to avoid reassociating here, or to use
		 * a different events mask.
		 */
		evports_resync_fd(fd, rebind_events);
	}
}
/*
 * Linux epoll() poller.
 *
 * Commits pending per-thread and global polling updates, computes the
 * epoll_wait() timeout from the earliest timer in 'exp' (capped at
 * MAX_DELAY_MS), waits for events, then translates each epoll event mask
 * into FD_POLL_* flags and hands it to fd_update_events().
 */
REGPRM2 static void _do_poll(struct poller *p, int exp)
{
	int status;
	int fd;
	int count;
	int updt_idx;
	int wait_time;
	int old_fd;

	/* first, scan the update list to find polling changes */
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];

		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			/* fd was closed before we got to it */
			activity[tid].poll_drop++;
			continue;
		}

		_update_fd(fd);
	}
	fd_nbupdt = 0;

	/* Scan the global update list. Entries use an encoded linked list:
	 * -1 terminates, -2 means "restart from old_fd", and values <= -3
	 * encode the next fd as -fd-4. */
	for (old_fd = fd = update_list.first; fd != -1; fd = fdtab[fd].update.next) {
		if (fd == -2) {
			fd = old_fd;
			continue;
		}
		else if (fd <= -3)
			fd = -fd -4;
		if (fd == -1)
			break;
		if (fdtab[fd].update_mask & tid_bit)
			done_update_polling(fd);
		else
			continue;
		if (!fdtab[fd].owner)
			continue;
		_update_fd(fd);
	}

	/* mark this thread harmless so others don't wait on it while we sleep */
	thread_harmless_now();

	/* compute the epoll_wait() timeout */
	if (!exp)
		wait_time = MAX_DELAY_MS;
	else if (tick_is_expired(exp, now_ms)) {
		/* a timer already expired: poll without blocking */
		activity[tid].poll_exp++;
		wait_time = 0;
	}
	else {
		/* +1 ms rounds up so we never wake before the timer */
		wait_time = TICKS_TO_MS(tick_remain(now_ms, exp)) + 1;
		if (wait_time > MAX_DELAY_MS)
			wait_time = MAX_DELAY_MS;
	}

	/* now let's wait for polled events */
	gettimeofday(&before_poll, NULL);
	status = epoll_wait(epoll_fd[tid], epoll_events,
			    global.tune.maxpollevents, wait_time);
	tv_update_date(wait_time, status);
	measure_idle();

	thread_harmless_end();

	/* process polled events */
	for (count = 0; count < status; count++) {
		unsigned int n;
		unsigned int e = epoll_events[count].events;
		fd = epoll_events[count].data.fd;

		if (!fdtab[fd].owner) {
			/* fd was closed while we were polling */
			activity[tid].poll_dead++;
			continue;
		}

		if (!(fdtab[fd].thread_mask & tid_bit)) {
			/* FD has been migrated to another thread: drop it from
			 * our epoll set and clear our polled bit.
			 * NOTE(review): 'ev' is not declared in this function —
			 * presumably a file-scope scratch struct epoll_event;
			 * confirm it exists in this translation unit. */
			activity[tid].poll_skip++;
			epoll_ctl(epoll_fd[tid], EPOLL_CTL_DEL, fd, &ev);
			HA_ATOMIC_AND(&polled_mask[fd], ~tid_bit);
			continue;
		}

		/* it looks complicated but gcc can optimize it away when constants
		 * have same values... In fact it depends on gcc :-( */
		if (EPOLLIN == FD_POLL_IN && EPOLLOUT == FD_POLL_OUT &&
		    EPOLLPRI == FD_POLL_PRI && EPOLLERR == FD_POLL_ERR &&
		    EPOLLHUP == FD_POLL_HUP) {
			n = e & (EPOLLIN|EPOLLOUT|EPOLLPRI|EPOLLERR|EPOLLHUP);
		}
		else {
			n =	((e & EPOLLIN ) ? FD_POLL_IN  : 0) |
				((e & EPOLLPRI) ? FD_POLL_PRI : 0) |
				((e & EPOLLOUT) ? FD_POLL_OUT : 0) |
				((e & EPOLLERR) ? FD_POLL_ERR : 0) |
				((e & EPOLLHUP) ? FD_POLL_HUP : 0);
		}

		/* always remap RDHUP to HUP as they're used similarly */
		if (e & EPOLLRDHUP) {
			HA_ATOMIC_OR(&cur_poller.flags, HAP_POLL_F_RDHUP);
			n |= FD_POLL_HUP;
		}

		fd_update_events(fd, n);
	}
	/* the caller will take care of cached events */
}