/* Shortens the currently scheduled poll timeout to 'msec' milliseconds.
 * Has no effect unless 'msec' is positive and strictly smaller than the
 * timeout already pending. */
void
poll_timer_wait_decrease(int msec)
{
    if (msec <= 0) {
        return;
    }
    if (msec < timeout) {
        poll_timer_wait(msec);
    }
}
/* Performs one iteration of periodic datapath maintenance: runs per-second
 * pipeline timeouts, services ports and remote management connections, and
 * accepts new connections on each passive listener.  Also arms a one-second
 * poll timer so the surrounding main loop wakes up at least once per second
 * and the timeout check above keeps firing. */
void dp_run(struct datapath *dp)
{
    time_t now = time_now();
    struct remote *r, *rn;
    size_t i;

    /* Run time-based pipeline expiry at most once per wall-clock second. */
    if (now != dp->last_timeout) {
        dp->last_timeout = now;
        pipeline_timeout(dp->pipeline);
    }
    poll_timer_wait(1000);
    dp_ports_run(dp);

    /* Talk to remotes.  SAFE variant because remote_run() may destroy 'r'. */
    LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
        remote_run(dp, r);
    }

    /* Accept new management connections on the passive listeners. */
    for (i = 0; i < dp->n_listeners; ) {
        struct pvconn *pvconn = dp->listeners[i];
        struct vconn *new_vconn;
        int retval = pvconn_accept(pvconn, OFP_VERSION, &new_vconn);
        if (!retval) {
            /* Accepted: wrap the new vconn in a remote. */
            remote_create(dp, rconn_new_from_vconn("passive", new_vconn));
        } else if (retval != EAGAIN) {
            /* Hard accept error: drop this listener by swap-removing it with
             * the last entry, then re-examine slot 'i' (hence no i++ here).
             * NOTE(review): the removed pvconn is not closed on this path —
             * verify whether that is a deliberate leak or handled elsewhere. */
            VLOG_WARN_RL(LOG_MODULE, &rl, "accept failed (%s)",
                         strerror(retval));
            dp->listeners[i] = dp->listeners[--dp->n_listeners];
            continue;
        }
        i++;
    }
}
/* Arranges for poll_block() to wake up once 10 seconds have elapsed since
 * 'started' (a time_msec() timestamp), or immediately if that deadline has
 * already passed. */
static void
wait_timeout(long long int started)
{
    long long int remaining = started + 10000 - time_msec();

    if (remaining > 0) {
        poll_timer_wait(remaining);
    } else {
        poll_immediate_wake();
    }
}
/* Poll-loop wait callback for a rate limiter: schedules the next wakeup
 * when packets are queued.  'rl_' is really a struct rate_limiter *. */
static void
rate_limit_wait_cb(void *rl_)
{
    struct rate_limiter *rl = rl_;

    if (!rl->n_queued) {
        /* Nothing queued, nothing to schedule. */
        return;
    }

    if (rl->tokens >= 1000) {
        /* Enough tokens already: transmit as soon as we are called again. */
        poll_immediate_wake();
    } else {
        /* Bucket must refill first.  Waking halfway through the refill
         * interval is a cheap approximation; the exact refill time could be
         * computed for extra smoothness. */
        poll_timer_wait(TIME_UPDATE_INTERVAL / 2);
    }
}
/* Makes the next call to poll_block() return immediately instead of
 * blocking.  Implemented as a zero-duration timer wait. */
void
poll_immediate_wake(void)
{
    poll_timer_wait(0);
}