/**
 * Invoked each time a new bandwidth timeslice begins.
 *
 * Expires stale queued traffic, then schedules pending messages (highest
 * priority queue first) until either the bandwidth for this slice is gone
 * or nothing remains buffered, and finally services flow-controlled upper
 * layers if bandwidth is still available.
 *
 * @param data		the udp_sched_t scheduler (opaque callback argument)
 * @param source	file descriptor associated with the I/O event
 * @param cond		the I/O condition that triggered this callback
 */
static void udp_sched_begin(void *data, int source, inputevt_cond_t cond)
{
	udp_sched_t *us = data;
	unsigned i;

	udp_sched_check(us);

	udp_sched_log(4, "%p: starting, %zu bytes buffered", us, us->buffered);
	udp_sched_log(5, "%p: messages queued: "
		"data=%zu, control=%zu, urgent=%zu, highest=%zu",
		us,
		eslist_count(&us->lifo[PMSG_P_DATA]),
		eslist_count(&us->lifo[PMSG_P_CONTROL]),
		eslist_count(&us->lifo[PMSG_P_URGENT]),
		eslist_count(&us->lifo[PMSG_P_HIGHEST]));

	/*
	 * Expire old traffic that we could not send.
	 */

	for (i = 0; i < N_ITEMS(us->lifo); i++) {
		eslist_foreach_remove(&us->lifo[i], udp_tx_desc_expired, us);
	}

	/*
	 * Schedule pending traffic in LIFO order (starting from head),
	 * processing the highest priority queue first.
	 */

	us->used_all = FALSE;

	do {
		udp_sched_seen_clear(us);
		/* Walk queues from highest-priority index down to lowest,
		 * stopping early once the bandwidth for this slice is used up. */
		for (i = N_ITEMS(us->lifo); i != 0 && !us->used_all; i--) {
			udp_sched_process(us, &us->lifo[i-1]);
		}
		udp_sched_tx_release(us);	/* May re-queue traffic */
		udp_sched_log(5, "%p: loop tail: %zu bytes buffered, b/w %s",
			us, us->buffered, us->used_all ? "gone" : "available");
	} while (!us->used_all && us->buffered != 0);

	/*
	 * If we did not use all the bandwidth yet and we flow-controlled
	 * upper layers, service them.
	 */

	if (!us->used_all && us->flow_controlled) {
		struct udp_service_ctx ctx;

		us->flow_controlled = FALSE;
		ctx.fd = source;
		ctx.cond = cond;
		udp_sched_service(us, &ctx);
	}

	udp_sched_log(4, "%p: done (b/w %s, %zu bytes buffered%s)",
		us, us->used_all ? "gone" : "available", us->buffered,
		us->flow_controlled ? ", flow-controlled" : "");
}
/**
 * Structural invariant for an HTTP range set: the embedded list and the
 * red-black tree are two views of the same ranges, so their element
 * counts must always agree.
 *
 * @param hrs	the range set to validate
 *
 * @return TRUE, so that the call can be wrapped in a safety_assert().
 */
static inline bool http_rangeset_invariant(const struct http_rangeset * const hrs)
{
	size_t list_items = eslist_count(&hrs->list);
	size_t tree_items = erbtree_count(&hrs->tree);

	g_assert(list_items == tree_items);

	return TRUE;	/* So that we can safety_assert() this routine */
}