static void queue_tx(struct lswitch *sw, struct ofpbuf *b) { int retval = rconn_send_with_limit(sw->rconn, b, sw->queued, 10); if (retval && retval != ENOTCONN) { if (retval == EAGAIN) { VLOG_INFO_RL(&rl, "%016llx: %s: tx queue overflow", sw->datapath_id, rconn_get_name(sw->rconn)); } else { VLOG_WARN_RL(&rl, "%016llx: %s: send: %s", sw->datapath_id, rconn_get_name(sw->rconn), ovs_strerror(retval)); } } }
/* Periodic callback for 'rl_', a struct rate_limiter: refills the token
 * bucket, then dequeues and transmits queued packets while tokens remain.
 *
 * Fix: the original loop condition was
 *     rl->n_queued && get_token(rl) && i < 50
 * which evaluates side-effecting get_token() BEFORE the iteration bound, so
 * when the 50-packet cap was reached with packets still queued, one token
 * was consumed and then discarded.  Checking the cheap, side-effect-free
 * 'i < 50' first avoids burning rate-limit budget on a packet that is never
 * sent. */
static void
rate_limit_periodic_cb(void *rl_)
{
    struct rate_limiter *rl = rl_;
    int i;

    /* Drain some packets out of the bucket if possible, but limit the number
     * of iterations to allow other code to get work done too. */
    refill_bucket(rl);
    for (i = 0; i < 50 && rl->n_queued && get_token(rl); i++) {
        /* Use a small, arbitrary limit for the amount of queuing to do here,
         * because the TCP connection is responsible for buffering and there is
         * no point in trying to transmit faster than the TCP connection can
         * handle. */
        struct ofpbuf *b = dequeue_packet(rl);
        if (rconn_send_with_limit(rl->remote_rconn, b, &rl->n_txq, 10)) {
            rl->n_tx_dropped++;
        }
    }
}