void qmgr_queue_done(QMGR_QUEUE *queue)
{
    const char *myname = "qmgr_queue_done";
    QMGR_TRANSPORT *xport = queue->transport;

    /*
     * Defensive checks: an in-core queue may be destroyed only while it is
     * completely idle. Destroying a queue that still has deliveries in
     * flight, queued entries, a non-"ready" status, or a recorded bounce
     * reason would indicate a broken caller, so bail out loudly.
     */
    if (queue->busy_refcount != 0 || queue->todo_refcount != 0)
        msg_panic("%s: refcount: %d", myname,
                  queue->busy_refcount + queue->todo_refcount);
    if (queue->todo.next || queue->busy.next)
        msg_panic("%s: queue not empty: %s", myname, queue->name);
    if (!QMGR_QUEUE_READY(queue))
        msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));
    if (queue->dsn)
        msg_panic("%s: queue %s: spurious reason %s",
                  myname, queue->name, queue->dsn->reason);

    /*
     * Detach the queue from its transport: first the peer list, then the
     * by-name lookup table (no destructor callback - we free the structure
     * ourselves below). Only after the table no longer references the name
     * string is it safe to release the queue's own storage.
     */
    QMGR_LIST_UNLINK(xport->queue_list, QMGR_QUEUE *, queue, peers);
    htable_delete(xport->queue_byname, queue->name, (void (*) (char *)) 0);
    myfree(queue->name);
    myfree(queue->nexthop);
    qmgr_queue_count--;
    myfree((char *) queue);
}
/*
 * qmgr_queue_resume - timer event handler that takes a queue out of the
 * "suspended" state entered via qmgr_queue_suspend().
 *
 * XXX NOTE(review): a second, longer definition of qmgr_queue_resume()
 * (with an extra qmgr_job_blocker_update() call) appears later in this
 * file. Two definitions of the same static function cannot coexist in one
 * translation unit; this looks like two program variants pasted together.
 * Confirm which definition belongs here.
 */
static void qmgr_queue_resume(int event, char *context)
{
    QMGR_QUEUE *queue = (QMGR_QUEUE *) context;	/* queue whose suspend timer fired */
    const char *myname = "qmgr_queue_resume";

    /*
     * Sanity checks: only a suspended queue may be resumed.
     */
    if (!QMGR_QUEUE_SUSPENDED(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));

    /*
     * We can't simply force delivery on this queue: the transport's pending
     * count may already be maxed out, and there may be other constraints
     * that definitely should be none of our business. The best we can do is
     * to play by the same rules as everyone else: let qmgr_active_drain()
     * and round-robin selection take care of message selection.
     */
    queue->window = 1;

    /*
     * Every event handler that leaves a queue in the "ready" state should
     * remove the queue when it is empty.
     */
    if (QMGR_QUEUE_READY(queue) && queue->todo.next == 0 && queue->busy.next == 0)
	qmgr_queue_done(queue);
}
void qmgr_queue_suspend(QMGR_QUEUE *queue, int delay)
{
    const char *myname = "qmgr_queue_suspend";

    /*
     * Defensive checks: only an idle queue in the "ready" state may be
     * suspended. Anything else means the caller is confused.
     */
    if (!QMGR_QUEUE_READY(queue))
        msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));
    if (queue->busy_refcount > 0)
        msg_panic("%s: queue is busy", myname);

    /*
     * Flag the queue as suspended - while in this state no-one is supposed
     * to remove it - and arrange for qmgr_queue_resume() to lift the
     * suspension again after "delay" has elapsed.
     */
    queue->window = QMGR_QUEUE_STAT_SUSPENDED;
    event_request_timer(qmgr_queue_resume, (char *) queue, delay);
}
/*
 * qmgr_queue_resume - timer event handler that takes a queue out of the
 * "suspended" state entered via qmgr_queue_suspend(). This variant also
 * notifies the job scheduler via qmgr_job_blocker_update() when the queue
 * stays around.
 *
 * XXX NOTE(review): this is the second definition of qmgr_queue_resume()
 * in this file (a shorter one, without the qmgr_job_blocker_update() call,
 * appears earlier). Two definitions of the same static function cannot
 * coexist in one translation unit; this looks like two program variants
 * pasted together. Confirm which definition belongs here.
 */
static void qmgr_queue_resume(int event, char *context)
{
    QMGR_QUEUE *queue = (QMGR_QUEUE *) context;	/* queue whose suspend timer fired */
    const char *myname = "qmgr_queue_resume";

    /*
     * Sanity checks: only a suspended queue may be resumed.
     */
    if (!QMGR_QUEUE_SUSPENDED(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));

    /*
     * We can't simply force delivery on this queue: the transport's pending
     * count may already be maxed out, and there may be other constraints
     * that definitely should be none of our business. The best we can do is
     * to play by the same rules as everyone else: let qmgr_active_drain()
     * and round-robin selection take care of message selection.
     */
    queue->window = 1;

    /*
     * Every event handler that leaves a queue in the "ready" state should
     * remove the queue when it is empty.
     *
     * XXX Do not omit the redundant test below. It is here to simplify code
     * consistency checks. The check is trivially eliminated by the compiler
     * optimizer. There is no need to sacrifice code clarity for the sake of
     * performance.
     *
     * XXX Do not expose the blocker job logic here. Rate-limited queues are not
     * a performance-critical feature. Here, too, there is no need to sacrifice
     * code clarity for the sake of performance.
     */
    if (QMGR_QUEUE_READY(queue) && queue->todo.next == 0 && queue->busy.next == 0)
	qmgr_queue_done(queue);
    else
	qmgr_job_blocker_update(queue);
}
/*
 * qmgr_queue_throttle - apply negative (failure) feedback to a
 * destination's delivery concurrency window after a delivery failed with
 * the reason in "dsn". After enough consecutive pseudo-cohort failures the
 * destination is declared dead ("throttled") and a timer is set to revive
 * it. Ownership: this routine copies "dsn"; the caller keeps its own copy.
 */
void qmgr_queue_throttle(QMGR_QUEUE *queue, DSN *dsn)
{
    const char *myname = "qmgr_queue_throttle";
    QMGR_TRANSPORT *transport = queue->transport;
    double feedback;

    /*
     * Sanity checks. A queue may be throttled only while "ready", and it
     * must not already carry a bounce reason.
     */
    if (!QMGR_QUEUE_READY(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));
    if (queue->dsn)
	msg_panic("%s: queue %s: spurious reason %s",
		  myname, queue->name, queue->dsn->reason);
    if (msg_verbose)
	msg_info("%s: queue %s: %s %s",
		 myname, queue->name, dsn->status, dsn->reason);

    /*
     * Don't restart the positive feedback hysteresis cycle with every
     * negative feedback. Restart it only when we make a negative concurrency
     * adjustment (i.e. at the start of a negative feedback hysteresis
     * cycle). Otherwise positive feedback would be too weak (positive
     * feedback does not take effect until the end of its hysteresis cycle).
     */

    /*
     * This queue is declared dead after a configurable number of
     * pseudo-cohort failures. Each failure counts as 1/window of a cohort,
     * so a full cohort corresponds to "window" concurrent failures.
     * NOTE(review): assigning QMGR_QUEUE_STAT_THROTTLED to queue->window
     * presumably stores a status sentinel that makes QMGR_QUEUE_READY()
     * false below -- confirm against the QMGR_QUEUE_* macro definitions.
     */
    if (QMGR_QUEUE_READY(queue)) {
	queue->fail_cohorts += 1.0 / queue->window;
	if (transport->fail_cohort_limit > 0
	    && queue->fail_cohorts >= transport->fail_cohort_limit)
	    queue->window = QMGR_QUEUE_STAT_THROTTLED;
    }

    /*
     * Decrease the destination's concurrency limit until we reach 1. Base
     * adjustments on the concurrency limit itself, instead of using the
     * actual concurrency. The latter fluctuates wildly when deliveries
     * complete in bursts (artificial benchmark measurements).
     *
     * Even after reaching 1, we maintain the negative hysteresis cycle so
     * that negative feedback can cancel out positive feedback.
     */
    if (QMGR_QUEUE_READY(queue)) {
	feedback = QMGR_FEEDBACK_VAL(transport->neg_feedback, queue->window);
	QMGR_LOG_FEEDBACK(feedback);
	queue->failure -= feedback;
	/* Prepare for overshoot (feedback > hysteresis, rounding error). */
	while (queue->failure - feedback / 2 < 0) {
	    queue->window -= transport->neg_feedback.hysteresis;
	    queue->success = 0;
	    queue->failure += transport->neg_feedback.hysteresis;
	}
	/* Prepare for overshoot: never let the window drop below 1. */
	if (queue->window < 1)
	    queue->window = 1;
    }

    /*
     * Special case for a site that just was declared dead: record the
     * reason, and schedule qmgr_queue_unthrottle_wrapper() to give the
     * destination another chance after the minimal backoff time.
     */
    if (QMGR_QUEUE_THROTTLED(queue)) {
	queue->dsn = DSN_COPY(dsn);
	event_request_timer(qmgr_queue_unthrottle_wrapper,
			    (char *) queue, var_min_backoff_time);
	queue->dflags = 0;
    }
    QMGR_LOG_WINDOW(queue);
}
/*
 * qmgr_queue_unthrottle - apply positive (success) feedback to a
 * destination's delivery concurrency window after a successful delivery,
 * or bring a dead ("throttled") destination back to life. Frees any bounce
 * reason recorded by qmgr_queue_throttle().
 */
void qmgr_queue_unthrottle(QMGR_QUEUE *queue)
{
    const char *myname = "qmgr_queue_unthrottle";
    QMGR_TRANSPORT *transport = queue->transport;
    double feedback;

    if (msg_verbose)
	msg_info("%s: queue %s", myname, queue->name);

    /*
     * Sanity checks: this makes sense only for a queue that is either
     * "ready" or "throttled".
     */
    if (!QMGR_QUEUE_READY(queue) && !QMGR_QUEUE_THROTTLED(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));

    /*
     * Don't restart the negative feedback hysteresis cycle with every
     * positive feedback. Restart it only when we make a positive concurrency
     * adjustment (i.e. at the end of a positive feedback hysteresis cycle).
     * Otherwise negative feedback would be too aggressive: negative feedback
     * takes effect immediately at the start of its hysteresis cycle.
     */
    queue->fail_cohorts = 0;

    /*
     * Special case when this site was dead: cancel the revival timer, drop
     * the recorded bounce reason, and restart with a fresh concurrency
     * window (the old window value is a status sentinel, not a
     * concurrency). Best concurrency is anyone's guess at this point.
     */
    if (QMGR_QUEUE_THROTTLED(queue)) {
	event_cancel_timer(qmgr_queue_unthrottle_wrapper, (char *) queue);
	if (queue->dsn == 0)
	    msg_panic("%s: queue %s: window 0 status 0", myname, queue->name);
	dsn_free(queue->dsn);
	queue->dsn = 0;
	/* Back from the almost grave, best concurrency is anyone's guess. */
	if (queue->busy_refcount > 0)
	    queue->window = queue->busy_refcount;
	else
	    queue->window = transport->init_dest_concurrency;
	queue->success = queue->failure = 0;
	QMGR_LOG_WINDOW(queue);
	return;
    }

    /*
     * Increase the destination's concurrency limit until we reach the
     * transport's concurrency limit. Allow for a margin the size of the
     * initial destination concurrency, so that we're not too gentle.
     *
     * Why is the concurrency increment based on preferred concurrency and
     * not on the number of outstanding delivery requests? The latter
     * fluctuates wildly when deliveries complete in bursts (artificial
     * benchmark measurements), and does not account for cached connections.
     *
     * Keep the window within reasonable distance from actual concurrency
     * otherwise negative feedback will be ineffective. This expression
     * assumes that busy_refcount changes gradually. This is invalid when
     * deliveries complete in bursts (artificial benchmark measurements).
     */
    if (transport->dest_concurrency_limit == 0
	|| transport->dest_concurrency_limit > queue->window)
	if (queue->window < queue->busy_refcount + transport->init_dest_concurrency) {
	    feedback = QMGR_FEEDBACK_VAL(transport->pos_feedback, queue->window);
	    QMGR_LOG_FEEDBACK(feedback);
	    queue->success += feedback;
	    /* Prepare for overshoot (feedback > hysteresis, rounding error). */
	    while (queue->success + feedback / 2 >= transport->pos_feedback.hysteresis) {
		queue->window += transport->pos_feedback.hysteresis;
		queue->success -= transport->pos_feedback.hysteresis;
		queue->failure = 0;
	    }
	    /* Prepare for overshoot: clamp to the transport's hard limit. */
	    if (transport->dest_concurrency_limit > 0
		&& queue->window > transport->dest_concurrency_limit)
		queue->window = transport->dest_concurrency_limit;
	}
    QMGR_LOG_WINDOW(queue);
}