Example #1
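/* qmgr_job_create - create and initialize one message job for a transport */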
static QMGR_JOB *qmgr_job_create(QMGR_MESSAGE *message, QMGR_TRANSPORT *transport)
{
    QMGR_JOB *job;

    job = (QMGR_JOB *) mymalloc(sizeof(QMGR_JOB));
    job->message = message;
    QMGR_LIST_APPEND(message->job_list, job, message_peers);
    htable_enter(transport->job_byname, message->queue_id, (void *) job);
    job->transport = transport;
    QMGR_LIST_INIT(job->transport_peers);
    QMGR_LIST_INIT(job->time_peers);
    job->stack_parent = 0;
    QMGR_LIST_INIT(job->stack_children);
    QMGR_LIST_INIT(job->stack_siblings);
    job->stack_level = -1;
    job->blocker_tag = 0;
    job->peer_byname = htable_create(0);
    QMGR_LIST_INIT(job->peer_list);
    job->slots_used = 0;
    job->slots_available = 0;
    job->selected_entries = 0;
    job->read_entries = 0;
    job->rcpt_count = 0;
    job->rcpt_limit = 0;
    return (job);
}
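The QMGR_LIST_* macros above implement intrusive doubly-linked lists: each
structure embeds one named link field per list it can belong to
(message_peers, transport_peers, stack_siblings, ...), so the same job can
sit on its message's job list and on a transport list at the same time
without extra allocations. Below is a minimal sketch of how such macros
could be written; the actual definitions in Postfix's qmgr.h may differ in
detail.

/* Illustrative sketch only - not the Postfix qmgr.h definitions. */
typedef struct SKETCH_JOB SKETCH_JOB;

typedef struct SKETCH_JOB_LIST {	/* doubles as list head and link field */
    SKETCH_JOB *next;
    SKETCH_JOB *prev;
} SKETCH_JOB_LIST;

struct SKETCH_JOB {
    SKETCH_JOB_LIST message_peers;	/* links for one message's job list */
    SKETCH_JOB_LIST transport_peers;	/* links for one transport's job list */
};

#define SKETCH_LIST_INIT(head) \
    ((head).next = (head).prev = 0)

/* Append object at the list tail; "peers" names the link field to thread. */
#define SKETCH_LIST_APPEND(head, object, peers) do { \
    (object)->peers.next = 0; \
    (object)->peers.prev = (head).prev; \
    if ((head).prev) \
	(head).prev->peers.next = (object); \
    else \
	(head).next = (object); \
    (head).prev = (object); \
} while (0)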
Example #2
QMGR_QUEUE *qmgr_queue_create(QMGR_TRANSPORT *transport, const char *name,
			      const char *nexthop)
{
    QMGR_QUEUE *queue;

    /*
     * If possible, choose an initial concurrency of > 1 so that one bad
     * message or one bad network won't slow us down unnecessarily.
     */

    queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
    qmgr_queue_count++;
    queue->dflags = 0;
    queue->last_done = 0;
    queue->name = mystrdup(name);
    queue->nexthop = mystrdup(nexthop);
    queue->todo_refcount = 0;
    queue->busy_refcount = 0;
    queue->transport = transport;
    queue->window = transport->init_dest_concurrency;
    queue->success = queue->failure = queue->fail_cohorts = 0;
    QMGR_LIST_INIT(queue->todo);
    QMGR_LIST_INIT(queue->busy);
    queue->dsn = 0;
    queue->clog_time_to_warn = 0;
    queue->blocker_tag = 0;
    QMGR_LIST_APPEND(transport->queue_list, queue, peers);
    htable_enter(transport->queue_byname, name, (char *) queue);
    return (queue);
}
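Since qmgr_queue_create() registers the new queue in transport->queue_byname
itself, a caller only has to probe that table before creating. A hedged
caller sketch (the wrapper name is illustrative; the real qmgr performs this
lookup with a qmgr_queue_find() helper):

/* Hypothetical wrapper illustrating the intended lookup-or-create usage. */
QMGR_QUEUE *queue_find_or_create(QMGR_TRANSPORT *transport,
				 const char *name, const char *nexthop)
{
    QMGR_QUEUE *queue;

    queue = (QMGR_QUEUE *) htable_find(transport->queue_byname, name);
    if (queue == 0)
	queue = qmgr_queue_create(transport, name, nexthop);
    return (queue);
}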
Example #3
QMGR_TRANSPORT *qmgr_transport_create(const char *name)
{
    QMGR_TRANSPORT *transport;

    if (htable_find(qmgr_transport_byname, name) != 0)
	msg_panic("qmgr_transport_create: transport exists: %s", name);
    transport = (QMGR_TRANSPORT *) mymalloc(sizeof(QMGR_TRANSPORT));
    transport->flags = 0;
    transport->pending = 0;
    transport->name = mystrdup(name);

    /*
     * Use global configuration settings or transport-specific settings.
     */
    transport->dest_concurrency_limit =
	get_mail_conf_int2(name, _DEST_CON_LIMIT,
			   var_dest_con_limit, 0, 0);
    transport->recipient_limit =
	get_mail_conf_int2(name, _DEST_RCPT_LIMIT,
			   var_dest_rcpt_limit, 0, 0);
    transport->init_dest_concurrency =
	get_mail_conf_int2(name, _INIT_DEST_CON,
			   var_init_dest_concurrency, 1, 0);
    transport->xport_rate_delay = get_mail_conf_time2(name, _XPORT_RATE_DELAY,
						      var_xport_rate_delay,
						      's', 0, 0);
    transport->rate_delay = get_mail_conf_time2(name, _DEST_RATE_DELAY,
						var_dest_rate_delay,
						's', 0, 0);

    if (transport->rate_delay > 0)
	transport->dest_concurrency_limit = 1;
    if (transport->dest_concurrency_limit != 0
	&& transport->dest_concurrency_limit < transport->init_dest_concurrency)
	transport->init_dest_concurrency = transport->dest_concurrency_limit;

    transport->queue_byname = htable_create(0);
    QMGR_LIST_INIT(transport->queue_list);
    transport->dsn = 0;
    qmgr_feedback_init(&transport->pos_feedback, name, _CONC_POS_FDBACK,
		       VAR_CONC_POS_FDBACK, var_conc_pos_feedback);
    qmgr_feedback_init(&transport->neg_feedback, name, _CONC_NEG_FDBACK,
		       VAR_CONC_NEG_FDBACK, var_conc_neg_feedback);
    transport->fail_cohort_limit =
	get_mail_conf_int2(name, _CONC_COHORT_LIM,
			   var_conc_cohort_limit, 0, 0);
    if (qmgr_transport_byname == 0)
	qmgr_transport_byname = htable_create(10);
    htable_enter(qmgr_transport_byname, name, (void *) transport);
    QMGR_LIST_APPEND(qmgr_transport_list, transport);
    if (msg_verbose)
	msg_info("qmgr_transport_create: %s concurrency %d recipients %d",
		 transport->name, transport->dest_concurrency_limit,
		 transport->recipient_limit);
    return (transport);
}
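The get_mail_conf_int2() and get_mail_conf_time2() calls above implement the
per-transport overrides: the transport name and a parameter-name suffix
(e.g. _DEST_CON_LIMIT) are concatenated into a parameter such as
smtp_destination_concurrency_limit, and the global default passed in (e.g.
var_dest_con_limit) is used when no transport-specific value is set. A rough
sketch of that lookup pattern, with a hypothetical config_lookup() standing
in for the Postfix configuration dictionary:

#include <stdio.h>
#include <stdlib.h>

extern const char *config_lookup(const char *name);	/* hypothetical */

/* Sketch only; the real get_mail_conf_int2() also enforces the optional
 * min/max bounds passed by the caller. */
static int sketch_conf_int2(const char *name, const char *suffix, int defval)
{
    char    param[256];
    const char *value;

    snprintf(param, sizeof(param), "%s%s", name, suffix);
    if ((value = config_lookup(param)) != 0)
	return (atoi(value));			/* transport-specific setting */
    return (defval);				/* global default */
}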
Example #4
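/* qmgr_job_parent_gone - hand the job's stack children over to the given
 * parent; a null parent turns the children into stack roots */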
static void qmgr_job_parent_gone(QMGR_JOB *job, QMGR_JOB *parent)
{
    QMGR_JOB *child;

    while ((child = job->stack_children.next) != 0) {
	QMGR_LIST_UNLINK(job->stack_children, QMGR_JOB *, child, stack_siblings);
	if (parent != 0)
	    QMGR_LIST_APPEND(parent->stack_children, child, stack_siblings);
	child->stack_parent = parent;
    }
}
Example #5
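/* qmgr_peer_create - create and initialize the peer structure that ties one
 * job to one queue */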
QMGR_PEER *qmgr_peer_create(QMGR_JOB *job, QMGR_QUEUE *queue)
{
    QMGR_PEER *peer;

    peer = (QMGR_PEER *) mymalloc(sizeof(QMGR_PEER));
    peer->queue = queue;
    peer->job = job;
    QMGR_LIST_APPEND(job->peer_list, peer, peers);
    htable_enter(job->peer_byname, queue->name, (char *) peer);
    peer->refcount = 0;
    QMGR_LIST_INIT(peer->entry_list);
    return (peer);
}
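Each QMGR_ENTRY ends up threaded onto two of these lists at once: its
queue's todo list (via queue_peers) and its peer's entry list (via
peer_peers). That is what lets qmgr_entry_select() below move an entry from
the todo to the busy list while unlinking it from the peer. A hedged sketch
of the wiring; the real constructor, qmgr_entry_create(), does more work
(recipient lists, message reference counts):

/* Illustrative sketch only; see qmgr_entry_create() for the real thing. */
QMGR_ENTRY *entry_create_sketch(QMGR_PEER *peer)
{
    QMGR_QUEUE *queue = peer->queue;
    QMGR_ENTRY *entry;

    entry = (QMGR_ENTRY *) mymalloc(sizeof(QMGR_ENTRY));
    entry->queue = queue;
    QMGR_LIST_APPEND(queue->todo, entry, queue_peers);	/* awaits delivery */
    queue->todo_refcount++;
    QMGR_LIST_APPEND(peer->entry_list, entry, peer_peers);	/* same job+queue */
    peer->refcount++;
    return (entry);
}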
Example #6
QMGR_ENTRY *qmgr_entry_select(QMGR_PEER *peer)
{
    const char *myname = "qmgr_entry_select";
    QMGR_ENTRY *entry;
    QMGR_QUEUE *queue;

    if ((entry = peer->entry_list.next) != 0) {
	queue = entry->queue;
	QMGR_LIST_UNLINK(queue->todo, QMGR_ENTRY *, entry, queue_peers);
	queue->todo_refcount--;
	QMGR_LIST_APPEND(queue->busy, entry, queue_peers);
	queue->busy_refcount++;
	QMGR_LIST_UNLINK(peer->entry_list, QMGR_ENTRY *, entry, peer_peers);
	peer->job->selected_entries++;

	/*
	 * With opportunistic session caching, the delivery agent must not
	 * only 1) save a session upon completion, but also 2) reuse a cached
	 * session upon the next delivery request. In order to not miss out
	 * on 2), we have to make caching sticky or else we get silly
	 * behavior when the in-memory queue drains. Specifically, new
	 * connections must not be made as long as cached connections exist.
	 * 
	 * Safety: don't enable opportunistic session caching unless the queue
	 * manager is able to schedule concurrent or back-to-back deliveries
	 * (we need to recognize back-to-back deliveries for transports with
	 * concurrency 1).
	 * 
	 * If caching has previously been enabled, but is not now, fetch any
	 * existing entries from the cache, but don't add new ones.
	 */
#define CONCURRENT_OR_BACK_TO_BACK_DELIVERY() \
	    (queue->busy_refcount > 1 || BACK_TO_BACK_DELIVERY())

#define BACK_TO_BACK_DELIVERY() \
		(queue->last_done + 1 >= event_time())

	/*
	 * Turn on session caching after we get up to speed. Don't enable
	 * session caching just because we have concurrent deliveries. This
	 * prevents unnecessary session caching when we have a burst of mail
	 * <= the initial concurrency limit.
	 */
	if ((queue->dflags & DEL_REQ_FLAG_CONN_STORE) == 0) {
	    if (BACK_TO_BACK_DELIVERY()) {
		if (msg_verbose)
		    msg_info("%s: allowing on-demand session caching for %s",
			     myname, queue->name);
		queue->dflags |= DEL_REQ_FLAG_CONN_MASK;
	    }
	}

	/*
	 * Turn off session caching when concurrency drops and we're running
	 * out of steam. This is what prevents us from turning off session
	 * caching too early, and from making new connections while old ones
	 * are still cached.
	 */
	else {
	    if (!CONCURRENT_OR_BACK_TO_BACK_DELIVERY()) {
		if (msg_verbose)
		    msg_info("%s: disallowing on-demand session caching for %s",
			     myname, queue->name);
		queue->dflags &= ~DEL_REQ_FLAG_CONN_STORE;
	    }
	}
    }
    return (entry);
}
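Note the asymmetry between the two tests: caching is switched on by the
back-to-back test alone, but switched off only when deliveries are neither
concurrent nor back-to-back, and even then only the DEL_REQ_FLAG_CONN_STORE
bit is cleared. The load bits that DEL_REQ_FLAG_CONN_MASK presumably also
sets stay in effect, so deliveries keep draining already-cached sessions
without creating new ones, exactly as the comment above requires.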
Example #7
static QMGR_JOB *qmgr_job_preempt(QMGR_JOB *current)
{
    const char *myname = "qmgr_job_preempt";
    QMGR_TRANSPORT *transport = current->transport;
    QMGR_JOB *job, *prev;
    int     expected_slots;
    int     rcpt_slots;

    /*
     * Suppress preempting completely if the current job is not big enough to
     * accumulate even the minimal number of slots required.
     * 
     * Also, don't look for a better job candidate if there are no available
     * slots yet (the count can go negative due to the slot loans below).
     */
    if (current->slots_available <= 0
      || MAX_ENTRIES(current) < transport->min_slots * transport->slot_cost)
	return (current);

    /*
     * Find best candidate for preempting the current job.
     * 
     * Note that this function also ensures that the candidate fits within
     * the number of delivery slots which the current job is still able to
     * accumulate.
     */
    if ((job = qmgr_job_candidate(current)) == 0)
	return (current);

    /*
     * Sanity checks.
     */
    if (job == current)
	msg_panic("%s: attempt to preempt itself", myname);
    if (job->stack_children.next != 0)
	msg_panic("%s: already on the job stack (%d)", myname, job->stack_level);
    if (job->stack_level < 0)
	msg_panic("%s: not on the job list (%d)", myname, job->stack_level);

    /*
     * Check whether enough delivery slots have been accumulated to preempt
     * the current job.
     * 
     * The slot loaning scheme improves the average message response time. Note
     * that the loan only allows the preemption to happen earlier, though. It
     * doesn't affect how many slots have to be "paid" - in either case the
     * full number of slots required has to be accumulated later before the
     * current job can be preempted again.
     */
    expected_slots = MAX_ENTRIES(job) - job->selected_entries;
    if (current->slots_available / transport->slot_cost + transport->slot_loan
	< expected_slots * transport->slot_loan_factor / 100.0)
	return (current);

    /*
     * Preempt the current job.
     * 
     * This involves placing the selected candidate in front of the current job
     * on the job list and updating the stack parent/child/sibling pointers
     * appropriately. But first we need to make sure that the candidate is
     * taken off its previous job stack, which it might be on top of.
     */
    if (job->stack_level > 0)
	qmgr_job_pop(job);
    QMGR_LIST_UNLINK(transport->job_list, QMGR_JOB *, job, transport_peers);
    prev = current->transport_peers.prev;
    QMGR_LIST_LINK(transport->job_list, prev, job, current, transport_peers);
    job->stack_parent = current;
    QMGR_LIST_APPEND(current->stack_children, job, stack_siblings);
    job->stack_level = current->stack_level + 1;

    /*
     * Update the current job pointer and explicitly reset the candidate
     * cache.
     */
    transport->job_current = job;
    RESET_CANDIDATE_CACHE(transport);

    /*
     * Since a single job can be preempted by several jobs at the same
     * time, we have to adjust the available slot count now to prevent using
     * the same slots multiple times. To do that we subtract the number of
     * slots the preempting job will supposedly use. This number will be
     * corrected later when that job is popped from the stack to reflect the
     * number of slots really used.
     * 
     * As long as we don't need to keep track of how many slots were really
     * used, we can (ab)use the slots_used counter for counting the
     * difference between the real and expected amounts instead of the
     * absolute amount.
     */
    current->slots_available -= expected_slots * transport->slot_cost;
    job->slots_used = -expected_slots;

    /*
     * Add part of extra recipient slots reserved for preempting jobs to the
     * new current job if necessary.
     * 
     * Note that transport->rcpt_unused lies between -rcpt_per_stack and 0 in
     * that case.
     */
    if (job->message->rcpt_offset != 0) {
	rcpt_slots = (transport->rcpt_per_stack + transport->rcpt_unused + 1) / 2;
	job->rcpt_limit += rcpt_slots;
	job->message->rcpt_limit += rcpt_slots;
	transport->rcpt_unused -= rcpt_slots;
    }
    if (msg_verbose)
	msg_info("%s: %s by %s, level %d", myname, current->message->queue_id,
		 job->message->queue_id, job->stack_level);

    return (job);
}
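To make the slot arithmetic concrete, take illustrative settings slot_cost =
2, slot_loan = 3, slot_loan_factor = 100, and a candidate with
expected_slots = 10. The admission test above passes once
current->slots_available / 2 + 3 >= 10, i.e. after 14 slots have been
accumulated rather than the full 20. The subtraction that follows still
charges the full price of 10 * 2 = 20 slots, leaving slots_available at -6;
this is the negative count that the first comment in this function warns
about, and it must be earned back before the next preemption can happen.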