Example 1
static void qmgr_message_move_limits(QMGR_MESSAGE *message)
{
    QMGR_JOB *job;

    for (job = message->job_list.next; job; job = job->message_peers.next)
	qmgr_job_move_limits(job);
}
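
The loop above is the queue manager's intrusive-list idiom: the list head holds next/prev pointers and each job embeds its own peer links, so sweeping every job of a message is a plain pointer walk with no extra allocation. Below is a minimal, self-contained sketch of that pattern; the struct layouts and names (struct peers, visit(), and so on) are illustrative stand-ins, not the real QMGR_JOB/QMGR_LIST definitions.

#include <stdio.h>

struct job;

struct peers {
    struct job *next;			/* next peer on the same list */
    struct job *prev;			/* previous peer on the same list */
};

struct job {
    int     id;
    struct peers message_peers;		/* links embedded in the element */
};

struct list {
    struct job *next;			/* list head */
    struct job *prev;			/* list tail */
};

static void visit(struct job *job)
{
    printf("visiting job %d\n", job->id);
}

int     main(void)
{
    struct job a = {1, {0, 0}};
    struct job b = {2, {0, 0}};
    struct list job_list = {&a, &b};
    struct job *job;

    a.message_peers.next = &b;
    b.message_peers.prev = &a;

    /*
     * Same shape as qmgr_message_move_limits(): start at the list head,
     * then follow each element's embedded peer links.
     */
    for (job = job_list.next; job; job = job->message_peers.next)
	visit(job);
    return (0);
}
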
Example 2
static void qmgr_job_retire(QMGR_JOB *job)
{
    if (msg_verbose)
	msg_info("qmgr_job_retire: %s", job->message->queue_id);

    /*
     * Pop the job from the job stack if necessary.
     */
    if (job->stack_level > 0)
	qmgr_job_pop(job);

    /*
     * Make sure this job is not cached as the next unread job for this
     * transport. The qmgr_entry_done() will make sure that the slots donated
     * by this job are moved back to the transport pool as soon as possible.
     */
    qmgr_job_move_limits(job);

    /*
     * Remove the job from the job lists. Note that it remains on the message
     * job list, though, and that it can be revived by using
     * qmgr_job_obtain(). Also note that the available slot counter is left
     * intact.
     */
    qmgr_job_unlink(job);
}
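
Read together with qmgr_job_free() and qmgr_job_link() below, this retire path suggests a simple state convention: stack_level stays non-negative while the job is on the transport job lists and goes negative once it is unlinked, which is what lets a retired job remain on the message job list until it is revived or freed. The following sketch captures that convention under that assumption about the sentinel value; the names are invented, and assert() stands in for the msg_panic() checks of the real code.

#include <assert.h>

struct job {
    int     stack_level;		/* < 0: unlinked/retired, >= 0: linked */
};

static void job_unlink(struct job *job)
{
    assert(job->stack_level >= 0);	/* must currently be linked */
    job->stack_level = -1;		/* retired: revivable or freeable */
}

static void job_link(struct job *job)
{
    assert(job->stack_level < 0);	/* never link a job twice */
    job->stack_level = 0;		/* back on the job lists */
}

int     main(void)
{
    struct job job = { -1 };		/* starts out unlinked */

    job_link(&job);			/* revive */
    job_unlink(&job);			/* retire again */
    return (0);
}
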
Example 3
void    qmgr_job_free(QMGR_JOB *job)
{
    const char *myname = "qmgr_job_free";
    QMGR_MESSAGE *message = job->message;
    QMGR_TRANSPORT *transport = job->transport;

    if (msg_verbose)
	msg_info("%s: %s %s", myname, message->queue_id, transport->name);

    /*
     * Sanity checks.
     */
    if (job->rcpt_count)
	msg_panic("%s: non-zero recipient count (%d)", myname, job->rcpt_count);

    /*
     * Pop the job from the job stack if necessary.
     */
    if (job->stack_level > 0)
	qmgr_job_pop(job);

    /*
     * Return any remaining recipient slots back to the recipient slots pool.
     */
    qmgr_job_move_limits(job);
    if (job->rcpt_limit)
	msg_panic("%s: recipient slots leak (%d)", myname, job->rcpt_limit);

    /*
     * Unlink and discard the structure. Check if the job is still linked on
     * the job lists or if it was already retired before unlinking it.
     */
    if (job->stack_level >= 0)
	qmgr_job_unlink(job);
    QMGR_LIST_UNLINK(message->job_list, QMGR_JOB *, job, message_peers);
    htable_delete(transport->job_byname, message->queue_id, (void (*) (void *)) 0);
    htable_free(job->peer_byname, (void (*) (void *)) 0);
    myfree((void *) job);
}
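
Note the defensive ordering: the function panics on a non-zero recipient count or on leaked recipient slots before it unlinks or frees anything, so a corrupted job can never be recycled silently. Here is a small sketch of that leak-checking destructor pattern with invented names, using plain stdio and abort() in place of the Postfix msg(3) and mymalloc(3) utilities:

#include <stdio.h>
#include <stdlib.h>

struct job {
    int     rcpt_count;			/* recipients still in core */
    int     rcpt_limit;			/* recipient slots still held */
};

static void job_free(struct job *job)
{
    const char *myname = "job_free";

    /*
     * Refuse to recycle a job that still owns resources; fail loudly at
     * the point of destruction instead, so leaks are caught immediately.
     */
    if (job->rcpt_count || job->rcpt_limit) {
	fprintf(stderr, "%s: leak: rcpt_count=%d rcpt_limit=%d\n",
		myname, job->rcpt_count, job->rcpt_limit);
	abort();
    }
    free(job);
}

int     main(void)
{
    struct job *job = calloc(1, sizeof(*job));

    if (job != 0)
	job_free(job);			/* counts are zero: frees cleanly */
    return (0);
}
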
Example 4
static void qmgr_job_link(QMGR_JOB *job)
{
    QMGR_TRANSPORT *transport = job->transport;
    QMGR_MESSAGE *message = job->message;
    QMGR_JOB *prev, *next, *list_prev, *list_next, *unread, *current;
    int     delay;

    /*
     * Sanity checks.
     */
    if (job->stack_level >= 0)
	msg_panic("qmgr_job_link: already on the job lists (%d)", job->stack_level);

    /*
     * Traverse the time list and the scheduler list from the end and stop
     * when we find a job older than the one being linked.
     * 
     * During the traversals keep track if we have come across either the
     * current job or the first unread job on the job list. If this is the
     * case, these pointers will be adjusted below as required.
     * 
     * Although both lists are exactly the same when only jobs at stack
     * level zero are considered, it's easier to traverse them separately.
     * Otherwise it's impossible to keep track of the current job pointer
     * effectively.
     * 
     * This may look inefficient but under normal operation it is expected that
     * the loops will stop right away, resulting in normal list appends
     * below. However, this code is necessary for reviving retired jobs and
     * for jobs which are created long after the first chunk of recipients
     * was read in-core (either of these can happen only for multi-transport
     * messages).
     * 
     * XXX Note that we test stack_parent rather than stack_level below. This
     * subtle difference allows us to enqueue the job in correct time order
     * with respect to orphaned children even after their original parent on
     * level zero is gone. Consequently, the early loop stop in candidate
     * selection works reliably, too. These are the reasons why we bother
     * with child adoption at all.
     */
    current = transport->job_current;
    for (next = 0, prev = transport->job_list.prev; prev;
	 next = prev, prev = prev->transport_peers.prev) {
	if (prev->stack_parent == 0) {
	    delay = message->queued_time - prev->message->queued_time;
	    if (delay >= 0)
		break;
	}
	if (current == prev)
	    current = 0;
    }
    list_prev = prev;
    list_next = next;

    unread = transport->job_next_unread;
    for (next = 0, prev = transport->job_bytime.prev; prev;
	 next = prev, prev = prev->time_peers.prev) {
	delay = message->queued_time - prev->message->queued_time;
	if (delay >= 0)
	    break;
	if (unread == prev)
	    unread = 0;
    }

    /*
     * Link the job into the proper place on the job lists and mark it so we
     * know it has been linked.
     */
    job->stack_level = 0;
    QMGR_LIST_LINK(transport->job_list, list_prev, job, list_next, transport_peers);
    QMGR_LIST_LINK(transport->job_bytime, prev, job, next, time_peers);

    /*
     * Update the current job pointer if necessary.
     */
    if (current == 0)
	transport->job_current = job;

    /*
     * Update the pointer to the first unread job on the job list and steal
     * the unused recipient slots from the old one.
     */
    if (unread == 0) {
	unread = transport->job_next_unread;
	transport->job_next_unread = job;
	if (unread != 0)
	    qmgr_job_move_limits(unread);
    }

    /*
     * Get as many recipient slots as possible. The excess will be returned
     * to the transport pool as soon as the exact amount required is known
     * (which is usually after all recipients have been read in core).
     */
    if (transport->rcpt_unused > 0) {
	job->rcpt_limit += transport->rcpt_unused;
	message->rcpt_limit += transport->rcpt_unused;
	transport->rcpt_unused = 0;
    }
}
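
The core technique here is the tail-first sorted insert: because jobs normally arrive in queued_time order, the backward scan almost always stops on the first comparison and the insert degenerates into a cheap append, while late-created or revived jobs are still spliced into the correct time position. The sketch below shows that technique on a bare doubly-linked list; the node/list types and insert_by_time() are illustrative, not the real QMGR_LIST_LINK machinery.

#include <stddef.h>

struct node {
    long    queued_time;
    struct node *next;
    struct node *prev;
};

struct list {
    struct node *head;
    struct node *tail;
};

static void insert_by_time(struct list *list, struct node *node)
{
    struct node *next = NULL;
    struct node *prev;

    /*
     * Walk from the tail toward the head and stop at the first element
     * that is not newer than the one being inserted. In the common
     * in-order case this stops immediately at the tail.
     */
    for (prev = list->tail; prev; next = prev, prev = prev->prev)
	if (node->queued_time - prev->queued_time >= 0)
	    break;

    /*
     * Splice between prev and next; either may be NULL at the list ends.
     */
    node->prev = prev;
    node->next = next;
    if (prev)
	prev->next = node;
    else
	list->head = node;
    if (next)
	next->prev = node;
    else
	list->tail = node;
}

int     main(void)
{
    struct node a = {10, NULL, NULL};
    struct node b = {30, NULL, NULL};
    struct node c = {20, NULL, NULL};	/* arrives out of order */
    struct list list = {NULL, NULL};

    insert_by_time(&list, &a);		/* append to empty list */
    insert_by_time(&list, &b);		/* common case: cheap append */
    insert_by_time(&list, &c);		/* rare case: lands between a and b */
    return (0);
}
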