Example #1: qmgr_message_assign — classic Postfix queue manager (no job/peer scheduling)
0
static void qmgr_message_assign(QMGR_MESSAGE *message)
{
    RECIPIENT_LIST rcpt_list = message->rcpt_list;
    RECIPIENT *rcpt;
    QMGR_ENTRY *cur_entry = 0;
    QMGR_QUEUE *rcpt_queue;

    /*
     * Bundle as many recipients into one delivery request as possible: as
     * long as a recipient resolves to the same site and transport as the
     * previous one, append it to the current queue entry's recipient list
     * instead of creating a new entry — provided the transport-specific
     * per-transaction recipient limit is not exceeded. A limit of zero
     * means "unlimited".
     */
#define LIMIT_OK(limit, count) ((limit) == 0 || ((count) < (limit)))

    for (rcpt = rcpt_list.info; rcpt < rcpt_list.info + rcpt_list.len; rcpt++) {

	/*
	 * Skip recipients whose transport or destination is dead (no
	 * resolved queue).
	 */
	rcpt_queue = rcpt->u.queue;
	if (rcpt_queue == 0)
	    continue;

	/*
	 * Start a new queue entry unless the current one can absorb this
	 * recipient: same queue, multi-recipient delivery allowed, and
	 * the recipient limit still permits it. All tests are side-effect
	 * free, so evaluation order does not matter.
	 */
	if (cur_entry == 0
	    || cur_entry->queue != rcpt_queue
	    || message->single_rcpt
	    || !LIMIT_OK(cur_entry->queue->transport->recipient_limit,
			 cur_entry->rcpt_list.len))
	    cur_entry = qmgr_entry_create(rcpt_queue, message);

	recipient_list_add(&cur_entry->rcpt_list, rcpt->offset,
			   rcpt->dsn_orcpt, rcpt->dsn_notify,
			   rcpt->orig_addr, rcpt->address);
	qmgr_recipient_count++;
    }

    /*
     * The per-message recipient list has been distributed over queue
     * entries; release it and reinitialize for the next batch.
     */
    recipient_list_free(&message->rcpt_list);
    recipient_list_init(&message->rcpt_list, RCPT_LIST_INIT_QUEUE);
}
Example #2: qmgr_message_assign — nqmgr variant with per-transport job/peer scheduling
0
/* qmgr_message_assign - distribute a message's resolved recipients over
 * per-transport jobs, per-destination peers, and delivery queue entries,
 * then invalidate stale job-candidate caches affected by the new load. */
static void qmgr_message_assign(QMGR_MESSAGE *message)
{
    RECIPIENT_LIST list = message->rcpt_list;
    RECIPIENT *recipient;
    QMGR_ENTRY *entry = 0;
    QMGR_QUEUE *queue;
    QMGR_JOB *job = 0;			/* cached: job of the previous recipient's transport */
    QMGR_PEER *peer = 0;		/* cached: peer of the previous recipient's queue */

    /*
     * Try to bundle as many recipients in a delivery request as we can. When
     * the recipient resolves to the same site and transport as an existing
     * recipient, do not create a new queue entry, just move that recipient
     * to the recipient list of the existing queue entry. All this provided
     * that we do not exceed the transport-specific limit on the number of
     * recipients per transaction. A limit of zero means "unlimited".
     */
#define LIMIT_OK(limit, count) ((limit) == 0 || ((count) < (limit)))

    for (recipient = list.info; recipient < list.info + list.len; recipient++) {

	/*
	 * Skip recipients with a dead transport or destination.
	 */
	if ((queue = recipient->u.queue) == 0)
	    continue;

	/*
	 * Lookup or instantiate the message job if necessary. A new job
	 * invalidates the cached peer, which belongs to the old job.
	 */
	if (job == 0 || queue->transport != job->transport) {
	    job = qmgr_job_obtain(message, queue->transport);
	    peer = 0;
	}

	/*
	 * Lookup or instantiate job peer if necessary.
	 */
	if (peer == 0 || queue != peer->queue)
	    peer = qmgr_peer_obtain(job, queue);

	/*
	 * Lookup old or instantiate new recipient entry. We try to reuse the
	 * last existing entry whenever the recipient limit permits.
	 */
	entry = peer->entry_list.prev;
	if (message->single_rcpt || entry == 0
	    || !LIMIT_OK(queue->transport->recipient_limit, entry->rcpt_list.len))
	    entry = qmgr_entry_create(peer, message);

	/*
	 * Add the recipient to the current entry and increase all those
	 * recipient counters accordingly.
	 */
	recipient_list_add(&entry->rcpt_list, recipient->offset,
			   recipient->dsn_orcpt, recipient->dsn_notify,
			   recipient->orig_addr, recipient->address);
	job->rcpt_count++;
	message->rcpt_count++;
	qmgr_recipient_count++;
    }

    /*
     * Release the message recipient list and reinitialize it for the next
     * time.
     */
    recipient_list_free(&message->rcpt_list);
    recipient_list_init(&message->rcpt_list, RCPT_LIST_INIT_QUEUE);

    /*
     * Note that even if qmgr_job_obtain() reset the job candidate cache of
     * all transports to which we assigned new recipients, this message may
     * have other jobs which we didn't touch at all this time. But the number
     * of unread recipients affecting the candidate selection might have
     * changed considerably, so we must invalidate the caches if it might be
     * of some use. Only jobs with unselected entries and not currently
     * tagged as blockers can affect candidate selection.
     */
    for (job = message->job_list.next; job; job = job->message_peers.next)
	if (job->selected_entries < job->read_entries
	    && job->blocker_tag != job->transport->blocker_tag)
	    job->transport->candidate_cache_current = 0;
}