static int flush_send_file_service(const char *queue_id)
{
    const char *myname = "flush_send_file_service";
    VSTRING *queue_file;
    struct utimbuf tbuf;
    static char qmgr_scan_trigger[] = {
        QMGR_REQ_SCAN_INCOMING,         /* scan incoming queue */
    };

    /*
     * Sanity check.
     */
    if (!mail_queue_id_ok(queue_id))
        return (FLUSH_STAT_BAD);

    if (msg_verbose)
        msg_info("%s: requesting delivery for queue_id %s", myname, queue_id);

    queue_file = vstring_alloc(30);
    tbuf.actime = tbuf.modtime = event_time();
    if (flush_one_file(queue_id, queue_file, &tbuf, UNTHROTTLE_AFTER) > 0)
        mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
                     qmgr_scan_trigger, sizeof(qmgr_scan_trigger));
    vstring_free(queue_file);

    return (FLUSH_STAT_OK);
}
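/*
 * The helper flush_one_file(), called above and again in flush_send_path()
 * below, is not shown in this excerpt. The sketch below is an assumption
 * about its behavior, inferred from how the callers use it: refresh the
 * queue file's time stamps and requeue it from the deferred queue to the
 * incoming queue, returning non-zero when a queue file was actually
 * scheduled. The name sketch_flush_one_file() is hypothetical, the "how"
 * flags are ignored here, and the real helper also deals with the
 * unthrottle strategies and races described in flush_send_path().
 */
static int sketch_flush_one_file(const char *queue_id, VSTRING *queue_file,
                                 struct utimbuf *tbuf, int unused_how)
{
    /* Build the deferred-queue pathname and refresh its time stamps. */
    (void) mail_queue_path(queue_file, MAIL_QUEUE_DEFERRED, queue_id);
    if (utime(STR(queue_file), tbuf) < 0)
        return (0);                     /* gone, or already being delivered */

    /* Move the file to the incoming queue so the queue manager sees it. */
    if (mail_queue_rename(queue_id, MAIL_QUEUE_DEFERRED, MAIL_QUEUE_INCOMING) < 0)
        return (0);
    return (1);
}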
int mail_flush_maildrop(void)
{
    static char wakeup[] = {TRIGGER_REQ_WAKEUP};

    /*
     * Trigger the pickup service.
     */
    return (mail_trigger(MAIL_CLASS_PUBLIC, var_pickup_service,
                         wakeup, sizeof(wakeup)));
}
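/*
 * Usage sketch (hypothetical caller, not taken from the Postfix source):
 * after a message has been deposited into the maildrop queue, wake up the
 * pickup service so the message is noticed before pickup's next periodic
 * scan. A failed trigger is not fatal; pickup will still find the file on
 * its own later.
 */
static void example_notify_pickup(void)
{
    if (mail_flush_maildrop() < 0)
        msg_warn("cannot wake up the %s service", var_pickup_service);
}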
int mail_flush_deferred(void)
{
    static char qmgr_trigger[] = {
        QMGR_REQ_FLUSH_DEAD,            /* all hosts, all transports */
        QMGR_REQ_SCAN_ALL,              /* all time stamps */
        QMGR_REQ_SCAN_DEFERRED,        /* scan deferred queue */
        QMGR_REQ_SCAN_INCOMING,         /* scan incoming queue */
    };

    /*
     * Trigger the queue manager.
     */
    return (mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
                         qmgr_trigger, sizeof(qmgr_trigger)));
}
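/*
 * Usage sketch (hypothetical, in the spirit of "postqueue -f"; the error
 * text and the fatal-exit policy are illustrative): ask the queue manager
 * to retry all deferred mail, and also wake up pickup so that anything
 * still sitting in the maildrop queue gets submitted as well.
 */
static void example_flush_all_queues(void)
{
    if (mail_flush_deferred() < 0 || mail_flush_maildrop() < 0)
        msg_fatal("cannot flush the mail queue - is the mail system running?");
}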
static int flush_send_path(const char *path, int how)
{
    const char *myname = "flush_send_path";
    VSTRING *queue_id;
    VSTRING *queue_file;
    VSTREAM *log;
    struct utimbuf tbuf;
    static char qmgr_flush_trigger[] = {
        QMGR_REQ_FLUSH_DEAD,            /* flush dead site/transport cache */
    };
    static char qmgr_scan_trigger[] = {
        QMGR_REQ_SCAN_INCOMING,         /* scan incoming queue */
    };
    HTABLE *dup_filter;
    int count;

    /*
     * Sanity check.
     */
    if (!mail_queue_id_ok(path))
        return (FLUSH_STAT_BAD);

    /*
     * Open the logfile. If the file does not exist, then there is no queued
     * mail for this destination.
     */
    if ((log = mail_queue_open(MAIL_QUEUE_FLUSH, path, O_RDWR, 0600)) == 0) {
        if (errno != ENOENT)
            msg_fatal("%s: open fast flush logfile %s: %m", myname, path);
        return (FLUSH_STAT_OK);
    }

    /*
     * We must lock the logfile, so that we don't lose information when it
     * is truncated. Unfortunately, this means that the file can be locked
     * for a significant amount of time. If things really get stuck the
     * Postfix watchdog will take care of it.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_EXCLUSIVE) < 0)
        msg_fatal("%s: lock fast flush logfile %s: %m", myname, path);

    /*
     * With the UNTHROTTLE_BEFORE strategy, we ask the queue manager to
     * unthrottle all transports and queues before we move a deferred queue
     * file to the incoming queue. This minimizes a race condition where the
     * queue manager seizes a queue file before it knows that we want to
     * flush that message.
     *
     * This reduces the race condition time window to a very small amount
     * (the flush server does not really know when the queue manager reads
     * its command fifo). But there is a worse race, where the queue manager
     * moves a deferred queue file to the active queue before we have a
     * chance to expedite its delivery.
     */
    if (how & UNTHROTTLE_BEFORE)
        mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
                     qmgr_flush_trigger, sizeof(qmgr_flush_trigger));

    /*
     * This is the part that dominates running time: schedule the listed
     * queue files for delivery by updating their file time stamps and by
     * moving them from the deferred queue to the incoming queue. This
     * should take no more than a couple of seconds under normal conditions.
     * Filter out duplicate queue file names to avoid hammering the file
     * system, with some finite limit on the amount of memory that we are
     * willing to sacrifice for duplicate filtering. Graceful degradation.
     *
     * By moving selected queue files from the deferred queue to the incoming
     * queue we optimize for the case where most deferred mail is for other
     * sites. If that assumption does not hold, i.e. all deferred mail is
     * for the same site, then doing a "fast flush" will cost more disk I/O
     * than a "slow flush" that delivers the entire deferred queue. This
     * penalty is only temporary - it will go away after we unite the
     * active queue and the incoming queue.
     */
    queue_id = vstring_alloc(10);
    queue_file = vstring_alloc(10);
    dup_filter = htable_create(10);
    tbuf.actime = tbuf.modtime = event_time();
    for (count = 0; vstring_get_nonl(queue_id, log) != VSTREAM_EOF; count++) {
        if (!mail_queue_id_ok(STR(queue_id))) {
            msg_warn("bad queue id \"%.30s...\" in fast flush logfile %s",
                     STR(queue_id), path);
            continue;
        }
        if (dup_filter->used >= FLUSH_DUP_FILTER_SIZE
            || htable_find(dup_filter, STR(queue_id)) == 0) {
            if (msg_verbose)
                msg_info("%s: logfile %s: update queue file %s time stamps",
                         myname, path, STR(queue_id));
            if (dup_filter->used <= FLUSH_DUP_FILTER_SIZE)
                htable_enter(dup_filter, STR(queue_id), 0);
            count += flush_one_file(STR(queue_id), queue_file, &tbuf, how);
        } else {
            if (msg_verbose)
                msg_info("%s: logfile %s: skip queue file %s as duplicate",
                         myname, path, STR(queue_id));
        }
    }
    htable_free(dup_filter, (void (*) (void *)) 0);
    vstring_free(queue_file);
    vstring_free(queue_id);

    /*
     * Truncate the fast flush log.
     */
    if (count > 0 && ftruncate(vstream_fileno(log), (off_t) 0) < 0)
        msg_fatal("%s: truncate fast flush logfile %s: %m", myname, path);

    /*
     * Workaround for noatime mounts. Use futimes() if available.
     */
    (void) utimes(VSTREAM_PATH(log), (struct timeval *) 0);

    /*
     * Request delivery and clean up.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_NONE) < 0)
        msg_fatal("%s: unlock fast flush logfile %s: %m", myname, path);
    if (vstream_fclose(log) != 0)
        msg_warn("%s: read fast flush logfile %s: %m", myname, path);
    if (count > 0) {
        if (msg_verbose)
            msg_info("%s: requesting delivery for logfile %s", myname, path);
        mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
                     qmgr_scan_trigger, sizeof(qmgr_scan_trigger));
    }
    return (FLUSH_STAT_OK);
}
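/*
 * Illustration (hypothetical helper, not part of this file): the trigger
 * buffers used above are nothing more than sequences of one-character
 * request codes that mail_trigger() writes to the queue manager's public
 * endpoint. A caller that wants both a dead-site/transport unthrottle and
 * an incoming-queue scan in a single trigger could combine the codes.
 */
static int example_unthrottle_and_scan(void)
{
    static char trigger[] = {
        QMGR_REQ_FLUSH_DEAD,            /* unthrottle dead sites/transports */
        QMGR_REQ_SCAN_INCOMING,         /* scan the incoming queue */
    };

    return (mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
                         trigger, sizeof(trigger)));
}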