Example #1
int     pass_trigger(const char *service, const char *buf, ssize_t len, int timeout)
{
    const char *myname = "pass_trigger";
    int     pass_fd[2];
    struct pass_trigger *pp;
    int     connect_fd;

    if (msg_verbose > 1)
	msg_info("%s: service %s", myname, service);

    /*
     * Connect...
     */
    if ((connect_fd = LOCAL_CONNECT(service, BLOCKING, timeout)) < 0) {
	if (msg_verbose)
	    msg_warn("%s: connect to %s: %m", myname, service);
	return (-1);
    }
    close_on_exec(connect_fd, CLOSE_ON_EXEC);

    /*
     * Create a pipe, and send one pipe end to the server.
     */
    if (pipe(pass_fd) < 0)
	msg_fatal("%s: pipe: %m", myname);
    close_on_exec(pass_fd[0], CLOSE_ON_EXEC);
    close_on_exec(pass_fd[1], CLOSE_ON_EXEC);
    if (LOCAL_SEND_FD(connect_fd, pass_fd[0]) < 0)
	msg_fatal("%s: send file descriptor: %m", myname);

    /*
     * Stash away context.
     */
    pp = (struct pass_trigger *) mymalloc(sizeof(*pp));
    pp->connect_fd = connect_fd;
    pp->service = mystrdup(service);
    pp->pass_fd[0] = pass_fd[0];
    pp->pass_fd[1] = pass_fd[1];

    /*
     * Write the request...
     */
    if (write_buf(pass_fd[1], buf, len, timeout) < 0
	|| write_buf(pass_fd[1], "", 1, timeout) < 0)
	if (msg_verbose)
	    msg_warn("%s: write to %s: %m", myname, service);

    /*
     * Wakeup when the peer disconnects, or when we lose patience.
     */
    if (timeout > 0)
	event_request_timer(pass_trigger_event, (char *) pp, timeout + 100);
    event_enable_read(connect_fd, pass_trigger_event, (char *) pp);
    return (0);
}
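
Example #1 arms the same callback twice: event_enable_read() watches connect_fd for the peer closing the pipe, and event_request_timer() puts a deadline on the same (callback, context) pair. The sketch below is not the real pass_trigger_event(); it only illustrates how such a paired callback usually tells the two wakeup reasons apart and tears both registrations down. It assumes the Postfix event(3), msg(3) and mymalloc(3) interfaces, in the newer void * callback flavor (several examples on this page use the older char * flavor).

#include <sys_defs.h>
#include <unistd.h>

#include <events.h>
#include <msg.h>
#include <mymalloc.h>

struct example_trigger {
    int     fd;                             /* descriptor being watched */
};

static void example_trigger_event(int event, void *context)
{
    struct example_trigger *tp = (struct example_trigger *) context;

    /*
     * Whichever wakeup fired first, drop both registrations: stop read
     * events on the descriptor and cancel the companion timer.
     */
    event_disable_readwrite(tp->fd);
    if (event == EVENT_TIME)
        msg_warn("request timed out");
    else
        event_cancel_timer(example_trigger_event, context);

    (void) close(tp->fd);
    myfree((void *) tp);
}

static void example_trigger_arm(struct example_trigger *tp, int timeout)
{
    event_enable_read(tp->fd, example_trigger_event, (void *) tp);
    if (timeout > 0)
        event_request_timer(example_trigger_event, (void *) tp, timeout);
}
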
Example #2
static void trigger_server_accept_pass(int unused_event, char *context)
{
    const char *myname = "trigger_server_accept_pass";
    int     listen_fd = CAST_CHAR_PTR_TO_INT(context);
    int     time_left = 0;
    int     fd;

    if (msg_verbose)
	msg_info("%s: trigger arrived", myname);

    /*
     * Read a message from a socket. Be prepared for accept() to fail because
     * some other process already got the connection. The socket is
     * non-blocking so we won't get stuck when multiple processes wake up.
     * Don't get stuck when the client connects but sends no data. Restart
     * the idle timer if this was a false alarm.
     */
    if (var_idle_limit > 0)
	time_left = event_cancel_timer(trigger_server_timeout, (char *) 0);

    if (trigger_server_pre_accept)
	trigger_server_pre_accept(trigger_server_name, trigger_server_argv);
    fd = pass_accept(listen_fd);
    if (trigger_server_lock != 0
	&& myflock(vstream_fileno(trigger_server_lock), INTERNAL_LOCK,
		   MYFLOCK_OP_NONE) < 0)
	msg_fatal("select unlock: %m");
    if (fd < 0) {
	if (errno != EAGAIN)
	    msg_error("accept connection: %m");
	if (time_left >= 0)
	    event_request_timer(trigger_server_timeout, (char *) 0, time_left);
	return;
    }
    close_on_exec(fd, CLOSE_ON_EXEC);
    if (read_wait(fd, 10) == 0)
	trigger_server_wakeup(fd);
    else if (time_left >= 0)
	event_request_timer(trigger_server_timeout, (char *) 0, time_left);
    close(fd);
}
Example #3
void    event_server_disconnect(VSTREAM *stream)
{
    if (msg_verbose)
	msg_info("connection closed fd %d", vstream_fileno(stream));
    if (event_server_pre_disconn)
	event_server_pre_disconn(stream, event_server_name, event_server_argv);
    (void) vstream_fclose(stream);
    client_count--;
    /* Avoid integer wrap-around in a persistent process.  */
    if (use_count < INT_MAX)
	use_count++;
    if (client_count == 0 && var_idle_limit > 0)
	event_request_timer(event_server_timeout, (char *) 0, var_idle_limit);
}
Example #4
static void master_throttle(MASTER_SERV *serv)
{

    /*
     * Perhaps the command to be run is defective, perhaps some configuration
     * is wrong, or perhaps the system is out of resources. Disable further
     * process creation attempts for a while.
     */
    if ((serv->flags & MASTER_FLAG_THROTTLE) == 0) {
	serv->flags |= MASTER_FLAG_THROTTLE;
	event_request_timer(master_unthrottle_wrapper, (void *) serv,
			    serv->throttle_delay);
	if (msg_verbose)
	    msg_info("throttling command %s", serv->path);
	master_avail_listen(serv);
    }
}
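
Example #4 (and example #14 further down) shows the throttle half of a flag-plus-timer pattern: set a "throttled" flag now, and let event_request_timer() schedule the code that clears it after the delay. The unthrottle wrapper itself is not on this page; the sketch below is only a schematic of the shape that timer callback takes, using the MASTER_SERV fields visible in example #4, not the actual Postfix implementation.

#include <sys_defs.h>
#include <events.h>
#include <msg.h>
#include "master.h"

static void example_unthrottle_wrapper(int unused_event, void *context)
{
    MASTER_SERV *serv = (MASTER_SERV *) context;

    /*
     * Undo what master_throttle() did above: clear the flag so that
     * process creation attempts are allowed again, and resume listening.
     */
    if (serv->flags & MASTER_FLAG_THROTTLE) {
        serv->flags &= ~MASTER_FLAG_THROTTLE;
        if (msg_verbose)
            msg_info("unthrottling command %s", serv->path);
        master_avail_listen(serv);
    }
}
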
Example #5
void    qmgr_queue_suspend(QMGR_QUEUE *queue, int delay)
{
    const char *myname = "qmgr_queue_suspend";

    /*
     * Sanity checks.
     */
    if (!QMGR_QUEUE_READY(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));
    if (queue->busy_refcount > 0)
	msg_panic("%s: queue is busy", myname);

    /*
     * Set the queue status to "suspended". No-one is supposed to remove a
     * queue in suspended state.
     */
    queue->window = QMGR_QUEUE_STAT_SUSPENDED;
    event_request_timer(qmgr_queue_resume, (char *) queue, delay);
}
Example #6
File: clnt_stream.c Project: ii0/postfix
VSTREAM *clnt_stream_access(CLNT_STREAM *clnt_stream)
{

    /*
     * Open a stream or restart the idle timer.
     * 
     * Important! Do not restart the TTL timer!
     */
    if (clnt_stream->vstream == 0) {
	clnt_stream_open(clnt_stream);
    } else if (readable(vstream_fileno(clnt_stream->vstream))) {
	clnt_stream_close(clnt_stream);
	clnt_stream_open(clnt_stream);
    } else {
	event_request_timer(clnt_stream_event, (void *) clnt_stream,
			    clnt_stream->timeout);
    }
    return (clnt_stream->vstream);
}
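
The comment in example #6 separates two deadlines on one connection: an idle timer that is restarted on every access, and a TTL timer that must be left alone. The restart works because event_request_timer() for a (callback, context) pair that is already registered resets that timer instead of adding a second one; giving each deadline its own callback is what keeps the TTL timer untouched. The sketch below uses a made-up structure of its own, not CLNT_STREAM internals, to show the idiom.

#include <sys_defs.h>
#include <unistd.h>
#include <events.h>

struct example_conn {
    int     fd;                             /* open connection */
    int     idle_limit;                     /* restarted on each access */
    int     ttl_limit;                      /* set once when opened */
};

static void example_idle_event(int, void *);
static void example_ttl_event(int, void *);

static void example_conn_close(struct example_conn *cp)
{
    /* Cancel whichever timer did not fire, then drop the connection. */
    event_cancel_timer(example_idle_event, (void *) cp);
    event_cancel_timer(example_ttl_event, (void *) cp);
    (void) close(cp->fd);
    cp->fd = -1;
}

static void example_idle_event(int unused_event, void *context)
{
    example_conn_close((struct example_conn *) context);
}

static void example_ttl_event(int unused_event, void *context)
{
    example_conn_close((struct example_conn *) context);
}

static void example_conn_open(struct example_conn *cp)
{
    /* ... connect and set cp->fd here ... */
    event_request_timer(example_idle_event, (void *) cp, cp->idle_limit);
    event_request_timer(example_ttl_event, (void *) cp, cp->ttl_limit);
}

static void example_conn_access(struct example_conn *cp)
{
    /* Restart only the idle timer; the TTL deadline stays where it is. */
    event_request_timer(example_idle_event, (void *) cp, cp->idle_limit);
}
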
Example #7
static void single_server_wakeup(int fd, HTABLE *attr)
{
    VSTREAM *stream;
    char   *tmp;

    /*
     * If the accept() succeeds, be sure to disable non-blocking I/O, because
     * the application is supposed to be single-threaded. Notice the master
     * of our (un)availability to service connection requests. Commit suicide
     * when the master process disconnected from us. Don't drop the already
     * accepted client request after "postfix reload"; that would be rude.
     */
    if (msg_verbose)
	msg_info("connection established");
    non_blocking(fd, BLOCKING);
    close_on_exec(fd, CLOSE_ON_EXEC);
    stream = vstream_fdopen(fd, O_RDWR);
    tmp = concatenate(single_server_name, " socket", (char *) 0);
    vstream_control(stream,
		    CA_VSTREAM_CTL_PATH(tmp),
		    CA_VSTREAM_CTL_CONTEXT((void *) attr),
		    CA_VSTREAM_CTL_END);
    myfree(tmp);
    timed_ipc_setup(stream);
    if (master_notify(var_pid, single_server_generation, MASTER_STAT_TAKEN) < 0)
	 /* void */ ;
    if (single_server_in_flow_delay && mail_flow_get(1) < 0)
	doze(var_in_flow_delay * 1000000);
    single_server_service(stream, single_server_name, single_server_argv);
    (void) vstream_fclose(stream);
    if (master_notify(var_pid, single_server_generation, MASTER_STAT_AVAIL) < 0)
	single_server_abort(EVENT_NULL_TYPE, EVENT_NULL_CONTEXT);
    if (msg_verbose)
	msg_info("connection closed");
    /* Avoid integer wrap-around in a persistent process.  */
    if (use_count < INT_MAX)
	use_count++;
    if (var_idle_limit > 0)
	event_request_timer(single_server_timeout, (void *) 0, var_idle_limit);
    if (attr)
	htable_free(attr, myfree);
}
Example #8
File: multi_server.c Project: LMDB/postfix
static void multi_server_wakeup(int fd, HTABLE *attr)
{
    VSTREAM *stream;
    char   *tmp;

#if defined(F_DUPFD) && (EVENTS_STYLE != EVENTS_STYLE_SELECT)
#ifndef THRESHOLD_FD_WORKAROUND
#define THRESHOLD_FD_WORKAROUND 128
#endif
    int     new_fd;

    /*
     * Leave some handles < FD_SETSIZE for DBMS libraries, in the unlikely
     * case of a multi-server with a thousand clients.
     */
    if (fd < THRESHOLD_FD_WORKAROUND) {
	if ((new_fd = fcntl(fd, F_DUPFD, THRESHOLD_FD_WORKAROUND)) < 0)
	    msg_fatal("fcntl F_DUPFD: %m");
	(void) close(fd);
	fd = new_fd;
    }
#endif
    if (msg_verbose)
	msg_info("connection established fd %d", fd);
    non_blocking(fd, BLOCKING);
    close_on_exec(fd, CLOSE_ON_EXEC);
    client_count++;
    stream = vstream_fdopen(fd, O_RDWR);
    tmp = concatenate(multi_server_name, " socket", (char *) 0);
    vstream_control(stream,
                    VSTREAM_CTL_PATH, tmp,
                    VSTREAM_CTL_CONTEXT, (char *) attr,
                    VSTREAM_CTL_END);
    myfree(tmp);
    timed_ipc_setup(stream);
    multi_server_saved_flags = vstream_flags(stream);
    if (multi_server_in_flow_delay && mail_flow_get(1) < 0)
	event_request_timer(multi_server_enable_read, (char *) stream,
			    var_in_flow_delay);
    else
	multi_server_enable_read(0, (char *) stream);
}
Example #9
static void scache_single_save_endp(SCACHE *scache, int endp_ttl,
				            const char *endp_label,
				            const char *endp_prop, int fd)
{
    SCACHE_SINGLE *sp = (SCACHE_SINGLE *) scache;
    const char *myname = "scache_single_save_endp";

    if (endp_ttl <= 0)
	msg_panic("%s: bad endp_ttl: %d", myname, endp_ttl);

    if (SCACHE_SINGLE_ENDP_BUSY(sp))
	scache_single_free_endp(sp);		/* dump the cached fd */

    vstring_strcpy(sp->endp.endp_label, endp_label);
    vstring_strcpy(sp->endp.endp_prop, endp_prop);
    sp->endp.fd = fd;
    event_request_timer(scache_single_expire_endp, (char *) sp, endp_ttl);

    if (msg_verbose)
	msg_info("%s: %s fd=%d", myname, endp_label, fd);
}
Example #10
static void trigger_server_wakeup(int fd)
{
    char    buf[TRIGGER_BUF_SIZE];
    int     len;

    /*
     * Commit suicide when the master process disconnected from us.
     */
    if (master_notify(var_pid, MASTER_STAT_TAKEN) < 0)
	trigger_server_abort(EVENT_NULL_TYPE, EVENT_NULL_CONTEXT);
    if (trigger_server_in_flow_delay && mail_flow_get(1) < 0)
	doze(var_in_flow_delay * 1000000);
    if ((len = read(fd, buf, sizeof(buf))) >= 0)
	trigger_server_service(buf, len, trigger_server_name,
			       trigger_server_argv);
    if (master_notify(var_pid, MASTER_STAT_AVAIL) < 0)
	trigger_server_abort(EVENT_NULL_TYPE, EVENT_NULL_CONTEXT);
    if (var_idle_limit > 0)
	event_request_timer(trigger_server_timeout, (char *) 0, var_idle_limit);
    use_count++;
}
Example #11
void    psc_early_tests(PSC_STATE *state)
{
    const char *myname = "psc_early_tests";

    /*
     * Report errors and progress in the context of this test.
     */
    PSC_BEGIN_TESTS(state, "tests before SMTP handshake");

    /*
     * Run a PREGREET test. Send half the greeting banner, by way of teaser,
     * then wait briefly to see if the client speaks before its turn.
     */
    if ((state->flags & PSC_STATE_FLAG_PREGR_TODO) != 0
	&& psc_teaser_greeting != 0
	&& PSC_SEND_REPLY(state, psc_teaser_greeting) != 0) {
	psc_hangup_event(state);
	return;
    }

    /*
     * Run a DNS blocklist query.
     */
    if ((state->flags & PSC_STATE_FLAG_DNSBL_TODO) != 0)
	state->dnsbl_index =
	    psc_dnsbl_request(state->smtp_client_addr, psc_early_dnsbl_event,
			      (char *) state);
    else
	state->dnsbl_index = -1;
    state->dnsbl_score = NO_DNSBL_SCORE;

    /*
     * Wait for the client to respond or for DNS lookup to complete.
     */
    if ((state->flags & PSC_STATE_FLAG_PREGR_TODO) != 0)
	PSC_READ_EVENT_REQUEST(vstream_fileno(state->smtp_client_stream),
		       psc_early_event, (char *) state, PSC_EFF_GREET_WAIT);
    else
	event_request_timer(psc_early_event, (char *) state, PSC_EFF_GREET_WAIT);
}
Example #12
static MYSQL_RES *plmysql_query(DICT_MYSQL *dict_mysql,
				        const char *name,
				        VSTRING *query)
{
    HOST   *host;
    MYSQL_RES *res = 0;

    while ((host = dict_mysql_get_active(dict_mysql)) != NULL) {
#if defined(MYSQL_VERSION_ID) && MYSQL_VERSION_ID >= 40000

	/*
	 * The active host is used to escape strings in the context of the
	 * active connection's character encoding.
	 */
	dict_mysql->active_host = host;
	VSTRING_RESET(query);
	VSTRING_TERMINATE(query);
	db_common_expand(dict_mysql->ctx, dict_mysql->query,
			 name, 0, query, dict_mysql_quote);
	dict_mysql->active_host = 0;
#endif

	if (!(mysql_query(host->db, vstring_str(query)))) {
	    if ((res = mysql_store_result(host->db)) == 0) {
		msg_warn("mysql query failed: %s", mysql_error(host->db));
		plmysql_down_host(host);
	    } else {
		if (msg_verbose)
		    msg_info("dict_mysql: successful query from host %s", host->hostname);
		event_request_timer(dict_mysql_event, (char *) host, IDLE_CONN_INTV);
		break;
	    }
	} else {
	    msg_warn("mysql query failed: %s", mysql_error(host->db));
	    plmysql_down_host(host);
	}
    }

    return res;
}
Example #13
static void qmgr_transport_event(int unused_event, void *context)
{
    QMGR_TRANSPORT_ALLOC *alloc = (QMGR_TRANSPORT_ALLOC *) context;

    /*
     * This routine notifies the application when the request given to
     * qmgr_transport_alloc() completes.
     */
    if (msg_verbose)
	msg_info("transport_event: %s", alloc->transport->name);

    /*
     * Connection request completed. Stop the watchdog timer.
     */
    event_cancel_timer(qmgr_transport_abort, context);

    /*
     * Disable further read events that end up calling this function, and
     * free up this pending connection pipeline slot.
     */
    if (alloc->stream) {
	event_disable_readwrite(vstream_fileno(alloc->stream));
	non_blocking(vstream_fileno(alloc->stream), BLOCKING);
    }
    alloc->transport->pending -= 1;

    /*
     * Notify the requestor.
     */
    if (alloc->transport->xport_rate_delay > 0) {
	if ((alloc->transport->flags & QMGR_TRANSPORT_STAT_RATE_LOCK) == 0)
	    msg_panic("transport_event: missing rate lock for transport %s",
		      alloc->transport->name);
	event_request_timer(qmgr_transport_rate_event, (void *) alloc,
			    alloc->transport->xport_rate_delay);
    } else {
	alloc->notify(alloc->transport, alloc->stream);
	myfree((void *) alloc);
    }
}
Example #14
void    qmgr_transport_throttle(QMGR_TRANSPORT *transport, DSN *dsn)
{
    const char *myname = "qmgr_transport_throttle";

    /*
     * We are unable to connect to a deliver process for this type of message
     * transport. Instead of hosing the system by retrying in a tight loop,
     * back off and disable this transport type for a while.
     */
    if ((transport->flags & QMGR_TRANSPORT_STAT_DEAD) == 0) {
	if (msg_verbose)
	    msg_info("%s: transport %s: status: %s reason: %s",
		     myname, transport->name, dsn->status, dsn->reason);
	transport->flags |= QMGR_TRANSPORT_STAT_DEAD;
	if (transport->dsn)
	    msg_panic("%s: transport %s: spurious reason: %s",
		      myname, transport->name, transport->dsn->reason);
	transport->dsn = DSN_COPY(dsn);
	event_request_timer(qmgr_transport_unthrottle_wrapper,
			    (void *) transport, var_transport_retry_time);
    }
}
Example #15
void    nbbio_enable_write(NBBIO *np, int timeout)
{
    const char *myname = "nbbio_enable_write";

    /*
     * Sanity checks.
     */
    if (np->flags & NBBIO_MASK_ACTIVE)
	msg_panic("%s: socket fd=%d is enabled for %s",
		  myname, np->fd, NBBIO_OP_NAME(np));
    if (timeout <= 0)
	msg_panic("%s: socket fd=%d bad timeout %d",
		  myname, np->fd, timeout);
    if (np->write_pend <= 0)
	msg_panic("%s: socket fd=%d: empty write buffer",
		  myname, np->fd);

    /*
     * Enable events.
     */
    event_enable_write(np->fd, nbbio_event, (char *) np);
    event_request_timer(nbbio_event, (char *) np, timeout);
    np->flags |= NBBIO_FLAG_WRITE;
}
Example #16
void    nbbio_enable_read(NBBIO *np, int timeout)
{
    const char *myname = "nbbio_enable_read";

    /*
     * Sanity checks.
     */
    if (np->flags & NBBIO_MASK_ACTIVE)
	msg_panic("%s: socket fd=%d is enabled for %s",
		  myname, np->fd, NBBIO_OP_NAME(np));
    if (timeout <= 0)
	msg_panic("%s: socket fd=%d: bad timeout %d",
		  myname, np->fd, timeout);
    if (np->read_pend >= np->bufsize)
	msg_panic("%s: socket fd=%d: read buffer is full",
		  myname, np->fd);

    /*
     * Enable events.
     */
    event_enable_read(np->fd, nbbio_event, (char *) np);
    event_request_timer(nbbio_event, (char *) np, timeout);
    np->flags |= NBBIO_FLAG_READ;
}
Example #17
File: tlsmgr.c Project: aosm/postfix
static void tlsmgr_cache_run_event(int unused_event, char *ctx)
{
    const char *myname = "tlsmgr_cache_run_event";
    TLSMGR_SCACHE *cache = (TLSMGR_SCACHE *) ctx;

    /*
     * This routine runs when it is time for another TLS session cache scan.
     * Make sure this routine gets called again in the future.
     * 
     * Don't start a new scan when the timer goes off while cache cleanup is
     * still in progress.
     */
    if (cache->cache_info->verbose)
	msg_info("%s: start TLS %s session cache cleanup",
		 myname, cache->cache_label);

    if (cache->cache_active == 0)
	cache->cache_active =
	    tls_scache_sequence(cache->cache_info, DICT_SEQ_FUN_FIRST,
				TLS_SCACHE_SEQUENCE_NOTHING);

    event_request_timer(tlsmgr_cache_run_event, (char *) cache,
			cache->cache_info->timeout);
}
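
Example #17 (like examples #20 and #28 further down) re-arms its own timer as the last step of the timer callback, which turns event_request_timer() into a simple periodic scheduler inside a single-threaded event loop. A minimal sketch of that idiom, with illustrative names and an illustrative interval:

#include <sys_defs.h>
#include <events.h>

#define EXAMPLE_SCAN_INTERVAL   3600    /* seconds; illustrative value */

static void example_periodic_event(int unused_event, void *context)
{
    /*
     * Do one bounded slice of work here, then re-register this same
     * callback so the work repeats. The delay is relative to "now", so
     * long-running work stretches the effective period.
     */
    event_request_timer(example_periodic_event, context,
                        EXAMPLE_SCAN_INTERVAL);
}

static void example_periodic_start(void *context)
{
    /* First activation, e.g. from a post-jail initialization routine. */
    event_request_timer(example_periodic_event, context,
                        EXAMPLE_SCAN_INTERVAL);
}
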
Example #18
static void post_jail_init(char *unused_name, char **unused_argv)
{

    /*
     * Dump and reset extreme usage every so often.
     */
    event_request_timer(anvil_status_update, (char *) 0, var_anvil_stat_time);

    /*
     * Initial client state tables.
     */
    anvil_remote_map = htable_create(1000);

    /*
     * Do not limit the number of client requests.
     */
    var_use_limit = 0;

    /*
     * Don't exit before the sampling interval ends.
     */
    if (var_idle_limit < var_anvil_time_unit)
	var_idle_limit = var_anvil_time_unit;
}
Example #19
void    dict_cache_control(DICT_CACHE *cp,...)
{
    const char *myname = "dict_cache_control";
    const char *last_done;
    time_t  next_interval;
    int     cache_cleanup_is_active = (cp->exp_validator && cp->exp_interval);
    va_list ap;
    int     name;

    /*
     * Update the control settings.
     */
    va_start(ap, cp);
    while ((name = va_arg(ap, int)) > 0) {
	switch (name) {
	case DICT_CACHE_CTL_END:
	    break;
	case DICT_CACHE_CTL_FLAGS:
	    cp->user_flags = va_arg(ap, int);
	    cp->log_delay = (cp->user_flags & DICT_CACHE_FLAG_VERBOSE) ?
		0 : DC_DEF_LOG_DELAY;
	    break;
	case DICT_CACHE_CTL_INTERVAL:
	    cp->exp_interval = va_arg(ap, int);
	    if (cp->exp_interval < 0)
		msg_panic("%s: bad %s cache cleanup interval %d",
			  myname, cp->name, cp->exp_interval);
	    break;
	case DICT_CACHE_CTL_VALIDATOR:
	    cp->exp_validator = va_arg(ap, DICT_CACHE_VALIDATOR_FN);
	    break;
	case DICT_CACHE_CTL_CONTEXT:
	    cp->exp_context = va_arg(ap, char *);
	    break;
	default:
	    msg_panic("%s: bad command: %d", myname, name);
	}
    }
    va_end(ap);

    /*
     * Schedule the cache cleanup thread.
     */
    if (cp->exp_interval && cp->exp_validator) {

	/*
	 * Sanity checks.
	 */
	if (cache_cleanup_is_active)
	    msg_panic("%s: %s cache cleanup is already scheduled",
		      myname, cp->name);

	/*
	 * The next start time depends on the last completion time.
	 */
#define NEXT_START(last, delta) ((delta) + (unsigned long) atol(last))
#define NOW	(time((time_t *) 0))		/* NOT: event_time() */

	if ((last_done = dict_get(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED)) == 0
	    || (next_interval = (NEXT_START(last_done, cp->exp_interval) - NOW)) < 0)
	    next_interval = 0;
	if (next_interval > cp->exp_interval)
	    next_interval = cp->exp_interval;
	if ((cp->user_flags & DICT_CACHE_FLAG_VERBOSE) && next_interval > 0)
	    msg_info("%s cache cleanup will start after %ds",
		     cp->name, (int) next_interval);
	event_request_timer(dict_cache_clean_event, (char *) cp,
			    (int) next_interval);
    }

    /*
     * Cancel the cache cleanup thread.
     */
    else if (cache_cleanup_is_active) {
	if (cp->retained || cp->dropped)
	    dict_cache_clean_stat_log_reset(cp, "partial");
	dict_cache_delete_behind_reset(cp);
	event_cancel_timer(dict_cache_clean_event, (char *) cp);
    }
}
Example #20
static void dict_cache_clean_event(int unused_event, char *cache_context)
{
    const char *myname = "dict_cache_clean_event";
    DICT_CACHE *cp = (DICT_CACHE *) cache_context;
    const char *cache_key;
    const char *cache_val;
    int     next_interval;
    VSTRING *stamp_buf;
    int     first_next;

    /*
     * We interleave cache cleanup with other processing, so that the
     * application's service remains available, with perhaps increased
     * latency.
     */

    /*
     * Start a new cache cleanup run.
     */
    if (cp->saved_curr_key == 0) {
	cp->retained = cp->dropped = 0;
	first_next = DICT_SEQ_FUN_FIRST;
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: start %s cache cleanup", myname, cp->name);
    }

    /*
     * Continue a cache cleanup run in progress.
     */
    else {
	first_next = DICT_SEQ_FUN_NEXT;
    }

    /*
     * Examine one cache entry.
     */
    if (dict_cache_sequence(cp, first_next, &cache_key, &cache_val) == 0) {
	if (cp->exp_validator(cache_key, cache_val, cp->exp_context) == 0) {
	    DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
	    cp->dropped++;
	    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
		msg_info("%s: drop %s cache entry for %s",
			 myname, cp->name, cache_key);
	} else {
	    cp->retained++;
	    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
		msg_info("%s: keep %s cache entry for %s",
			 myname, cp->name, cache_key);
	}
	next_interval = 0;
    }

    /*
     * Cache cleanup completed. Report vital statistics.
     */
    else if (cp->error != 0) {
	msg_warn("%s: cache cleanup scan terminated due to error", cp->name);
	dict_cache_clean_stat_log_reset(cp, "partial");
	next_interval = cp->exp_interval;
    } else {
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: done %s cache cleanup scan", myname, cp->name);
	dict_cache_clean_stat_log_reset(cp, "full");
	stamp_buf = vstring_alloc(100);
	vstring_sprintf(stamp_buf, "%ld", (long) event_time());
	dict_put(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED,
		 vstring_str(stamp_buf));
	vstring_free(stamp_buf);
	next_interval = cp->exp_interval;
    }
    event_request_timer(dict_cache_clean_event, cache_context, next_interval);
}
Example #21
void    nbbio_slumber(NBBIO *np, int timeout)
{
    np->flags &= ~NBBIO_MASK_ACTIVE;
    event_disable_readwrite(np->fd);
    event_request_timer(nbbio_event, (char *) np, timeout);
}
Example #22
int     main(int argc, char **argv)
{
    static VSTREAM *lock_fp;
    static VSTREAM *data_lock_fp;
    VSTRING *lock_path;
    VSTRING *data_lock_path;
    off_t   inherited_limit;
    int     debug_me = 0;
    int     ch;
    int     fd;
    int     n;
    int     test_lock = 0;
    VSTRING *why;
    WATCHDOG *watchdog;
    ARGV   *import_env;

    /*
     * Fingerprint executables and core dumps.
     */
    MAIL_VERSION_STAMP_ALLOCATE;

    /*
     * Initialize.
     */
    umask(077);					/* never fails! */

    /*
     * Process environment options as early as we can.
     */
    if (getenv(CONF_ENV_VERB))
	msg_verbose = 1;
    if (getenv(CONF_ENV_DEBUG))
	debug_me = 1;

    /*
     * Don't die when a process goes away unexpectedly.
     */
    signal(SIGPIPE, SIG_IGN);

    /*
     * Strip and save the process name for diagnostics etc.
     */
    var_procname = mystrdup(basename(argv[0]));

    /*
     * When running a child process, don't leak any open files that were
     * leaked to us by our own (privileged) parent process. Descriptors 0-2
     * are taken care of after we have initialized error logging.
     * 
     * Some systems such as AIX have a huge per-process open file limit. In
     * those cases, limit the search for potential file descriptor leaks to
     * just the first couple hundred.
     * 
     * The Debian post-installation script passes an open file descriptor into
     * the master process and waits forever for someone to close it. Because
     * of this we have to close descriptors > 2, and pray that doing so does
     * not break things.
     */
    closefrom(3);

    /*
     * Initialize logging and exit handler.
     */
    msg_syslog_init(mail_task(var_procname), LOG_PID, LOG_FACILITY);

    /*
     * Check the Postfix library version as soon as we enable logging.
     */
    MAIL_VERSION_CHECK;

    /*
     * The mail system must be run by the superuser so it can revoke
     * privileges for selected operations. That's right - it takes privileges
     * to toss privileges.
     */
    if (getuid() != 0)
	msg_fatal("the master command is reserved for the superuser");
    if (unsafe() != 0)
	msg_fatal("the master command must not run as a set-uid process");

    /*
     * Process JCL.
     */
    while ((ch = GETOPT(argc, argv, "c:Dde:tv")) > 0) {
	switch (ch) {
	case 'c':
	    if (setenv(CONF_ENV_PATH, optarg, 1) < 0)
		msg_fatal("out of memory");
	    break;
	case 'd':
	    master_detach = 0;
	    break;
	case 'e':
	    event_request_timer(master_exit_event, (char *) 0, atoi(optarg));
	    break;
	case 'D':
	    debug_me = 1;
	    break;
	case 't':
	    test_lock = 1;
	    break;
	case 'v':
	    msg_verbose++;
	    break;
	default:
	    usage(argv[0]);
	    /* NOTREACHED */
	}
    }

    /*
     * This program takes no other arguments.
     */
    if (argc > optind)
	usage(argv[0]);

    /*
     * If started from a terminal, get rid of any tty association. This also
     * means that all errors and warnings must go to the syslog daemon.
     */
    if (master_detach)
	for (fd = 0; fd < 3; fd++) {
	    (void) close(fd);
	    if (open("/dev/null", O_RDWR, 0) != fd)
		msg_fatal("open /dev/null: %m");
	}

    /*
     * Run in a separate process group, so that "postfix stop" can terminate
     * all MTA processes cleanly. Give up if we can't separate from our
     * parent process. We're not supposed to blow away the parent.
     */
    if (debug_me == 0 && master_detach != 0 && setsid() == -1 && getsid(0) != getpid())
	msg_fatal("unable to set session and process group ID: %m");

    /*
     * Make some room for plumbing with file descriptors. XXX This breaks
     * when a service listens on many ports. In order to do this right we
     * must change the master-child interface so that descriptors do not need
     * to have fixed numbers.
     * 
     * In a child we need two descriptors for the flow control pipe, one for
     * child->master status updates and at least one for listening.
     */
    for (n = 0; n < 5; n++) {
	if (close_on_exec(dup(0), CLOSE_ON_EXEC) < 0)
	    msg_fatal("dup(0): %m");
    }

    /*
     * Final initializations. Unfortunately, we must read the global Postfix
     * configuration file after doing command-line processing, so that we get
     * consistent results when we SIGHUP the server to reload configuration
     * files.
     */
    master_vars_init();

    /*
     * In case of multi-protocol support. This needs to be done because
     * master does not invoke mail_params_init() (it was written before that
     * code existed).
     */
    (void) inet_proto_init(VAR_INET_PROTOCOLS, var_inet_protocols);

    /*
     * Environment import filter, to enforce consistent behavior whether
     * Postfix is started by hand, or at system boot time.
     */
    import_env = argv_split(var_import_environ, ", \t\r\n");
    clean_env(import_env->argv);
    argv_free(import_env);

    if ((inherited_limit = get_file_limit()) < 0)
	set_file_limit(OFF_T_MAX);

    if (chdir(var_queue_dir))
	msg_fatal("chdir %s: %m", var_queue_dir);

    /*
     * Lock down the master.pid file. In test mode, no file means that it
     * isn't locked.
     */
    lock_path = vstring_alloc(10);
    data_lock_path = vstring_alloc(10);
    why = vstring_alloc(10);

    vstring_sprintf(lock_path, "%s/%s.pid", DEF_PID_DIR, var_procname);
    if (test_lock && access(vstring_str(lock_path), F_OK) < 0)
	exit(0);
    lock_fp = open_lock(vstring_str(lock_path), O_RDWR | O_CREAT, 0644, why);
    if (test_lock)
	exit(lock_fp ? 0 : 1);
    if (lock_fp == 0)
	msg_fatal("open lock file %s: %s",
		  vstring_str(lock_path), vstring_str(why));
    vstream_fprintf(lock_fp, "%*lu\n", (int) sizeof(unsigned long) * 4,
		    (unsigned long) var_pid);
    if (vstream_fflush(lock_fp))
	msg_fatal("cannot update lock file %s: %m", vstring_str(lock_path));
    close_on_exec(vstream_fileno(lock_fp), CLOSE_ON_EXEC);

    /*
     * Lock down the Postfix-writable data directory.
     */
    vstring_sprintf(data_lock_path, "%s/%s.lock", var_data_dir, var_procname);
    set_eugid(var_owner_uid, var_owner_gid);
    data_lock_fp =
	open_lock(vstring_str(data_lock_path), O_RDWR | O_CREAT, 0644, why);
    set_ugid(getuid(), getgid());
    if (data_lock_fp == 0)
	msg_fatal("open lock file %s: %s",
		  vstring_str(data_lock_path), vstring_str(why));
    vstream_fprintf(data_lock_fp, "%*lu\n", (int) sizeof(unsigned long) * 4,
		    (unsigned long) var_pid);
    if (vstream_fflush(data_lock_fp))
	msg_fatal("cannot update lock file %s: %m", vstring_str(data_lock_path));
    close_on_exec(vstream_fileno(data_lock_fp), CLOSE_ON_EXEC);

    /*
     * Clean up.
     */
    vstring_free(why);
    vstring_free(lock_path);
    vstring_free(data_lock_path);

    /*
     * Optionally start the debugger on ourself.
     */
    if (debug_me)
	debug_process();

    /*
     * Finish initialization, last part. We must process configuration files
     * after processing command-line parameters, so that we get consistent
     * results when we SIGHUP the server to reload configuration files.
     */
    master_config();
    master_sigsetup();
    master_flow_init();
    msg_info("daemon started -- version %s, configuration %s",
	     var_mail_version, var_config_dir);

    /*
     * Process events. The event handler will execute the read/write/timer
     * action routines. Whenever something has happened, see if we received
     * any signal in the mean time. Although the master process appears to do
     * multiple things at the same time, it really is all a single thread, so
     * that there are no concurrency conflicts within the master process.
     */
#define MASTER_WATCHDOG_TIME	1000

    watchdog = watchdog_create(MASTER_WATCHDOG_TIME, (WATCHDOG_FN) 0, (char *) 0);
    for (;;) {
#ifdef HAS_VOLATILE_LOCKS
	if (myflock(vstream_fileno(lock_fp), INTERNAL_LOCK,
		    MYFLOCK_OP_EXCLUSIVE) < 0)
	    msg_fatal("refresh exclusive lock: %m");
	if (myflock(vstream_fileno(data_lock_fp), INTERNAL_LOCK,
		    MYFLOCK_OP_EXCLUSIVE) < 0)
	    msg_fatal("refresh exclusive lock: %m");
#endif
	watchdog_start(watchdog);		/* same as trigger servers */
	event_loop(MASTER_WATCHDOG_TIME / 2);
	if (master_gotsighup) {
	    msg_info("reload -- version %s, configuration %s",
		     var_mail_version, var_config_dir);
	    master_gotsighup = 0;		/* this first */
	    master_vars_init();			/* then this */
	    master_refresh();			/* then this */
	}
	if (master_gotsigchld) {
	    if (msg_verbose)
		msg_info("got sigchld");
	    master_gotsigchld = 0;		/* this first */
	    master_reap_child();		/* then this */
	}
    }
}
Example #23
int     main(int argc, char **argv)
{
    int     sock;
    int     backlog;
    int     ch;
    int     ttl;
    const char *protocols = INET_PROTO_NAME_ALL;

    /*
     * Fingerprint executables and core dumps.
     */
    MAIL_VERSION_STAMP_ALLOCATE;

    /*
     * Fix 20051207.
     */
    signal(SIGPIPE, SIG_IGN);

    /*
     * Initialize diagnostics.
     */
    msg_vstream_init(argv[0], VSTREAM_ERR);

    /*
     * Parse JCL.
     */
    while ((ch = GETOPT(argc, argv, "46cvx:")) > 0) {
	switch (ch) {
	case '4':
	    protocols = INET_PROTO_NAME_IPV4;
	    break;
	case '6':
	    protocols = INET_PROTO_NAME_IPV6;
	    break;
	case 'c':
	    count_deliveries++;
	    break;
	case 'v':
	    msg_verbose++;
	    break;
	case 'x':
	    if ((ttl = atoi(optarg)) <= 0)
		usage(argv[0]);
	    event_request_timer(terminate, (void *) 0, ttl);
	    break;
	default:
	    usage(argv[0]);
	}
    }
    if (argc - optind != 2)
	usage(argv[0]);
    if ((backlog = atoi(argv[optind + 1])) <= 0)
	usage(argv[0]);

    /*
     * Initialize.
     */
    (void) inet_proto_init("protocols", protocols);
    buffer = vstring_alloc(1024);
    if (strncmp(argv[optind], "unix:", 5) == 0) {
	sock = unix_listen(argv[optind] + 5, backlog, BLOCKING);
    } else {
	if (strncmp(argv[optind], "inet:", 5) == 0)
	    argv[optind] += 5;
	sock = inet_listen(argv[optind], backlog, BLOCKING);
    }

    /*
     * Start the event handler.
     */
    event_enable_read(sock, connect_event, CAST_INT_TO_VOID_PTR(sock));
    for (;;)
	event_loop(-1);
}
Example #24
static int tlsp_eval_tls_error(TLSP_STATE *state, int err)
{
    int     ciphertext_fd = state->ciphertext_fd;

    /*
     * The ciphertext file descriptor is in non-blocking mode, meaning that
     * each SSL_accept/connect/read/write/shutdown request may return an
     * "error" indication that it needs to read or write more ciphertext. The
     * purpose of this routine is to translate those "error" indications into
     * the appropriate read/write/timeout event requests.
     */
    switch (err) {

    /*
     * No error from SSL_read and SSL_write means that the plaintext
     * output buffer is full and that the plaintext input buffer is
     * empty. Stop read/write events on the ciphertext stream. Keep the
     * timer alive as a safety mechanism for the case that the plaintext
     * pseudothreads get stuck.
     */
    case SSL_ERROR_NONE:
        if (state->ssl_last_err != SSL_ERROR_NONE) {
            event_disable_readwrite(ciphertext_fd);
            event_request_timer(tlsp_ciphertext_event, (void *) state,
                                state->timeout);
            state->ssl_last_err = SSL_ERROR_NONE;
        }
        return (0);

    /*
     * The TLS engine wants to write to the network. Turn on
     * write/timeout events on the ciphertext stream.
     */
    case SSL_ERROR_WANT_WRITE:
        if (state->ssl_last_err == SSL_ERROR_WANT_READ)
            event_disable_readwrite(ciphertext_fd);
        if (state->ssl_last_err != SSL_ERROR_WANT_WRITE) {
            event_enable_write(ciphertext_fd, tlsp_ciphertext_event,
                               (void *) state);
            state->ssl_last_err = SSL_ERROR_WANT_WRITE;
        }
        event_request_timer(tlsp_ciphertext_event, (void *) state,
                            state->timeout);
        return (0);

    /*
     * The TLS engine wants to read from the network. Turn on
     * read/timeout events on the ciphertext stream.
     */
    case SSL_ERROR_WANT_READ:
        if (state->ssl_last_err == SSL_ERROR_WANT_WRITE)
            event_disable_readwrite(ciphertext_fd);
        if (state->ssl_last_err != SSL_ERROR_WANT_READ) {
            event_enable_read(ciphertext_fd, tlsp_ciphertext_event,
                              (void *) state);
            state->ssl_last_err = SSL_ERROR_WANT_READ;
        }
        event_request_timer(tlsp_ciphertext_event, (void *) state,
                            state->timeout);
        return (0);

    /*
     * Some error. Self-destruct. This automagically cleans up all
     * pending read/write and timeout event requests, making state a
     * dangling pointer.
     */
    case SSL_ERROR_SSL:
        tls_print_errors();
    /* FALLTHROUGH */
    default:
        tlsp_state_free(state);
        return (-1);
    }
}
Example #25
File: multi_server.c Project: LMDB/postfix
NORETURN multi_server_main(int argc, char **argv, MULTI_SERVER_FN service,...)
{
    const char *myname = "multi_server_main";
    VSTREAM *stream = 0;
    char   *root_dir = 0;
    char   *user_name = 0;
    int     debug_me = 0;
    int     daemon_mode = 1;
    char   *service_name = basename(argv[0]);
    int     delay;
    int     c;
    int     fd;
    va_list ap;
    MAIL_SERVER_INIT_FN pre_init = 0;
    MAIL_SERVER_INIT_FN post_init = 0;
    MAIL_SERVER_LOOP_FN loop = 0;
    int     key;
    char   *transport = 0;

#if 0
    char   *lock_path;
    VSTRING *why;

#endif
    int     alone = 0;
    int     zerolimit = 0;
    WATCHDOG *watchdog;
    char   *oname_val;
    char   *oname;
    char   *oval;
    const char *err;
    char   *generation;
    int     msg_vstream_needed = 0;
    int     redo_syslog_init = 0;

    /*
     * Process environment options as early as we can.
     */
    if (getenv(CONF_ENV_VERB))
	msg_verbose = 1;
    if (getenv(CONF_ENV_DEBUG))
	debug_me = 1;

    /*
     * Don't die when a process goes away unexpectedly.
     */
    signal(SIGPIPE, SIG_IGN);

    /*
     * Don't die for frivolous reasons.
     */
#ifdef SIGXFSZ
    signal(SIGXFSZ, SIG_IGN);
#endif

    /*
     * May need this every now and then.
     */
    var_procname = mystrdup(basename(argv[0]));
    set_mail_conf_str(VAR_PROCNAME, var_procname);

    /*
     * Initialize logging and exit handler. Do the syslog first, so that its
     * initialization completes before we enter the optional chroot jail.
     */
    msg_syslog_init(mail_task(var_procname), LOG_PID, LOG_FACILITY);
    if (msg_verbose)
	msg_info("daemon started");

    /*
     * Check the Postfix library version as soon as we enable logging.
     */
    MAIL_VERSION_CHECK;

    /*
     * Initialize from the configuration file. Allow command-line options to
     * override compiled-in defaults or configured parameter values.
     */
    mail_conf_suck();

    /*
     * Register dictionaries that use higher-level interfaces and protocols.
     */
    mail_dict_init();
 
    /*
     * After database open error, continue execution with reduced
     * functionality.
     */
    dict_allow_surrogate = 1;

    /*
     * Pick up policy settings from master process. Shut up error messages to
     * stderr, because no-one is going to see them.
     */
    opterr = 0;
    while ((c = GETOPT(argc, argv, "cdDi:lm:n:o:s:St:uvVz")) > 0) {
	switch (c) {
	case 'c':
	    root_dir = "setme";
	    break;
	case 'd':
	    daemon_mode = 0;
	    break;
	case 'D':
	    debug_me = 1;
	    break;
	case 'i':
	    mail_conf_update(VAR_MAX_IDLE, optarg);
	    break;
	case 'l':
	    alone = 1;
	    break;
	case 'm':
	    mail_conf_update(VAR_MAX_USE, optarg);
	    break;
	case 'n':
	    service_name = optarg;
	    break;
	case 'o':
	    oname_val = mystrdup(optarg);
	    if ((err = split_nameval(oname_val, &oname, &oval)) != 0)
		msg_fatal("invalid \"-o %s\" option value: %s", optarg, err);
	    mail_conf_update(oname, oval);
	    if (strcmp(oname, VAR_SYSLOG_NAME) == 0)
		redo_syslog_init = 1;
	    myfree(oname_val);
	    break;
	case 's':
	    if ((socket_count = atoi(optarg)) <= 0)
		msg_fatal("invalid socket_count: %s", optarg);
	    break;
	case 'S':
	    stream = VSTREAM_IN;
	    break;
	case 'u':
	    user_name = "setme";
	    break;
	case 't':
	    transport = optarg;
	    break;
	case 'v':
	    msg_verbose++;
	    break;
	case 'V':
	    if (++msg_vstream_needed == 1)
		msg_vstream_init(mail_task(var_procname), VSTREAM_ERR);
	    break;
	case 'z':
	    zerolimit = 1;
	    break;
	default:
	    msg_fatal("invalid option: %c", c);
	    break;
	}
    }

    /*
     * Initialize generic parameters.
     */
    mail_params_init();
    if (redo_syslog_init)
	msg_syslog_init(mail_task(var_procname), LOG_PID, LOG_FACILITY);

    /*
     * If not connected to stdin, stdin must not be a terminal.
     */
    if (daemon_mode && stream == 0 && isatty(STDIN_FILENO)) {
	msg_vstream_init(var_procname, VSTREAM_ERR);
	msg_fatal("do not run this command by hand");
    }

    /*
     * Application-specific initialization.
     */
    va_start(ap, service);
    while ((key = va_arg(ap, int)) != 0) {
	switch (key) {
	case MAIL_SERVER_INT_TABLE:
	    get_mail_conf_int_table(va_arg(ap, CONFIG_INT_TABLE *));
	    break;
	case MAIL_SERVER_LONG_TABLE:
	    get_mail_conf_long_table(va_arg(ap, CONFIG_LONG_TABLE *));
	    break;
	case MAIL_SERVER_STR_TABLE:
	    get_mail_conf_str_table(va_arg(ap, CONFIG_STR_TABLE *));
	    break;
	case MAIL_SERVER_BOOL_TABLE:
	    get_mail_conf_bool_table(va_arg(ap, CONFIG_BOOL_TABLE *));
	    break;
	case MAIL_SERVER_TIME_TABLE:
	    get_mail_conf_time_table(va_arg(ap, CONFIG_TIME_TABLE *));
	    break;
	case MAIL_SERVER_RAW_TABLE:
	    get_mail_conf_raw_table(va_arg(ap, CONFIG_RAW_TABLE *));
	    break;
	case MAIL_SERVER_NINT_TABLE:
	    get_mail_conf_nint_table(va_arg(ap, CONFIG_NINT_TABLE *));
	    break;
	case MAIL_SERVER_NBOOL_TABLE:
	    get_mail_conf_nbool_table(va_arg(ap, CONFIG_NBOOL_TABLE *));
	    break;
	case MAIL_SERVER_PRE_INIT:
	    pre_init = va_arg(ap, MAIL_SERVER_INIT_FN);
	    break;
	case MAIL_SERVER_POST_INIT:
	    post_init = va_arg(ap, MAIL_SERVER_INIT_FN);
	    break;
	case MAIL_SERVER_LOOP:
	    loop = va_arg(ap, MAIL_SERVER_LOOP_FN);
	    break;
	case MAIL_SERVER_EXIT:
	    multi_server_onexit = va_arg(ap, MAIL_SERVER_EXIT_FN);
	    break;
	case MAIL_SERVER_PRE_ACCEPT:
	    multi_server_pre_accept = va_arg(ap, MAIL_SERVER_ACCEPT_FN);
	    break;
	case MAIL_SERVER_PRE_DISCONN:
	    multi_server_pre_disconn = va_arg(ap, MAIL_SERVER_DISCONN_FN);
	    break;
	case MAIL_SERVER_IN_FLOW_DELAY:
	    multi_server_in_flow_delay = 1;
	    break;
	case MAIL_SERVER_SOLITARY:
	    if (stream == 0 && !alone)
		msg_fatal("service %s requires a process limit of 1",
			  service_name);
	    break;
	case MAIL_SERVER_UNLIMITED:
	    if (stream == 0 && !zerolimit)
		msg_fatal("service %s requires a process limit of 0",
			  service_name);
	    break;
	case MAIL_SERVER_PRIVILEGED:
	    if (user_name)
		msg_fatal("service %s requires privileged operation",
			  service_name);
	    break;
	default:
	    msg_panic("%s: unknown argument type: %d", myname, key);
	}
    }
    va_end(ap);

    if (root_dir)
	root_dir = var_queue_dir;
    if (user_name)
	user_name = var_mail_owner;

    /*
     * Can options be required?
     */
    if (stream == 0) {
	if (transport == 0)
	    msg_fatal("no transport type specified");
	if (strcasecmp(transport, MASTER_XPORT_NAME_INET) == 0)
	    multi_server_accept = multi_server_accept_inet;
	else if (strcasecmp(transport, MASTER_XPORT_NAME_UNIX) == 0)
	    multi_server_accept = multi_server_accept_local;
#ifdef MASTER_XPORT_NAME_PASS
	else if (strcasecmp(transport, MASTER_XPORT_NAME_PASS) == 0)
	    multi_server_accept = multi_server_accept_pass;
#endif
	else
	    msg_fatal("unsupported transport type: %s", transport);
    }

    /*
     * Retrieve process generation from environment.
     */
    if ((generation = getenv(MASTER_GEN_NAME)) != 0) {
	if (!alldig(generation))
	    msg_fatal("bad generation: %s", generation);
	OCTAL_TO_UNSIGNED(multi_server_generation, generation);
	if (msg_verbose)
	    msg_info("process generation: %s (%o)",
		     generation, multi_server_generation);
    }

    /*
     * Optionally start the debugger on ourself.
     */
    if (debug_me)
	debug_process();

    /*
     * Traditionally, BSD select() can't handle multiple processes selecting
     * on the same socket, and wakes up every process in select(). See TCP/IP
     * Illustrated volume 2 page 532. We avoid select() collisions with an
     * external lock file.
     */

    /*
     * XXX Can't compete for exclusive access to the listen socket because we
     * also have to monitor existing client connections for service requests.
     */
#if 0
    if (stream == 0 && !alone) {
	lock_path = concatenate(DEF_PID_DIR, "/", transport,
				".", service_name, (char *) 0);
	why = vstring_alloc(1);
	if ((multi_server_lock = safe_open(lock_path, O_CREAT | O_RDWR, 0600,
				      (struct stat *) 0, -1, -1, why)) == 0)
	    msg_fatal("open lock file %s: %s", lock_path, vstring_str(why));
	close_on_exec(vstream_fileno(multi_server_lock), CLOSE_ON_EXEC);
	myfree(lock_path);
	vstring_free(why);
    }
#endif

    /*
     * Set up call-back info.
     */
    multi_server_service = service;
    multi_server_name = service_name;
    multi_server_argv = argv + optind;

    /*
     * Run pre-jail initialization.
     */
    if (chdir(var_queue_dir) < 0)
	msg_fatal("chdir(\"%s\"): %m", var_queue_dir);
    if (pre_init)
	pre_init(multi_server_name, multi_server_argv);

    /*
     * Optionally, restrict the damage that this process can do.
     */
    resolve_local_init();
    tzset();
    chroot_uid(root_dir, user_name);

    /*
     * Run post-jail initialization.
     */
    if (post_init)
	post_init(multi_server_name, multi_server_argv);

    /*
     * Are we running as a one-shot server with the client connection on
     * standard input? If so, make sure the output is written to stdout so as
     * to satisfy common expectation.
     */
    if (stream != 0) {
	vstream_control(stream,
			VSTREAM_CTL_DOUBLE,
			VSTREAM_CTL_WRITE_FD, STDOUT_FILENO,
			VSTREAM_CTL_END);
	service(stream, multi_server_name, multi_server_argv);
	vstream_fflush(stream);
	multi_server_exit();
    }

    /*
     * Running as a semi-resident server. Service connection requests.
     * Terminate when we have serviced a sufficient number of clients, when
     * no-one has been talking to us for a configurable amount of time, or
     * when the master process terminated abnormally.
     */
    if (var_idle_limit > 0)
	event_request_timer(multi_server_timeout, (char *) 0, var_idle_limit);
    for (fd = MASTER_LISTEN_FD; fd < MASTER_LISTEN_FD + socket_count; fd++) {
	event_enable_read(fd, multi_server_accept, CAST_INT_TO_CHAR_PTR(fd));
	close_on_exec(fd, CLOSE_ON_EXEC);
    }
    event_enable_read(MASTER_STATUS_FD, multi_server_abort, (char *) 0);
    close_on_exec(MASTER_STATUS_FD, CLOSE_ON_EXEC);
    close_on_exec(MASTER_FLOW_READ, CLOSE_ON_EXEC);
    close_on_exec(MASTER_FLOW_WRITE, CLOSE_ON_EXEC);
    watchdog = watchdog_create(var_daemon_timeout, (WATCHDOG_FN) 0, (char *) 0);

    /*
     * The event loop, at last.
     */
    while (var_use_limit == 0 || use_count < var_use_limit || client_count > 0) {
	if (multi_server_lock != 0) {
	    watchdog_stop(watchdog);
	    if (myflock(vstream_fileno(multi_server_lock), INTERNAL_LOCK,
			MYFLOCK_OP_EXCLUSIVE) < 0)
		msg_fatal("select lock: %m");
	}
	watchdog_start(watchdog);
	delay = loop ? loop(multi_server_name, multi_server_argv) : -1;
	event_loop(delay);
    }
    multi_server_exit();
}
Example #26
int     psc_dnsbl_request(const char *client_addr,
			          void (*callback) (int, void *),
			          void *context)
{
    const char *myname = "psc_dnsbl_request";
    int     fd;
    VSTREAM *stream;
    HTABLE_INFO **ht;
    PSC_DNSBL_SCORE *score;
    HTABLE_INFO *hash_node;
    static int request_count;

    /*
     * Some spambots make several connections at nearly the same time,
     * causing their pregreet delays to overlap. Such connections can share
     * the efforts of DNSBL lookup.
     * 
     * We store a reference-counted DNSBL score under its client IP address. We
     * increment the reference count with each score request, and decrement
     * the reference count with each score retrieval.
     * 
     * Do not notify the requestor NOW when the DNS replies are already in.
     * Reason: we must not make a backwards call while we are still in the
     * middle of executing the corresponding forward call. Instead we create
     * a zero-delay timer request and call the notification function from
     * there.
     * 
     * psc_dnsbl_request() could instead return a result value to indicate that
     * the DNSBL score is already available, but that would complicate the
     * caller with two different notification code paths: one asynchronous
     * code path via the callback invocation, and one synchronous code path
     * via the psc_dnsbl_request() result value. That would be a source of
     * future bugs.
     */
    if ((hash_node = htable_locate(dnsbl_score_cache, client_addr)) != 0) {
	score = (PSC_DNSBL_SCORE *) hash_node->value;
	score->refcount += 1;
	PSC_CALL_BACK_EXTEND(hash_node, score);
	PSC_CALL_BACK_ENTER(score, callback, context);
	if (msg_verbose > 1)
	    msg_info("%s: reuse blocklist score for %s refcount=%d pending=%d",
		     myname, client_addr, score->refcount,
		     score->pending_lookups);
	if (score->pending_lookups == 0)
	    event_request_timer(callback, context, EVENT_NULL_DELAY);
	return (PSC_CALL_BACK_INDEX_OF_LAST(score));
    }
    if (msg_verbose > 1)
	msg_info("%s: create blocklist score for %s", myname, client_addr);
    score = (PSC_DNSBL_SCORE *) mymalloc(sizeof(*score));
    score->request_id = request_count++;
    score->dnsbl_name = 0;
    score->dnsbl_weight = 0;
    /* As with dnsblog(8), a value < 0 means no reply TTL. */
    score->pass_ttl = -1;
    score->fail_ttl = -1;
    score->total = 0;
    score->refcount = 1;
    score->pending_lookups = 0;
    PSC_CALL_BACK_INIT(score);
    PSC_CALL_BACK_ENTER(score, callback, context);
    (void) htable_enter(dnsbl_score_cache, client_addr, (void *) score);

    /*
     * Send a query to all DNSBL servers. Later, DNSBL lookup will be done
     * with an UDP-based DNS client that is built directly into Postfix code.
     * We therefore do not optimize the maximum out of this temporary
     * implementation.
     */
    for (ht = dnsbl_site_list; *ht; ht++) {
	if ((fd = LOCAL_CONNECT(psc_dnsbl_service, NON_BLOCKING, 1)) < 0) {
	    msg_warn("%s: connect to %s service: %m",
		     myname, psc_dnsbl_service);
	    continue;
	}
	stream = vstream_fdopen(fd, O_RDWR);
	vstream_control(stream,
			CA_VSTREAM_CTL_CONTEXT(ht[0]->key),
			CA_VSTREAM_CTL_END);
	attr_print(stream, ATTR_FLAG_NONE,
		   SEND_ATTR_STR(MAIL_ATTR_RBL_DOMAIN, ht[0]->key),
		   SEND_ATTR_STR(MAIL_ATTR_ACT_CLIENT_ADDR, client_addr),
		   SEND_ATTR_INT(MAIL_ATTR_LABEL, score->request_id),
		   ATTR_TYPE_END);
	if (vstream_fflush(stream) != 0) {
	    msg_warn("%s: error sending to %s service: %m",
		     myname, psc_dnsbl_service);
	    vstream_fclose(stream);
	    continue;
	}
	PSC_READ_EVENT_REQUEST(vstream_fileno(stream), psc_dnsbl_receive,
			       (void *) stream, var_psc_dnsbl_tmout);
	score->pending_lookups += 1;
    }
    return (PSC_CALL_BACK_INDEX_OF_LAST(score));
}
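
The long comment in example #26 describes the zero-delay timer idiom: when the answer is already in hand inside a forward call, do not invoke the requestor's callback directly, because that would re-enter the caller while the forward call is still on the stack; instead hand the callback to event_request_timer() with EVENT_NULL_DELAY, as the example itself does, so the notification runs from the event loop. A minimal sketch with an illustrative request function:

#include <sys_defs.h>
#include <events.h>

void    example_request(void (*callback) (int, void *), void *context)
{
    int     result_already_cached = 1;      /* illustrative condition */

    if (result_already_cached) {
        /*
         * Schedule the notification with a zero delay rather than calling
         * callback() here, exactly as example #26 does for cached scores.
         */
        event_request_timer(callback, context, EVENT_NULL_DELAY);
        return;
    }
    /* ... otherwise start the asynchronous lookups ... */
}
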
Example #27
void    qmgr_queue_throttle(QMGR_QUEUE *queue, DSN *dsn)
{
    const char *myname = "qmgr_queue_throttle";
    QMGR_TRANSPORT *transport = queue->transport;
    double  feedback;

    /*
     * Sanity checks.
     */
    if (!QMGR_QUEUE_READY(queue))
	msg_panic("%s: bad queue status: %s", myname, QMGR_QUEUE_STATUS(queue));
    if (queue->dsn)
	msg_panic("%s: queue %s: spurious reason %s",
		  myname, queue->name, queue->dsn->reason);
    if (msg_verbose)
	msg_info("%s: queue %s: %s %s",
		 myname, queue->name, dsn->status, dsn->reason);

    /*
     * Don't restart the positive feedback hysteresis cycle with every
     * negative feedback. Restart it only when we make a negative concurrency
     * adjustment (i.e. at the start of a negative feedback hysteresis
     * cycle). Otherwise positive feedback would be too weak (positive
     * feedback does not take effect until the end of its hysteresis cycle).
     */

    /*
     * This queue is declared dead after a configurable number of
     * pseudo-cohort failures.
     */
    if (QMGR_QUEUE_READY(queue)) {
	queue->fail_cohorts += 1.0 / queue->window;
	if (transport->fail_cohort_limit > 0
	    && queue->fail_cohorts >= transport->fail_cohort_limit)
	    queue->window = QMGR_QUEUE_STAT_THROTTLED;
    }

    /*
     * Decrease the destination's concurrency limit until we reach 1. Base
     * adjustments on the concurrency limit itself, instead of using the
     * actual concurrency. The latter fluctuates wildly when deliveries
     * complete in bursts (artificial benchmark measurements).
     * 
     * Even after reaching 1, we maintain the negative hysteresis cycle so that
     * negative feedback can cancel out positive feedback.
     */
    if (QMGR_QUEUE_READY(queue)) {
	feedback = QMGR_FEEDBACK_VAL(transport->neg_feedback, queue->window);
	QMGR_LOG_FEEDBACK(feedback);
	queue->failure -= feedback;
	/* Prepare for overshoot (feedback > hysteresis, rounding error). */
	while (queue->failure - feedback / 2 < 0) {
	    queue->window -= transport->neg_feedback.hysteresis;
	    queue->success = 0;
	    queue->failure += transport->neg_feedback.hysteresis;
	}
	/* Prepare for overshoot. */
	if (queue->window < 1)
	    queue->window = 1;
    }

    /*
     * Special case for a site that just was declared dead.
     */
    if (QMGR_QUEUE_THROTTLED(queue)) {
	queue->dsn = DSN_COPY(dsn);
	event_request_timer(qmgr_queue_unthrottle_wrapper,
			    (char *) queue, var_min_backoff_time);
	queue->dflags = 0;
    }
    QMGR_LOG_WINDOW(queue);
}
Example #28
static void master_wakeup_timer_event(int unused_event, char *context)
{
    const char *myname = "master_wakeup_timer_event";
    MASTER_SERV *serv = (MASTER_SERV *) context;
    int     status;
    static char wakeup = TRIGGER_REQ_WAKEUP;

    /*
     * Don't wakeup services whose automatic wakeup feature was turned off in
     * the mean time.
     */
    if (serv->wakeup_time == 0)
	return;

    /*
     * Don't wake up services that are throttled. Find out what transport to
     * use. We can't block here so we choose a short timeout.
     */
#define BRIEFLY	1

    if (MASTER_THROTTLED(serv) == 0) {
	if (msg_verbose)
	    msg_info("%s: service %s", myname, serv->name);

	switch (serv->type) {
	case MASTER_SERV_TYPE_INET:
	    status = inet_trigger(serv->name, &wakeup, sizeof(wakeup), BRIEFLY);
	    break;
	case MASTER_SERV_TYPE_UNIX:
	    status = LOCAL_TRIGGER(serv->name, &wakeup, sizeof(wakeup), BRIEFLY);
	    break;
#ifdef MASTER_SERV_TYPE_PASS
	case MASTER_SERV_TYPE_PASS:
	    status = PASS_TRIGGER(serv->name, &wakeup, sizeof(wakeup), BRIEFLY);
	    break;
#endif

	    /*
	     * If someone compromises the postfix account then this must not
	     * overwrite files outside the chroot jail. Countermeasures:
	     * 
	     * - Limit the damage by accessing the FIFO as postfix not root.
	     * 
	     * - Have fifo_trigger() call safe_open() so we won't follow
	     * arbitrary hard/symlinks to files in/outside the chroot jail.
	     * 
	     * - All non-chroot postfix-related files must be root owned (or
	     * postfix check complains).
	     * 
	     * - The postfix user and group ID must not be shared with other
	     * applications (says the INSTALL documentation).
	     * 
	     * Result of a discussion with Michael Tokarev, who received his
	     * insights from Solar Designer, who tested Postfix with a kernel
	     * module that is paranoid about open() calls.
	     */
	case MASTER_SERV_TYPE_FIFO:
	    set_eugid(var_owner_uid, var_owner_gid);
	    status = fifo_trigger(serv->name, &wakeup, sizeof(wakeup), BRIEFLY);
	    set_ugid(getuid(), getgid());
	    break;
	default:
	    msg_panic("%s: unknown service type: %d", myname, serv->type);
	}
	if (status < 0)
	    msg_warn("%s: service %s(%s): %m",
		     myname, serv->ext_name, serv->name);
    }

    /*
     * Schedule another wakeup event.
     */
    event_request_timer(master_wakeup_timer_event, (char *) serv,
			serv->wakeup_time);
}
Example #29
static void tlsp_get_request_event(int event, void *context)
{
    const char *myname = "tlsp_get_request_event";
    TLSP_STATE *state = (TLSP_STATE *) context;
    VSTREAM *plaintext_stream = state->plaintext_stream;
    int     plaintext_fd = vstream_fileno(plaintext_stream);
    static VSTRING *remote_endpt;
    static VSTRING *server_id;
    int     req_flags;
    int     timeout;
    int     ready;

    /*
     * One-time initialization.
     */
    if (remote_endpt == 0) {
        remote_endpt = vstring_alloc(10);
        server_id = vstring_alloc(10);
    }

    /*
     * At this point we still manually manage plaintext read/write/timeout
     * events. Turn off timer events. Below we disable read events on error,
     * and redefine read events on success.
     */
    if (event != EVENT_TIME)
        event_cancel_timer(tlsp_get_request_event, (void *) state);
    else
        errno = ETIMEDOUT;

    /*
     * We must send some data, after receiving the request attributes and
     * before receiving the remote file descriptor. We can't assume
     * UNIX-domain socket semantics here.
     */
    if (event != EVENT_READ
            || attr_scan(plaintext_stream, ATTR_FLAG_STRICT,
                         RECV_ATTR_STR(MAIL_ATTR_REMOTE_ENDPT, remote_endpt),
                         RECV_ATTR_INT(MAIL_ATTR_FLAGS, &req_flags),
                         RECV_ATTR_INT(MAIL_ATTR_TIMEOUT, &timeout),
                         RECV_ATTR_STR(MAIL_ATTR_SERVER_ID, server_id),
                         ATTR_TYPE_END) != 4) {
        msg_warn("%s: receive request attributes: %m", myname);
        event_disable_readwrite(plaintext_fd);
        tlsp_state_free(state);
        return;
    }

    /*
     * If the requested TLS engine is unavailable, hang up after making sure
     * that the plaintext peer has received our "sorry" indication.
     */
    ready = ((req_flags & TLS_PROXY_FLAG_ROLE_SERVER) != 0
             && tlsp_server_ctx != 0);
    if (attr_print(plaintext_stream, ATTR_FLAG_NONE,
                   SEND_ATTR_INT(MAIL_ATTR_STATUS, ready),
                   ATTR_TYPE_END) != 0
            || vstream_fflush(plaintext_stream) != 0
            || ready == 0) {
        read_wait(plaintext_fd, TLSP_INIT_TIMEOUT);	/* XXX */
        event_disable_readwrite(plaintext_fd);
        tlsp_state_free(state);
        return;
    }

    /*
     * XXX We use the same fixed timeout throughout the entire session for
     * both plaintext and ciphertext communication. This timeout is just a
     * safety feature; the real timeout will be enforced by our plaintext
     * peer.
     */
    else {
        state->remote_endpt = mystrdup(STR(remote_endpt));
        state->server_id = mystrdup(STR(server_id));
        msg_info("CONNECT %s %s",
                 (req_flags & TLS_PROXY_FLAG_ROLE_SERVER) ? "from" :
                 (req_flags & TLS_PROXY_FLAG_ROLE_CLIENT) ? "to" :
                 "(bogus_direction)", state->remote_endpt);
        state->req_flags = req_flags;
        state->timeout = timeout + 10;		/* XXX */
        event_enable_read(plaintext_fd, tlsp_get_fd_event, (void *) state);
        event_request_timer(tlsp_get_fd_event, (void *) state,
                            TLSP_INIT_TIMEOUT);
        return;
    }
}
Example #30
void    qmgr_deliver(QMGR_TRANSPORT *transport, VSTREAM *stream)
{
    QMGR_QUEUE *queue;
    QMGR_ENTRY *entry;
    DSN     dsn;

    /*
     * Find out if this delivery process is really available. Once elected,
     * the delivery process is supposed to express its happiness. If there is
     * a problem, wipe the pending deliveries for this transport. This
     * routine runs in response to an external event, so it does not run
     * while some other queue manipulation is happening.
     */
    if (stream == 0 || qmgr_deliver_initial_reply(stream) != 0) {
#if 0
	whatsup = concatenate(transport->name,
			      " mail transport unavailable", (char *) 0);
	qmgr_transport_throttle(transport,
				DSN_SIMPLE(&dsn, "4.3.0", whatsup));
	myfree(whatsup);
#else
	qmgr_transport_throttle(transport,
				DSN_SIMPLE(&dsn, "4.3.0",
					   "mail transport unavailable"));
#endif
	qmgr_defer_transport(transport, &dsn);
	if (stream)
	    (void) vstream_fclose(stream);
	return;
    }

    /*
     * Find a suitable queue entry. Things may have changed since this
     * transport was allocated. If no suitable entry is found,
     * unceremoniously disconnect from the delivery process. The delivery
     * agent request reading routine is prepared for the queue manager to
     * change its mind for no apparent reason.
     */
    if ((queue = qmgr_queue_select(transport)) == 0
	|| (entry = qmgr_entry_select(queue)) == 0) {
	(void) vstream_fclose(stream);
	return;
    }

    /*
     * Send the queue file info and recipient info to the delivery process.
     * If there is a problem, wipe the pending deliveries for this transport.
     * This routine runs in response to an external event, so it does not run
     * while some other queue manipulation is happening.
     */
    if (qmgr_deliver_send_request(entry, stream) < 0) {
	qmgr_entry_unselect(queue, entry);
#if 0
	whatsup = concatenate(transport->name,
			      " mail transport unavailable", (char *) 0);
	qmgr_transport_throttle(transport,
				DSN_SIMPLE(&dsn, "4.3.0", whatsup));
	myfree(whatsup);
#else
	qmgr_transport_throttle(transport,
				DSN_SIMPLE(&dsn, "4.3.0",
					   "mail transport unavailable"));
#endif
	qmgr_defer_transport(transport, &dsn);
	/* warning: entry and queue may be dangling pointers here */
	(void) vstream_fclose(stream);
	return;
    }

    /*
     * If we get this far, go wait for the delivery status report.
     */
    qmgr_deliver_concurrency++;
    entry->stream = stream;
    event_enable_read(vstream_fileno(stream),
		      qmgr_deliver_update, (char *) entry);

    /*
     * Guard against broken systems.
     */
    event_request_timer(qmgr_deliver_abort, (char *) entry, var_daemon_timeout);
}