Example #1
int     main(int unused_argc, char **argv)
{
    HTABLE *table = htable_create(1);

    msg_vstream_init(argv[0], VSTREAM_ERR);
    msg_verbose = 1;
    htable_enter(table, "foo-name", mystrdup("foo-value"));
    htable_enter(table, "bar-name", mystrdup("bar-value"));
    attr_print64(VSTREAM_OUT, ATTR_FLAG_NONE,
		 SEND_ATTR_INT(ATTR_NAME_INT, 4711),
		 SEND_ATTR_LONG(ATTR_NAME_LONG, 1234L),
		 SEND_ATTR_STR(ATTR_NAME_STR, "whoopee"),
	       SEND_ATTR_DATA(ATTR_NAME_DATA, strlen("whoopee"), "whoopee"),
		 SEND_ATTR_HASH(table),
		 SEND_ATTR_LONG(ATTR_NAME_LONG, 4321L),
		 ATTR_TYPE_END);
    attr_print64(VSTREAM_OUT, ATTR_FLAG_NONE,
		 SEND_ATTR_INT(ATTR_NAME_INT, 4711),
		 SEND_ATTR_LONG(ATTR_NAME_LONG, 1234L),
		 SEND_ATTR_STR(ATTR_NAME_STR, "whoopee"),
	       SEND_ATTR_DATA(ATTR_NAME_DATA, strlen("whoopee"), "whoopee"),
		 ATTR_TYPE_END);
    if (vstream_fflush(VSTREAM_OUT) != 0)
	msg_fatal("write error: %m");

    htable_free(table, myfree);
    return (0);
}
Example #2
int     main(int unused_argc, char **argv)
{
    HTABLE *table = htable_create(1);

    msg_vstream_init(argv[0], VSTREAM_ERR);
    msg_verbose = 1;
    htable_enter(table, "foo-name", mystrdup("foo-value"));
    htable_enter(table, "bar-name", mystrdup("bar-value"));
    attr_print64(VSTREAM_OUT, ATTR_FLAG_NONE,
		 ATTR_TYPE_NUM, ATTR_NAME_NUM, 4711,
		 ATTR_TYPE_LONG, ATTR_NAME_LONG, 1234L,
		 ATTR_TYPE_STR, ATTR_NAME_STR, "whoopee",
		 ATTR_TYPE_HASH, table,
		 ATTR_TYPE_END);
    attr_print64(VSTREAM_OUT, ATTR_FLAG_NONE,
		 ATTR_TYPE_NUM, ATTR_NAME_NUM, 4711,
		 ATTR_TYPE_LONG, ATTR_NAME_LONG, 1234L,
		 ATTR_TYPE_STR, ATTR_NAME_STR, "whoopee",
		 ATTR_TYPE_END);
    if (vstream_fflush(VSTREAM_OUT) != 0)
	msg_fatal("write error: %m");

    htable_free(table, myfree);
    return (0);
}
Example #3
static void hash_parameters(void)
{
    const CONFIG_TIME_TABLE *ctt;
    const CONFIG_BOOL_TABLE *cbt;
    const CONFIG_INT_TABLE *cit;
    const CONFIG_STR_TABLE *cst;
    const CONFIG_STR_FN_TABLE *csft;
    const CONFIG_RAW_TABLE *rst;
    const CONFIG_NINT_TABLE *nst;

    param_table = htable_create(100);

    for (ctt = time_table; ctt->name; ctt++)
	htable_enter(param_table, ctt->name, (char *) ctt);
    for (cbt = bool_table; cbt->name; cbt++)
	htable_enter(param_table, cbt->name, (char *) cbt);
    for (cit = int_table; cit->name; cit++)
	htable_enter(param_table, cit->name, (char *) cit);
    for (cst = str_table; cst->name; cst++)
	htable_enter(param_table, cst->name, (char *) cst);
    for (csft = str_fn_table; csft->name; csft++)
	htable_enter(param_table, csft->name, (char *) csft);
    for (csft = str_fn_table_2; csft->name; csft++)
	htable_enter(param_table, csft->name, (char *) csft);
    for (rst = raw_table; rst->name; rst++)
	htable_enter(param_table, rst->name, (char *) rst);
    for (nst = nint_table; nst->name; nst++)
	htable_enter(param_table, nst->name, (char *) nst);
}
Example #4
static QMGR_JOB *qmgr_job_create(QMGR_MESSAGE *message, QMGR_TRANSPORT *transport)
{
    QMGR_JOB *job;

    job = (QMGR_JOB *) mymalloc(sizeof(QMGR_JOB));
    job->message = message;
    QMGR_LIST_APPEND(message->job_list, job, message_peers);
    htable_enter(transport->job_byname, message->queue_id, (void *) job);
    job->transport = transport;
    QMGR_LIST_INIT(job->transport_peers);
    QMGR_LIST_INIT(job->time_peers);
    job->stack_parent = 0;
    QMGR_LIST_INIT(job->stack_children);
    QMGR_LIST_INIT(job->stack_siblings);
    job->stack_level = -1;
    job->blocker_tag = 0;
    job->peer_byname = htable_create(0);
    QMGR_LIST_INIT(job->peer_list);
    job->slots_used = 0;
    job->slots_available = 0;
    job->selected_entries = 0;
    job->read_entries = 0;
    job->rcpt_count = 0;
    job->rcpt_limit = 0;
    return (job);
}
Example #5
static struct mypasswd *mypwenter(const struct passwd * pwd)
{
    struct mypasswd *mypwd;

    /*
     * Initialize on the fly.
     */
    if (mypwcache_name == 0) {
	mypwcache_name = htable_create(0);
	mypwcache_uid = binhash_create(0);
    }
    mypwd = (struct mypasswd *) mymalloc(sizeof(*mypwd));
    mypwd->refcount = 0;
    mypwd->pw_name = mystrdup(pwd->pw_name);
    mypwd->pw_passwd = mystrdup(pwd->pw_passwd);
    mypwd->pw_uid = pwd->pw_uid;
    mypwd->pw_gid = pwd->pw_gid;
    mypwd->pw_gecos = mystrdup(pwd->pw_gecos);
    mypwd->pw_dir = mystrdup(pwd->pw_dir);
    mypwd->pw_shell = mystrdup(*pwd->pw_shell ? pwd->pw_shell : _PATH_BSHELL);

    /*
     * Avoid mypwcache_uid memory leak when multiple names have the same UID.
     * This makes the lookup result dependent on program history. But, it was
     * already history-dependent before we added this extra check.
     */
    htable_enter(mypwcache_name, mypwd->pw_name, (char *) mypwd);
    if (binhash_locate(mypwcache_uid, (char *) &mypwd->pw_uid,
		       sizeof(mypwd->pw_uid)) == 0)
	binhash_enter(mypwcache_uid, (char *) &mypwd->pw_uid,
		      sizeof(mypwd->pw_uid), (char *) mypwd);
    return (mypwd);
}
Example #6
QMGR_QUEUE *qmgr_queue_create(QMGR_TRANSPORT *transport, const char *name,
			              const char *nexthop)
{
    QMGR_QUEUE *queue;

    /*
     * If possible, choose an initial concurrency of > 1 so that one bad
     * message or one bad network won't slow us down unnecessarily.
     */

    queue = (QMGR_QUEUE *) mymalloc(sizeof(QMGR_QUEUE));
    qmgr_queue_count++;
    queue->dflags = 0;
    queue->last_done = 0;
    queue->name = mystrdup(name);
    queue->nexthop = mystrdup(nexthop);
    queue->todo_refcount = 0;
    queue->busy_refcount = 0;
    queue->transport = transport;
    queue->window = transport->init_dest_concurrency;
    queue->success = queue->failure = queue->fail_cohorts = 0;
    QMGR_LIST_INIT(queue->todo);
    QMGR_LIST_INIT(queue->busy);
    queue->dsn = 0;
    queue->clog_time_to_warn = 0;
    queue->blocker_tag = 0;
    QMGR_LIST_APPEND(transport->queue_list, queue, peers);
    htable_enter(transport->queue_byname, name, (char *) queue);
    return (queue);
}
Example #7
int     forward_append(DELIVER_ATTR attr)
{
    FORWARD_INFO *info;
    HTABLE *table_snd;

    /*
     * Sanity checks.
     */
    if (msg_verbose)
	msg_info("forward delivered=%s sender=%s recip=%s",
		 attr.delivered, attr.sender, attr.rcpt.address);
    if (forward_dt == 0)
	msg_panic("forward_append: missing forward_init call");

    /*
     * In order to find the recipient list, first index a table by
     * delivered-to header address, then by envelope sender address.
     */
    if ((table_snd = (HTABLE *) htable_find(forward_dt, attr.delivered)) == 0) {
	table_snd = htable_create(0);
	htable_enter(forward_dt, attr.delivered, (void *) table_snd);
    }
    if ((info = (FORWARD_INFO *) htable_find(table_snd, attr.sender)) == 0) {
	if ((info = forward_open(attr.request, attr.sender)) == 0)
	    return (-1);
	htable_enter(table_snd, attr.sender, (void *) info);
    }

    /*
     * Append the recipient to the message envelope. Don't send the original
     * recipient or notification mask if it was reset due to mailing list
     * expansion.
     */
    if (*attr.rcpt.dsn_orcpt)
	rec_fprintf(info->cleanup, REC_TYPE_ATTR, "%s=%s",
		    MAIL_ATTR_DSN_ORCPT, attr.rcpt.dsn_orcpt);
    if (attr.rcpt.dsn_notify)
	rec_fprintf(info->cleanup, REC_TYPE_ATTR, "%s=%d",
		    MAIL_ATTR_DSN_NOTIFY, attr.rcpt.dsn_notify);
    if (*attr.rcpt.orig_addr)
	rec_fputs(info->cleanup, REC_TYPE_ORCP, attr.rcpt.orig_addr);
    rec_fputs(info->cleanup, REC_TYPE_RCPT, attr.rcpt.address);

    return (vstream_ferror(info->cleanup));
}
Example #8
QMGR_TRANSPORT *qmgr_transport_create(const char *name)
{
    QMGR_TRANSPORT *transport;

    if (htable_find(qmgr_transport_byname, name) != 0)
	msg_panic("qmgr_transport_create: transport exists: %s", name);
    transport = (QMGR_TRANSPORT *) mymalloc(sizeof(QMGR_TRANSPORT));
    transport->flags = 0;
    transport->pending = 0;
    transport->name = mystrdup(name);

    /*
     * Use global configuration settings or transport-specific settings.
     */
    transport->dest_concurrency_limit =
	get_mail_conf_int2(name, _DEST_CON_LIMIT,
			   var_dest_con_limit, 0, 0);
    transport->recipient_limit =
	get_mail_conf_int2(name, _DEST_RCPT_LIMIT,
			   var_dest_rcpt_limit, 0, 0);
    transport->init_dest_concurrency =
	get_mail_conf_int2(name, _INIT_DEST_CON,
			   var_init_dest_concurrency, 1, 0);
    transport->xport_rate_delay = get_mail_conf_time2(name, _XPORT_RATE_DELAY,
						      var_xport_rate_delay,
						      's', 0, 0);
    transport->rate_delay = get_mail_conf_time2(name, _DEST_RATE_DELAY,
						var_dest_rate_delay,
						's', 0, 0);

    if (transport->rate_delay > 0)
	transport->dest_concurrency_limit = 1;
    if (transport->dest_concurrency_limit != 0
    && transport->dest_concurrency_limit < transport->init_dest_concurrency)
	transport->init_dest_concurrency = transport->dest_concurrency_limit;

    transport->queue_byname = htable_create(0);
    QMGR_LIST_INIT(transport->queue_list);
    transport->dsn = 0;
    qmgr_feedback_init(&transport->pos_feedback, name, _CONC_POS_FDBACK,
		       VAR_CONC_POS_FDBACK, var_conc_pos_feedback);
    qmgr_feedback_init(&transport->neg_feedback, name, _CONC_NEG_FDBACK,
		       VAR_CONC_NEG_FDBACK, var_conc_neg_feedback);
    transport->fail_cohort_limit =
	get_mail_conf_int2(name, _CONC_COHORT_LIM,
			   var_conc_cohort_limit, 0, 0);
    if (qmgr_transport_byname == 0)
	qmgr_transport_byname = htable_create(10);
    htable_enter(qmgr_transport_byname, name, (void *) transport);
    QMGR_LIST_APPEND(qmgr_transport_list, transport);
    if (msg_verbose)
	msg_info("qmgr_transport_create: %s concurrency %d recipients %d",
		 transport->name, transport->dest_concurrency_limit,
		 transport->recipient_limit);
    return (transport);
}
Example #9
int     main(int unused_argc, char **unused_argv)
{
    VSTRING *buf = vstring_alloc(100);
    VSTRING *result = vstring_alloc(100);
    char   *cp;
    char   *name;
    char   *value;
    HTABLE *table;
    int     stat;

    while (!vstream_feof(VSTREAM_IN)) {

	table = htable_create(0);

	/*
	 * Read a block of definitions, terminated with an empty line.
	 */
	while (vstring_get_nonl(buf, VSTREAM_IN) != VSTREAM_EOF) {
	    vstream_printf("<< %s\n", vstring_str(buf));
	    vstream_fflush(VSTREAM_OUT);
	    if (VSTRING_LEN(buf) == 0)
		break;
	    cp = vstring_str(buf);
	    name = mystrtok(&cp, " \t\r\n=");
	    value = mystrtok(&cp, " \t\r\n=");
	    htable_enter(table, name, value ? mystrdup(value) : 0);
	}

	/*
	 * Read a block of patterns, terminated with an empty line or EOF.
	 */
	while (vstring_get_nonl(buf, VSTREAM_IN) != VSTREAM_EOF) {
	    vstream_printf("<< %s\n", vstring_str(buf));
	    vstream_fflush(VSTREAM_OUT);
	    if (VSTRING_LEN(buf) == 0)
		break;
	    cp = vstring_str(buf);
	    VSTRING_RESET(result);
	    stat = mac_expand(result, vstring_str(buf), MAC_EXP_FLAG_NONE,
			      (char *) 0, lookup, (char *) table);
	    vstream_printf("stat=%d result=%s\n", stat, vstring_str(result));
	    vstream_fflush(VSTREAM_OUT);
	}
	htable_free(table, myfree);
	vstream_printf("\n");
    }

    /*
     * Clean up.
     */
    vstring_free(buf);
    vstring_free(result);
    exit(0);
}
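
The lookup callback passed to mac_expand() above is not part of this excerpt. A plausible minimal version (an assumption, not code shown here) treats the context pointer as the HTABLE that was just filled from the "name=value" definitions:

/*
 * Hypothetical lookup callback for the mac_expand() test above: the
 * context argument is the table built from the "name=value" block.
 */
static const char *lookup(const char *name, int unused_mode, char *context)
{
    HTABLE *table = (HTABLE *) context;

    return (htable_find(table, name));
}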
Example #10
static SMTP_SESSION *smtp_reuse_common(SMTP_STATE *state, int fd,
				               const char *label)
{
    const char *myname = "smtp_reuse_common";
    SMTP_ITERATOR *iter = state->iterator;
    SMTP_SESSION *session;

    /*
     * Can't happen. Both smtp_reuse_nexthop() and smtp_reuse_addr() decline
     * the request when the TLS policy is not TLS_LEV_NONE.
     */
#ifdef USE_TLS
    if (state->tls->level > TLS_LEV_NONE)
	msg_panic("%s: unexpected plain-text cached session to %s",
		  myname, label);
#endif

    /*
     * Re-activate the SMTP_SESSION object.
     */
    session = smtp_session_activate(fd, state->iterator, state->dest_prop,
				    state->endp_prop);
    if (session == 0) {
	msg_warn("%s: bad cached session attribute for %s", myname, label);
	(void) close(fd);
	return (0);
    }
    state->session = session;
    session->state = state;
#ifdef USE_TLS
    session->tls = state->tls;			/* TEMPORARY */
#endif

    /*
     * Send an RSET probe to verify that the session is still good.
     */
    if (smtp_rset(state) < 0
	|| (session->features & SMTP_FEATURE_RSET_REJECTED) != 0) {
	smtp_session_free(session);
	return (state->session = 0);
    }

    /*
     * Avoid poor performance when TCP MSS > VSTREAM_BUFSIZE.
     */
    vstream_tweak_sock(session->stream);

    /*
     * Update the list of used cached addresses.
     */
    htable_enter(state->cache_used, STR(iter->addr), (char *) 0);

    return (session);
}
Example #11
static void dict_open_init(void)
{
    const char *myname = "dict_open_init";
    const DICT_OPEN_INFO *dp;

    if (dict_open_hash != 0)
	msg_panic("%s: multiple initialization", myname);
    dict_open_hash = htable_create(10);

    for (dp = dict_open_info; dp->type; dp++)
	htable_enter(dict_open_hash, dp->type, (char *) dp);
}
Example #12
static void mkmap_open_init(void)
{
    static const char myname[] = "mkmap_open_init";
    const MKMAP_OPEN_INFO *mp;

    if (mkmap_open_hash != 0)
	msg_panic("%s: multiple initialization", myname);
    mkmap_open_hash = htable_create(10);

    for (mp = mkmap_open_info; mp->type; mp++)
	htable_enter(mkmap_open_hash, mp->type, (char *) mp);
}
Example #13
PSC_STATE *psc_new_session_state(VSTREAM *stream,
				         const char *client_addr,
				         const char *client_port,
				         const char *server_addr,
				         const char *server_port)
{
    PSC_STATE *state;
    HTABLE_INFO *ht;

    state = (PSC_STATE *) mymalloc(sizeof(*state));
    PSC_INIT_TESTS(state);
    if ((state->smtp_client_stream = stream) != 0)
	psc_check_queue_length++;
    state->smtp_server_fd = (-1);
    state->smtp_client_addr = mystrdup(client_addr);
    state->smtp_client_port = mystrdup(client_port);
    state->smtp_server_addr = mystrdup(server_addr);
    state->smtp_server_port = mystrdup(server_port);
    state->send_buf = vstring_alloc(100);
    state->test_name = "TEST NAME HERE";
    state->dnsbl_reply = 0;
    state->final_reply = "421 4.3.2 Service currently unavailable\r\n";
    state->rcpt_reply = "450 4.3.2 Service currently unavailable\r\n";
    state->command_count = 0;
    state->protocol = MAIL_PROTO_SMTP;
    state->helo_name = 0;
    state->sender = 0;
    state->cmd_buffer = 0;
    state->read_state = 0;
    state->ehlo_discard_mask = 0;		/* XXX Should be ~0 */
    state->expand_buf = 0;
    state->where = PSC_SMTPD_CMD_CONNECT;

    /*
     * Update the stress level.
     */
    if (psc_stress == 0
	&& psc_check_queue_length >= psc_hiwat_check_queue_length) {
	psc_stress = 1;
	msg_info("entering STRESS mode with %d connections",
		 psc_check_queue_length);
    }

    /*
     * Update the per-client session count.
     */
    if ((ht = htable_locate(psc_client_concurrency, client_addr)) == 0)
	ht = htable_enter(psc_client_concurrency, client_addr, (char *) 0);
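    /*
     * The value pointer itself is the counter: no separate counter
     * object is allocated per client address.
     */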
    ht->value += 1;
    state->client_concurrency = CAST_CHAR_PTR_TO_INT(ht->value);

    return (state);
}
Example #14
NVTABLE_INFO *nvtable_update(NVTABLE * table, const char *key, const char *value)
{
    NVTABLE_INFO *ht;

    if ((ht = htable_locate(table, key)) != 0) {
	myfree(ht->value);
    } else {
	ht = htable_enter(table, key, (char *) 0);
    }
    ht->value = mystrdup(value);
    return (ht);
}
Example #15
QMGR_PEER *qmgr_peer_create(QMGR_JOB *job, QMGR_QUEUE *queue)
{
    QMGR_PEER *peer;

    peer = (QMGR_PEER *) mymalloc(sizeof(QMGR_PEER));
    peer->queue = queue;
    peer->job = job;
    QMGR_LIST_APPEND(job->peer_list, peer, peers);
    htable_enter(job->peer_byname, queue->name, (char *) peer);
    peer->refcount = 0;
    QMGR_LIST_INIT(peer->entry_list);
    return (peer);
}
Example #16
static void post_jail_init(char *service_name, char **unused_argv)
{
    const char *sep = ", \t\r\n";
    char   *saved_filter;
    char   *bp;
    char   *type_name;

    /*
     * Are we proxy writer?
     */
    if (strcmp(service_name, MAIL_SERVICE_PROXYWRITE) == 0)
        proxy_writer = 1;
    else if (strcmp(service_name, MAIL_SERVICE_PROXYMAP) != 0)
        msg_fatal("service name must be one of %s or %s",
                  MAIL_SERVICE_PROXYMAP, MAIL_SERVICE_PROXYWRITE);

    /*
     * Pre-allocate buffers.
     */
    request = vstring_alloc(10);
    request_map = vstring_alloc(10);
    request_key = vstring_alloc(10);
    request_value = vstring_alloc(10);
    map_type_name_flags = vstring_alloc(10);

    /*
     * Prepare the pre-approved list of proxied tables.
     */
    saved_filter = bp = mystrdup(proxy_writer ? var_proxy_write_maps :
                                 var_proxy_read_maps);
    proxy_auth_maps = htable_create(13);
    while ((type_name = mystrtok(&bp, sep)) != 0) {
        if (strncmp(type_name, PROXY_COLON, PROXY_COLON_LEN))
            continue;
        do {
            type_name += PROXY_COLON_LEN;
        } while (!strncmp(type_name, PROXY_COLON, PROXY_COLON_LEN));
        if (strchr(type_name, ':') != 0
                && htable_locate(proxy_auth_maps, type_name) == 0)
            (void) htable_enter(proxy_auth_maps, type_name, (char *) 0);
    }
    myfree(saved_filter);

    /*
     * Never, ever, get killed by a master signal, as that could corrupt a
     * persistent database when we're in the middle of an update.
     */
    if (proxy_writer != 0)
        setsid();
}
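
The proxy_auth_maps table above stores only keys with null values, i.e. it is used as a string set. The same idiom reduced to hypothetical helpers (not part of the original code):

/*
 * Hash table as a string set: members are entered with a null value,
 * and membership is a simple locate, as with proxy_auth_maps above.
 */
static void set_add(HTABLE *set, const char *member)
{
    if (htable_locate(set, member) == 0)
	(void) htable_enter(set, member, (char *) 0);
}

static int set_contains(HTABLE *set, const char *member)
{
    return (htable_locate(set, member) != 0);
}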
Example #17
void    dict_open_register(const char *type, DICT_OPEN_FN open)
{
    const char *myname = "dict_open_register";
    DICT_OPEN_INFO *dp;
    HTABLE_INFO *ht;

    if (dict_open_hash == 0)
	dict_open_init();
    if (htable_find(dict_open_hash, type))
	msg_panic("%s: dictionary type exists: %s", myname, type);
    dp = (DICT_OPEN_INFO *) mymalloc(sizeof(*dp));
    dp->open = open;
    ht = htable_enter(dict_open_hash, type, (char *) dp);
    dp->type = ht->key;
}
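
The matching lookup side is not included in these examples. A plausible sketch (the dict_open_lookup() helper name is an assumption, not code shown here) retrieves the registered open function by type name:

/*
 * Hypothetical lookup counterpart to dict_open_register() above.
 */
static DICT_OPEN_FN dict_open_lookup(const char *type)
{
    DICT_OPEN_INFO *dp;

    if (dict_open_hash == 0
	|| (dp = (DICT_OPEN_INFO *) htable_find(dict_open_hash, type)) == 0)
	msg_fatal("unsupported dictionary type: %s", type);
    return (dp->open);
}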
Example #18
void    mkmap_open_register(const char *type, MKMAP_OPEN_FN open_fn)
{
    static const char myname[] = "mkmap_open_register";
    MKMAP_OPEN_INFO *mp;
    HTABLE_INFO *ht;

    if (mkmap_open_hash == 0)
	mkmap_open_init();
    if (htable_find(mkmap_open_hash, type))
	msg_panic("%s: database type exists: %s", myname, type);
    mp = (MKMAP_OPEN_INFO *) mymalloc(sizeof(*mp));
    mp->before_open = open_fn;
    ht = htable_enter(mkmap_open_hash, type, (char *) mp);
    mp->type = ht->key;
}
Example #19
void    dict_open_register(const char *type,
			           DICT *(*open) (const char *, int, int))
{
    const char *myname = "dict_open_register";
    DICT_OPEN_INFO *dp;

    if (dict_open_hash == 0)
	dict_open_init();
    if (htable_find(dict_open_hash, type))
	msg_panic("%s: dictionary type exists: %s", myname, type);
    dp = (DICT_OPEN_INFO *) mymalloc(sizeof(*dp));
    dp->type = mystrdup(type);
    dp->open = open;
    htable_enter(dict_open_hash, dp->type, (char *) dp);
}
Example #20
static ANVIL_REMOTE *anvil_remote_conn_update(VSTREAM *client_stream, const char *ident)
{
    ANVIL_REMOTE *anvil_remote;
    ANVIL_LOCAL *anvil_local;
    const char *myname = "anvil_remote_conn_update";

    if (msg_verbose)
	msg_info("%s fd=%d stream=0x%lx ident=%s",
		 myname, vstream_fileno(client_stream),
		 (unsigned long) client_stream, ident);

    /*
     * Look up remote connection count information. Update remote connection
     * rate information. Simply reset the counter every var_anvil_time_unit
     * seconds. This is easier than maintaining a moving average and it gives
     * a quicker response to trespassers.
     */
    if ((anvil_remote =
	 (ANVIL_REMOTE *) htable_find(anvil_remote_map, ident)) == 0) {
	anvil_remote = (ANVIL_REMOTE *) mymalloc(sizeof(*anvil_remote));
	ANVIL_REMOTE_FIRST_CONN(anvil_remote, ident);
	htable_enter(anvil_remote_map, ident, (char *) anvil_remote);
	if (max_cache_size < anvil_remote_map->used) {
	    max_cache_size = anvil_remote_map->used;
	    max_cache_time = event_time();
	}
    } else {
	ANVIL_REMOTE_NEXT_CONN(anvil_remote);
    }

    /*
     * Record this connection under the local server information, so that we
     * can clean up all its connection state when the local server goes away.
     */
    if ((anvil_local = (ANVIL_LOCAL *) vstream_context(client_stream)) == 0) {
	anvil_local = (ANVIL_LOCAL *) mymalloc(sizeof(*anvil_local));
	ANVIL_LOCAL_INIT(anvil_local);
	vstream_control(client_stream,
			VSTREAM_CTL_CONTEXT, (void *) anvil_local,
			VSTREAM_CTL_END);
    }
    ANVIL_LOCAL_ADD_ONE(anvil_local, anvil_remote);
    if (msg_verbose)
	msg_info("%s: anvil_local 0x%lx",
		 myname, (unsigned long) anvil_local);

    return (anvil_remote);
}
Example #21
DELIVERED_HDR_INFO *delivered_hdr_init(VSTREAM *fp, off_t offset, int flags)
{
    char   *cp;
    DELIVERED_HDR_INFO *info;
    const HEADER_OPTS *hdr;

    /*
     * Sanity check.
     */
    info = (DELIVERED_HDR_INFO *) mymalloc(sizeof(*info));
    info->flags = flags;
    info->buf = vstring_alloc(10);
    info->table = htable_create(0);

    if (vstream_fseek(fp, offset, SEEK_SET) < 0)
	msg_fatal("seek queue file %s: %m", VSTREAM_PATH(fp));

    /*
     * XXX Assume that mail_copy() produces delivered-to headers that fit in
     * a REC_TYPE_NORM record. Lowercase the delivered-to addresses for
     * consistency.
     * 
     * XXX Don't get bogged down by gazillions of delivered-to headers.
     */
#define DELIVERED_HDR_LIMIT	1000

    while (rec_get(fp, info->buf, 0) == REC_TYPE_NORM
	   && info->table->used < DELIVERED_HDR_LIMIT) {
	if (is_header(STR(info->buf))) {
	    if ((hdr = header_opts_find(STR(info->buf))) != 0
		&& hdr->type == HDR_DELIVERED_TO) {
		cp = STR(info->buf) + strlen(hdr->name) + 1;
		while (ISSPACE(*cp))
		    cp++;
		if (info->flags & FOLD_ADDR_ALL)
		    fold_addr(cp, info->flags);
		if (msg_verbose)
		    msg_info("delivered_hdr_init: %s", cp);
		htable_enter(info->table, cp, (char *) 0);
	    }
	} else if (ISSPACE(STR(info->buf)[0])) {
	    continue;
	} else {
	    break;
	}
    }
    return (info);
}
Example #22
void    dict_register(const char *dict_name, DICT *dict_info)
{
    const char *myname = "dict_register";
    DICT_NODE *node;

    if (dict_table == 0)
	dict_table = htable_create(0);
    if ((node = dict_node(dict_name)) == 0) {
	node = (DICT_NODE *) mymalloc(sizeof(*node));
	node->dict = dict_info;
	node->refcount = 0;
	htable_enter(dict_table, dict_name, (char *) node);
    } else if (dict_info != node->dict)
	msg_fatal("%s: dictionary name exists: %s", myname, dict_name);
    node->refcount++;
    if (msg_verbose > 1)
	msg_info("%s: %s %d", myname, dict_name, node->refcount);
}
Example #23
static void header_opts_init(void)
{
    const HEADER_OPTS *hp;
    const char *cp;

    /*
     * Build a hash table for quick lookup, and allocate memory for
     * lower-casing the lookup key.
     */
    header_key = vstring_alloc(10);
    header_hash = htable_create(HEADER_OPTS_SIZE);
    for (hp = header_opts; hp < header_opts + HEADER_OPTS_SIZE; hp++) {
	VSTRING_RESET(header_key);
	for (cp = hp->name; *cp; cp++)
	    VSTRING_ADDCH(header_key, TOLOWER(*cp));
	VSTRING_TERMINATE(header_key);
	htable_enter(header_hash, vstring_str(header_key), (char *) hp);
    }
}
Example #24
int     tls_mgr_update(const char *unused_type, const char *key,
		               const char *buf, ssize_t len)
{
    HTABLE_INFO *ent;
    VSTRING *s;

    if (tls_cache == 0)
	return TLS_MGR_STAT_ERR;

    if ((ent = htable_locate(tls_cache, key)) == 0) {
	s = vstring_alloc(len);
	ent = htable_enter(tls_cache, key, (char *) s);
    } else {
	s = (VSTRING *) ent->value;
    }
    vstring_memcpy(s, buf, len);

    ++cache_count;
    return (TLS_MGR_STAT_OK);
}
Example #25
int     been_here_fixed(BH_TABLE *dup_filter, const char *string)
{
    VSTRING *folded_string;
    const char *lookup_key;
    int     status;

    /*
     * Special processing: case insensitive lookup.
     */
    if (dup_filter->flags & BH_FLAG_FOLD) {
	folded_string = vstring_alloc(100);
	lookup_key = casefold(folded_string, string);
    } else {
	folded_string = 0;
	lookup_key = string;
    }

    /*
     * Do the duplicate check.
     */
    if (htable_locate(dup_filter->table, lookup_key) != 0) {
	status = 1;
    } else {
	if (dup_filter->limit <= 0
	    || dup_filter->limit > dup_filter->table->used)
	    htable_enter(dup_filter->table, lookup_key, (void *) 0);
	status = 0;
    }
    if (msg_verbose)
	msg_info("been_here: %s: %d", string, status);

    /*
     * Cleanup.
     */
    if (folded_string)
	vstring_free(folded_string);

    return (status);
}
Example #26
static int dict_ht_update(DICT *dict, const char *name, const char *value)
{
    DICT_HT *dict_ht = (DICT_HT *) dict;
    HTABLE_INFO *ht;
    char   *saved_value = mystrdup(value);

    /*
     * Optionally fold the key.
     */
    if (dict->flags & DICT_FLAG_FOLD_FIX) {
	if (dict->fold_buf == 0)
	    dict->fold_buf = vstring_alloc(10);
	vstring_strcpy(dict->fold_buf, name);
	name = lowercase(vstring_str(dict->fold_buf));
    }
    if ((ht = htable_locate(dict_ht->table, name)) != 0) {
	myfree(ht->value);
    } else {
	ht = htable_enter(dict_ht->table, name, (char *) 0);
    }
    ht->value = saved_value;
    DICT_ERR_VAL_RETURN(dict, DICT_ERR_NONE, DICT_STAT_SUCCESS);
}
Example #27
static void psc_dnsbl_add_site(const char *site)
{
    const char *myname = "psc_dnsbl_add_site";
    char   *saved_site = mystrdup(site);
    VSTRING *byte_codes = 0;
    PSC_DNSBL_HEAD *head;
    PSC_DNSBL_SITE *new_site;
    char    junk;
    const char *weight_text;
    char   *pattern_text;
    int     weight;
    HTABLE_INFO *ht;
    char   *parse_err;

    /*
     * Parse the required DNSBL domain name, the optional reply filter and
     * the optional reply weight factor.
     */
#define DO_GRIPE	1

    /* Negative weight means whitelist. */
    if ((weight_text = split_at(saved_site, '*')) != 0) {
	if (sscanf(weight_text, "%d%c", &weight, &junk) != 1)
	    msg_fatal("bad DNSBL weight factor \"%s\" in \"%s\"",
		      weight_text, site);
    } else {
	weight = 1;
    }
    /* Reply filter. */
    if ((pattern_text = split_at(saved_site, '=')) != 0) {
	byte_codes = vstring_alloc(100);
	if ((parse_err = ip_match_parse(byte_codes, pattern_text)) != 0)
	    msg_fatal("bad DNSBL filter syntax: %s", parse_err);
    }
    if (valid_hostname(saved_site, DO_GRIPE) == 0)
	msg_fatal("bad DNSBL domain name \"%s\" in \"%s\"",
		  saved_site, site);

    if (msg_verbose > 1)
	msg_info("%s: \"%s\" -> domain=\"%s\" pattern=\"%s\" weight=%d",
		 myname, site, saved_site, pattern_text ? pattern_text :
		 "null", weight);

    /*
     * Look up or create the (filter, weight) list head for this DNSBL domain
     * name.
     */
    if ((head = (PSC_DNSBL_HEAD *)
	 htable_find(dnsbl_site_cache, saved_site)) == 0) {
	head = (PSC_DNSBL_HEAD *) mymalloc(sizeof(*head));
	ht = htable_enter(dnsbl_site_cache, saved_site, (void *) head);
	/* Translate the DNSBL name into a safe name if available. */
	if (psc_dnsbl_reply == 0
	 || (head->safe_dnsbl = dict_get(psc_dnsbl_reply, saved_site)) == 0)
	    head->safe_dnsbl = ht->key;
	if (psc_dnsbl_reply && psc_dnsbl_reply->error)
	    msg_fatal("%s:%s lookup error", psc_dnsbl_reply->type,
		      psc_dnsbl_reply->name);
	head->first = 0;
    }

    /*
     * Append the new (filter, weight) node to the list for this DNSBL domain
     * name.
     */
    new_site = (PSC_DNSBL_SITE *) mymalloc(sizeof(*new_site));
    new_site->filter = (pattern_text ? mystrdup(pattern_text) : 0);
    new_site->byte_codes = (byte_codes ? ip_match_save(byte_codes) : 0);
    new_site->weight = weight;
    new_site->next = head->first;
    head->first = new_site;

    myfree(saved_site);
    if (byte_codes)
	vstring_free(byte_codes);
}
Example #28
int     psc_dnsbl_request(const char *client_addr,
			          void (*callback) (int, void *),
			          void *context)
{
    const char *myname = "psc_dnsbl_request";
    int     fd;
    VSTREAM *stream;
    HTABLE_INFO **ht;
    PSC_DNSBL_SCORE *score;
    HTABLE_INFO *hash_node;
    static int request_count;

    /*
     * Some spambots make several connections at nearly the same time,
     * causing their pregreet delays to overlap. Such connections can share
     * the efforts of DNSBL lookup.
     * 
     * We store a reference-counted DNSBL score under its client IP address. We
     * increment the reference count with each score request, and decrement
     * the reference count with each score retrieval.
     * 
     * Do not notify the requestor NOW when the DNS replies are already in.
     * Reason: we must not make a backwards call while we are still in the
     * middle of executing the corresponding forward call. Instead we create
     * a zero-delay timer request and call the notification function from
     * there.
     * 
     * psc_dnsbl_request() could instead return a result value to indicate that
     * the DNSBL score is already available, but that would complicate the
     * caller with two different notification code paths: one asynchronous
     * code path via the callback invocation, and one synchronous code path
     * via the psc_dnsbl_request() result value. That would be a source of
     * future bugs.
     */
    if ((hash_node = htable_locate(dnsbl_score_cache, client_addr)) != 0) {
	score = (PSC_DNSBL_SCORE *) hash_node->value;
	score->refcount += 1;
	PSC_CALL_BACK_EXTEND(hash_node, score);
	PSC_CALL_BACK_ENTER(score, callback, context);
	if (msg_verbose > 1)
	    msg_info("%s: reuse blocklist score for %s refcount=%d pending=%d",
		     myname, client_addr, score->refcount,
		     score->pending_lookups);
	if (score->pending_lookups == 0)
	    event_request_timer(callback, context, EVENT_NULL_DELAY);
	return (PSC_CALL_BACK_INDEX_OF_LAST(score));
    }
    if (msg_verbose > 1)
	msg_info("%s: create blocklist score for %s", myname, client_addr);
    score = (PSC_DNSBL_SCORE *) mymalloc(sizeof(*score));
    score->request_id = request_count++;
    score->dnsbl_name = 0;
    score->dnsbl_weight = 0;
    /* As with dnsblog(8), a value < 0 means no reply TTL. */
    score->pass_ttl = -1;
    score->fail_ttl = -1;
    score->total = 0;
    score->refcount = 1;
    score->pending_lookups = 0;
    PSC_CALL_BACK_INIT(score);
    PSC_CALL_BACK_ENTER(score, callback, context);
    (void) htable_enter(dnsbl_score_cache, client_addr, (void *) score);

    /*
     * Send a query to all DNSBL servers. Later, DNSBL lookup will be done
     * with a UDP-based DNS client that is built directly into Postfix code.
     * We therefore do not optimize the maximum out of this temporary
     * implementation.
     */
    for (ht = dnsbl_site_list; *ht; ht++) {
	if ((fd = LOCAL_CONNECT(psc_dnsbl_service, NON_BLOCKING, 1)) < 0) {
	    msg_warn("%s: connect to %s service: %m",
		     myname, psc_dnsbl_service);
	    continue;
	}
	stream = vstream_fdopen(fd, O_RDWR);
	vstream_control(stream,
			CA_VSTREAM_CTL_CONTEXT(ht[0]->key),
			CA_VSTREAM_CTL_END);
	attr_print(stream, ATTR_FLAG_NONE,
		   SEND_ATTR_STR(MAIL_ATTR_RBL_DOMAIN, ht[0]->key),
		   SEND_ATTR_STR(MAIL_ATTR_ACT_CLIENT_ADDR, client_addr),
		   SEND_ATTR_INT(MAIL_ATTR_LABEL, score->request_id),
		   ATTR_TYPE_END);
	if (vstream_fflush(stream) != 0) {
	    msg_warn("%s: error sending to %s service: %m",
		     myname, psc_dnsbl_service);
	    vstream_fclose(stream);
	    continue;
	}
	PSC_READ_EVENT_REQUEST(vstream_fileno(stream), psc_dnsbl_receive,
			       (void *) stream, var_psc_dnsbl_tmout);
	score->pending_lookups += 1;
    }
    return (PSC_CALL_BACK_INDEX_OF_LAST(score));
}
Example #29
static int flush_send_path(const char *path, int how)
{
    const char *myname = "flush_send_path";
    VSTRING *queue_id;
    VSTRING *queue_file;
    VSTREAM *log;
    struct utimbuf tbuf;
    static char qmgr_flush_trigger[] = {
	QMGR_REQ_FLUSH_DEAD,		/* flush dead site/transport cache */
    };
    static char qmgr_scan_trigger[] = {
	QMGR_REQ_SCAN_INCOMING,		/* scan incoming queue */
    };
    HTABLE *dup_filter;
    int     count;

    /*
     * Sanity check.
     */
    if (!mail_queue_id_ok(path))
	return (FLUSH_STAT_BAD);

    /*
     * Open the logfile. If the file does not exist, then there is no queued
     * mail for this destination.
     */
    if ((log = mail_queue_open(MAIL_QUEUE_FLUSH, path, O_RDWR, 0600)) == 0) {
	if (errno != ENOENT)
	    msg_fatal("%s: open fast flush logfile %s: %m", myname, path);
	return (FLUSH_STAT_OK);
    }

    /*
     * We must lock the logfile, so that we don't lose information when it is
     * truncated. Unfortunately, this means that the file can be locked for a
     * significant amount of time. If things really get stuck the Postfix
     * watchdog will take care of it.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_EXCLUSIVE) < 0)
	msg_fatal("%s: lock fast flush logfile %s: %m", myname, path);

    /*
     * With the UNTHROTTLE_BEFORE strategy, we ask the queue manager to
     * unthrottle all transports and queues before we move a deferred queue
     * file to the incoming queue. This minimizes a race condition where the
     * queue manager seizes a queue file before it knows that we want to
     * flush that message.
     * 
     * This reduces the race condition time window to a very small amount (the
     * flush server does not really know when the queue manager reads its
     * command fifo). But there is a worse race, where the queue manager
     * moves a deferred queue file to the active queue before we have a
     * chance to expedite its delivery.
     */
    if (how & UNTHROTTLE_BEFORE)
	mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
		     qmgr_flush_trigger, sizeof(qmgr_flush_trigger));

    /*
     * This is the part that dominates running time: schedule the listed
     * queue files for delivery by updating their file time stamps and by
     * moving them from the deferred queue to the incoming queue. This should
     * take no more than a couple seconds under normal conditions. Filter out
     * duplicate queue file names to avoid hammering the file system, with
     * some finite limit on the amount of memory that we are willing to
     * sacrifice for duplicate filtering. Graceful degradation.
     * 
     * By moving selected queue files from the deferred queue to the incoming
     * queue we optimize for the case where most deferred mail is for other
     * sites. If that assumption does not hold, i.e. all deferred mail is for
     * the same site, then doing a "fast flush" will cost more disk I/O than
     * a "slow flush" that delivers the entire deferred queue. This penalty
     * is only temporary - it will go away after we unite the active queue
     * and the incoming queue.
     */
    queue_id = vstring_alloc(10);
    queue_file = vstring_alloc(10);
    dup_filter = htable_create(10);
    tbuf.actime = tbuf.modtime = event_time();
    for (count = 0; vstring_get_nonl(queue_id, log) != VSTREAM_EOF; count++) {
	if (!mail_queue_id_ok(STR(queue_id))) {
	    msg_warn("bad queue id \"%.30s...\" in fast flush logfile %s",
		     STR(queue_id), path);
	    continue;
	}
	if (dup_filter->used >= FLUSH_DUP_FILTER_SIZE
	    || htable_find(dup_filter, STR(queue_id)) == 0) {
	    if (msg_verbose)
		msg_info("%s: logfile %s: update queue file %s time stamps",
			 myname, path, STR(queue_id));
	    if (dup_filter->used <= FLUSH_DUP_FILTER_SIZE)
		htable_enter(dup_filter, STR(queue_id), 0);
	    count += flush_one_file(STR(queue_id), queue_file, &tbuf, how);
	} else {
	    if (msg_verbose)
		msg_info("%s: logfile %s: skip queue file %s as duplicate",
			 myname, path, STR(queue_id));
	}
    }
    htable_free(dup_filter, (void (*) (void *)) 0);
    vstring_free(queue_file);
    vstring_free(queue_id);

    /*
     * Truncate the fast flush log.
     */
    if (count > 0 && ftruncate(vstream_fileno(log), (off_t) 0) < 0)
	msg_fatal("%s: truncate fast flush logfile %s: %m", myname, path);

    /*
     * Workaround for noatime mounts. Use futimes() if available.
     */
    (void) utimes(VSTREAM_PATH(log), (struct timeval *) 0);

    /*
     * Request delivery and clean up.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_NONE) < 0)
	msg_fatal("%s: unlock fast flush logfile %s: %m", myname, path);
    if (vstream_fclose(log) != 0)
	msg_warn("%s: read fast flush logfile %s: %m", myname, path);
    if (count > 0) {
	if (msg_verbose)
	    msg_info("%s: requesting delivery for logfile %s", myname, path);
	mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service,
		     qmgr_scan_trigger, sizeof(qmgr_scan_trigger));
    }
    return (FLUSH_STAT_OK);
}
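
The duplicate filter above remembers queue IDs as keys with null values and stops growing once it reaches FLUSH_DUP_FILTER_SIZE. The same idiom distilled into a hypothetical helper (not part of the original code):

/*
 * Duplicate filter: keys are remembered with a null value, and the
 * filter stops growing at the given limit (graceful degradation).
 */
static int seen_before(HTABLE *dup_filter, const char *key, ssize_t limit)
{
    if (htable_find(dup_filter, key) != 0)
	return (1);				/* duplicate */
    if (dup_filter->used < limit)
	htable_enter(dup_filter, key, (void *) 0);
    return (0);
}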
Example #30
DICT   *dict_thash_open(const char *path, int open_flags, int dict_flags)
{
    DICT_THASH *dict_thash;
    VSTREAM *fp = 0;
    struct stat st;
    time_t  before;
    time_t  after;
    VSTRING *line_buffer = 0;
    int     lineno;
    char   *key;
    char   *value;
    HTABLE *table;
    HTABLE_INFO *ht;

    /*
     * Let the optimizer worry about eliminating redundant code.
     */
#define DICT_THASH_OPEN_RETURN(d) do { \
	DICT *__d = (d); \
	if (fp != 0) \
	    vstream_fclose(fp); \
	if (line_buffer != 0) \
	    vstring_free(line_buffer); \
	return (__d); \
    } while (0)

    /*
     * Sanity checks.
     */
    if (open_flags != O_RDONLY)
	DICT_THASH_OPEN_RETURN(dict_surrogate(DICT_TYPE_THASH, path,
					      open_flags, dict_flags,
				  "%s:%s map requires O_RDONLY access mode",
					      DICT_TYPE_THASH, path));

    /*
     * Read the flat text file into in-memory hash. Read the file again if it
     * may have changed while we were reading.
     */
    for (before = time((time_t *) 0); /* see below */ ; before = after) {
	if ((fp = vstream_fopen(path, open_flags, 0644)) == 0) {
	    DICT_THASH_OPEN_RETURN(dict_surrogate(DICT_TYPE_THASH, path,
						  open_flags, dict_flags,
					     "open database %s: %m", path));
	}
	if (line_buffer == 0)
	    line_buffer = vstring_alloc(100);
	lineno = 0;
	table = htable_create(13);
	while (readlline(line_buffer, fp, &lineno)) {

	    /*
	     * Split on the first whitespace character, then trim leading and
	     * trailing whitespace from key and value.
	     */
	    key = STR(line_buffer);
	    value = key + strcspn(key, " \t\r\n");
	    if (*value)
		*value++ = 0;
	    while (ISSPACE(*value))
		value++;
	    trimblanks(key, 0)[0] = 0;
	    trimblanks(value, 0)[0] = 0;

	    /*
	     * Enforce the "key whitespace value" format. Disallow missing
	     * keys or missing values.
	     */
	    if (*key == 0 || *value == 0) {
		msg_warn("%s, line %d: expected format: key whitespace value"
			 " -- ignoring this line", path, lineno);
		continue;
	    }
	    if (key[strlen(key) - 1] == ':')
		msg_warn("%s, line %d: record is in \"key: value\" format;"
			 " is this an alias file?", path, lineno);

	    /*
	     * Optionally fold the key.
	     */
	    if (dict_flags & DICT_FLAG_FOLD_FIX)
		lowercase(key);

	    /*
	     * Store the value under the key. Handle duplicates
	     * appropriately.
	     */
	    if ((ht = htable_locate(table, key)) != 0) {
		if (dict_flags & DICT_FLAG_DUP_IGNORE) {
		     /* void */ ;
		} else if (dict_flags & DICT_FLAG_DUP_REPLACE) {
		    myfree(ht->value);
		    ht->value = mystrdup(value);
		} else if (dict_flags & DICT_FLAG_DUP_WARN) {
		    msg_warn("%s, line %d: duplicate entry: \"%s\"",
			     path, lineno, key);
		} else {
		    msg_fatal("%s, line %d: duplicate entry: \"%s\"",
			      path, lineno, key);
		}
	    } else {
		htable_enter(table, key, mystrdup(value));
	    }
	}

	/*
	 * See if the source file is hot.
	 */
	if (fstat(vstream_fileno(fp), &st) < 0)
	    msg_fatal("fstat %s: %m", path);
	if (vstream_fclose(fp))
	    msg_fatal("read %s: %m", path);
	fp = 0;					/* DICT_THASH_OPEN_RETURN() */
	after = time((time_t *) 0);
	if (st.st_mtime < before - 1 || st.st_mtime > after)
	    break;

	/*
	 * Yes, it is hot. Discard the result and read the file again.
	 */
	htable_free(table, myfree);
	if (msg_verbose > 1)
	    msg_info("pausing to let file %s cool down", path);
	doze(300000);
    }

    /*
     * Create the in-memory table.
     */
    dict_thash = (DICT_THASH *)
	dict_alloc(DICT_TYPE_THASH, path, sizeof(*dict_thash));
    dict_thash->dict.lookup = dict_thash_lookup;
    dict_thash->dict.sequence = dict_thash_sequence;
    dict_thash->dict.close = dict_thash_close;
    dict_thash->dict.flags = dict_flags | DICT_FLAG_DUP_WARN | DICT_FLAG_FIXED;
    if (dict_flags & DICT_FLAG_FOLD_FIX)
	dict_thash->dict.fold_buf = vstring_alloc(10);
    dict_thash->info = 0;
    dict_thash->table = table;
    dict_thash->dict.owner.uid = st.st_uid;
    dict_thash->dict.owner.status = (st.st_uid != 0);

    DICT_THASH_OPEN_RETURN(DICT_DEBUG (&dict_thash->dict));
}
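
The lookup routine wired up above (dict_thash_lookup) is not part of this excerpt. A plausible sketch, assuming it mirrors the key folding seen in Example #26 before consulting the in-memory table:

/*
 * Hypothetical sketch of the lookup routine installed above: fold the
 * key if requested, then look it up in the in-memory table.
 */
static const char *dict_thash_lookup(DICT *dict, const char *name)
{
    DICT_THASH *dict_thash = (DICT_THASH *) dict;

    if (dict->flags & DICT_FLAG_FOLD_FIX) {
	if (dict->fold_buf == 0)
	    dict->fold_buf = vstring_alloc(10);
	vstring_strcpy(dict->fold_buf, name);
	name = lowercase(vstring_str(dict->fold_buf));
    }
    return (htable_find(dict_thash->table, name));
}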