static void control_close(struct ctl_conn *c) { size_t *count; count = tree_xget(&ctl_count, c->euid); (*count)--; if (*count == 0) { tree_xpop(&ctl_count, c->euid); free(count); } tree_xpop(&ctl_conns, c->id); mproc_clear(&c->mproc); free(c); stat_backend->decrement("control.session", 1); if (getdtablesize() - getdtablecount() < CONTROL_FD_RESERVE) return; if (!event_pending(&control_state.ev, EV_READ, NULL)) { log_warnx("warn: re-enabling ctl connections"); event_add(&control_state.ev, NULL); } }
/*
 * End-of-message notification: record the announced body size on the
 * session, then run the EOM logic.
 */
static void
filter_dispatch_eom(uint64_t id, size_t datalen)
{
	struct filter_session	*fs;

	fs = tree_xget(&sessions, id);
	fs->datalen = datalen;
	filter_trigger_eom(fs);
}
/*
 * Callback invoked when the parent answers a ~/.forward lookup request.
 * fwreq identifies the waiting lka session; fd is the descriptor of the
 * user's .forward file (or -1 if there is none).  Depending on status,
 * either fail the expansion, deliver directly, or expand the file's
 * contents, then resume the session.
 */
void
lka_session_forward_reply(struct forward_req *fwreq, int fd)
{
	struct lka_session	*lks;
	struct rule		*rule;
	struct expandnode	*xn;
	int			 ret;

	lks = tree_xget(&sessions, fwreq->id);
	xn = lks->node;
	rule = lks->rule;

	/* The session was parked waiting for this reply. */
	lks->flags &= ~F_WAITING;

	switch (fwreq->status) {
	case 0:
		/* permanent failure while lookup ~/.forward */
		log_trace(TRACE_EXPAND, "expand: ~/.forward failed for user %s",
		    fwreq->user);
		lks->error = LKA_PERMFAIL;
		break;
	case 1:
		if (fd == -1) {
			/* No .forward file: deliver to the user directly. */
			log_trace(TRACE_EXPAND, "expand: no .forward for "
			    "user %s, just deliver", fwreq->user);
			lka_submit(lks, rule, xn);
		}
		else {
			/* expand for the current user and rule */
			lks->expand.rule = rule;
			lks->expand.parent = xn;
			lks->expand.alias = 0;
			xn->mapping = rule->r_mapping;
			xn->userbase = rule->r_userbase;
			/* forwards_get() will close the descriptor no matter what */
			ret = forwards_get(fd, &lks->expand);
			if (ret == -1) {
				/* Parse/read problem: retry later. */
				log_trace(TRACE_EXPAND, "expand: temporary "
				    "forward error for user %s", fwreq->user);
				lks->error = LKA_TEMPFAIL;
			}
			else if (ret == 0) {
				/* File present but empty: plain delivery. */
				log_trace(TRACE_EXPAND, "expand: empty .forward "
				    "for user %s, just deliver", fwreq->user);
				lka_submit(lks, rule, xn);
			}
		}
		break;
	default:
		/* temporary failure while looking up ~/.forward */
		lks->error = LKA_TEMPFAIL;
	}

	/* Continue the expansion state machine. */
	lka_resume(lks);
}
/*
 * Public filter API: accept the pending query for session `id`
 * with FILTER_OK and no code/line.  Always returns 1.
 */
int
filter_api_accept(uint64_t id)
{
	struct filter_session	*fs;

	log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" filter_api_accept()",
	    filter_name, id);

	fs = tree_xget(&sessions, id);
	filter_response(fs, FILTER_OK, 0, NULL);

	return (1);
}
/*
 * Public filter API: append one line to the outgoing data pipe of
 * session `id`.  Fatal if the output channel is not open.
 */
void
filter_api_writeln(uint64_t id, const char *line)
{
	struct filter_session	*fs;

	log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" filter_api_writeln(%s)",
	    filter_name, id, line);

	fs = tree_xget(&sessions, id);

	if (fs->pipe.oev.sock == -1) {
		/* No output pipe at this stage of the session. */
		log_warnx("warn: filter:%s: cannot write at this point",
		    filter_name);
		fatalx("exiting");
	}

	/* +1 accounts for the newline appended below. */
	fs->pipe.odatalen += strlen(line) + 1;
	iobuf_fqueue(&fs->pipe.obuf, "%s\n", line);
	io_reload(&fs->pipe.oev);
}
/*
 * Public filter API: reject the pending query for session `id` with
 * the given status.  Always returns 1.
 */
int
filter_api_reject(uint64_t id, enum filter_status status)
{
	struct filter_session	*fs;

	log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" filter_api_reject(%d)",
	    filter_name, id, status);

	/* FILTER_OK makes no sense for a rejection; coerce to FILTER_FAIL. */
	if (status == FILTER_OK)
		status = FILTER_FAIL;

	fs = tree_xget(&sessions, id);
	filter_response(fs, status, 0, NULL);

	return (1);
}
/*
 * Public filter API: reject the pending query for session `id` with a
 * specific SMTP code and reply line.  Always returns 1.
 */
int
filter_api_reject_code(uint64_t id, enum filter_status status, uint32_t code,
    const char *line)
{
	struct filter_session	*fs;

	log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" filter_api_reject_code(%d, %u, %s)",
	    filter_name, id, status, code, line);

	/* FILTER_OK makes no sense for a rejection; coerce to FILTER_FAIL. */
	if (status == FILTER_OK)
		status = FILTER_FAIL;

	fs = tree_xget(&sessions, id);
	filter_response(fs, status, code, line);

	return (1);
}
static void filter_register_query(uint64_t id, uint64_t qid, int type) { struct filter_session *s; log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" %s", filter_name, id, query_to_str(type)); s = tree_xget(&sessions, id); if (s->qid) { log_warnx("warn: filter-api:%s query already in progess", filter_name); fatalx("filter-api: exiting"); } s->qid = qid; s->qtype = type; s->response.ready = 0; tree_xset(&queries, qid, s); }
/*
 * Main IMSG dispatcher for the MTA process.  Routes messages by sending
 * process: queue messages manage batches/tasks, LKA messages are relayed
 * to the session layer, and parent messages carry configuration updates.
 * Unknown message types are fatal.
 */
void
mta_imsg(struct imsgev *iev, struct imsg *imsg)
{
	struct mta_route	*route;
	struct mta_batch2	*batch;
	struct mta_task		*task;
	struct envelope		*e;
	struct ssl		*ssl;
	uint64_t		 id;

	if (iev->proc == PROC_QUEUE) {
		switch (imsg->hdr.type) {
		case IMSG_BATCH_CREATE:
			/* Start a new, empty batch keyed by the given id. */
			id = *(uint64_t*)(imsg->data);
			batch = xmalloc(sizeof *batch, "mta_batch");
			batch->id = id;
			tree_init(&batch->tasks);
			tree_xset(&batches, batch->id, batch);
			log_trace(TRACE_MTA, "mta: batch:%016" PRIx64 " created",
			    batch->id);
			return;

		case IMSG_BATCH_APPEND:
			/*
			 * Add an envelope to its batch, grouped per route.
			 * A task is created lazily for the first envelope
			 * seen on a given route.
			 */
			e = xmemdup(imsg->data, sizeof *e, "mta:envelope");
			route = mta_route_for(e);
			batch = tree_xget(&batches, e->batch_id);
			if ((task = tree_get(&batch->tasks, route->id)) == NULL) {
				log_trace(TRACE_MTA, "mta: new task for %s",
				    mta_route_to_text(route));
				task = xmalloc(sizeof *task, "mta_task");
				TAILQ_INIT(&task->envelopes);
				task->route = route;
				tree_xset(&batch->tasks, route->id, task);
				task->msgid = evpid_to_msgid(e->id);
				task->sender = e->sender;
				/* Route is pinned while the task references it. */
				route->refcount += 1;
			}
			/* Technically, we could handle that by adding a msg
			 * level, but the batch sent by the scheduler should
			 * be valid.
			 */
			if (task->msgid != evpid_to_msgid(e->id))
				errx(1, "msgid mismatch in batch");
			/* XXX honour route->maxrcpt */
			TAILQ_INSERT_TAIL(&task->envelopes, e, entry);
			stat_increment("mta.envelope", 1);
			log_debug("mta: received evp:%016" PRIx64 " for <%s@%s>",
			    e->id, e->dest.user, e->dest.domain);
			return;

		case IMSG_BATCH_CLOSE:
			/*
			 * Batch is complete: move every task onto its route's
			 * queue and let the route start draining.
			 */
			id = *(uint64_t*)(imsg->data);
			batch = tree_xpop(&batches, id);
			log_trace(TRACE_MTA, "mta: batch:%016" PRIx64 " closed",
			    batch->id);
			/* for all tasks, queue them on their route */
			while (tree_poproot(&batch->tasks, &id, (void**)&task)) {
				if (id != task->route->id)
					errx(1, "route id mismatch!");
				/* Hand the reference over from task to route queue. */
				task->route->refcount -= 1;
				task->route->ntask += 1;
				TAILQ_INSERT_TAIL(&task->route->tasks, task, entry);
				stat_increment("mta.task", 1);
				mta_route_drain(task->route);
			}
			free(batch);
			return;

		case IMSG_QUEUE_MESSAGE_FD:
			/* Message body fd: handled by the session layer. */
			mta_session_imsg(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_LKA) {
		switch (imsg->hdr.type) {
		case IMSG_LKA_SECRET:
		case IMSG_DNS_HOST:
		case IMSG_DNS_HOST_END:
		case IMSG_DNS_PTR:
			/* All LKA replies are session-level events. */
			mta_session_imsg(iev, imsg);
			return;
		}
	}

	if (iev->proc == PROC_PARENT) {
		switch (imsg->hdr.type) {
		case IMSG_CONF_START:
			/* Begin a (re)configuration cycle; fresh ssl tree. */
			if (env->sc_flags & SMTPD_CONFIGURING)
				return;
			env->sc_flags |= SMTPD_CONFIGURING;
			env->sc_ssl = xcalloc(1, sizeof *env->sc_ssl, "mta:sc_ssl");
			return;

		case IMSG_CONF_SSL:
			/*
			 * One certificate entry: the struct is followed in the
			 * payload by the cert PEM then the key PEM.
			 */
			if (!(env->sc_flags & SMTPD_CONFIGURING))
				return;
			ssl = xmemdup(imsg->data, sizeof *ssl, "mta:ssl");
			ssl->ssl_cert = xstrdup((char*)imsg->data +
			    sizeof *ssl, "mta:ssl_cert");
			ssl->ssl_key = xstrdup((char*)imsg->data +
			    sizeof *ssl + ssl->ssl_cert_len, "mta:ssl_key");
			SPLAY_INSERT(ssltree, env->sc_ssl, ssl);
			return;

		case IMSG_CONF_END:
			if (!(env->sc_flags & SMTPD_CONFIGURING))
				return;
			env->sc_flags &= ~SMTPD_CONFIGURING;
			return;

		case IMSG_CTL_VERBOSE:
			log_verbose(*(int *)imsg->data);
			return;
		}
	}

	errx(1, "mta_imsg: unexpected %s imsg", imsg_to_str(imsg->hdr.type));
}
/*
 * IMSG dispatcher for the filter API: handles registration handshake,
 * session lifecycle events, per-session queries, and data-pipe setup
 * requests coming from smtpd over mproc `p`.
 */
static void
filter_dispatch(struct mproc *p, struct imsg *imsg)
{
	struct filter_session	*s;
	struct filter_connect	 q_connect;
	struct mailaddr		 maddr;
	struct msg		 m;
	const char		*line, *name;
	uint32_t		 v, datalen;
	uint64_t		 id, qid;
	int			 status, type;
	int			 fds[2], fdin, fdout;

	log_trace(TRACE_FILTERS, "filter-api:%s imsg %s", filter_name,
	    filterimsg_to_str(imsg->hdr.type));

	switch (imsg->hdr.type) {
	case IMSG_FILTER_REGISTER:
		/*
		 * Handshake: check API version, then advertise which hooks
		 * and flags this filter implements.
		 */
		m_msg(&m, imsg);
		m_get_u32(&m, &v);
		m_get_string(&m, &name);
		filter_name = strdup(name);
		m_end(&m);
		if (v != FILTER_API_VERSION) {
			log_warnx("warn: filter-api:%s API mismatch",
			    filter_name);
			fatalx("filter-api: exiting");
		}
		m_create(p, IMSG_FILTER_REGISTER, 0, 0, -1);
		m_add_int(p, fi.hooks);
		m_add_int(p, fi.flags);
		m_close(p);
		break;

	case IMSG_FILTER_EVENT:
		/* Session lifecycle notification (no reply expected). */
		m_msg(&m, imsg);
		m_get_id(&m, &id);
		m_get_int(&m, &type);
		m_end(&m);
		switch (type) {
		case EVENT_CONNECT:
			/* New session: allocate state, pipes start closed. */
			s = xcalloc(1, sizeof(*s), "filter_dispatch");
			s->id = id;
			s->pipe.iev.sock = -1;
			s->pipe.oev.sock = -1;
			tree_xset(&sessions, id, s);
			break;
		case EVENT_DISCONNECT:
			filter_dispatch_disconnect(id);
			s = tree_xpop(&sessions, id);
			free(s);
			break;
		case EVENT_RESET:
			filter_dispatch_reset(id);
			break;
		case EVENT_COMMIT:
			filter_dispatch_commit(id);
			break;
		case EVENT_ROLLBACK:
			filter_dispatch_rollback(id);
			break;
		default:
			log_warnx("warn: filter-api:%s bad event %d",
			    filter_name, type);
			fatalx("filter-api: exiting");
		}
		break;

	case IMSG_FILTER_QUERY:
		/*
		 * Query requiring a response: register it on the session,
		 * then invoke the matching filter callback.
		 */
		m_msg(&m, imsg);
		m_get_id(&m, &id);
		m_get_id(&m, &qid);
		m_get_int(&m, &type);
		switch(type) {
		case QUERY_CONNECT:
			m_get_sockaddr(&m, (struct sockaddr*)&q_connect.local);
			m_get_sockaddr(&m, (struct sockaddr*)&q_connect.remote);
			m_get_string(&m, &q_connect.hostname);
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_connect(id, &q_connect);
			break;
		case QUERY_HELO:
			m_get_string(&m, &line);
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_helo(id, line);
			break;
		case QUERY_MAIL:
			m_get_mailaddr(&m, &maddr);
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_mail(id, &maddr);
			break;
		case QUERY_RCPT:
			m_get_mailaddr(&m, &maddr);
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_rcpt(id, &maddr);
			break;
		case QUERY_DATA:
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_data(id);
			break;
		case QUERY_EOM:
			m_get_u32(&m, &datalen);
			m_end(&m);
			filter_register_query(id, qid, type);
			filter_dispatch_eom(id, datalen);
			break;
		default:
			log_warnx("warn: filter-api:%s bad query %d",
			    filter_name, type);
			fatalx("filter-api: exiting");
		}
		break;

	case IMSG_FILTER_PIPE:
		/*
		 * Data-pipe setup: smtpd hands us the downstream fd
		 * (imsg->fd); we create a socketpair, keep one end for
		 * reading the message body and return the other end
		 * upstream.  On socketpair failure, fdin stays -1 to
		 * signal the error to smtpd.
		 */
		m_msg(&m, imsg);
		m_get_id(&m, &id);
		m_end(&m);

		fdout = imsg->fd;
		fdin = -1;

		if (socketpair(AF_UNIX, SOCK_STREAM, PF_UNSPEC, fds) == -1) {
			log_warn("warn: filter-api:%s socketpair", filter_name);
			close(fdout);
		} else {
			s = tree_xget(&sessions, id);
			s->pipe.eom_called = 0;
			s->pipe.error = 0;
			s->pipe.idatalen = 0;
			s->pipe.odatalen = 0;

			/* Output side writes the (possibly rewritten) body. */
			iobuf_init(&s->pipe.obuf, 0, 0);
			io_init(&s->pipe.oev, fdout, s, filter_io_out, &s->pipe.obuf);
			io_set_write(&s->pipe.oev);

			/* Input side reads the incoming body. */
			iobuf_init(&s->pipe.ibuf, 0, 0);
			io_init(&s->pipe.iev, fds[0], s, filter_io_in, &s->pipe.ibuf);
			io_set_read(&s->pipe.iev);

			fdin = fds[1];
		}

		log_trace(TRACE_FILTERS, "filter-api:%s %016"PRIx64" tx pipe %d -> %d",
		    filter_name, id, fdin, fdout);

		/* Return our end of the pipe (or -1 on failure). */
		m_create(&fi.p, IMSG_FILTER_PIPE, 0, 0, fdin);
		m_add_id(&fi.p, id);
		m_close(&fi.p);
		break;
	}
}
/*
 * Session-level IMSG handler for the MTA: drives a session's state
 * machine forward in response to the message-body fd from the queue,
 * AUTH secrets and DNS answers from the LKA.
 */
void
mta_session_imsg(struct imsgev *iev, struct imsg *imsg)
{
	uint64_t		 id;
	struct mta_session	*s;
	struct mta_host		*host;
	struct secret		*secret;
	struct dns		*dns;
	const char		*error;
	void			*ptr;

	switch(imsg->hdr.type) {
	case IMSG_QUEUE_MESSAGE_FD:
		/* Queue delivered the message body fd for this session. */
		id = *(uint64_t*)(imsg->data);
		if (imsg->fd == -1)
			fatalx("mta: cannot obtain msgfd");
		s = tree_xget(&sessions, id);
		s->datafp = fdopen(imsg->fd, "r");
		if (s->datafp == NULL)
			fatal("mta: fdopen");
		if (mta_check_loop(s->datafp)) {
			/* Delivery loop: fail the message, skip MAIL FROM. */
			log_debug("mta: loop detected");
			fclose(s->datafp);
			s->datafp = NULL;
			mta_status(s, 0, "646 Loop detected");
			mta_enter_state(s, MTA_SMTP_READY);
		} else {
			mta_enter_state(s, MTA_SMTP_MAIL);
		}
		return;

	case IMSG_LKA_SECRET:
		/* LKA responded to AUTH lookup. */
		secret = imsg->data;
		s = tree_xget(&sessions, secret->id);
		s->secret = xstrdup(secret->secret, "mta: secret");
		if (s->secret[0] == '\0') {
			/* Empty secret means the lookup found nothing. */
			mta_route_error(s->route, "secrets lookup failed");
			mta_enter_state(s, MTA_DONE);
		} else
			mta_enter_state(s, MTA_MX);
		return;

	case IMSG_DNS_HOST:
		/* One MX/host answer: append to the session's host list. */
		dns = imsg->data;
		s = tree_xget(&sessions, dns->id);
		host = xcalloc(1, sizeof *host, "mta: host");
		host->sa = dns->ss;
		TAILQ_INSERT_TAIL(&s->hosts, host, entry);
		return;

	case IMSG_DNS_HOST_END:
		/* LKA responded to DNS lookup: all host answers are in. */
		dns = imsg->data;
		s = tree_xget(&sessions, dns->id);
		if (!dns->error) {
			mta_enter_state(s, MTA_CONNECT);
			return;
		}
		/* Map the DNS failure to an SMTP-style status string. */
		if (dns->error == DNS_RETRY)
			error = "100 MX lookup failed temporarily";
		else if (dns->error == DNS_EINVAL)
			error = "600 Invalid domain name";
		else if (dns->error == DNS_ENONAME)
			error = "600 Domain does not exist";
		else if (dns->error == DNS_ENOTFOUND)
			error = "600 No MX address found for domain";
		else
			error = "100 Weird error";
		mta_route_error(s->route, error);
		/* NOTE(review): state still advances to MTA_CONNECT on
		 * error — presumably the connect path handles the empty
		 * host list; confirm against mta_enter_state(). */
		mta_enter_state(s, MTA_CONNECT);
		return;

	case IMSG_DNS_PTR:
		/* Reverse lookup of the connected peer completed. */
		dns = imsg->data;
		s = tree_xget(&sessions, dns->id);
		host = TAILQ_FIRST(&s->hosts);
		if (dns->error)
			strlcpy(host->fqdn, "<unknown>", sizeof host->fqdn);
		else
			strlcpy(host->fqdn, dns->host, sizeof host->fqdn);
		log_debug("mta: %p: connected to %s", s, host->fqdn);
		/* check if we need to start tls now... */
		if (((s->flags & MTA_FORCE_ANYSSL) && host->used == 1) ||
		    (s->flags & MTA_FORCE_SMTPS)) {
			log_debug("mta: %p: trying smtps (ssl=%p)...", s, s->ssl);
			if ((ptr = ssl_mta_init(s->ssl)) == NULL)
				fatalx("mta: ssl_mta_init");
			io_start_tls(&s->io, ptr);
		} else {
			mta_enter_state(s, MTA_SMTP_BANNER);
		}
		break;
	default:
		errx(1, "mta_session_imsg: unexpected %s imsg",
		    imsg_to_str(imsg->hdr.type));
	}
}