/* clnt_stream_open - connect to the mail service and arm cleanup events */

static void clnt_stream_open(CLNT_STREAM *clnt_stream)
{

    /*
     * Sanity check.
     */
    if (clnt_stream->vstream)
	msg_panic("clnt_stream_open: stream is open");

    /*
     * Schedule a read event so that we can clean up when the remote side
     * disconnects, and schedule a timer event so that we can cleanup an idle
     * connection. Note that both events are handled by the same routine.
     *
     * Finally, schedule an event to force disconnection even when the
     * connection is not idle. This is to prevent one client from clinging on
     * to a server forever.
     */
    /* mail_connect_wait() blocks until the connection succeeds or panics. */
    clnt_stream->vstream = mail_connect_wait(clnt_stream->class,
					     clnt_stream->service);
    /* Don't leak the descriptor into child processes. */
    close_on_exec(vstream_fileno(clnt_stream->vstream), CLOSE_ON_EXEC);
    /* Read readiness here means EOF (server hangup); handler cleans up. */
    event_enable_read(vstream_fileno(clnt_stream->vstream), clnt_stream_event,
		      (void *) clnt_stream);
    /* Idle timeout: shared handler with the read event above. */
    event_request_timer(clnt_stream_event, (void *) clnt_stream,
			clnt_stream->timeout);
    /* Hard time-to-live: disconnect even a busy connection eventually. */
    event_request_timer(clnt_stream_ttl_event, (void *) clnt_stream,
			clnt_stream->ttl);
}
/* deliver_request_read - accept and read a queue manager delivery request;
 * returns a request object, or a null pointer when the queue manager
 * withdrew the request or when the request could not be read. */

DELIVER_REQUEST *deliver_request_read(VSTREAM *stream)
{
    DELIVER_REQUEST *request;

    /*
     * Tell the queue manager that we are ready for this request.
     */
    if (deliver_request_initial(stream) != 0)
	return (0);

    /*
     * Be prepared for the queue manager to change its mind after contacting
     * us. This can happen when a transport or host goes bad.
     */
    /* Block until data or EOF; peekfd() == 0 or < 0 means no request follows. */
    (void) read_wait(vstream_fileno(stream), -1);
    if (peekfd(vstream_fileno(stream)) <= 0)
	return (0);

    /*
     * Allocate and read the queue manager's delivery request.
     */
#define XXX_DEFER_STATUS	-1

    request = deliver_request_alloc();
    if (deliver_request_get(stream, request) < 0) {
	/* Malformed request: report deferral and discard the object. */
	deliver_request_done(stream, request, XXX_DEFER_STATUS);
	request = 0;
    }
    return (request);
}
/* smtpd_peer_from_default - derive peer info from the client socket */

static void smtpd_peer_from_default(SMTPD_STATE *state)
{
    int     err;

    /*
     * The "no client" routine provides surrogate information so that the
     * application can produce sensible logging when a client disconnects
     * before the server wakes up. The "not inet" routine provides surrogate
     * state for (presumably) local IPC channels.
     */
    state->sockaddr_len = sizeof(state->sockaddr);
    state->dest_sockaddr_len = sizeof(state->dest_sockaddr);

    /* Look up the remote endpoint first, then our own endpoint. */
    err = getpeername(vstream_fileno(state->client),
		      (struct sockaddr *) &state->sockaddr,
		      &state->sockaddr_len);
    if (err == 0)
	err = getsockname(vstream_fileno(state->client),
			  (struct sockaddr *) &state->dest_sockaddr,
			  &state->dest_sockaddr_len);

    if (err < 0) {
	/* Non-socket channels and vanished clients get surrogate state. */
	if (errno == ENOTSOCK)
	    smtpd_peer_not_inet(state);
	else
	    smtpd_peer_no_client(state);
    } else if (smtpd_peer_sockaddr_to_hostaddr(state) < 0) {
	smtpd_peer_not_inet(state);
    }
}
/* multi_server_execute - hand one client request to the application */

static void multi_server_execute(int unused_event, char *context)
{
    VSTREAM *stream = (VSTREAM *) context;
    /* The per-stream attribute table is valid only if no one re-used the
     * stream's flags field since multi_server_wakeup() stored them. */
    HTABLE *attr = (vstream_flags(stream) == multi_server_saved_flags ?
		    (HTABLE *) vstream_context(stream) : 0);

    /* Release the shared accept lock before doing application work. */
    if (multi_server_lock != 0
	&& myflock(vstream_fileno(multi_server_lock), INTERNAL_LOCK,
		   MYFLOCK_OP_NONE) < 0)
	msg_fatal("select unlock: %m");

    /*
     * Do not bother the application when the client disconnected. Don't drop
     * the already accepted client request after "postfix reload"; that would
     * be rude.
     */
    if (peekfd(vstream_fileno(stream)) > 0) {
	/* Tell master we are busy; ignore failure here (see avail below). */
	if (master_notify(var_pid, multi_server_generation, MASTER_STAT_TAKEN) < 0)
	     /* void */ ;
	multi_server_service(stream, multi_server_name, multi_server_argv);
	/* If we can't report availability, master is gone: abort. */
	if (master_notify(var_pid, multi_server_generation, MASTER_STAT_AVAIL) < 0)
	    multi_server_abort(EVENT_NULL_TYPE, EVENT_NULL_CONTEXT);
    } else {
	multi_server_disconnect(stream);
    }
    if (attr)
	htable_free(attr, myfree);
}
/* anvil_service_done - clean up per-local-server state at disconnect */

static void anvil_service_done(VSTREAM *client_stream, char *unused_service,
			               char **unused_argv)
{
    ANVIL_LOCAL *anvil_local;
    const char *myname = "anvil_service_done";

    if (msg_verbose)
	msg_info("%s fd=%d stream=0x%lx",
		 myname, vstream_fileno(client_stream),
		 (unsigned long) client_stream);

    /*
     * Look up the local server, and get rid of any remote connection state
     * that we still have for this local server. Do not destroy remote client
     * status information before it expires.
     */
    if ((anvil_local = (ANVIL_LOCAL *) vstream_context(client_stream)) != 0) {
	if (msg_verbose)
	    msg_info("%s: anvil_local 0x%lx",
		     myname, (unsigned long) anvil_local);
	/* Drop every remote connection this local server still holds. */
	ANVIL_LOCAL_DROP_ALL(client_stream, anvil_local);
	myfree((char *) anvil_local);
    } else if (msg_verbose)
	msg_info("client socket not found for fd=%d",
		 vstream_fileno(client_stream));
}
/* vstream_tweak_tcp - avoid Nagle delay when buffers are smaller than MSS;
 * returns the result of the last (get|set)sockopt() call. */

int     vstream_tweak_tcp(VSTREAM *fp)
{
    const char *myname = "vstream_tweak_tcp";
    int     mss;
    SOCKOPT_SIZE mss_len = sizeof(mss);
    int     err;

    /*
     * Avoid Nagle delays when VSTREAM buffers are smaller than the MSS.
     *
     * Forcing TCP_NODELAY to be "always on" would hurt performance in the
     * common case where VSTREAM buffers are larger than the MSS.
     *
     * Instead we ask the kernel what the current MSS is, and take appropriate
     * action. Linux <= 2.2 getsockopt(TCP_MAXSEG) always returns zero (or
     * whatever value was stored last with setsockopt()).
     */
    if ((err = getsockopt(vstream_fileno(fp), IPPROTO_TCP, TCP_MAXSEG,
			  (char *) &mss, &mss_len)) < 0) {
	msg_warn("%s: getsockopt TCP_MAXSEG: %m", myname);
	return (err);
    }
    if (msg_verbose)
	msg_info("%s: TCP_MAXSEG %d", myname, mss);

    /*
     * Fix for recent Postfix versions: increase the VSTREAM buffer size if
     * the VSTREAM buffer is smaller than the MSS. Note: the MSS may change
     * when the route changes and IP path MTU discovery is turned on, so we
     * choose a somewhat larger buffer.
     */
#ifdef VSTREAM_CTL_BUFSIZE
    if (mss > 0) {
	/* Double, with overflow guard, to absorb future MSS growth. */
	if (mss < INT_MAX / 2)
	    mss *= 2;
	vstream_control(fp,
			VSTREAM_CTL_BUFSIZE, (ssize_t) mss,
			VSTREAM_CTL_END);
    }

    /*
     * Workaround for older Postfix versions: turn on TCP_NODELAY if the
     * VSTREAM buffer size is smaller than the MSS.
     */
#else
    if (mss > VSTREAM_BUFSIZE) {
	int     nodelay = 1;

	if ((err = setsockopt(vstream_fileno(fp), IPPROTO_TCP, TCP_NODELAY,
			      (char *) &nodelay, sizeof(nodelay))) < 0)
	    msg_warn("%s: setsockopt TCP_NODELAY: %m", myname);
    }
#endif
    return (err);
}
/* smtpd_proxy_replay_setup - open or reuse the before-filter replay log;
 * returns 0 on success, or the smtpd_proxy error status on failure. */

static int smtpd_proxy_replay_setup(SMTPD_STATE *state)
{
    const char *myname = "smtpd_proxy_replay_setup";
    off_t   file_offs;

    /*
     * Where possible reuse an existing replay logfile, because creating a
     * file is expensive compared to reading or writing. For security reasons
     * we must truncate the file before reuse. For performance reasons we
     * should truncate the file immediately after the end of a mail
     * transaction. We enforce the security guarantee upon reuse, by
     * requiring that no I/O happened since the file was truncated. This is
     * less expensive than truncating the file redundantly.
     */
    if (smtpd_proxy_replay_stream != 0) {
	/* vstream_ftell() won't invoke the kernel, so all errors are mine. */
	if ((file_offs = vstream_ftell(smtpd_proxy_replay_stream)) != 0)
	    msg_panic("%s: bad before-queue filter speed-adjust log offset %lu",
		      myname, (unsigned long) file_offs);
	vstream_clearerr(smtpd_proxy_replay_stream);
	if (msg_verbose)
	    msg_info("%s: reuse speed-adjust stream fd=%d", myname,
		     vstream_fileno(smtpd_proxy_replay_stream));
	/* Here, smtpd_proxy_replay_stream != 0 */
    }

    /*
     * Create a new replay logfile.
     */
    if (smtpd_proxy_replay_stream == 0) {
	smtpd_proxy_replay_stream =
	    mail_queue_enter(MAIL_QUEUE_INCOMING, 0, (struct timeval *) 0);
	if (smtpd_proxy_replay_stream == 0)
	    return (smtpd_proxy_replay_rdwr_error(state));
	/* Unlink immediately: the log is private scratch space. */
	if (unlink(VSTREAM_PATH(smtpd_proxy_replay_stream)) < 0)
	    msg_warn("remove before-queue filter speed-adjust log %s: %m",
		     VSTREAM_PATH(smtpd_proxy_replay_stream));
	if (msg_verbose)
	    msg_info("%s: new speed-adjust stream fd=%d", myname,
		     vstream_fileno(smtpd_proxy_replay_stream));
    }

    /*
     * Needed by our DATA-phase record emulation routines.
     */
    vstream_control(smtpd_proxy_replay_stream, VSTREAM_CTL_CONTEXT,
		    (char *) state, VSTREAM_CTL_END);
    return (0);
}
/* spawn_service - run the configured external command for one client */

static void spawn_service(VSTREAM *client_stream, char *service, char **argv)
{
    const char *myname = "spawn_service";
    static SPAWN_ATTR attr;		/* cached across invocations */
    WAIT_STATUS_T status;
    ARGV   *export_env;

    /*
     * This routine runs whenever a client connects to the UNIX-domain socket
     * dedicated to running an external command.
     */
    if (msg_verbose)
	msg_info("%s: service=%s, command=%s...", myname, service, argv[0]);

    /*
     * Look up service attributes and config information only once. This is
     * safe since the information comes from a trusted source.
     */
    if (attr.argv == 0) {
	get_service_attr(&attr, service, argv);
    }

    /*
     * Execute the command.
     */
    export_env = mail_parm_split(VAR_EXPORT_ENVIRON, var_export_environ);
    /* The client socket becomes the command's stdin/stdout/stderr. */
    status = spawn_command(CA_SPAWN_CMD_STDIN(vstream_fileno(client_stream)),
			   CA_SPAWN_CMD_STDOUT(vstream_fileno(client_stream)),
			   CA_SPAWN_CMD_STDERR(vstream_fileno(client_stream)),
			   CA_SPAWN_CMD_UID(attr.uid),
			   CA_SPAWN_CMD_GID(attr.gid),
			   CA_SPAWN_CMD_ARGV(attr.argv),
			   CA_SPAWN_CMD_TIME_LIMIT(attr.time_limit),
			   CA_SPAWN_CMD_EXPORT(export_env->argv),
			   CA_SPAWN_CMD_END);
    argv_free(export_env);

    /*
     * Warn about unsuccessful completion.
     */
    if (!NORMAL_EXIT_STATUS(status)) {
	if (WIFEXITED(status))
	    msg_warn("command %s exit status %d",
		     attr.argv[0], WEXITSTATUS(status));
	if (WIFSIGNALED(status))
	    msg_warn("command %s killed by signal %d",
		     attr.argv[0], WTERMSIG(status));
    }
}
/* multi_server_disconnect - terminate one client connection */

void    multi_server_disconnect(VSTREAM *stream)
{
    if (msg_verbose)
	msg_info("connection closed fd %d", vstream_fileno(stream));
    /* Give the application a chance to clean up its per-stream state. */
    if (multi_server_pre_disconn)
	multi_server_pre_disconn(stream, multi_server_name, multi_server_argv);
    /* Disable events BEFORE closing, or the fd could be recycled. */
    event_disable_readwrite(vstream_fileno(stream));
    (void) vstream_fclose(stream);
    client_count--;
    /* Avoid integer wrap-around in a persistent process.  */
    if (use_count < INT_MAX)
	use_count++;
    /* With no clients left, arm the idle-exit timer. */
    if (client_count == 0 && var_idle_limit > 0)
	event_request_timer(multi_server_timeout, (char *) 0, var_idle_limit);
}
/* connect_done - non-blocking connect() completed; check for success */

static void connect_done(int unused_event, char *context)
{
    SESSION *session = (SESSION *) context;
    int     fd = vstream_fileno(session->stream);

    /*
     * Try again after some delay when the connection failed, in case they
     * run a Mickey Mouse protocol stack.
     */
    if (socket_error(fd) < 0) {
	fail_connect(session);
    } else {
	/* Connected: switch back to blocking I/O for the protocol phase. */
	non_blocking(fd, BLOCKING);
	/* Disable write events. */
	event_disable_readwrite(fd);
	event_enable_read(fd, read_banner, (char *) session);
	dequeue_connect(session);
	/* Avoid poor performance when TCP MSS > VSTREAM_BUFSIZE. */
	/* NOTE(review): "sa" is file-scope state, not derived from this
	 * session — presumably the shared destination address; verify. */
	if (sa->sa_family == AF_INET
#ifdef AF_INET6
	    || sa->sa_family == AF_INET6
#endif
	    )
	    vstream_tweak_tcp(session->stream);
    }
}
/* load_file - run the action over a file, rereading while the file is hot */

void    load_file(const char *path, LOAD_FILE_FN action, void *context)
{
    VSTREAM *fp;
    struct stat st;
    time_t  before;
    time_t  after;

    /*
     * Read the file again if it is hot. This may result in reading a partial
     * parameter name or missing end marker when a file changes in the middle
     * of a read.
     */
    for (before = time((time_t *) 0); /* see below */ ; before = after) {
	if ((fp = vstream_fopen(path, O_RDONLY, 0)) == 0)
	    msg_fatal("open %s: %m", path);
	action(fp, context);
	/* Capture mtime before close so we can detect concurrent writes. */
	if (fstat(vstream_fileno(fp), &st) < 0)
	    msg_fatal("fstat %s: %m", path);
	if (vstream_ferror(fp) || vstream_fclose(fp))
	    msg_fatal("read %s: %m", path);
	after = time((time_t *) 0);
	/* File is "cool" if last modified clearly before we started reading. */
	if (st.st_mtime < before - 1 || st.st_mtime > after)
	    break;
	if (msg_verbose)
	    msg_info("pausing to let %s cool down", path);
	doze(300000);
    }
}
/* mark_corrupt - mark a queue file as corrupted; returns DEL_STAT_DEFER */

int     mark_corrupt(VSTREAM *src)
{
    const char *myname = "mark_corrupt";
    uid_t   saved_uid;
    gid_t   saved_gid;		/* set only when we switch privileges */

    /*
     * If not running as the mail system, change privileges first.
     */
    if ((saved_uid = geteuid()) != var_owner_uid) {
	saved_gid = getegid();
	set_eugid(var_owner_uid, var_owner_gid);
    }

    /*
     * For now, the result value is -1; this may become a bit mask, or
     * something even more advanced than that, when the delivery status
     * becomes more than just done/deferred.
     */
    msg_warn("corrupted queue file: %s", VSTREAM_PATH(src));
    /* The special file mode is what flags the file as corrupt. */
    if (fchmod(vstream_fileno(src), MAIL_QUEUE_STAT_CORRUPT))
	msg_fatal("%s: fchmod %s: %m", myname, VSTREAM_PATH(src));

    /*
     * Restore privileges.
     */
    /* saved_gid is valid here: same condition as the switch above. */
    if (saved_uid != var_owner_uid)
	set_eugid(saved_uid, saved_gid);

    return (DEL_STAT_DEFER);
}
/* smtp_session_passivate - serialize session state for the connection cache;
 * destroys the session object and returns the salvaged file descriptor. */

int     smtp_session_passivate(SMTP_SESSION *session, VSTRING *dest_prop,
			               VSTRING *endp_prop)
{
    int     fd;

    /*
     * Encode the local-to-physical binding properties: whether or not this
     * server is best MX host for the next-hop or fall-back logical
     * destination (this information is needed for loop handling in
     * smtp_proto()).
     *
     * XXX It would be nice to have a VSTRING to VSTREAM adapter so that we can
     * serialize the properties with attr_print() instead of using ad-hoc,
     * non-reusable, code and hard-coded format strings.
     */
    vstring_sprintf(dest_prop, "%u",
		    session->features & SMTP_FEATURE_DESTINATION_MASK);

    /*
     * Encode the physical endpoint properties: all the session properties
     * except for "session from cache", "best MX", or "RSET failure".
     *
     * XXX It would be nice to have a VSTRING to VSTREAM adapter so that we can
     * serialize the properties with attr_print() instead of using obscure
     * hard-coded format strings.
     *
     * XXX Should also record an absolute time when a session must be closed,
     * how many non-delivering mail transactions there were during this
     * session, and perhaps other statistics, so that we don't reuse a
     * session too much.
     *
     * XXX Be sure to use unsigned types in the format string. Sign characters
     * would be rejected by the alldig() test on the reading end.
     */
    /* Fixed: the %lu conversion requires an unsigned long argument; the
     * previous (long) cast was a signed/unsigned mismatch (UB per ISO C)
     * and could emit a sign character that the reader rejects. */
    vstring_sprintf(endp_prop, "%u\n%s\n%s\n%s\n%u\n%u\n%lu",
		    session->reuse_count,
		    session->dest, session->host,
		    session->addr, session->port,
		    session->features & SMTP_FEATURE_ENDPOINT_MASK,
		    (unsigned long) session->expire_time);

    /*
     * Append the passivated SASL attributes.
     */
#ifdef notdef
    if (smtp_sasl_enable)
	smtp_sasl_passivate(endp_prop, session);
#endif

    /*
     * Salvage the underlying file descriptor, and destroy the session
     * object.
     */
    fd = vstream_fileno(session->stream);
    vstream_fdclose(session->stream);
    session->stream = 0;
    smtp_session_free(session);

    return (fd);
}
/* psc_service - handle one new SMTP client connection */

static void psc_service(VSTREAM *smtp_client_stream, char *unused_service,
			        char **unused_argv)
{

    /*
     * For sanity, require that at least one of INET or INET6 is enabled.
     * Otherwise, we can't look up interface information, and we can't
     * convert names or addresses.
     */
    if (inet_proto_info()->ai_family_list[0] == 0)
	msg_fatal("all network protocols are disabled (%s = %s)",
		  VAR_INET_PROTOCOLS, var_inet_protocols);

    /*
     * This program handles all incoming connections, so it must not block.
     * We use event-driven code for all operations that introduce latency.
     *
     * Note: instead of using VSTREAM-level timeouts, we enforce limits on the
     * total amount of time to receive a complete SMTP command line.
     */
    non_blocking(vstream_fileno(smtp_client_stream), NON_BLOCKING);

    /*
     * Look up the remote SMTP client address and port.
     */
    /* Asynchronous; psc_endpt_lookup_done continues the session setup. */
    psc_endpt_lookup(smtp_client_stream, psc_endpt_lookup_done);
}
/* qmgr_deliver - hand a queue entry to an available delivery agent */

void    qmgr_deliver(QMGR_TRANSPORT *transport, VSTREAM *stream)
{
    QMGR_QUEUE *queue;
    QMGR_ENTRY *entry;

    /*
     * Find out if this delivery process is really available. Once elected,
     * the delivery process is supposed to express its happiness. If there is
     * a problem, wipe the pending deliveries for this transport. This
     * routine runs in response to an external event, so it does not run
     * while some other queue manipulation is happening.
     */
    if (qmgr_deliver_initial_reply(stream) != 0) {
	qmgr_transport_throttle(transport, "mail transport unavailable");
	qmgr_defer_transport(transport, transport->reason);
	(void) vstream_fclose(stream);
	return;
    }

    /*
     * Find a suitable queue entry. Things may have changed since this
     * transport was allocated. If no suitable entry is found,
     * unceremoniously disconnect from the delivery process. The delivery
     * agent request reading routine is prepared for the queue manager to
     * change its mind for no apparent reason.
     */
    if ((queue = qmgr_queue_select(transport)) == 0
	|| (entry = qmgr_entry_select(queue)) == 0) {
	(void) vstream_fclose(stream);
	return;
    }

    /*
     * Send the queue file info and recipient info to the delivery process.
     * If there is a problem, wipe the pending deliveries for this transport.
     * This routine runs in response to an external event, so it does not run
     * while some other queue manipulation is happening.
     */
    if (qmgr_deliver_send_request(entry, stream) < 0) {
	qmgr_entry_unselect(queue, entry);
	qmgr_transport_throttle(transport, "mail transport unavailable");
	qmgr_defer_transport(transport, transport->reason);
	/* warning: entry and queue may be dangling pointers here */
	(void) vstream_fclose(stream);
	return;
    }

    /*
     * If we get this far, go wait for the delivery status report.
     */
    qmgr_deliver_concurrency++;
    entry->stream = stream;
    event_enable_read(vstream_fileno(stream),
		      qmgr_deliver_update, (char *) entry);

    /*
     * Guard against broken systems.
     */
    /* If the agent never reports back, the abort timer reclaims the entry. */
    event_request_timer(qmgr_deliver_abort, (char *) entry, var_daemon_timeout);
}
/* rset_done - process the server's reply to our RSET command */

static void rset_done(int unused_event, char *context)
{
    SESSION *session = (SESSION *) context;
    RESPONSE *resp;
    int     except;

    /*
     * Get response to RSET command.
     */
    /* NOTE(review): error text says "while sending message" although this
     * handles the RSET reply — looks copy-pasted; confirm intent. */
    if ((except = vstream_setjmp(session->stream)) != 0)
	msg_fatal("%s while sending message", exception_text(except));
    if ((resp = response(session->stream, buffer))->code / 100 == 2) {
	 /* void */ ;
    } else if (allow_reject) {
	msg_warn("rset rejected: %d %s", resp->code, resp->str);
	/* 421/521: the server is closing the connection on us. */
	if (resp->code == 421 || resp->code == 521) {
	    close_session(session);
	    return;
	}
    } else {
	msg_fatal("rset rejected: %d %s", resp->code, resp->str);
    }

    /*
     * Say goodbye or send the next message.
     */
    if (disconnect || message_count < 1) {
	send_quit(session);
    } else {
	event_disable_readwrite(vstream_fileno(session->stream));
	start_another(session);
    }
}
/* send_rcpt - send an envelope recipient and wait for the reply */

static void send_rcpt(int unused_event, char *context)
{
    SESSION *session = (SESSION *) context;
    int     except;

    /*
     * Send envelope recipient address.
     */
    if ((except = vstream_setjmp(session->stream)) != 0)
	msg_fatal("%s while sending recipient", exception_text(except));
    /* With multiple recipients (or -N), make each address unique by
     * prefixing a number; otherwise send the recipient as-is. */
    if (session->rcpt_count > 1 || number_rcpts > 0)
	command(session->stream, "RCPT TO:<%d%s>",
		number_rcpts ? number_rcpts++ : session->rcpt_count,
		recipient);
    else
	command(session->stream, "RCPT TO:<%s>", recipient);
    session->rcpt_count--;
    session->rcpt_done++;

    /*
     * Prepare for the next event.
     */
    event_enable_read(vstream_fileno(session->stream), rcpt_done,
		      (char *) session);
}
/* post_mail_fopen_async - begin non-blocking connect to the cleanup service;
 * the notify callback receives the stream (or null) when ready. */

void    post_mail_fopen_async(const char *sender, const char *recipient,
			              int filter_class, int trace_flags,
			              VSTRING *queue_id,
			              void (*notify) (VSTREAM *, void *),
			              void *context)
{
    VSTREAM *stream;
    POST_MAIL_STATE *state;

    stream = mail_connect(MAIL_CLASS_PUBLIC, var_cleanup_service,
			  NON_BLOCKING);
    /* Bundle all call state so the event callback can finish the job. */
    state = (POST_MAIL_STATE *) mymalloc(sizeof(*state));
    state->sender = mystrdup(sender);
    state->recipient = mystrdup(recipient);
    state->filter_class = filter_class;
    state->trace_flags = trace_flags;
    state->notify = notify;
    state->context = context;
    state->stream = stream;
    state->queue_id = queue_id;

    /*
     * To keep interfaces as simple as possible we report all errors via the
     * same interface as all successes.
     */
    if (stream != 0) {
	event_enable_read(vstream_fileno(stream), post_mail_open_event,
			  (void *) state);
	/* Guard against a cleanup server that never responds. */
	event_request_timer(post_mail_open_event, (void *) state,
			    var_daemon_timeout);
    } else {
	/* Connect failed: deliver the error through a zero-delay timer. */
	event_request_timer(post_mail_open_event, (void *) state, 0);
    }
}
/* edit_file_close - commit the edited temporary file into place;
 * returns 0 on success, VSTREAM_EOF on failure (errno preserved). */

int     edit_file_close(EDIT_FILE *ep)
{
    VSTREAM *fp = ep->tmp_fp;
    int     fd = vstream_fileno(fp);
    int     saved_errno;

    /*
     * The rename/unlock portion of the protocol is relatively simple. The
     * only things that really matter here are that we change permissions as
     * late as possible, and that we rename the file to its final pathname
     * before we lose the exclusive lock.
     *
     * Applications that are concerned about maximal safety should protect the
     * edit_file_close() call with sigdelay() and sigresume() calls. It is
     * not safe for us to call these functions directly, because the calls do
     * not nest. It is also not nice to force every caller to run with
     * interrupts turned off.
     */
    /* Ordering is the contract: flush, set final mode, sync, then rename. */
    if (vstream_fflush(fp) < 0
	|| fchmod(fd, ep->final_mode) < 0
#ifdef HAS_FSYNC
	|| fsync(fd) < 0
#endif
	|| rename(ep->tmp_path, ep->final_path) < 0) {
	/* Preserve the failing call's errno across the cleanup. */
	saved_errno = errno;
	edit_file_cleanup(ep);
	errno = saved_errno;
	return (VSTREAM_EOF);
    } else {
	/* Closing the stream also releases the exclusive lock. */
	(void) vstream_fclose(ep->tmp_fp);
	EDIT_FILE_FREE(ep);
	return (0);
    }
}
/* anvil_remote_disconnect - account for one remote client disconnect */

static void anvil_remote_disconnect(VSTREAM *client_stream, const char *ident)
{
    ANVIL_REMOTE *anvil_remote;
    ANVIL_LOCAL *anvil_local;
    const char *myname = "anvil_remote_disconnect";

    if (msg_verbose)
	msg_info("%s fd=%d stream=0x%lx ident=%s",
		 myname, vstream_fileno(client_stream),
		 (unsigned long) client_stream, ident);

    /*
     * Update local and remote info if this remote connection is listed for
     * this local server.
     */
    /* All three conditions must hold, or the counters would go negative. */
    if ((anvil_local = (ANVIL_LOCAL *) vstream_context(client_stream)) != 0
	&& (anvil_remote =
	    (ANVIL_REMOTE *) htable_find(anvil_remote_map, ident)) != 0
	&& ANVIL_LOCAL_REMOTE_LINKED(anvil_local, anvil_remote)) {
	ANVIL_REMOTE_DROP_ONE(anvil_remote);
	ANVIL_LOCAL_DROP_ONE(anvil_local, anvil_remote);
    }
    if (msg_verbose)
	msg_info("%s: anvil_local 0x%lx",
		 myname, (unsigned long) anvil_local);

    /*
     * Respond to the local server.
     */
    attr_print_plain(client_stream, ATTR_FLAG_NONE,
		     ATTR_TYPE_INT, ANVIL_ATTR_STATUS, ANVIL_STAT_OK,
		     ATTR_TYPE_END);
}
/* send_rset - issue RSET and arm a read event for the server's reply */

static void send_rset(int unused_event, char *context)
{
    SESSION *session = (SESSION *) context;
    int     fd = vstream_fileno(session->stream);

    command(session->stream, "RSET");
    /* rset_done() picks up the server's response. */
    event_enable_read(fd, rset_done, (char *) session);
}
/* dict_load_file_xt - load a dictionary from file, rereading while hot;
 * returns 0 when the file cannot be opened, 1 on success. */

int     dict_load_file_xt(const char *dict_name, const char *path)
{
    VSTREAM *fp;
    struct stat st;
    time_t  before;
    time_t  after;

    /*
     * Read the file again if it is hot. This may result in reading a partial
     * parameter name when a file changes in the middle of a read.
     */
    for (before = time((time_t *) 0); /* see below */ ; before = after) {
	if ((fp = vstream_fopen(path, O_RDONLY, 0)) == 0)
	    return (0);
	dict_load_fp(dict_name, fp);
	/* Capture mtime before close so we can detect concurrent writes. */
	if (fstat(vstream_fileno(fp), &st) < 0)
	    msg_fatal("fstat %s: %m", path);
	if (vstream_ferror(fp) || vstream_fclose(fp))
	    msg_fatal("read %s: %m", path);
	after = time((time_t *) 0);
	/* File is "cool" if last modified clearly before we started reading. */
	if (st.st_mtime < before - 1 || st.st_mtime > after)
	    break;
	if (msg_verbose > 1)
	    msg_info("pausing to let %s cool down", path);
	doze(300000);
    }
    return (1);
}
/* close_session - tear down one session and move on to the next */

static void close_session(SESSION *session)
{
    VSTREAM *stream = session->stream;

    /* Stop watching the descriptor before the stream goes away. */
    event_disable_readwrite(vstream_fileno(stream));
    vstream_fclose(stream);
    session->stream = 0;

    /* Start the next session, if any work remains. */
    start_another(session);
}
/* dict_load_fp - read name=value entries from an open stream into a dict */

void    dict_load_fp(const char *dict_name, VSTREAM *fp)
{
    const char *myname = "dict_load_fp";
    VSTRING *buf;
    char   *member;
    char   *val;
    int     lineno;
    const char *err;
    struct stat st;
    DICT   *dict;

    /*
     * Instantiate the dictionary even if the file is empty.
     */
    DICT_FIND_FOR_UPDATE(dict, dict_name);
    buf = vstring_alloc(100);
    lineno = 0;
    /* Stat up front; the owner is recorded after a successful load. */
    if (fstat(vstream_fileno(fp), &st) < 0)
	msg_fatal("fstat %s: %m", VSTREAM_PATH(fp));
    while (readlline(buf, fp, &lineno)) {
	if ((err = split_nameval(STR(buf), &member, &val)) != 0)
	    msg_fatal("%s, line %d: %s: \"%s\"",
		      VSTREAM_PATH(fp), lineno, err, STR(buf));
	if (msg_verbose > 1)
	    msg_info("%s: %s = %s", myname, member, val);
	if (dict->update(dict, member, val) != 0)
	    msg_fatal("%s, line %d: unable to update %s:%s",
		      VSTREAM_PATH(fp), lineno, dict->type, dict->name);
    }
    vstring_free(buf);
    /* Record the file owner; "trusted" status requires a non-root owner
     * check elsewhere (status is non-zero for non-root owners). */
    dict->owner.uid = st.st_uid;
    dict->owner.status = (st.st_uid != 0);
}
/* read_data - consume message content until the byte count is satisfied */

static void read_data(int unused_event, void *context)
{
    SINK_STATE *state = (SINK_STATE *) context;
    int     fd = vstream_fileno(state->stream);
    int     count;

    /*
     * Refill the VSTREAM buffer, if necessary.
     */
    if (VSTREAM_GETC(state->stream) == VSTREAM_EOF)
	netstring_except(state->stream, vstream_ftimeout(state->stream) ?
			 NETSTRING_ERR_TIME : NETSTRING_ERR_EOF);
    /* One byte was consumed by VSTREAM_GETC above. */
    state->count--;

    /*
     * Flush the VSTREAM buffer. As documented, vstream_fseek() discards
     * unread input.
     */
    if ((count = vstream_peek(state->stream)) > 0) {
	state->count -= count;
	if (state->count <= 0) {
	    send_reply(state);
	    return;
	}
	/* Discard buffered bytes already accounted for above. */
	vstream_fpurge(state->stream, VSTREAM_PURGE_BOTH);
    }

    /*
     * Do not block while waiting for the arrival of more data.
     */
    event_disable_readwrite(fd);
    event_enable_read(fd, read_data, context);
}
/* single_server_accept_inet - accept one TCP client connection */

static void single_server_accept_inet(int unused_event, void *context)
{
    int     listen_fd = CAST_ANY_PTR_TO_INT(context);
    int     time_left = -1;
    int     fd;

    /*
     * Be prepared for accept() to fail because some other process already
     * got the connection. We use select() + accept(), instead of simply
     * blocking in accept(), because we must be able to detect that the
     * master process has gone away unexpectedly.
     */
    /* Remember how much idle time remains so it can be re-armed on failure. */
    if (var_idle_limit > 0)
	time_left = event_cancel_timer(single_server_timeout, (void *) 0);

    if (single_server_pre_accept)
	single_server_pre_accept(single_server_name, single_server_argv);
    fd = inet_accept(listen_fd);
    /* Release the shared accept lock whether or not accept() succeeded. */
    if (single_server_lock != 0
	&& myflock(vstream_fileno(single_server_lock), INTERNAL_LOCK,
		   MYFLOCK_OP_NONE) < 0)
	msg_fatal("select unlock: %m");
    if (fd < 0) {
	/* EAGAIN means another process won the race: not an error. */
	if (errno != EAGAIN)
	    msg_error("accept connection: %m");
	if (time_left >= 0)
	    event_request_timer(single_server_timeout, (void *) 0, time_left);
	return;
    }
    single_server_wakeup(fd, (HTABLE *) 0);
}
/* tlsp_service - handle one new tlsproxy client connection */

static void tlsp_service(VSTREAM *plaintext_stream, char *service,
			         char **argv)
{
    TLSP_STATE *state;
    int     plaintext_fd = vstream_fileno(plaintext_stream);

    /*
     * Sanity check. This service takes no command-line arguments.
     */
    if (argv[0])
	msg_fatal("unexpected command-line argument: %s", argv[0]);

    /*
     * This program handles multiple connections, so it must not block. We
     * use event-driven code for all operations that introduce latency.
     * Except that attribute lists are sent/received synchronously, once the
     * socket is found to be ready for transmission.
     */
    non_blocking(plaintext_fd, NON_BLOCKING);
    vstream_control(plaintext_stream,
		    CA_VSTREAM_CTL_PATH("plaintext"),
		    CA_VSTREAM_CTL_TIMEOUT(5),
		    CA_VSTREAM_CTL_END);

    /*
     * Receive postscreen's remote SMTP client address/port and socket.
     */
    state = tlsp_state_create(service, plaintext_stream);
    event_enable_read(plaintext_fd, tlsp_get_request_event, (void *) state);
    /* Same handler fires on timeout; it distinguishes the two cases. */
    event_request_timer(tlsp_get_request_event, (void *) state,
			TLSP_INIT_TIMEOUT);
}
/* multi_server_accept_inet - accept one TCP client connection */

static void multi_server_accept_inet(int unused_event, char *context)
{
    int     listen_fd = CAST_CHAR_PTR_TO_INT(context);
    int     time_left = -1;
    int     fd;

    /*
     * Be prepared for accept() to fail because some other process already
     * got the connection (the number of processes competing for clients is
     * kept small, so this is not a "thundering herd" problem). If the
     * accept() succeeds, be sure to disable non-blocking I/O, in order to
     * minimize confusion.
     */
    /* Only the last client disarms the idle timer, so only then save it. */
    if (client_count == 0 && var_idle_limit > 0)
	time_left = event_cancel_timer(multi_server_timeout, (char *) 0);

    if (multi_server_pre_accept)
	multi_server_pre_accept(multi_server_name, multi_server_argv);
    fd = inet_accept(listen_fd);
    /* Release the shared accept lock whether or not accept() succeeded. */
    if (multi_server_lock != 0
	&& myflock(vstream_fileno(multi_server_lock), INTERNAL_LOCK,
		   MYFLOCK_OP_NONE) < 0)
	msg_fatal("select unlock: %m");
    if (fd < 0) {
	/* EAGAIN means another process won the race: not an error. */
	if (errno != EAGAIN)
	    msg_error("accept connection: %m");
	if (time_left >= 0)
	    event_request_timer(multi_server_timeout, (char *) 0, time_left);
	return;
    }
    multi_server_wakeup(fd, (HTABLE *) 0);
}
/* send_data - transmit the message content as one nested netstring */

static void send_data(SESSION *session)
{
    int     fd = vstream_fileno(session->stream);
    int     except;

    /*
     * Prepare for disaster.
     */
    if ((except = vstream_setjmp(session->stream)) != 0)
	msg_fatal("%s while sending message", exception_text(except));

    /*
     * Send the message content, by wrapping three netstrings into an
     * over-all netstring.
     *
     * XXX This should be done more carefully to avoid blocking when sending
     * large messages over slow networks.
     */
    netstring_put_multi(session->stream,
			STR(message_buffer), LEN(message_buffer),
			STR(sender_buffer), LEN(sender_buffer),
			STR(recipient_buffer), LEN(recipient_buffer),
			(char *) 0);
    netstring_fflush(session->stream);

    /*
     * Wake me up when the server replies or when something bad happens.
     */
    event_enable_read(fd, receive_reply, (char *) session);
}
/* flush_add_path - append a queue ID to a fast-flush logfile;
 * returns FLUSH_STAT_OK or FLUSH_STAT_BAD. */

static int flush_add_path(const char *path, const char *queue_id)
{
    const char *myname = "flush_add_path";
    VSTREAM *log;

    /*
     * Sanity check.
     */
    if (!mail_queue_id_ok(path))
	return (FLUSH_STAT_BAD);

    /*
     * Open the logfile or bust.
     */
    if ((log = mail_queue_open(MAIL_QUEUE_FLUSH, path,
			       O_CREAT | O_APPEND | O_WRONLY, 0600)) == 0)
	msg_fatal("%s: open fast flush logfile %s: %m", myname, path);

    /*
     * We must lock the logfile, so that we don't lose information due to
     * concurrent access. If the lock takes too long, the Postfix watchdog
     * will eventually take care of the problem, but it will take a while.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_EXCLUSIVE) < 0)
	msg_fatal("%s: lock fast flush logfile %s: %m", myname, path);

    /*
     * Append the queue ID. With 15 bits of microsecond time, a queue ID is
     * not recycled often enough for false hits to be a problem. If it does,
     * then we could add other signature information, such as the file size
     * in bytes.
     */
    vstream_fprintf(log, "%s\n", queue_id);
    /* Write errors are logged but non-fatal: the flush log is advisory. */
    if (vstream_fflush(log))
	msg_warn("write fast flush logfile %s: %m", path);

    /*
     * Clean up.
     */
    if (myflock(vstream_fileno(log), INTERNAL_LOCK, MYFLOCK_OP_NONE) < 0)
	msg_fatal("%s: unlock fast flush logfile %s: %m", myname, path);
    if (vstream_fclose(log) != 0)
	msg_warn("write fast flush logfile %s: %m", path);

    return (FLUSH_STAT_OK);
}