/* Read more data from the fd into the stream's buffer.
   Returns the number of new bytes, 0 if nonblocking and nothing was
   available, -1 on EOF/error (stream_errno set on error), or -2 if the
   buffer is full. */
static ssize_t i_stream_file_read(struct istream_private *stream)
{
	struct file_istream *fstream = (struct file_istream *) stream;
	uoff_t offset;
	size_t size;
	ssize_t ret;

	if (!i_stream_try_alloc(stream, 1, &size))
		return -2;

	if (stream->fd == -1) {
		/* lazily open the file on first read */
		if (i_stream_file_open(stream) < 0)
			return -1;
	}

	/* absolute file offset of the end of the currently buffered data */
	offset = stream->istream.v_offset + (stream->pos - stream->skip);

	do {
		if (fstream->file) {
			/* regular file: positioned read */
			ret = pread(stream->fd, stream->w_buffer + stream->pos,
				    size, offset);
		} else if (fstream->seen_eof) {
			/* don't try to read() again. EOF from keyboard (^D)
			   requires this to work right. */
			ret = 0;
		} else {
			ret = read(stream->fd, stream->w_buffer + stream->pos,
				   size);
		}
		/* retry on EINTR only for blocking streams */
	} while (unlikely(ret < 0 && errno == EINTR &&
			  stream->istream.blocking));

	if (ret == 0) {
		/* EOF */
		stream->istream.eof = TRUE;
		fstream->seen_eof = TRUE;
		return -1;
	}

	if (unlikely(ret < 0)) {
		if (errno == EINTR || errno == EAGAIN) {
			i_assert(!stream->istream.blocking);
			ret = 0;
		} else {
			i_assert(errno != 0);
			/* if we get EBADF for a valid fd, it means
			   something's really wrong and we'd better just
			   crash. */
			i_assert(errno != EBADF);
			if (fstream->file) {
				io_stream_set_error(&stream->iostream,
					"pread(size=%"PRIuSIZE_T
					" offset=%"PRIuUOFF_T") failed: %m",
					size, offset);
			} else {
				io_stream_set_error(&stream->iostream,
					"read(size=%"PRIuSIZE_T") failed: %m",
					size);
			}
			stream->istream.stream_errno = errno;
			return -1;
		}
	}

	if (ret > 0 && fstream->skip_left > 0) {
		/* consume bytes still pending from an earlier skip request
		   (only possible for non-seekable fds) */
		i_assert(!fstream->file);
		i_assert(stream->skip == stream->pos);

		if (fstream->skip_left >= (size_t)ret) {
			fstream->skip_left -= ret;
			ret = 0;
		} else {
			ret -= fstream->skip_left;
			stream->pos += fstream->skip_left;
			stream->skip += fstream->skip_left;
			fstream->skip_left = 0;
		}
	}

	stream->pos += ret;
	i_assert(ret != 0 || !fstream->file);
	i_assert(ret != -1);
	return ret;
}
/* Create (usually fork) a new process for the service and register it in
   the master's bookkeeping. Returns the new process, or NULL if none can
   be created right now (throttled, list being destroyed, or fork()
   failure). */
struct service_process *service_process_create(struct service *service)
{
	static unsigned int uid_counter = 0;
	struct service_process *process;
	/* per-process identifier unique within this master's lifetime */
	unsigned int uid = ++uid_counter;
	const char *hostdomain;
	pid_t pid;
	bool process_forked;

	i_assert(service->status_fd[0] != -1);

	if (service->to_throttle != NULL) {
		/* throttling service, don't create new processes */
		return NULL;
	}
	if (service->list->destroying) {
		/* these services are being destroyed, no point in creating
		   new processes now */
		return NULL;
	}

	/* look this up before fork()ing so that it gets cached for all the
	   future lookups. */
	hostdomain = my_hostdomain();

	if (service->type == SERVICE_TYPE_ANVIL &&
	    service_anvil_global->pid != 0) {
		/* the global anvil process already exists - reuse it
		   instead of forking a new one */
		pid = service_anvil_global->pid;
		uid = service_anvil_global->uid;
		process_forked = FALSE;
	} else {
		pid = fork();
		process_forked = TRUE;
		service->list->fork_counter++;
	}

	if (pid < 0) {
		service_error(service, "fork() failed: %m");
		return NULL;
	}
	if (pid == 0) {
		/* child: set up fds/env, drop privileges and exec the
		   service binary (process_exec() doesn't return here) */
		service_process_setup_environment(service, uid, hostdomain);
		service_reopen_inet_listeners(service);
		service_dup_fds(service);
		drop_privileges(service);
		process_exec(service->executable, NULL);
	}
	i_assert(hash_table_lookup(service_pids, POINTER_CAST(pid)) == NULL);

	process = i_new(struct service_process, 1);
	process->service = service;
	process->refcount = 1;
	process->pid = pid;
	process->uid = uid;
	if (process_forked) {
		/* expect the first status notification from the child
		   within this timeout */
		process->to_status =
			timeout_add(SERVICE_FIRST_STATUS_TIMEOUT_SECS * 1000,
				    service_process_status_timeout, process);
	}

	process->available_count = service->client_limit;
	service->process_count++;
	service->process_avail++;
	DLLIST_PREPEND(&service->processes, process);

	service_list_ref(service->list);
	hash_table_insert(service_pids, POINTER_CAST(process->pid), process);

	if (service->type == SERVICE_TYPE_ANVIL && process_forked)
		service_anvil_process_created(process);
	return process;
}
/* In the forked child just before exec: dup2() all fds to the fixed
   positions the service binary expects (listener fds, status/log/anvil/
   dead-pipe fds), export the SOCKET*_SETTINGS / SOCKET_COUNT environment
   and redirect stderr to the log pipe. */
static void service_dup_fds(struct service *service)
{
	struct service_listener *const *listeners;
	ARRAY_TYPE(dup2) dups;
	string_t *listener_settings;
	int fd = MASTER_LISTEN_FD_FIRST;
	unsigned int i, count, socket_listener_count;

	/* stdin/stdout is already redirected to /dev/null. Other master fds
	   should have been opened with fd_close_on_exec() so we don't have
	   to worry about them.

	   because the destination fd might be another one's source fd we
	   have to be careful not to overwrite anything. dup() the fd when
	   needed */

	socket_listener_count = 0;
	listeners = array_get(&service->listeners, &count);
	t_array_init(&dups, count + 10);

	switch (service->type) {
	case SERVICE_TYPE_LOG:
		i_assert(fd == MASTER_LISTEN_FD_FIRST);
		services_log_dup2(&dups, service->list, fd,
				  &socket_listener_count);
		fd += socket_listener_count;
		break;
	case SERVICE_TYPE_ANVIL:
		dup2_append(&dups, service_anvil_global->log_fdpass_fd[0],
			    MASTER_ANVIL_LOG_FDPASS_FD);
		/* nonblocking anvil fd must be the first one. anvil treats
		   it as the master's fd */
		dup2_append(&dups, service_anvil_global->nonblocking_fd[0],
			    fd++);
		dup2_append(&dups, service_anvil_global->blocking_fd[0],
			    fd++);
		socket_listener_count += 2;
		break;
	default:
		break;
	}

	/* add listeners */
	listener_settings = t_str_new(256);
	for (i = 0; i < count; i++) {
		if (listeners[i]->fd != -1) {
			/* build the tab-escaped settings string exported
			   for this socket */
			str_truncate(listener_settings, 0);
			str_append_tabescaped(listener_settings,
					      listeners[i]->name);

			if (listeners[i]->type == SERVICE_LISTENER_INET) {
				if (listeners[i]->set.inetset.set->ssl)
					str_append(listener_settings, "\tssl");
				if (listeners[i]->set.inetset.set->haproxy)
					str_append(listener_settings,
						   "\thaproxy");
			}

			dup2_append(&dups, listeners[i]->fd, fd++);

			env_put(t_strdup_printf("SOCKET%d_SETTINGS=%s",
				socket_listener_count,
				str_c(listener_settings)));
			socket_listener_count++;
		}
	}

	if (service->login_notify_fd != -1) {
		dup2_append(&dups, service->login_notify_fd,
			    MASTER_LOGIN_NOTIFY_FD);
	}
	switch (service->type) {
	case SERVICE_TYPE_LOG:
	case SERVICE_TYPE_ANVIL:
	case SERVICE_TYPE_CONFIG:
		/* point MASTER_ANVIL_FD at /dev/null for these types */
		dup2_append(&dups, dev_null_fd, MASTER_ANVIL_FD);
		break;
	case SERVICE_TYPE_UNKNOWN:
	case SERVICE_TYPE_LOGIN:
	case SERVICE_TYPE_STARTUP:
		dup2_append(&dups, service_anvil_global->blocking_fd[1],
			    MASTER_ANVIL_FD);
		break;
	}
	dup2_append(&dups, service->status_fd[1], MASTER_STATUS_FD);
	if (service->type != SERVICE_TYPE_ANVIL) {
		dup2_append(&dups, service->master_dead_pipe_fd[1],
			    MASTER_DEAD_FD);
	} else {
		dup2_append(&dups, global_master_dead_pipe_fd[1],
			    MASTER_DEAD_FD);
	}

	if (service->type == SERVICE_TYPE_LOG) {
		/* keep stderr as-is. this is especially important when
		   log_path=/dev/stderr, but might be helpful even in other
		   situations for logging startup errors */
	} else {
		/* set log file to stderr. dup2() here immediately so that
		   we can set up logging to it without causing any log
		   messages to be lost. */
		i_assert(service->log_fd[1] != -1);

		env_put("LOG_SERVICE=1");
		if (dup2(service->log_fd[1], STDERR_FILENO) < 0)
			i_fatal("dup2(log fd) failed: %m");
		i_set_failure_internal();
	}

	/* make sure we don't leak syslog fd. try to do it as late as
	   possible, but also before dup2()s in case syslog fd is one of
	   them. */
	closelog();

	if (dup2_array(&dups) < 0)
		i_fatal("service(%s): dup2s failed", service->set->name);

	i_assert(fd == MASTER_LISTEN_FD_FIRST + (int)socket_listener_count);
	env_put(t_strdup_printf("SOCKET_COUNT=%d", socket_listener_count));
}
/* Allocate and initialize a login client for an accepted connection.
   fd is the accepted socket, ssl tells whether it's already a TLS
   connection, conn carries the local/remote addresses and listener name.
   The client is linked into the global clients list and gets a login
   timeout. */
struct client *
client_create(int fd, bool ssl, pool_t pool,
	      const struct master_service_connection *conn,
	      const struct login_settings *set,
	      const struct master_service_ssl_settings *ssl_set,
	      void **other_sets)
{
	struct client *client;

	i_assert(fd != -1);

	client = login_binary->client_vfuncs->alloc(pool);
	client->v = *login_binary->client_vfuncs;
	/* fill in default auth vfuncs where the protocol didn't override */
	if (client->v.auth_send_challenge == NULL)
		client->v.auth_send_challenge = client_auth_send_challenge;
	if (client->v.auth_parse_response == NULL)
		client->v.auth_parse_response = client_auth_parse_response;

	client->created = ioloop_time;
	client->refcount = 1;
	client->pool = pool;
	client->set = set;
	client->ssl_set = ssl_set;

	client->fd = fd;
	client->tls = ssl;

	client->local_ip = conn->local_ip;
	client->local_port = conn->local_port;
	client->ip = conn->remote_ip;
	client->remote_port = conn->remote_port;
	client->real_local_ip = conn->real_local_ip;
	client->real_local_port = conn->real_local_port;
	client->real_remote_ip = conn->real_remote_ip;
	client->real_remote_port = conn->real_remote_port;
	client->listener_name = p_strdup(client->pool, conn->name);
	client->trusted = client_is_trusted(client);
	/* secured = TLS, trusted client, or remote IP equals local IP */
	client->secured = ssl || client->trusted ||
		net_ip_compare(&conn->real_remote_ip, &conn->real_local_ip);
	client->proxy_ttl = LOGIN_PROXY_TTL;

	if (last_client == NULL)
		last_client = client;
	DLLIST_PREPEND(&clients, client);
	clients_count++;

	/* disconnect clients that don't finish logging in within the
	   timeout */
	client->to_disconnect =
		timeout_add(CLIENT_LOGIN_TIMEOUT_MSECS,
			    client_idle_disconnect_timeout, client);
	client_open_streams(client);

	client->v.create(client, other_sets);

	if (auth_client_is_connected(auth_client))
		client_notify_auth_ready(client);
	else
		client_set_auth_waiting(client);
	login_refresh_proctitle();
	return client;
}
static ssize_t i_stream_lzma_read(struct istream_private *stream) { struct lzma_istream *zstream = (struct lzma_istream *)stream; const unsigned char *data; uoff_t high_offset; size_t size, out_size; lzma_ret ret; high_offset = stream->istream.v_offset + (stream->pos - stream->skip); if (zstream->eof_offset == high_offset) { i_assert(zstream->high_pos == 0 || zstream->high_pos == stream->pos); stream->istream.eof = TRUE; return -1; } if (stream->pos < zstream->high_pos) { /* we're here because we seeked back within the read buffer. */ ret = zstream->high_pos - stream->pos; stream->pos = zstream->high_pos; zstream->high_pos = 0; if (zstream->eof_offset != (uoff_t)-1) { high_offset = stream->istream.v_offset + (stream->pos - stream->skip); i_assert(zstream->eof_offset == high_offset); stream->istream.eof = TRUE; } return ret; } zstream->high_pos = 0; if (stream->pos + CHUNK_SIZE > stream->buffer_size) { /* try to keep at least CHUNK_SIZE available */ if (!zstream->marked && stream->skip > 0) { /* don't try to keep anything cached if we don't have a seek mark. 
*/ i_stream_compress(stream); } if (stream->max_buffer_size == 0 || stream->buffer_size < stream->max_buffer_size) i_stream_grow_buffer(stream, CHUNK_SIZE); if (stream->pos == stream->buffer_size) { if (stream->skip > 0) { /* lose our buffer cache */ i_stream_compress(stream); } if (stream->pos == stream->buffer_size) return -2; /* buffer full */ } } if (i_stream_read_data(stream->parent, &data, &size, 0) < 0) { if (stream->parent->stream_errno != 0) { stream->istream.stream_errno = stream->parent->stream_errno; } else { i_assert(stream->parent->eof); lzma_stream_end(zstream); stream->istream.eof = TRUE; } return -1; } if (size == 0) { /* no more input */ i_assert(!stream->istream.blocking); return 0; } zstream->strm.next_in = data; zstream->strm.avail_in = size; out_size = stream->buffer_size - stream->pos; zstream->strm.next_out = stream->w_buffer + stream->pos; zstream->strm.avail_out = out_size; ret = lzma_code(&zstream->strm, LZMA_RUN); out_size -= zstream->strm.avail_out; stream->pos += out_size; i_stream_skip(stream->parent, size - zstream->strm.avail_in); switch (ret) { case LZMA_OK: break; case LZMA_DATA_ERROR: case LZMA_BUF_ERROR: lzma_read_error(zstream, "corrupted data"); stream->istream.stream_errno = EINVAL; return -1; case LZMA_FORMAT_ERROR: lzma_read_error(zstream, "wrong magic in header (not xz file?)"); stream->istream.stream_errno = EINVAL; return -1; case LZMA_OPTIONS_ERROR: lzma_read_error(zstream, "Unsupported xz options"); stream->istream.stream_errno = EINVAL; return -1; case LZMA_MEM_ERROR: i_fatal_status(FATAL_OUTOFMEM, "lzma.read(%s): Out of memory", i_stream_get_name(&stream->istream)); case LZMA_STREAM_END: lzma_stream_end(zstream); if (out_size == 0) { stream->istream.eof = TRUE; return -1; } break; default: lzma_read_error(zstream, t_strdup_printf( "lzma_code() failed with %d", ret)); stream->istream.stream_errno = EINVAL; return -1; } if (out_size == 0) { /* read more input */ return i_stream_lzma_read(stream); } return out_size; }
/* Take one more reference on the URLAUTH fetch context. The context must
   already be referenced (refcount > 0). */
static void imap_urlauth_fetch_ref(struct imap_urlauth_fetch *ufetch)
{
	i_assert(ufetch->refcount > 0);
	ufetch->refcount += 1;
}
/* Parse the next header line of the current part. Returns 1 with a block
   (either one header line, or hdr=NULL/size=0 marking end of headers),
   0 if more input is needed, <0 on input error. Also handles a --boundary
   line terminating the headers, and sets up the parser's next-state
   callback when the headers end. */
static int parse_next_header(struct message_parser_ctx *ctx,
			     struct message_block *block_r)
{
	struct message_part *part = ctx->part;
	struct message_header_line *hdr;
	struct message_boundary *boundary;
	bool full;
	int ret;

	if ((ret = message_parser_read_more(ctx, block_r, &full)) == 0)
		return ret;

	if (ret > 0 && block_is_at_eoh(block_r) &&
	    ctx->last_boundary != NULL &&
	    (part->flags & MESSAGE_PART_FLAG_IS_MIME) != 0) {
		/* we are at the end of headers and we've determined that
		   we're going to start a multipart. add the boundary
		   already here at this point so we can reliably determine
		   whether the "\n--boundary" belongs to us or to a previous
		   boundary. this is a problem if the boundary prefixes are
		   identical, because MIME requires only the prefix to
		   match. */
		parse_next_body_multipart_init(ctx);
		ctx->multipart = TRUE;
	}

	/* before parsing the header see if we can find a --boundary from
	   here. we're guaranteed to be at the beginning of the line here. */
	if (ret > 0) {
		ret = ctx->boundaries == NULL ? -1 :
			boundary_line_find(ctx, block_r->data,
					   block_r->size, full, &boundary);
		if (ret > 0 && boundary->part == ctx->part) {
			/* our own body begins with our own --boundary.
			   we don't want to handle that yet. */
			ret = -1;
		}
	}
	if (ret < 0) {
		/* no boundary */
		ret = message_parse_header_next(ctx->hdr_parser_ctx, &hdr);
		if (ret == 0 || (ret < 0 && ctx->input->stream_errno != 0)) {
			/* need more data (or the input stream failed) */
			ctx->want_count = i_stream_get_data_size(ctx->input) + 1;
			return ret;
		}
	} else if (ret == 0) {
		/* need more data */
		return 0;
	} else {
		/* boundary found. stop parsing headers here. The previous
		   [CR]LF belongs to the MIME boundary though. */
		if (ctx->prev_hdr_newline_size > 0) {
			i_assert(ctx->part->header_size.lines > 0);
			/* remove the newline size from the MIME header */
			ctx->part->header_size.lines--;
			ctx->part->header_size.physical_size -=
				ctx->prev_hdr_newline_size;
			ctx->part->header_size.virtual_size -= 2;
			/* add the newline size to the parent's body */
			ctx->part->parent->body_size.lines++;
			ctx->part->parent->body_size.physical_size +=
				ctx->prev_hdr_newline_size;
			ctx->part->parent->body_size.virtual_size += 2;
		}
		hdr = NULL;
	}

	if (hdr != NULL) {
		if (hdr->eoh)
			;
		else if (strcasecmp(hdr->name, "Mime-Version") == 0) {
			/* it's MIME. Content-* headers are valid */
			part->flags |= MESSAGE_PART_FLAG_IS_MIME;
		} else if (strcasecmp(hdr->name, "Content-Type") == 0) {
			if ((ctx->flags &
			     MESSAGE_PARSER_FLAG_MIME_VERSION_STRICT) == 0)
				part->flags |= MESSAGE_PART_FLAG_IS_MIME;

			if (hdr->continues)
				hdr->use_full_value = TRUE;
			else T_BEGIN {
				parse_content_type(ctx, hdr);
			} T_END;
		}

		block_r->hdr = hdr;
		block_r->size = 0;
		/* remember this header's newline size so a following
		   boundary line can reassign it (see above) */
		ctx->prev_hdr_newline_size = hdr->no_newline ? 0 :
			(hdr->crlf_newline ? 2 : 1);
		return 1;
	}

	/* end of headers */
	if ((part->flags & MESSAGE_PART_FLAG_IS_MIME) == 0) {
		/* It's not MIME. Reset everything we found from
		   Content-Type. */
		i_assert(!ctx->multipart);
		part->flags = 0;
	}
	ctx->last_boundary = NULL;

	if (!ctx->part_seen_content_type ||
	    (part->flags & MESSAGE_PART_FLAG_IS_MIME) == 0) {
		if (part->parent != NULL &&
		    (part->parent->flags &
		     MESSAGE_PART_FLAG_MULTIPART_DIGEST) != 0) {
			/* when there's no content-type specified and we're
			   below multipart/digest, assume message/rfc822
			   content-type */
			part->flags |= MESSAGE_PART_FLAG_MESSAGE_RFC822;
		} else {
			/* otherwise we default to text/plain */
			part->flags |= MESSAGE_PART_FLAG_TEXT;
		}
	}

	if (message_parse_header_has_nuls(ctx->hdr_parser_ctx))
		part->flags |= MESSAGE_PART_FLAG_HAS_NULS;
	message_parse_header_deinit(&ctx->hdr_parser_ctx);

	i_assert((part->flags & MUTEX_FLAGS) != MUTEX_FLAGS);

	/* pick the state callback for parsing the body */
	ctx->last_chr = '\n';
	if (ctx->multipart) {
		i_assert(ctx->last_boundary == NULL);
		ctx->multipart = FALSE;
		ctx->parse_next_block = parse_next_body_to_boundary;
	} else if ((part->flags & MESSAGE_PART_FLAG_MESSAGE_RFC822) != 0)
		ctx->parse_next_block = parse_next_body_message_rfc822_init;
	else if (ctx->boundaries != NULL)
		ctx->parse_next_block = parse_next_body_to_boundary;
	else
		ctx->parse_next_block = parse_next_body_to_eof;

	ctx->want_count = 1;

	/* return empty block as end of headers */
	block_r->hdr = NULL;
	block_r->size = 0;
	return 1;
}
/* Apply a deletion (mailbox, directory or unsubscription) seen on one side
   to the other side's mailbox tree, unless the other side has newer
   conflicting state (newer rename/create or subscription timestamp, or a
   newly created mailbox with the same name). */
static void
dsync_brain_mailbox_tree_add_delete(struct dsync_mailbox_tree *tree,
				    struct dsync_mailbox_tree *other_tree,
				    const struct dsync_mailbox_delete *other_del)
{
	const struct dsync_mailbox_node *node;
	struct dsync_mailbox_node *other_node, *old_node;
	const char *name;

	/* see if we can find the deletion based on mailbox tree that
	   should still have the mailbox */
	node = dsync_mailbox_tree_find_delete(tree, other_del);
	if (node == NULL)
		return;

	switch (other_del->type) {
	case DSYNC_MAILBOX_DELETE_TYPE_MAILBOX:
		/* mailbox is always deleted */
		break;
	case DSYNC_MAILBOX_DELETE_TYPE_DIR:
		if (other_del->timestamp <= node->last_renamed_or_created) {
			/* we don't want to delete this directory, we
			   already have a newer timestamp for it */
			return;
		}
		break;
	case DSYNC_MAILBOX_DELETE_TYPE_UNSUBSCRIBE:
		if (other_del->timestamp <= node->last_subscription_change) {
			/* we don't want to unsubscribe, since we already
			   have a newer subscription timestamp */
			return;
		}
		break;
	}

	/* make a node for it in the other mailbox tree */
	name = dsync_mailbox_node_get_full_name(tree, node);
	other_node = dsync_mailbox_tree_get(other_tree, name);
	if (other_node->existence == DSYNC_MAILBOX_NODE_EXISTS &&
	    (!guid_128_is_empty(other_node->mailbox_guid) ||
	     other_del->type != DSYNC_MAILBOX_DELETE_TYPE_MAILBOX)) {
		/* other side has already created a new mailbox or
		   directory with this name, we can't delete it */
		return;
	}

	/* ok, mark the other node deleted */
	if (other_del->type == DSYNC_MAILBOX_DELETE_TYPE_MAILBOX) {
		memcpy(other_node->mailbox_guid, node->mailbox_guid,
		       sizeof(other_node->mailbox_guid));
	}
	i_assert(other_node->ns == NULL || other_node->ns == node->ns);
	other_node->ns = node->ns;
	if (other_del->type != DSYNC_MAILBOX_DELETE_TYPE_UNSUBSCRIBE)
		other_node->existence = DSYNC_MAILBOX_NODE_DELETED;
	else {
		/* unsubscription: keep existence, update subscription
		   state only */
		other_node->last_subscription_change = other_del->timestamp;
		other_node->subscribed = FALSE;
	}

	if (dsync_mailbox_tree_guid_hash_add(other_tree, other_node,
					     &old_node) < 0)
		i_unreached();
}
/* Execute one mailbox tree sync change (create/delete/rename/subscribe/
   unsubscribe of a mailbox or directory). Returns 0 on success - including
   benign EXISTS/NOTFOUND conflicts which are deferred to the next sync -
   or -1 on error with *error_r set. */
int dsync_brain_mailbox_tree_sync_change(struct dsync_brain *brain,
			const struct dsync_mailbox_tree_sync_change *change,
			enum mail_error *error_r)
{
	struct mailbox *box = NULL, *destbox;
	const char *errstr, *func_name = NULL, *storage_name;
	enum mail_error error;
	int ret = -1;

	if (brain->backup_send) {
		/* nothing is changed on this side when we're only sending
		   a backup */
		i_assert(brain->no_backup_overwrite);
		return 0;
	}

	/* first switch: allocate the mailbox (or fully handle DELETE_DIR,
	   which works on the mailbox list and not on a mailbox object) */
	switch (change->type) {
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_DELETE_BOX:
		/* make sure we're deleting the correct mailbox */
		ret = dsync_brain_mailbox_alloc(brain, change->mailbox_guid,
						&box, &errstr, error_r);
		if (ret < 0) {
			i_error("Mailbox sync: Couldn't allocate mailbox GUID %s: %s",
				guid_128_to_string(change->mailbox_guid),
				errstr);
			return -1;
		}
		if (ret == 0) {
			if (brain->debug) {
				i_debug("brain %c: Change during sync: "
					"Mailbox GUID %s deletion conflict: %s",
					brain->master_brain ? 'M' : 'S',
					guid_128_to_string(change->mailbox_guid),
					errstr);
			}
			brain->changes_during_sync = TRUE;
			return 0;
		}
		break;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_DELETE_DIR:
		storage_name = mailbox_list_get_storage_name(change->ns->list,
							     change->full_name);
		if (mailbox_list_delete_dir(change->ns->list,
					    storage_name) == 0)
			return 0;

		errstr = mailbox_list_get_last_error(change->ns->list, &error);
		if (error == MAIL_ERROR_NOTFOUND ||
		    error == MAIL_ERROR_EXISTS) {
			/* concurrent change - let the next sync sort it out */
			if (brain->debug) {
				i_debug("brain %c: Change during sync: "
					"Mailbox %s mailbox_list_delete_dir conflict: %s",
					brain->master_brain ? 'M' : 'S',
					change->full_name, errstr);
			}
			brain->changes_during_sync = TRUE;
			return 0;
		} else {
			i_error("Mailbox sync: mailbox_list_delete_dir failed: %s",
				errstr);
			*error_r = error;
			return -1;
		}
	default:
		box = mailbox_alloc(change->ns->list, change->full_name, 0);
		break;
	}
	/* second switch: perform the actual operation on the mailbox */
	switch (change->type) {
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_CREATE_BOX:
		ret = sync_create_box(brain, box, change->mailbox_guid,
				      change->uid_validity, error_r);
		mailbox_free(&box);
		return ret;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_CREATE_DIR:
		ret = mailbox_create(box, NULL, TRUE);
		if (ret < 0 &&
		    mailbox_get_last_mail_error(box) == MAIL_ERROR_EXISTS) {
			/* it doesn't matter if somebody else created this
			   directory or we automatically did while creating
			   its child mailbox. it's there now anyway and we
			   don't gain anything by treating this failure any
			   differently from success. */
			ret = 0;
		}
		func_name = "mailbox_create";
		break;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_DELETE_BOX:
		ret = mailbox_delete(box);
		func_name = "mailbox_delete";
		break;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_DELETE_DIR:
		/* fully handled by the first switch */
		i_unreached();
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_RENAME:
		destbox = mailbox_alloc(change->ns->list,
					change->rename_dest_name, 0);
		ret = mailbox_rename(box, destbox);
		func_name = "mailbox_rename";
		mailbox_free(&destbox);
		break;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_SUBSCRIBE:
		ret = mailbox_set_subscribed(box, TRUE);
		func_name = "mailbox_set_subscribed";
		break;
	case DSYNC_MAILBOX_TREE_SYNC_TYPE_UNSUBSCRIBE:
		ret = mailbox_set_subscribed(box, FALSE);
		func_name = "mailbox_set_subscribed";
		break;
	}
	if (ret < 0) {
		errstr = mailbox_get_last_error(box, &error);
		if (error == MAIL_ERROR_EXISTS ||
		    error == MAIL_ERROR_NOTFOUND) {
			/* mailbox was already created or was already
			   deleted. let the next sync figure out what to
			   do */
			if (brain->debug) {
				i_debug("brain %c: Change during sync: "
					"Mailbox %s %s conflict: %s",
					brain->master_brain ? 'M' : 'S',
					mailbox_get_vname(box),
					func_name, errstr);
			}
			brain->changes_during_sync = TRUE;
			ret = 0;
		} else {
			i_error("Mailbox %s sync: %s failed: %s",
				mailbox_get_vname(box), func_name, errstr);
			*error_r = error;
		}
	}
	mailbox_free(&box);
	return ret;
}
/* Establish the MySQL connection (TCP host or unix socket path), applying
   option file/group and optional SSL settings. The blocking connect is
   capped with alarm(). Returns 1 on success, -1 on failure (state reset
   back to disconnected and connect_delay possibly increased). */
static int driver_mysql_connect(struct sql_db *_db)
{
	struct mysql_db *db = (struct mysql_db *)_db;
	const char *unix_socket, *host;
	unsigned long client_flags = db->client_flags;
	unsigned int secs_used;
	bool failed;

	i_assert(db->api.state == SQL_DB_STATE_DISCONNECTED);

	sql_db_set_state(&db->api, SQL_DB_STATE_CONNECTING);

	/* a host beginning with '/' is a unix socket path */
	if (*db->host == '/') {
		unix_socket = db->host;
		host = NULL;
	} else {
		unix_socket = NULL;
		host = db->host;
	}

	if (db->option_file != NULL) {
		mysql_options(db->mysql, MYSQL_READ_DEFAULT_FILE,
			      db->option_file);
	}

	mysql_options(db->mysql, MYSQL_READ_DEFAULT_GROUP,
		      db->option_group != NULL ? db->option_group : "client");

	if (!db->ssl_set && (db->ssl_ca != NULL || db->ssl_ca_path != NULL)) {
#ifdef HAVE_MYSQL_SSL
		mysql_ssl_set(db->mysql, db->ssl_key, db->ssl_cert,
			      db->ssl_ca, db->ssl_ca_path
#ifdef HAVE_MYSQL_SSL_CIPHER
			      , db->ssl_cipher
#endif
			     );
		db->ssl_set = TRUE;
#else
		i_fatal("mysql: SSL support not compiled in "
			"(remove ssl_ca and ssl_ca_path settings)");
#endif
	}

	/* cap how long the blocking connect may take */
	alarm(SQL_CONNECT_TIMEOUT_SECS);
#ifdef CLIENT_MULTI_RESULTS
	client_flags |= CLIENT_MULTI_RESULTS;
#endif
	/* CLIENT_MULTI_RESULTS allows the use of stored procedures */
	failed = mysql_real_connect(db->mysql, host, db->user, db->password,
				    db->dbname, db->port, unix_socket,
				    client_flags) == NULL;
	/* alarm(0) cancels the pending alarm and returns the seconds that
	   were left, giving how long the connect attempt took */
	secs_used = SQL_CONNECT_TIMEOUT_SECS - alarm(0);
	if (failed) {
		/* connecting could have taken a while. make sure that any
		   timeouts that get added soon will get a refreshed
		   timestamp. */
		io_loop_time_refresh();

		if (db->api.connect_delay < secs_used)
			db->api.connect_delay = secs_used;
		sql_db_set_state(&db->api, SQL_DB_STATE_DISCONNECTED);
		i_error("%s: Connect failed to database (%s): %s - "
			"waiting for %u seconds before retry",
			mysql_prefix(db), db->dbname,
			mysql_error(db->mysql), db->api.connect_delay);
		return -1;
	} else {
		db->last_success = ioloop_time;
		sql_db_set_state(&db->api, SQL_DB_STATE_IDLE);
		return 1;
	}
}
/* Remove an io whose fd has already been closed. Notify ios must not be
   removed through this function. */
void io_remove_closed(struct io **io)
{
	struct io *removed_io = *io;

	i_assert((removed_io->condition & IO_NOTIFY) == 0);
	io_remove_full(io, TRUE);
}
/* Parse and begin executing one IMAP command from the client's input
   buffer. Returns TRUE if the command finished (or errored) and more
   input can be handled, FALSE if more data or unambiguity is needed. */
static bool client_command_input(struct client_command_context *cmd)
{
	struct client *client = cmd->client;
	struct command *command;

	if (cmd->func != NULL) {
		/* command is being executed - continue it */
		if (command_exec(cmd)) {
			/* command execution was finished */
			client_command_free(&cmd);
			client_add_missing_io(client);
			return TRUE;
		}
		return client_handle_unfinished_cmd(cmd);
	}

	if (cmd->tag == NULL) {
		cmd->tag = imap_parser_read_word(cmd->parser);
		if (cmd->tag == NULL)
			return FALSE; /* need more data */
		cmd->tag = p_strdup(cmd->pool, cmd->tag);
	}

	if (cmd->name == NULL) {
		cmd->name = imap_parser_read_word(cmd->parser);
		if (cmd->name == NULL)
			return FALSE; /* need more data */

		/* UID commands are a special case. better to handle them
		   here. */
		if (!cmd->uid && strcasecmp(cmd->name, "UID") == 0) {
			cmd->uid = TRUE;
			cmd->name = imap_parser_read_word(cmd->parser);
			if (cmd->name == NULL)
				return FALSE; /* need more data */
		}
		/* store "UID FETCH" style names with the prefix included */
		cmd->name = !cmd->uid ?
			p_strdup(cmd->pool, cmd->name) :
			p_strconcat(cmd->pool, "UID ", cmd->name, NULL);
		imap_refresh_proctitle();
	}

	client->input_skip_line = TRUE;

	if (cmd->name[0] == '\0') {
		/* command not given - cmd->func is already NULL. */
	} else if ((command = command_find(cmd->name)) != NULL) {
		cmd->func = command->func;
		cmd->cmd_flags = command->flags;
		if (client_command_is_ambiguous(cmd)) {
			/* do nothing until existing commands are finished */
			i_assert(cmd->state == CLIENT_COMMAND_STATE_WAIT_INPUT);
			cmd->state = CLIENT_COMMAND_STATE_WAIT_UNAMBIGUITY;
			io_remove(&client->io);
			return FALSE;
		}
	}

	if (cmd->func == NULL) {
		/* unknown command */
		client_send_command_error(cmd, "Unknown command.");
		cmd->param_error = TRUE;
		client_command_free(&cmd);
		return TRUE;
	} else {
		i_assert(!client->disconnected);
		/* recurse: the command is now known, start executing it */
		return client_command_input(cmd);
	}
}
/* Free a finished command: release its locks and parser, unlink it from
   the command queue, and schedule handling of any further pipelined
   input. *_cmd is set to NULL. */
void client_command_free(struct client_command_context **_cmd)
{
	struct client_command_context *cmd = *_cmd;
	struct client *client = cmd->client;
	enum client_command_state state = cmd->state;

	*_cmd = NULL;

	i_assert(client->output_cmd_lock == NULL);

	/* reset input idle time because command output might have taken a
	   long time and we don't want to disconnect client immediately
	   then */
	client->last_input = ioloop_time;
	timeout_reset(client->to_idle);

	if (cmd->cancel) {
		cmd->cancel = FALSE;
		client_send_tagline(cmd, "NO Command cancelled.");
	}

	if (!cmd->param_error)
		client->bad_counter = 0;

	if (client->input_lock == cmd)
		client->input_lock = NULL;
	if (client->mailbox_change_lock == cmd)
		client->mailbox_change_lock = NULL;

	if (cmd->parser != NULL) {
		/* keep one parser cached for reuse by the next command */
		if (client->free_parser == NULL) {
			imap_parser_reset(cmd->parser);
			client->free_parser = cmd->parser;
		} else {
			imap_parser_unref(&cmd->parser);
		}
	}

	client->command_queue_size--;
	DLLIST_REMOVE(&client->command_queue, cmd);
	/* NOTE(review): cmd appears to be allocated from command_pool
	   (p_clear() below) - must not be used after this point */
	cmd = NULL;

	if (client->command_queue == NULL) {
		/* no commands left in the queue, we can clear the pool */
		p_clear(client->command_pool);
		if (client->to_idle_output != NULL)
			timeout_remove(&client->to_idle_output);
	}
	imap_client_notify_command_freed(client);
	imap_refresh_proctitle();

	/* if command finished from external event, check input for more
	   unhandled commands since we may not be executing from
	   client_input or client_output. */
	if (state == CLIENT_COMMAND_STATE_WAIT_EXTERNAL &&
	    !client->disconnected) {
		client_add_missing_io(client);
		if (client->to_delayed_input == NULL) {
			client->to_delayed_input =
				timeout_add(0, client_input, client);
		}
	}
}
/* Disconnect and tear down an imap client: log the disconnect, cancel all
   queued commands, free mailbox/notify/urlauth state, close streams and
   fds, and release every reference the client holds. */
static void client_default_destroy(struct client *client, const char *reason)
{
	struct client_command_context *cmd;
	const char *cmd_status = "";

	i_assert(!client->destroyed);
	client->destroyed = TRUE;

	if (!client->disconnected) {
		client->disconnected = TRUE;
		if (reason == NULL) {
			reason = io_stream_get_disconnect_reason(client->input,
								 client->output);
			cmd_status = client_get_commands_status(client);
		}
		i_info("%s%s %s", reason, cmd_status, client_stats(client));
	}

	i_stream_close(client->input);
	o_stream_close(client->output);

	/* finish off all the queued commands. */
	if (client->output_cmd_lock != NULL)
		client_command_cancel(&client->output_cmd_lock);
	while (client->command_queue != NULL) {
		cmd = client->command_queue;
		client_command_cancel(&cmd);
	}
	/* handle the input_lock command last. it might have been waiting on
	   other queued commands (although we probably should just drop the
	   command at that point since it hasn't started running. but this
	   may change in future). */
	if (client->input_lock != NULL)
		client_command_cancel(&client->input_lock);

	if (client->mailbox != NULL) {
		client_search_updates_free(client);
		mailbox_free(&client->mailbox);
	}
	if (client->notify_ctx != NULL)
		imap_notify_deinit(&client->notify_ctx);
	if (client->urlauth_ctx != NULL)
		imap_urlauth_deinit(&client->urlauth_ctx);
	if (client->anvil_sent) {
		/* tell anvil this connection is gone */
		master_service_anvil_send(master_service, t_strconcat(
			"DISCONNECT\t", my_pid, "\timap/",
			mail_user_get_anvil_userip_ident(client->user),
			"\n", NULL));
	}
	mail_user_unref(&client->user);

	if (client->free_parser != NULL)
		imap_parser_unref(&client->free_parser);
	if (client->io != NULL)
		io_remove(&client->io);
	if (client->to_idle_output != NULL)
		timeout_remove(&client->to_idle_output);
	if (client->to_delayed_input != NULL)
		timeout_remove(&client->to_delayed_input);
	timeout_remove(&client->to_idle);

	i_stream_destroy(&client->input);
	o_stream_destroy(&client->output);

	net_disconnect(client->fd_in);
	/* fd_in and fd_out may be the same fd (a socket) */
	if (client->fd_in != client->fd_out)
		net_disconnect(client->fd_out);

	if (array_is_created(&client->search_saved_uidset))
		array_free(&client->search_saved_uidset);
	if (array_is_created(&client->search_updates))
		array_free(&client->search_updates);
	pool_unref(&client->command_pool);
	mail_storage_service_user_free(&client->service_user);

	imap_client_count--;
	DLLIST_REMOVE(&imap_clients, client);

	pool_unref(&client->pool);

	master_service_client_connection_destroyed(master_service);
	imap_refresh_proctitle();
}
/* Fill *status_r for an already-open mailbox from the index header,
   computing only the items requested in the items bitmask (the cheap
   fields are always filled). */
void index_storage_get_open_status(struct mailbox *box,
				   enum mailbox_status_items items,
				   struct mailbox_status *status_r)
{
	const struct mail_index_header *hdr;

	/* we can get most of the status items without any trouble */
	hdr = mail_index_get_header(box->view);
	status_r->messages = hdr->messages_count;
	if ((items & STATUS_RECENT) != 0) {
		if ((box->flags & MAILBOX_FLAG_DROP_RECENT) != 0) {
			/* recent flags are set and dropped by the previous
			   sync while index was locked. if we updated the
			   recent flags here we'd have a race condition. */
			i_assert(box->synced);
		} else {
			/* make sure recent count is set, in case we
			   haven't synced yet */
			index_sync_update_recent_count(box);
		}
		status_r->recent = index_mailbox_get_recent_count(box);
		i_assert(status_r->recent <= status_r->messages);
	}
	if ((items & STATUS_UNSEEN) != 0) {
		if (box->view_pvt == NULL ||
		    (mailbox_get_private_flags_mask(box) & MAIL_SEEN) == 0) {
			/* no private \Seen flags - derive the count from
			   the shared index header */
			status_r->unseen = hdr->messages_count -
				hdr->seen_messages_count;
		} else {
			status_r->unseen = index_storage_count_pvt_unseen(box);
		}
	}
	status_r->uidvalidity = hdr->uid_validity;
	status_r->uidnext = hdr->next_uid;
	status_r->first_recent_uid = hdr->first_recent_uid;

	if ((items & STATUS_HIGHESTMODSEQ) != 0) {
		status_r->nonpermanent_modseqs =
			mail_index_is_in_memory(box->index);
		status_r->no_modseq_tracking =
			!mail_index_have_modseq_tracking(box->index);
		status_r->highest_modseq =
			mail_index_modseq_get_highest(box->view);
		if (status_r->highest_modseq == 0) {
			/* modseqs not enabled yet, but we can't return 0 */
			status_r->highest_modseq = 1;
		}
	}
	if ((items & STATUS_HIGHESTPVTMODSEQ) != 0 && box->view_pvt != NULL) {
		status_r->highest_pvt_modseq =
			mail_index_modseq_get_highest(box->view_pvt);
		if (status_r->highest_pvt_modseq == 0) {
			/* modseqs not enabled yet, but we can't return 0 */
			status_r->highest_pvt_modseq = 1;
		}
	}

	if ((items & STATUS_FIRST_UNSEEN_SEQ) != 0) {
		if (box->view_pvt == NULL ||
		    (mailbox_get_private_flags_mask(box) & MAIL_SEEN) == 0) {
			mail_index_lookup_first(box->view, 0, MAIL_SEEN,
						&status_r->first_unseen_seq);
		} else {
			status_r->first_unseen_seq =
				index_storage_find_first_pvt_unseen_seq(box);
		}
	}
	if ((items & STATUS_LAST_CACHED_SEQ) != 0)
		get_last_cached_seq(box, &status_r->last_cached_seq);

	if ((items & STATUS_KEYWORDS) != 0)
		status_r->keywords = mail_index_get_keywords(box->index);
	if ((items & STATUS_PERMANENT_FLAGS) != 0) {
		if (!mailbox_is_readonly(box)) {
			status_r->permanent_flags = MAIL_FLAGS_NONRECENT;
			status_r->permanent_keywords = TRUE;
			status_r->allow_new_keywords =
				!box->disallow_new_keywords;
		}
	}
}
/* Handle an AUTH request from an authentication client.
   args is a TAB-separated list: <id> <mechanism> [name[=value] ...],
   optionally ending with resp=<base64 initial response>.
   Returns FALSE if the request is malformed (logged as a client BUG);
   TRUE otherwise - the authentication itself continues asynchronously
   via auth_penalty_lookup()'s callback. */
bool auth_request_handler_auth_begin(struct auth_request_handler *handler, const char *args) {
	const struct mech_module *mech;
	struct auth_request *request;
	const char *const *list, *name, *arg, *initial_resp;
	void *initial_resp_data;
	unsigned int id;
	buffer_t *buf;

	i_assert(!handler->destroyed);

	/* <id> <mechanism> [...] */
	list = t_strsplit_tab(args);
	if (list[0] == NULL || list[1] == NULL ||
	    str_to_uint(list[0], &id) < 0) {
		i_error("BUG: Authentication client %u "
			"sent broken AUTH request", handler->client_pid);
		return FALSE;
	}

	if (handler->token_auth) {
		/* token-auth handlers accept only the DOVECOT-TOKEN
		   mechanism */
		mech = &mech_dovecot_token;
		if (strcmp(list[1], mech->mech_name) != 0) {
			/* unsupported mechanism */
			i_error("BUG: Authentication client %u requested invalid "
				"authentication mechanism %s (DOVECOT-TOKEN required)",
				handler->client_pid,
				str_sanitize(list[1], MAX_MECH_NAME_LEN));
			return FALSE;
		}
	} else {
		mech = mech_module_find(list[1]);
		if (mech == NULL) {
			/* unsupported mechanism */
			i_error("BUG: Authentication client %u requested unsupported "
				"authentication mechanism %s", handler->client_pid,
				str_sanitize(list[1], MAX_MECH_NAME_LEN));
			return FALSE;
		}
	}

	request = auth_request_new(mech);
	request->handler = handler;
	request->connect_uid = handler->connect_uid;
	request->client_pid = handler->client_pid;
	request->id = id;
	request->auth_only = handler->master_callback == NULL;

	/* parse optional parameters */
	initial_resp = NULL;
	for (list += 2; *list != NULL; list++) {
		arg = strchr(*list, '=');
		if (arg == NULL) {
			name = *list;
			arg = "";
		} else {
			name = t_strdup_until(*list, arg);
			arg++;
		}

		if (auth_request_import_auth(request, name, arg))
			;
		else if (strcmp(name, "resp") == 0) {
			initial_resp = arg;
			/* this must be the last parameter */
			list++;
			break;
		}
	}

	if (*list != NULL) {
		/* something followed resp= - reject the whole request */
		i_error("BUG: Authentication client %u "
			"sent AUTH parameters after 'resp'",
			handler->client_pid);
		auth_request_unref(&request);
		return FALSE;
	}

	if (request->service == NULL) {
		i_error("BUG: Authentication client %u "
			"didn't specify service "
			"in request", handler->client_pid);
		auth_request_unref(&request);
		return FALSE;
	}
	if (hash_table_lookup(handler->requests, POINTER_CAST(id)) != NULL) {
		i_error("BUG: Authentication client %u "
			"sent a duplicate ID %u", handler->client_pid, id);
		auth_request_unref(&request);
		return FALSE;
	}
	auth_request_init(request);

	/* abort the request if the master server doesn't answer in time */
	request->to_abort = timeout_add(MASTER_AUTH_SERVER_TIMEOUT_SECS * 1000,
					auth_request_timeout, request);
	hash_table_insert(handler->requests, POINTER_CAST(id), request);

	if (request->set->ssl_require_client_cert &&
	    !request->valid_client_cert) {
		/* we fail without valid certificate */
		auth_request_handler_auth_fail(handler, request,
			"Client didn't present valid SSL certificate");
		return TRUE;
	}

	/* Empty initial response is a "=" base64 string. Completely empty
	   string shouldn't really be sent, but at least Exim does it, so just
	   allow it for backwards compatibility.. */
	if (initial_resp != NULL && *initial_resp != '\0') {
		size_t len = strlen(initial_resp);

		buf = buffer_create_dynamic(pool_datastack_create(),
					    MAX_BASE64_DECODED_SIZE(len));
		if (base64_decode(initial_resp, len, NULL, buf) < 0) {
			auth_request_handler_auth_fail(handler, request,
				"Invalid base64 data in initial response");
			return TRUE;
		}
		/* copy into request pool; I_MAX guards against a 0-byte
		   allocation for an empty decoded response */
		initial_resp_data =
			p_malloc(request->pool, I_MAX(buf->used, 1));
		memcpy(initial_resp_data, buf->data, buf->used);
		request->initial_response = initial_resp_data;
		request->initial_response_len = buf->used;
	}

	/* handler is referenced until auth_request_handler_reply()
	   is called. */
	handler->refcount++;

	/* before we start authenticating, see if we need to wait first */
	auth_penalty_lookup(auth_penalty, request, auth_penalty_callback);
	return TRUE;
}
/* Parse a BINARY / BINARY.SIZE / BINARY.PEEK fetch item and register the
   matching fetch handler. ctx->name points at the item name beginning with
   "BINARY" (asserted); ctx->args may hold a HEADER.FIELDS header list.
   Returns TRUE on success, FALSE with ctx->error set on parse failure. */
bool imap_fetch_binary_init(struct imap_fetch_init_context *ctx) {
	struct imap_fetch_body_data *body;
	const struct imap_arg *list_args;
	unsigned int list_count;
	const char *str, *p, *error;

	i_assert(strncmp(ctx->name, "BINARY", 6) == 0);
	p = ctx->name + 6;

	body = p_new(ctx->pool, struct imap_fetch_body_data, 1);
	body->binary = TRUE;

	if (strncmp(p, ".SIZE", 5) == 0) {
		/* fetch decoded size of the section */
		p += 5;
		body->binary_size = TRUE;
	} else if (strncmp(p, ".PEEK", 5) == 0) {
		p += 5;
	} else {
		/* plain BINARY[..] implicitly sets the \Seen flag */
		ctx->fetch_ctx->flags_update_seen = TRUE;
	}
	if (*p != '[') {
		ctx->error = "Invalid BINARY[..] parameter: Missing '['";
		return FALSE;
	}

	if (imap_arg_get_list_full(&ctx->args[0], &list_args, &list_count)) {
		/* BINARY[HEADER.FIELDS.. (headers list)] */
		if (!imap_arg_get_atom(&ctx->args[1], &str) ||
		    str[0] != ']') {
			ctx->error = "Invalid BINARY[..] parameter: Missing ']'";
			return FALSE;
		}
		if (body_header_fields_parse(ctx, body, p+1,
					     list_args, list_count) < 0)
			return FALSE;
		p = str+1;
		/* consumed the header list and the closing atom */
		ctx->args += 2;
	} else {
		/* no headers list */
		body->section = p+1;
		p = strchr(body->section, ']');
		if (p == NULL) {
			ctx->error = "Invalid BINARY[..] parameter: Missing ']'";
			return FALSE;
		}
		body->section = p_strdup_until(ctx->pool, body->section, p);
		p++;
	}
	if (imap_msgpart_parse(body->section, &body->msgpart) < 0) {
		ctx->error = "Invalid BINARY[..] section";
		return FALSE;
	}
	imap_msgpart_set_decode_to_binary(body->msgpart);
	ctx->fetch_ctx->fetch_data |=
		imap_msgpart_get_fetch_data(body->msgpart);

	if (!body->binary_size) {
		/* optional <partial> range after the closing ']' */
		if (body_parse_partial(body, p, &error) < 0) {
			ctx->error = p_strdup_printf(ctx->pool,
				"Invalid BINARY[..] "
				"parameter: %s", error);
			return FALSE;
		}
	}

	/* update the section name for the imap_fetch_add_handler() */
	ctx->name = p_strdup(ctx->pool, get_body_name(body));
	if (body->binary_size) {
		/* default reply "0" if the handler never runs */
		imap_fetch_add_handler(ctx, IMAP_FETCH_HANDLER_FLAG_WANT_DEINIT,
				       "0", fetch_binary_size, body);
	} else {
		imap_fetch_add_handler(ctx, IMAP_FETCH_HANDLER_FLAG_WANT_DEINIT,
				       "NIL", fetch_body_msgpart, body);
	}
	return TRUE;
}
/* Return the next field of the current cache record in *field_r.
   Returns 1 if a field was returned, 0 when there are no more records,
   -1 on error (the cache is marked corrupted where applicable). */
int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx, struct mail_cache_iterate_field *field_r) {
	struct mail_cache *cache = ctx->view->cache;
	unsigned int field_idx;
	unsigned int data_size;
	uint32_t file_field;
	int ret;

	/* the file must not have been re-mmap()ed since the iterator was
	   positioned */
	i_assert(ctx->remap_counter == cache->remap_counter);

	if (ctx->pos + sizeof(uint32_t) > ctx->rec_size) {
		/* current record exhausted; it must end exactly at
		   rec_size, otherwise the record is broken */
		if (ctx->pos != ctx->rec_size) {
			mail_cache_set_corrupted(cache,
				"record has invalid size");
			return -1;
		}

		if ((ret = mail_cache_lookup_iter_next_record(ctx)) <= 0)
			return ret;
	}

	/* return the next field */
	file_field = *((const uint32_t *)CONST_PTR_OFFSET(ctx->rec, ctx->pos));
	ctx->pos += sizeof(uint32_t);

	if (file_field >= cache->file_fields_count) {
		/* new field, have to re-read fields header to figure out
		   its size. don't do this if we're compressing. */
		if (!cache->locked) {
			if (mail_cache_header_fields_read(cache) < 0)
				return -1;
		}

		if (file_field >= cache->file_fields_count) {
			mail_cache_set_corrupted(cache,
				"field index too large (%u >= %u)",
				file_field, cache->file_fields_count);
			return -1;
		}

		/* field reading might have re-mmaped the file and caused
		   rec pointer to break. need to get it again. */
		if (mail_cache_get_record(cache, ctx->offset, &ctx->rec) < 0)
			return -1;
		ctx->remap_counter = cache->remap_counter;
	}

	field_idx = cache->file_field_map[file_field];
	data_size = cache->fields[field_idx].field.field_size;
	if (data_size == UINT_MAX &&
	    ctx->pos + sizeof(uint32_t) <= ctx->rec->size) {
		/* variable size field. get its size from the file. */
		data_size = *((const uint32_t *)
			      CONST_PTR_OFFSET(ctx->rec, ctx->pos));
		ctx->pos += sizeof(uint32_t);
	}

	if (ctx->rec->size - ctx->pos < data_size) {
		mail_cache_set_corrupted(cache,
			"record continues outside its allocated size");
		return -1;
	}

	field_r->field_idx = field_idx;
	field_r->data = CONST_PTR_OFFSET(ctx->rec, ctx->pos);
	field_r->size = data_size;
	field_r->offset = ctx->offset + ctx->pos;

	/* each record begins from 32bit aligned position */
	ctx->pos += (data_size + sizeof(uint32_t)-1) & ~(sizeof(uint32_t)-1);
	return 1;
}
/* Feed MIME body data into block_r until a multipart boundary line is
   found. Returns >0 when a block was produced, 0 when more input is
   needed, <=0 passthrough from message_parser_read_more() /
   parse_part_finish() otherwise. Boundary candidates only start right
   after a [CR]LF, so partial trailing lines are held back in the buffer. */
static int parse_next_body_to_boundary(struct message_parser_ctx *ctx, struct message_block *block_r) {
	struct message_boundary *boundary = NULL;
	const unsigned char *data, *cur, *next, *end;
	size_t boundary_start;
	int ret;
	bool full;

	if ((ret = message_parser_read_more(ctx, block_r, &full)) <= 0)
		return ret;

	data = block_r->data;
	if (ctx->last_chr == '\n') {
		/* handle boundary in first line of message. alternatively
		   it's an empty line. */
		ret = boundary_line_find(ctx, block_r->data,
					 block_r->size, full, &boundary);
		if (ret >= 0) {
			return ret == 0 ? 0 :
				parse_part_finish(ctx, boundary, block_r, TRUE);
		}
	}

	i_assert(block_r->size > 0);
	boundary_start = 0;

	/* skip to beginning of the next line. the first line was
	   handled already. */
	cur = data; end = data + block_r->size;
	while ((next = memchr(cur, '\n', end - cur)) != NULL) {
		cur = next + 1;

		/* boundary_start = offset of the [CR]LF preceding the
		   candidate line (the CRLF belongs to the boundary) */
		boundary_start = next - data;
		if (next > data && next[-1] == '\r')
			boundary_start--;

		if (boundary_start != 0) {
			/* we can at least skip data until the first [CR]LF.
			   input buffer can't be full anymore. */
			full = FALSE;
		}

		ret = boundary_line_find(ctx, cur, end - cur, full, &boundary);
		if (ret >= 0) {
			/* found / need more data */
			if (ret == 0 && boundary_start == 0)
				ctx->want_count += cur - block_r->data;
			break;
		}
	}
	if (next != NULL) {
		/* found / need more data */
		i_assert(ret >= 0);
		i_assert(!(ret == 0 && full));
	} else if (boundary_start == 0) {
		/* no linefeeds in this block. we can just skip it. */
		ret = 0;
		if (block_r->data[block_r->size-1] == '\r' && !ctx->eof) {
			/* this may be the beginning of the \r\n--boundary */
			block_r->size--;
		}
		boundary_start = block_r->size;
	} else {
		/* the boundary wasn't found from this data block,
		   we'll need more data. */
		ret = 0;
		ctx->want_count = (block_r->size - boundary_start) + 1;
	}

	if (ret > 0 || (ret == 0 && !ctx->eof)) {
		/* a) we found the boundary
		   b) we need more data and haven't reached EOF yet
		   so leave CR+LF + last line to buffer */
		block_r->size = boundary_start;
	}
	if (block_r->size != 0) {
		parse_body_add_block(ctx, block_r);

		/* multipart parents don't emit their own body blocks unless
		   explicitly requested */
		if ((ctx->part->flags & MESSAGE_PART_FLAG_MULTIPART) != 0 &&
		    (ctx->flags & MESSAGE_PARSER_FLAG_INCLUDE_MULTIPART_BLOCKS) == 0)
			return 0;

		return 1;
	}
	return ret <= 0 ? ret :
		parse_part_finish(ctx, boundary, block_r, FALSE);
}
static bool mail_cache_file_has_field(struct mail_cache *cache, unsigned int field) { i_assert(field < cache->fields_count); return cache->field_file_map[field] != (uint32_t)-1; }
/* Record a "child references parent" link between two thread nodes,
   detecting reference loops. When a link can't be added safely (loop, or
   a conflicting parent already exists), the affected nodes are flagged so
   that the thread tree gets rebuilt if the relevant messages/references
   later disappear. */
static void thread_link_reference(struct mail_thread_cache *cache, uint32_t parent_idx, uint32_t child_idx) {
	struct mail_thread_node *node, *parent, *child;
	uint32_t idx;

	i_assert(parent_idx < cache->first_invalid_msgid_str_idx);

	/* either child_idx or parent_idx may cause thread_nodes array to
	   grow. in such situation the other pointer may become invalid if
	   we don't get the pointers in correct order. */
	if (child_idx < parent_idx) {
		parent = array_idx_modifiable(&cache->thread_nodes, parent_idx);
		child = array_idx_modifiable(&cache->thread_nodes, child_idx);
	} else {
		child = array_idx_modifiable(&cache->thread_nodes, child_idx);
		parent = array_idx_modifiable(&cache->thread_nodes, parent_idx);
	}

	child->parent_link_refcount++;
	if (thread_node_has_ancestor(cache, parent, child)) {
		if (parent == child) {
			/* loops to itself - ignore */
			return;
		}
		/* child is an ancestor of parent. Adding child -> parent_node
		   would introduce a loop. If any messages referencing the
		   path between parent_node's parent and child_node get
		   expunged, we have to rebuild the tree because the loop
		   might break. For example:
		     #1: a -> b       (a.ref=1, b.ref=1)
		     #2: b -> a       (a.ref=2, b.ref=2)
		     #3: c -> a -> b  (a.ref=3, b.ref=3, c.ref=1)
		   Expunging #3 wouldn't break the loop, but expunging #1
		   would. */
		node = parent;
		do {
			idx = node->parent_idx;
			i_assert(idx != 0);
			node = array_idx_modifiable(&cache->thread_nodes, idx);
			/* any unref along the parent->...->child path must
			   trigger a rebuild */
			node->child_unref_rebuilds = TRUE;
		} while (node != child);
		return;
	} else if (child->parent_idx == parent_idx) {
		/* The same link already exists */
		return;
	}

	/* Set parent_node as child_node's parent */
	if (child->parent_idx == 0) {
		child->parent_idx = parent_idx;
	} else {
		/* Conflicting parent already exists, keep the original */
		if (MAIL_THREAD_NODE_EXISTS(child)) {
			/* If this message gets expunged,
			   the parent is changed. */
			child->expunge_rebuilds = TRUE;
		} else {
			/* Message doesn't exist, so it was one of the node's
			   children that created the original reference. If
			   that reference gets dropped, the parent is changed.
			   We could catch this in one of several ways:

			   a) Link to parent node gets unreferenced
			   b) Link to this node gets unreferenced
			   c) Any of the child nodes gets expunged

			   b) is probably the least likely to happen,
			   so use it */
			child->child_unref_rebuilds = TRUE;
		}
	}
}
void mail_user_ref(struct mail_user *user) { i_assert(user->refcount > 0); user->refcount++; }
/* Tear down a login client connection: log the disconnect reason, abort
   any in-progress master/SASL authentication, release I/O resources and
   drop the list reference. Safe to call more than once (guarded by
   client->destroyed). The teardown order below is deliberate - don't
   reorder without care. */
void client_destroy(struct client *client, const char *reason) {
	if (client->destroyed)
		return;
	client->destroyed = TRUE;

	if (!client->login_success && reason != NULL) {
		/* append extra detail (e.g. auth state) for failed logins */
		reason = t_strconcat(reason, " ",
			client_get_extra_disconnect_reason(client), NULL);
	}
	if (reason != NULL)
		client_log(client, reason);

	if (last_client == client)
		last_client = client->prev;
	DLLIST_REMOVE(&clients, client);

	if (client->output != NULL)
		o_stream_uncork(client->output);

	if (!client->login_success && client->ssl_proxy != NULL)
		ssl_proxy_destroy(client->ssl_proxy);
	if (client->input != NULL)
		i_stream_close(client->input);
	if (client->output != NULL)
		o_stream_close(client->output);

	if (client->master_tag != 0) {
		/* waiting for a master auth reply - abort it. the master
		   request holds a client reference, released here. */
		i_assert(client->auth_request == NULL);
		i_assert(client->authenticating);
		i_assert(client->refcount > 1);
		client->authenticating = FALSE;
		master_auth_request_abort(master_auth, client->master_tag);
		client->refcount--;
	} else if (client->auth_request != NULL) {
		/* SASL authentication still in progress */
		i_assert(client->authenticating);
		sasl_server_auth_abort(client);
	} else {
		i_assert(!client->authenticating);
	}

	if (client->io != NULL)
		io_remove(&client->io);
	if (client->to_disconnect != NULL)
		timeout_remove(&client->to_disconnect);
	if (client->to_auth_waiting != NULL)
		timeout_remove(&client->to_auth_waiting);
	if (client->auth_response != NULL)
		str_free(&client->auth_response);

	if (client->fd != -1) {
		net_disconnect(client->fd);
		client->fd = -1;
	}

	if (client->proxy_password != NULL) {
		/* wipe the password from memory before freeing */
		safe_memset(client->proxy_password, 0,
			    strlen(client->proxy_password));
		i_free_and_null(client->proxy_password);
	}

	if (client->proxy_sasl_client != NULL)
		dsasl_client_free(&client->proxy_sasl_client);
	if (client->login_proxy != NULL)
		login_proxy_free(&client->login_proxy);

	if (client->v.destroy != NULL)
		client->v.destroy(client);
	if (client_unref(&client) && initial_service_count == 1) {
		/* as soon as this connection is done with proxying
		   (or whatever), the process will die. there's no need for
		   authentication anymore, so close the connection.
		   do this only with initial service_count=1, in case there
		   are other clients with pending authentications */
		auth_client_disconnect(auth_client, "unnecessary connection");
	}
	login_client_destroyed();
	login_refresh_proctitle();
}
static int mdbox_map_open_internal(struct mdbox_map *map, bool create_missing) { enum mail_index_open_flags open_flags; struct mailbox_permissions perm; int ret = 0; if (map->view != NULL) { /* already opened */ return 1; } mailbox_list_get_root_permissions(map->root_list, &perm); mail_index_set_permissions(map->index, perm.file_create_mode, perm.file_create_gid, perm.file_create_gid_origin); open_flags = MAIL_INDEX_OPEN_FLAG_NEVER_IN_MEMORY | mail_storage_settings_to_index_flags(MAP_STORAGE(map)->set); if (create_missing) { if ((ret = mdbox_map_mkdir_storage(map)) < 0) return -1; if (ret > 0) { /* storage/ directory already existed. the index should exist also. */ } else { open_flags |= MAIL_INDEX_OPEN_FLAG_CREATE; } } ret = mail_index_open(map->index, open_flags); if (ret == 0 && create_missing) { /* storage/ already existed, but indexes didn't. we'll need to take extra steps to make sure we won't overwrite any m.* files that may already exist. */ map->verify_existing_file_ids = TRUE; open_flags |= MAIL_INDEX_OPEN_FLAG_CREATE; ret = mail_index_open(map->index, open_flags); } if (ret < 0) { mail_storage_set_internal_error(MAP_STORAGE(map)); mail_index_reset_error(map->index); return -1; } if (ret == 0) { /* index not found - for now just return failure */ i_assert(!create_missing); return 0; } map->view = mail_index_view_open(map->index); mdbox_map_cleanup(map); if (mail_index_get_header(map->view)->uid_validity == 0) { if (mdbox_map_generate_uid_validity(map) < 0 || mdbox_map_refresh(map) < 0) { mail_storage_set_internal_error(MAP_STORAGE(map)); mail_index_reset_error(map->index); mail_index_close(map->index); return -1; } } return 1; }
static void tview_lookup_seq_range(struct mail_index_view *view, uint32_t first_uid, uint32_t last_uid, uint32_t *first_seq_r, uint32_t *last_seq_r) { struct mail_index_view_transaction *tview = (struct mail_index_view_transaction *)view; const struct mail_index_record *rec; uint32_t seq; if (!tview->t->reset) { tview->super->lookup_seq_range(view, first_uid, last_uid, first_seq_r, last_seq_r); } else { /* index is being reset. we never want to return old sequences. */ *first_seq_r = *last_seq_r = 0; } if (tview->t->last_new_seq == 0) { /* no new messages, the results are final. */ return; } rec = mail_index_transaction_lookup(tview->t, tview->t->first_new_seq); if (rec->uid == 0) { /* new messages don't have UIDs */ return; } if (last_uid < rec->uid) { /* all wanted messages were existing */ return; } /* at least some of the wanted messages are newly created */ if (*first_seq_r == 0) { seq = tview->t->first_new_seq; for (; seq <= tview->t->last_new_seq; seq++) { rec = mail_index_transaction_lookup(tview->t, seq); if (first_uid <= rec->uid) break; } if (seq > tview->t->last_new_seq || rec->uid > last_uid) { /* no messages in range */ return; } *first_seq_r = seq; if (rec->uid == last_uid) { /* one seq in range */ *last_seq_r = seq; return; } } seq = tview->t->last_new_seq; for (; seq >= tview->t->first_new_seq; seq--) { rec = mail_index_transaction_lookup(tview->t, seq); if (rec->uid <= last_uid) { *last_seq_r = seq; break; } } i_assert(seq >= tview->t->first_new_seq); }
static void test_seq_range_array_random(void) { #define SEQ_RANGE_TEST_BUFSIZE 100 #define SEQ_RANGE_TEST_COUNT 20000 unsigned char shadowbuf[SEQ_RANGE_TEST_BUFSIZE]; ARRAY_TYPE(seq_range) range; const struct seq_range *seqs; uint32_t seq1, seq2; unsigned int i, j, ret, ret2, count; int test = -1; ret = ret2 = 0; i_array_init(&range, 1); memset(shadowbuf, 0, sizeof(shadowbuf)); for (i = 0; i < SEQ_RANGE_TEST_COUNT; i++) { seq1 = rand() % SEQ_RANGE_TEST_BUFSIZE; seq2 = seq1 + rand() % (SEQ_RANGE_TEST_BUFSIZE - seq1); test = rand() % 4; switch (test) { case 0: ret = seq_range_array_add(&range, seq1) ? 0 : 1; /* FALSE == added */ ret2 = shadowbuf[seq1] == 0 ? 1 : 0; shadowbuf[seq1] = 1; break; case 1: ret = seq_range_array_add_range_count(&range, seq1, seq2); for (ret2 = 0; seq1 <= seq2; seq1++) { if (shadowbuf[seq1] == 0) { ret2++; shadowbuf[seq1] = 1; } } break; case 2: ret = seq_range_array_remove(&range, seq1) ? 1 : 0; ret2 = shadowbuf[seq1] != 0 ? 1 : 0; shadowbuf[seq1] = 0; break; case 3: ret = seq_range_array_remove_range(&range, seq1, seq2); for (ret2 = 0; seq1 <= seq2; seq1++) { if (shadowbuf[seq1] != 0) { ret2++; shadowbuf[seq1] = 0; } } break; } if (ret != ret2) break; seqs = array_get(&range, &count); for (j = 0, seq1 = 0; j < count; j++) { if (j > 0 && seqs[j-1].seq2+1 >= seqs[j].seq1) goto fail; for (; seq1 < seqs[j].seq1; seq1++) { if (shadowbuf[seq1] != 0) goto fail; } for (; seq1 <= seqs[j].seq2; seq1++) { if (shadowbuf[seq1] == 0) goto fail; } } i_assert(seq1 <= SEQ_RANGE_TEST_BUFSIZE); for (; seq1 < SEQ_RANGE_TEST_BUFSIZE; seq1++) { if (shadowbuf[seq1] != 0) goto fail; } } fail: if (i == SEQ_RANGE_TEST_COUNT) test_out("seq_range_array random", TRUE); else { test_out_reason("seq_range_array random", FALSE, t_strdup_printf("round %u test %d failed", i, test)); } array_free(&range); }
void service_process_ref(struct service_process *process) { i_assert(process->refcount > 0); process->refcount++; }
int file_set_size(int fd, off_t size) { #ifdef HAVE_POSIX_FALLOCATE static bool posix_fallocate_supported = TRUE; #endif char block[IO_BLOCK_SIZE]; off_t offset; ssize_t ret; struct stat st; i_assert(size >= 0); if (fstat(fd, &st) < 0) { i_error("fstat() failed: %m"); return -1; } if (size < st.st_size) { if (ftruncate(fd, size) < 0) { i_error("ftruncate() failed: %m"); return -1; } return 0; } if (size == st.st_size) return 0; #ifdef HAVE_POSIX_FALLOCATE if (posix_fallocate_supported) { int err; err = posix_fallocate(fd, st.st_size, size - st.st_size); if (err == 0) return 0; if (err != EINVAL /* Solaris */ && err != EOPNOTSUPP /* AOX */) { if (!ENOSPACE(err)) i_error("posix_fallocate() failed: %m"); return -1; } /* Not supported by kernel, fallback to writing. */ posix_fallocate_supported = FALSE; } #endif /* start growing the file */ offset = st.st_size; memset(block, 0, I_MIN((ssize_t)sizeof(block), size - offset)); while (offset < size) { ret = pwrite(fd, block, I_MIN((ssize_t)sizeof(block), size - offset), offset); if (ret < 0) { if (!ENOSPACE(errno)) i_error("pwrite() failed: %m"); return -1; } offset += ret; } return 0; }
/* Set a sieve-script mailbox attribute: save the script named by @key
   (after the sieve-files prefix) into the user's sieve storage, reading
   the script body either from value->value or value->value_stream.
   A NULL value unsets (deletes) the script. Returns 0 on success, -1 on
   error with a storage error set. */
static int sieve_attribute_set_sieve(struct mail_storage *storage, const char *key, const struct mail_attribute_value *value) {
	struct sieve_storage *svstorage;
	struct sieve_storage_save_context *save_ctx;
	struct istream *input;
	const char *scriptname;
	int ret;

	if ((ret = mail_sieve_user_init(storage->user, &svstorage)) <= 0) {
		if (ret == 0) {
			mail_storage_set_error(storage, MAIL_ERROR_NOTFOUND,
					       "Sieve not enabled for user");
		}
		return -1;
	}

	if (strcmp(key, MAILBOX_ATTRIBUTE_SIEVE_DEFAULT) == 0)
		return sieve_attribute_set_default(storage, svstorage, value);
	if (strncmp(key, MAILBOX_ATTRIBUTE_PREFIX_SIEVE_FILES,
		    strlen(MAILBOX_ATTRIBUTE_PREFIX_SIEVE_FILES)) != 0) {
		mail_storage_set_error(storage, MAIL_ERROR_NOTFOUND,
				       "Nonexistent sieve attribute");
		return -1;
	}
	scriptname = key + strlen(MAILBOX_ATTRIBUTE_PREFIX_SIEVE_FILES);

	if (value->value != NULL) {
		/* NOTE(review): i_stream_unref() below drops our local
		   reference (save_init presumably keeps its own), but the
		   read loop further down still uses `input` - verify that
		   i_stream_unref() doesn't NULL/free the stream out from
		   under this path. */
		input = i_stream_create_from_data(value->value,
						  strlen(value->value));
		save_ctx = sieve_storage_save_init(svstorage, scriptname,
						   input);
		i_stream_unref(&input);
	} else if (value->value_stream != NULL) {
		/* caller-provided stream; not unreffed here */
		input = value->value_stream;
		save_ctx = sieve_storage_save_init(svstorage, scriptname,
						   input);
	} else {
		/* NULL value = delete the script */
		return sieve_attribute_unset_script(storage, svstorage,
						    scriptname);
	}

	if (save_ctx == NULL) {
		/* save initialization failed */
		mail_storage_set_critical(storage,
			"Failed to save sieve script '%s': %s", scriptname,
			sieve_storage_get_last_error(svstorage, NULL));
		return -1;
	}
	sieve_storage_save_set_mtime(save_ctx, value->last_change);

	/* stream the script body into the save context */
	ret = 0;
	while (i_stream_read(input) > 0) {
		if (sieve_storage_save_continue(save_ctx) < 0) {
			mail_storage_set_critical(storage,
				"Failed to save sieve script '%s': %s",
				scriptname,
				sieve_storage_get_last_error(svstorage, NULL));
			ret = -1;
			break;
		}
	}
	i_assert(input->eof || ret < 0);
	if (input->stream_errno != 0) {
		errno = input->stream_errno;
		mail_storage_set_critical(storage,
			"Saving sieve script: read(%s) failed: %m",
			i_stream_get_name(input));
		ret = -1;
	}
	if (ret == 0 && sieve_storage_save_finish(save_ctx) < 0) {
		mail_storage_set_critical(storage,
			"Failed to save sieve script '%s': %s", scriptname,
			sieve_storage_get_last_error(svstorage, NULL));
		ret = -1;
	}
	/* commit on success, otherwise cancel; both release save_ctx */
	if (ret < 0)
		sieve_storage_save_cancel(&save_ctx);
	else if (sieve_storage_save_commit(&save_ctx) < 0) {
		mail_storage_set_critical(storage,
			"Failed to save sieve script '%s': %s", scriptname,
			sieve_storage_get_last_error(svstorage, NULL));
		ret = -1;
	}
	return ret;
}
/* Compose and send the HTTP request line and headers (plus start payload
   output) on the request's connection. Implicit headers (Host, Date,
   Authorization, Proxy-Authorization, User-Agent, Expect, body framing,
   Connection) are added only when not already set explicitly.
   Returns 0 on success, -1 with *error_r set on write/flush failure. */
static int http_client_request_send_real(struct http_client_request *req, bool pipelined, const char **error_r) {
	const struct http_client_settings *set = &req->client->set;
	struct http_client_connection *conn = req->conn;
	struct ostream *output = conn->conn.output;
	string_t *rtext = t_str_new(256);
	struct const_iovec iov[3];
	int ret = 0;

	i_assert(!req->conn->output_locked);
	i_assert(req->payload_output == NULL);

	/* create request line */
	str_append(rtext, req->method);
	str_append(rtext, " ");
	str_append(rtext, req->target);
	str_append(rtext, " HTTP/1.1\r\n");

	/* create special headers implicitly if not set explicitly using
	   http_client_request_add_header() */
	if (!req->have_hdr_host) {
		str_append(rtext, "Host: ");
		str_append(rtext, req->authority);
		str_append(rtext, "\r\n");
	}
	if (!req->have_hdr_date) {
		str_append(rtext, "Date: ");
		str_append(rtext, http_date_create(req->date));
		str_append(rtext, "\r\n");
	}
	if (!req->have_hdr_authorization &&
	    req->username != NULL && req->password != NULL) {
		struct http_auth_credentials auth_creds;

		http_auth_basic_credentials_init(&auth_creds,
						 req->username, req->password);
		str_append(rtext, "Authorization: ");
		http_auth_create_credentials(rtext, &auth_creds);
		str_append(rtext, "\r\n");
	}
	if (http_client_request_to_proxy(req) &&
	    set->proxy_username != NULL && set->proxy_password != NULL) {
		struct http_auth_credentials auth_creds;

		http_auth_basic_credentials_init(&auth_creds,
						 set->proxy_username,
						 set->proxy_password);
		str_append(rtext, "Proxy-Authorization: ");
		http_auth_create_credentials(rtext, &auth_creds);
		str_append(rtext, "\r\n");
	}
	if (!req->have_hdr_user_agent && req->client->set.user_agent != NULL) {
		str_printfa(rtext, "User-Agent: %s\r\n",
			    req->client->set.user_agent);
	}
	if (!req->have_hdr_expect && req->payload_sync) {
		str_append(rtext, "Expect: 100-continue\r\n");
	}
	if (req->payload_input != NULL) {
		if (req->payload_chunked) {
			// FIXME: can't do this for a HTTP/1.0 server
			if (!req->have_hdr_body_spec)
				str_append(rtext,
					"Transfer-Encoding: chunked\r\n");
			req->payload_output =
				http_transfer_chunked_ostream_create(output);
		} else {
			/* send Content-Length if we have specified a payload,
			   even if it's 0 bytes. */
			if (!req->have_hdr_body_spec) {
				str_printfa(rtext,
					"Content-Length: %"PRIuUOFF_T"\r\n",
					req->payload_size);
			}
			req->payload_output = output;
			o_stream_ref(output);
		}
	}
	if (!req->have_hdr_connection &&
	    !http_client_request_to_proxy(req)) {
		/* https://tools.ietf.org/html/rfc2068 Section 19.7.1:

		   A client MUST NOT send the Keep-Alive connection token to a
		   proxy server as HTTP/1.0 proxy servers do not obey the
		   rules of HTTP/1.1 for parsing the Connection header field.
		 */
		str_append(rtext, "Connection: Keep-Alive\r\n");
	}

	/* request line + implicit headers */
	iov[0].iov_base = str_data(rtext);
	iov[0].iov_len = str_len(rtext);
	/* explicit headers */
	if (req->headers != NULL) {
		iov[1].iov_base = str_data(req->headers);
		iov[1].iov_len = str_len(req->headers);
	} else {
		iov[1].iov_base = "";
		iov[1].iov_len = 0;
	}
	/* end of header */
	iov[2].iov_base = "\r\n";
	iov[2].iov_len = 2;

	req->state = HTTP_REQUEST_STATE_PAYLOAD_OUT;
	req->sent_time = ioloop_timeval;
	/* cork so that header + payload start go out together */
	o_stream_cork(output);
	if (o_stream_sendv(output, iov, N_ELEMENTS(iov)) < 0) {
		*error_r = t_strdup_printf("write(%s) failed: %s",
					   o_stream_get_name(output),
					   o_stream_get_error(output));
		ret = -1;
	} else {
		http_client_request_debug(req, "Sent header");

		if (req->payload_output != NULL) {
			if (!req->payload_sync) {
				/* start sending the payload right away */
				if (http_client_request_send_more
					(req, pipelined, error_r) < 0)
					ret = -1;
			} else {
				/* Expect: 100-continue was sent; hold the
				   payload until the server responds */
				http_client_request_debug(req,
					"Waiting for 100-continue");
				conn->output_locked = TRUE;
			}
		} else {
			req->state = HTTP_REQUEST_STATE_WAITING;
			if (!pipelined)
				http_client_connection_start_request_timeout(req->conn);
			conn->output_locked = FALSE;
		}
		if (ret >= 0 && o_stream_flush(output) < 0) {
			*error_r = t_strdup_printf("flush(%s) failed: %s",
						   o_stream_get_name(output),
						   o_stream_get_error(output));
			ret = -1;
		}
	}
	o_stream_uncork(output);
	return ret;
}