static void zlib_mail_user_created(struct mail_user *user) { struct mail_user_vfuncs *v = user->vlast; struct zlib_user *zuser; const char *name; zuser = p_new(user->pool, struct zlib_user, 1); zuser->module_ctx.super = *v; user->vlast = &zuser->module_ctx.super; v->deinit = zlib_mail_user_deinit; name = mail_user_plugin_getenv(user, "zlib_save"); if (name != NULL && *name != '\0') { zuser->save_handler = compression_lookup_handler(name); if (zuser->save_handler == NULL) i_error("zlib_save: Unknown handler: %s", name); else if (zuser->save_handler->create_ostream == NULL) { i_error("zlib_save: Support not compiled in for handler: %s", name); zuser->save_handler = NULL; } } name = mail_user_plugin_getenv(user, "zlib_save_level"); if (name != NULL) { if (str_to_uint(name, &zuser->save_level) < 0 || zuser->save_level < 1 || zuser->save_level > 9) { i_error("zlib_save_level: Level must be between 1..9"); zuser->save_level = 0; } } if (zuser->save_level == 0) zuser->save_level = ZLIB_PLUGIN_DEFAULT_LEVEL; MODULE_CONTEXT_SET(user, zlib_user_module, zuser); }
static void mail_crypt_mail_user_created(struct mail_user *user)
{
	/* mail_user created hook: allocate the per-user mail-crypt
	   context and validate the plugin settings. Invalid or missing
	   mandatory settings disable the whole user by filling
	   user->error. */
	struct mail_user_vfuncs *v = user->vlast;
	struct mail_crypt_user *muser;
	const char *error = NULL;

	muser = p_new(user->pool, struct mail_crypt_user, 1);
	/* chain into the user's vfunc list */
	muser->module_ctx.super = *v;
	user->vlast = &muser->module_ctx.super;

	const char *curve = mail_user_plugin_getenv(user, "mail_crypt_curve");
	buffer_t *tmp = t_str_new(64);
	if (curve == NULL) {
		/* curve is optional: without it EC key generation is
		   simply disabled */
		if (user->mail_debug) {
			i_debug("mail_crypt_plugin: mail_crypt_curve setting "
				"missing - generating EC keys disabled");
		}
	} else if (!dcrypt_name2oid(curve, tmp, &error)) {
		/* unknown curve name - disable the plugin for this user */
		user->error = p_strdup_printf(user->pool,
			"mail_crypt_plugin: "
			"invalid mail_crypt_curve setting %s: %s - "
			"plugin disabled", curve, error);
	} else {
		muser->curve = p_strdup(user->pool, curve);
	}

	const char *version =
		mail_user_plugin_getenv(user, "mail_crypt_save_version");
	if (version == NULL) {
		user->error = p_strdup_printf(user->pool,
			"mail_crypt_plugin: "
			"mail_crypt_save_version setting missing "
			"- plugin disabled");
	} else if (version[0] == '0') {
		/* NOTE(review): only the first character is inspected,
		   so e.g. "0x" is accepted as 0 — confirm intended */
		muser->save_version = 0;
	} else if (version[0] == '1') {
		muser->save_version = 1;
	} else if (version[0] == '2') {
		muser->save_version = 2;
	} else {
		user->error = p_strdup_printf(user->pool,
			"mail_crypt_plugin: Invalid "
			"mail_crypt_save_version %s: use 0, 1, or 2 ",
			version);
	}

	if (mail_crypt_global_keys_load(user, "mail_crypt_global",
					&muser->global_keys, FALSE,
					&error) < 0) {
		user->error = p_strdup_printf(user->pool,
			"mail_crypt_plugin: %s", error);
	}

	v->deinit = mail_crypt_mail_user_deinit;
	MODULE_CONTEXT_SET(user, mail_crypt_user_module, muser);
}
static struct mail_search_context *
fts_mailbox_search_init(struct mailbox_transaction_context *t,
			struct mail_search_args *args,
			const enum mail_sort_type *sort_program,
			enum mail_fetch_field wanted_fields,
			struct mailbox_header_lookup_ctx *wanted_headers)
{
	/* search_init() override: attach an FTS lookup context to the
	   search when the backend can handle (some of) the search args. */
	struct fts_transaction_context *ft = FTS_CONTEXT(t);
	struct fts_mailbox *fbox = FTS_CONTEXT(t->box);
	struct fts_mailbox_list *flist = FTS_LIST_CONTEXT(t->box->list);
	struct mail_search_context *ctx;
	struct fts_search_context *fctx;

	ctx = fbox->module_ctx.super.search_init(t, args, sort_program,
						 wanted_fields, wanted_headers);
	if (!fts_backend_can_lookup(flist->backend, args->args))
		return ctx; /* fall back to a plain non-FTS search */

	fctx = i_new(struct fts_search_context, 1);
	fctx->box = t->box;
	fctx->backend = flist->backend;
	fctx->t = t;
	fctx->args = args;
	fctx->result_pool = pool_alloconly_create("fts results", 1024*64);
	fctx->orig_matches = buffer_create_dynamic(default_pool, 64);
	fctx->virtual_mailbox =
		strcmp(t->box->storage->name, VIRTUAL_STORAGE_NAME) == 0;
	/* fts_enforced: fail the search rather than fall back when the
	   index isn't usable */
	fctx->enforced =
		mail_user_plugin_getenv(t->box->storage->user,
					"fts_enforced") != NULL;
	i_array_init(&fctx->levels, 8);
	fctx->scores = i_new(struct fts_scores, 1);
	fctx->scores->refcount = 1;
	i_array_init(&fctx->scores->score_map, 64);
	MODULE_CONTEXT_SET(ctx, fts_storage_module, fctx);

	/* FIXME: we'll assume that all the args are fuzzy. not good,
	   but would require much more work to fix it. */
	if (!fts_args_have_fuzzy(args->args) &&
	    mail_user_plugin_getenv(t->box->storage->user,
				    "fts_no_autofuzzy") != NULL)
		fctx->flags |= FTS_LOOKUP_FLAG_NO_AUTO_FUZZY;
	/* transaction contains the last search's scores. they can be
	   queried later with mail_get_special() */
	if (ft->scores != NULL)
		fts_scores_unref(&ft->scores);
	/* the transaction takes its own reference to the scores */
	ft->scores = fctx->scores;
	ft->scores->refcount++;

	if (fctx->enforced || fts_want_build_args(args->args))
		fts_try_build_init(ctx, fctx);
	else
		fts_search_lookup(fctx);
	return ctx;
}
static void fts_tika_parser_response(const struct http_response *response,
				     struct tika_fts_parser *parser)
{
	/* HTTP response callback for the Tika text-extraction PUT:
	   set parser->payload to the stream to read (possibly empty),
	   or mark the parser failed. Stops the ioloop that is waiting
	   for this response. */
	i_assert(parser->payload == NULL);

	switch (response->status) {
	case 200:
		/* read response */
		if (response->payload == NULL)
			parser->payload = i_stream_create_from_data("", 0);
		else {
			/* keep the payload stream alive past this
			   callback */
			i_stream_ref(response->payload);
			parser->payload = response->payload;
		}
		break;
	case 204: /* empty response */
	case 415: /* Unsupported Media Type */
	case 422: /* Unprocessable Entity */
		if (parser->user->mail_debug) {
			i_debug("fts_tika: PUT %s failed: %u %s",
				mail_user_plugin_getenv(parser->user, "fts_tika"),
				response->status, response->reason);
		}
		/* treated as "no text extracted" rather than an error */
		parser->payload = i_stream_create_from_data("", 0);
		break;
	case 500:
		/* Server Error - the problem could be anything (in Tika or
		   HTTP server or proxy) and might be retriable, but Tika has
		   trouble processing some documents and throws up this error
		   every time for those documents.

		   Unfortunately we can't easily re-send the request here,
		   because we would have to re-send the entire payload, which
		   isn't available anymore here. So we'd need to indicate
		   in fts_parser_deinit() that we want to retry.
		   FIXME: do this in v2.3. For now we'll just ignore it. */
		i_info("fts_tika: PUT %s failed: %u %s - ignoring",
		       mail_user_plugin_getenv(parser->user, "fts_tika"),
		       response->status, response->reason);
		parser->payload = i_stream_create_from_data("", 0);
		break;
	default:
		i_error("fts_tika: PUT %s failed: %u %s",
			mail_user_plugin_getenv(parser->user, "fts_tika"),
			response->status, response->reason);
		parser->failed = TRUE;
		break;
	}
	parser->http_req = NULL;
	io_loop_stop(current_ioloop);
}
static void fts_tika_parser_response(const struct http_response *response,
				     struct tika_fts_parser *parser)
{
	/* HTTP response callback for the Tika text-extraction PUT:
	   set parser->payload to the stream to read (possibly empty),
	   or mark the parser failed. Stops the ioloop that is waiting
	   for this response — except when the request is retried. */
	i_assert(parser->payload == NULL);

	switch (response->status) {
	case 200:
		/* read response */
		if (response->payload == NULL)
			parser->payload = i_stream_create_from_data("", 0);
		else {
			/* keep the payload stream alive past this
			   callback */
			i_stream_ref(response->payload);
			parser->payload = response->payload;
		}
		break;
	case 204: /* empty response */
	case 415: /* Unsupported Media Type */
	case 422: /* Unprocessable Entity */
		if (parser->user->mail_debug) {
			i_debug("fts_tika: PUT %s failed: %u %s",
				mail_user_plugin_getenv(parser->user, "fts_tika"),
				response->status, response->reason);
		}
		/* treated as "no text extracted" rather than an error */
		parser->payload = i_stream_create_from_data("", 0);
		break;
	case 500:
		/* Server Error - the problem could be anything (in Tika or
		   HTTP server or proxy) and might be retriable, but Tika has
		   trouble processing some documents and throws up this error
		   every time for those documents. So we try retrying this
		   a couple of times, but if that doesn't work we'll just
		   ignore it. */
		if (http_client_request_try_retry(parser->http_req))
			/* retry submitted: keep http_req set and keep the
			   ioloop running; this callback will be called
			   again with the retry's response */
			return;
		i_info("fts_tika: PUT %s failed: %u %s - ignoring",
		       mail_user_plugin_getenv(parser->user, "fts_tika"),
		       response->status, response->reason);
		parser->payload = i_stream_create_from_data("", 0);
		break;
	default:
		i_error("fts_tika: PUT %s failed: %u %s",
			mail_user_plugin_getenv(parser->user, "fts_tika"),
			response->status, response->reason);
		parser->failed = TRUE;
		break;
	}
	parser->http_req = NULL;
	io_loop_stop(current_ioloop);
}
static void mail_log_mail_user_created(struct mail_user *user) { struct mail_log_user *muser; const char *str; muser = p_new(user->pool, struct mail_log_user, 1); MODULE_CONTEXT_SET(user, mail_log_user_module, muser); str = mail_user_plugin_getenv(user, "mail_log_fields"); muser->fields = str == NULL ? MAIL_LOG_DEFAULT_FIELDS : mail_log_parse_fields(str); str = mail_user_plugin_getenv(user, "mail_log_events"); muser->events = str == NULL ? MAIL_LOG_DEFAULT_EVENTS : mail_log_parse_events(str); }
static void mail_filter_parse_setting(struct mail_user *user, const char *name, const char **socket_path_r, const char **args_r) { const char *value, *p; value = mail_user_plugin_getenv(user, name); if (value == NULL) return; p = strchr(value, ' '); if (p == NULL) { *socket_path_r = p_strdup(user->pool, value); *args_r = ""; } else { *socket_path_r = p_strdup_until(user->pool, value, p); *args_r = p_strdup(user->pool, p + 1); } if (**socket_path_r != '/') { /* relative to base_dir */ *socket_path_r = p_strdup_printf(user->pool, "%s/%s", user->set->base_dir, *socket_path_r); } if (user->mail_debug) { i_debug("mail_filter: Filtering %s via socket %s", name, *socket_path_r); } }
static void fts_queue_index(struct mailbox *box)
{
	/* Request background indexing of the mailbox by sending an
	   APPEND command over the indexer service's UNIX socket.
	   Best-effort: failures are logged and otherwise ignored. */
	struct mail_user *user = box->storage->user;
	string_t *str = t_str_new(256);
	const char *path, *value;
	unsigned int max_recent_msgs;
	int fd;

	path = t_strconcat(user->set->base_dir, "/"INDEXER_SOCKET_NAME, NULL);
	fd = net_connect_unix(path);
	if (fd == -1) {
		i_error("net_connect_unix(%s) failed: %m", path);
		return;
	}

	/* unset or unparsable setting falls back to 0 */
	value = mail_user_plugin_getenv(user, "fts_autoindex_max_recent_msgs");
	if (value == NULL || str_to_uint(value, &max_recent_msgs) < 0)
		max_recent_msgs = 0;

	/* indexer protocol: handshake followed by one TAB-separated
	   APPEND line with tab-escaped fields */
	str_append(str, INDEXER_HANDSHAKE);
	str_append(str, "APPEND\t0\t");
	str_append_tabescaped(str, user->username);
	str_append_c(str, '\t');
	str_append_tabescaped(str, box->vname);
	str_printfa(str, "\t%u", max_recent_msgs);
	str_append_c(str, '\t');
	str_append_tabescaped(str, box->storage->user->session_id);
	str_append_c(str, '\n');
	if (write_full(fd, str_data(str), str_len(str)) < 0)
		i_error("write(%s) failed: %m", path);
	i_close_fd(&fd);
}
static const char *
mail_sieve_get_setting(void *context, const char *identifier)
{
	/* Sieve settings callback: resolve a setting from the plugin
	   environment of the imap_sieve context's mail user. */
	struct imap_sieve *isieve = context;

	return mail_user_plugin_getenv(isieve->user, identifier);
}
static int fts_transaction_commit(struct mailbox_transaction_context *t,
				  struct mail_transaction_commit_changes *changes_r)
{
	/* transaction_commit() override: finish FTS bookkeeping, run the
	   super commit, and queue an autoindex request when new mails
	   were saved and fts_autoindex is enabled.
	   Returns 0 on success, -1 on failure. */
	struct fts_transaction_context *ft = FTS_CONTEXT(t);
	struct fts_mailbox *fbox = FTS_CONTEXT(t->box);
	/* t is no longer usable after the super commit below, so grab
	   everything needed afterwards now */
	struct mailbox *box = t->box;
	bool autoindex;
	int ret = 0;

	autoindex = ft->mails_saved &&
		mail_user_plugin_getenv(box->storage->user,
					"fts_autoindex") != NULL;

	if (fts_transaction_end(t) < 0) {
		mail_storage_set_error(t->box->storage, MAIL_ERROR_TEMP,
				       "FTS transaction commit failed");
		ret = -1;
	}
	/* the super commit must run even when FTS bookkeeping failed,
	   so the transaction is always properly finished */
	if (fbox->module_ctx.super.transaction_commit(t, changes_r) < 0)
		ret = -1;
	if (ret < 0)
		return -1;

	if (autoindex)
		fts_queue_index(box);
	return 0;
}
static const char *
mail_sieve_get_setting(void *context, const char *identifier)
{
	/* Sieve settings callback; context is the mail user whose
	   plugin environment supplies the settings. */
	struct mail_user *user = context;

	return mail_user_plugin_getenv(user, identifier);
}
static const char *
doveadm_sieve_cmd_get_setting(void *context, const char *identifier)
{
	/* Sieve settings callback for doveadm commands: resolve the
	   setting from the currently-processed mail user. */
	struct doveadm_sieve_cmd_context *ctx = context;

	return mail_user_plugin_getenv(ctx->ctx.cur_mail_user, identifier);
}
static void snarf_mail_storage_created(struct mail_storage *storage)
{
	/* snarfing is optional: do it only if the path specified by
	   mbox_snarf exists */
	const char *path;

	path = mail_user_plugin_getenv(storage->user, "mbox_snarf");
	if (path == NULL)
		return;
	snarf_mail_storage_create(storage, path);
}
static void listescape_mailbox_list_created(struct mailbox_list *list)
{
	/* Assign the list's escape character from the listescape_char
	   setting, unless one is already configured. */
	const char *env;

	if (list->set.escape_char != '\0')
		return; /* already set */

	env = mail_user_plugin_getenv(list->ns->user, "listescape_char");
	if (env == NULL || *env == '\0')
		list->set.escape_char = DEFAULT_ESCAPE_CHAR;
	else
		list->set.escape_char = env[0];
}
void acl_mail_user_created(struct mail_user *user)
{
	/* mail_user created hook: enable ACL support only when a
	   non-empty "acl" setting is present. */
	const char *env = mail_user_plugin_getenv(user, "acl");

	if (env == NULL || *env == '\0') {
		if (user->mail_debug)
			i_debug("acl: No acl setting - ACLs are disabled");
		return;
	}
	acl_mail_user_create(user, env);
}
static const char *
sieve_tool_sieve_get_setting(void *context, const char *identifier)
{
	/* Sieve settings callback for command-line tools: prefer the
	   tool's own setting callback; otherwise fall back to the
	   Dovecot mail user's plugin environment (if any). */
	struct sieve_tool *tool = context;

	if (tool->setting_callback != NULL) {
		return tool->setting_callback(tool->setting_callback_context,
					      identifier);
	}
	return tool->mail_user_dovecot == NULL ? NULL :
		mail_user_plugin_getenv(tool->mail_user_dovecot, identifier);
}
static void acl_mail_user_create(struct mail_user *user, const char *env) { struct mail_user_vfuncs *v = user->vlast; struct acl_user *auser; auser = p_new(user->pool, struct acl_user, 1); auser->module_ctx.super = *v; user->vlast = &auser->module_ctx.super; v->deinit = acl_user_deinit; auser->acl_lookup_dict = acl_lookup_dict_init(user); auser->acl_env = env; auser->master_user = mail_user_plugin_getenv(user, "master_user"); env = mail_user_plugin_getenv(user, "acl_groups"); if (env != NULL) { auser->groups = (const char *const *)p_strsplit(user->pool, env, ","); } MODULE_CONTEXT_SET(user, acl_user_module, auser); }
int fts_indexer_init(struct fts_backend *backend, struct mailbox *box,
		     struct fts_indexer_context **ctx_r)
{
	/* Ask the indexer service to index the not-yet-indexed mails in
	   the mailbox.

	   Returns 1 when indexing was started (*ctx_r set), 0 when
	   there is nothing to index, -1 on error. */
	struct fts_indexer_context *ctx;
	struct mailbox_status status;
	uint32_t last_uid, seq1, seq2;
	const char *path, *cmd, *value, *error;
	int fd;

	if (fts_backend_get_last_uid(backend, box, &last_uid) < 0)
		return -1;

	mailbox_get_open_status(box, STATUS_UIDNEXT, &status);
	if (status.uidnext == last_uid+1) {
		/* everything is already indexed */
		return 0;
	}

	mailbox_get_seq_range(box, last_uid+1, (uint32_t)-1, &seq1, &seq2);
	if (seq1 == 0) {
		/* no new messages (last messages in mailbox were
		   expunged) */
		return 0;
	}

	/* PREPEND request with tab-escaped username/mailbox/session
	   fields; see the indexer protocol for field semantics */
	cmd = t_strdup_printf("PREPEND\t1\t%s\t%s\t0\t%s\n",
			      str_tabescape(box->storage->user->username),
			      str_tabescape(box->vname),
			      str_tabescape(box->storage->user->session_id));
	fd = fts_indexer_cmd(box->storage->user, cmd, &path);
	if (fd == -1)
		return -1;

	/* connect to indexer and request immediate indexing of the
	   mailbox */
	ctx = i_new(struct fts_indexer_context, 1);
	ctx->box = box;
	ctx->path = i_strdup(path);
	ctx->fd = fd;
	ctx->input = i_stream_create_fd(fd, 128, FALSE);
	ctx->search_start_time = ioloop_timeval;

	/* an invalid value only logs an error; timeout_secs keeps its
	   zeroed default from i_new() */
	value = mail_user_plugin_getenv(box->storage->user,
					"fts_index_timeout");
	if (value != NULL) {
		if (settings_get_time(value, &ctx->timeout_secs, &error) < 0)
			i_error("Invalid fts_index_timeout setting: %s", error);
	}
	*ctx_r = ctx;
	return 1;
}
static void fts_tika_parser_response(const struct http_response *response,
				     struct tika_fts_parser *parser)
{
	/* HTTP response callback for the Tika text-extraction PUT:
	   set parser->payload to the stream to read (possibly empty),
	   or mark the parser failed. Stops the ioloop that is waiting
	   for this response. */
	i_assert(parser->payload == NULL);

	switch (response->status) {
	case 200:
		/* read response */
		if (response->payload == NULL)
			parser->payload = i_stream_create_from_data("", 0);
		else {
			/* keep the payload stream alive past this
			   callback */
			i_stream_ref(response->payload);
			parser->payload = response->payload;
		}
		break;
	case 204: /* empty response */
	case 415: /* Unsupported Media Type */
	case 422: /* Unprocessable Entity */
		if (parser->user->mail_debug) {
			i_debug("fts_tika: PUT %s failed: %u %s",
				mail_user_plugin_getenv(parser->user, "fts_tika"),
				response->status, response->reason);
		}
		/* treated as "no text extracted" rather than an error */
		parser->payload = i_stream_create_from_data("", 0);
		break;
	default:
		i_error("fts_tika: PUT %s failed: %u %s",
			mail_user_plugin_getenv(parser->user, "fts_tika"),
			response->status, response->reason);
		parser->failed = TRUE;
		break;
	}
	parser->http_req = NULL;
	io_loop_stop(current_ioloop);
}
void fts_mail_namespaces_added(struct mail_namespace *namespaces)
{
	/* Enable FTS on every namespace's mailbox list when the "fts"
	   setting names a backend; otherwise stay disabled. */
	struct mail_namespace *ns;
	const char *driver;

	driver = mail_user_plugin_getenv(namespaces->user, "fts");
	if (driver == NULL) {
		if (namespaces->user->mail_debug)
			i_debug("fts: No fts setting - plugin disabled");
		return;
	}
	for (ns = namespaces; ns != NULL; ns = ns->next)
		fts_mailbox_list_init(ns->list, driver);
}
static int pop3_mailbox_open(struct mail_storage *storage) { struct pop3_migration_mail_storage *mstorage = POP3_MIGRATION_CONTEXT(storage); struct mail_namespace *ns; if (mstorage->pop3_box != NULL) return 0; ns = mail_namespace_find(storage->user->namespaces, mstorage->pop3_box_vname); mstorage->pop3_box = mailbox_alloc(ns->list, mstorage->pop3_box_vname, MAILBOX_FLAG_READONLY | MAILBOX_FLAG_POP3_SESSION); mstorage->all_mailboxes = mail_user_plugin_getenv(storage->user, "pop3_migration_all_mailboxes") != NULL; return 0; }
static int tika_get_http_client_url(struct mail_user *user,
				    struct http_url **http_url_r)
{
	/* Resolve (and cache per-user) the parsed fts_tika URL, lazily
	   creating the process-global HTTP client on first use.

	   Returns 0 with *http_url_r set on success, -1 when fts_tika
	   is unset or its URL is invalid. */
	struct fts_parser_tika_user *tuser = TIKA_USER_CONTEXT(user);
	struct http_client_settings http_set;
	const char *url, *error;

	url = mail_user_plugin_getenv(user, "fts_tika");
	if (url == NULL) {
		/* fts_tika disabled */
		return -1;
	}
	if (tuser != NULL) {
		/* cached; http_url == NULL records an earlier parse
		   failure, so we don't retry/re-log it */
		*http_url_r = tuser->http_url;
		return *http_url_r == NULL ? -1 : 0;
	}

	tuser = p_new(user->pool, struct fts_parser_tika_user, 1);
	/* register the context before parsing so a failed parse is
	   also cached (as http_url == NULL) */
	MODULE_CONTEXT_SET(user, fts_parser_tika_user_module, tuser);

	if (http_url_parse(url, NULL, 0, user->pool,
			   &tuser->http_url, &error) < 0) {
		i_error("fts_tika: Failed to parse HTTP url %s: %s",
			url, error);
		return -1;
	}
	if (tika_http_client == NULL) {
		/* process-global client shared by all users; settings of
		   the first user win */
		memset(&http_set, 0, sizeof(http_set));
		http_set.max_idle_time_msecs = 100;
		http_set.max_parallel_connections = 1;
		http_set.max_pipelined_requests = 1;
		http_set.max_redirects = 1;
		http_set.max_attempts = 3;
		http_set.connect_timeout_msecs = 5*1000;
		http_set.request_timeout_msecs = 60*1000;
		http_set.debug = user->mail_debug;
		tika_http_client = http_client_init(&http_set);
	}
	*http_url_r = tuser->http_url;
	return 0;
}
static bool snarf_box_find(struct mail_user *user, struct mailbox_list **list_r, const char **name_r) { struct mail_namespace *snarf_ns; const char *snarf_name; snarf_name = mail_user_plugin_getenv(user, "snarf"); if (snarf_name == NULL) return FALSE; if (!uni_utf8_str_is_valid(snarf_name)) { i_error("snarf: Mailbox name not UTF-8: %s", snarf_name); return FALSE; } snarf_ns = mail_namespace_find(user->namespaces, snarf_name); *list_r = snarf_ns->list; *name_r = snarf_name; return TRUE; }
static void pop3_migration_mail_storage_created(struct mail_storage *storage) { struct pop3_migration_mail_storage *mstorage; struct mail_storage_vfuncs *v = storage->vlast; const char *pop3_box_vname; pop3_box_vname = mail_user_plugin_getenv(storage->user, "pop3_migration_mailbox"); if (pop3_box_vname == NULL) return; mstorage = p_new(storage->pool, struct pop3_migration_mail_storage, 1); mstorage->module_ctx.super = *v; storage->vlast = &mstorage->module_ctx.super; v->destroy = pop3_migration_mail_storage_destroy; mstorage->pop3_box_vname = p_strdup(storage->pool, pop3_box_vname); MODULE_CONTEXT_SET(storage, pop3_migration_storage_module, mstorage); }
static bool snarf_box_find(struct mail_user *user, struct mailbox_list **list_r, const char **name_r) { struct mail_namespace *snarf_ns; const char *snarf_name; snarf_name = mail_user_plugin_getenv(user, "snarf"); if (snarf_name == NULL) return FALSE; snarf_ns = mail_namespace_find(user->namespaces, &snarf_name); if (snarf_ns == NULL) { i_error("snarf: Namespace not found for mailbox: %s", snarf_name); return FALSE; } *list_r = snarf_ns->list; *name_r = snarf_name; return TRUE; }
static void last_login_mail_user_created(struct mail_user *user)
{
	/* mail_user created hook: asynchronously record the user's
	   last-login timestamp into the configured dict. */
	struct mail_user_vfuncs *v = user->vlast;
	struct last_login_user *luser;
	struct dict *dict;
	struct dict_settings set;
	struct dict_transaction_context *trans;
	const char *dict_value, *key_name, *precision, *error;

	if (user->autocreated) {
		/* we want to handle only logged in users,
		   not lda's raw user or accessed shared users */
		return;
	}

	dict_value = mail_user_plugin_getenv(user, "last_login_dict");
	if (dict_value == NULL || dict_value[0] == '\0')
		return;

	memset(&set, 0, sizeof(set));
	set.username = user->username;
	set.base_dir = user->set->base_dir;
	if (mail_user_get_home(user, &set.home_dir) <= 0)
		set.home_dir = NULL;
	if (dict_init(dict_value, &set, &dict, &error) < 0) {
		i_error("last_login_dict: dict_init(%s) failed: %s",
			dict_value, error);
		return;
	}

	luser = p_new(user->pool, struct last_login_user, 1);
	/* chain into the user's vfunc list so our deinit gets called */
	luser->module_ctx.super = *v;
	user->vlast = &luser->module_ctx.super;
	v->deinit = last_login_user_deinit;

	luser->dict = dict;
	MODULE_CONTEXT_SET(user, last_login_user_module, luser);

	key_name = mail_user_plugin_getenv(user, "last_login_key");
	if (key_name == NULL) {
		key_name = t_strdup_printf(LAST_LOGIN_DEFAULT_KEY_PREFIX"%s",
					   user->username);
	}
	key_name = t_strconcat(DICT_PATH_SHARED, key_name, NULL);

	precision = mail_user_plugin_getenv(user, "last_login_precision");

	trans = dict_transaction_begin(dict);
	if (precision == NULL || strcmp(precision, "s") == 0)
		dict_set(trans, key_name, dec2str(ioloop_time));
	else if (strcmp(precision, "ms") == 0) {
		/* seconds followed by zero-padded milliseconds */
		dict_set(trans, key_name, t_strdup_printf(
			"%ld%03u", (long)ioloop_timeval.tv_sec,
			(unsigned int)(ioloop_timeval.tv_usec/1000)));
	} else if (strcmp(precision, "us") == 0) {
		/* seconds followed by zero-padded microseconds */
		dict_set(trans, key_name, t_strdup_printf(
			"%ld%06u", (long)ioloop_timeval.tv_sec,
			(unsigned int)ioloop_timeval.tv_usec));
	} else if (strcmp(precision, "ns") == 0) {
		/* no nanosecond clock source here: microseconds padded
		   with three trailing zeros */
		dict_set(trans, key_name, t_strdup_printf(
			"%ld%06u000", (long)ioloop_timeval.tv_sec,
			(unsigned int)ioloop_timeval.tv_usec));
	} else {
		/* invalid setting: log and commit an empty transaction */
		i_error("last_login_dict: Invalid last_login_precision '%s'",
			precision);
	}
	dict_transaction_no_slowness_warning(trans);
	dict_transaction_commit_async(&trans, last_login_dict_commit, user);
}
static void stats_user_created(struct mail_user *user)
{
	/* mail_user created hook: enable per-user stats tracking when a
	   positive stats_refresh interval is configured. Maintains the
	   process-global single-user fast path via stats_global_user /
	   stats_user_count. */
	struct ioloop_context *ioloop_ctx =
		io_loop_get_current_context(current_ioloop);
	struct stats_user *suser;
	struct mail_user_vfuncs *v = user->vlast;
	const char *path, *str, *error;
	unsigned int refresh_secs;

	if (ioloop_ctx == NULL) {
		/* we're probably running some test program, or at least
		   mail-storage-service wasn't used to create this user.
		   disable stats tracking. */
		return;
	}
	if (user->autocreated) {
		/* lda / shared user. we're not tracking this one. */
		return;
	}

	/* get refresh time */
	str = mail_user_plugin_getenv(user, "stats_refresh");
	if (str == NULL)
		return;
	if (settings_get_time(str, &refresh_secs, &error) < 0) {
		i_error("stats: Invalid stats_refresh setting: %s", error);
		return;
	}
	if (refresh_secs == 0)
		return;

	if (global_stats_conn == NULL) {
		path = t_strconcat(user->set->base_dir,
				   "/"MAIL_STATS_SOCKET_NAME, NULL);
		global_stats_conn = stats_connection_create(path);
	}
	stats_connection_ref(global_stats_conn);

	if (stats_user_count == 0) {
		/* first user connection */
		stats_global_user = user;
	} else if (stats_user_count == 1) {
		/* second user connection. we'll need to start doing
		   per-io callback tracking now. (we might have been
		   doing it also previously but just temporarily quickly
		   dropped to having 1 user, in which case
		   stats_global_user=NULL) */
		if (stats_global_user != NULL) {
			stats_add_session(stats_global_user);
			stats_global_user = NULL;
		}
	}
	stats_user_count++;

	suser = p_new(user->pool, struct stats_user, 1);
	/* chain into the user's vfunc list so our deinit gets called */
	suser->module_ctx.super = *v;
	user->vlast = &suser->module_ctx.super;
	v->deinit = stats_user_deinit;

	suser->refresh_secs = refresh_secs;
	str = mail_user_plugin_getenv(user, "stats_track_cmds");
	if (str != NULL && strcmp(str, "yes") == 0)
		suser->track_commands = TRUE;

	suser->stats_conn = global_stats_conn;
	guid_128_generate(suser->session_guid);
	suser->last_session_update = time(NULL);

	suser->ioloop_ctx = ioloop_ctx;
	io_loop_context_add_callbacks(ioloop_ctx,
				      stats_io_activate,
				      stats_io_deactivate, user);
	MODULE_CONTEXT_SET(user, stats_user_module, suser);
	stats_connection_connect(suser->stats_conn, user);
}
static bool cmd_compress(struct client_command_context *cmd)
{
	/* IMAP COMPRESS command handler: negotiate a compression
	   mechanism and wrap the client's streams with it. Returns TRUE
	   when the command is finished, FALSE when more input is
	   needed. */
	struct client *client = cmd->client;
	struct zlib_client *zclient = IMAP_ZLIB_IMAP_CONTEXT(client);
	const struct compression_handler *handler;
	const struct imap_arg *args;
	struct istream *old_input;
	struct ostream *old_output;
	const char *mechanism, *value;
	unsigned int level;

	/* <mechanism> */
	if (!client_read_args(cmd, 0, 0, &args))
		return FALSE;
	if (!imap_arg_get_atom(args, &mechanism) ||
	    !IMAP_ARG_IS_EOL(&args[1])) {
		client_send_command_error(cmd, "Invalid arguments.");
		return TRUE;
	}

	if (zclient->handler != NULL) {
		/* compression already negotiated on this connection */
		client_send_tagline(cmd, t_strdup_printf(
			"NO [COMPRESSIONACTIVE] COMPRESSION=%s already enabled.",
			t_str_ucase(zclient->handler->name)));
		return TRUE;
	}
	if (client->tls_compression) {
		client_send_tagline(cmd,
			"NO [COMPRESSIONACTIVE] TLS compression already enabled.");
		return TRUE;
	}
	handler = compression_lookup_handler(t_str_lcase(mechanism));
	if (handler == NULL || handler->create_istream == NULL) {
		client_send_tagline(cmd, "NO Unknown compression mechanism.");
		return TRUE;
	}

	/* the OK reply must be written before the streams are swapped,
	   so it goes out uncompressed */
	imap_zlib_client_skip_line(client);
	client_send_tagline(cmd, "OK Begin compression.");

	/* unset, unparsable or out-of-range (not 1..9) level falls
	   back to the default */
	value = mail_user_plugin_getenv(client->user,
					"imap_zlib_compress_level");
	if (value == NULL || str_to_uint(value, &level) < 0 ||
	    level <= 0 || level > 9)
		level = IMAP_COMPRESS_DEFAULT_LEVEL;

	old_input = client->input;
	old_output = client->output;
	client->input = handler->create_istream(old_input, FALSE);
	client->output = handler->create_ostream(old_output, level);
	/* preserve output offset so that the bytes out counter in
	   logout message doesn't get reset here */
	client->output->offset = old_output->offset;
	i_stream_unref(&old_input);
	o_stream_unref(&old_output);

	client_update_imap_parser_streams(client);
	zclient->handler = handler;
	return TRUE;
}
	storage = p_new(pool, struct virtual_storage, 1);
	storage->storage = virtual_storage;
	storage->storage.pool = pool;
	p_array_init(&storage->open_stack, pool, 8);
	return &storage->storage;
}

static int
virtual_storage_create(struct mail_storage *_storage,
		       struct mail_namespace *ns ATTR_UNUSED,
		       const char **error_r)
{
	/* Storage create hook: parse the optional
	   virtual_max_open_mailboxes setting, falling back to the
	   compiled-in default when unset. Returns 0 on success, -1 with
	   *error_r set on an unparsable value. */
	struct virtual_storage *storage = (struct virtual_storage *)_storage;
	const char *value;

	value = mail_user_plugin_getenv(_storage->user,
					"virtual_max_open_mailboxes");
	if (value == NULL)
		storage->max_open_mailboxes = VIRTUAL_DEFAULT_MAX_OPEN_MAILBOXES;
	else if (str_to_uint(value, &storage->max_open_mailboxes) < 0) {
		*error_r = "Invalid virtual_max_open_mailboxes setting";
		return -1;
	}
	return 0;
}

static void
virtual_storage_get_list_settings(const struct mail_namespace *ns ATTR_UNUSED,
				  struct mailbox_list_settings *set)
{
	if (set->layout == NULL)
		set->layout = MAILBOX_LIST_NAME_FS;
static void stats_user_created(struct mail_user *user)
{
	/* mail_user created hook: enable per-user stats tracking when a
	   positive stats_refresh interval is configured. Maintains the
	   process-global single-user fast path via stats_global_user /
	   stats_user_count and starts the periodic refresh timeout. */
	struct ioloop_context *ioloop_ctx =
		io_loop_get_current_context(current_ioloop);
	struct stats_user *suser;
	struct mail_user_vfuncs *v = user->vlast;
	const char *path, *str, *error;
	unsigned int refresh_secs;

	if (ioloop_ctx == NULL) {
		/* we're probably running some test program, or at least
		   mail-storage-service wasn't used to create this user.
		   disable stats tracking. */
		return;
	}
	if (user->autocreated) {
		/* lda / shared user. we're not tracking this one. */
		return;
	}

	/* get refresh time */
	str = mail_user_plugin_getenv(user, "stats_refresh");
	if (str == NULL)
		return;
	if (settings_get_time(str, &refresh_secs, &error) < 0) {
		i_error("stats: Invalid stats_refresh setting: %s", error);
		return;
	}
	if (refresh_secs == 0)
		return;
	if (refresh_secs > SESSION_STATS_FORCE_REFRESH_SECS) {
		/* clamp to the forced-refresh upper bound */
		i_warning("stats: stats_refresh too large, changing to %u",
			  SESSION_STATS_FORCE_REFRESH_SECS);
		refresh_secs = SESSION_STATS_FORCE_REFRESH_SECS;
	}

	if (global_stats_conn == NULL) {
		path = t_strconcat(user->set->base_dir,
				   "/"MAIL_STATS_SOCKET_NAME, NULL);
		global_stats_conn = stats_connection_create(path);
	}
	stats_connection_ref(global_stats_conn);

	if (stats_user_count == 0) {
		/* first user connection */
		stats_global_user = user;
	} else if (stats_user_count == 1) {
		/* second user connection. we'll need to start doing
		   per-io callback tracking now. (we might have been
		   doing it also previously but just temporarily quickly
		   dropped to having 1 user, in which case
		   stats_global_user=NULL) */
		if (stats_global_user != NULL) {
			stats_add_session(stats_global_user);
			stats_global_user = NULL;
		}
	}
	stats_user_count++;

	suser = p_new(user->pool, struct stats_user, 1);
	/* chain into the user's vfunc list */
	suser->module_ctx.super = *v;
	user->vlast = &suser->module_ctx.super;
	v->deinit = stats_user_deinit;
	v->stats_fill = stats_user_stats_fill;

	suser->refresh_secs = refresh_secs;
	str = mail_user_plugin_getenv(user, "stats_track_cmds");
	if (str != NULL && strcmp(str, "yes") == 0)
		suser->track_commands = TRUE;

	suser->stats_conn = global_stats_conn;
	if (user->session_id != NULL && user->session_id[0] != '\0')
		suser->stats_session_id = user->session_id;
	else {
		/* no session ID from login process - generate one */
		guid_128_t guid;

		guid_128_generate(guid);
		suser->stats_session_id =
			p_strdup(user->pool, guid_128_to_string(guid));
	}
	suser->last_session_update = time(NULL);
	user->stats_enabled = TRUE;

	suser->ioloop_ctx = ioloop_ctx;
	io_loop_context_add_callbacks(ioloop_ctx,
				      stats_io_activate,
				      stats_io_deactivate, user);

	suser->pre_io_stats = stats_alloc(user->pool);
	suser->session_stats = stats_alloc(user->pool);
	suser->last_sent_session_stats = stats_alloc(user->pool);

	MODULE_CONTEXT_SET(user, stats_user_module, suser);
	mail_stats_connection_connect(suser->stats_conn, user);
	suser->to_stats_timeout =
		timeout_add(suser->refresh_secs*1000,
			    session_stats_refresh_timeout, user);
	/* fill the initial values. this is necessary for the
	   process-global values (e.g. getrusage()) if the process is
	   reused for multiple users. otherwise the next user will start
	   with the previous one's last values. */
	mail_user_stats_fill(user, suser->pre_io_stats);
}