static void verify_dump(char *unused_name, char **unused_argv)
{

    /*
     * Dump preliminary cache cleanup statistics when the process commits
     * suicide while a cache cleanup run is in progress. We can't currently
     * distinguish between "postfix reload" (we should restart) or "maximal
     * idle time reached" (we could finish the cache cleanup first).
     *
     * Guard against a null map, for consistency with psc_dump(): if the
     * cache was never opened, or was already closed, calling
     * dict_cache_close() here would dereference a null pointer.
     */
    if (verify_map) {
	dict_cache_close(verify_map);
	verify_map = 0;
    }
}
static void psc_dump(void)
{

    /*
     * Before this process terminates itself, close the cache so that an
     * in-progress cleanup run can report its preliminary statistics. At
     * this point we cannot tell whether we are dying because of "postfix
     * reload" (we should restart) or because the maximal idle time was
     * reached (we could have finished the cleanup run first).
     */
    if (psc_cache_map == 0)
	return;
    dict_cache_close(psc_cache_map);
    psc_cache_map = 0;
}
static void psc_drain(char *unused_service, char **unused_argv)
{
    int     failures = 0;

    /*
     * After "postfix reload", complete work-in-progress in the background,
     * instead of dropping already-accepted connections on the floor.
     *
     * Unfortunately we must close all writable tables, so we can't store or
     * look up reputation information. The reason is that we don't have any
     * multi-writer safety guarantees. We also can't use the single-writer
     * proxywrite service, because its latency guarantees are too weak.
     *
     * All error retry counts shall be limited. Instead of blocking here, we
     * could retry failed fork() operations in the event call-back routines,
     * but we don't need perfection. The host system is severely overloaded
     * and service levels are already way down.
     *
     * XXX Some Berkeley DB versions break with close-after-fork. Every new
     * version is an improvement over its predecessor.
     */
    if (psc_cache_map != 0 /* XXX && psc_cache_map requires locking */ ) {
	dict_cache_close(psc_cache_map);
	psc_cache_map = 0;
    }

    /*
     * Try to hand off to a background process. Tolerate up to five
     * transient failures (warn, pause, retry) before giving up for good.
     */
    while (event_server_drain() != 0) {
	msg_warn("fork: %m");
	sleep(1);
	if (++failures >= 5)
	    msg_fatal("fork: %m");
    }
}
int main(int argc, char **argv) { DICT_CACHE_TEST *test_job; VSTRING *inbuf = vstring_alloc(100); char *bufp; ARGV *args; DICT_CACHE *cache = 0; int stdin_is_tty; msg_vstream_init(argv[0], VSTREAM_ERR); if (argc != 1) usage(argv[0]); test_job = create_requests(DICT_CACHE_SREQ_LIMIT); stdin_is_tty = isatty(0); for (;;) { if (stdin_is_tty) { vstream_printf("> "); vstream_fflush(VSTREAM_OUT); } if (vstring_fgets_nonl(inbuf, VSTREAM_IN) == 0) break; bufp = vstring_str(inbuf); if (!stdin_is_tty) { vstream_printf("> %s\n", bufp); vstream_fflush(VSTREAM_OUT); } if (*bufp == '#') continue; args = argv_split(bufp, DELIMS); if (argc == 0) { vstream_printf("usage: %s\n", USAGE); vstream_fflush(VSTREAM_OUT); continue; } if (strcmp(args->argv[0], "verbose") == 0 && args->argc == 2) { msg_verbose = atoi(args->argv[1]); } else if (strcmp(args->argv[0], "elapsed") == 0 && args->argc == 2) { show_elapsed = atoi(args->argv[1]); #ifdef HAS_LMDB } else if (strcmp(args->argv[0], "lmdb_map_size") == 0 && args->argc == 2) { dict_lmdb_map_size = atol(args->argv[1]); #endif } else if (strcmp(args->argv[0], "cache") == 0 && args->argc == 2) { if (cache) dict_cache_close(cache); cache = dict_cache_open(args->argv[1], O_CREAT | O_RDWR, DICT_CACHE_OPEN_FLAGS); } else if (strcmp(args->argv[0], "reset") == 0 && args->argc == 1) { reset_requests(test_job); } else if (strcmp(args->argv[0], "run") == 0 && args->argc == 1) { run_requests(test_job, cache, inbuf); } else if (strcmp(args->argv[0], "status") == 0 && args->argc == 1) { show_status(test_job, cache); } else { add_request(test_job, args); } vstream_fflush(VSTREAM_OUT); argv_free(args); } vstring_free(inbuf); free_requests(test_job); if (cache) dict_cache_close(cache); return (0); }