static void leave_client(struct wm_client *client) { g_assert(config_focus_follow); if(is_enter_focus_event_ignored(event_curserial)) { wm_debug_type(WM_DEBUG_FOCUS, "Ignoring leave event with serial %lu\n on client 0x%x", event_curserial, client->window); return; } if(client == focus_client) { if(config_focus_delay) { struct focus_delay_data *data; if(unfocus_delay_timeout_id) g_source_remove(unfocus_delay_timeout_id); data = g_slice_new(struct focus_delay_data); data->client = client; data->time = event_time(); data->serial = event_curserial; unfocus_delay_timeout_id = g_timeout_add_full(G_PRIORITY_DEFAULT, config_focus_delay, unfocus_delay_func, data, unfocus_delay_dest); unfocus_delay_timeout_client = client; } else { struct focus_delay_data data; data.client = client; data.time = event_time(); data.serial = event_curserial; unfocus_delay_func(&data); } }
/*
 * qmgr_active_done_25_generic - decide the fate of a message whose
 * recipients have all been tried: return it when expired, send a delay
 * warning when warranted, or fall through to final cleanup.
 */
static void qmgr_active_done_25_generic(QMGR_MESSAGE *message)
{
    const char *myname = "qmgr_active_done_25_generic";

    /*
     * If we get to this point we have tried all recipients for this message.
     * If the message is too old, try to bounce it.
     *
     * Bounces are sent asynchronously to avoid stalling while the cleanup
     * daemon waits for the qmgr to accept the "new mail" trigger.
     */
    if (message->flags) {
	/* Non-empty sender: regular mail lifetime; empty sender (a bounce):
	 * the DSN queue lifetime applies instead. */
	if (event_time() >= message->create_time +
	    (*message->sender ? var_max_queue_time : var_dsn_queue_time)) {
	    msg_info("%s: from=<%s>, status=expired, returned to sender",
		     message->queue_id, message->sender);
	    /* VERP delivery needs the VERP-aware flush routine. */
	    if (message->verp_delims == 0 || var_verp_bounce_off)
		adefer_flush(BOUNCE_FLAG_KEEP,
			     message->queue_name,
			     message->queue_id,
			     message->encoding,
			     message->sender,
			     message->dsn_envid,
			     message->dsn_ret,
			     qmgr_active_done_3_defer_flush,
			     (char *) message);
	    else
		adefer_flush_verp(BOUNCE_FLAG_KEEP,
				  message->queue_name,
				  message->queue_id,
				  message->encoding,
				  message->sender,
				  message->dsn_envid,
				  message->dsn_ret,
				  message->verp_delims,
				  qmgr_active_done_3_defer_flush,
				  (char *) message);
	    return;
	} else if (message->warn_time > 0
		   && event_time() >= message->warn_time - 1) {
	    if (msg_verbose)
		msg_info("%s: sending defer warning for %s",
			 myname, message->queue_id);
	    adefer_warn(BOUNCE_FLAG_KEEP,
			message->queue_name,
			message->queue_id,
			message->encoding,
			message->sender,
			message->dsn_envid,
			message->dsn_ret,
			qmgr_active_done_3_defer_warn,
			(char *) message);
	    return;
	}
    }

    /*
     * Asynchronous processing does not reach this point.
     */
    qmgr_active_done_3_generic(message);
}
/*
 * anvil_remote_newtls_stat - report the new-TLS-session rate for one
 * remote client to the local server, without updating any counters.
 */
static void anvil_remote_newtls_stat(VSTREAM *client_stream, const char *ident)
{
    ANVIL_REMOTE *entry;
    int     rate = 0;

    /*
     * Be prepared for "postfix reload" after "connect": the entry for this
     * client may simply not exist, in which case we report a zero rate.
     */
    entry = (ANVIL_REMOTE *) htable_find(anvil_remote_map, ident);
    if (entry != 0) {
	/*
	 * Do not report stale information: expire the rate first when the
	 * current time unit has already passed.
	 */
	if (entry->start != 0
	    && entry->start + var_anvil_time_unit < event_time())
	    ANVIL_REMOTE_RSET_RATE(entry, 0);
	rate = entry->ntls;
    }

    /*
     * Respond to local server.
     */
    attr_print_plain(client_stream, ATTR_FLAG_NONE,
		     ATTR_TYPE_INT, ANVIL_ATTR_STATUS, ANVIL_STAT_OK,
		     ATTR_TYPE_INT, ANVIL_ATTR_RATE, rate,
		     ATTR_TYPE_END);
}
/**
 * Insert one generated row (time UUID, millisecond timestamp, text sample)
 * into table_name via session.  Request timeouts are tolerated; any other
 * error is printed to stderr and reported as failure.
 */
bool execute_insert(CassSession* session, const std::string& table_name) {
  std::string query = str(boost::format("INSERT INTO %s (id, event_time, text_sample) VALUES (?, ?, ?)") % table_name);
  test_utils::CassStatementPtr statement(cass_statement_new(query.c_str(), 3));

  // Cassandra 1.x only supports bound parameters via prepared statements.
  if (version.major == 1) {
    test_utils::CassPreparedPtr prepared = test_utils::prepare(session, query.c_str());
    statement = test_utils::CassStatementPtr(cass_prepared_bind(prepared.get()));
  }

  // Derive all row values from the current wall-clock time.
  boost::chrono::system_clock::time_point current(boost::chrono::system_clock::now());
  boost::chrono::milliseconds since_epoch_ms(
      boost::chrono::duration_cast<boost::chrono::milliseconds>(current.time_since_epoch()));
  std::string text_sample(test_utils::string_from_time_point(current));

  cass_statement_bind_uuid(statement.get(), 0, test_utils::generate_time_uuid(uuid_gen));
  cass_statement_bind_int64(statement.get(), 1, since_epoch_ms.count());
  cass_statement_bind_string(statement.get(), 2, text_sample.c_str());

  test_utils::CassFuturePtr future(cass_session_execute(session, statement.get()));
  cass_future_wait(future.get());

  CassError code = cass_future_error_code(future.get());
  if (code == CASS_OK || code == CASS_ERROR_LIB_REQUEST_TIMED_OUT) {  // Timeout is okay
    return true;
  }

  CassString message;
  cass_future_error_message(future.get(), &message.data, &message.length);
  fprintf(stderr, "Error occurred during insert '%.*s'\n",
          static_cast<int>(message.length), message.data);
  return false;
}
/*
 * master_avail_event - a service's listen socket became readable: spawn a
 * server process, and keep simple overload ("stress") bookkeeping while
 * doing so.
 */
static void master_avail_event(int event, char *context)
{
    MASTER_SERV *serv = (MASTER_SERV *) context;
    time_t  now;

    if (event == 0)				/* XXX Can this happen? */
	msg_panic("master_avail_event: null event");
    else {

	/*
	 * When all servers for a public internet service are busy, we start
	 * creating server processes with "-o stress=yes" on the command
	 * line, and keep creating such processes until the process count is
	 * below the limit for at least 1000 seconds. This provides a minimal
	 * solution that can be adopted into legacy and stable Postfix
	 * releases.
	 * 
	 * This is not the right place to update serv->stress_param_val in
	 * response to stress level changes. Doing so would contaminate
	 * the "postfix reload" code with stress management implementation
	 * details, creating a source of future bugs. Instead, we update
	 * simple counters or flags here, and use their values to determine
	 * the proper serv->stress_param_val value when exec-ing a server
	 * process.
	 */
	if (serv->stress_param_val != 0
	    && !MASTER_LIMIT_OK(serv->max_proc, serv->total_proc + 1)) {
	    now = event_time();
	    /* Stress period had lapsed: restart the service, presumably so
	     * new processes pick up the stress setting — see note above. */
	    if (serv->stress_expire_time < now)
		master_restart_service(serv, NO_CONF_RELOAD);
	    /* Extend the stressed state by another 1000 seconds. */
	    serv->stress_expire_time = now + 1000;
	}
	master_spawn(serv);
    }
}
static int flush_send_file_service(const char *queue_id) { const char *myname = "flush_send_file_service"; VSTRING *queue_file; struct utimbuf tbuf; static char qmgr_scan_trigger[] = { QMGR_REQ_SCAN_INCOMING, /* scan incoming queue */ }; /* * Sanity check. */ if (!mail_queue_id_ok(queue_id)) return (FLUSH_STAT_BAD); if (msg_verbose) msg_info("%s: requesting delivery for queue_id %s", myname, queue_id); queue_file = vstring_alloc(30); tbuf.actime = tbuf.modtime = event_time(); if (flush_one_file(queue_id, queue_file, &tbuf, UNTHROTTLE_AFTER) > 0) mail_trigger(MAIL_CLASS_PUBLIC, var_queue_service, qmgr_scan_trigger, sizeof(qmgr_scan_trigger)); vstring_free(queue_file); return (FLUSH_STAT_OK); }
/*
 * handle_events - test command: run the event loop for the given number of
 * seconds, sleeping off any remainder when the loop returns early.
 */
static void handle_events(ARGV *argv)
{
    int     delay;
    time_t  started;
    time_t  finished;

    /* Require exactly one positive numeric argument. */
    if (argv->argc != 2 || (delay = atoi(argv->argv[1])) <= 0) {
	msg_error("usage: %s time", argv->argv[0]);
	return;
    }
    started = event_time();
    event_drain(delay);
    finished = event_time();

    /* event_drain() may finish early; pad out to the requested delay. */
    if (finished < started + delay)
	sleep(started + delay - finished);
}
/*
 * prompt_show - map and activate an ObPrompt dialog.  When already mapped,
 * only request activation; otherwise choose the focused button, set up
 * transient-for and modality hints relative to @parent, then lay out,
 * render, and manage the window.
 */
void prompt_show(ObPrompt *self, ObClient *parent, gboolean modal)
{
    gint i;

    if (self->mapped) {
        /* activate the prompt */
        OBT_PROP_MSG(ob_screen, self->super.window, NET_ACTIVE_WINDOW,
                     1,	/* from an application.. */
                     event_time(), 0, 0, 0);
        return;
    }

    /* set the focused button (if not found then the first button is used) */
    self->focus = &self->button[0];
    for (i = 0; i < self->n_buttons; ++i)
        if (self->button[i].result == self->default_result) {
            self->focus = &self->button[i];
            break;
        }

    if (parent) {
        Atom states[1];
        gint nstates;
        Window p;
        XWMHints h;

        if (parent->group) {
            /* make it transient for the window's group */
            h.flags = WindowGroupHint;
            h.window_group = parent->group->leader;
            p = obt_root(ob_screen);
        } else {
            /* make it transient for the window directly */
            h.flags = 0;
            p = parent->window;
        }
        XSetWMHints(obt_display, self->super.window, &h);
        OBT_PROP_SET32(self->super.window, WM_TRANSIENT_FOR, WINDOW, p);

        /* advertise modality via _NET_WM_STATE only when requested */
        states[0] = OBT_PROP_ATOM(NET_WM_STATE_MODAL);
        nstates = (modal ? 1 : 0);
        OBT_PROP_SETA32(self->super.window, NET_WM_STATE, ATOM,
                        states, nstates);
    } else
        OBT_PROP_ERASE(self->super.window, WM_TRANSIENT_FOR);

    /* set up the dialog and render it */
    prompt_layout(self);
    render_all(self);
    client_manage(self->super.window, self);

    self->mapped = TRUE;
}
/*
** probe_enq_ok()
**
** does the terminal do enq/ack handshaking?
**
** Sends the ENQ string (u9, default ^E) and waits for the terminal's
** ACK answer.  On success, classifies the ACK as single-character,
** ANSI-style (terminated like u8), or fixed-length, and records the
** terminator/length for later resynchronization.
*/
static void probe_enq_ok(void)
{
    int tc, len, ulen;

    put_str("Testing ENQ/ACK, standby...");
    fflush(stdout);
    can_test("u8 u9", FLAG_TESTED);

    tty_ENQ = user9 ? user9 : "\005";	/* (u9) or default ^E */
    tc_putp(tty_ENQ);
    event_start(TIME_SYNC);	/* start the timer */
    read_key(tty_ACK, TTY_ACK_SIZE - 1);

    /* Took longer than 0.4s or no answer at all: handshake failed. */
    if (event_time(TIME_SYNC) > 400000 || tty_ACK[0] == '\0') {
	/* These characters came from the user. Sigh. */
	tty_can_sync = SYNC_FAILED;
	ptext("\nThis program expects the ENQ sequence to be");
	ptext(" answered with the ACK character. This will help");
	ptext(" the program reestablish synchronization when");
	ptextln(" the terminal is overrun with data.");
	ptext("\nENQ sequence from (u9): ");
	putln(expand(tty_ENQ));
	ptext("ACK received: ");
	putln(expand(tty_ACK));
	len = user8 ? strlen(user8) : 0;
	sprintf(temp, "Length of ACK %d. Expected length of ACK %d.",
		(int) strlen(tty_ACK), len);
	ptextln(temp);
	if (len) {
	    temp[0] = user8[len - 1];
	    temp[1] = '\0';
	    ptext("Terminating character found in (u8): ");
	    putln(expand(temp));
	}
	return;
    }

    tty_can_sync = SYNC_TESTED;
    if ((len = strlen(tty_ACK)) == 1) {
	/* single character acknowledge string */
	ACK_terminator = tty_ACK[0];
	ACK_length = 4096;
	return;
    }
    tc = tty_ACK[len - 1];
    if (user8) {
	ulen = strlen(user8);
	if (tc == user8[ulen - 1]) {
	    /* ANSI style acknowledge string */
	    ACK_terminator = tc;
	    ACK_length = 4096;
	    return;
	}
    }
    /* fixed length acknowledge string */
    ACK_length = len;
    ACK_terminator = -2;
}
/*
 * mouse_replay_pointer - if a pointer-event replay was requested, release
 * the frozen pointer event back to the client and clear the request flag.
 */
void mouse_replay_pointer(void)
{
    if (!replay_pointer_needed)
        return;

    /* replay the pointer event before any windows move */
    XAllowEvents(obt_display, ReplayPointer, event_time());
    replay_pointer_needed = FALSE;
}
static int flush_refresh_service(int max_age) { const char *myname = "flush_refresh_service"; SCAN_DIR *scan; char *site_path; struct stat st; VSTRING *path = vstring_alloc(10); scan = scan_dir_open(MAIL_QUEUE_FLUSH); while ((site_path = mail_scan_dir_next(scan)) != 0) { if (!mail_queue_id_ok(site_path)) continue; /* XXX grumble. */ mail_queue_path(path, MAIL_QUEUE_FLUSH, site_path); if (stat(STR(path), &st) < 0) { if (errno != ENOENT) msg_warn("%s: stat %s: %m", myname, STR(path)); else if (msg_verbose) msg_info("%s: %s: %m", myname, STR(path)); continue; } if (st.st_size == 0) { if (st.st_mtime + var_fflush_purge < event_time()) { if (unlink(STR(path)) < 0) msg_warn("remove logfile %s: %m", STR(path)); else if (msg_verbose) msg_info("%s: unlink %s, empty and unchanged for %d days", myname, STR(path), var_fflush_purge / 86400); } else if (msg_verbose) msg_info("%s: skip logfile %s - empty log", myname, site_path); } else if (st.st_atime + max_age < event_time()) { if (msg_verbose) msg_info("%s: flush logfile %s", myname, site_path); flush_send_path(site_path, REFRESH_ONLY); } else { if (msg_verbose) msg_info("%s: skip logfile %s, unread for <%d hours(s) ", myname, site_path, max_age / 3600); } } scan_dir_close(scan); vstring_free(path); return (FLUSH_STAT_OK); }
/*
 * master_avail_listen - decide whether the master should monitor this
 * service's listen socket(s) for connection requests, and enable or
 * disable the read events accordingly.
 */
void master_avail_listen(MASTER_SERV *serv)
{
    const char *myname = "master_avail_listen";
    int     listen_flag;
    time_t  now;
    int     n;

    /*
     * Caution: several other master_XXX modules call master_avail_listen(),
     * master_avail_more() or master_avail_less(). To avoid mutual dependency
     * problems, the code below invokes no code in other master_XXX modules,
     * and modifies no data that is maintained by other master_XXX modules.
     * 
     * When no-one else is monitoring the service's listen socket, start
     * monitoring the socket for connection requests. All this under the
     * restriction that we have sufficient resources to service a connection
     * request.
     */
    if (msg_verbose)
	msg_info("%s: %s avail %d total %d max %d", myname, serv->name,
		 serv->avail_proc, serv->total_proc, serv->max_proc);
    if (MASTER_THROTTLED(serv) || serv->avail_proc > 0) {
	/* Throttled, or idle servers exist: nothing to listen for. */
	listen_flag = 0;
    } else if (MASTER_LIMIT_OK(serv->max_proc, serv->total_proc)) {
	listen_flag = 1;
    } else {
	/* At the process limit: warn at most once per 1000 seconds. */
	listen_flag = 0;
	if (serv->stress_param_val != 0) {
	    now = event_time();
	    if (serv->busy_warn_time < now - 1000) {
		serv->busy_warn_time = now;
		msg_warn("service \"%s\" (%s) has reached its process limit \"%d\": "
			 "new clients may experience noticeable delays",
			 serv->ext_name, serv->name, serv->max_proc);
		msg_warn("to avoid this condition, increase the process count "
			 "in master.cf or reduce the service time per client");
		msg_warn("see http://www.postfix.org/STRESS_README.html for "
			 "examples of stress-adapting configuration settings");
	    }
	}
    }
    if (listen_flag && !MASTER_LISTENING(serv)) {
	if (msg_verbose)
	    msg_info("%s: enable events %s", myname, serv->name);
	for (n = 0; n < serv->listen_fd_count; n++)
	    event_enable_read(serv->listen_fd[n], master_avail_event,
			      (char *) serv);
	serv->flags |= MASTER_FLAG_LISTEN;
    } else if (!listen_flag && MASTER_LISTENING(serv)) {
	if (msg_verbose)
	    msg_info("%s: disable events %s", myname, serv->name);
	for (n = 0; n < serv->listen_fd_count; n++)
	    event_disable_readwrite(serv->listen_fd[n]);
	serv->flags &= ~MASTER_FLAG_LISTEN;
    }
}
/*
 * ecal_objects_changed - ECalView callback for changed/added calendar
 * components: insert or refresh one list-store row per component.
 */
static void
ecal_objects_changed (ECalView * cview, GList *objects,
		      DatesNavigatorModel * nav)
{
	ECal *ecal = e_cal_view_get_client (cview);
	/* The navigator model is a filter; mutate its child store. */
	GtkListStore * store = GTK_LIST_STORE (
		gtk_tree_model_filter_get_model (
			GTK_TREE_MODEL_FILTER (nav)));

	for (; objects; objects = objects->next) {
		const char *uid = icalcomponent_get_uid (objects->data);
		gchar *uri_uid;
		GtkTreeIter iter;
		const gchar * summary;
		gchar * s = NULL;
		gchar time[100];
		gchar * folded = NULL;

		if (!uid)
			continue;

		/* Rows are keyed by calendar URI + component UID;
		 * reuse the existing row if there is one. */
		uri_uid = g_strconcat (e_cal_get_uri (ecal), uid, NULL);
		if (!find_item (store, uri_uid, &iter))
			gtk_list_store_append (store, &iter);

		summary = icalcomponent_get_summary (objects->data);
		/* Case-folded copy for case-insensitive matching. */
		if (summary)
			folded = g_utf8_casefold (summary, -1);

		/* use only first 15 chars of the summary */
		if (summary && g_utf8_strlen (summary, -1) > 15) {
			s = g_strdup (summary);
			gchar * p = g_utf8_offset_to_pointer (s, 15);
			*p = 0;
			summary = s;
		}
		/* Format the component's time into the local buffer. */
		event_time (objects->data, (gchar*)&time, sizeof(time),
			    nav->priv->format);
		gtk_list_store_set (store, &iter,
				    DN_Name, summary,
				    DN_Time, time,
				    DN_Uid, uri_uid,
				    DN_NameFolded, folded,
				    -1);
		g_free (uri_uid);
		g_free (s);
		g_free (folded);
	}
}
/**
 * Bind one generated row (time UUID, millisecond timestamp, text sample)
 * to the statement's three parameters and run the insert.
 */
bool bind_and_execute_insert(CassStatement* statement) {
  // Derive all row values from the current wall-clock time.
  boost::chrono::system_clock::time_point current(boost::chrono::system_clock::now());
  boost::chrono::milliseconds since_epoch_ms(
      boost::chrono::duration_cast<boost::chrono::milliseconds>(current.time_since_epoch()));
  std::string text_sample(test_utils::string_from_time_point(current));

  // Bind order: 0 = id (time UUID), 1 = event_time (ms), 2 = text_sample.
  cass_statement_bind_uuid(statement, 0, test_utils::generate_time_uuid(uuid_gen));
  cass_statement_bind_int64(statement, 1, since_epoch_ms.count());
  cass_statement_bind_string_n(statement, 2, text_sample.data(), text_sample.size());

  return execute_insert(statement);
}
/*
 * rewrite_service - handle one trivial-rewrite client request: rewrite an
 * address, or resolve it in regular or verify mode.  Runs once for every
 * request on a long-lived client connection.
 */
static void rewrite_service(VSTREAM *stream, char *unused_service, char **argv)
{
    int     status = -1;

#ifdef DETACH_AND_ASK_CLIENTS_TO_RECONNECT
    static time_t last;
    time_t  now;
    const char *table;

#endif

    /*
     * Sanity check. This service takes no command-line arguments.
     */
    if (argv[0])
	msg_fatal("unexpected command-line argument: %s", argv[0]);

    /*
     * Client connections are long-lived. Be sure to refresh timely.
     */
#ifdef DETACH_AND_ASK_CLIENTS_TO_RECONNECT
    /* At most once every 10 seconds, check for changed lookup tables
     * and, when found, drain the server so clients reconnect. */
    if (server_flags == 0 && (now = event_time()) - last > 10) {
	if ((table = dict_changed_name()) != 0) {
	    msg_info("table %s has changed -- restarting", table);
	    if (multi_server_drain() == 0)
		server_flags = 1;
	}
	last = now;
    }
#endif

    /*
     * This routine runs whenever a client connects to the UNIX-domain socket
     * dedicated to address rewriting. All connection-management stuff is
     * handled by the common code in multi_server.c.
     */
    if (attr_scan(stream, ATTR_FLAG_STRICT | ATTR_FLAG_MORE,
		  ATTR_TYPE_STR, MAIL_ATTR_REQ, command,
		  ATTR_TYPE_END) == 1) {
	if (strcmp(vstring_str(command), REWRITE_ADDR) == 0) {
	    status = rewrite_proto(stream);
	} else if (strcmp(vstring_str(command), RESOLVE_REGULAR) == 0) {
	    status = resolve_proto(&resolve_regular, stream);
	} else if (strcmp(vstring_str(command), RESOLVE_VERIFY) == 0) {
	    status = resolve_proto(&resolve_verify, stream);
	} else {
	    msg_warn("bad command %.30s", printable(vstring_str(command), '?'));
	}
    }
    /* Unknown command or protocol error: drop the client connection. */
    if (status < 0)
	multi_server_disconnect(stream);
}
/*
** spin_flush()
**
** Wait for the input stream to stop.
** Throw away all input characters.
**
** Fix: pass the array itself (which decays to unsigned char *) to read()
** instead of &buf, a pointer-to-array that only worked by address
** coincidence.
*/
void spin_flush(void)
{
    unsigned char buf[64];

    fflush(stdout);
    event_start(TIME_FLUSH);	/* start the timer */
    do {
	if (char_ready()) {
	    /* Drain pending input; the data itself is discarded. */
	    (void) read(fileno(stdin), buf, sizeof(buf));
	}
    } while (event_time(TIME_FLUSH) < 400000);	/* spin for 0.4 seconds */
}
/*
 * qmgr_active_done_3_generic - final disposition of a message: defer it
 * with a backoff delay when recipients remain, otherwise remove the queue
 * file; then destroy the in-core message.
 */
static void qmgr_active_done_3_generic(QMGR_MESSAGE *message)
{
    const char *myname = "qmgr_active_done_3_generic";
    int     delay;

    /*
     * Some recipients need to be tried again. Move the queue file time
     * stamps into the future by the amount of time that the message is
     * delayed, and move the message to the deferred queue. Impose minimal
     * and maximal backoff times.
     * 
     * Since we look at actual time in queue, not time since last delivery
     * attempt, backoff times will be distributed. However, we can still see
     * spikes in delivery activity because the interval between deferred
     * queue scans is finite.
     */
    if (message->flags) {
	if (message->create_time > 0) {
	    /* Delay grows with total time in queue, clamped to the
	     * [var_min_backoff_time, var_max_backoff_time] range. */
	    delay = event_time() - message->create_time;
	    if (delay > var_max_backoff_time)
		delay = var_max_backoff_time;
	    if (delay < var_min_backoff_time)
		delay = var_min_backoff_time;
	} else {
	    delay = var_min_backoff_time;
	}
	qmgr_active_defer(message->queue_name, message->queue_id,
			  MAIL_QUEUE_DEFERRED, delay);
    }

    /*
     * All recipients done. Remove the queue file.
     */
    else {
	if (mail_queue_remove(message->queue_name, message->queue_id)) {
	    /* msg_fatal() does not return; the warning below is reached
	     * only when the file was already gone (ENOENT). */
	    if (errno != ENOENT)
		msg_fatal("%s: remove %s from %s: %m", myname,
			  message->queue_id, message->queue_name);
	    msg_warn("%s: remove %s from %s: %m", myname,
		     message->queue_id, message->queue_name);
	} else {
	    /* Same format as logged by postsuper. */
	    msg_info("%s: removed", message->queue_id);
	}
    }

    /*
     * Finally, delete the in-core message structure.
     */
    qmgr_message_free(message);
}
bool create_and_execute_insert(const std::string &query, CassConsistency consistency) { CassUuid time_uuid = test_utils::generate_random_uuid(uuid_gen); std::string time_uuid_string = test_utils::Value<CassUuid>::to_string(time_uuid); boost::chrono::system_clock::time_point now(boost::chrono::system_clock::now()); boost::chrono::milliseconds event_time(boost::chrono::duration_cast<boost::chrono::milliseconds>(now.time_since_epoch())); std::string text_sample("'" + test_utils::string_from_time_point(now) + "'"); std::string simple_query = test_utils::replaceAll(query, "?", "%s"); simple_query = str(boost::format(simple_query) % time_uuid_string % event_time.count() % text_sample); test_utils::CassStatementPtr statement(cass_statement_new(simple_query.c_str(), 0)); cass_statement_set_consistency(statement.get(), consistency); return execute_insert(statement.get()); }
/*
 * qmgr_post_init - queue manager post-jail initialization: legacy-name
 * warning, configuration sanity fixes, process-lifetime overrides, and
 * the initial queue scans.
 */
static void qmgr_post_init(char *name, char **unused_argv)
{

    /*
     * Backwards compatibility.
     */
    if (strcmp(var_procname, "nqmgr") == 0) {
	msg_warn("please update the %s/%s file; the new queue manager",
		 var_config_dir, MASTER_CONF_FILE);
	msg_warn("(old name: nqmgr) has become the standard queue manager (new name: qmgr)");
	msg_warn("support for the name old name (nqmgr) will be removed from Postfix");
    }

    /*
     * Sanity check.
     */
    if (var_qmgr_rcpt_limit < var_qmgr_active_limit) {
	msg_warn("%s is smaller than %s - adjusting %s",
		 VAR_QMGR_RCPT_LIMIT, VAR_QMGR_ACT_LIMIT,
		 VAR_QMGR_RCPT_LIMIT);
	var_qmgr_rcpt_limit = var_qmgr_active_limit;
    }
    if (var_dsn_queue_time > var_max_queue_time) {
	msg_warn("%s is larger than %s - adjusting %s",
		 VAR_DSN_QUEUE_TIME, VAR_MAX_QUEUE_TIME,
		 VAR_DSN_QUEUE_TIME);
	var_dsn_queue_time = var_max_queue_time;
    }

    /*
     * This routine runs after the skeleton code has entered the chroot jail.
     * Prevent automatic process suicide after a limited number of client
     * requests or after a limited amount of idle time. Move any left-over
     * entries from the active queue to the incoming queue, and give them a
     * time stamp into the future, in order to allow ongoing deliveries to
     * finish first. Start scanning the incoming and deferred queues.
     * Left-over active queue entries are moved to the incoming queue because
     * the incoming queue has priority; moving left-overs to the deferred
     * queue could cause anomalous delays when "postfix reload/start" are
     * issued often. Override the IPC timeout (default 3600s) so that the
     * queue manager can reset a broken IPC channel before the watchdog timer
     * goes off.
     */
    var_ipc_timeout = var_qmgr_ipc_timeout;
    var_use_limit = 0;
    var_idle_limit = 0;
    qmgr_move(MAIL_QUEUE_ACTIVE, MAIL_QUEUE_INCOMING, event_time());
    qmgr_scans[QMGR_SCAN_IDX_INCOMING] = qmgr_scan_create(MAIL_QUEUE_INCOMING);
    qmgr_scans[QMGR_SCAN_IDX_DEFERRED] = qmgr_scan_create(MAIL_QUEUE_DEFERRED);
    qmgr_scan_request(qmgr_scans[QMGR_SCAN_IDX_INCOMING], QMGR_SCAN_START);
    qmgr_deferred_run_event(0, (void *) 0);
}
/*
 * anvil_remote_conn_update - register one new connection for a remote
 * client: create or update the per-client count/rate state, and link the
 * connection to the local server session for later cleanup.
 */
static ANVIL_REMOTE *anvil_remote_conn_update(VSTREAM *client_stream,
					      const char *ident)
{
    ANVIL_REMOTE *anvil_remote;
    ANVIL_LOCAL *anvil_local;
    const char *myname = "anvil_remote_conn_update";

    if (msg_verbose)
	msg_info("%s fd=%d stream=0x%lx ident=%s",
		 myname, vstream_fileno(client_stream),
		 (unsigned long) client_stream, ident);

    /*
     * Look up remote connection count information. Update remote connection
     * rate information. Simply reset the counter every var_anvil_time_unit
     * seconds. This is easier than maintaining a moving average and it gives
     * a quicker response to trespassers.
     */
    if ((anvil_remote =
	 (ANVIL_REMOTE *) htable_find(anvil_remote_map, ident)) == 0) {
	anvil_remote = (ANVIL_REMOTE *) mymalloc(sizeof(*anvil_remote));
	ANVIL_REMOTE_FIRST_CONN(anvil_remote, ident);
	htable_enter(anvil_remote_map, ident, (char *) anvil_remote);
	/* Track the cache high-water mark for statistics reporting. */
	if (max_cache_size < anvil_remote_map->used) {
	    max_cache_size = anvil_remote_map->used;
	    max_cache_time = event_time();
	}
    } else {
	ANVIL_REMOTE_NEXT_CONN(anvil_remote);
    }

    /*
     * Record this connection under the local server information, so that we
     * can clean up all its connection state when the local server goes away.
     */
    if ((anvil_local =
	 (ANVIL_LOCAL *) vstream_context(client_stream)) == 0) {
	anvil_local = (ANVIL_LOCAL *) mymalloc(sizeof(*anvil_local));
	ANVIL_LOCAL_INIT(anvil_local);
	vstream_control(client_stream,
			VSTREAM_CTL_CONTEXT, (void *) anvil_local,
			VSTREAM_CTL_END);
    }
    ANVIL_LOCAL_ADD_ONE(anvil_local, anvil_remote);
    if (msg_verbose)
	msg_info("%s: anvil_local 0x%lx",
		 myname, (unsigned long) anvil_local);

    return (anvil_remote);
}
/*
 * Stop a pending unique-item expiration event, persisting the remaining
 * lifetime into the item socket so it can be resumed later.
 *
 * NOTE(review): an identical definition of this method appears elsewhere
 * in this source — confirm which copy is actually compiled.
 */
void CItem::StopUniqueExpireEvent()
{
	if (!m_pkUniqueExpireEvent)
		return;

	// Translated from Korean: items other than play-time-limited ones
	// cannot stop their UniqueExpireEvent.
	if (GetValue(2) != 0)
		return;

	// HARD CODING
	if (GetVnum() == UNIQUE_ITEM_HIDE_ALIGNMENT_TITLE)
		m_pOwner->ShowAlignment(true);

	// Save the remaining time (event ticks / passes_per_sec —
	// presumably seconds; confirm event_time() units) before cancelling.
	SetSocket(ITEM_SOCKET_UNIQUE_SAVE_TIME,
		  event_time(m_pkUniqueExpireEvent) / passes_per_sec);
	event_cancel(&m_pkUniqueExpireEvent);
	ITEM_MANAGER::instance().SaveSingleItem(this);
}
/*
 * Stop a pending unique-item expiration event, persisting the remaining
 * lifetime into the item socket so it can be resumed later.
 *
 * NOTE(review): this method is defined twice in this source with the same
 * body — confirm which definition is intended and remove the other.
 */
void CItem::StopUniqueExpireEvent()
{
	if (!m_pkUniqueExpireEvent)
		return;

	// Translated from Korean: items other than play-time-limited ones
	// cannot stop their UniqueExpireEvent.
	if (GetValue(2) != 0)
		return;

	// HARD CODING
	if (GetVnum() == UNIQUE_ITEM_HIDE_ALIGNMENT_TITLE)
		m_pOwner->ShowAlignment(true);

	// Persist the remaining time (event ticks divided by passes_per_sec
	// — presumably seconds; verify event_time() units) then cancel.
	SetSocket(ITEM_SOCKET_UNIQUE_SAVE_TIME,
		  event_time(m_pkUniqueExpireEvent) / passes_per_sec);
	event_cancel(&m_pkUniqueExpireEvent);
	ITEM_MANAGER::instance().SaveSingleItem(this);
}
/*
 * psc_cache_validator - postscreen cache cleanup callback: keep a cache
 * entry while any of its tests expired more recently than the cache
 * retention time, so clients are not prematurely logged as "NEW" again.
 */
static int psc_cache_validator(const char *client_addr,
			               const char *stamp_str,
			               char *unused_context)
{
    PSC_STATE parsed;

    /*
     * This function is called by the cache cleanup pseudo thread.  Parse
     * the entry against a reference time shifted back by the retention
     * period; the entry survives only when nothing is left to do.
     */
    psc_parse_tests(&parsed, stamp_str, event_time() - var_psc_cache_ret);
    return ((parsed.flags & PSC_STATE_MASK_ANY_TODO) == 0);
}
/*
 * tty_sync_error - resynchronize with the terminal using ENQ/ACK.
 * Returns FALSE when the acknowledge string arrived cleanly on the first
 * attempt, TRUE when a retry was needed.
 */
int tty_sync_error(void)
{
    int ch, trouble, ack;

    trouble = FALSE;
    for (;;) {
	tt_putp(tty_ENQ);	/* send ENQ */
	ch = getnext(STRIP_PARITY);
	event_start(TIME_SYNC);	/* start the timer */

	/*
	 * The timer doesn't start until we get the first character. After
	 * that I expect to get the remaining characters of the acknowledge
	 * string in a short period of time. If that is not true then these
	 * characters are coming from the user and we need to send the ENQ
	 * sequence out again.
	 */
	for (ack = 0; ; ) {
	    /* Record the incoming ACK string (NUL-terminated as we go). */
	    if (ack < TTY_ACK_SIZE - 2) {
		tty_ACK[ack] = ch;
		tty_ACK[ack + 1] = '\0';
	    }
	    if (ch == ACK_terminator) {
		return trouble;
	    }
	    if (++ack >= ACK_length) {
		return trouble;
	    }
	    ch = getnext(STRIP_PARITY);
	    /* More than 0.4s between characters: not a real ACK. */
	    if (event_time(TIME_SYNC) > 400000) {
		break;
	    }
	}
	set_attr(0);	/* just in case */
	put_crlf();
	if (trouble) {
	    /* The terminal won't sync. Life is not good. */
	    return TRUE;
	}
	put_str(" -- sync -- ");
	trouble = TRUE;
    }
}
/*
 * transport_wildcard_init - probe the transport table for a "*" wildcard
 * entry and cache either the channel/nexthop result or the lookup error
 * on @tp, with a 30-second cache lifetime.
 */
static void transport_wildcard_init(TRANSPORT_INFO *tp)
{
    VSTRING *channel = vstring_alloc(10);
    VSTRING *nexthop = vstring_alloc(10);

    /*
     * Both channel and nexthop may be zero-length strings. Therefore we must
     * use something else to represent "wild-card does not exist". We use
     * null VSTRING pointers, for historical reasons.
     */
    if (tp->wildcard_channel)
	vstring_free(tp->wildcard_channel);
    if (tp->wildcard_nexthop)
	vstring_free(tp->wildcard_nexthop);

    /*
     * Technically, the wildcard lookup pattern is redundant. A static map
     * (keys always match, result is fixed string) could achieve the same:
     * 
     * transport_maps = hash:/etc/postfix/transport static:xxx:yyy
     * 
     * But the user interface of such an approach would be less intuitive. We
     * tolerate the continued existence of wildcard lookup patterns because
     * of human interface considerations.
     */
#define WILDCARD	"*"
#define FULL		0
#define PARTIAL		DICT_FLAG_FIXED

    if (find_transport_entry(tp, WILDCARD, "", FULL, channel, nexthop)) {
	tp->wildcard_errno = 0;
	tp->wildcard_channel = channel;
	tp->wildcard_nexthop = nexthop;
	if (msg_verbose)
	    msg_info("wildcard_{chan:hop}={%s:%s}",
		     vstring_str(channel), vstring_str(nexthop));
    } else {
	/* Lookup failure: remember the error, represent "no wildcard"
	 * with null VSTRING pointers (see comment above). */
	tp->wildcard_errno = tp->transport_path->error;
	vstring_free(channel);
	vstring_free(nexthop);
	tp->wildcard_channel = 0;
	tp->wildcard_nexthop = 0;
    }
    tp->expire = event_time() + 30;	/* XXX make configurable */
}
/*
 * pre_accept - runs before accepting a new client connection.  When some
 * lookup table changed, stop accepting new connections and finish the
 * existing ones in the background; table checks are rate-limited to at
 * most once per second.
 */
static void pre_accept(char *unused_name, char **unused_argv)
{
    static time_t last_check;
    time_t  now;
    const char *table;

    now = event_time();
    if (now >= last_check + 1 && (table = dict_changed_name()) != 0) {
	msg_info("table %s has changed - finishing in the background", table);
	event_server_drain();
    } else {
	last_check = now;
    }
}
/*
 * verify_cache_validator - cache cleanup callback for the address
 * verification cache: keep an entry while its probe is still in progress
 * or while its positive/negative status has not expired yet.
 */
static int verify_cache_validator(const char *addr, const char *raw_data,
				          void *context)
{
    VSTRING *get_buf = (VSTRING *) context;
    int     addr_status;
    long    probed;
    long    updated;
    char   *text;
    long    now = (long) event_time();

#define POS_OR_NEG_ENTRY_EXPIRED(stat, stamp) \
	(POSITIVE_ENTRY_EXPIRED((stat), (stamp)) \
	    || NEGATIVE_ENTRY_EXPIRED((stat), (stamp)))

    /* Work on a private copy; parsing modifies the buffer in place. */
    vstring_strcpy(get_buf, raw_data);
    return (verify_parse_entry(STR(get_buf), &addr_status,	/* syntax OK */
			       &probed, &updated, &text) == 0
	    && (now - probed < PROBE_TTL	/* probe in progress */
		|| !POS_OR_NEG_ENTRY_EXPIRED(addr_status, updated)));
}
/*
 * anvil_remote_lookup - report connection count and per-time-unit rates
 * for one remote client to the local server, without updating counters.
 */
static void anvil_remote_lookup(VSTREAM *client_stream, const char *ident)
{
    ANVIL_REMOTE *anvil_remote;
    const char *myname = "anvil_remote_lookup";

    if (msg_verbose)
	msg_info("%s fd=%d stream=0x%lx ident=%s",
		 myname, vstream_fileno(client_stream),
		 (unsigned long) client_stream, ident);

    /*
     * Look up remote client information.
     */
    if ((anvil_remote =
	 (ANVIL_REMOTE *) htable_find(anvil_remote_map, ident)) == 0) {
	/* Unknown client: report all-zero statistics. */
	attr_print_plain(client_stream, ATTR_FLAG_NONE,
			 ATTR_TYPE_INT, ANVIL_ATTR_STATUS, ANVIL_STAT_OK,
			 ATTR_TYPE_INT, ANVIL_ATTR_COUNT, 0,
			 ATTR_TYPE_INT, ANVIL_ATTR_RATE, 0,
			 ATTR_TYPE_INT, ANVIL_ATTR_MAIL, 0,
			 ATTR_TYPE_INT, ANVIL_ATTR_RCPT, 0,
			 ATTR_TYPE_INT, ANVIL_ATTR_NTLS, 0,
			 ATTR_TYPE_END);
    } else {

	/*
	 * Do not report stale information: expire the rates first when the
	 * current time unit has already passed.
	 */
	if (anvil_remote->start != 0
	    && anvil_remote->start + var_anvil_time_unit < event_time())
	    ANVIL_REMOTE_RSET_RATE(anvil_remote, 0);
	attr_print_plain(client_stream, ATTR_FLAG_NONE,
			 ATTR_TYPE_INT, ANVIL_ATTR_STATUS, ANVIL_STAT_OK,
			 ATTR_TYPE_INT, ANVIL_ATTR_COUNT, anvil_remote->count,
			 ATTR_TYPE_INT, ANVIL_ATTR_RATE, anvil_remote->rate,
			 ATTR_TYPE_INT, ANVIL_ATTR_MAIL, anvil_remote->mail,
			 ATTR_TYPE_INT, ANVIL_ATTR_RCPT, anvil_remote->rcpt,
			 ATTR_TYPE_INT, ANVIL_ATTR_NTLS, anvil_remote->ntls,
			 ATTR_TYPE_END);
    }
}
void CItem::StopAccessorySocketExpireEvent() { if (!m_pkAccessorySocketExpireEvent) return; if (!IsAccessoryForSocket()) return; int new_time = GetAccessorySocketDownGradeTime() - (60 - event_time(m_pkAccessorySocketExpireEvent) / passes_per_sec); event_cancel(&m_pkAccessorySocketExpireEvent); if (new_time <= 1) { AccessorySocketDegrade(); } else { SetAccessorySocketDownGradeTime(new_time); } }
/*
 * psc_whitelist_non_dnsbl - when a client's DNSBL score qualifies it for
 * whitelisting, mark the remaining non-DNSBL tests as passed and align
 * their expiration times with the DNSBL result's time-to-live.
 */
static void psc_whitelist_non_dnsbl(PSC_STATE *state)
{
    time_t  now;
    int     tindx;

    /*
     * If no tests failed (we can't undo those), and if the whitelist
     * threshold is met, flag non-dnsbl tests that are pending or disabled as
     * successfully completed, and set their expiration times equal to the
     * DNSBL expiration time, except for tests that would expire later.
     * 
     * Why flag disabled tests as passed? When a disabled test is turned on,
     * postscreen should not apply that test to clients that are already
     * whitelisted based on their combined DNSBL score.
     */
    if ((state->flags & PSC_STATE_MASK_ANY_FAIL) == 0
	&& state->dnsbl_score < var_psc_dnsbl_thresh
	&& var_psc_dnsbl_wthresh < 0
	&& state->dnsbl_score <= var_psc_dnsbl_wthresh) {
	now = event_time();
	for (tindx = 0; tindx < PSC_TINDX_COUNT; tindx++) {
	    /* The DNSBL test itself keeps its own state. */
	    if (tindx == PSC_TINDX_DNSBL)
		continue;
	    if ((state->flags & PSC_STATE_FLAG_BYTINDX_TODO(tindx))
		&& !(state->flags & PSC_STATE_FLAG_BYTINDX_PASS(tindx))) {
		if (msg_verbose)
		    msg_info("skip %s test for [%s]:%s",
			  psc_test_name(tindx), PSC_CLIENT_ADDR_PORT(state));
		/* Wrong for deep protocol tests, but we disable those. */
		state->flags |= PSC_STATE_FLAG_BYTINDX_DONE(tindx);
		/* This also disables pending deep protocol tests. */
		state->flags |= PSC_STATE_FLAG_BYTINDX_PASS(tindx);
	    }
	    /* Update expiration even if the test was completed or disabled. */
	    if (state->expire_time[tindx] < now + var_psc_dnsbl_ttl)
		state->expire_time[tindx] = now + var_psc_dnsbl_ttl;
	}
    }
}