static int
select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
    int res, i, j;
    struct selectop *sop = arg;

    check_selectop(sop);

    memcpy(sop->event_readset_out, sop->event_readset_in, sop->event_fdsz);
    memcpy(sop->event_writeset_out, sop->event_writeset_in, sop->event_fdsz);

    res = select(sop->event_fds + 1, sop->event_readset_out,
        sop->event_writeset_out, NULL, tv);

    check_selectop(sop);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("select");
            return (-1);
        }
        evsignal_process(base);
        return (0);
    } else if (base->sig.evsignal_caught) {
        evsignal_process(base);
    }

    event_debug(("%s: select reports %d", __func__, res));

    check_selectop(sop);
    i = arc4random_uniform(sop->event_fds + 1);
    for (j = 0; j <= sop->event_fds; ++j) {
        struct event *r_ev = NULL, *w_ev = NULL;
        if (++i >= sop->event_fds + 1)
            i = 0;

        res = 0;
        if (FD_ISSET(i, sop->event_readset_out)) {
            r_ev = sop->event_r_by_fd[i];
            res |= EV_READ;
        }
        if (FD_ISSET(i, sop->event_writeset_out)) {
            w_ev = sop->event_w_by_fd[i];
            res |= EV_WRITE;
        }
        if (r_ev && (res & r_ev->ev_events)) {
            event_active(r_ev, res & r_ev->ev_events, 1);
        }
        if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
            event_active(w_ev, res & w_ev->ev_events, 1);
        }
    }
    check_selectop(sop);

    return (0);
}
int
select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
    int res, i;
    struct selectop *sop = arg;

    check_selectop(sop);

    memcpy(sop->event_readset_out, sop->event_readset_in, sop->event_fdsz);
    memcpy(sop->event_writeset_out, sop->event_writeset_in, sop->event_fdsz);

    res = select(sop->event_fds + 1, sop->event_readset_out,
        sop->event_writeset_out, NULL, tv);

    check_selectop(sop);

    if (res == -1) {
        if (errno != EINTR) {
            event_warn("select");
            return (-1);
        }
        evsignal_process();
        return (0);
    } else if (evsignal_caught)
        evsignal_process();

    event_debug(("%s: select reports %d", __func__, res));

    check_selectop(sop);
    for (i = 0; i <= sop->event_fds; ++i) {
        struct event *r_ev = NULL, *w_ev = NULL;
        res = 0;
        if (FD_ISSET(i, sop->event_readset_out)) {
            r_ev = sop->event_r_by_fd[i];
            res |= EV_READ;
        }
        if (FD_ISSET(i, sop->event_writeset_out)) {
            w_ev = sop->event_w_by_fd[i];
            res |= EV_WRITE;
        }
        if (r_ev && (res & r_ev->ev_events)) {
            if (!(r_ev->ev_events & EV_PERSIST))
                event_del(r_ev);
            event_active(r_ev, res & r_ev->ev_events, 1);
        }
        if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
            if (!(w_ev->ev_events & EV_PERSIST))
                event_del(w_ev);
            event_active(w_ev, res & w_ev->ev_events, 1);
        }
    }
    check_selectop(sop);

    return (0);
}
void
evsignal_process(struct event_base *base)
{
    struct evsignal_info *sig = &base->sig;
    struct event *ev, *next_ev;
    sig_atomic_t ncalls;
    int i;

    base->sig.evsignal_caught = 0;
    for (i = 1; i < NSIG; ++i) {
        ncalls = sig->evsigcaught[i];
        if (ncalls == 0)
            continue;
        sig->evsigcaught[i] -= ncalls;

        for (ev = TAILQ_FIRST(&sig->evsigevents[i]);
            ev != NULL; ev = next_ev) {
            next_ev = TAILQ_NEXT(ev, ev_signal_next);
            if (!(ev->ev_events & EV_PERSIST))
                event_del(ev);
            event_active(ev, EV_SIGNAL, ncalls);
        }
    }
}
void run_in_event_loop(
    const lambda::function<void(void)>& f,
    EventLoopLogicFlow event_loop_logic_flow)
{
  if (__in_event_loop__ && event_loop_logic_flow == ALLOW_SHORT_CIRCUIT) {
    f();
    return;
  }

  synchronized (functions_mutex) {
    functions->push(f);

    // Add an event and activate it to interrupt the event loop.
    // TODO(jmlvanre): after libevent v 2.1 we can use
    // event_self_cbarg instead of re-assigning the event. For now we
    // manually re-assign the event to pass in the pointer to the
    // event itself as the callback argument.
    event* ev = evtimer_new(base, async_function, NULL);

    // 'event_assign' is only valid on non-pending AND non-active
    // events. This means we have to assign the callback before
    // calling 'event_active'.
    if (evtimer_assign(ev, base, async_function, ev) < 0) {
      LOG(FATAL) << "Failed to assign callback on event";
    }

    event_active(ev, EV_TIMEOUT, 0);
  }
}
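/* Hedged sketch, not from the sources above: the pattern the TODO refers to,
 * available since libevent 2.1. event_self_cbarg() returns a placeholder that
 * event_new()/evtimer_new() replace with the new event's own pointer, so the
 * manual evtimer_assign() step goes away. The callback here is assumed to
 * free the one-shot event it receives. */
#include <event2/event.h>

static void async_function(evutil_socket_t fd, short what, void *arg)
{
    struct event *ev = arg;  /* the event itself, via event_self_cbarg() */
    (void)fd;
    (void)what;
    /* ... drain the queued functions ... */
    event_free(ev);          /* one-shot event: dispose of it here */
}

static void interrupt_event_loop(struct event_base *base)
{
    struct event *ev = evtimer_new(base, async_function, event_self_cbarg());
    if (ev != NULL)
        event_active(ev, EV_TIMEOUT, 0);
}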
void HTTPEvent::trigger(struct timeval* tv)
{
    if (tv == NULL)
        event_active(ev, 0, 0); // immediately trigger event in main thread
    else
        evtimer_add(ev, tv); // trigger after timeval passed
}
void async_file::write_cycle()
{
    int read_bs = 0;

    lock();
    read_bs = get_read_bs();
    unlock();

    while (!error && (read_bs > 0)) {
        int bytes = write_to_file(get_read_ptr(), read_bs);
        if (bytes < 0) {
            error = true;
            ERROR("Error detected: stopped writing");
            break;
        }

        lock();
        skip(bytes);
        read_bs = get_read_bs();
        unlock();
    }

    lock();
    if (closed) {
        if (error || !fifo_buffer::get_buffered_bytes())
            on_flushed();
        else
            event_active(ev_write, 0, 0);
    }
    unlock();
}
void start_new_job(curl_handler* _curl_handler)
{
    if (_curl_handler->keep_working_) {
        event_active(_curl_handler->start_task_event_, 0, 0);
    }
}
static void
t1func(evutil_socket_t fd, short what, void *arg)
{
    struct event *t1 = (struct event *)arg;
    struct event *t2;

    fprintf(stderr, "CAUGHT EVENT\n");
    fflush(stderr);
    event_del(t1);
    free(t1);
    loops++;
    if (loops < 10) {
        t2 = (struct event *)malloc(sizeof(struct event));
        if (event_assign(t2, base, -1, EV_WRITE, t1func, t2) < 0) {
            die("event_assign_term");
        }
        if (event_priority_set(t2, TERMPRI) < 0) {
            die("event_priority_set_term");
        }
        fprintf(stderr, "EVENT %d DEFINED\n", loops);
        fflush(stderr);
        event_active(t2, EV_WRITE, 1);
        fprintf(stderr, "EVENT %d ACTIVATED\n", loops);
        fflush(stderr);
    }
}
tr_watchdir_backend *
tr_watchdir_kqueue_new (tr_watchdir_t handle)
{
  const char * const path = tr_watchdir_get_path (handle);
  struct kevent ke;
  tr_watchdir_kqueue * backend;

  backend = tr_new0 (tr_watchdir_kqueue, 1);
  backend->base.free_func = &tr_watchdir_kqueue_free;
  backend->kq = -1;
  backend->dirfd = -1;

  if ((backend->kq = kqueue ()) == -1)
    {
      log_error ("Failed to start kqueue");
      goto fail;
    }

  /* Open fd for watching */
  if ((backend->dirfd = open (path, O_RDONLY | O_EVTONLY)) == -1)
    {
      log_error ("Failed to passively watch directory \"%s\": %s",
                 path, tr_strerror (errno));
      goto fail;
    }

  /* Register kevent filter with kqueue descriptor */
  EV_SET (&ke, backend->dirfd, EVFILT_VNODE, EV_ADD | EV_ENABLE | EV_CLEAR,
          KQUEUE_WATCH_MASK, 0, NULL);
  if (kevent (backend->kq, &ke, 1, NULL, 0, NULL) == -1)
    {
      log_error ("Failed to set directory event filter with fd %d: %s",
                 backend->kq, tr_strerror (errno));
      goto fail;
    }

  /* Create libevent task for event descriptor */
  if ((backend->event = event_new (tr_watchdir_get_event_base (handle),
                                   backend->kq,
                                   EV_READ | EV_ET | EV_PERSIST,
                                   &tr_watchdir_kqueue_on_event,
                                   handle)) == NULL)
    {
      log_error ("Failed to create event: %s", tr_strerror (errno));
      goto fail;
    }

  if (event_add (backend->event, NULL) == -1)
    {
      log_error ("Failed to add event: %s", tr_strerror (errno));
      goto fail;
    }

  /* Trigger one event for the initial scan */
  event_active (backend->event, EV_READ, 0);

  return BACKEND_DOWNCAST (backend);

fail:
  tr_watchdir_kqueue_free (BACKEND_DOWNCAST (backend));
  return NULL;
}
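/* Hedged generic sketch (not from Transmission): the "trigger one event for
 * the initial scan" trick above is a common idiom. Register a persistent
 * event, add it, then call event_active() once so the callback runs
 * immediately instead of waiting for the first kernel notification. All
 * names below are illustrative only. */
#include <event2/event.h>

static void on_readable(evutil_socket_t fd, short what, void *arg)
{
    (void)fd;
    (void)arg;
    if (what & EV_READ) {
        /* ... scan for new data; EV_READ is set both for real readiness
         * and for the manual kick below ... */
    }
}

static struct event *watch_fd(struct event_base *base, evutil_socket_t fd)
{
    struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST,
                                 on_readable, NULL);
    if (ev == NULL || event_add(ev, NULL) == -1)
        return NULL;
    event_active(ev, EV_READ, 0);   /* force one initial callback run */
    return ev;
}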
void
levent_send_now(struct lldpd *cfg)
{
    struct lldpd_hardware *hardware;
    TAILQ_FOREACH(hardware, &cfg->g_hardware, h_entries)
        event_active(hardware->h_timer, EV_TIMEOUT, 1);
}
//
// Action handler that shuts down the server gracefully
//
static void shutdown_action(struct owl_state* state)
{
    TRACE("Asked to shutdown...\n");
    state->state = OWL_STATE_SHUTTING_DOWN;
    event_del(state->sigint);
    event_active(state->sigint, 0, 1);
}
void http_engine_stop()
{
    int i;

    if (!g_http_engine) {
        return;
    }

    zlog_info(log_get_cat_http(), "Stopping HTTP engine");

    /* Stop all the workers */
    pthread_mutex_lock(&g_http_engine->lock);
    if (g_http_engine->workers_running) {
        for (i = 0; i < g_http_engine->total_workers; i++) {
            if (g_http_engine->workers[i].worker &&
                g_http_engine->workers[i].user_event) {
                event_active(g_http_engine->workers[i].user_event,
                    EV_READ|EV_WRITE, 1);
            }
        }
    }
    pthread_mutex_unlock(&g_http_engine->lock);

    /* Wait here for the threads to stop */
    http_engine_wait_for_completion();

    /* Save the test results */
    http_engine_save_test_results();
}
/* Loop evbase */
int evepoll_loop(EVBASE *evbase, int loop_flags, struct timeval *tv)
{
    int i = 0, n = 0, timeout = -1, flags = 0, ev_flags = 0, fd = 0, event = 0;
    struct epoll_event *evp = NULL;
    EVENT *ev = NULL;

    if (evbase) {
        if (tv) {
            timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
        }
        //memset(evbase->evs, 0, sizeof(struct epoll_event) * evbase->allowed);
        n = epoll_wait(evbase->efd, (struct epoll_event *)(evbase->evs),
                evbase->allowed, timeout);
        //n = epoll_wait(evbase->efd, (struct epoll_event *)evbase->evs, evbase->maxfd+1, timeout);
        if (n <= 0) {
            if (n < 0) {
                fprintf(stderr, "epoll_wait(%d, %p, %d, %d) failed, %s\n",
                        evbase->efd, evbase->evs, evbase->maxfd, timeout,
                        strerror(errno));
            }
            return n;
        }
        //WARN_LOG("loop()=> %d", n);
        for (i = 0; i < n; i++) {
            evp = &(((struct epoll_event *)evbase->evs)[i]);
            ev = (EVENT *)evp->data.ptr;
            if (ev == NULL) continue;
            fd = ev->ev_fd;
            flags = evp->events;
            //fd = evp->data.fd;
            if (fd >= 0 && fd < evbase->allowed && evbase->evlist[fd]
                    && ev == evbase->evlist[fd]) {
                ev_flags = 0;
                if (flags & (EPOLLHUP|EPOLLERR)) {
                    ev_flags = E_READ|E_WRITE;
                } else {
                    if (flags & EPOLLIN) ev_flags |= E_READ;
                    if (flags & EPOLLOUT) ev_flags |= E_WRITE;
                }
                //event = (ev_flags & ev->ev_flags);
                //if((ev_flags &= ev->ev_flags))
                if (ev_flags) {
                    event_active(ev, ev_flags);
                } else {
                    WARN_LOGGER(evbase->logger, "ev:%p fd:%d evflags:%d event:%d",
                            ev, fd, ev->ev_flags, ev_flags);
                    //evepoll_update(evbase, ev);
                }
            }
        }
        //WARN_LOG("over_loop()=> %d", n);
    }
    return n;
}
static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
    struct kqop *kqop = arg;
    struct kevent *changes = kqop->changes;
    struct kevent *events = kqop->events;
    struct event *ev;
    struct timespec ts, *ts_p = NULL;
    int i, res;

    if (tv != NULL) {
        TIMEVAL_TO_TIMESPEC(tv, &ts);
        ts_p = &ts;
    }

    res = kevent(kqop->kq, changes, kqop->nchanges,
        events, kqop->nevents, ts_p);
    kqop->nchanges = 0;
    if (res == -1) {
        if (errno != EINTR) {
            event_warn("kevent");
            return (-1);
        }
        return (0);
    }

    event_debug(("%s: kevent reports %d", __func__, res));

    for (i = 0; i < res; i++) {
        int which = 0;

        if (events[i].flags & EV_ERROR) {
            if (events[i].data == EBADF ||
                events[i].data == EINVAL ||
                events[i].data == ENOENT)
                continue;
            errno = events[i].data;
            return (-1);
        }

        if (events[i].filter == EVFILT_READ) {
            which |= EV_READ;
        } else if (events[i].filter == EVFILT_WRITE) {
            which |= EV_WRITE;
        } else if (events[i].filter == EVFILT_SIGNAL) {
            which |= EV_SIGNAL;
        }

        if (!which)
            continue;

        if (events[i].filter == EVFILT_SIGNAL) {
            struct event_list *head =
                (struct event_list *)events[i].udata;
            TAILQ_FOREACH(ev, head, ev_signal_next) {
                event_active(ev, which, events[i].data);
            }
        } else {
            /* The original excerpt ends here; the closing below is a
             * minimal reconstruction, not verbatim: activate the fd
             * event attached to this kevent. */
            ev = (struct event *)events[i].udata;
            event_active(ev, which, 1);
        }
    }

    return (0);
}
GithubWebhooks::~GithubWebhooks()
{
    if (_breakLoop)
        event_active(_breakLoop, EV_READ, 0);
    _httpServer.join();
    if (_evhttp)
        evhttp_free(_evhttp);
}
static void
levent_init(struct lldpd *cfg)
{
    /* Setup libevent */
    log_debug("event", "initialize libevent");
    event_set_log_callback(levent_log_cb);
    if (!(cfg->g_base = event_base_new()))
        fatalx("unable to create a new libevent base");
    log_info("event", "libevent %s initialized with %s method",
        event_get_version(),
        event_base_get_method(cfg->g_base));

    /* Setup SNMP */
#ifdef USE_SNMP
    if (cfg->g_snmp) {
        agent_init(cfg, cfg->g_snmp_agentx);
        cfg->g_snmp_timeout = evtimer_new(cfg->g_base,
            levent_snmp_timeout, cfg);
        if (!cfg->g_snmp_timeout)
            fatalx("unable to setup timeout function for SNMP");
        if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
            fatalx("unable to allocate memory for SNMP events");
        TAILQ_INIT(levent_snmp_fds(cfg));
    }
#endif

    /* Setup loop that will run every X seconds. */
    log_debug("event", "register loop timer");
    if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0,
                levent_update_and_send, cfg)))
        fatalx("unable to setup main timer");
    event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

    /* Setup unix socket */
    log_debug("event", "register Unix socket");
    TAILQ_INIT(&lldpd_clients);
    evutil_make_socket_nonblocking(cfg->g_ctl);
    if ((cfg->g_ctl_event = event_new(cfg->g_base, cfg->g_ctl,
                EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
        fatalx("unable to setup control socket event");
    event_add(cfg->g_ctl_event, NULL);

    /* Signals */
    log_debug("event", "register signals");
    signal(SIGHUP, SIG_IGN);
    evsignal_add(evsignal_new(cfg->g_base, SIGUSR1,
            levent_dump, cfg->g_base), NULL);
    evsignal_add(evsignal_new(cfg->g_base, SIGINT,
            levent_stop, cfg->g_base), NULL);
    evsignal_add(evsignal_new(cfg->g_base, SIGTERM,
            levent_stop, cfg->g_base), NULL);
}
static THREAD_FN
register_events_subthread(void *arg)
{
    struct timeval tv = {0, 0};
    SLEEP_MS(100);
    event_active(&time_events[0], EV_TIMEOUT, 1);
    SLEEP_MS(100);
    event_active(&time_events[1], EV_TIMEOUT, 1);
    SLEEP_MS(100);
    tv.tv_usec = 100*1000;
    event_add(&time_events[2], &tv);
    tv.tv_usec = 150*1000;
    event_add(&time_events[3], &tv);
    SLEEP_MS(200);
    event_active(&time_events[4], EV_TIMEOUT, 1);
    THREAD_RETURN();
}
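/* Hedged sketch (an assumption, not part of the test above): event_active()
 * may be called from a thread other than the one running the loop, provided
 * locking was enabled first with evthread_use_pthreads() (link with
 * -levent_pthreads). A minimal cross-thread wakeup could look like this. */
#include <event2/event.h>
#include <event2/thread.h>
#include <pthread.h>

static struct event_base *base;

static void on_wakeup(evutil_socket_t fd, short what, void *arg)
{
    (void)fd; (void)what; (void)arg;
    event_base_loopbreak(base);          /* work delivered; leave the loop */
}

static void *producer(void *arg)
{
    struct event *ev = arg;
    event_active(ev, EV_TIMEOUT, 1);     /* safe cross-thread wakeup */
    return NULL;
}

int main(void)
{
    pthread_t th;
    struct event *ev;

    evthread_use_pthreads();             /* must precede event_base_new() */
    base = event_base_new();
    ev = event_new(base, -1, EV_PERSIST, on_wakeup, NULL);
    event_add(ev, NULL);                 /* keeps the loop alive until woken */

    pthread_create(&th, NULL, producer, ev);
    event_base_dispatch(base);
    pthread_join(th, NULL);

    event_free(ev);
    event_base_free(base);
    return 0;
}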
void core::curl_handler::add_task(
    priority_t _priority,
    milliseconds_t _timeout,
    CURL* _handle,
    const completion_handler_t& _completion_handler)
{
    {
        boost::lock_guard<boost::mutex> lock(jobs_mutex_);
        pending_jobs_.emplace(_priority, _timeout, _handle, _completion_handler);
    }

    event_active(start_task_event_, 0, 0);
}
int main(int argc, char **argv)
{
    base = event_base_new();

    //ev = event_new(base, -1, EV_PERSIST|EV_READ, cb, NULL);
    ev = event_new(base, -1, EV_PERSIST|EV_READ, cb, NULL);
    ev2 = event_new(base, -1, EV_PERSIST|EV_READ, cb2, NULL);
    event_add(ev, NULL);
    event_add(ev2, NULL);

    //event_active(ev, EV_WRITE, 0);
    event_active(ev, 0, 0);

    event_base_dispatch(base);
    return 0;
}
static int msgpack_write(void *data, const char *buf, unsigned int len)
{
    struct tmate_encoder *encoder = data;

    evbuffer_add(encoder->buffer, buf, len);

    if ((encoder->ev_readable.ev_flags & EVLIST_INSERTED) &&
        !(encoder->ev_readable.ev_flags & EVLIST_ACTIVE)) {
        event_active(&encoder->ev_readable, EV_READ, 0);
    }

    return 0;
}
static void
test_fin_within_cb(void *arg)
{
    struct basic_test_data *data = arg;
    struct event_base *base = data->base;
    struct event_and_count evc1, evc2;
    evc1.count = evc2.count = 0;
    evc2.ev2 = evc1.ev = evtimer_new(base, timer_callback_2, &evc1);
    evc1.ev2 = evc2.ev = evtimer_new(base, timer_callback_2, &evc2);

    /* Activate both. The first one will have its callback run, which
     * will finalize both of them, preventing the second one's callback
     * from running. */
    event_active(evc1.ev, EV_TIMEOUT, 1);
    event_active(evc2.ev, EV_TIMEOUT, 1);

    event_base_dispatch(base);
    tt_int_op(evc1.count, ==, 101);
    tt_int_op(evc2.count, ==, 100);

    event_base_assert_ok_(base);

    /* Now try with EV_PERSIST events. */
    evc1.count = evc2.count = 0;
    evc2.ev2 = evc1.ev = event_new(base, -1, EV_PERSIST,
        timer_callback_2, &evc1);
    evc1.ev2 = evc2.ev = event_new(base, -1, EV_PERSIST,
        timer_callback_2, &evc2);

    event_active(evc1.ev, EV_TIMEOUT, 1);
    event_active(evc2.ev, EV_TIMEOUT, 1);

    event_base_dispatch(base);
    tt_int_op(evc1.count, ==, 101);
    tt_int_op(evc2.count, ==, 100);

    event_base_assert_ok_(base);
end:
    ;
}
void read_cb(evutil_socket_t fd, short evtype, void *arg)
{
    printf("read_cb\n");

    char buf[1024];
    int ret = read(fd, buf, 1024);
    buf[ret] = '\0';
    printf("read == %s\n", buf);

    /* Test the event_active and event_pending functions. */
    if (event_pending(evw, EV_WRITE, NULL) == 0) {
        printf("evw is not pending; making it active now.\n");
        /* event_active works whether or not the event is pending. */
        event_active(evw, EV_WRITE, 1);
    }
}
static int on_encoder_write(void *userdata, const char *buf, size_t len)
{
    struct tmate_encoder *encoder = userdata;

    if (evbuffer_add(encoder->buffer, buf, len) < 0)
        tmate_fatal("Cannot buffer encoded data");

    if (!encoder->ev_active) {
        event_active(&encoder->ev_buffer, EV_READ, 0);
        encoder->ev_active = true;
    }

    return 0;
}
static int s1_fencenb(opal_list_t *procs, int collect_data,
                      opal_pmix_op_cbfunc_t cbfunc, void *cbdata)
{
    pmi_opcaddy_t *op;

    /* thread-shift this so we don't block in SLURM's barrier */
    op = OBJ_NEW(pmi_opcaddy_t);
    op->opcbfunc = cbfunc;
    op->cbdata = cbdata;
    event_assign(&op->ev, opal_pmix_base.evbase, -1,
                 EV_WRITE, fencenb, op);
    event_active(&op->ev, EV_WRITE, 1);
    return OPAL_SUCCESS;
}
/* Loop evbase */
int evselect_loop(EVBASE *evbase, short loop_flag, struct timeval *tv)
{
    int i = 0, n = 0;
    short ev_flags = 0;
    fd_set rd_fd_set, wr_fd_set;
    EVENT *ev = NULL;
    struct timeval timeout = {0};

    //if(evbase && evbase->nfd > 0)
    if (evbase) {
        FD_ZERO(&rd_fd_set);
        memcpy(&rd_fd_set, evbase->ev_read_fds, sizeof(fd_set));
        FD_ZERO(&wr_fd_set);
        memcpy(&wr_fd_set, evbase->ev_write_fds, sizeof(fd_set));
        if (tv == NULL) {
            timeout.tv_sec = 0;
            timeout.tv_usec = 1000;
            tv = &timeout;
        }
        n = select(evbase->allowed, &rd_fd_set, &wr_fd_set, NULL, tv);
        //fprintf(stdout, "%s::%d n:%d\n", __FILE__, __LINE__, n);
        if (n <= 0) return n;
        DEBUG_LOGGER(evbase->logger, "Activated %d events in %d", n, evbase->allowed);
        for (i = 0; i < evbase->allowed; ++i) {
            if ((ev = evbase->evlist[i])) {
                ev_flags = 0;
                if (FD_ISSET(i, &rd_fd_set)) {
                    ev_flags |= E_READ;
                }
                if (FD_ISSET(i, &wr_fd_set)) {
                    ev_flags |= E_WRITE;
                }
                if (ev_flags == 0) continue;
                /* only deliver the flags the event asked for */
                if ((ev_flags &= ev->ev_flags)) {
                    event_active(ev, ev_flags);
                }
            }
        }
    }
    return n;
}
// retrieve file buffer from local storage
// if success == TRUE then "buf" contains "size" bytes of data
void cache_mng_retrieve_file_buf (CacheMng *cmng, fuse_ino_t ino, size_t size, off_t off,
    cache_mng_on_retrieve_file_buf_cb on_retrieve_file_buf_cb, void *ctx)
{
    struct _CacheContext *context;
    struct _CacheEntry *entry;

    context = cache_context_create (size, ctx);
    context->cb.retrieve_cb = on_retrieve_file_buf_cb;

    entry = g_hash_table_lookup (cmng->h_entries, GUINT_TO_POINTER (ino));

    if (entry && range_contain (entry->avail_range, off, off + size)) {
        int fd;
        ssize_t res;
        char path[PATH_MAX];

        if (ino != entry->ino) {
            LOG_err (CMNG_LOG, "Requested inode doesn't match hashed key!");
            if (context->cb.retrieve_cb)
                context->cb.retrieve_cb (NULL, 0, FALSE, context->user_ctx);
            cache_context_destroy (context);
            return;
        }

        cache_mng_file_name (cmng, path, sizeof (path), ino);
        fd = open (path, O_RDONLY);
        context->buf = g_malloc (size);
        res = pread (fd, context->buf, size, off);
        close (fd);

        context->success = (res == (ssize_t) size);
        if (!context->success) {
            g_free (context->buf);
            context->buf = NULL;
        }

        // move entry to the front of q_lru
        g_queue_unlink (cmng->q_lru, entry->ll_lru);
        g_queue_push_head_link (cmng->q_lru, entry->ll_lru);
    } else {
        LOG_debug (CMNG_LOG, "Entry isn't found or doesn't contain requested range: %"INO_FMT, INO ino);
    }

    context->ev = event_new (application_get_evbase (cmng->app), -1, 0,
        cache_read_cb, context);
    // fire this event at once
    event_active (context->ev, 0, 0);
    event_add (context->ev, NULL);
}
int async_file::writev(const struct iovec *iov, int iovcnt)
{
    AmLock _l(*this);

    if (closed) return Closed;
    if (error) return Error;

    int ret = fifo_buffer::writev(iov, iovcnt);

    if (fifo_buffer::get_buffered_bytes() >= write_thresh) {
        event_active(ev_write, 0, 0);
    }

    if (ret < 0) return BufferFull;

    return ret;
}
int async_file::write(const void* buf, unsigned int len)
{
    AmLock _l(*this);

    if (closed) return Closed;
    if (error) return Error;

    int ret = fifo_buffer::write(buf, len);

    if (fifo_buffer::get_buffered_bytes() >= write_thresh) {
        event_active(ev_write, 0, 0);
    }

    if (ret < 0) return BufferFull;

    return ret;
}
int main(int argc, char **argv)
{
    struct event ev;
    struct event *t1;

    event_enable_debug_mode();

    fprintf(stderr, "Libevent %s\n", event_get_version());
    fflush(stderr);

    if (!(base = event_base_new()))
        die("event_base_new");
    if (event_base_priority_init(base, 8) < 0)
        die("event_base_priority_init");

    if (event_assign(&ev, base, SIGTERM, EV_SIGNAL|EV_PERSIST, cbfunc, NULL) < 0)
        die("event_assign");
    if (event_priority_set(&ev, SIGPRI) < 0)
        die("event_priority_set");
    if (event_add(&ev, NULL) < 0)
        die("event_add");
    fprintf(stderr, "SIGNAL EVENT DEFINED\n");
    fflush(stderr);

    t1 = (struct event *)malloc(sizeof(struct event));
    if (event_assign(t1, base, -1, EV_WRITE, t1func, t1) < 0) {
        die("event_assign_term");
    }
    if (event_priority_set(t1, TERMPRI) < 0) {
        die("event_priority_set_term");
    }
    event_active(t1, EV_WRITE, 1);
    fprintf(stderr, "FIRST TERMINATION EVENT DEFINED\n");
    fflush(stderr);

    /* event_dispatch(base); */
    while (run) {
        event_base_loop(base, EVLOOP_ONCE);
    }

    fprintf(stderr, "EXITED LOOP - FREEING BASE\n");
    fflush(stderr);
    event_base_free(base);
    return 0;
}
static void chassis_event_thread_update_conn_status(chassis_event_thread_t *thread)
{
    network_mysqld_con *conn = NULL;
    GList *gl_conn = NULL;
    network_mysqld_con_lua_t *st = NULL;

    g_assert(thread != NULL);

    gl_conn = thread->connection_list;
    while (gl_conn) {
        conn = gl_conn->data;
        st = conn->plugin_con_state;

        if (chassis_is_shutdown_normal() &&
            g_atomic_int_get(&conn->conn_status.exit_phase) != CON_EXIT_TX) {
            g_atomic_int_set(&conn->conn_status.exit_begin_time, time(NULL));
            g_atomic_int_set(&conn->conn_status.exit_phase, CON_EXIT_TX);
        }

        if (g_atomic_int_get(&conn->conn_status.exit_phase) == CON_EXIT_KILL ||
            g_atomic_int_get(&conn->conn_status.exit_phase) == CON_EXIT_TX) {
            /*|| (st != NULL && st->backend != NULL && IS_BACKEND_WAITING_EXIT(st->backend)))*/
            struct event *ev = NULL;
            gchar *event_msg = NULL;
            int pending = event_pending(&conn->client->event,
                                        EV_READ|EV_WRITE|EV_TIMEOUT, NULL);
            if (pending) {
                ev = &conn->client->event;
                event_msg = "client";
            } else {
                pending = event_pending(&conn->server->event,
                                        EV_READ|EV_WRITE|EV_TIMEOUT, NULL);
                ev = &conn->server->event;
                event_msg = "server";
            }

            if (pending != 0) {
                /*
                 * The final argument (1) is the number of times the callback
                 * should run for this manually activated event; the parameter
                 * is obsolete as of libevent 2.0.
                 */
                g_log_dbproxy(g_debug, "pending %s's %d event", event_msg, pending);
                event_active(ev, pending, 1);
            }
        }

        gl_conn = g_list_next(gl_conn);
    }
}