/* Entry point for the curl multi + libevent example.
 * Sets up a libevent base, a FIFO for incoming URLs, and a curl multi
 * handle whose socket/timeout activity is driven by libevent callbacks
 * (sock_cb / multi_timer_cb registered below). */
int main(int argc, char **argv)
{
  GlobalInfo g;
  (void)argc;
  (void)argv;

  memset(&g, 0, sizeof(GlobalInfo));
  g.evbase = event_base_new();
  init_fifo(&g);
  g.multi = curl_multi_init();
  /* Single timer used by multi_timer_cb to drive curl's timeouts. */
  g.timer_event = evtimer_new(g.evbase, timer_cb, &g);

  /* setup the generic multi interface options we want */
  curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
  curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);
  curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
  curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);

  /* we don't call any curl_multi_socket*() function yet as we have no handles added! */
  event_base_dispatch(g.evbase);

  /* this, of course, won't get called since only way to stop this program
     is via ctrl-C, but it is here to show how cleanup /would/ be done. */
  clean_fifo(&g);
  event_free(g.timer_event);
  event_base_free(g.evbase);
  curl_multi_cleanup(g.multi);
  return 0;
}
/* Allocate a tgl timer on TLS's event base.
 * The (TLS, cb, arg) triple is packed into a 3-slot heap array that is
 * handed to timer_alarm as the callback argument.  Returns an opaque
 * struct tgl_timer* (really a libevent event*), or NULL on OOM.
 * NOTE(review): p[1] stores a function pointer in a void* slot — not
 * strictly portable C, but preserved from the original; timer_alarm
 * must cast it back accordingly. */
struct tgl_timer *tgl_timer_alloc (struct tgl_state *TLS, void (*cb)(struct tgl_state *TLS, void *arg), void *arg) {
  void **p = malloc (sizeof (void *) * 3);
  /* Bug fix: malloc() result was previously dereferenced unchecked. */
  if (!p) {
    return NULL;
  }
  p[0] = TLS;
  p[1] = cb;
  p[2] = arg;
  return (void *)evtimer_new (TLS->ev_base, timer_alarm, p);
}
/* Connects to addr:port (resolved asynchronously via evdns) with a
 * bufferevent, then runs the event loop.
 * NOTE(review): `tv` is filled from sec/usec but never used — it looks
 * like `sev` (the send_byte timer) was meant to be armed with
 * evtimer_add(sev, &tv); confirm against the callbacks.
 * NOTE(review): the fread loop after event_base_loop() only prints each
 * byte; presumably the real sending happens inside send_byte/readcb
 * while the loop runs — verify. */
int main(int argc, char ** argv) {
    struct evdns_base * dns_base;
    struct timeval tv;
    if (parse_args(argc, argv) < 0) {
        exit(1);
    }
    tv.tv_sec = sec;
    tv.tv_usec = usec;
    evbase = event_base_new();
    dns_base = evdns_base_new(evbase, 1);
    bev = bufferevent_socket_new(evbase, -1, BEV_OPT_CLOSE_ON_FREE);
    sev = evtimer_new(evbase, send_byte, bev);
    bufferevent_setcb(bev, readcb, NULL, eventcb, evbase);
    bufferevent_enable(bev, EV_READ | EV_WRITE);
    bufferevent_socket_connect_hostname(bev, dns_base, AF_UNSPEC, addr, port);
    event_base_loop(evbase, 0);
    while (fread(&fbyte, 1, 1, input_file) == 1) {
        fprintf(stdout, "sending: '%c' (%x)\n", isprint(fbyte) ? fbyte : ' ', fbyte);
    }
    return 0;
}
// Runs `f` on the libevent loop thread.  If we are already on the loop
// thread and short-circuiting is allowed, `f` is invoked inline;
// otherwise it is queued under `functions_mutex` and the loop is woken
// by activating a freshly created timer event.
void run_in_event_loop(
    const lambda::function<void(void)>& f,
    EventLoopLogicFlow event_loop_logic_flow)
{
  if (__in_event_loop__ && event_loop_logic_flow == ALLOW_SHORT_CIRCUIT) {
    f();
    return;
  }

  synchronized (functions_mutex) {
    functions->push(f);

    // Add an event and activate it to interrupt the event loop.
    // TODO(jmlvanre): after libevent v 2.1 we can use
    // event_self_cbarg instead of re-assigning the event. For now we
    // manually re-assign the event to pass in the pointer to the
    // event itself as the callback argument.
    event* ev = evtimer_new(base, async_function, NULL);

    // 'event_assign' is only valid on non-pending AND non-active
    // events. This means we have to assign the callback before
    // calling 'event_active'.
    // NOTE(review): `ev` is not freed here — presumably async_function
    // frees it (it receives `ev` as its argument); confirm.
    if (evtimer_assign(ev, base, async_function, ev) < 0) {
      LOG(FATAL) << "Failed to assign callback on event";
    }

    event_active(ev, EV_TIMEOUT, 0);
  }
}
/* Thread: main & scan */ static int inofd_event_set(void) { inofd = inotify_init1(IN_CLOEXEC); if (inofd < 0) { DPRINTF(E_FATAL, L_SCAN, "Could not create inotify fd: %s\n", strerror(errno)); return -1; } inoev = event_new(evbase_scan, inofd, EV_READ, inotify_cb, NULL); #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) deferred_inoev = evtimer_new(evbase_scan, inotify_deferred_cb, NULL); if (!deferred_inoev) { DPRINTF(E_LOG, L_SCAN, "Could not create deferred inotify event\n"); return -1; } #endif return 0; }
/* Avahi timeout hook: (re)arm timeout `t` to fire at the absolute time
 * *tv (Avahi passes absolute deadlines; libevent wants relative ones).
 * A zeroed *tv means "fire immediately".  Returns evtimer_add()'s
 * result (0 on success, -1 on error). */
static int
_ev_timeout_add(AvahiTimeout *t, const struct timeval *tv)
{
  struct timeval e_tv;
  struct timeval now;
  int ret;

  /* Replace any previously armed event. */
  if (t->ev)
    event_free(t->ev);

  t->ev = evtimer_new(evbase_main, evcb_timeout, t);
  if (!t->ev)
    {
      DPRINTF(E_LOG, L_MDNS, "Could not make event in _ev_timeout_add - out of memory?\n");
      return -1;
    }

  if ((tv->tv_sec == 0) && (tv->tv_usec == 0))
    {
      evutil_timerclear(&e_tv);
    }
  else
    {
      ret = gettimeofday(&now, NULL);
      if (ret != 0)
	return -1;

      /* Convert the absolute deadline into a relative delay. */
      evutil_timersub(tv, &now, &e_tv);

      /* Bug fix: if the deadline already passed, the subtraction yields
       * a negative interval; clamp to zero so the timer fires right
       * away instead of handing libevent a negative timeval. */
      if (e_tv.tv_sec < 0 || (e_tv.tv_sec == 0 && e_tv.tv_usec < 0))
	evutil_timerclear(&e_tv);
    }

  return evtimer_add(t->ev, &e_tv);
}
/* Timer callback: retry the connection for `service`.  Re-arms itself
 * every 5 s until the connect succeeds; on success it tears down the
 * reconnect timer (the one currently firing — freeing a non-pending
 * event from its own callback is valid in libevent), notifies
 * conn_update(MUL_SERVICE_UP), and starts the validity timer instead. */
static void
c_service_reconn_timer(evutil_socket_t fd UNUSED, short event UNUSED, void *arg)
{
    mul_service_t *service = arg;
    struct timeval tv = { 5, 0 };

    /* Connection already restored elsewhere: nothing to do. */
    if (!service->conn.dead) return;

    c_log_debug("Retry Conn to service %s", service->service_name);
    if(!c_service_client_sock_init(service, service->server?:__server)) {
        c_log_debug("Connection to service %s restored", service->service_name);
        event_del((struct event *)(service->reconn_timer_event));
        event_free((struct event *)(service->reconn_timer_event));
        service->reconn_timer_event = NULL;
        service->conn.dead = 0;
        service->ext_ka_flag = 0;
        if (service->conn_update) {
            service->conn_update(service, MUL_SERVICE_UP);
        }
        mb(); /* memory barrier before other threads see the live state */
        /* NOTE(review): evtimer_new() result is not NULL-checked. */
        service->valid_timer_event = evtimer_new(service->ev_base,
                                                 c_service_validity_timer,
                                                 (void *)service);
        evtimer_add(service->valid_timer_event, &tv);
        return;
    }
    /* Still dead: try again in 5 seconds. */
    evtimer_add(service->reconn_timer_event, &tv);
}
//------------------------------------------------
// One event loop runs in each transaction thread.
//
static void* run_event_loop(void* pv_b)
{
	// The base index is smuggled through the thread argument as an integer.
	int b = (int)(uint64_t)pv_b;

	// Create the event base.
	if ((g_bases[b].p_event_base = event_base_new()) == NULL) {
		LOG("ERROR: creating event base %d", b);
		return NULL;
	}

	// Create the trigger timer event, which is used to initiate transactions.
	if ((g_bases[b].p_trigger_event =
			evtimer_new(g_bases[b].p_event_base, trigger_cb, pv_b)) == NULL) {
		LOG("ERROR: creating event trigger event for base %d", b);
		event_base_free(g_bases[b].p_event_base);
		return NULL;
	}

	// Every base uses k = b for its first transaction, then keeps advancing k
	// by adding the number of bases N. That way, each base will cover:
	// k = b + (N * i), for i = 0, 1, 2, 3...
	// and so together all the bases will cover all the keys.
	g_bases[b].trigger_phase = INSERT;
	g_bases[b].trigger_k = b;

	// Start the event loop. There must be an event added on the base before
	// calling event_base_dispatch(), or the event loop will just exit. Here we
	// add the trigger timer event.
	struct timeval trigger_timeval = { 0, 1 };

	if (evtimer_add(g_bases[b].p_trigger_event, &trigger_timeval) == 0) {
		// event_base_dispatch() will block and run the event loop until no more
		// events are added, or until something calls event_base_loopbreak() or
		// event_base_loopexit().
		// To keep an event loop running, an application must therefore ensure
		// at least one event is always added.
		// In this example's "non-serialized" transaction model, we'll exit the
		// loop when a trigger event callback is made in which we don't re-add
		// the trigger event, and all in-progress transactions are completed.
		if (event_base_dispatch(g_bases[b].p_event_base) < 0) {
			LOG("ERROR: event base %d dispatch", b);
		}
	}
	else {
		LOG("ERROR: adding timer on event base %d", b);
	}

	// Free the trigger timer event and event base.
	event_free(g_bases[b].p_trigger_event);
	event_base_free(g_bases[b].p_event_base);

	return NULL;
}
int main(int argc, const char** argv) { #ifdef WIN32 WSADATA WSAData; WSAStartup(0x101, &WSAData); #endif evthread_use_windows_threads(); //event_enable_debug_mode(); //event_set_log_callback(libevent_log_cb); ev_base = event_base_new(); #if 0 ev_timer = evtimer_new(ev_base, timer_cb, NULL); struct timeval time; time.tv_sec = 0; time.tv_usec = 10*1000; evtimer_add(ev_timer, &time); #endif evhttp = evh_client_init(ev_base); evh_client_setopt(evhttp, EVHTTP_CLIENT_DEBUG, evhttp_log_cb); evh_client_get(evhttp, "www.baidu.com", url_cb, NULL); evh_client_get(evhttp, "www.163.com", url_cb, NULL); event_base_dispatch(ev_base); evh_client_release(evhttp); return 0; }
/* Mark `service` dead, release both of its timers and its socket,
 * notify the conn_update hook with MUL_SERVICE_DOWN, and arm a
 * 1-second reconnect timer (serviced by c_service_reconn_timer). */
void
c_service_reconnect(mul_service_t *service)
{
    struct timeval tv = { 1, 0 };

    /* Already dead with a reconnect timer pending: nothing to do. */
    if (service->conn.dead && service->reconn_timer_event) return;

    service->conn.dead = 1;
    close(service->conn.fd);

    if (service->reconn_timer_event) {
        event_del((struct event *)(service->reconn_timer_event));
        event_free((struct event *)(service->reconn_timer_event));
        service->reconn_timer_event = NULL;
    }

    if (service->valid_timer_event) {
        event_del((struct event *)(service->valid_timer_event));
        event_free((struct event *)(service->valid_timer_event));
        service->valid_timer_event = NULL;
    }

    if (service->conn_update) {
        service->conn_update(service, MUL_SERVICE_DOWN);
    }

    service->reconn_timer_event = evtimer_new(service->ev_base,
                                              c_service_reconn_timer,
                                              (void *)service);
    /* Bug fix: evtimer_new() can return NULL; evtimer_add() would then
     * dereference it.  On OOM we simply skip arming the retry. */
    if (service->reconn_timer_event)
        evtimer_add(service->reconn_timer_event, &tv);
    return;
}
/* Measures the CPU cost of a persistent EV_WRITE event on an
 * always-writable socketpair end over a ~1.5 s run; returns 1
 * (failure) if more than 50% CPU was consumed, which would indicate
 * the loop is busy-spinning instead of blocking. */
int main(int argc, char **argv)
{
	struct event* ev;
	struct event* timeout;
	struct event_base* base;
	evutil_socket_t pair[2];
	struct timeval tv;
	struct cpu_usage_timer timer;
	double usage, secPassed, secUsed;

#ifdef _WIN32
	WORD wVersionRequested;
	WSADATA wsaData;
	int err;
	wVersionRequested = MAKEWORD(2, 2);
	err = WSAStartup(wVersionRequested, &wsaData);
#endif

	if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
		return (1);

	/* Initalize the event library */
	base = event_base_new();

	/* Initalize a timeout to terminate the test.  The callback gets a
	 * pointer to the event's own variable (&timeout) so it can delete
	 * or free the event — deliberate test plumbing, same for &ev. */
	timeout = evtimer_new(base,timeout_cb,&timeout);
	/* and watch for writability on one end of the pipe */
	ev = event_new(base,pair[1],EV_WRITE | EV_PERSIST, write_cb, &ev);

	tv.tv_sec = 1;
	tv.tv_usec = 500*1000;
	evtimer_add(timeout, &tv);
	event_add(ev, NULL);

	start_cpu_usage_timer(&timer);
	event_base_dispatch(base);
	get_cpu_usage(&timer, &secPassed, &secUsed, &usage);

	/* attempt to calculate our cpu usage over the test should be
	   virtually nil */
	printf("usec used=%d, usec passed=%d, cpu usage=%.2f%%\n",
	    (int)(secUsed*1e6), (int)(secPassed*1e6), usage*100);

	if (usage > 50.0) /* way too high */
		return 1;

	return 0;
}
void cNetworkSingleton::RunEventLoop(cNetworkSingleton * a_Self)
{
	// Arm a zero-delay one-shot timer so SignalizeStartup runs as soon
	// as the loop starts spinning:
	event * StartupTimer = evtimer_new(a_Self->m_EventBase, SignalizeStartup, a_Self);
	timeval ZeroDelay{};
	evtimer_add(StartupTimer, &ZeroDelay);

	// Run the loop until explicitly stopped, even when no events remain:
	event_base_loop(a_Self->m_EventBase, EVLOOP_NO_EXIT_ON_EMPTY);
	event_free(StartupTimer);
}
/* Arm a one-shot per-IP scheduling timer on _base, firing after
 * ipContext->scheduleInterval seconds.  The event is stored in the
 * context so it can be cancelled/freed later.
 * Returns true on success, false if the event could not be created. */
bool LinkScheduler::addIpSchedule(IpContext *ipContext)
{
    struct event *scheduleEvent = evtimer_new(_base, on_ip_schedule, ipContext);
    /* Bug fix: evtimer_new() can return NULL; previously the function
     * unconditionally returned true and evtimer_add() would crash. */
    if (scheduleEvent == NULL) {
        return false;
    }
    ipContext->ipScheduleEvent = scheduleEvent;

    struct timeval t = { ipContext->scheduleInterval, 0 };
    evtimer_add(scheduleEvent, &t);
    return true;
}
void set_timer(void *data) { struct event_base * base = data; struct event * tev = evtimer_new(base,time_cb,NULL); time_event = tev; struct timeval tval={5,0}; evtimer_add(tev,&tval); }
/* Initialize lldpd's libevent machinery: the event base, optional SNMP
 * timeout/fd tracking, the periodic update loop, the control Unix
 * socket, and signal handlers.  Calls fatalx() (no return) on any
 * setup failure. */
static void
levent_init(struct lldpd *cfg)
{
	/* Setup libevent */
	log_debug("event", "initialize libevent");
	event_set_log_callback(levent_log_cb);
	if (!(cfg->g_base = event_base_new()))
		fatalx("unable to create a new libevent base");
	log_info("event", "libevent %s initialized with %s method",
	    event_get_version(),
	    event_base_get_method(cfg->g_base));

	/* Setup SNMP */
#ifdef USE_SNMP
	if (cfg->g_snmp) {
		agent_init(cfg, cfg->g_snmp_agentx);
		cfg->g_snmp_timeout = evtimer_new(cfg->g_base, levent_snmp_timeout, cfg);
		if (!cfg->g_snmp_timeout)
			fatalx("unable to setup timeout function for SNMP");
		if ((cfg->g_snmp_fds = malloc(sizeof(struct ev_l))) == NULL)
			fatalx("unable to allocate memory for SNMP events");
		TAILQ_INIT(levent_snmp_fds(cfg));
	}
#endif

	/* Setup loop that will run every X seconds. */
	log_debug("event", "register loop timer");
	if (!(cfg->g_main_loop = event_new(cfg->g_base, -1, 0, levent_update_and_send, cfg)))
		fatalx("unable to setup main timer");
	/* Kick the first iteration off immediately; the callback re-arms itself. */
	event_active(cfg->g_main_loop, EV_TIMEOUT, 1);

	/* Setup unix socket */
	log_debug("event", "register Unix socket");
	TAILQ_INIT(&lldpd_clients);
	evutil_make_socket_nonblocking(cfg->g_ctl);
	if ((cfg->g_ctl_event = event_new(cfg->g_base, cfg->g_ctl, EV_READ|EV_PERSIST, levent_ctl_accept, cfg)) == NULL)
		fatalx("unable to setup control socket event");
	event_add(cfg->g_ctl_event, NULL);

	/* Signals */
	log_debug("event", "register signals");
	signal(SIGHUP, SIG_IGN);
	/* The signal events are never stored/freed — they live for the
	 * daemon's whole lifetime. */
	evsignal_add(evsignal_new(cfg->g_base, SIGUSR1, levent_dump, cfg->g_base), NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGINT, levent_stop, cfg->g_base), NULL);
	evsignal_add(evsignal_new(cfg->g_base, SIGTERM, levent_stop, cfg->g_base), NULL);
}
/* Schedule a close/reconnect of `server`: immediately when delay is
 * false and no requests are in flight, otherwise after `timeout`
 * seconds via a one-shot timer serviced by server_backoff_cb.
 * On setup failure the close/reconnect is performed synchronously. */
void wait_server_backoff(unsigned int timeout /* seconds */,
        jsonrpc_server_t* server, bool delay)
{
    if(!server) {
        ERR("Trying to close/reconnect a NULL server\n");
        return;
    }

    if(delay == false) {
        if (requests_using_server(server) <= 0) {
            if(server->status == JSONRPC_SERVER_RECONNECTING) {
                bev_connect(server);
            } else if(server->status == JSONRPC_SERVER_CLOSING) {
                close_server(server);
            }
            return;
        }
    }

    const struct timeval tv = {timeout, 0};

    server_backoff_args_t* args = pkg_malloc(sizeof(server_backoff_args_t));
    CHECK_MALLOC_VOID(args);
    memset(args, 0, sizeof(server_backoff_args_t));

    args->ev = evtimer_new(global_ev_base, server_backoff_cb, (void*)args);
    CHECK_MALLOC_GOTO(args->ev, error);

    args->server = server;
    args->timeout = timeout;

    if(evtimer_add(args->ev, &tv)<0) {
        ERR("event_add failed while setting request timer (%s).",
                strerror(errno));
        goto error;
    }
    return;

error:
    ERR("schedule_server failed.\n");
    if(args) {
        if(args->ev) {
            /* Bug fix: evtimer_del() only unschedules the timer; an
             * event allocated with evtimer_new() must be released with
             * event_free() or it leaks (event_free also deletes a
             * pending timer first). */
            event_free(args->ev);
        }
        pkg_free(args);
    }

    if (server->status == JSONRPC_SERVER_CLOSING) {
        ERR("Closing server now...\n");
        close_server(server);
    } else if (server->status == JSONRPC_SERVER_RECONNECTING) {
        ERR("Reconnecting server now...\n");
        force_reconnect(server);
    }
}
int main (int argc, char *argv[]) { ClientPool *pool; struct event *timeout; struct timeval tv; GList *l_files = NULL; CBData *cb; gchar *in_dir; log_level = LOG_debug; event_set_mem_functions (g_malloc, g_realloc, g_free); in_dir = g_dir_make_tmp (NULL, NULL); g_assert (in_dir); l_files = populate_file_list (100, l_files, in_dir); g_assert (l_files); app = app_create (); app->h_clients_freq = g_hash_table_new (g_direct_hash, g_direct_equal); app->l_files = l_files; // start server start_srv (app->evbase, in_dir); /* pool = client_pool_create (app, 12, http_client_create, http_client_destroy, http_client_set_on_released_cb, http_client_check_rediness ); */ cb = g_new (CBData, 1); cb->pool = pool; cb->l_files = l_files; timeout = evtimer_new (app->evbase, on_output_timer, cb); evutil_timerclear(&tv); tv.tv_sec = 0; tv.tv_usec = 500; event_add (timeout, &tv); event_base_dispatch (app->evbase); g_hash_table_foreach (app->h_clients_freq, (GHFunc)print_foreach, NULL); return 0; }
/* Schedule another udp_init() attempt after SERVER_RECONNECT_INTERVAL_SEC.
 * The timer is created once (function-local static) and re-armed only
 * when not already pending, so repeated failures coalesce into a single
 * retry; it is intentionally never freed (process lifetime).
 * NOTE(review): udp_init is used directly as the event callback — its
 * signature must be (evutil_socket_t, short, void *); confirm. */
static void
udp_retry_later(struct event_base *base)
{
	static struct event *retry_timer = NULL;
	struct timeval delay_tv = {SERVER_RECONNECT_INTERVAL_SEC, 0};
	if (!retry_timer) {
		retry_timer = evtimer_new(base, udp_init, base);
	}
	if (!evtimer_pending(retry_timer, &delay_tv)) {
		evtimer_add(retry_timer, &delay_tv);
	}
}
/*建立一个proposer对象,并启动它*/ struct evproposer* evproposer_init(int id, const char* config, struct event_base* b) { int port, acceptor_count; struct evproposer* p; /*读取配置文件*/ struct evpaxos_config* conf = evpaxos_config_read(config); if(conf == NULL) return NULL; /*非法的proposer id*/ if (id < 0 || id >= MAX_N_OF_PROPOSERS) { paxos_log_error("Invalid proposer id: %d", id); return NULL; } /*读取proposer的监听端口*/ port = evpaxos_proposer_listen_port(conf, id); /*读取acceptor的数量*/ acceptor_count = evpaxos_acceptor_count(conf); p = (struct evproposer *)malloc(sizeof(struct evproposer)); p->id = id; p->base = b; /*获得同时提交的议案数量*/ p->preexec_window = paxos_config.proposer_preexec_window; /*产生一个网络消息接收器*/ p->receiver = tcp_receiver_new(b, port, handle_request, p); /*产生一个acceptor的管理器*/ p->acceptors = peers_new(b); /*对每个acceptor发起连接*/ peers_connect_to_acceptors(p->acceptors, conf, handle_request, p); /*设置定时器*/ p->tv.tv_sec = paxos_config.proposer_timeout; p->tv.tv_usec = 0; /*产生一个libevent定时器事件对象,并设置一个定时器*/ p->timeout_ev = evtimer_new(b, proposer_check_timeouts, p); event_add(p->timeout_ev, &p->tv); /*产生一个proposer 消息处理器*/ p->state = proposer_new(p->id, acceptor_count); /*试探性执行prepare过程(提案第一阶段)*/ proposer_preexecute(p); evpaxos_config_free(conf); return p; }
// Per-request context for a curl easy handle managed by curl_handler.
// Immediately arms a one-shot timeout timer (_timeout milliseconds) on
// the handler's event base; event_timeout_callback receives `this`.
// NOTE(review): evtimer_new's result is not NULL-checked before
// evtimer_add — acceptable only if allocation failure is fatal here.
core::curl_handler::connection_context::connection_context(milliseconds_t _timeout, curl_handler* _curl_handler, CURL* _easy_handle, const completion_handler_t& _completion_handler)
    : timeout_(_timeout)
    , curl_handler_(_curl_handler)
    , easy_handle_(_easy_handle)
    , completion_handler_(_completion_handler)
    , socket_(0)
    , event_(nullptr)
{
    const auto tv = make_timeval(timeout_);

    timeout_event_ = evtimer_new(curl_handler_->event_base_, event_timeout_callback, this);
    evtimer_add(timeout_event_, &tv);
}
void Connection::scheduleDelete() { if ( !m_deleteEvent ) { // // schedule deletion event // m_deleteEvent = evtimer_new( m_base, onDeleteStatic, this ); struct timeval timeout = { 0, 0 }; evtimer_add( m_deleteEvent, &timeout ); } }
/* Regression test: event_free_finalize() and event_finalize() must both
 * run the finalizer callback, but only the _free_ variant may release
 * the event's memory (tfff_p1 freed, tfff_p2 not — ev2 needs an
 * explicit event_free afterwards).  Custom mem functions are installed
 * so the frees can be observed. */
static void
test_fin_free_finalize(void *arg)
{
#ifdef EVENT__DISABLE_MM_REPLACEMENT
	tinytest_set_test_skipped_();
#else
	struct event_base *base = NULL;
	struct event *ev, *ev2;
	int ev_called = 0;
	int ev2_called = 0;
	(void)arg;

	event_set_mem_functions(tfff_malloc, tfff_realloc, tfff_free);

	base = event_base_new();
	ev = evtimer_new(base, timer_callback, &ev_called);
	ev2 = evtimer_new(base, timer_callback, &ev2_called);
	tfff_p1 = ev;
	tfff_p2 = ev2;
	/* Finalize-and-free vs. finalize-only. */
	event_free_finalize(0, ev, event_finalize_callback_1);
	event_finalize(0, ev2, event_finalize_callback_1);
	event_base_dispatch(base);
	/* NOTE(review): 100 is presumably written by
	 * event_finalize_callback_1 (the timers never fire) — confirm. */
	tt_int_op(ev_called, ==, 100);
	tt_int_op(ev2_called, ==, 100);

	event_base_assert_ok_(base);
	tt_int_op(tfff_p1_freed, ==, 1);
	tt_int_op(tfff_p2_freed, ==, 0);

	event_free(ev2);
end:
	if (base)
		event_base_free(base);
#endif
}
/* Demo driver for the httpclient wrapper: issues six immediate
 * requests, then re-triggers `req` via one-shot timers at 3 s and 10 s.
 * SIGINT (Ctrl-C) runs do_exit to break out of the loop, after which
 * all events, the client, the DNS base, and the event base are freed. */
int main()
{
	struct httpclient *cli;
	struct event_base *eb;
	struct evdns_base *edb;
	struct event *exit_ev,*ev,*ev2;
	struct timeval three_sec = {3,0};
	struct timeval ten_sec = {10,0};

	logging_fd(2);
	log_set_level("",LOG_DEBUG);
	eb = event_base_new();
	edb = evdns_base_new(eb,1);
	cli = httpclient_create(eb,edb);
	exit_ev = evsignal_new(eb,SIGINT,do_exit,eb);
	event_add(exit_ev,0);

	/* Six concurrent requests with a 20-second limit each. */
	http_request(cli,url,0,20,done,0);
	http_request(cli,url,0,20,done,0);
	http_request(cli,url,0,20,done,0);
	http_request(cli,url,0,20,done,0);
	http_request(cli,url,0,20,done,0);
	http_request(cli,url,0,20,done,0);

	/* Two delayed batches via one-shot timers. */
	ev = evtimer_new(eb,req,cli);
	evtimer_add(ev,&three_sec);
	ev2 = evtimer_new(eb,req,cli);
	evtimer_add(ev2,&ten_sec);

	event_base_loop(eb,0);

	httpclient_finish(cli);
	event_free(exit_ev);
	event_free(ev);
	event_free(ev2);
	evdns_base_free(edb,1);
	event_base_free(eb);
	fprintf(stderr,"exit\n");
	logging_done();
	return 0;
}
static void peers_connect(struct peers* p, int id, struct sockaddr_in* addr) { p->peers = realloc(p->peers, sizeof(struct peer*) * (p->peers_count+1)); p->peers[p->peers_count] = make_peer(p, id, addr); struct peer* peer = p->peers[p->peers_count]; bufferevent_setcb(peer->bev, on_read, NULL, on_peer_event, peer); peer->reconnect_ev = evtimer_new(p->base, on_connection_timeout, peer); connect_peer(peer); p->peers_count++; }
/* (The original source carried a commented-out sample cb_func here that
 * printed the fd and the triggered EV_* flags; it contained syntax
 * errors and was never compiled.) */

/* Run one event loop with a one-shot 5-second timer and a persistent
 * read/write watcher on fd2; both dispatch to cb_func with a label. */
void main_loop(evutil_socket_t fd2)
{
	struct event_base *base = event_base_new();
	struct timeval five_seconds = {5,0};

	/* One-shot timer, fires after 5 seconds. */
	struct event *timer_ev = evtimer_new(base,cb_func,(char*)"timeout event");
	/* Persistent watcher: re-armed automatically after each callback. */
	struct event *io_ev = event_new(base,fd2,EV_READ|EV_WRITE|EV_PERSIST,cb_func,(char*)"writing event");

	event_add(timer_ev,&five_seconds);
	event_add(io_ev,NULL);
	event_base_dispatch(base);
}
/* health check thread and reset count */
/* Thread entry point: runs a private event base whose only event is a
 * dcenter_health_check timer armed for 1 second (the callback is
 * expected to re-arm it via the dop_timer_t it receives).
 * NOTE(review): eb, tt, and tt->timer are never freed — acceptable only
 * if this thread lives for the whole process; confirm. */
void* health(void* ptr)
{
    struct event_base* eb = event_base_new();
    dop_timer_t* tt = MALLOC(dop_timer_t, 1);
    tt->tv.tv_sec = 1;
    tt->tv.tv_usec = 0;
    tt->timer = evtimer_new(eb, dcenter_health_check, tt);
    evtimer_add(tt->timer, &(tt->tv));
    event_base_dispatch(eb);
    return 0;
}
/* Final initialization for a worker thread: creates its event base,
 * opens the main->worker IPC pipe, arms the periodic worker timer,
 * initializes the per-app work queues, opens the app listener socket,
 * and pins the thread to the CPU matching its index.  Returns 0;
 * unrecoverable setup failures abort via assert(). */
static int
c_worker_thread_final_init(struct c_worker_ctx *w_ctx)
{
    cpu_set_t cpu;
    char ipc_path_str[64];
    struct timeval tv = { C_PER_WORKER_TIMEO, 0 };
    int i = 0;
    int c_listener = 0;
    extern ctrl_hdl_t ctrl_hdl;

    w_ctx->cmn_ctx.base = event_base_new();
    assert(w_ctx->cmn_ctx.base);

    /* Read end of the named pipe the main thread writes commands to. */
    snprintf(ipc_path_str, 63, "%s%d", C_IPC_PATH, w_ctx->thread_idx);
    w_ctx->main_wrk_conn.rd_fd = open(ipc_path_str, O_RDONLY | O_NONBLOCK);
    assert(w_ctx->main_wrk_conn.rd_fd > 0);

    w_ctx->main_wrk_conn.rd_event = event_new(w_ctx->cmn_ctx.base,
                                              w_ctx->main_wrk_conn.rd_fd,
                                              EV_READ|EV_PERSIST,
                                              c_worker_ipc_read, (void*)w_ctx);
    event_add(w_ctx->main_wrk_conn.rd_event, NULL);

    /* Periodic per-worker housekeeping timer. */
    w_ctx->worker_timer_event = evtimer_new(w_ctx->cmn_ctx.base,
                                            c_per_worker_timer_event,
                                            (void *)w_ctx);
    evtimer_add(w_ctx->worker_timer_event, &tv);

    for (i = 0; i < ctrl_hdl.n_appthreads; i++) {
        nbq_init(&w_ctx->work_qs[i].q);
    }

    /* Listener for app work-queue connections; port offset by thread index. */
    c_listener = c_server_socket_create(INADDR_ANY,
                                        C_APP_WQ_LISTEN_PORT+w_ctx->thread_idx);
    assert(c_listener);
    w_ctx->c_app_accept_event = event_new(w_ctx->cmn_ctx.base, c_listener,
                                          EV_READ|EV_PERSIST,
                                          c_app_wq_accept, (void*)w_ctx);
    event_add(w_ctx->c_app_accept_event, NULL);

    /* Set cpu affinity */
    CPU_ZERO(&cpu);
    CPU_SET(w_ctx->thread_idx, &cpu);
    pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu);

    w_ctx->cmn_ctx.run_state = THREAD_STATE_RUNNING;
    return 0;
}
/* Regression test: finalizing events from within a running callback.
 * Two timers each hold a pointer to the other (ev2 field); the first
 * callback to run finalizes both, so the second callback must never
 * run.  timer_callback_2 presumably adds 101 for the finalizing path
 * and 100 for being finalized — hence the expected counts.  The same
 * scenario is then repeated with EV_PERSIST events. */
static void
test_fin_within_cb(void *arg)
{
	struct basic_test_data *data = arg;
	struct event_base *base = data->base;

	struct event_and_count evc1, evc2;
	evc1.count = evc2.count = 0;
	evc2.ev2 = evc1.ev = evtimer_new(base, timer_callback_2, &evc1);
	evc1.ev2 = evc2.ev = evtimer_new(base, timer_callback_2, &evc2);

	/* Activate both.  The first one will have its callback run, which
	 * will finalize both of them, preventing the second one's callback
	 * from running. */
	event_active(evc1.ev, EV_TIMEOUT, 1);
	event_active(evc2.ev, EV_TIMEOUT, 1);

	event_base_dispatch(base);
	tt_int_op(evc1.count, ==, 101);
	tt_int_op(evc2.count, ==, 100);

	event_base_assert_ok_(base);

	/* Now try with EV_PERSIST events. */
	evc1.count = evc2.count = 0;
	evc2.ev2 = evc1.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc1);
	evc1.ev2 = evc2.ev = event_new(base, -1, EV_PERSIST, timer_callback_2, &evc2);

	event_active(evc1.ev, EV_TIMEOUT, 1);
	event_active(evc2.ev, EV_TIMEOUT, 1);

	event_base_dispatch(base);
	tt_int_op(evc1.count, ==, 101);
	tt_int_op(evc2.count, ==, 100);

	event_base_assert_ok_(base);
end:
	;
}
/* Try to (re)establish the inotify watch/bind-mount on ctx->resolv;
 * while that keeps failing, retry once per second.
 * Bug fix: the original allocated a brand-new event with evtimer_new()
 * on every retry and never freed it, leaking one event per second for
 * as long as the watch could not be established.  A single static
 * timer is created once and re-armed instead (this function is also
 * its own callback, so `vctx` is the same ctx on every invocation —
 * assumes a single watcher context per process; TODO confirm). */
static void watcher_check_resolv(evutil_socket_t fd, short events, void *vctx)
{
	static struct event *retry_ev = NULL;
	struct watcher_ctx *ctx = vctx;

	ctx->inotify_wd = watch_and_bind_mount(ctx->inotify_fd, ctx->resolv);
	if (ctx->inotify_wd >= 0)
		return;

	/* Check again in one second. */
	struct timeval tv = {1, 0};
	if (retry_ev == NULL)
		retry_ev = evtimer_new(ctx->event_base, &watcher_check_resolv, vctx);
	if (retry_ev != NULL)
		evtimer_add(retry_ev, &tv);
}
/* Minimal libevent demo: a SIGINT handler (sig_cb) and a one-shot
 * 1-second timer (timeout_cb), then full cleanup after the loop. */
int main(){
	struct event_base* base = event_init();

	struct event* signal_event = evsignal_new(base, SIGINT, sig_cb, base);
	event_add(signal_event,NULL);

	/* Bug fix: `timeval tv` compiles only as C++; plain C requires the
	 * `struct` tag. */
	struct timeval tv = {1,0};
	struct event* timeout_event = evtimer_new(base, timeout_cb, NULL);
	event_add(timeout_event, &tv);

	event_base_dispatch(base);

	event_free(timeout_event);
	event_free(signal_event);
	event_base_free(base);
	return 0;
}