/*
 * Demo: resolve many DNS record types for one domain using ev_ares on
 * top of libev, printing results from the various callback_* handlers.
 * Usage: prog <domain>
 */
int main (int argc, char *argv[]) {
	struct ev_loop *loop = EV_DEFAULT;
	if (argc < 2) {
		fprintf(stderr, "Usage:\n\t%s domain\n",argv[0]);
		return 1;
	}
	char *hostname = argv[1];
	/* Build "<JSRV><domain>" for the SRV query (e.g. a jabber service
	 * prefix).  NOTE(review): calloc() result is not checked — a NULL
	 * return would crash in strcat(); confirm whether that is acceptable
	 * for this demo. */
	char *jabber = calloc(1,strlen(argv[1]) + strlen(JSRV) + 1); // will be freed on exit ;)
	strcat(jabber,JSRV);
	strcat(jabber,argv[1]);
	// Declare resolver struct;
	ev_ares resolver;
	printf("Resolving '%s'\n",hostname);
	// Initialize ares library.
	int status;
	if ((status = ares_library_init(ARES_LIB_INIT_ALL) )!= ARES_SUCCESS) {
		fprintf(stderr,"Ares error: %s\n",ares_strerror(status));
		return 1;
	}
	// Initialize resolver with timeout 1.3 seconds.
	if (( status = ev_ares_init(&resolver, 1.3) ) != ARES_SUCCESS) {
		fprintf(stderr,"Ares error: %s\n",ares_strerror(status));
		return 1;
	}
	// hostname variable must not be freed until resolve callback, since it referenced as result->host
	ev_ares_soa(loop,&resolver,hostname,0,callback_soa);
	ev_ares_ns(loop,&resolver,hostname,0,callback_ns);
	ev_ares_a(loop,&resolver,hostname,0,callback_a);
	ev_ares_aaaa(loop,&resolver,hostname,0,callback_aaaa);
	ev_ares_mx(loop,&resolver,hostname,0,callback_mx);
	ev_ares_srv(loop,&resolver,jabber,0,callback_srv);
	ev_ares_txt(loop,&resolver,hostname,0,callback_txt);
	// Reverse lookups for an IPv4 and an IPv6 address.
	ev_ares_gethostbyaddr(loop,&resolver,"8.8.8.8", 0, callback_hba);
	ev_ares_gethostbyaddr(loop,&resolver,"2a00:1450:4010:c04::66", 0,callback_hba);
	// Raw PTR queries
	ev_ares_ptr(loop,&resolver,"8.8.8.8.in-addr.arpa", 0,callback_ptr);
	ev_ares_ptr(loop,&resolver,"a.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.4.0.c.0.0.1.0.4.0.5.4.1.0.0.a.2.ip6.arpa", 0,callback_ptr);
	// This is the only NAPTR example i found ;)
	ev_ares_naptr(loop,&resolver,"0.2.0.1.1.6.5.1.0.3.1.loligo.com.",0,callback_naptr);
	// Run loop until all queries have completed.
	ev_run (loop, 0);
	free(jabber);
	ev_ares_clean(&resolver);
	ares_library_cleanup();
}
void RLServer::start() { struct linger ling = {0, 0}; struct sockaddr_in addr; int flags = 1; if((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1) { perror("socket()"); exit(1); } flags = 1; setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&flags, sizeof(flags)); setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&flags, sizeof(flags)); setsockopt(fd, SOL_SOCKET, SO_LINGER, (void *)&ling, sizeof(ling)); /* XXX: Sending single byte chunks in a response body? Perhaps there is a * need to enable the Nagel algorithm dynamically. For now disabling. */ setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void *)&flags, sizeof(flags)); /* the memset call clears nonstandard fields in some impementations that * otherwise mess things up. */ memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = htons(port); if(!hostaddr.empty()){ addr.sin_addr.s_addr = inet_addr(hostaddr.c_str()); if(addr.sin_addr.s_addr==INADDR_NONE){ printf("Bad address(%s) to listen\n",hostaddr.c_str()); exit(1); } }else{ addr.sin_addr.s_addr = htonl(INADDR_ANY); } if(bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { perror("bind()"); if(fd > 0) close(fd); exit(1); } if(listen(fd, MAX_CONNECTIONS) < 0) { perror("listen()"); exit(1); } set_nonblock(fd); ev_init(&connection_watcher, RLServer::on_connection); ev_io_set(&connection_watcher, fd, EV_READ); ev_io_start(loop, &connection_watcher); ev_run(loop, 0); }
/**
 * Entry point for threads to join the networking
 * stack. This method blocks indefinitely until the
 * network stack is shutdown.
 * @arg netconf The configuration for the networking stack.
 */
void start_networking_worker(bloom_networking *netconf) {
    // Allocate our user data (lives on this thread's stack for the whole run)
    worker_ev_userdata data;
    data.netconf = netconf;

    // Allocate our notification pipe
    if (pipe(data.pipefd)) {
        perror("failed to allocate worker pipes!");
        return;
    }

    // Create the event loop
    if (!(data.loop = ev_loop_new(netconf->ev_mode))) {
        syslog(LOG_ERR, "Failed to create event loop for worker!");
        // Fix: don't leak the pipe file descriptors on this error path
        close(data.pipefd[0]);
        close(data.pipefd[1]);
        return;
    }

    // Set the user data to be for this thread
    ev_set_userdata(data.loop, &data);

    // Setup the pipe listener
    ev_io_init(&data.pipe_client, handle_worker_notification,
               data.pipefd[0], EV_READ);
    ev_io_start(data.loop, &data.pipe_client);

    // Setup the periodic timers
    ev_timer_init(&data.periodic, handle_periodic_timeout,
                  PERIODIC_TIME_SEC, 1);
    ev_timer_start(data.loop, &data.periodic);

    // Synchronize until netconf->threads is available.
    // NOTE(review): the early returns above skip this barrier — sibling
    // threads would block; pre-existing behavior, left unchanged.
    barrier_wait(&netconf->thread_barrier);

    // Register this thread so we can accept connections
    assert(netconf->threads);
    pthread_t id = pthread_self();
    for (int i = 0; i < netconf->config->worker_threads; i++) {
        if (pthread_equal(id, netconf->threads[i])) {
            // Provide a pointer to our data
            netconf->workers[i] = &data;
            break;
        }
    }

    // Wait for everybody to be registered
    barrier_wait(&netconf->thread_barrier);

    // Run the event loop until shutdown
    ev_run(data.loop, 0);

    // Cleanup after exit
    ev_timer_stop(data.loop, &data.periodic);
    ev_io_stop(data.loop, &data.pipe_client);
    close(data.pipefd[0]);
    close(data.pipefd[1]);
    ev_loop_destroy(data.loop);
}
/* Run one libev iteration, bounded by timeout_ms (floored at 1ms), then
 * flush the server's scheduled-fd queue.  Always returns 0. */
static int fdevent_libev_poll(fdevents *ev, int timeout_ms) {
	double repeat;

	if (timeout_ms > 0)
		repeat = timeout_ms / 1000.0;
	else
		repeat = 0.001;

	timeout_watcher.repeat = repeat;
	ev_timer_again(ev->libev_loop, &timeout_watcher);

	ev_run(ev->libev_loop, EVRUN_ONCE);
	fdevent_sched_run(ev->srv, ev);
	return 0;
}
/* Start the test server's event loop: accept watcher plus an async
 * wakeup watcher for cross-thread send notifications.  Blocks in ev_run(). */
void MatchServerTest::RespondRunnableHandle() {
	// Start the main loop
	ev_io_start(mLoop, &mAccept_watcher);
	// Make `this` reachable from C callbacks via ev_userdata()
	ev_set_userdata(mLoop, this);
	ev_async_init(&mAsync_send_watcher, async_recv_callback);
	ev_async_start(mLoop, &mAsync_send_watcher);
	ev_run(mLoop, 0);
}
//------------------------------------------------------------------- void clientThread(struct ev_io *watcher ) { int clientSockDescr = accept(watcher->fd, 0, 0); struct ev_io *clientWatcher = (struct ev_io*) malloc(sizeof(struct ev_io)); ev_io_init(clientWatcher, readCallBack, clientSockDescr, EV_READ); struct ev_loop *threadLoop = ev_loop_new(EVFLAG_AUTO); ev_io_start(threadLoop, clientWatcher); ev_run(threadLoop, 0); }
/*
 * Worker thread entry point: announce that initialisation is complete to
 * the spawning thread, record our id, then run this worker's event loop.
 */
static void* worker_thread_handler(void* data){
	zero_worker_thread *worker = data;

	/* Bump the init counter under the server mutex and wake the spawner. */
	pthread_mutex_lock(&worker->server->mutex);
	worker->server->init_count++;
	pthread_cond_signal(&worker->server->cond);
	pthread_mutex_unlock(&worker->server->mutex);

	worker->worker_id = pthread_self();

	/* Dispatch events until the loop is broken. */
	ev_run(worker->loop, 0);
	return NULL;
}
/**
 * Combined threads and events loop.
 *
 * The loop processes first threads (_prepare_cb), then events
 * (ev_stat_cb, ev_io_cb). It sleeps until the earliest thread resume
 * time, or an I/O event occurs.  Execution time is profiled via
 * mrkthr_sched0_p; returns whatever ev_run() reports.
 */
int mrkthr_loop(void) {
    int ret;

    PROFILE_START(mrkthr_sched0_p);
    ret = ev_run(the_loop, 0);
    PROFILE_STOP(mrkthr_sched0_p);

    return ret;
}
/* NIO thread body: watch the read end of the wakeup pipe and dispatch
 * notifications on this thread's private loop until it is stopped. */
spx_private void *spx_nio_thread_listen(void *arg){
	struct spx_nio_thread_context *ctx = (struct spx_nio_thread_context *) arg;

	if (NULL == ctx) {
		return NULL;
	}

	ev_io_init(&ctx->watcher, ctx->thread_notify_handle, ctx->pipe[0], EV_READ);
	/* libev has no setter for the data field — assign it directly. */
	ctx->watcher.data = ctx;
	ev_io_start(ctx->loop, &ctx->watcher);
	ev_run(ctx->loop, 0);
	return NULL;
}
/// Do the polling. If on several threads, this is done in every thread. void onion_poller_poll(onion_poller *poller){ ev_default_fork(); ev_loop_fork(poller->loop); poller->stop=0; while(!poller->stop){ sem_wait(poller->sem); ev_run(poller->loop,EVLOOP_ONESHOT); sem_post(poller->sem); } }
/*
 * Client receive-thread entry point: signal the spawner that this thread
 * finished initialising, record our thread id, then run the client's
 * event loop until it is stopped.
 */
static void* thread_client_handler(void* data) {
	rpc_client_thread *th = data;
	/* Fix: the original copied the pthread_mutex_t by value.  Locking a
	 * copy does not synchronise with the spawner (and copying a mutex is
	 * undefined per POSIX).  Lock the shared mutex through a pointer. */
	pthread_mutex_t *mutex = &th->client->mutex;
	pthread_mutex_lock(mutex);
	th->client->init_count++;
	pthread_cond_signal(&(th->client->cond));
	pthread_mutex_unlock(mutex);
	th->thread_receive_id = pthread_self();
	ev_run(th->loop, 0);
	return NULL;
}
/*
 * Tear down the foreign (libev) loop's watchers, let the loop drain any
 * remaining work, then destroy it.
 */
static void foreign_event_loop_cleanup_libev(void) {
    /* cleanup the foreign loop assets */
    ev_timer_stop(loop_ev, &timer_outer_ev);
    ev_signal_stop(loop_ev, &sighandler_ev);

    /* Fix: this is a libev loop, so pass a libev flag — 0 means "run
     * until no active watchers remain".  The original passed libuv's
     * UV_RUN_DEFAULT, which only works by coincidence of its value. */
    ev_run(loop_ev, 0);

    ev_loop_destroy(loop_ev);
}
/* Resume the iops event loop: clear the suspended flag, run libev until
 * it is stopped, then mark the loop suspended again.  ev_run() is the
 * libev-4 entry point; libev-3 builds use the older ev_loop(). */
static void lcb_io_run_event_loop(struct lcb_io_opt_st *iops) {
	struct libev_cookie *io_cookie = iops->v.v2.cookie;
	io_cookie->suspended = 0;
#ifdef HAVE_LIBEV4
	ev_run(io_cookie->loop, 0);
#else
	ev_loop(io_cookie->loop, 0);
#endif
	io_cookie->suspended = 1;
}
/**
 * @brief CLogBackend::start
 * Start the logging engine's backend event loop.
 */
void CLogBackend::start() {
	// Initialise the backend worker.  NOTE(review): argument meanings
	// (open flags / permission bits / mmap protection) inferred from the
	// constants — confirm against m_log_worker.init()'s signature.
	m_log_worker.init( O_CREAT | O_RDWR,S_IRUSR,O_CREAT | O_RDWR,S_IWUSR | S_IRUSR,PROT_WRITE );
	m_log_worker.unwait();
	// Fire CLogBackend::backend periodically, every LOGBACKEND_TIME.
	m_log_timer.set< CLogBackend,&CLogBackend::backend >(this);
	m_log_timer.set( LOGBACKEND_TIME,LOGBACKEND_TIME );
	m_log_timer.start();
	// Blocks here until the loop is stopped.
	ev_run( loop,0 );
}
/*
 * Backend supervisor: daemonize, drop privileges, bind the control
 * socket, spawn the configured number of server children, and run the
 * default event loop until shutdown.
 * Configuration is taken from FBBS_* environment variables.
 */
int main(int argc, char **argv) {
	const char *socket_path = getenv("FBBS_SOCKET_PATH");
	server_path = getenv("FBBS_SERVER_PATH");
	if (!socket_path || !server_path)
		return EXIT_FAILURE;

	start_daemon();

	// Server-pool size, falling back to the compiled-in default.
	const char *fbbs_max_servers = getenv("FBBS_MAX_SERVERS");
	if (fbbs_max_servers)
		max_servers = strtol(fbbs_max_servers, NULL, 10);
	if (max_servers <= 0)
		max_servers = DEFAULT_MAX_SERVERS;

	servers = malloc(sizeof(*servers) * max_servers);
	if (!servers)
		return EXIT_FAILURE;
	// Mark every slot empty until spawn_server() fills it in.
	for (int i = 0; i < max_servers; ++i) {
		servers[i].pid = -1;
		servers[i].fd = -1;
	}

	int max_clients = 0;
	const char *fbbs_max_clients = getenv("FBBS_MAX_CLIENTS");
	if (fbbs_max_clients)
		max_clients = strtol(fbbs_max_clients, NULL, 10);
	max_clients = check_max_clients(max_clients);
	if (max_clients <= 0)
		return EXIT_FAILURE;

	// Drop privileges: group first, then user.
	if (setgid(BBSGID) != 0)
		return EXIT_FAILURE;
	if (setuid(BBSUID) != 0)
		return EXIT_FAILURE;

	int fd = bind_unix_path(socket_path);
	if (fd < 0)
		return EXIT_FAILURE;

	fb_signal(SIGPIPE, SIG_IGN);
	fb_signal(SIGCHLD, reap_child);
	fb_signal(SIGTERM, shutdown_backend);

	// Spawn the pool; on any failure kill what was already started.
	for (int i = 0; i < max_servers; ++i) {
		if (!spawn_server(servers + i)) {
			kill_servers();
			return EXIT_FAILURE;
		}
	}

	// EV_DEFAULT_ expands to the default loop plus a trailing comma.
	ev_run(EV_DEFAULT_ 0);
	return EXIT_SUCCESS;
}
/* Drive the backing event loop forever, one iteration per pass.  The
 * implementation is selected at build time: libuv when LWS_USE_LIBUV is
 * defined, libev otherwise. */
void Server::run() {
#ifdef LWS_USE_LIBUV
    for (;;) {
        uv_run((uv_loop_t *) loop, UV_RUN_ONCE);
    }
#else
    for (;;) {
        ev_run(loop, EVRUN_ONCE);
    }
#endif
}
/* Driver for the ev_periodic test scenarios.  Exactly one scenario is
 * enabled at a time; swap in the alternatives below as needed. */
int main(int argc, char **argv) {
	loop = EV_DEFAULT;

	periodic_time_test();
	/* periodic_interval_time_test(); */
	/* periodic_time_now_test(); */

	ev_run(loop, 0);
	return 0;
}
/* Thread entry point: attach this thread to the uinet stack, run the
 * interface's event loop until it stops, then detach. */
void *interface_thread_start(void *arg) {
	struct interface_config *ifcfg = arg;

	uinet_initialize_thread();
	ev_run(ifcfg->loop, 0);
	uinet_finalize_thread();

	return (NULL);
}
void CBStartEventLoop(void * vloop){ CBEventLoop * loop = vloop; CBLogVerbose("Starting network event loop."); // Start event loop ev_run(loop->base, 0); // Break from loop. Free everything. ev_loop_destroy(loop->base); free(loop->userEvent); CBFreeCallbackQueue(&loop->queue); CBFreeThread(loop->loopThread); free(loop); }
// Thread body: register the async wakeup watcher on the shared loop,
// invoke the timer callback once by hand to prime it, then run the loop.
virtual void Run() {
	ev_async_init(&s_asEvent, as_cb);
	ev_async_start(s_loop, &s_asEvent);
	// Direct call to kick the timer chain off; tm is only a placeholder
	// watcher argument here, not registered with the loop.
	ev_timer tm;
	tm_cb(s_loop, &tm, 0);
	// NOTE(review): Runtime's constructor side effects are not visible
	// from here — presumably it sets up per-thread script state; confirm.
	Runtime rt(NULL);
	ev_run(s_loop, 0);
}
void worker(int sv,int pid,const char * home_dir){ //printf("worker %d, sv: %d\n",pid,sv); id=pid; home_directory=home_dir; struct ev_loop * loop=ev_loop_new(0); ev_io w_new_client; ev_io_init(&w_new_client,new_client_cb,sv,EV_READ); ev_io_start(loop,&w_new_client); ev_run(loop); }
spx_private void *spx_thread_listening(void *arg){/*{{{*/ struct spx_thread_pending_transport *tpt = (struct spx_thread_pending_transport *) arg; size_t idx = tpt->idx; struct spx_module_context *mc = tpt->mc; SpxFree(tpt);//free the memory struct spx_receive_context *tc = spx_list_get(mc->receive_triggers,idx); struct spx_thread_context *stc = spx_list_get(mc->threadpool,idx); ev_io_init(&(tc->watcher),tc->receive_handler,stc->pipe[0],EV_READ); ev_io_start(stc->loop,&(tc->watcher)); ev_run(stc->loop,0); return NULL; }/*}}}*/
// Install a prepare watcher (runs before each loop iteration) carrying
// the Lua state, then run the default loop until it has no work left.
static int se_schedule(lua_State *L) {
	ev_prepare prepare;
	prepare.data = L; // se_on_schedule retrieves the Lua state from here
	ev_prepare_init(&prepare, se_on_schedule);
	// EV_DEFAULT_ expands to the default loop plus a trailing comma.
	ev_prepare_start(EV_DEFAULT_ &prepare);
	// Drop one reference so the prepare watcher alone does not keep the
	// loop alive.
	ev_unref(EV_DEFAULT);
	ev_run(EV_DEFAULT_ 0);
	return 0;
}
/* Test driver: schedule main_f on a fresh fiber, run the event loop to
 * completion, then verify the TAP plan of 7 checks. */
int main() {
	plan(7);

	memory_init();
	fiber_init(fiber_c_invoke);

	struct fiber *worker = fiber_new("main", main_f);
	fiber_wakeup(worker);
	ev_run(loop(), 0);

	fiber_free();
	memory_free();
	return check_plan();
}
void onRun() { /* run main loop */ ev_io_init(mContainer->GetAcceptWatcher(), accept_callback, mContainer->GetSocket(), EV_READ); ev_io_start(mContainer->GetEvLoop(), mContainer->GetAcceptWatcher()); // ev_async_init(mContainer->GetAsyncSendWatcher(), async_send_callback); // ev_async_start(mContainer->GetEvLoop(), mContainer->GetAsyncSendWatcher()); ev_set_userdata(mContainer->GetEvLoop(), mContainer); ev_run(mContainer->GetEvLoop(), 0); }
/* Watch a file for attribute changes with an ev_stat watcher, reporting
 * each change through stat_callback.  Blocks in ev_run(). */
int main(int argc, char *argv[]) { /* fix: argv is char *argv[], not char argv[] */
	EV_P = ev_default_loop(0); /* declares `struct ev_loop *loop` */
	ev_stat stat_watcher;

	ev_init(&stat_watcher, stat_callback);
	/* interval 0 selects libev's default stat polling interval */
	ev_stat_set(&stat_watcher, "/home/dj/my/workspace/my_prj/libev_prj/hello.txt", 0);
	ev_stat_start(EV_A, &stat_watcher);

	ev_run(EV_A, 0);
	return 0;
}
int main() { struct ev_loop* reactor=ev_loop_new(EVFLAG_AUTO); int fd=common::new_tcp_server(34567); ev_io w; ev_io_init(&w,do_accept,fd,EV_READ); ev_io_start(reactor,&w); cm_printf("ev run!\n"); ev_run(reactor,0); cm_printf("close socket %d;\n", fd); close(fd); ev_loop_destroy(reactor); }
/*
 * Minimal HTTP server: bind 0.0.0.0:8000, accept connections on the
 * default libev loop, and parse requests with http-parser (only the
 * message-complete callback is wired up; the rest are compiled out).
 * The E() macro wraps syscalls with error checking.
 */
int main(int argc, char **argv) {
	int sockfd;
	ev_io w;

	memset(&parser_settings, 0, sizeof parser_settings);
#if 0
	parser_settings.on_message_begin = on_message_begin;
	parser_settings.on_url = on_url;
	parser_settings.on_header_field = on_header_field;
	parser_settings.on_header_value = on_header_value;
	parser_settings.on_headers_complete = on_headers_complete;
	parser_settings.on_body = on_body;
#endif
	parser_settings.on_message_complete = on_message_complete;

	// Make sure a default loop can be created before touching sockets.
	if (!ev_default_loop(0))
		abort();

	E(sockfd = socket(AF_INET, SOCK_STREAM, 0));
	setnonblock(sockfd);
	{
		int yes = 1;
		E(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof yes));
	}
#if defined(HAVE_ACCFILT) && defined(TCP_DEFER_ACCEPT)
	{
		// Defer accept until data arrives (up to 60s) where supported.
		int timeout = 60;
		E(setsockopt(sockfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &timeout, sizeof timeout));
	}
#endif
	{
		struct sockaddr_in addr;
		memset(&addr, 0, sizeof addr);
		addr.sin_family = AF_INET;
		addr.sin_port = htons(8000);
		addr.sin_addr.s_addr = INADDR_ANY;
		E(bind(sockfd, (struct sockaddr *) &addr, sizeof addr));
	}
	E(listen(sockfd, SOMAXCONN));
	printf("Listening on http://0.0.0.0:8000/\n");

	ev_io_init(&w, on_accept, sockfd, EV_READ);
	// EV_DEFAULT_UC_ expands to the (already created) default loop
	// without touching its reference count, plus a trailing comma.
	ev_io_start(EV_DEFAULT_UC_ &w);
	ev_run(EV_DEFAULT_UC_ 0);
	return 0;
}
/*
 * Main service thread: load configuration, open the USB device, spawn
 * the USB reader thread, then alternate network I/O and USB signal
 * handling on a libev loop.  Exits the process on any setup failure.
 */
void *MainService(void * arg) {
	status=INITIALIZE;
	if(init("config","orderid")){
		puts("Initialization error.");
		exit(1);
	}
	else status=READY;
	update_display("default.png",0);
	libusb_init(NULL);
	handle=libusb_open_device_with_vid_pid(NULL,USB_VENDORID,USB_PRODUCTID);
	if(!handle) {
		printf("OPEN DEVICE ERROR.\n");
		getchar();
		exit(1);
	}
	libusb_claim_interface(handle,0);
	pthread_t readusb_thread;
	main_thread=pthread_self();
	struct ev_loop *loop = EV_DEFAULT;
	// Network watcher is armed lazily inside the loop below; the USB
	// reader thread signals us with SIGUSR1.
	ev_init (&net_watcher, net_cb);
	ev_signal_init(&usb_watcher,usb_cb,SIGUSR1);
	ev_signal_start(loop,&usb_watcher);
	if(pthread_create(&readusb_thread,NULL,read_from_usb,NULL)) {
		printf("CREATE THREAD ERROR.\n");
		getchar();
		exit(1);
	}
	while(1) {
		// (Re-)arm the network watcher each time it has been stopped.
		// NOTE(review): ev_is_active() is a one-argument macro; the
		// EV_A_ form here only expands cleanly when EV_MULTIPLICITY is
		// disabled — confirm this project's libev build flags.
		if(!ev_is_active(EV_A_ &net_watcher)) {
			ev_io_set (&net_watcher, fd_net, EV_READ);
			ev_io_start(loop,&net_watcher);
		}
		ev_run (loop, 0);
		// net_io is set by the callback when network data arrived; stop
		// the watcher so it can be re-armed with a fresh fd next pass.
		if(net_io)
			ev_io_stop (EV_A_ &net_watcher);
	}
	munmap((void *)p_order_id,sizeof(unsigned int));
	ghttp_close(request);
	ghttp_request_destroy(request);
	return 0;
}
// L statck: // 4: ud // 3: cb // 2: flags // 1: loop static int loop_run(lua_State *L) { struct ev_loop *loop; loop_t *lo = get_loop(L, 1); int flags = luaL_checkinteger(L, 2); luaL_checktype(L, 3, LUA_TFUNCTION); luaL_checkany(L, 4); lua_pushcfunction(L, traceback); loop = lo->loop; ev_set_userdata(loop, L); lua_pushboolean(L, ev_run(loop, flags)); ev_set_userdata(loop, NULL); return 1; }