// Constructor: create a private libev loop for this scheduler and start the
// two cross-thread notification watchers.  ev_async watchers are libev's
// sanctioned mechanism for waking a loop from another thread.
scheduler_impl_t::scheduler_impl_t() {
    ev_loop_ = ev_loop_new(0);
    // Make `this` retrievable from static libev callbacks via ev_userdata().
    ev_set_userdata(ev_loop_, this);
    // Signalled to (re)activate scheduling work inside the loop thread.
    ev_async_init(&activate_, activate_cb);
    ev_async_start(ev_loop_, &activate_);
    // Signalled to request that the event loop stop.
    ev_async_init(&break_loop_, break_loop_cb);
    ev_async_start(ev_loop_, &break_loop_);
}
// Register the ZipFile class with the node/V8 runtime: builds the function
// template, installs prototype methods and accessor properties, starts the
// async watcher used to signal save completion, and exports the constructor
// on `target`.
//
// Fix: the two watcher calls referenced `¬ifier` — an HTML-entity mangling
// of `&notifier` (`&not;` rendered as `¬`).  Restored the address-of
// expression so the code compiles and registers the intended global watcher.
void ZipFile::Initialize(Handle<Object> target) {
    constructor = Persistent<FunctionTemplate>::New(FunctionTemplate::New(ZipFile::New));
    constructor->InstanceTemplate()->SetInternalFieldCount(1);
    constructor->SetClassName(String::NewSymbol("ZipFile"));

    // functions
    NODE_SET_PROTOTYPE_METHOD(constructor, "open", Open);
    NODE_SET_PROTOTYPE_METHOD(constructor, "read", Read);
    NODE_SET_PROTOTYPE_METHOD(constructor, "readFileSync", readFileSync);
    NODE_SET_PROTOTYPE_METHOD(constructor, "close", Close);
    NODE_SET_PROTOTYPE_METHOD(constructor, "addFile", Add_File);
    NODE_SET_PROTOTYPE_METHOD(constructor, "replaceFile", Replace_File);
    NODE_SET_PROTOTYPE_METHOD(constructor, "addDirectory", Add_Directory);
    NODE_SET_PROTOTYPE_METHOD(constructor, "save", Save);

    // properties
    constructor->InstanceTemplate()->SetAccessor(String::NewSymbol("count"), get_prop);
    constructor->InstanceTemplate()->SetAccessor(String::NewSymbol("names"), get_prop);

    // Initiate async handler (fires Save_Callback on the default loop)
    ev_async_init(&notifier, Save_Callback);
    ev_async_start(EV_DEFAULT_UC_ &notifier);

    target->Set(String::NewSymbol("ZipFile"), constructor->GetFunction());
}
// Register a new async watcher on this loop.
// The supplied callback is dispatched through the shared async_callback
// trampoline whenever the watcher is signalled from another thread.  The
// watcher is retained in async_watchers and also handed back to the caller.
shared_ptr<event_async_watcher> events::onAsync(function<void(event_async_watcher*)> callback) {
    auto watcher_spec = new_watcher<event_async_watcher>(callback);

    ev_async_init(&watcher_spec->watcher, events::async_callback);
    ev_async_start(this->loop, &watcher_spec->watcher);

    async_watchers.push_back(watcher_spec);
    return watcher_spec;
}
/* Initialise the EMC server context: parse configuration, set up listening
 * sockets, install signal and async watchers on the default loop, and spin
 * up the worker thread pools.
 * Returns 0 on success, -1 on failure.
 * NOTE(review): on the early error returns the already-created
 * ctx->nuauth_directory tree is not freed — presumably the process exits
 * shortly after; confirm with callers. */
int emc_init_server(struct emc_server_context *ctx)
{
	int result;
	int max_workers;
	char *emc_data_file;

	g_thread_init(NULL);

	max_workers = emc_config_table_get_or_default_int("emc_max_workers", EMC_DEFAULT_MAX_WORKERS);
	emc_data_file = emc_config_table_get("emc_data_file");

	/* tree of known networks, ordered by netmask */
	ctx->nuauth_directory = g_tree_new( emc_netmask_order_func );

	result = emc_parse_datafile(ctx, emc_data_file);
	if (result < 0) {
		return -1;
	}

	loop = ev_default_loop(0);

	result = emc_setup_servers(loop, ctx);
	if (result < 0) {
		return -1;
	}

	/* SIGINT and SIGTERM share the same shutdown handler */
	ev_signal_init(&sigint_watcher, sigint_cb, SIGINT);
	ev_signal_start(loop, &sigint_watcher);
	ev_signal_init(&sigterm_watcher, sigint_cb, SIGTERM);
	ev_signal_start(loop, &sigterm_watcher);
	ev_signal_init(&sigusr1_watcher, sigusr1_cb, SIGUSR1);
	ev_signal_start(loop, &sigusr1_watcher);

	/* signalled by worker threads when a client is ready for processing */
	ev_async_init(&client_ready_signal, emc_client_ready_cb);
	ev_async_start(loop, &client_ready_signal);

	ctx->continue_processing = 1;

	/* every watcher callback needs access to the server context */
	sigint_watcher.data = ctx;
	sigterm_watcher.data = ctx;
	sigusr1_watcher.data = ctx;
	client_ready_signal.data = ctx;

	/* keep up to half the workers parked as idle threads */
	g_thread_pool_set_max_unused_threads( (int)(max_workers/2) );

	ctx->pool_tls_handshake = g_thread_pool_new((GFunc)emc_worker_tls_handshake, NULL, max_workers, FALSE, NULL);
	ctx->pool_reader = g_thread_pool_new((GFunc)emc_worker_reader, NULL, max_workers, FALSE, NULL);

	ctx->work_queue = g_async_queue_new();

	ctx->tls_client_list_mutex = g_mutex_new();

	log_printf(DEBUG_LEVEL_DEBUG, "Max: %d", g_thread_pool_get_max_unused_threads());

	return 0;
}
void *worker_function(void *arg) { struct sThreadData *tdata = (struct sThreadData *)arg; int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); struct sockaddr_in addr; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = htons(tdata->args.port); addr.sin_addr.s_addr = inet_addr(tdata->args.ip); bind(sock, (struct sockaddr *) &addr, sizeof(addr)); listen(sock, SOMAXCONN); struct ev_loop *loop = tdata->loop;//ev_loop_new(EVFLAG_NOSIGMASK); struct ev_io w_accept; ev_io_init(&w_accept, accept_cb, sock, EV_READ); ev_io_start(loop, &w_accept); //struct ev_async w_exit; ev_async_init(tdata->async_watcher, exit_cb); ev_async_start(loop, tdata->async_watcher); ev_loop(loop, 0); if (!tdata->args.daemon) printf("ev_loop finished\n"); //ev_loop_destroy(loop); return NULL; }
// Called by libebb once a full HTTP request has been parsed on `connection`.
// Stamps timing/keep-alive state onto the per-connection AsyncConnection,
// arms the async write watcher (so a worker thread can wake this loop to
// send the response), routes the request path to a handler factory, and
// schedules the resulting task.
void request_complete(ebb_request *request) {
    ebb_connection *connection = (ebb_connection *)request->data;
    AsyncConnection *connection_data = (AsyncConnection *)connection->data;
    connection_data->connection = connection;
    connection_data->request = request;
    // Arrival timestamp, used for latency accounting.
    gettimeofday(&connection_data->starttime, nullptr);
    connection_data->keep_alive_flag = ebb_request_should_keep_alive(request);
    // write_cb fires on this connection's loop when the response is ready.
    ev_async_init(&connection_data->ev_write, write_cb);
    ev_async_start(connection_data->ev_loop, &connection_data->ev_write);
    // Try to route to appropriate handler based on path
    const AbstractRequestHandlerFactory *handler_factory;
    try {
        handler_factory = Router::route(connection_data->path);
    } catch (const RouterException &exc) {
        // Unroutable path: answer immediately with the routing error.
        std::string exception_message(exc.what());
        connection_data->respond("Could not route request, std::exception was: \n" + exception_message);
        return;
    }
    std::shared_ptr<Task> task = handler_factory->create(connection_data);
    task->setPriority(Task::HIGH_PRIORITY); // give RequestParseTask high priority
    SharedScheduler::getInstance().getScheduler()->schedule(task);
    connection_data->waiting_for_response = true;
}
/*
 * Internal generic eredis runner for the event loop (write)
 *
 * Lazily creates the private libev loop on first entry, arms the reconnect
 * timer and the "send" async watcher, then blocks in ev_run() until the
 * loop is stopped elsewhere.  The INRUN flag brackets the blocking run; in
 * thread mode (IS_INTHR) the creator thread is released via async_lock
 * once the loop is about to start.
 */
static void
_eredis_run( eredis_t *e )
{
  if (! e->loop) {
    ev_timer *levt;
    ev_async *leva;

    e->loop = ev_loop_new( EVFLAG_AUTO );

    /* Connect timer: first fires immediately, then every second, to
       (re)establish redis connections. */
    levt = &e->connect_timer;
    ev_timer_init( levt, _eredis_ev_connect_cb, 0., 1. );
    levt->data = e;
    ev_timer_start( e->loop, levt );

    /* Async send: other threads signal this watcher to make the loop
       thread flush queued commands. */
    leva = &e->send_async;
    ev_async_init( leva, _eredis_ev_send_cb );
    leva->data = e;
    ev_async_start( e->loop, leva );
  }

  SET_INRUN(e);

  if (IS_INTHR(e))
    /* Thread mode - release the thread creator */
    pthread_mutex_unlock( &(e->async_lock) );

  ev_run( e->loop, 0 );

  UNSET_INRUN(e);
}
int main (int argc, char**argv) { if ( !argv[1] ) { printf("need dir parameter\n"); exit(1); } char * argvp = NULL; argvp = realpath(argv[1], pwd); dp = opendir(pwd); if ( errno ) { printf("%s, %s is not valid directory\n", strerror(errno), pwd); exit(1); } int dfd = dirfd(dp); loop = ev_loop_new (EVBACKEND_KQUEUE); ev_timer_init (&timeout_watcher, timeout_cb, 1, 0.); ev_timer_start (loop, &timeout_watcher); ev_io_init (&dir_watcher, dir_cb, dfd, EV_LIBUV_KQUEUE_HACK); ev_io_start (loop, &dir_watcher); ev_io_init (&cmd_watcher, cmd_cb, 0, EV_READ); ev_io_start (loop, &cmd_watcher); ev_idle_init (&repeat_watcher, repeat); ev_async_init (&ready_watcher, ready); ev_async_start (loop, &ready_watcher); if (eio_init (want_poll, 0)) { abort (); }; ev_run (loop, 0); if ( root ) { free(root); } printf("count: %d\n", (int)freelist_len); /* free all allocated path */ int i; if ( freelist ) { for (i = 0; i < freelist_len; ++i ) { if (freelist[i]) { free(freelist[i]); } } free(freelist); } if ( dp ) { closedir(dp); } return 0; }
/* Per-event-loop initialisation: create the cross-thread command queue and
 * start the wakeup async watcher so other threads can rouse this loop.
 * `wakeup.data` points back at the owning as_event_loop for use inside
 * as_ev_wakeup(). */
static inline void as_ev_init_loop(as_event_loop* event_loop) {
	as_queue_init(&event_loop->queue, sizeof(void*), AS_EVENT_QUEUE_INITIAL_CAPACITY);
	ev_async_init(&event_loop->wakeup, as_ev_wakeup);
	event_loop->wakeup.data = event_loop;
	ev_async_start(event_loop->loop, &event_loop->wakeup);
}
// Responder thread body: start the accept watcher, expose `this` through
// the loop's userdata for static callbacks, arm the async send watcher,
// and block in ev_run() until the loop is broken elsewhere.
void MatchServerTest::RespondRunnableHandle() {
    // Start the main loop
    ev_io_start(mLoop, &mAccept_watcher);
    ev_set_userdata(mLoop, this);
    // Signalled by other threads to deliver outgoing data; handled by
    // async_recv_callback on the loop thread.
    ev_async_init(&mAsync_send_watcher, async_recv_callback);
    ev_async_start(mLoop, &mAsync_send_watcher);
    ev_run(mLoop, 0);
}
/**
 * Event loop for a client context.
 *
 * Usage of the loop is the following:
 * - client_injector_signal: send new client socket to the loop. Callback is
 *   client_injector_cb().
 * - client_writer_signal: if a write is needed, ask for removal of client socket
 *   from the loop and process to write by poping write event from a per-client
 *   message queue. Callback is client_writer_cb().
 * - client_destructor_signal: ask for removal a client socket from the loop. This
 *   is used by the command_mode to trigger disconnection. Callback is
 *   client_destructor_cb().
 * - client_accept_cb(): treat new client. This callback is invoked by the
 *   watcher on the server socket.
 * - loop_fini_signal: async signal used to trigger loop end
 *
 * Blocks in ev_loop() until loop_fini_signal fires, then destroys the loop
 * and closes the listening socket.
 */
void tls_user_main_loop(struct tls_user_context_t *context, GMutex * mutex)
{
	ev_io client_watcher;
	ev_timer timer;

	context->loop = ev_loop_new(0);

	/* register injector cb */
	ev_async_init(&context->client_injector_signal, client_injector_cb);
	ev_async_start(context->loop, &context->client_injector_signal);
	context->client_injector_signal.data = context;

	/* register writer cb */
	ev_async_init(&context->client_writer_signal, client_writer_cb);
	ev_async_start(context->loop, &context->client_writer_signal);
	context->client_writer_signal.data = context;

	/* periodic client timeout check, every 200 ms */
	ev_timer_init (&timer, client_timeout_cb, 0, 0.200);
	ev_timer_start (context->loop, &timer);

	/* register destructor cb */
	ev_async_init(&context->client_destructor_signal, client_destructor_cb);
	ev_async_start(context->loop, &context->client_destructor_signal);
	context->client_destructor_signal.data = context;

	/* register loop finalizer cb */
	ev_async_init(&context->loop_fini_signal, loop_destructor_cb);
	ev_async_start(context->loop, &context->loop_fini_signal);
	context->loop_fini_signal.data = context;

	/* register accept cb; the listening socket must be non-blocking so
	 * accept() never stalls the loop */
	fcntl(context->sck_inet,F_SETFL,(fcntl(context->sck_inet,F_GETFL)|O_NONBLOCK));
	ev_io_init(&client_watcher, client_accept_cb, context->sck_inet, EV_READ);
	ev_io_start(context->loop, &client_watcher);
	client_watcher.data = context;

	log_message(INFO, DEBUG_AREA_USER, "[+] NuAuth is waiting for client connections.");
	ev_loop(context->loop, 0);

	ev_loop_destroy(context->loop);
	close(context->sck_inet);
}
/* Initialise a worker-thread structure: private libev loop, mutex, job
 * queue, a sentinel "stop" task, and the async watcher through which other
 * threads hand jobs to the loop thread (async_routine drains the queue).
 * NOTE(review): if ev_loop_new() fails this returns silently with a
 * partially initialised struct and the caller gets no error indication —
 * confirm callers check self->loop. */
void fsock_thread_init (struct fsock_thread *self) {
  self->loop = ev_loop_new (0);
  if (self->loop == NULL) {
    return;
  }
  fsock_mutex_init (&self->sync);
  fsock_queue_init (&self->jobs);
  fsock_task_init (&self->stop, 0, NULL, NULL);
  ev_async_init (&self->job_async, async_routine);
  ev_async_start (self->loop, &self->job_async);
}
void * work(void *p) { signal(SIGPIPE, SIG_IGN); ev_idle idle_watcher; ev_idle_init (&idle_watcher, idle_cb); ev_idle_start(work_loop, &idle_watcher); ev_async_init(&async_watcher, async_cb); ev_async_start(work_loop, &async_watcher); ev_loop(work_loop, 0); return (void *)0; }
/*
 * this thread operates the select() etc. via libev.
 *
 * Registers the three global async watchers (job injection, loop exit,
 * curl wakeup) on the default loop, runs the loop until EventExitCallback
 * stops it, then tears everything down under EventExitQueueMutex.
 */
void *client_event_thread(void *arg)
{
	struct CitContext libev_client_CC;

	CtdlFillSystemContext(&libev_client_CC, "LibEv Thread");
	pthread_setspecific(evConKey, IOLog);
	EVQM_syslog(LOG_DEBUG, "client_event_thread() initializing\n");

	event_base = ev_default_loop (EVFLAG_AUTO);

	/* other threads queue jobs and fire this to have them picked up */
	ev_async_init(&AddJob, QueueEventAddCallback);
	ev_async_start(event_base, &AddJob);
	/* fired to request a clean loop shutdown */
	ev_async_init(&ExitEventLoop, EventExitCallback);
	ev_async_start(event_base, &ExitEventLoop);
	/* fired to kick the curl multi handle */
	ev_async_init(&WakeupCurl, WakeupCurlCallback);
	ev_async_start(event_base, &WakeupCurl);

	curl_init_connectionpool();

	ev_run (event_base, 0);

	EVQM_syslog(LOG_DEBUG, "client_event_thread() exiting\n");

	///what todo here? CtdlClearSystemContext();
	/* teardown is serialised against the exit path via this mutex */
	pthread_mutex_lock(&EventExitQueueMutex);
	ev_loop_destroy (EV_DEFAULT_UC);
	event_base = NULL;
	DeleteHash(&QueueEvents);
	InboundEventQueue = NULL;
	DeleteHash(&InboundEventQueues[0]);
	DeleteHash(&InboundEventQueues[1]);
	/* citthread_mutex_destroy(&EventQueueMutex); TODO */
	evcurl_shutdown();

	CtdlDestroyEVCleanupHooks();

	pthread_mutex_unlock(&EventExitQueueMutex);
	EVQShutDown = 1;
	return(NULL);
}
// Thread entry point: start the shared async watcher on s_loop, prime the
// timer callback once by hand, create the runtime, and run the loop.
virtual void Run() {
    ev_async_init(&s_asEvent, as_cb);
    ev_async_start(s_loop, &s_asEvent);
    // NOTE(review): `tm` is handed to tm_cb without ev_timer_init(); this
    // presumably relies on tm_cb (re)arming the timer itself — confirm it
    // does not read uninitialized watcher fields.
    ev_timer tm;
    tm_cb(s_loop, &tm, 0);
    Runtime rt(NULL);
    ev_run(s_loop, 0);
}
/* libuv (historical libev-backed build): initialise a uv_async handle on
 * `loop`.  The internal ev_async watcher is started immediately and the
 * loop is unref'd so an idle async handle does not keep the loop alive.
 * Always returns 0. */
int uv_async_init(uv_loop_t* loop, uv_async_t* async, uv_async_cb async_cb) {
  uv__handle_init(loop, (uv_handle_t*)async, UV_ASYNC);
  loop->counters.async_init++;

  ev_async_init(&async->async_watcher, uv__async);
  async->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(loop->ev, &async->async_watcher);
  ev_unref(loop->ev);

  return 0;
}
/* Attach loop-level state to an existing ev_loop: store the wrapper object
 * as the loop's userdata and install the heap-allocated "breaker" async
 * watcher used to interrupt the loop.  No-op when no ev_loop exists yet. */
static void
setup_ev_loop (MilterLibevEventLoop *loop)
{
    MilterLibevEventLoopPrivate *priv = MILTER_LIBEV_EVENT_LOOP_GET_PRIVATE(loop);

    if (!priv->ev_loop)
        return;

    ev_set_userdata(priv->ev_loop, loop);
    priv->breaker = g_new0(ev_async, 1);
    ev_async_init(priv->breaker, cb_break);
    ev_async_start(priv->ev_loop, priv->breaker);
}
manos_data_t * manos_init (struct ev_loop *loop) { manos_data_t *data = malloc (sizeof (manos_data_t)); memset (data, 0, sizeof (manos_data_t)); data->loop = loop; ev_idle_init (&eio_idle_watcher, eio_on_idle); eio_idle_watcher.data = data; ev_async_init (&eio_want_poll_watcher, eio_on_want_poll); ev_async_start (EV_DEFAULT_UC_ &eio_want_poll_watcher); eio_want_poll_watcher.data = data; ev_async_init (&eio_done_poll_watcher, eio_on_done_poll); ev_async_start (EV_DEFAULT_UC_ &eio_done_poll_watcher); eio_done_poll_watcher.data = data; eio_init (eio_want_poll, eio_done_poll); }
/*
 * this thread operates writing to the message database via libev.
 *
 * Creates a private loop stored in the global event_db, installs the job
 * injection and exit async watchers, runs until DBEventExitCallback stops
 * the loop, then tears down the queues and destroys the loop.  The local
 * `tmp` keeps the loop pointer alive for ev_loop_destroy() after the
 * global has been cleared.
 */
void *db_event_thread(void *arg)
{
	ev_loop *tmp;
	struct CitContext libev_msg_CC;

	pthread_setspecific(evConKey, DBLog);
	CtdlFillSystemContext(&libev_msg_CC, "LibEv DB IO Thread");
	EVQM_syslog(LOG_DEBUG, "dbevent_thread() initializing\n");

	tmp = event_db = ev_loop_new (EVFLAG_AUTO);

	/* other threads queue DB jobs and fire this to have them picked up */
	ev_async_init(&DBAddJob, DBQueueEventAddCallback);
	ev_async_start(event_db, &DBAddJob);
	/* fired to request a clean loop shutdown */
	ev_async_init(&DBExitEventLoop, DBEventExitCallback);
	ev_async_start(event_db, &DBExitEventLoop);

	ev_run (event_db, 0);

	/* teardown serialised against the exit path via this mutex */
	pthread_mutex_lock(&DBEventExitQueueMutex);

	event_db = NULL;
	EVQM_syslog(LOG_INFO, "dbevent_thread() exiting\n");
	DeleteHash(&DBQueueEvents);
	DBInboundEventQueue = NULL;
	DeleteHash(&DBInboundEventQueues[0]);
	DeleteHash(&DBInboundEventQueues[1]);
	/* citthread_mutex_destroy(&DBEventQueueMutex); TODO */

	ev_loop_destroy (tmp);
	pthread_mutex_unlock(&DBEventExitQueueMutex);
	return(NULL);
}
/* libuv (early libev-backed API): initialise a uv_async handle on the
 * default loop.  The watcher is started immediately and the default loop
 * is unref'd so an idle async handle does not keep it alive.
 * Always returns 0. */
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb, uv_close_cb close_cb, void* data) {
  uv__handle_init(handle, UV_ASYNC, close_cb, data);

  ev_async_init(&handle->async_watcher, uv__async);
  /* back-pointer so uv__async can recover the uv handle from the watcher */
  handle->async_watcher.data = handle;
  handle->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(EV_DEFAULT_UC_ &handle->async_watcher);
  ev_unref(EV_DEFAULT_UC);

  return 0;
}
/* libuv (typed-handle libev-backed API): initialise a uv_async_t on the
 * default loop, bump the init counter, start the watcher, and unref the
 * loop so an idle async handle does not keep it alive.
 * Always returns 0. */
int uv_async_init(uv_async_t* async, uv_async_cb async_cb) {
  uv__handle_init((uv_handle_t*)async, UV_ASYNC);
  uv_counters()->async_init++;

  ev_async_init(&async->async_watcher, uv__async);
  /* back-pointer so uv__async can recover the uv handle from the watcher */
  async->async_watcher.data = async;
  async->async_cb = async_cb;

  /* Note: This does not have symmetry with the other libev wrappers. */
  ev_async_start(EV_DEFAULT_UC_ &async->async_watcher);
  ev_unref(EV_DEFAULT_UC);

  return 0;
}
/* Register the interrupt watcher plus one read watcher per server socket
 * on the manager loop, then run the loop until continue_processing is
 * cleared.  Always returns 0. */
static int wait_connection(server_generic_t **server, size_t nserver)
{
        unsigned int idx;

        /* lets other threads interrupt/wake the manager loop */
        ev_async_init(&ev_trigger, ev_trigger_cb);
        ev_async_start(manager_event_loop, &ev_trigger);

        for ( idx = 0; idx < nserver; idx++ ) {
                ev_io_init(&server[idx]->evio, connection_cb, server[idx]->sock, EV_READ);
                ev_io_start(manager_event_loop, &server[idx]->evio);
        }

        while ( continue_processing )
                ev_loop(manager_event_loop, 0);

        return 0;
}
bool Loop::init() { SLOG(INFO) << "init loop begin..."; if (_loop != nullptr) { SLOG(WARNING) << "loop has been inited, skip."; return true; } _loop = ev_loop_new(EVBACKEND_EPOLL | EVFLAG_NOENV); if (_loop == nullptr) { SLOG(FATAL) << "create loop fail!"; throw_system_error("create loop fail"); } ev_async_init(&_async, ev_async_cb); ev_async_start(_loop, &_async); return true; }
/* Create a new event loop: allocates the loop context and its user-event
 * watcher, starts the async watcher, initialises the callback queue, and
 * spawns the loop thread.  On success stores the loop in loopID->ptr and
 * returns true; on allocation failure cleans up and returns false.
 *
 * Fix: ev_loop_new() and both malloc() results were used unchecked, so an
 * allocation failure crashed instead of being reported to the caller. */
bool CBNewEventLoop(CBDepObject * loopID,void (*onError)(void *),void (*onDidTimeout)(void *,void *,CBTimeOutType),void * communicator){
	struct ev_loop * base = ev_loop_new(0);
	if (base == NULL)
		return false;
	// Create arguments for the loop
	CBEventLoop * loop = malloc(sizeof(*loop));
	if (loop == NULL) {
		ev_loop_destroy(base);
		return false;
	}
	loop->base = base;
	loop->onError = onError;
	loop->onTimeOut = onDidTimeout;
	loop->communicator = communicator;
	loop->userEvent = malloc(sizeof(*loop->userEvent));
	if (loop->userEvent == NULL) {
		free(loop);
		ev_loop_destroy(base);
		return false;
	}
	loop->userEvent->loop = loop;
	ev_async_init((struct ev_async *)loop->userEvent, CBDoRun);
	ev_async_start(base, (struct ev_async *)loop->userEvent);
	// Create queue
	CBInitCallbackQueue(&loop->queue);
	// Create thread
	CBNewThread(&loop->loopThread, CBStartEventLoop, loop);
	loopID->ptr = loop;
	return true;
}
/* __init__ for the libevwrapper Async type: expects a single `loop`
 * keyword/positional argument, keeps a reference to it, and initialises
 * (but does not start) the underlying ev_async watcher.
 * Returns 0 on success, -1 with a Python exception set on failure.
 *
 * Fix: on parse failure the accurate TypeError raised by
 * PyArg_ParseTupleAndKeywords was being overwritten with a copy-pasted
 * message about socket file descriptors, which had nothing to do with this
 * function; let the original parse error propagate instead. */
static int Async_init(libevwrapper_Async *self, PyObject *args, PyObject *kwds) {
    libevwrapper_Loop *loop;
    static char *kwlist[] = {"loop", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O", kwlist, &loop)) {
        /* PyArg_* already set a descriptive TypeError */
        return -1;
    }
    if (loop) {
        /* "O" yields a borrowed reference; take our own for self->loop */
        Py_INCREF(loop);
        self->loop = loop;
    } else {
        return -1;
    }
    /* NOTE(review): "O" does no type check — presumably callers always pass
     * a Loop instance; confirm or switch to "O!" with the Loop type object. */
    ev_async_init(&self->async, async_callback);
    return 0;
}
const char* slave_run_cb(void) { struct ev_loop *loop; ev_io io_listen; ev_signal sigint_listen; ev_signal sigterm_listen; pthread_mutex_init(&srv.pending_lk, 0); pthread_mutex_init(&srv.clients_lk, 0); if (!(loop = ev_default_loop(EVFLAG_AUTO))) return 1; syslog(LOG_INFO, "listening on %s:%hd", inet_ntoa(srv.addr.sin_addr), ntohs(srv.addr.sin_port)); ev_signal_init(&sigint_listen, &nol_s_ev_sigint, SIGINT); ev_signal_init(&sigterm_listen, &nol_s_ev_sigint, SIGTERM); ev_io_init(&io_listen, &nol_s_ev_conn_accept, srv.listen_sock, EV_READ); ev_io_init(&srv.master_io, &nol_s_ev_master, srv.master_sock, EV_READ); ev_async_init(&srv.client_status, &nol_s_ev_client_status); /* catch SIGINT so we can close connections and clean up properly */ ev_signal_start(loop, &sigint_listen); ev_signal_start(loop, &sigterm_listen); ev_io_start(loop, &io_listen); ev_io_start(loop, &srv.master_io); ev_async_start(loop, &srv.client_status); /** * This loop will listen for new connections and data from * the master. **/ ev_loop(loop, 0); close(srv.listen_sock); ev_default_destroy(); closelog(); pthread_mutex_destroy(&srv.pending_lk); pthread_mutex_destroy(&srv.clients_lk); nol_s_hook_invoke(HOOK_CLEANUP); return 0; }
/* Register the interrupt async watcher and one read watcher per configured
 * UDP server, then run the event loop forever.
 * NOTE(review): ev_async_start()/ev_io_start()/ev_loop() are called without
 * a loop argument — this only compiles against libev built without
 * EV_MULTIPLICITY (a single implicit default loop); confirm the build
 * configuration.  The `events` VLA lives on this stack frame; that is safe
 * only because ev_loop() here never returns while the watchers are in use. */
static void wait_for_event(void)
{
        size_t i;
        int udp_event_fd;
        ev_io events[config.udp_nserver];

        ev_async_init(&ev_interrupt, libev_interrupt_cb);
        ev_async_start(&ev_interrupt);

        for ( i = 0; i < config.udp_nserver; i++ ) {
                udp_event_fd = udp_server_get_event_fd(config.udp_server[i]);
                ev_io_init(&events[i], libev_udp_cb, udp_event_fd, EV_READ);
                /* each watcher carries its owning server for the callback */
                events[i].data = config.udp_server[i];
                ev_io_start(&events[i]);
        }

        ev_loop(0);
}
/*
 * Set up a thread's information: private event loop, cross-thread
 * notification watcher, and the incoming-connection queue.  Exits the
 * process on allocation failure.
 */
static void setup_thread(WORK_THREAD *me) {
    me->loop = ev_loop_new(0);
    if (me->loop == NULL) {
        fprintf(stderr, "Can't allocate event base\n");
        exit(1);
    }

    /* Listen for notifications from other threads */
    ev_async_init(&me->async_watcher, async_cb);
    me->async_watcher.data = me;
    ev_async_start(me->loop, &me->async_watcher);

    me->new_conn_queue = malloc(sizeof(struct conn_queue));
    if (me->new_conn_queue == NULL) {
        perror("Failed to allocate memory for connection queue\n");
        exit(EXIT_FAILURE);
    }
    cq_init(me->new_conn_queue);
}
// Allocate an ebb connection together with its AsyncConnection bookkeeping
// object, wire up the libebb callbacks, and start the per-connection async
// write watcher on the server's loop.  Returns nullptr if the connection
// struct cannot be allocated.
ebb_connection *new_connection(ebb_server *server, struct sockaddr_in *addr) {
    ebb_connection *conn = (ebb_connection *)malloc(sizeof(ebb_connection));
    if (conn == nullptr) {
        return nullptr;
    }

    AsyncConnection *async_data = new AsyncConnection();
    async_data->addr = *addr;

    // Initializes the connection
    ebb_connection_init(conn);
    conn->data = async_data;
    conn->new_request = new_request;
    conn->on_close = on_close;
    conn->on_timeout = on_timeout;

    // write_cb recovers the AsyncConnection through the watcher's data ptr.
    async_data->ev_loop = server->loop;
    async_data->ev_write.data = async_data;
    ev_async_init(&async_data->ev_write, write_cb);
    ev_async_start(server->loop, &async_data->ev_write);

    return conn;
}
/* Zero and initialise an async watcher with the given callback, and stash
 * the caller-supplied context in watcher->data (ev_async_init does not
 * touch the data field, so the order here is safe).  The watcher is NOT
 * started; callers must ev_async_start() it on their loop.
 * Note: "Aaync" is a pre-existing typo in the name, kept because callers
 * reference it. */
spx_private void RegisterAayncWatcher(ev_async *watcher, void(*cb)(struct ev_loop *loop, ev_async *watcher, int revents), void *data){/*{{{*/
    SpxZero(*watcher);
    ev_async_init(watcher, cb);
    watcher->data = data;
}/*}}}*/