Пример #1
0
// Constructs a websocket server listening on `port` with TCP keep-alive
// tuned by ka_time/ka_probes/ka_interval (seconds / probe count / seconds).
// Throws on context-creation failure.
Server::Server(unsigned int port, unsigned int ka_time, unsigned int ka_probes, unsigned int ka_interval)
{
    // Silence all libwebsockets logging.
    clws::lws_set_log_level(0, nullptr);

    // The protocols array must outlive the context (lws keeps the pointer),
    // so it is heap-allocated and intentionally not freed on success.
    clws::lws_protocols *protocols = new clws::lws_protocols[2];
    protocols[0] = {"default", callback, sizeof(SocketExtension)};
    protocols[1] = {nullptr, nullptr, 0};  // sentinel terminator required by lws

    clws::lws_context_creation_info info = {};
    info.port = port;
    info.protocols = protocols;
    info.gid = info.uid = -1;  // keep current group/user ids
    info.user = &internals;
    info.options = clws::LWS_SERVER_OPTION_LIBEV;
    info.ka_time = ka_time;
    info.ka_probes = ka_probes;
    info.ka_interval = ka_interval;

    if (!(context = clws::lws_create_context(&info))) {
        // Don't leak the protocols array on the failure path.  Also throw a
        // catchable value: the previous bare `throw;` had no active exception
        // to rethrow, which calls std::terminate().
        delete[] protocols;
        throw "lws_create_context failed";
    }

    clws::lws_sigint_cfg(context, 0, nullptr);
#ifdef LWS_USE_LIBUV
    clws::lws_initloop(context, (uv_loop_t *) (loop = uv_default_loop()));
#else
    clws::lws_initloop(context, loop = ev_loop_new(LWS_FD_BACKEND));
#endif
}
Пример #2
0
/*
 * Creates the default event base, picking whichever event library this
 * build was configured against (mini-event, libev, or libevent).
 * sigs:      nonzero to request a loop that can handle signals (libev only:
 *            selects ev_default_loop over a private ev_loop_new loop).
 * time_secs/time_tv: shared-time pointers used only by mini-event.
 * Returns the new base cast to ub_event_base*, or NULL on failure from the
 * underlying library.
 */
struct ub_event_base*
ub_default_event_base(int sigs, time_t* time_secs, struct timeval* time_tv)
{
	void* base;

	/* silence "unused" warnings in configurations where a branch below
	 * does not reference it */
	(void)base;
#ifdef USE_MINI_EVENT
	(void)sigs;
	/* use mini event time-sharing feature */
	base = event_init(time_secs, time_tv);
#else
	(void)time_secs;
	(void)time_tv;
#  if defined(HAVE_EV_LOOP) || defined(HAVE_EV_DEFAULT_LOOP)
	/* libev: the default loop integrates signal handling; a fresh loop
	 * does not */
	if(sigs)
		base = ev_default_loop(EVFLAG_AUTO);
	else
		base = ev_loop_new(EVFLAG_AUTO);
#  else
	(void)sigs;
#    ifdef HAVE_EVENT_BASE_NEW
	base = event_base_new();
#    else
	/* very old libevent without event_base_new() */
	base = event_init();
#    endif
#  endif
#endif
	return (struct ub_event_base*)base;
}
Пример #3
0
//### global data area ###//
/* Lazily creates the shared poll-backed libev loop, tuning the collect
 * intervals to batch timer (0.1s) and I/O (0.05s) callbacks.  Safe to
 * call repeatedly: only the first call does any work. */
static void build_global_loop()
{
    if(!ev_default) {
        ev_default = ev_loop_new(EVBACKEND_POLL);
        ev_set_timeout_collect_interval(ev_default, 0.1);
        ev_set_io_collect_interval(ev_default, 0.05);
    }
}
Пример #4
0
/*
 * Initializes a mumble client: platform socket layer (winsock on Windows),
 * SSL state, and a private libev event loop.
 * Returns 0 on success, 1 on failure.  On failure after WSAStartup, the
 * winsock initialization is undone so init can be retried cleanly.
 */
int mumble_init(struct mumble_t* client)
{
#ifdef _WIN32
    WSADATA wsaData;
    int result = WSAStartup(MAKEWORD(2, 2), &wsaData);

    if (result != 0)
    {
        LOG_FATAL("Failed to initialize winsock");

        return 1;
    }
#endif /* _WIN32 */

    client->servers = NULL;
    client->num_servers = 0;

    if (mumble_ssl_init(client) != 0)
    {
#ifdef _WIN32
        /* Balance the successful WSAStartup above. */
        WSACleanup();
#endif /* _WIN32 */
        return 1;
    }

    /* Initialize a new event loop. */
    client->loop = ev_loop_new(0);

    if (client->loop == NULL)
    {
        /* ev_loop_new returns NULL when no backend could be created. */
        LOG_FATAL("Failed to initialize event loop");
#ifdef _WIN32
        WSACleanup();
#endif /* _WIN32 */
        return 1;
    }

    return 0;
}
Пример #5
0
/* Create the libev event loop and incoming event buffer */
static VALUE NIO_Selector_allocate(VALUE klass)
{
    struct NIO_Selector *selector;
    int fds[2];

    /* Use a pipe to implement the wakeup mechanism. I know libev provides
       async watchers that implement this same behavior, but I'm getting
       segvs trying to use that between threads, despite claims of thread
       safety. Pipes are nice and safe to use between threads.

       Note that Java NIO uses this same mechanism */
    if(pipe(fds) < 0) {
        rb_sys_fail("pipe");
    }

    /* Only the read end is made non-blocking; the write end stays
       blocking.  NOTE(review): confirm that is intentional — a full pipe
       would block the waker. */
    if(fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0) {
        rb_sys_fail("fcntl");
    }

    /* xmalloc is Ruby's allocator: it raises NoMemoryError rather than
       returning NULL. */
    selector = (struct NIO_Selector *)xmalloc(sizeof(struct NIO_Selector));
    /* NOTE(review): ev_loop_new can return NULL; the result is not
       checked here (it is validated later in #initialize). */
    selector->ev_loop = ev_loop_new(0);
    ev_init(&selector->timer, NIO_Selector_timeout_callback);

    selector->wakeup_reader = fds[0];
    selector->wakeup_writer = fds[1];

    /* Watch the read end of the pipe; the watcher is started here but the
       loop is only run from #select. */
    ev_io_init(&selector->wakeup, NIO_Selector_wakeup_callback, selector->wakeup_reader, EV_READ);
    selector->wakeup.data = (void *)selector;
    ev_io_start(selector->ev_loop, &selector->wakeup);

    selector->closed = selector->selecting = selector->ready_count = 0;
    selector->ready_array = Qnil;

    /* Hand the struct to Ruby's GC with mark/free hooks. */
    return Data_Wrap_Struct(klass, NIO_Selector_mark, NIO_Selector_free, selector);
}
Пример #6
0
/**
 * Normal constructor for EventLoop instance.
 *
 * Accepts an optional backend flag (one of the EventLoop::BACKEND_*
 * constants / libev EVBACKEND_* values) and creates a private ev loop
 * with it.  Throws when the backend value is not recognized.
 */
PHP_METHOD(EventLoop, __construct)
{
	int backend = EVFLAG_AUTO;
	event_loop_object *obj = (event_loop_object *)zend_object_store_get_object(getThis() TSRMLS_CC);
	
	/* __construct must only run once per object. */
	assert( ! obj->loop);
	
	if(zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|l", &backend) != SUCCESS) {
		return;
	}
	
	/* Check parameter */
	if(EVFLAG_AUTO       != backend &&
	   EVBACKEND_SELECT  != backend &&
	   EVBACKEND_POLL    != backend &&
	   EVBACKEND_EPOLL   != backend &&
	   EVBACKEND_KQUEUE  != backend &&
	   EVBACKEND_DEVPOLL != backend &&
	   EVBACKEND_PORT    != backend &&
	   EVBACKEND_ALL     != backend) {
		/* TODO: libev-specific exception class here */
		/* BUG FIX: call sites must use TSRMLS_CC (the "call" macro);
		   TSRMLS_DC is the declaration form and does not compile /
		   expand correctly in ZTS builds. */
		zend_throw_exception(NULL, "libev\\EventLoop: backend parameter must be "
			"one of the EventLoop::BACKEND_* constants.", 1 TSRMLS_CC);
		
		return;
	}
	
	obj->loop = ev_loop_new(backend);
	
	/* In debug builds, verify the freshly created loop's internal state. */
	IF_DEBUG(ev_verify(obj->loop));
}
Пример #7
0
/* Exercises basic libev timer behavior: loop creation, one-shot timers,
 * passing user data through w->data, and ev_break terminating ev_run
 * with a nonzero result. */
void test_libev_timer (void)
{
    struct ev_loop *loop;
    ev_timer w;
    int i;

    ok ((loop = ev_loop_new (EVFLAG_AUTO)) != NULL,
        "ev_loop_new works");
    /* With no active watchers ev_run returns immediately with 0. */
    ok (ev_run (loop, 0) == 0,
        "ev_run returns 0 with no watchers configured");

    /* One-shot 100ms timer; ev_run returns once it has fired. */
    ev_timer_init (&w, timer_cb, 1E-1, 0.);
    ev_timer_start (loop, &w);
    ok (ev_run (loop, 0) == 0,
        "ev_run returns 0 after no-repeat timer fires once");
    ev_timer_stop (loop, &w);

    /* timer_arg_cb reads/writes the int pointed to by w.data. */
    i = 0;
    ev_timer_init (&w, timer_arg_cb, 1E-1, 0.);
    w.data = &i;
    ev_timer_start (loop, &w);
    ok (ev_run (loop, 0) == 0 && i == 1,
        "passing arbitrary data using w->data works");
    ev_timer_stop (loop, &w);

    /* Repeating 1ms timer; the callback is expected to ev_break after
     * 100 firings, which makes ev_run return nonzero. */
    i = 0;
    ev_timer_init (&w, timer_arg_cb, 1E-3, 1E-3);
    w.data = &i;
    ev_timer_start (loop, &w);
    ok (ev_run (loop, 0) != 0 && i == 100,
        "ev_break causes ev_run to return nonzero");
    ev_timer_stop (loop, &w);

    ev_loop_destroy (loop);
}
Пример #8
0
/*
 * Internal generic eredis runner for the event loop (write side).
 * Lazily builds the loop plus its two watchers on first call, then blocks
 * in ev_run until the loop is broken.  In thread mode, unlocks async_lock
 * to release the thread that spawned us before entering the loop.
 */
  static void
_eredis_run( eredis_t *e )
{
  if (! e->loop) {
    ev_timer *levt;
    ev_async *leva;

    e->loop = ev_loop_new( EVFLAG_AUTO );

    /* Connect timer: fires immediately, then every second, driving
       (re)connection attempts. */
    levt = &e->connect_timer;
    ev_timer_init( levt, _eredis_ev_connect_cb, 0., 1. );
    levt->data = e;
    ev_timer_start( e->loop, levt );

    /* Async send: other threads signal this watcher to flush queued
       commands from inside the loop thread. */
    leva = &e->send_async;
    ev_async_init( leva, _eredis_ev_send_cb );
    leva->data = e;
    ev_async_start( e->loop, leva );
  }

  SET_INRUN(e);

  if (IS_INTHR(e))
    /* Thread mode - release the thread creator */
    pthread_mutex_unlock( &(e->async_lock) );

  /* Blocks until ev_break is invoked from a callback. */
  ev_run( e->loop, 0 );

  UNSET_INRUN(e);
}
Пример #9
0
/*
 * Creates the arbiter's listening socket (any-address, th->server->port),
 * makes it non-blocking, and wires the accept watcher into a fresh libev
 * loop.  Returns TRUE on success, FALSE on failure (reason on stderr).
 */
boolean zero_arbiter_init(zero_arbiter_thread *th){
    assert(th!=NULL);
    int port = th->server->port;
    
    struct sockaddr_in addr;
    memset(&addr,0,sizeof(addr));
    addr.sin_addr.s_addr = INADDR_ANY;
    addr.sin_family = AF_INET;
    addr.sin_port = htons(port);

    int sock = socket(AF_INET,SOCK_STREAM,0);
    if(sock == -1){
        /* socket() failure previously went undetected and -1 was passed
           to setsockopt/bind below. */
        perror("socket");
        return FALSE;
    }
    int flag = 1;
    setsockopt(sock,SOL_SOCKET,SO_KEEPALIVE,&flag,sizeof(flag));
    setsockopt(sock,SOL_SOCKET,SO_REUSEADDR,&flag,sizeof(flag));

    if(bind(sock,(struct sockaddr*) &addr,sizeof(addr)) == -1){
        perror("bind");
        close(sock);    /* don't leak the descriptor on failure */
        return FALSE;
    }
    
    if(listen(sock,BACKLOG) == -1){
        perror("listen");
        close(sock);
        return FALSE;
    }
    sock_set_non_block(sock);
    th->sockfd = sock;
    th->last_thread = -1;
    th->loop = ev_loop_new(0);
    th->watcher.data = th;
    ev_io_init(&th->watcher,arbiter_conn_cb,sock,EV_READ);
    ev_io_start(th->loop,&th->watcher);
    return TRUE;
}
/* Test-thread body: verifies that uprobe_pthread_upump_mgr stores the
 * upump manager per thread, and that FREEZE/THAW events temporarily stop
 * and restore it being attached to newly allocated pipes. */
static void *thread(void *unused)
{
    struct ev_loop *loop = ev_loop_new(0);
    struct upump_mgr *upump_mgr =
        upump_ev_mgr_alloc(loop, UPUMP_POOL, UPUMP_BLOCKER_POOL);
    assert(upump_mgr != NULL);

    /* Before registering the manager with the probe, allocating a pipe
     * must not attach it. */
    struct upipe *upipe;
    upipe = upipe_void_alloc(&uprobe_test_mgr, uprobe_use(uprobe));
    assert(upump_mgr_get_opaque(upump_mgr, struct upipe *) == NULL);
    uprobe_test_free(upipe);

    /* After registration, a newly allocated pipe gets the manager. */
    uprobe_pthread_upump_mgr_set(uprobe, upump_mgr);
    upipe = upipe_void_alloc(&uprobe_test_mgr, uprobe_use(uprobe));
    assert(upump_mgr_get_opaque(upump_mgr, struct upipe *) == upipe);
    uprobe_test_free(upipe);
    upump_mgr_set_opaque(upump_mgr, NULL);

    /* While frozen, the probe must not hand the manager out. */
    uprobe_throw(uprobe, NULL, UPROBE_FREEZE_UPUMP_MGR);
    upipe = upipe_void_alloc(&uprobe_test_mgr, uprobe_use(uprobe));
    assert(upump_mgr_get_opaque(upump_mgr, struct upipe *) == NULL);
    uprobe_test_free(upipe);

    /* Thawing restores the previous behavior. */
    uprobe_throw(uprobe, NULL, UPROBE_THAW_UPUMP_MGR);
    upipe = upipe_void_alloc(&uprobe_test_mgr, uprobe_use(uprobe));
    assert(upump_mgr_get_opaque(upump_mgr, struct upipe *) == upipe);
    uprobe_test_free(upipe);

    upump_mgr_release(upump_mgr);
    ev_loop_destroy(loop);
    return NULL;
}
Пример #11
0
/*
 * Allocates and initializes one nio thread context: a private libev loop
 * plus a notification pipe with both ends set non-blocking.
 * Returns NULL on failure with *err set; all partially acquired
 * resources are released on the error paths.
 */
spx_private void *spx_nio_thread_context_new(size_t idx,void *arg,err_t *err){
    struct spx_thread_context_node *tcn = (struct spx_thread_context_node *)arg;
    struct spx_nio_thread_context *context = spx_alloc_alone(sizeof(*context),err);
    if(NULL == context){
        SpxLog2(tcn->log,SpxLogError,*err,\
                "alloc nio thread context is fail.");
        return NULL;
    }

    context->loop = ev_loop_new(EVFLAG_AUTO);
    context->log = tcn->log;
    context->idx = idx;
    context->thread_notify_handle = tcn->thread_notify;
    if(-1 == pipe(context->pipe)){
        SpxLog2(tcn->log,SpxLogError,*err,\
                "open the nio thread pips is fail.");
        *err = errno;
        /* release the loop acquired above before bailing out */
        ev_loop_destroy(context->loop);
        SpxFree(context);
        return NULL;
    }
    /* BUG FIX: the second call previously set pipe[0] non-blocking twice,
       leaving the write end (pipe[1]) blocking. */
    if((0 != (*err = spx_set_nb(context->pipe[0]))) \
            ||(0 != (*err = spx_set_nb(context->pipe[1])))){
        SpxLog2(tcn->log,SpxLogError,*err,\
                "set pipe noblacking is fail.");
        close(context->pipe[0]);
        close(context->pipe[1]);
        ev_loop_destroy(context->loop);
        SpxFree(context);
        return NULL;
    }
    return context;
}
/* Builds a libev-backed MilterEventLoop GObject.  EVFLAG_FORKCHECK makes
 * the loop detect fork() and reinitialize its backend in the child. */
MilterEventLoop *
milter_libev_event_loop_new (void)
{
    struct ev_loop *loop;

    loop = ev_loop_new(EVFLAG_FORKCHECK);
    return g_object_new(MILTER_TYPE_LIBEV_EVENT_LOOP,
                        "ev-loop", loop,
                        NULL);
}
Пример #13
0
/* Creates the epoll-backed libev loop for this wrapper and attaches the
 * global socket-event table.  Returns 0 on success, -1 if the loop could
 * not be created. */
int create_ev_loop(ev_loop_s* ev_loop_s_ptr)
{
    ev_loop_s_ptr->ev_loop_ptr = ev_loop_new(EVBACKEND_EPOLL);
    assert(ev_loop_s_ptr->ev_loop_ptr != NULL);
    if (ev_loop_s_ptr->ev_loop_ptr == NULL)
        return -1;  /* assert() is compiled out under NDEBUG; fail explicitly */
    ev_loop_s_ptr->socket_event_ptr = g_socket_events;
    return 0;
}
Пример #14
0
/* timer adds zmsgs to zlist, then stops reactor after 100.
 * Verifies the ev_zlist composite watcher: the list watcher drains
 * messages appended by the timer callback, and the loop exits nonzero
 * (via ev_break from the callbacks) with the list empty.
 */
void test_ev_zlist (void)
{
    struct ev_loop *loop;
    ev_zlist list_w;
    ev_timer timer_w;
    zlist_t *l;
    zmsg_t *zmsg;

    ok ((loop = ev_loop_new (EVFLAG_AUTO)) != NULL,
        "ev_loop_new works");

    /* Seed the list with one message so list_cb has work immediately. */
    if (!(l = zlist_new ()) || !(zmsg = zmsg_new ())
                            || zlist_append (l, zmsg) < 0)
        oom ();

    /* list_cb consumes from l; list_timer_cb (1ms repeating) produces
     * into it via timer_w.data. */
    ev_zlist_init (&list_w, list_cb, l, EV_READ);
    ev_timer_init (&timer_w, list_timer_cb, 1E-3, 1E-3);
    timer_w.data = l;
    ev_zlist_start (loop, &list_w);
    ev_timer_start (loop, &timer_w);
    ok (ev_run (loop, 0) != 0 && zlist_size (l) == 0,
        "ev_zlist handler ran 100 times");
    ev_zlist_stop (loop, &list_w);
    ev_timer_stop (loop, &timer_w);

    if (l)
        zlist_destroy (&l);
    ev_loop_destroy (loop);
}
Пример #15
0
/* Allocates and initializes one load-generator worker with its own libev
 * loop (backend chosen by config->libev_flags) and num_clients client
 * slots.  Returns NULL if any client allocation fails.
 * NOTE(review): on the client_new failure path the worker, its loop and
 * the clients array are leaked — confirm whether callers treat NULL as
 * fatal process exit, otherwise a worker_free-style cleanup is needed. */
Worker *worker_new(uint8_t id, Config *config, uint16_t num_clients, uint64_t num_requests) {
	Worker *worker;
	uint16_t i;

	worker = W_MALLOC(Worker, 1);
	worker->id = id;
	worker->loop = ev_loop_new(config->libev_flags);
	/* keep the loop alive even with no active watchers */
	ev_ref(worker->loop);
	worker->config = config;
	worker->num_clients = num_clients;
	worker->stats.req_todo = num_requests;
	/* report progress roughly every 10% of the request budget */
	worker->progress_interval = num_requests / 10;

	if (worker->progress_interval == 0)
		worker->progress_interval = 1;

	worker->clients = W_MALLOC(Client*, num_clients);

	for (i = 0; i < num_clients; i++) {
		if (NULL == (worker->clients[i] = client_new(worker)))
			return NULL;
	}

	return worker;
}
Пример #16
0
/*
 * Initializes one client thread: a private libev loop and a pool of
 * CLIENT_CONN_NUM connected, non-blocking sockets to client->host:port,
 * plus the pending-request queue and session pools.
 * Returns TRUE on success, FALSE on connection failure.
 */
boolean rpc_client_thread_init(rpc_client *client, rpc_client_thread *th) {
	assert(th!=NULL);

	struct sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(client->port);
	inet_aton(client->host, &addr.sin_addr);

	th->loop = ev_loop_new(0);
	th->req_conn_count = CLIENT_CONN_NUM;
	th->req_conns = rpc_array_new();
	th->last_conn = -1;
	int i, cfd;
	rpc_conn *c = NULL;
	for (i = 0; i < th->req_conn_count; i++) {
		cfd = socket(AF_INET, SOCK_STREAM, 0);
		if (cfd == -1) {
			/* socket() failure previously went unchecked */
			perror("socket");
			return FALSE;
		}
		if (connect(cfd, (struct sockaddr*) &addr, sizeof(addr)) == -1) {
			perror("connect");
			close(cfd);	/* don't leak the descriptor */
			return FALSE;
		}
		rpc_set_non_block(cfd);
		c = rpc_conn_client_new(cfd, th->loop);
		c->thread = th;
		rpc_array_add(th->req_conns, c);
	}
	th->req_pending = rpc_async_queue_new();
	th->req_pool = rpc_sessionpool_new();
	th->req_timer = rpc_sessionpool_new();
	th->client = client;
	/* (the redundant trailing `c->thread = th;` was removed: it repeated
	   the per-connection assignment and dereferenced NULL when
	   CLIENT_CONN_NUM is 0) */
	return TRUE;
}
Пример #17
0
/* Prepares a worker thread: job queue, an eventfd used by producers to
 * wake the worker, and a private libev loop watching that eventfd. */
void rpc_worker_init(rpc_worker_thread *th) {
	th->queue = rpc_queue_new();
	th->notify_fd = eventfd(0, 0);
	th->loop = ev_loop_new(0);
	/* let the callback find its thread through the watcher */
	th->watcher.data = th;
	ev_io_init(&th->watcher, cb_notify_conn, th->notify_fd, EV_READ);
	ev_io_start(th->loop, &th->watcher);
}
Пример #18
0
/* Object-creation hook: equips a freshly built event-domain object with
 * its own libev loop (backend auto-selected). */
static void
evtdomain_on_create(x_object *o)
{
  x_event_domain *domain = (x_event_domain *) (void *) o;
  ENTER;
  domain->eventloop = ev_loop_new(EVFLAG_AUTO);
  EXIT;
}
Пример #19
0
/* Sets up a worker thread: job queue, a wakeup eventfd, and a private
 * libev loop whose io watcher fires when the eventfd is signalled. */
void zero_worker_init(zero_worker_thread *th){
    th->queue = zero_queue_new();
    th->notify_fd = eventfd(0,0);
    th->loop = ev_loop_new(0);
    /* the callback reaches the thread through watcher.data */
    th->watcher.data = th;
    ev_io_init(&th->watcher,worker_notify_cb,th->notify_fd,EV_READ);
    ev_io_start(th->loop,&th->watcher);
}
Пример #20
0
Файл: evio.c Проект: bartuer/bew
/* Watches the directory given as argv[1] using a kqueue-backed libev loop
 * plus eio for async file operations; exits 0 after the loop terminates,
 * freeing every recorded path. */
int
main (int argc, char**argv)
{
  if ( !argv[1] ) {
    printf("need dir parameter\n");
    exit(1);
  }
  char * argvp = NULL;
  argvp = realpath(argv[1], pwd);
  if ( argvp == NULL ) {
    /* realpath failed; previously this slipped through and only the
       stale errno check below caught it (or didn't). */
    printf("%s, %s is not valid directory\n", strerror(errno), argv[1]);
    exit(1);
  }
  dp = opendir(pwd);
  if ( dp == NULL ) {
    /* BUG FIX: test the result, not errno — opendir() does not clear
       errno on success, so a stale value aborted valid runs. */
    printf("%s, %s is not valid directory\n", strerror(errno), pwd);
    exit(1);
  }

  int dfd = dirfd(dp);
  loop = ev_loop_new (EVBACKEND_KQUEUE);
  
  ev_timer_init (&timeout_watcher, timeout_cb, 1, 0.);
  ev_timer_start (loop, &timeout_watcher);
  
  /* watch the directory fd itself for kqueue-style change events */
  ev_io_init (&dir_watcher, dir_cb, dfd, EV_LIBUV_KQUEUE_HACK);
  ev_io_start (loop, &dir_watcher);

  /* stdin command channel */
  ev_io_init (&cmd_watcher, cmd_cb, 0, EV_READ);
  ev_io_start (loop, &cmd_watcher);
  
  ev_idle_init (&repeat_watcher, repeat);

  ev_async_init (&ready_watcher, ready);
  ev_async_start (loop, &ready_watcher);

  /* hook libeio's want_poll notification into this loop */
  if (eio_init (want_poll, 0)) {
    abort ();
  };
  
  ev_run (loop, 0);

  if ( root ) {
    free(root);
  }
  printf("count: %d\n", (int)freelist_len);
  /* free all allocated path */
  int i;
   if ( freelist ) {
    for (i = 0; i < freelist_len; ++i ) {
     if (freelist[i]) {
         free(freelist[i]);
      }
    }
    free(freelist);
  }
  if ( dp ) {
    closedir(dp);
  }

  return 0;
}
Пример #21
0
/**
 * Entry point for threads to join the networking
 * stack. This method blocks indefinitely until the
 * network stack is shutdown.
 * @arg netconf The configuration for the networking stack.
 */
void start_networking_worker(bloom_networking *netconf) {
    // Allocate our user data
    worker_ev_userdata data;
    data.netconf = netconf;

    // Allocate our pipe
    if (pipe(data.pipefd)) {
        perror("failed to allocate worker pipes!");
        return;
    }

    // Create the event loop
    if (!(data.loop = ev_loop_new(netconf->ev_mode))) {
        syslog(LOG_ERR, "Failed to create event loop for worker!");
        // Don't leak the pipe allocated above
        close(data.pipefd[0]);
        close(data.pipefd[1]);
        return;
    }

    // Set the user data to be for this thread
    ev_set_userdata(data.loop, &data);

    // Setup the pipe listener
    ev_io_init(&data.pipe_client, handle_worker_notification,
                data.pipefd[0], EV_READ);
    ev_io_start(data.loop, &data.pipe_client);

    // Setup the periodic timers,
    ev_timer_init(&data.periodic, handle_periodic_timeout,
                PERIODIC_TIME_SEC, 1);
    ev_timer_start(data.loop, &data.periodic);

    // Syncronize until netconf->threads is available
    barrier_wait(&netconf->thread_barrier);

    // Register this thread so we can accept connections
    assert(netconf->threads);
    pthread_t id = pthread_self();
    for (int i=0; i < netconf->config->worker_threads; i++) {
        if (pthread_equal(id, netconf->threads[i])) {
            // Provide a pointer to our data
            netconf->workers[i] = &data;
            break;
        }
    }

    // Wait for everybody to be registered
    barrier_wait(&netconf->thread_barrier);

    // Run the event loop
    ev_run(data.loop, 0);

    // Cleanup after exit
    ev_timer_stop(data.loop, &data.periodic);
    ev_io_stop(data.loop, &data.pipe_client);
    close(data.pipefd[0]);
    close(data.pipefd[1]);
    ev_loop_destroy(data.loop);
}
Пример #22
0
/* Creates the libev I/O plugin for libcouchbase.  `arg` may carry an
 * existing ev_loop to reuse; when NULL a private loop is created and
 * owned (and later destroyed) by the plugin. */
LIBCOUCHBASE_API
lcb_error_t lcb_create_libev_io_opts(int version, lcb_io_opt_t *io, void *arg)
{
    struct ev_loop *loop = arg;
    struct lcb_io_opt_st *ret;
    struct libev_cookie *cookie;
    if (version != 0) {
        return LCB_PLUGIN_VERSION_MISMATCH;
    }
    ret = calloc(1, sizeof(*ret));
    cookie = calloc(1, sizeof(*cookie));
    if (ret == NULL || cookie == NULL) {
        /* free(NULL) is a no-op, so this is safe whichever one failed */
        free(ret);
        free(cookie);
        return LCB_CLIENT_ENOMEM;
    }

    /* setup io iops! */
    ret->version = 0;
    ret->dlhandle = NULL;
    ret->destructor = lcb_destroy_io_opts;
    /* consider that struct isn't allocated by the library,
     * `need_cleanup' flag might be set in lcb_create() */
    ret->v.v0.need_cleanup = 0;
    ret->v.v0.delete_event = lcb_io_delete_event;
    ret->v.v0.destroy_event = lcb_io_destroy_event;
    ret->v.v0.create_event = lcb_io_create_event;
    ret->v.v0.update_event = lcb_io_update_event;

    ret->v.v0.delete_timer = lcb_io_delete_timer;
    ret->v.v0.destroy_timer = lcb_io_destroy_timer;
    /* NOTE(review): create_timer is wired to lcb_io_create_event while
     * the other timer slots use *_timer variants — confirm this is
     * intentional (timers and events may share a representation). */
    ret->v.v0.create_timer = lcb_io_create_event;
    ret->v.v0.update_timer = lcb_io_update_timer;

    ret->v.v0.run_event_loop = lcb_io_run_event_loop;
    ret->v.v0.stop_event_loop = lcb_io_stop_event_loop;

    /* fill in the BSD-socket style function table */
    wire_lcb_bsd_impl(ret);

    if (loop == NULL) {
        /* No loop supplied: create one we own; `allocated` records that
         * the destructor must destroy it. */
        if ((cookie->loop = ev_loop_new(EVFLAG_AUTO | EVFLAG_NOENV)) == NULL) {
            free(ret);
            free(cookie);
            return LCB_CLIENT_ENOMEM;
        }
        cookie->allocated = 1;
    } else {
        cookie->loop = loop;
        cookie->allocated = 0;
    }
    /* loop starts suspended; run_event_loop resumes it */
    cookie->suspended = 1;
    ret->v.v0.cookie = cookie;

    *io = ret;
    return LCB_SUCCESS;
}
Пример #23
0
/* Attaches libwebsockets' per-thread-service-index (tsi) context to a
 * libev loop: creates a loop when none is supplied, starts the accept
 * watcher on the listening fd and (optionally) a SIGINT watcher, then
 * logs which libev backend was chosen.  Always returns 0.
 * NOTE(review): ev_loop_new can return NULL; the result is stored and
 * used unchecked — confirm upstream guarantees or add a check. */
LWS_VISIBLE int
lws_ev_initloop(struct lws_context *context, struct ev_loop *loop, int tsi)
{
	struct ev_signal *w_sigint = &context->pt[tsi].w_sigint.ev_watcher;
	struct ev_io *w_accept = &context->pt[tsi].w_accept.ev_watcher;
	const char * backend_name;
	int status = 0;
	int backend;

	/* caller may supply a loop; otherwise own a fresh one */
	if (!loop)
		loop = ev_loop_new(0);

	context->pt[tsi].io_loop_ev = loop;

	/*
	 * Initialize the accept w_accept with the listening socket
	 * and register a callback for read operations
	 */
	ev_io_init(w_accept, lws_accept_cb, context->pt[tsi].lserv_fd, EV_READ);
	ev_io_start(context->pt[tsi].io_loop_ev, w_accept);

	/* Register the signal watcher unless the user says not to */
	if (context->use_ev_sigint) {
		ev_signal_init(w_sigint, context->lws_ev_sigint_cb, SIGINT);
		ev_signal_start(context->pt[tsi].io_loop_ev, w_sigint);
	}
	backend = ev_backend(loop);

	/* purely informational: report the backend libev selected */
	switch (backend) {
	case EVBACKEND_SELECT:
		backend_name = "select";
		break;
	case EVBACKEND_POLL:
		backend_name = "poll";
		break;
	case EVBACKEND_EPOLL:
		backend_name = "epoll";
		break;
	case EVBACKEND_KQUEUE:
		backend_name = "kqueue";
		break;
	case EVBACKEND_DEVPOLL:
		backend_name = "/dev/poll";
		break;
	case EVBACKEND_PORT:
		backend_name = "Solaris 10 \"port\"";
		break;
	default:
		backend_name = "Unknown libev backend";
		break;
	}

	lwsl_notice(" libev backend: %s\n", backend_name);

	return status;
}
Пример #24
0
/* Create a new selector. This is more or less the pure Ruby version
   translated into an MRI cext */
static VALUE NIO_Selector_initialize(int argc, VALUE *argv, VALUE self)
{
    ID backend_id;
    VALUE backend;
    VALUE lock;

    struct NIO_Selector *selector;
    unsigned int flags = 0;

    Data_Get_Struct(self, struct NIO_Selector, selector);

    /* one optional argument: a backend symbol (:epoll, :poll, ...) */
    rb_scan_args(argc, argv, "01", &backend);

    if(backend != Qnil) {
        if(!rb_ary_includes(NIO_Selector_supported_backends(CLASS_OF(self)), backend)) {
            rb_raise(rb_eArgError, "unsupported backend: %s",
                RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
        }

        backend_id = SYM2ID(backend);

        /* map the Ruby symbol onto the corresponding libev backend flag */
        if(backend_id == rb_intern("epoll")) {
            flags = EVBACKEND_EPOLL;
        } else if(backend_id == rb_intern("poll")) {
            flags = EVBACKEND_POLL;
        } else if(backend_id == rb_intern("kqueue")) {
            flags = EVBACKEND_KQUEUE;
        } else if(backend_id == rb_intern("select")) {
            flags = EVBACKEND_SELECT;
        } else if(backend_id == rb_intern("port")) {
            flags = EVBACKEND_PORT;
        } else {
            /* defensive: supported_backends and this mapping must agree */
            rb_raise(rb_eArgError, "unsupported backend: %s",
                RSTRING_PTR(rb_funcall(backend, rb_intern("inspect"), 0)));
        }
    }

    /* Ensure the selector loop has not yet been initialized */
    assert(!selector->ev_loop);

    /* flags == 0 lets libev auto-select a backend */
    selector->ev_loop = ev_loop_new(flags);
    if(!selector->ev_loop) {
        rb_raise(rb_eIOError, "error initializing event loop");
    }

    /* the wakeup watcher itself was prepared in allocate() */
    ev_io_start(selector->ev_loop, &selector->wakeup);

    rb_ivar_set(self, rb_intern("selectables"), rb_hash_new());
    rb_ivar_set(self, rb_intern("lock_holder"), Qnil);

    lock = rb_class_new_instance(0, 0, rb_const_get(rb_cObject, rb_intern("Mutex")));
    rb_ivar_set(self, rb_intern("lock"), lock);
    rb_ivar_set(self, rb_intern("lock_holder"), Qnil);

    return Qnil;
}
Пример #25
0
//-------------------------------------------------------------------
/* Accepts one pending connection from the listening watcher's fd and
 * services it on a private libev loop until that loop terminates. */
void clientThread(struct ev_io *watcher ) {
    int clientSockDescr = accept(watcher->fd, 0, 0);
    if (clientSockDescr < 0) {
        /* accept() failure previously produced a watcher on fd -1 */
        perror("accept");
        return;
    }

    struct ev_io *clientWatcher = (struct ev_io*) malloc(sizeof(struct ev_io));
    if (clientWatcher == NULL) {
        close(clientSockDescr);
        return;
    }
    ev_io_init(clientWatcher, readCallBack, clientSockDescr, EV_READ);

    struct ev_loop *threadLoop = ev_loop_new(EVFLAG_AUTO);
    ev_io_start(threadLoop, clientWatcher);
    ev_run(threadLoop, 0);

    /* ev_run returned: the loop itself used to leak here.
     * NOTE(review): clientWatcher (and the client fd) are assumed to be
     * released by readCallBack — confirm, else free them here too. */
    ev_loop_destroy(threadLoop);
}
Пример #26
0
// Builds the scheduler's private libev loop and its two async watchers:
// one to wake the loop when work arrives, one to request loop shutdown.
scheduler_impl_t::scheduler_impl_t() {
	ev_loop_ = ev_loop_new(0);
	// let callbacks recover `this` from the loop
	ev_set_userdata(ev_loop_, this);

	// shutdown-request watcher
	ev_async_init(&break_loop_, break_loop_cb);
	ev_async_start(ev_loop_, &break_loop_);

	// work-arrived watcher
	ev_async_init(&activate_, activate_cb);
	ev_async_start(ev_loop_, &activate_);
}
Пример #27
0
// Connects two async Redis contexts (one for commands, one for pub/sub)
// and attaches both to a private libev loop.
// Throws a std::string description when either connection fails; partially
// created contexts are freed before throwing.
events::events(const string& redis_host, unsigned short redis_port) {
    this->redis = redisAsyncConnect(redis_host.c_str(), redis_port);
    this->redis_pubsub = redisAsyncConnect(redis_host.c_str(), redis_port);
    // redisAsyncConnect can return NULL on allocation failure, or a
    // context with err set; the original dereferenced ->err unchecked
    // and leaked both contexts on the throw path.
    if (this->redis == NULL || this->redis_pubsub == NULL ||
            this->redis->err || this->redis_pubsub->err) {
        if (this->redis) redisAsyncFree(this->redis);
        if (this->redis_pubsub) redisAsyncFree(this->redis_pubsub);
        throw string("Redis Async cannot connect.");
    }
    this->loop = ev_loop_new(EVBACKEND_POLL | EVBACKEND_SELECT);
    redisLibevAttach(this->loop, this->redis);
    redisLibevAttach(this->loop, this->redis_pubsub);
}
Пример #28
0
/* libevent-compatibility shim: a new event base is simply a new ev loop.
 * Without EV_MULTIPLICITY libev has only the single default loop, so
 * extra bases cannot exist. */
struct event_base *
event_base_new (void)
{
#if EV_MULTIPLICITY
  return (struct event_base *)ev_loop_new (EVFLAG_AUTO);
#else
  /* BUG FIX: a parenthesized string literal alone is always true, so the
     original assert could never fire; the comma-0 idiom makes it fail
     while still printing the message. */
  assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY", 0));
  return NULL;
#endif
}
Пример #29
0
/* Prepares an fsock worker thread: private libev loop, sync primitives,
 * job queue, the sentinel stop task, and the async watcher used to kick
 * the loop when jobs are queued.  If loop creation fails, self->loop is
 * left NULL and nothing else is initialized. */
void fsock_thread_init (struct fsock_thread *self) {
  self->loop = ev_loop_new (0);
  if (!self->loop)
    return;
  fsock_mutex_init (&self->sync);
  fsock_queue_init (&self->jobs);
  fsock_task_init (&self->stop, 0, NULL, NULL);
  ev_async_init (&self->job_async, async_routine);
  ev_async_start (self->loop, &self->job_async);
}
Пример #30
0
/* Wrapper for populating a Coolio_Loop struct with a new event loop.
 * flags is passed straight to ev_loop_new (0 = auto-select backend).
 * Raises RuntimeError if the loop was already created or cannot be
 * created. */
static VALUE Coolio_Loop_ev_loop_new(VALUE self, VALUE flags)
{
  struct Coolio_Loop *loop_data;
  Data_Get_Struct(self, struct Coolio_Loop, loop_data);

  if(loop_data->ev_loop)
    rb_raise(rb_eRuntimeError, "loop already initialized");

  loop_data->ev_loop = ev_loop_new(NUM2INT(flags));
  /* ev_loop_new returns NULL when no usable backend exists; the
   * original silently left ev_loop NULL in that case. */
  if(!loop_data->ev_loop)
    rb_raise(rb_eRuntimeError, "error initializing event loop");

  return Qnil;
}