Example #1
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
  CFRunLoopSourceContext ctx;
  int r;

  if (uv__kqueue_init(loop))
    return -1;

  loop->cf_loop = NULL;
  if ((r = uv_mutex_init(&loop->cf_mutex)))
    return r;
  if ((r = uv_sem_init(&loop->cf_sem, 0)))
    return r;
  QUEUE_INIT(&loop->cf_signals);

  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;
  ctx.perform = uv__cf_loop_cb;
  loop->cf_cb = CFRunLoopSourceCreate(NULL, 0, &ctx);

  if ((r = uv_thread_create(&loop->cf_thread, uv__cf_loop_runner, loop)))
    return r;

  /* Synchronize threads */
  uv_sem_wait(&loop->cf_sem);
  assert(ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) != NULL);

  return 0;
}
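
Example #1 blocks on cf_sem until the helper thread has published its CFRunLoop. For context, a minimal sketch of that helper, modeled on the usual libuv darwin pattern rather than the verbatim source (simplified, no ACCESS_ONCE or cleanup):

static void uv__cf_loop_runner(void* arg) {
  uv_loop_t* loop = arg;

  /* Publish the run loop and attach the source created in
   * uv__platform_loop_init(), then wake the waiting thread. */
  loop->cf_loop = CFRunLoopGetCurrent();
  CFRunLoopAddSource(loop->cf_loop, loop->cf_cb, kCFRunLoopDefaultMode);
  uv_sem_post(&loop->cf_sem);  /* releases the uv_sem_wait() above */
  CFRunLoopRun();              /* serves uv__cf_loop_cb() callbacks */
}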
Example #2
static void work(async_worker_t *const worker) {
	worker->main = co_active();
	for(;;) {
		uv_sem_wait(&worker->sem);
		if(!worker->work) break;
		co_switch(worker->work);
		uv_async_send(worker->async);
	}
}
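
The loop above consumes work handed to it by a dispatcher that is not shown. A minimal sketch of that producing side, assuming only the fields visible here (worker->work, worker->sem, worker->async); the function name is hypothetical:

/* Hypothetical dispatcher: hand a libco coroutine to the worker and
 * wake it; the worker switches into it, then signals the event loop
 * through worker->async. Posting with work == NULL shuts it down. */
static void async_worker_wake(async_worker_t *const worker, cothread_t co) {
	worker->work = co;
	uv_sem_post(&worker->sem);  /* releases uv_sem_wait() in work() */
}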
Example #3
static void nub__work_signal_cb(uv_async_t* handle) {
  nub_loop_t* loop;
  nub_thread_t* thread;

  loop = ((nub_thread_t*) handle->data)->nubloop;
  while (!fuq_empty(&loop->blocking_queue_)) {
    thread = (nub_thread_t*) fuq_dequeue(&loop->blocking_queue_);
    uv_sem_post(&thread->thread_lock_sem_);
    uv_sem_wait(&loop->loop_lock_sem_);
  }
}
Example #4
static PyObject *
Semaphore_func_wait(Semaphore *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    Py_BEGIN_ALLOW_THREADS
    uv_sem_wait(&self->uv_semaphore);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
Example #5
void uv_barrier_wait(uv_barrier_t* barrier) {
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  if (--barrier->count == 0) {
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
}
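
This is the classic two-turnstile reusable barrier: it only behaves correctly if turnstile1 starts closed (0) and turnstile2 starts open (1). A matching initializer might look like the sketch below; this is an illustration of that assumption, not necessarily the project's uv_barrier_init (error cleanup omitted):

int uv_barrier_init(uv_barrier_t* barrier, unsigned int n) {
  barrier->n = n;
  barrier->count = 0;
  if (uv_mutex_init(&barrier->mutex))
    return -1;
  if (uv_sem_init(&barrier->turnstile1, 0))  /* closed until the last arrival */
    return -1;
  if (uv_sem_init(&barrier->turnstile2, 1))  /* open, locked again on exit */
    return -1;
  return 0;
}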
Example #6
void Thread::start()
{
	if (this->is_started.exchange(true))
	{
		LOG(INFO) << "Thread " << static_cast<void*>(this) << " already started";
		return;
	}

	uv_sem_init(&this->semaphore, 0);
	uv_thread_create(&this->thread_id, &Thread::thread_worker, static_cast<void*>(this));
	uv_sem_wait(&this->semaphore);
	uv_sem_destroy(&this->semaphore);
}
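
Thread::thread_worker is not shown; the handshake only requires that the new thread post the semaphore once its setup is finished, after which start() can safely destroy it. A C-flavored sketch of that shape, treating the object as a plain struct with just the field used by the handshake:

/* Simplified stand-in for the C++ class: only the semaphore matters
 * for the start-up handshake. */
struct Thread { uv_sem_t semaphore; };

/* Hypothetical worker entry: post the instance's semaphore once
 * setup is done; start() then resumes and destroys it. */
static void thread_worker(void* arg) {
	struct Thread* self = arg;
	/* ... per-thread setup ... */
	uv_sem_post(&self->semaphore);  /* releases uv_sem_wait() in start() */
	/* ... thread main loop ... */
}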
Example #7
/* more of the same.  this controls the wait acquisition
 * of an OS resource or response.  this is probably eliminated
 * by async worker synchro in libuv
 */
void
osblock(void)
{
    
    Osdep *os;

    // get the hosting proc of this proc
    //hproc_t* hp = up->hproc;
    
    // get its sync sem
	//os = hp->os;
	os = ((kproc_t*)up)->os;
    //print("osblock/sem_wait: on %p\n",os->sem);
	//while(sem_wait(&os->sem))
	//	{}
    uv_sem_wait(&os->sem);
    /* retry on signals (which shouldn't happen) */
}
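
The comment suggests osblock() parks the calling proc on its per-proc semaphore; the complementary wake-up is then just a post on the same semaphore. A sketch under that assumption (the function name is a guess; only kproc_t and os->sem come from the snippet):

/* Hypothetical counterpart to osblock(): wake a proc parked in
 * uv_sem_wait() on its host OS semaphore. */
void
osready(kproc_t *kp)
{
	uv_sem_post(&kp->os->sem);
}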
Example #8
static void embed_thread_runner(void* arg) {
  uv_os_fd_t fd;
  int timeout;

  while (!embed_closed) {
    fd = uv_backend_fd(uv_default_loop());
    timeout = uv_backend_timeout(uv_default_loop());

#if defined(_WIN32)
    embed_thread_poll_win(fd, timeout);
#else
    embed_thread_poll_unix(fd, timeout);
#endif

    uv_async_send(&embed_async);
    uv_sem_wait(&embed_sem);
  }
}
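
embed_async and embed_sem are globals set up elsewhere; the poller above hands control to the main thread and waits for permission to poll again. A minimal sketch of the main-thread side, following the common libuv embedding pattern (not necessarily this project's exact callback):

/* Hypothetical async callback on the loop thread: drain whatever the
 * backend reported as ready, then let the poller thread continue. */
static void embed_async_cb(uv_async_t* handle) {
  uv_run(uv_default_loop(), UV_RUN_NOWAIT);  /* process pending I/O */
  uv_sem_post(&embed_sem);                   /* resumes embed_thread_runner() */
}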
Example #9
static void init_threads(void) {
  unsigned int i;
  const char* val;
  uv_sem_t sem;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = (uv_thread_t*)uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  QUEUE_INIT(&wq);

  if (uv_sem_init(&sem, 0))
    abort();

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create(threads + i, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}
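
Each worker receives &sem and is expected to post it exactly once as it starts, so init_threads() returns only after the whole pool is up and the stack-allocated semaphore can be destroyed. A sketch of the start of worker() under that assumption (the work-queue loop is elided):

/* Sketch of the worker's start-up handshake. */
static void worker(void* arg) {
  uv_sem_post((uv_sem_t*) arg);  /* one post per thread; see the wait loop above */
  arg = NULL;                    /* the semaphore is destroyed after start-up */
  for (;;) {
    /* ... lock mutex, wait on cond, dequeue from wq, run work items ... */
  }
}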
Example #10
static void nub__thread_entry_cb(void* arg) {
  nub_thread_t* thread;
  fuq_queue_t* queue;
  nub_work_t* item;

  thread = (nub_thread_t*) arg;
  queue = &thread->incoming_;

  for (;;) {
    while (!fuq_empty(queue)) {
      item = (nub_work_t*) fuq_dequeue(queue);
      (item->cb)(thread, item, item->arg);
    }
    if (0 < thread->disposed)
      break;
    uv_sem_wait(&thread->sem_wait_);
  }

  ASSERT(1 == fuq_empty(queue));
  fuq_dispose(&thread->incoming_);
}
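
Work presumably reaches incoming_ from another thread via an enqueue followed by a post on sem_wait_, which is what wakes the loop above when the queue was empty. A minimal sketch under that assumption (the helper name and the enqueue counterpart to fuq_dequeue() are hypothetical):

/* Hypothetical producer: queue an item for the spawned thread and
 * wake it if it is parked in uv_sem_wait(&thread->sem_wait_). */
void nub_thread_push(nub_thread_t* thread, nub_work_t* item) {
  fuq_enqueue(&thread->incoming_, item);
  uv_sem_post(&thread->sem_wait_);
}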
Example #11
/* Set up an IPC pipe server that hands out listen sockets to the worker
 * threads. It's kind of cumbersome for such a simple operation, maybe we
 * should revive uv_import() and uv_export().
 */
void start_connection_dispatching(uv_handle_type type, unsigned int num_servers, struct server_ctx* servers, char* listen_address, int listen_port, bool tcp_nodelay)
{
    int rc;
    struct ipc_server_ctx ctx;
    uv_loop_t* loop;
    unsigned int i;
    
    loop = uv_default_loop();
    ctx.num_connects = num_servers;
    ctx.tcp_nodelay = tcp_nodelay;
    
    if (type == UV_TCP)
    {
        uv_ip4_addr(listen_address, listen_port, &listen_addr);
        
        rc = uv_tcp_init(loop, (uv_tcp_t*) &ctx.server_handle);
        
        if (ctx.tcp_nodelay) {
            rc = uv_tcp_nodelay((uv_tcp_t*) &ctx.server_handle, 1);
        }

        rc = uv_tcp_bind((uv_tcp_t*) &ctx.server_handle, (const struct sockaddr*)&listen_addr, 0);
        print_configuration();
        printf("Listening...\n");
    }
    
    rc = uv_pipe_init(loop, &ctx.ipc_pipe, 1);
    rc = uv_pipe_bind(&ctx.ipc_pipe, "HAYWIRE_CONNECTION_DISPATCH_PIPE_NAME");
    rc = uv_listen((uv_stream_t*) &ctx.ipc_pipe, 128, ipc_connection_cb);
    
    for (i = 0; i < num_servers; i++)
        uv_sem_post(&servers[i].semaphore);
    
    rc = uv_run(loop, UV_RUN_DEFAULT);
    uv_close((uv_handle_t*) &ctx.server_handle, NULL);
    rc = uv_run(loop, UV_RUN_DEFAULT);
    
    for (i = 0; i < num_servers; i++)
        uv_sem_wait(&servers[i].semaphore);
}
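
The dispatcher posts each server's semaphore before serving the IPC pipe and waits on the same semaphores once its loop has drained. A plausible shape of the worker-thread side, stated only as an assumption about this handshake (the real haywire worker is not shown here):

/* Hypothetical worker entry: wait for the dispatcher's go-ahead,
 * serve connections on a per-thread loop, then signal completion so
 * the final uv_sem_wait() loop above can return. */
static void connection_consumer(void* arg) {
    struct server_ctx* ctx = arg;

    uv_sem_wait(&ctx->semaphore);   /* released by uv_sem_post() above */
    /* ... connect to the IPC pipe, receive the listen handle, run the
     *     worker's event loop until shutdown ... */
    uv_sem_post(&ctx->semaphore);   /* lets the dispatcher finish */
}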
Example #12
static void nub__async_prepare_cb(uv_prepare_t* handle) {
  nub_loop_t* loop;
  nub_thread_t* thread;
  nub_work_t* work;

  loop = (nub_loop_t*) handle->data;

  while (!fuq_empty(&loop->work_queue_)) {
    work = (nub_work_t*) fuq_dequeue(&loop->work_queue_);
    thread = (nub_thread_t*) work->thread;

    if (NUB_LOOP_QUEUE_LOCK == work->work_type) {
      uv_sem_post(&thread->thread_lock_sem_);
      uv_sem_wait(&loop->loop_lock_sem_);
    } else if (NUB_LOOP_QUEUE_WORK == work->work_type) {
      work->cb(thread, work, work->arg);
      /* TODO(trevnorris): Still need to implement returning status. */
    } else {
      UNREACHABLE();
    }
  }
}
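
For NUB_LOOP_QUEUE_LOCK the event-loop thread hands itself over: it posts thread_lock_sem_ to release the blocking thread and immediately parks on loop_lock_sem_. Giving the loop back is then a single post from the other side; a sketch grounded only in the fields visible in these snippets (the function name is hypothetical):

/* Hypothetical release path on the spawned thread: once it has
 * finished using the event loop, post loop_lock_sem_ so the prepare
 * callback above can resume. */
void nub_loop_unlock(nub_thread_t* thread) {
  uv_sem_post(&thread->nubloop->loop_lock_sem_);
}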
Example #13
static void uv__stream_osx_select(void* arg) {
  uv_stream_t* stream;
  uv__stream_select_t* s;
  char buf[1024];
  int events;
  int fd;
  int r;
  int max_fd;

  stream = arg;
  s = stream->select;
  fd = s->fd;

  if (fd > s->int_fd)
    max_fd = fd;
  else
    max_fd = s->int_fd;

  while (1) {
    /* Terminate on semaphore */
    if (uv_sem_trywait(&s->close_sem) == 0)
      break;

    /* Watch fd using select(2) */
    memset(s->sread, 0, s->sread_sz);
    memset(s->swrite, 0, s->swrite_sz);

    if (uv__io_active(&stream->io_watcher, POLLIN))
      FD_SET(fd, s->sread);
    if (uv__io_active(&stream->io_watcher, POLLOUT))
      FD_SET(fd, s->swrite);
    FD_SET(s->int_fd, s->sread);

    /* Wait indefinitely for fd events */
    r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL);
    if (r == -1) {
      if (errno == EINTR)
        continue;

      /* XXX: Possible?! */
      abort();
    }

    /* Ignore timeouts */
    if (r == 0)
      continue;

    /* Empty socketpair's buffer in case of interruption */
    if (FD_ISSET(s->int_fd, s->sread))
      while (1) {
        r = read(s->int_fd, buf, sizeof(buf));

        if (r == sizeof(buf))
          continue;

        if (r != -1)
          break;

        if (errno == EAGAIN || errno == EWOULDBLOCK)
          break;

        if (errno == EINTR)
          continue;

        abort();
      }

    /* Handle events */
    events = 0;
    if (FD_ISSET(fd, s->sread))
      events |= POLLIN;
    if (FD_ISSET(fd, s->swrite))
      events |= POLLOUT;

    assert(events != 0 || FD_ISSET(s->int_fd, s->sread));
    if (events != 0) {
      ACCESS_ONCE(int, s->events) = events;

      uv_async_send(&s->async);
      uv_sem_wait(&s->async_sem);

      /* Should be processed at this stage */
      assert((s->events == 0) || (stream->flags & UV_CLOSING));
    }
  }
}
Example #14
int
main(int argc, char *argv[]) {
    int rc;
    uv_loop_t *loop;

    parse_opts(argc, argv);

#if !defined(_WIN32)
    if (xsignal) {
        return signal_process(xsignal, pidfile);
    }
#endif

    if (!password || !server_addr_buf) {
        print_usage(argv[0]);
        return 1;
    }

    init();

#if !defined(_WIN32)
    if (daemon_mode) {
        if (daemonize()) {
            return 1;
        }
        if (already_running(pidfile)) {
            logger_stderr("xsocks already running.");
            return 1;
        }
    }
#endif

    loop = uv_default_loop();

    rc = resolve_addr(local_addr, &bind_addr);
    if (rc) {
        logger_stderr("invalid local address");
        return 1;
    }

    rc = resolve_addr(server_addr_buf, &server_addr);
    if (rc) {
        logger_stderr("invalid server address");
        return 1;
    }

    udprelay_init();

    if (concurrency <= 1) {
        struct server_context ctx;
        ctx.udprelay = 1;
        ctx.udp_fd = create_socket(SOCK_DGRAM, 0);
        ctx.local_addr = &bind_addr;
        ctx.server_addr = &server_addr;

        uv_tcp_init(loop, &ctx.tcp);
        rc = uv_tcp_bind(&ctx.tcp, &bind_addr, 0);
        if (rc) {
            logger_stderr("bind error: %s", uv_strerror(rc));
            return 1;
        }
        rc = uv_listen((uv_stream_t*)&ctx.tcp, 128, client_accept_cb);
        if (rc == 0) {
            logger_log(LOG_INFO, "listening on %s", local_addr);

#if !defined(_WIN32)
            setup_signal(loop, signal_cb, &ctx);
#endif

            udprelay_start(loop, &ctx);

            uv_run(loop, UV_RUN_DEFAULT);

            close_loop(loop);

        } else {
            logger_stderr("listen error: %s", uv_strerror(rc));
        }

    } else {
#if !defined(_WIN32)
        struct server_context *servers = calloc(concurrency, sizeof(servers[0]));
        for (int i = 0; i < concurrency; i++) {
            struct server_context *ctx = servers + i;
            ctx->index = i;
            ctx->tcp_fd = create_socket(SOCK_STREAM, 1);
            ctx->udp_fd = create_socket(SOCK_DGRAM, 1);
            ctx->udprelay = 1;
            ctx->accept_cb = client_accept_cb;
            ctx->local_addr = &bind_addr;
            ctx->server_addr = &server_addr;
            rc = uv_sem_init(&ctx->semaphore, 0);
            rc = uv_thread_create(&ctx->thread_id, consumer_start, ctx);
        }

        logger_log(LOG_INFO, "listening on %s", local_addr);

        setup_signal(loop, signal_cb, servers);

        uv_run(loop, UV_RUN_DEFAULT);

        close_loop(loop);

        for (int i = 0; i < concurrency; i++) {
            uv_sem_wait(&servers[i].semaphore);
        }
        free(servers);
#else
        logger_stderr("don't support multithreading.");
        return 1;
#endif
    }

    udprelay_destroy();

#if !defined(_WIN32)
    if (daemon_mode) {
        delete_pidfile(pidfile);
    }
#endif

    logger_exit();

    return 0;
}
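
consumer_start() is referenced above but not shown; given the final uv_sem_wait() loop in the multi-threaded branch, each worker presumably posts its semaphore when its per-thread loop shuts down. A minimal sketch under that assumption:

/* Hypothetical worker body: run a per-thread listener loop, then
 * signal the main thread, which is blocked in uv_sem_wait() above. */
static void consumer_start(void* arg) {
    struct server_context* ctx = arg;
    /* ... bind ctx->tcp_fd / ctx->udp_fd, run this thread's loop ... */
    uv_sem_post(&ctx->semaphore);  /* lets main() proceed to cleanup */
}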
Example #15
void*
uvm_chime_wait(uvm_chime_t* c){
    uv_sem_wait(&c->sem);
    return c->value;
}
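
The natural counterpart publishes a value and posts the semaphore; a sketch assuming only the two fields used above (the function name is a guess):

/* Hypothetical signalling side of the chime: hand over a value and
 * wake whoever is blocked in uvm_chime_wait(). */
void
uvm_chime_post(uvm_chime_t* c, void* value){
    c->value = value;
    uv_sem_post(&c->sem);
}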
Example #16
static mrb_value
mrb_uv_sem_wait(mrb_state *mrb, mrb_value self)
{
  uv_sem_t *sem = (uv_sem_t*)mrb_uv_get_ptr(mrb, self, &sem_type);
  return uv_sem_wait(sem), self;
}
Example #17
int
main(int argc, char *argv[]) {
    int rc;
    uv_loop_t *loop;
    struct sockaddr bind_addr;

    parse_opts(argc, argv);

    if (xsignal) {
        return signal_process(xsignal, pidfile);
    }

    if (!tunnel_mode || !dest_addr || !password) {
        print_usage(argv[0]);
        return 1;
    }

    if (init()) {
        return 1;
    }

    if (daemon_mode) {
        if (daemonize()) {
            return 1;
        }
        if (already_running(pidfile)) {
            logger_stderr("xtunnel already running.");
            return 1;
        }
    }

    loop = uv_default_loop();

    rc = resolve_addr(source_addr, &bind_addr);
    if (rc) {
        logger_stderr("invalid local address");
        return 1;
    }

    rc = resolve_addr(dest_addr, &target_addr);
    if (rc) {
        logger_stderr("invalid target address");
        return 1;
    }

    if (concurrency <= 1) {
        struct server_context ctx;
        uv_tcp_init(loop, &ctx.tcp);
        rc = uv_tcp_bind(&ctx.tcp, &bind_addr, 0);
        if (rc) {
            logger_stderr("bind error: %s", uv_strerror(rc));
            return 1;
        }
        rc = uv_listen((uv_stream_t*)&ctx.tcp, SOMAXCONN, source_accept_cb);
        if (rc == 0) {
            logger_log(LOG_INFO, "listening on %s", source_addr);

            setup_signal(loop, signal_cb, &ctx);

            uv_run(loop, UV_RUN_DEFAULT);

            close_loop(loop);

        } else {
            logger_stderr("listen error: %s", uv_strerror(rc));
        }

    } else {
        struct server_context *servers = calloc(concurrency, sizeof(servers[0]));
        for (int i = 0; i < concurrency; i++) {
            struct server_context *ctx = servers + i;
            ctx->index = i;
            ctx->tcp_fd = create_socket(SOCK_STREAM, 1);
            ctx->accept_cb = source_accept_cb;
            ctx->nameserver_num = -1;
            ctx->local_addr = &bind_addr;
            rc = uv_sem_init(&ctx->semaphore, 0);
            rc = uv_thread_create(&ctx->thread_id, consumer_start, ctx);
        }

        logger_log(LOG_INFO, "listening on %s", source_addr);

        setup_signal(loop, signal_cb, servers);

        uv_run(loop, UV_RUN_DEFAULT);

        close_loop(loop);

        for (int i = 0; i < concurrency; i++) {
            uv_sem_wait(&servers[i].semaphore);
        }
        free(servers);
    }

    if (daemon_mode) {
        delete_pidfile(pidfile);
    }

    return 0;
}
Example #18
 void Semaphore::wait()
 {
     uv_sem_wait(&sem);
 }
Example #19
static void work_cb(uv_work_t* req) {
  uv_sem_wait(pause_sems + (req - pause_reqs));
}
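
The pointer arithmetic implies pause_reqs and pause_sems are parallel arrays, so each queued request parks its thread-pool worker on its own semaphore; releasing worker i is just the matching post. A sketch (the helper name is hypothetical):

/* Hypothetical helper: release the thread-pool worker parked in
 * work_cb() for request i. */
static void unblock_work(unsigned int i) {
  uv_sem_post(pause_sems + i);
}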