int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return -err;

  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err)
    goto error2;

  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err)
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return -err;
}
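The initial values (turnstile1 closed at 0, turnstile2 open at 1) are the setup for the classic two-turnstile barrier. A hedged sketch of the matching wait, following that textbook pattern rather than any particular source:

int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);  /* close the second turnstile */
    uv_sem_post(&barrier->turnstile1);  /* open the first */
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);    /* phase 1: all n threads pass */
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    uv_sem_wait(&barrier->turnstile1);  /* re-close the first turnstile */
    uv_sem_post(&barrier->turnstile2);  /* reopen the second */
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);    /* phase 2: drain before reuse */
  uv_sem_post(&barrier->turnstile2);
  return serial_thread;
}

The second phase is what makes the barrier reusable: no thread can start the next round until every thread has left the current one.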
int nub_thread_create(nub_loop_t* loop, nub_thread_t* thread) {
  uv_async_t* async_handle;
  int er;

  async_handle = (uv_async_t*) malloc(sizeof(*async_handle));
  CHECK_NE(NULL, async_handle);
  er = uv_async_init(&loop->uvloop, async_handle, nub__work_signal_cb);
  ASSERT(0 == er);
  async_handle->data = thread;
  thread->async_signal_ = async_handle;
  ASSERT(uv_loop_alive(&loop->uvloop));

  er = uv_sem_init(&thread->thread_lock_sem_, 0);
  ASSERT(0 == er);
  er = uv_sem_init(&thread->sem_wait_, 1);
  ASSERT(0 == er);

  fuq_init(&thread->incoming_);
  thread->disposed = 0;
  thread->nubloop = loop;
  thread->disposed_cb_ = NULL;
  thread->work.thread = thread;
  thread->work.work_type = NUB_LOOP_QUEUE_NONE;
  ++loop->ref_;

  return uv_thread_create(&thread->uvthread, nub__thread_entry_cb, thread);
}
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
  CFRunLoopSourceContext ctx;
  int r;

  if (uv__kqueue_init(loop))
    return -1;

  loop->cf_loop = NULL;
  if ((r = uv_mutex_init(&loop->cf_mutex)))
    return r;
  if ((r = uv_sem_init(&loop->cf_sem, 0)))
    return r;
  QUEUE_INIT(&loop->cf_signals);

  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;
  ctx.perform = uv__cf_loop_cb;
  loop->cf_cb = CFRunLoopSourceCreate(NULL, 0, &ctx);

  if ((r = uv_thread_create(&loop->cf_thread, uv__cf_loop_runner, loop)))
    return r;

  /* Synchronize threads */
  uv_sem_wait(&loop->cf_sem);
  assert(ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) != NULL);

  return 0;
}
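Here cf_sem is a startup handshake: init blocks until the CFRunLoop thread has published its run loop reference. A hedged sketch of the runner's side, modeled on libuv's darwin code of the same era (details may differ by version):

static void uv__cf_loop_runner(void* arg) {
  uv_loop_t* loop = arg;

  /* Publish the CFRunLoop reference the init asserts on. */
  ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) = CFRunLoopGetCurrent();
  CFRunLoopAddSource(loop->cf_loop, loop->cf_cb, kCFRunLoopDefaultMode);

  /* Wake the thread blocked in uv_sem_wait(&loop->cf_sem). */
  uv_sem_post(&loop->cf_sem);

  CFRunLoopRun();
}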
Semaphore::Semaphore(unsigned int value) {
  int r = uv_sem_init(&sem, value);
  if (r != 0) {
    throw std::runtime_error("Failed to create semaphore.");
  }
}
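A throwing constructor like this usually pairs with RAII cleanup and thin wrappers. A hedged sketch of how the rest of such a class might look; the method names are assumptions, not taken from the original source:

Semaphore::~Semaphore() {
  uv_sem_destroy(&sem);  // safe: the constructor threw if init failed
}

void Semaphore::wait() {
  uv_sem_wait(&sem);     // blocks until the count is positive
}

void Semaphore::post() {
  uv_sem_post(&sem);     // increments the count, waking one waiter
}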
hproc_t* kern_baseproc(void (*start_func)(void *arg), char *start_module) {
  hproc_t *p;
  Osdep *os;
  Osenv *e;

  /* create a kernel hosting proc */
  p = new_hproc(uv_default_loop());

  /* bind the base proc to the startup thread */
  baseinit(p);

  /* append it to the proc list */
  procs.head = p;
  p->prev = nil;
  procs.tail = p;

  /* set up the skeleton process environment */
  e = p->env;
  e->pgrp = newpgrp();
  e->fgrp = newfgrp(nil);
  e->egrp = newegrp();
  e->errstr = e->errbuf0;
  e->syserrstr = e->errbuf1;
  //e->user = strdup("node9");
  e->uid = hostuid;
  e->gid = hostgid;

  /* allocate the os dependency structure */
  os = malloc(sizeof(*os));
  if(os == nil) {
    panic("host_proc: no memory");
  }
  p->os = os;
  os->self = uv_thread_self();  /* just the handle for the primary thread */
  os->thread = nil;             /* because this is the process thread itself */
  uv_sem_init(&os->sem, 0);

  /* insert the startup function and module */
  p->func = start_func;
  p->arg = start_module;

  /* debug watchers */
  //uv_idle_init(p->loop, &idle_watcher);
  //uv_idle_start(&idle_watcher, debug_idle);
  //uv_prepare_init(p->loop, &prepare_watcher);
  //uv_prepare_start(&prepare_watcher, debug_prepare);
  //uv_poll_init(p->loop, &poll_watcher, 0);
  //uv_poll_start(&poll_watcher, 0, debug_poll);
  //uv_check_init(p->loop, &check_watcher);
  //uv_check_start(&check_watcher, debug_check);

  return p;
}
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  barrier->n = count;
  barrier->count = 0;

  if (uv_mutex_init(&barrier->mutex))
    return -1;
  if (uv_sem_init(&barrier->turnstile1, 0))
    goto error2;
  if (uv_sem_init(&barrier->turnstile2, 1))
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return -1;
}
static mrb_value mrb_uv_sem_init(mrb_state *mrb, mrb_value self) {
  mrb_int v;
  uv_sem_t* s;

  mrb_get_args(mrb, "i", &v);
  s = (uv_sem_t*)mrb_malloc(mrb, sizeof(uv_sem_t));
  mrb_uv_check_error(mrb, uv_sem_init(s, v));
  DATA_TYPE(self) = &sem_type;
  DATA_PTR(self) = s;
  return self;
}
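A binding like this is typically rounded out with wait/post methods that fetch the pointer back out of the wrapped object. A hedged sketch of that shape; the function names are assumptions, only DATA_PTR and the libuv calls are known API:

static mrb_value mrb_uv_sem_wait(mrb_state *mrb, mrb_value self) {
  uv_sem_wait((uv_sem_t*)DATA_PTR(self));  /* blocks the calling thread */
  return self;
}

static mrb_value mrb_uv_sem_post(mrb_state *mrb, mrb_value self) {
  uv_sem_post((uv_sem_t*)DATA_PTR(self));
  return self;
}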
async_worker_t *async_worker_create(void) {
  async_worker_t *worker = calloc(1, sizeof(struct async_worker_s));
  if(!worker) goto bail;
  if(uv_sem_init(&worker->sem, 0) < 0) goto bail;
  worker->async.data = worker;
  if(uv_async_init(loop, &worker->async, leave) < 0) goto bail;
  if(uv_thread_create(&worker->thread, work, worker) < 0) goto bail;
  return worker;

bail:
  async_worker_free(worker);
  return NULL;
}
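The create function only wires up the machinery; the thread routine presumably sleeps on the semaphore until a task is handed over, then bounces back to the loop thread via the async handle. A hedged sketch of what `work` might look like; the `task` and `arg` fields are hypothetical placeholders, not from the original struct:

static void work(void *arg) {
  async_worker_t *worker = arg;
  for(;;) {
    uv_sem_wait(&worker->sem);      /* sleep until a task is handed over */
    if(!worker->task) break;        /* hypothetical shutdown convention */
    worker->task(worker->arg);      /* run the task off the loop thread */
    uv_async_send(&worker->async);  /* fires `leave` back on the loop thread */
  }
}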
void Thread::start() {
  if (this->is_started.exchange(true)) {
    LOG(INFO) << "Thread " << static_cast<void*>(this) << " already started";
    return;
  }
  uv_sem_init(&this->semaphore, 0);
  uv_thread_create(&this->thread_id, &Thread::thread_worker,
                   static_cast<void*>(this));
  uv_sem_wait(&this->semaphore);    // block until the worker signals startup
  uv_sem_destroy(&this->semaphore); // one-shot: torn down immediately after
}
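A hedged sketch of the worker's side of this startup handshake, assuming thread_worker receives the Thread pointer as its argument; init() and run() are hypothetical stand-ins for whatever per-thread setup and main loop the class actually has:

void Thread::thread_worker(void* arg) {
  Thread* self = static_cast<Thread*>(arg);
  self->init();                   // hypothetical per-thread setup
  uv_sem_post(&self->semaphore);  // release start(); must not touch the
                                  // semaphore again, it is destroyed next
  self->run();                    // hypothetical thread main loop
}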
static void saturate_threadpool(void) {
  uv_loop_t* loop;
  char buf[64];
  size_t i;

  snprintf(buf, sizeof(buf),
           "UV_THREADPOOL_SIZE=%zu",
           ARRAY_SIZE(pause_reqs));
  putenv(buf);

  loop = uv_default_loop();
  for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1) {
    ASSERT(0 == uv_sem_init(pause_sems + i, 0));
    ASSERT(0 == uv_queue_work(loop, pause_reqs + i, work_cb, done_cb));
  }
}
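Each queued work item parks one pool thread on its semaphore until the test releases it. A hedged sketch of the companion callbacks in this saturate/unblock pattern (libuv's threadpool-cancel test uses essentially this shape, but treat the bodies as assumptions):

static void work_cb(uv_work_t* req) {
  uv_sem_wait(pause_sems + (req - pause_reqs));  /* pin one pool thread */
}

static void done_cb(uv_work_t* req, int status) {
  uv_sem_destroy(pause_sems + (req - pause_reqs));
}

static void unblock_threadpool(void) {
  size_t i;
  for (i = 0; i < ARRAY_SIZE(pause_reqs); i += 1)
    uv_sem_post(pause_sems + i);                 /* let the workers finish */
}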
void nub_loop_init(nub_loop_t* loop) {
  uv_async_t* async_handle;
  int er;

  er = uv_loop_init(&loop->uvloop);
  ASSERT(0 == er);

  er = uv_prepare_init(&loop->uvloop, &loop->queue_processor_);
  ASSERT(0 == er);
  loop->queue_processor_.data = loop;
  uv_unref((uv_handle_t*) &loop->queue_processor_);

  er = uv_mutex_init(&loop->queue_processor_lock_);
  ASSERT(0 == er);

  fuq_init(&loop->blocking_queue_);

  er = uv_sem_init(&loop->loop_lock_sem_, 0);
  ASSERT(0 == er);

  fuq_init(&loop->thread_dispose_queue_);
  er = uv_mutex_init(&loop->thread_dispose_lock_);
  ASSERT(0 == er);

  fuq_init(&loop->work_queue_);
  er = uv_mutex_init(&loop->work_lock_);
  ASSERT(0 == er);

  async_handle = (uv_async_t*) malloc(sizeof(*async_handle));
  CHECK_NE(NULL, async_handle);
  er = uv_async_init(&loop->uvloop, async_handle, nub__thread_dispose);
  ASSERT(0 == er);
  async_handle->data = loop;
  loop->work_ping_ = async_handle;
  uv_unref((uv_handle_t*) loop->work_ping_);

  loop->ref_ = 0;
  loop->disposed_ = 0;

  er = uv_prepare_start(&loop->queue_processor_, nub__async_prepare_cb);
  ASSERT(0 == er);
}
static void init_threads(void) {
  unsigned int i;
  const char* val;
  uv_sem_t sem;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = (uv_thread_t*)uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();
  if (uv_mutex_init(&mutex))
    abort();

  QUEUE_INIT(&wq);

  if (uv_sem_init(&sem, 0))
    abort();

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create(threads + i, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}
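The stack-allocated `sem` is safe only because each worker posts it exactly once during startup and never touches it again; init_threads waits for nthreads posts and then destroys it. A hedged sketch of the worker's opening lines (libuv's threadpool worker does essentially this, details vary by version):

static void worker(void* arg) {
  /* Signal init_threads() that this thread is up, then forget the
   * semaphore: it lives on init_threads()'s stack and is destroyed
   * as soon as every worker has checked in. */
  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  for (;;) {
    /* ... take work items off wq under `mutex`, waiting on `cond` ... */
  }
}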
static int Semaphore_tp_init(Semaphore *self, PyObject *args, PyObject *kwargs) {
  unsigned int value = 1;

  UNUSED_ARG(kwargs);
  RAISE_IF_INITIALIZED(self, -1);

  if (!PyArg_ParseTuple(args, "|I:__init__", &value)) {
    return -1;
  }

  if (uv_sem_init(&self->uv_semaphore, value)) {
    PyErr_SetString(PyExc_ThreadError, "Error initializing Semaphore");
    return -1;
  }

  self->initialized = True;
  return 0;
}
int uv__stream_try_select(uv_stream_t* stream, int fd) {
  /*
   * kqueue doesn't work with some files from /dev mount on osx.
   * select(2) in separate thread for those fds
   */
  struct kevent filter[1];
  struct kevent events[1];
  struct timespec timeout;
  uv__stream_select_t* s;
  int fds[2];
  int ret;
  int kq;

  kq = kqueue();
  if (kq == -1) {
    fprintf(stderr, "(libuv) Failed to create kqueue (%d)\n", errno);
    return uv__set_sys_error(stream->loop, errno);
  }

  EV_SET(&filter[0], fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);

  /* Use small timeout, because we only want to capture EINVALs */
  timeout.tv_sec = 0;
  timeout.tv_nsec = 1;

  ret = kevent(kq, filter, 1, events, 1, &timeout);
  SAVE_ERRNO(close(kq));

  if (ret == -1)
    return uv__set_sys_error(stream->loop, errno);

  if ((events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL)
    return 0;

  /* At this point we definitely know that this fd won't work with kqueue */
  s = malloc(sizeof(*s));
  if (s == NULL)
    return uv__set_artificial_error(stream->loop, UV_ENOMEM);

  s->fd = fd;

  if (uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb)) {
    SAVE_ERRNO(free(s));
    return uv__set_sys_error(stream->loop, errno);
  }

  s->async.flags |= UV__HANDLE_INTERNAL;
  uv__handle_unref(&s->async);

  if (uv_sem_init(&s->sem, 0))
    goto fatal1;

  if (uv_mutex_init(&s->mutex))
    goto fatal2;

  /* Create fds for io watcher and to interrupt the select() loop. */
  if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
    goto fatal3;

  s->fake_fd = fds[0];
  s->int_fd = fds[1];

  if (uv_thread_create(&s->thread, uv__stream_osx_select, stream))
    goto fatal4;

  s->stream = stream;
  stream->select = s;

  return 0;

fatal4:
  close(s->fake_fd);
  close(s->int_fd);
  s->fake_fd = -1;
  s->int_fd = -1;
fatal3:
  uv_mutex_destroy(&s->mutex);
fatal2:
  uv_sem_destroy(&s->sem);
fatal1:
  uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
  return uv__set_sys_error(stream->loop, errno);
}
int main(int argc, char *argv[]) {
  int rc;
  uv_loop_t *loop;
  struct sockaddr bind_addr;

  parse_opts(argc, argv);

  if (xsignal) {
    return signal_process(xsignal, pidfile);
  }

  if (!tunnel_mode || !dest_addr || !password) {
    print_usage(argv[0]);
    return 1;
  }

  if (init()) {
    return 1;
  }

  if (daemon_mode) {
    if (daemonize()) {
      return 1;
    }
    if (already_running(pidfile)) {
      logger_stderr("xtunnel already running.");
      return 1;
    }
  }

  loop = uv_default_loop();

  rc = resolve_addr(source_addr, &bind_addr);
  if (rc) {
    logger_stderr("invalid local address");
    return 1;
  }

  rc = resolve_addr(dest_addr, &target_addr);
  if (rc) {
    logger_stderr("invalid target address");
    return 1;
  }

  if (concurrency <= 1) {
    struct server_context ctx;

    uv_tcp_init(loop, &ctx.tcp);

    rc = uv_tcp_bind(&ctx.tcp, &bind_addr, 0);
    if (rc) {
      logger_stderr("bind error: %s", uv_strerror(rc));
      return 1;
    }

    rc = uv_listen((uv_stream_t*)&ctx.tcp, SOMAXCONN, source_accept_cb);
    if (rc == 0) {
      logger_log(LOG_INFO, "listening on %s", source_addr);
      setup_signal(loop, signal_cb, &ctx);
      uv_run(loop, UV_RUN_DEFAULT);
      close_loop(loop);
    } else {
      logger_stderr("listen error: %s", uv_strerror(rc));
    }

  } else {
    struct server_context *servers = calloc(concurrency, sizeof(servers[0]));
    for (int i = 0; i < concurrency; i++) {
      struct server_context *ctx = servers + i;
      ctx->index = i;
      ctx->tcp_fd = create_socket(SOCK_STREAM, 1);
      ctx->accept_cb = source_accept_cb;
      ctx->nameserver_num = -1;
      ctx->local_addr = &bind_addr;
      rc = uv_sem_init(&ctx->semaphore, 0);
      rc = uv_thread_create(&ctx->thread_id, consumer_start, ctx);
    }

    logger_log(LOG_INFO, "listening on %s", source_addr);

    setup_signal(loop, signal_cb, servers);
    uv_run(loop, UV_RUN_DEFAULT);
    close_loop(loop);

    for (int i = 0; i < concurrency; i++) {
      uv_sem_wait(&servers[i].semaphore);
    }
    free(servers);
  }

  if (daemon_mode) {
    delete_pidfile(pidfile);
  }

  return 0;
}
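main() only returns after uv_sem_wait succeeds for every worker, so each consumer thread presumably posts its semaphore as it shuts down. A hedged sketch of that shape; the body of consumer_start is an assumption, not taken from the original source:

static void consumer_start(void *arg) {
  struct server_context *ctx = arg;
  uv_loop_t loop;

  uv_loop_init(&loop);
  /* ... accept connections on ctx->tcp_fd until told to stop ... */
  uv_run(&loop, UV_RUN_DEFAULT);
  uv_loop_close(&loop);

  uv_sem_post(&ctx->semaphore);  /* unblock main()'s uv_sem_wait */
}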
static void eventpool_execute(uv_async_t *handle) {
  /*
   * Make sure we execute in the main thread
   */
  const uv_thread_t pth_cur_id = uv_thread_self();
  assert(uv_thread_equal(&pth_main_id, &pth_cur_id));

  struct threadpool_tasks_t **node = NULL;
  int nrlisteners1[REASON_END] = {0};
  int nr1 = 0, nrnodes = 16, nrnodes1 = 0, i = 0;

  if((node = MALLOC(sizeof(struct threadpool_tasks_t *)*nrnodes)) == NULL) {
    OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
  }

  uv_mutex_lock(&listeners_lock);

  struct eventqueue_t *queue = NULL;
  while(eventqueue) {
    queue = eventqueue;
    uv_sem_t *ref = NULL;

#ifdef _WIN32
    if((nr1 = InterlockedExchangeAdd(&nrlisteners[queue->reason], 0)) == 0) {
#else
    if((nr1 = __sync_add_and_fetch(&nrlisteners[queue->reason], 0)) == 0) {
#endif
      if(queue->done != NULL) {
        queue->done((void *)queue->data);
      }
    } else {
      if(threads == EVENTPOOL_THREADED) {
        if((ref = MALLOC(sizeof(uv_sem_t))) == NULL) {
          OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
        }
        uv_sem_init(ref, nr1-1);
      }

      struct eventpool_listener_t *listeners = eventpool_listeners;
      if(listeners == NULL) {
        if(queue->done != NULL) {
          queue->done((void *)queue->data);
        }
      }

      while(listeners) {
        if(listeners->reason == queue->reason) {
          if(nrnodes1 == nrnodes) {
            nrnodes *= 2;
            /*LCOV_EXCL_START*/
            if((node = REALLOC(node, sizeof(struct threadpool_tasks_t *)*nrnodes)) == NULL) {
              OUT_OF_MEMORY
            }
            /*LCOV_EXCL_STOP*/
          }
          if((node[nrnodes1] = MALLOC(sizeof(struct threadpool_tasks_t))) == NULL) {
            OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
          }
          node[nrnodes1]->func = listeners->func;
          node[nrnodes1]->userdata = queue->data;
          node[nrnodes1]->done = queue->done;
          node[nrnodes1]->ref = ref;
          node[nrnodes1]->reason = listeners->reason;
          nrnodes1++;
          if(threads == EVENTPOOL_THREADED) {
            nrlisteners1[queue->reason]++;
          }
        }
        listeners = listeners->next;
      }
    }
    eventqueue = eventqueue->next;
    FREE(queue);
  }
  uv_mutex_unlock(&listeners_lock);

  if(nrnodes1 > 0) {
    for(i=0;i<nrnodes1;i++) {
      if(threads == EVENTPOOL_NO_THREADS) {
        nrlisteners1[node[i]->reason]++;
        node[i]->func(node[i]->reason, node[i]->userdata);
#ifdef _WIN32
        if(nrlisteners1[node[i]->reason] == InterlockedExchangeAdd(&nrlisteners[node[i]->reason], 0)) {
#else
        if(nrlisteners1[node[i]->reason] == __sync_add_and_fetch(&nrlisteners[node[i]->reason], 0)) {
#endif
          if(node[i]->done != NULL) {
            node[i]->done((void *)node[i]->userdata);
          }
          nrlisteners1[node[i]->reason] = 0;
        }
      } else {
        struct threadpool_data_t *tpdata = NULL;
        tpdata = MALLOC(sizeof(struct threadpool_data_t));
        if(tpdata == NULL) {
          OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
        }
        tpdata->userdata = node[i]->userdata;
        tpdata->func = node[i]->func;
        tpdata->done = node[i]->done;
        tpdata->ref = node[i]->ref;
        tpdata->reason = node[i]->reason;
        tpdata->priority = reasons[node[i]->reason].priority;

        uv_work_t *tp_work_req = MALLOC(sizeof(uv_work_t));
        if(tp_work_req == NULL) {
          OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
        }
        tp_work_req->data = tpdata;
        if(uv_queue_work(uv_default_loop(), tp_work_req, reasons[node[i]->reason].reason, fib, fib_free) < 0) {
          if(node[i]->done != NULL) {
            node[i]->done((void *)node[i]->userdata);
          }
          FREE(tpdata);
          FREE(node[i]->ref);
        }
      }
      FREE(node[i]);
    }
  }

  for(i=0;i<REASON_END;i++) {
    nrlisteners1[i] = 0;
  }

  FREE(node);

  uv_mutex_lock(&listeners_lock);
  if(eventqueue != NULL) {
    uv_async_send(async_req);
  }
  uv_mutex_unlock(&listeners_lock);
}
void queue_init(struct queue *q) {
  uv_sem_init(&q->fill_count, 0);           /* number of items in the queue */
  uv_sem_init(&q->empty_count, QUEUE_CAP);  /* remaining free slots */
  q->head = q->tail = 0;
}
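This is the classic bounded-buffer pairing of counting semaphores. A hedged sketch of the push/pop sides, assuming a QUEUE_CAP-sized ring; the `items` member and function names are assumptions, only fill_count, empty_count, head, and tail come from the init above:

void queue_push(struct queue *q, void *item) {
  uv_sem_wait(&q->empty_count);        /* block while the ring is full */
  q->items[q->tail] = item;            /* `items` is an assumed member */
  q->tail = (q->tail + 1) % QUEUE_CAP;
  uv_sem_post(&q->fill_count);         /* publish one more item */
}

void *queue_pop(struct queue *q) {
  void *item;
  uv_sem_wait(&q->fill_count);         /* block while the ring is empty */
  item = q->items[q->head];
  q->head = (q->head + 1) % QUEUE_CAP;
  uv_sem_post(&q->empty_count);        /* free one slot */
  return item;
}

With a single producer and a single consumer the head/tail updates need no mutex; with more threads on either side they do.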
/* create a slave kernel process */
void kproc(char *name, void (*func)(void*), void *arg, int flags) {
  kproc_t* p;
  Pgrp *pg;
  Fgrp *fg;
  Egrp *eg;
  Osdep *os;

  /* create a bare kernel proc */
  p = new_kproc();
  if(p == nil)
    panic("kproc: no memory");

  /* the hosting proc should be the same as the caller's */
  p->hproc = up->hproc;

  os = malloc(sizeof(*os));
  if(os == nil) {
    panic("kproc: no memory for os dependencies");
  }
  p->os = os;

  /* initialize per-kproc os dependencies */
  os->self = 0;      /* set by tramp */
  os->thread = nil;  /* set by uv_thread_create */
  uv_sem_init(&os->sem, 0);

  /* copy optional parent environment */
  if(flags & KPDUPPG) {
    pg = up->env->pgrp;
    incref(&pg->r);
    p->env->pgrp = pg;
  }
  if(flags & KPDUPFDG) {
    fg = up->env->fgrp;
    incref(&fg->r);
    p->env->fgrp = fg;
  }
  if(flags & KPDUPENVG) {
    eg = up->env->egrp;
    incref(&eg->r);
    p->env->egrp = eg;
  }

  /* copy parent user info */
  p->env->uid = up->env->uid;
  p->env->gid = up->env->gid;
  kstrdup(&p->env->user, up->env->user);
  strcpy(p->text, name);

  /* patch in start function */
  p->func = func;
  p->arg = arg;

  /* update the proc list */
  lock(&procs.l);
  if(procs.tail != nil) {
    p->prev = procs.tail;
    procs.tail->next = p;
  } else {
    procs.head = p;
    p->prev = nil;
  }
  procs.tail = p;
  unlock(&procs.l);

  if(uv_thread_create(&os->thread, tramp, p))
    panic("kernel thread create failed\n");
}
int main(int argc, char *argv[]) {
  int rc;
  uv_loop_t *loop;

  parse_opts(argc, argv);

#if !defined(_WIN32)
  if (xsignal) {
    return signal_process(xsignal, pidfile);
  }
#endif

  if (!password || !server_addr_buf) {
    print_usage(argv[0]);
    return 1;
  }

  init();

#if !defined(_WIN32)
  if (daemon_mode) {
    if (daemonize()) {
      return 1;
    }
    if (already_running(pidfile)) {
      logger_stderr("xsocks already running.");
      return 1;
    }
  }
#endif

  loop = uv_default_loop();

  rc = resolve_addr(local_addr, &bind_addr);
  if (rc) {
    logger_stderr("invalid local address");
    return 1;
  }

  rc = resolve_addr(server_addr_buf, &server_addr);
  if (rc) {
    logger_stderr("invalid server address");
    return 1;
  }

  udprelay_init();

  if (concurrency <= 1) {
    struct server_context ctx;
    ctx.udprelay = 1;
    ctx.udp_fd = create_socket(SOCK_DGRAM, 0);
    ctx.local_addr = &bind_addr;
    ctx.server_addr = &server_addr;

    uv_tcp_init(loop, &ctx.tcp);

    rc = uv_tcp_bind(&ctx.tcp, &bind_addr, 0);
    if (rc) {
      logger_stderr("bind error: %s", uv_strerror(rc));
      return 1;
    }

    rc = uv_listen((uv_stream_t*)&ctx.tcp, 128, client_accept_cb);
    if (rc == 0) {
      logger_log(LOG_INFO, "listening on %s", local_addr);

#if !defined(_WIN32)
      setup_signal(loop, signal_cb, &ctx);
#endif

      udprelay_start(loop, &ctx);

      uv_run(loop, UV_RUN_DEFAULT);
      close_loop(loop);
    } else {
      logger_stderr("listen error: %s", uv_strerror(rc));
    }

  } else {
#if !defined(_WIN32)
    struct server_context *servers = calloc(concurrency, sizeof(servers[0]));
    for (int i = 0; i < concurrency; i++) {
      struct server_context *ctx = servers + i;
      ctx->index = i;
      ctx->tcp_fd = create_socket(SOCK_STREAM, 1);
      ctx->udp_fd = create_socket(SOCK_DGRAM, 1);
      ctx->udprelay = 1;
      ctx->accept_cb = client_accept_cb;
      ctx->local_addr = &bind_addr;
      ctx->server_addr = &server_addr;
      rc = uv_sem_init(&ctx->semaphore, 0);
      rc = uv_thread_create(&ctx->thread_id, consumer_start, ctx);
    }

    logger_log(LOG_INFO, "listening on %s", local_addr);

    setup_signal(loop, signal_cb, servers);
    uv_run(loop, UV_RUN_DEFAULT);
    close_loop(loop);

    for (int i = 0; i < concurrency; i++) {
      uv_sem_wait(&servers[i].semaphore);
    }
    free(servers);
#else
    logger_stderr("multithreading is not supported on this platform.");
    return 1;
#endif
  }

  udprelay_destroy();

#if !defined(_WIN32)
  if (daemon_mode) {
    delete_pidfile(pidfile);
  }
#endif

  logger_exit();

  return 0;
}
void uvm_chime_init(uvm_chime_t* in) {
  in->value = 0;
  uv_sem_init(&in->sem, 0);
}
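A "chime" initialized to zero is a simple one-shot wakeup. A hedged sketch of the wait/ring pair such an init implies; the function names are assumptions, only the fields from the init are given:

void uvm_chime_wait(uvm_chime_t* in) {
  uv_sem_wait(&in->sem);  /* sleep until another thread rings */
}

void uvm_chime_ring(uvm_chime_t* in) {
  uv_sem_post(&in->sem);  /* wake one waiter */
}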