/* Reusable two-turnstile barrier (cf. "The Little Book of Semaphores").
 * Blocks until barrier->n threads have arrived. Returns non-zero in exactly
 * one thread per cycle (the last one to leave) so the caller can elect a
 * "serial" thread, and zero in all others. */
int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    /* Last arrival: drain turnstile2 so it is closed for the exit phase,
     * then open turnstile1 to release the waiting threads. */
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* First turnstile: each thread passes and immediately re-posts so the
   * next waiter can pass too. */
  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  /* The thread that decrements the count to zero is the serial thread. */
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    /* Last to leave: re-close turnstile1 for the next cycle and open
     * turnstile2 to release everyone from the exit phase. */
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Second turnstile: pass and re-post, same cascade as above. */
  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);

  return serial_thread;
}
/* Set up an IPC pipe server that hands out listen sockets to the worker
 * threads. It's kind of cumbersome for such a simple operation, maybe we
 * should revive uv_import() and uv_export().
 *
 * Runs the dispatch loop on the default loop until it is stopped, then
 * releases the worker threads' semaphores and waits for them to re-post.
 * Fix: the original assigned every libuv return code to `rc` and never
 * checked it, silently continuing after a failed bind/listen. */
void start_connection_dispatching(uv_handle_type type,
                                  unsigned int num_servers,
                                  struct server_ctx* servers,
                                  char* listen_address,
                                  int listen_port) {
  int rc;
  struct ipc_server_ctx ctx;
  uv_loop_t* loop;
  unsigned int i;

  loop = uv_default_loop();
  ctx.num_connects = num_servers;

  if (type == UV_TCP) {
    /* NOTE(review): listen_addr is not declared locally; presumably a
     * file-scope sockaddr — confirm. */
    rc = uv_ip4_addr(listen_address, listen_port, &listen_addr);
    if (rc != 0) {
      fprintf(stderr, "uv_ip4_addr: %s\n", uv_strerror(rc));
      return;
    }

    rc = uv_tcp_init(loop, (uv_tcp_t*) &ctx.server_handle);
    if (rc != 0) {
      fprintf(stderr, "uv_tcp_init: %s\n", uv_strerror(rc));
      return;
    }

    rc = uv_tcp_bind((uv_tcp_t*) &ctx.server_handle,
                     (const struct sockaddr*) &listen_addr,
                     0);
    if (rc != 0) {
      fprintf(stderr, "uv_tcp_bind: %s\n", uv_strerror(rc));
      return;
    }

    printf("Listening on %s:%d\n", listen_address, listen_port);
  }

  rc = uv_pipe_init(loop, &ctx.ipc_pipe, 1);
  if (rc != 0) {
    fprintf(stderr, "uv_pipe_init: %s\n", uv_strerror(rc));
    return;
  }

  rc = uv_pipe_bind(&ctx.ipc_pipe, "HAYWIRE_CONNECTION_DISPATCH_PIPE_NAME");
  if (rc != 0) {
    fprintf(stderr, "uv_pipe_bind: %s\n", uv_strerror(rc));
    return;
  }

  rc = uv_listen((uv_stream_t*) &ctx.ipc_pipe, 128, ipc_connection_cb);
  if (rc != 0) {
    fprintf(stderr, "uv_listen: %s\n", uv_strerror(rc));
    return;
  }

  /* Let the worker threads proceed; they connect to the IPC pipe to
   * receive their copy of the listen handle. */
  for (i = 0; i < num_servers; i++)
    uv_sem_post(&servers[i].semaphore);

  rc = uv_run(loop, UV_RUN_DEFAULT);

  /* Drain the close of the server handle before waiting on the workers. */
  uv_close((uv_handle_t*) &ctx.server_handle, NULL);
  rc = uv_run(loop, UV_RUN_DEFAULT);

  for (i = 0; i < num_servers; i++)
    uv_sem_wait(&servers[i].semaphore);
}
/* Worker-thread entry point: creates a per-thread loop, obtains the shared
 * listen handle from the main thread over IPC, and runs the loop. */
void connection_consumer_start(void *arg) {
  int rc;
  struct server_ctx *ctx;
  uv_loop_t* loop;

  ctx = arg;
  loop = uv_loop_new();
  /* NOTE(review): this copies the uv_loop_t by value into the global array
   * while `loop` keeps running the original — confirm nothing reads the
   * copy expecting live state. */
  listener_event_loops[ctx->index] = *loop;

  http_request_cache_configure_listener(loop, &listener_async_handles[ctx->index]);
  uv_barrier_wait(listeners_created_barrier);

  /* Async handle used to ask this consumer to shut down; unref'd so it
   * does not keep the loop alive on its own. */
  rc = uv_async_init(loop, &ctx->async_handle, connection_consumer_close);
  uv_unref((uv_handle_t*) &ctx->async_handle);

  /* Wait until the main thread is ready. */
  uv_sem_wait(&ctx->semaphore);
  get_listen_handle(loop, (uv_stream_t*) &ctx->server_handle);
  /* Tell the main thread we picked up the handle. */
  uv_sem_post(&ctx->semaphore);

  rc = uv_listen((uv_stream_t*)&ctx->server_handle, 128, connection_consumer_new_connection);
  rc = uv_run(loop, UV_RUN_DEFAULT);

  uv_loop_delete(loop);
}
/* Tear down a stream handle: stop the macOS select() helper thread (if
 * any), stop reading, close the underlying fds, and assert the watcher is
 * fully inactive. Statement order is load-bearing throughout. */
void uv__stream_close(uv_stream_t* handle) {
#if defined(__APPLE__)
  /* Terminate select loop first */
  if (handle->select != NULL) {
    uv__stream_select_t* s;
    s = handle->select;

    /* Wake the select thread, interrupt its select() call, then join it
     * before destroying the synchronization primitives it uses. */
    uv_sem_post(&s->sem);
    uv__stream_osx_interrupt_select(handle);
    uv_thread_join(&s->thread);
    uv_sem_destroy(&s->sem);
    uv_mutex_destroy(&s->mutex);
    close(s->fake_fd);
    close(s->int_fd);
    uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);

    handle->select = NULL;
  }
#endif /* defined(__APPLE__) */

  uv_read_stop(handle);
  uv__io_close(handle->loop, &handle->io_watcher);

  close(handle->io_watcher.fd);
  handle->io_watcher.fd = -1;

  /* A connection accepted but never picked up by the user still owns an
   * fd; release it too. */
  if (handle->accepted_fd >= 0) {
    close(handle->accepted_fd);
    handle->accepted_fd = -1;
  }

  assert(!uv__io_active(&handle->io_watcher, UV__POLLIN | UV__POLLOUT));
}
/* Shut down and reclaim an idle worker. Must only be called when the
 * worker has no work in flight (asserted below). Safe to call with NULL.
 * Fix: uv_close() takes a close callback as its second argument (NULL is
 * permitted); the original omitted it. */
void async_worker_free(async_worker_t *const worker) {
	if(!worker) return;
	assert(!worker->work);
	/* Wake the worker thread; with no work queued it exits its loop. */
	uv_sem_post(worker->sem);
	uv_thread_join(worker->thread);
	uv_sem_destroy(worker->sem);
	uv_close((uv_handle_t *)&worker->async, NULL);
	/* NOTE(review): uv_close() completes asynchronously on a later loop
	 * iteration, yet `worker` (which embeds the async handle) is freed
	 * immediately — confirm the owning loop no longer runs, or move the
	 * free into a close callback. */
	free(worker);
}
/* Non-blocking operation. Returns -1 if queue is full */
int queue_push(struct queue *q, struct packet_buffer b) {
  /* Try to claim an empty slot without blocking; bail out if none left. */
  if (uv_sem_trywait(&q->empty_count) != 0)
    return -1;

  q->buf[q->tail] = b;
  q->tail = (q->tail + 1) % QUEUE_CAP;

  /* Publish the newly filled slot to the consumer side. */
  uv_sem_post(&q->fill_count);
  return 0;
}
/* Non-blocking operation. Returns {0, 0} if queue is empty */ struct packet_buffer queue_pop(struct queue *q) { int success = uv_sem_trywait(&q->fill_count); if (success != 0) { return (struct packet_buffer){.payload_len = 0, .payload = NULL}; } struct packet_buffer result = q->buf[q->head]; q->head = (q->head + 1) % QUEUE_CAP; uv_sem_post(&q->empty_count); return result; }
/* Python binding: Semaphore.post(). Increments the wrapped uv semaphore.
 * Raises (via the macro) if the object was never initialized. The GIL is
 * released around the post; uv_sem_post itself does not block. */
static PyObject *
Semaphore_func_post(Semaphore *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    Py_BEGIN_ALLOW_THREADS
    uv_sem_post(&self->uv_semaphore);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
/* Async callback run on the event-loop thread: hands the loop lock to each
 * blocked spawned thread in FIFO order, waiting for it to be returned
 * before serving the next one. */
static void nub__work_signal_cb(uv_async_t* handle) {
  nub_loop_t* loop;
  nub_thread_t* thread;

  loop = ((nub_thread_t*) handle->data)->nubloop;

  while (!fuq_empty(&loop->blocking_queue_)) {
    thread = (nub_thread_t*) fuq_dequeue(&loop->blocking_queue_);
    /* Release the waiting thread... */
    uv_sem_post(&thread->thread_lock_sem_);
    /* ...then block here until it gives the loop lock back. */
    uv_sem_wait(&loop->loop_lock_sem_);
  }
}
/* Dispose of a spawned thread: flag it for shutdown, wake it, join it,
 * then tear down its handles/semaphores and drop the loop reference.
 * Statement order matters: the post must precede the join. */
void nub_thread_join(nub_thread_t* thread) {
  ASSERT(NULL != thread);

  thread->disposed = 1;
  /* Wake the thread so it observes `disposed` and exits. */
  uv_sem_post(&thread->sem_wait_);
  uv_thread_join(&thread->uvthread);

  /* Close the async handle; nub__free_handle_cb reclaims its memory. */
  uv_close((uv_handle_t*) thread->async_signal_, nub__free_handle_cb);
  uv_sem_destroy(&thread->thread_lock_sem_);
  uv_sem_destroy(&thread->sem_wait_);

  --thread->nubloop->ref_;
  thread->nubloop = NULL;
}
/* Reusable two-turnstile barrier (cf. "The Little Book of Semaphores").
 * Blocks until barrier->n threads have arrived, then releases them all;
 * safe to reuse for subsequent cycles. */
void uv_barrier_wait(uv_barrier_t* barrier) {
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    /* Last arrival: close turnstile2 for the exit phase, open turnstile1. */
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* First turnstile: pass and re-post so the next waiter can pass. */
  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  if (--barrier->count == 0) {
    /* Last to leave: re-close turnstile1 for the next cycle, open
     * turnstile2 to release the exit phase. */
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  /* Second turnstile: pass and re-post, same cascade as above. */
  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
}
void Thread::thread_worker(void *arg) { Thread* self = static_cast<Thread*>(arg); self->on_run(); uv_async_init(&self->loop, &self->async, &Thread::thread_notify); uv_sem_post(&self->semaphore); uv_run(&self->loop, UV_RUN_DEFAULT); uv_close((uv_handle_t*)&self->async, NULL); self->on_stop(); }
/* Mark kernel process p runnable by posting its per-OS-dep semaphore.
 * (Previously cluttered with commented-out legacy sem_post/debug code —
 * removed.) */
void osready(kproc_t* p)
{
	Osdep *os;

	os = p->os;
	uv_sem_post(&os->sem);
}
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

  /* Tell the spawner this worker is up; arg is a startup semaphore. */
  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  for (;;) {
    uv_mutex_lock(&mutex);

    /* Sleep until work (or the exit message) appears on the global queue. */
    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      /* Leave the exit message queued and wake another worker so the
       * shutdown propagates through the whole pool. */
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
                         executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    /* Hand the finished request back to its owning loop. */
    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
}
/* Thread body that hosts the CFRunLoop for a uv loop: registers the
 * callback source, signals readiness, and blocks in CFRunLoopRun until
 * the run loop is stopped. */
void uv__cf_loop_runner(void* arg) {
  uv_loop_t* loop;

  loop = arg;

  /* Get thread's loop */
  ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) = CFRunLoopGetCurrent();

  CFRunLoopAddSource(loop->cf_loop, loop->cf_cb, kCFRunLoopDefaultMode);

  /* cf_loop is published and the source is attached; unblock the waiter. */
  uv_sem_post(&loop->cf_sem);

  CFRunLoopRun();

  CFRunLoopRemoveSource(loop->cf_loop, loop->cf_cb, kCFRunLoopDefaultMode);
}
/* Prepare-phase callback: drains every work item queued by spawned
 * threads and dispatches it by type. */
static void nub__async_prepare_cb(uv_prepare_t* handle) {
  nub_loop_t* loop = (nub_loop_t*) handle->data;

  while (!fuq_empty(&loop->work_queue_)) {
    nub_work_t* work = (nub_work_t*) fuq_dequeue(&loop->work_queue_);
    nub_thread_t* thread = (nub_thread_t*) work->thread;

    switch (work->work_type) {
      case NUB_LOOP_QUEUE_LOCK:
        /* Hand the loop lock to the thread, then wait for it back. */
        uv_sem_post(&thread->thread_lock_sem_);
        uv_sem_wait(&loop->loop_lock_sem_);
        break;

      case NUB_LOOP_QUEUE_WORK:
        work->cb(thread, work, work->arg);
        /* TODO(trevnorris): Still need to implement returning status. */
        break;

      default:
        UNREACHABLE();
    }
  }
}
void consumer_start(void *arg) { uv_loop_t loop; struct server_context *server = arg; #ifndef CROSS_COMPILE char name[24] = {0}; sprintf(name, "consumer-%d", server->index + 1); pthread_setname_np(pthread_self(), name); #endif uv_loop_init(&loop); struct resolver_context *res = NULL; if (server->nameserver_num >= 0) { res = resolver_init(&loop, 0, server->nameserver_num == 0 ? NULL : server->nameservers, server->nameserver_num); loop.data = res; } tcp_bind(&loop, server); if (server->udprelay) { udprelay_start(&loop, server); } uv_run(&loop, UV_RUN_DEFAULT); close_loop(&loop); if (server->nameserver_num >= 0) { resolver_destroy(res); } uv_sem_post(&server->semaphore); }
/* Test-thread body: installs SIGUSR1/SIGUSR2 watchers per `action`, then
 * twice (initial run + restart) signals readiness via `sem` and runs the
 * loop until every watcher has caught its signal. */
static void signal_handling_worker(void* context) {
  enum signal_action action;
  uv_signal_t signal1a;
  uv_signal_t signal1b;
  uv_signal_t signal2;
  uv_loop_t loop;
  int r;

  action = (enum signal_action) (uintptr_t) context;

  ASSERT(0 == uv_loop_init(&loop));

  /* Setup the signal watchers and start them. */
  if (action == ONLY_SIGUSR1 || action == SIGUSR1_AND_SIGUSR2) {
    /* Two watchers on the same signal exercise multi-watcher dispatch. */
    r = uv_signal_init(&loop, &signal1a);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1a, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
    r = uv_signal_init(&loop, &signal1b);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1b, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
  }

  if (action == ONLY_SIGUSR2 || action == SIGUSR1_AND_SIGUSR2) {
    r = uv_signal_init(&loop, &signal2);
    ASSERT(r == 0);
    r = uv_signal_start(&signal2, signal2_cb, SIGUSR2);
    ASSERT(r == 0);
  }

  /* Signal watchers are now set up. */
  uv_sem_post(&sem);

  /* Wait for all signals. The signal callbacks stop the watcher, so uv_run
   * will return when all signal watchers caught a signal.
   */
  r = uv_run(&loop, UV_RUN_DEFAULT);
  ASSERT(r == 0);

  /* Restart the signal watchers. */
  if (action == ONLY_SIGUSR1 || action == SIGUSR1_AND_SIGUSR2) {
    r = uv_signal_start(&signal1a, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1b, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
  }

  if (action == ONLY_SIGUSR2 || action == SIGUSR1_AND_SIGUSR2) {
    r = uv_signal_start(&signal2, signal2_cb, SIGUSR2);
    ASSERT(r == 0);
  }

  /* Wait for signals once more. */
  uv_sem_post(&sem);

  r = uv_run(&loop, UV_RUN_DEFAULT);
  ASSERT(r == 0);

  /* Close the watchers. */
  if (action == ONLY_SIGUSR1 || action == SIGUSR1_AND_SIGUSR2) {
    uv_close((uv_handle_t*) &signal1a, NULL);
    uv_close((uv_handle_t*) &signal1b, NULL);
  }

  if (action == ONLY_SIGUSR2 || action == SIGUSR1_AND_SIGUSR2) {
    uv_close((uv_handle_t*) &signal2, NULL);
  }

  /* Wait for the signal watchers to close. */
  r = uv_run(&loop, UV_RUN_DEFAULT);
  ASSERT(r == 0);

  uv_loop_close(&loop);
}
/* Return the event-loop lock held on behalf of the given spawned thread. */
void nub_loop_unlock(nub_thread_t* thread) {
  nub_loop_t* owner = thread->nubloop;
  uv_sem_post(&owner->loop_lock_sem_);
}
/* mruby binding: posts the uv semaphore wrapped by `self` and returns
 * self (comma operator: uv_sem_post returns void, so the expression
 * evaluates to self). */
static mrb_value
mrb_uv_sem_post(mrb_state *mrb, mrb_value self)
{
  uv_sem_t *sem = (uv_sem_t*)mrb_uv_get_ptr(mrb, self, &sem_type);
  return uv_sem_post(sem), self;
}
/* Queue a work item for the spawned thread and wake it. The enqueue must
 * happen before the post so the thread sees the item when it wakes. */
void nub_thread_enqueue(nub_thread_t* thread, nub_work_t* work) {
  fuq_enqueue(&thread->incoming_, (void*) work);
  uv_sem_post(&thread->sem_wait_);
}
/* Async callback for the embed test: runs one iteration of the default
 * loop, then signals the embedding thread that the iteration finished. */
static void embed_cb(uv_async_t* async) {
  uv_run(uv_default_loop(), UV_RUN_ONCE);
  uv_sem_post(&embed_sem);
}
/* Test-thread body (legacy libuv API: one-argument uv_run, uv_loop_new/
 * uv_loop_delete): installs SIGUSR1/SIGUSR2 watchers per `mask`, then
 * twice signals readiness via `sem` and runs the loop until every watcher
 * has caught its signal. */
static void signal_handling_worker(void* context) {
  uintptr_t mask = (uintptr_t) context;
  uv_loop_t* loop;
  uv_signal_t signal1a;
  uv_signal_t signal1b;
  uv_signal_t signal2;
  int r;

  loop = uv_loop_new();
  ASSERT(loop != NULL);

  /* Setup the signal watchers and start them. */
  if (mask & SIGUSR1) {
    /* Two watchers on the same signal exercise multi-watcher dispatch. */
    r = uv_signal_init(loop, &signal1a);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1a, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
    r = uv_signal_init(loop, &signal1b);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1b, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
  }

  if (mask & SIGUSR2) {
    r = uv_signal_init(loop, &signal2);
    ASSERT(r == 0);
    r = uv_signal_start(&signal2, signal2_cb, SIGUSR2);
    ASSERT(r == 0);
  }

  /* Signal watchers are now set up. */
  uv_sem_post(&sem);

  /* Wait for all signals. The signal callbacks stop the watcher, so uv_run
   * will return when all signal watchers caught a signal.
   */
  r = uv_run(loop);
  ASSERT(r == 0);

  /* Restart the signal watchers. */
  if (mask & SIGUSR1) {
    r = uv_signal_start(&signal1a, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
    r = uv_signal_start(&signal1b, signal1_cb, SIGUSR1);
    ASSERT(r == 0);
  }

  if (mask & SIGUSR2) {
    r = uv_signal_start(&signal2, signal2_cb, SIGUSR2);
    ASSERT(r == 0);
  }

  /* Wait for signals once more. */
  uv_sem_post(&sem);

  r = uv_run(loop);
  ASSERT(r == 0);

  /* Close the watchers. */
  if (mask & SIGUSR1) {
    uv_close((uv_handle_t*) &signal1a, NULL);
    uv_close((uv_handle_t*) &signal1b, NULL);
  }

  if (mask & SIGUSR2) {
    uv_close((uv_handle_t*) &signal2, NULL);
  }

  /* Wait for the signal watchers to close. */
  r = uv_run(loop);
  ASSERT(r == 0);

  uv_loop_delete(loop);
}
/* Wake the given worker thread by posting its semaphore. */
static void enter(async_worker_t *const worker) {
	uv_sem_post(worker->sem);
}
// Increment the wrapped uv semaphore, waking one waiter if any.
void Semaphore::post() {
  uv_sem_post(&sem);
}
/* Release one permit per pause request so every paused threadpool worker
 * resumes. */
static void unblock_threadpool(void) {
  size_t idx;

  for (idx = 0; idx < ARRAY_SIZE(pause_reqs); idx++)
    uv_sem_post(&pause_sems[idx]);
}
/* Store `value` in the chime and post its semaphore to release a waiter.
 * The store must precede the post so the waiter observes the value. */
void uvm_chime_fill(uvm_chime_t* c, void* value){
  c->value = value;
  uv_sem_post(&c->sem);
}