static void pollable_destroy(pollable *p) {
  po_destroy(&p->po);
  if (p->epfd != -1) {
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
  }
}
void test_many_fds(void) {
  int i;
  grpc_wakeup_fd fd[1000];
  for (i = 0; i < 1000; i++) {
    GPR_ASSERT(grpc_wakeup_fd_init(&fd[i]) == GRPC_ERROR_NONE);
  }
  for (i = 0; i < 1000; i++) {
    grpc_wakeup_fd_destroy(&fd[i]);
  }
}
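/* A minimal sketch (not from the original sources) of the single-fd
 * lifecycle that test_many_fds exercises in bulk: init, trigger, consume,
 * destroy. It uses only APIs that already appear elsewhere in this listing;
 * wakeup_fd_roundtrip itself is a hypothetical helper name. */
static void wakeup_fd_roundtrip(void) {
  grpc_wakeup_fd fd;
  GPR_ASSERT(grpc_wakeup_fd_init(&fd) == GRPC_ERROR_NONE);
  /* post a wakeup, then drain it so the fd is quiescent before teardown */
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&fd) == GRPC_ERROR_NONE);
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&fd) == GRPC_ERROR_NONE);
  grpc_wakeup_fd_destroy(&fd);
}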
void grpc_pollset_destroy(grpc_pollset *pollset) {
  GPR_ASSERT(pollset->in_flight_cbs == 0);
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));
  GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
  pollset->vtable->destroy(pollset);
  /* drain the cache of per-worker wakeup fds */
  while (pollset->local_wakeup_cache) {
    grpc_cached_wakeup_fd *next = pollset->local_wakeup_cache->next;
    grpc_wakeup_fd_destroy(&pollset->local_wakeup_cache->fd);
    gpr_free(pollset->local_wakeup_cache);
    pollset->local_wakeup_cache = next;
  }
}
void grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
                       gpr_timespec now, gpr_timespec deadline) {
  /* pollset->mu already held */
  int added_worker = 0;
  /* this must happen before we (potentially) drop pollset->mu */
  worker->next = worker->prev = NULL;
  /* TODO(ctiller): pool these */
  grpc_wakeup_fd_init(&worker->wakeup_fd);
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1)) {
    goto done;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    goto done;
  }
  if (pollset->shutting_down) {
    goto done;
  }
  if (pollset->in_flight_cbs) {
    /* Give do_promote priority so we don't starve it out */
    gpr_mu_unlock(&pollset->mu);
    gpr_mu_lock(&pollset->mu);
    goto done;
  }
  if (!pollset->kicked_without_pollers) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_tls_set(&g_current_thread_poller, (gpr_intptr)pollset);
    pollset->vtable->maybe_work(pollset, worker, deadline, now, 1);
    gpr_tls_set(&g_current_thread_poller, 0);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  grpc_wakeup_fd_destroy(&worker->wakeup_fd);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(pollset);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    }
  }
}
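/* A hedged caller-side sketch of the locking contract documented above:
 * pollset->mu is held across grpc_pollset_work, which may drop and
 * re-acquire it internally, and the pollset must stay alive until the call
 * returns. wait_until_shutdown is a hypothetical helper, and the
 * clock-taking gpr_now / gpr_inf_future variants are assumptions about this
 * code's vintage, not taken from the original sources. */
static void wait_until_shutdown(grpc_pollset *pollset) {
  gpr_mu_lock(&pollset->mu);
  while (!pollset->shutting_down) {
    grpc_pollset_worker worker;
    /* may drop and re-acquire pollset->mu while blocked in poll/epoll */
    grpc_pollset_work(pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(&pollset->mu);
}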
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.on_readable", 0);
  grpc_workqueue *workqueue = arg;
  if (error != GRPC_ERROR_NONE) {
    /* HACK: let wakeup_fd code know that we stole the fd */
    workqueue->wakeup_fd.read_fd = 0;
    grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
    grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
    GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
    gpr_free(workqueue);
  } else {
    error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
    gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
    if (error == GRPC_ERROR_NONE) {
      grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
                             &workqueue->read_closure);
    } else {
      /* recurse to get error handling */
      on_readable(exec_ctx, arg, error);
    }
    if (n == NULL) {
      /* try again - queue in an inconsistent state */
      wakeup(exec_ctx, workqueue);
    } else {
      switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
        case 3:  // had one count, one unorphaned --> done, unorphaned
          break;
        case 2:  // had one count, one orphaned --> done, orphaned
          workqueue_destroy(exec_ctx, workqueue);
          break;
        case 1:
        case 0:
          // these values are illegal - representing an already done or
          // deleted workqueue
          GPR_UNREACHABLE_CODE(break);
        default:
          // schedule a wakeup since there's more to do
          wakeup(exec_ctx, workqueue);
      }
      grpc_closure *cl = (grpc_closure *)n;
      grpc_error *clerr = cl->error;
      cl->cb(exec_ctx, cl->cb_arg, clerr);
      GRPC_ERROR_UNREF(clerr);
    }
  }
  GPR_TIMER_END("workqueue.on_readable", 0);
}
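/* Hedged sketch of the enqueue side implied by the state machine above,
 * assuming the encoding that the switch suggests: state == 2 * (queued
 * closures) + (1 if the workqueue is still unorphaned), so only a prior
 * state of exactly 1 (idle, unorphaned) needs a wakeup. Field and helper
 * names follow the consumer code above; workqueue_enqueue_sketch is
 * hypothetical and not guaranteed to match the original enqueue path. */
static void workqueue_enqueue_sketch(grpc_exec_ctx *exec_ctx,
                                     grpc_workqueue *workqueue,
                                     grpc_closure *cl, grpc_error *error) {
  gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2);
  GPR_ASSERT(last & 1); /* enqueueing on an orphaned workqueue is illegal */
  cl->error = error;
  gpr_mpscq_push(&workqueue->queue, (gpr_mpscq_node *)cl);
  if (last == 1) {
    /* queue was idle: poke the wakeup fd so on_readable gets scheduled */
    wakeup(exec_ctx, workqueue);
  }
}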
static void test_threading(void) {
  threading_shared shared;
  shared.pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(shared.pollset, &shared.mu);

  gpr_thd_id thds[10];
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&thds[i], test_threading_loop, &shared, &opt);
  }
  grpc_wakeup_fd fd;
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&fd)));
  shared.wakeup_fd = &fd;
  shared.wakeup_desc = grpc_fd_create(fd.read_fd, "wakeup");
  shared.wakeups = 0;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
    grpc_fd_notify_on_read(
        &exec_ctx, shared.wakeup_desc,
        GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                          grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_first",
                               grpc_wakeup_fd_wakeup(shared.wakeup_fd)));
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_join(thds[i]);
  }
  fd.read_fd = 0;
  grpc_wakeup_fd_destroy(&fd);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
    grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, NULL, NULL,
                   false /* already_closed */, "done");
    grpc_pollset_shutdown(&exec_ctx, shared.pollset,
                          GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                              grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  gpr_free(shared.pollset);
}
static void cleanup_test_fds(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                             const int num_fds) {
  int release_fd;

  for (int i = 0; i < num_fds; i++) {
    grpc_fd_shutdown(exec_ctx, tfds[i].fd,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("fd cleanup"));
    grpc_exec_ctx_flush(exec_ctx);

    /* grpc_fd_orphan frees the memory allocated for grpc_fd. Normally it also
     * calls close() on the underlying fd. In our case, we are using
     * grpc_wakeup_fd and we would like to destroy it ourselves (by calling
     * grpc_wakeup_fd_destroy). To prevent grpc_fd from calling close() on the
     * underlying fd, call it with a non-NULL 'release_fd' parameter */
    grpc_fd_orphan(exec_ctx, tfds[i].fd, NULL, &release_fd,
                   false /* already_closed */, "test_fd_cleanup");
    grpc_exec_ctx_flush(exec_ctx);

    grpc_wakeup_fd_destroy(&tfds[i].wakeup_fd);
  }
}
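/* A minimal sketch of the 'release_fd' hand-back pattern the comment above
 * describes, in isolation: wrap a wakeup fd in a grpc_fd, take the raw
 * descriptor back at orphan time, and let grpc_wakeup_fd_destroy close it.
 * release_fd_roundtrip is a hypothetical helper; the grpc_fd_create /
 * grpc_fd_orphan signatures are assumed to match their uses elsewhere in
 * this listing. */
static void release_fd_roundtrip(grpc_exec_ctx *exec_ctx) {
  grpc_wakeup_fd wfd;
  int released = -1;
  GPR_ASSERT(grpc_wakeup_fd_init(&wfd) == GRPC_ERROR_NONE);
  grpc_fd *f = grpc_fd_create(wfd.read_fd, "release_fd_roundtrip");
  /* non-NULL release_fd: grpc_fd hands the descriptor back instead of
   * closing it */
  grpc_fd_orphan(exec_ctx, f, NULL, &released, false /* already_closed */,
                 "release_fd_roundtrip");
  GPR_ASSERT(released == wfd.read_fd);
  grpc_wakeup_fd_destroy(&wfd); /* now we own the close() */
}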
/* ensure that p->epfd, p->wakeup are initialized; p->po.mu must be held */
static grpc_error *pollable_materialize(pollable *p) {
  if (p->epfd == -1) {
    int new_epfd = epoll_create1(EPOLL_CLOEXEC);
    if (new_epfd < 0) {
      return GRPC_OS_ERROR(errno, "epoll_create1");
    }
    grpc_error *err = grpc_wakeup_fd_init(&p->wakeup);
    if (err != GRPC_ERROR_NONE) {
      close(new_epfd);
      return err;
    }
    struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                             .data.ptr = (void *)(1 | (intptr_t)&p->wakeup)};
    if (epoll_ctl(new_epfd, EPOLL_CTL_ADD, p->wakeup.read_fd, &ev) != 0) {
      err = GRPC_OS_ERROR(errno, "epoll_ctl");
      close(new_epfd);
      grpc_wakeup_fd_destroy(&p->wakeup);
      return err;
    }

    p->epfd = new_epfd;
  }
  return GRPC_ERROR_NONE;
}
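/* Hedged sketch of the consumer side of the pointer tag set up above: the
 * low bit of epoll_event.data.ptr marks the wakeup fd, so a wait loop can
 * tell wakeups apart from ordinary fds. handle_events is hypothetical; only
 * the tagging scheme comes from pollable_materialize. */
static grpc_error *handle_events(struct epoll_event *events, int n) {
  for (int i = 0; i < n; i++) {
    intptr_t tag = (intptr_t)events[i].data.ptr;
    if (tag & 1) {
      /* tagged pointer: strip the low bit to recover the grpc_wakeup_fd */
      grpc_wakeup_fd *wfd = (grpc_wakeup_fd *)(tag & ~(intptr_t)1);
      grpc_error *err = grpc_wakeup_fd_consume_wakeup(wfd);
      if (err != GRPC_ERROR_NONE) {
        return err; /* sketch: bail out on the first error */
      }
    } else {
      /* untagged: an ordinary fd registered with this pollable
       * (handling elided) */
    }
  }
  return GRPC_ERROR_NONE;
}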
void grpc_pollset_global_shutdown(void) {
  grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
  gpr_tls_destroy(&g_current_thread_poller);
  gpr_tls_destroy(&g_current_thread_worker);
  grpc_wakeup_fd_global_destroy();
}
static void multipoll_with_epoll_pollset_destroy(grpc_pollset *pollset) {
  pollset_hdr *h = pollset->data.ptr;
  grpc_wakeup_fd_destroy(&h->wakeup_fd);
  close(h->epoll_fd);
  gpr_free(h);
}