grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                        gpr_timespec deadline) {
  event *ev = NULL;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if ((ev = pluck_event(cc, tag))) {
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}
/* Public function. Stops and destroys a grpc_tcp_server. */
void grpc_tcp_server_destroy(grpc_tcp_server *s,
                             void (*shutdown_done)(void *shutdown_done_arg),
                             void *shutdown_done_arg) {
  size_t i;
  gpr_mu_lock(&s->mu);
  /* First, shutdown all fd's. This will queue abortion calls for all
     of the pending accepts. */
  for (i = 0; i < s->nports; i++) {
    server_port *sp = &s->ports[i];
    grpc_winsocket_shutdown(sp->socket);
  }
  /* This happens asynchronously. Wait while that happens. */
  while (s->active_ports) {
    gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
  }
  gpr_mu_unlock(&s->mu);
  /* Now that the accepts have been aborted, we can destroy the sockets.
     The IOCP won't get notified on these, so we can flag them as already
     closed by the system. */
  for (i = 0; i < s->nports; i++) {
    server_port *sp = &s->ports[i];
    sp->socket->closed_early = 1;
    grpc_winsocket_orphan(sp->socket);
  }
  gpr_free(s->ports);
  gpr_free(s);
  if (shutdown_done) {
    shutdown_done(shutdown_done_arg);
  }
}
static int is_stack_running_on_compute_engine(void) {
  compute_engine_detector detector;
  grpc_httpcli_request request;

  /* The http call is local. If it takes more than one sec, it is for sure not
     on compute engine. */
  gpr_timespec max_detection_delay = {1, 0};

  gpr_mu_init(&detector.mu);
  gpr_cv_init(&detector.cv);
  detector.is_done = 0;
  detector.success = 0;

  memset(&request, 0, sizeof(grpc_httpcli_request));
  request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
  request.path = "/";

  grpc_httpcli_get(&request, gpr_time_add(gpr_now(), max_detection_delay),
                   on_compute_engine_detection_http_response, &detector);

  /* Block until we get the response. This is not ideal but this should only be
     called once for the lifetime of the process by the default credentials. */
  gpr_mu_lock(&detector.mu);
  while (!detector.is_done) {
    gpr_cv_wait(&detector.cv, &detector.mu, gpr_inf_future);
  }
  gpr_mu_unlock(&detector.mu);

  gpr_mu_destroy(&detector.mu);
  gpr_cv_destroy(&detector.cv);

  return detector.success;
}
int gpr_cv_cancellable_wait(gpr_cv *cv, gpr_mu *mu, gpr_timespec abs_deadline,
                            gpr_cancellable *c) {
  gpr_int32 timeout;
  gpr_mu_lock(&c->mu);
  timeout = gpr_cancellable_is_cancelled(c);
  if (!timeout) {
    struct gpr_cancellable_list_ le;
    le.mu = mu;
    le.cv = cv;
    le.next = c->waiters.next;
    le.prev = &c->waiters;
    le.next->prev = &le;
    le.prev->next = &le;
    gpr_mu_unlock(&c->mu);
    timeout = gpr_cv_wait(cv, mu, abs_deadline);
    gpr_mu_lock(&c->mu);
    le.next->prev = le.prev;
    le.prev->next = le.next;
    if (!timeout) {
      timeout = gpr_cancellable_is_cancelled(c);
    }
  }
  gpr_mu_unlock(&c->mu);
  return timeout;
}
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
                      gpr_timespec deadline) {
  gpr_timespec now;
  int added_worker = 0;
  now = gpr_now(GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(now, deadline) > 0) {
    return 0 /* GPR_FALSE */;
  }
  worker->next = worker->prev = NULL;
  gpr_cv_init(&worker->cv);
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
    goto done;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  gpr_cv_destroy(&worker->cv);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  return 1 /* GPR_TRUE */;
}
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
                                           grpc_pollset *pollset,
                                           grpc_pollset_worker **worker,
                                           gpr_timespec now,
                                           gpr_timespec deadline) {
  non_polling_poller *npp = (non_polling_poller *)pollset;
  if (npp->shutdown) return GRPC_ERROR_NONE;
  non_polling_worker w;
  gpr_cv_init(&w.cv);
  if (worker != NULL) *worker = (grpc_pollset_worker *)&w;
  if (npp->root == NULL) {
    npp->root = w.next = w.prev = &w;
  } else {
    w.next = npp->root;
    w.prev = w.next->prev;
    w.next->prev = w.prev->next = &w;
  }
  w.kicked = false;
  while (!npp->shutdown && !w.kicked &&
         !gpr_cv_wait(&w.cv, &npp->mu, deadline))
    ;
  if (&w == npp->root) {
    npp->root = w.next;
    if (&w == npp->root) {
      if (npp->shutdown) {
        GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
      }
      npp->root = NULL;
    }
  }
  w.next->prev = w.prev;
  w.prev->next = w.next;
  gpr_cv_destroy(&w.cv);
  if (worker != NULL) *worker = NULL;
  return GRPC_ERROR_NONE;
}
static void cpu_test(void) {
  uint32_t i;
  int cores_seen = 0;
  struct cpu_test ct;
  gpr_thd_id thd;
  ct.ncores = gpr_cpu_num_cores();
  GPR_ASSERT(ct.ncores > 0);
  ct.nthreads = (int)ct.ncores * 3;
  ct.used = gpr_malloc(ct.ncores * sizeof(int));
  memset(ct.used, 0, ct.ncores * sizeof(int));
  gpr_mu_init(&ct.mu);
  gpr_cv_init(&ct.done_cv);
  ct.is_done = 0;
  for (i = 0; i < ct.ncores * 3; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &worker_thread, &ct, NULL));
  }
  gpr_mu_lock(&ct.mu);
  while (!ct.is_done) {
    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&ct.mu);
  fprintf(stderr, "Saw cores [");
  for (i = 0; i < ct.ncores; i++) {
    if (ct.used[i]) {
      fprintf(stderr, "%d,", i);
      cores_seen++;
    }
  }
  fprintf(stderr, "] (%d/%d)\n", cores_seen, ct.ncores);
  gpr_free(ct.used);
}
/* Test that we can create a number of threads and wait for them. */
static void test(void) {
  int i;
  gpr_thd_id thd;
  gpr_thd_id thds[1000];
  struct test t;
  int n = 1000;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_mu_init(&t.mu);
  gpr_cv_init(&t.done_cv);
  t.n = n;
  t.is_done = 0;
  for (i = 0; i != n; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
  }
  gpr_mu_lock(&t.mu);
  while (!t.is_done) {
    gpr_cv_wait(&t.done_cv, &t.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&t.mu);
  GPR_ASSERT(t.n == 0);
  gpr_thd_options_set_joinable(&options);
  for (i = 0; i < n; i++) {
    GPR_ASSERT(gpr_thd_new(&thds[i], &thd_body_joinable, NULL, &options));
  }
  for (i = 0; i < n; i++) {
    gpr_thd_join(thds[i]);
  }
}
// Mocks posix poll() function
int mock_poll(struct pollfd *fds, nfds_t nfds, int timeout) {
  int res = 0;
  gpr_timespec poll_time;
  gpr_mu_lock(&poll_mu);
  GPR_ASSERT(nfds == 3);
  GPR_ASSERT(fds[0].fd == 20);
  GPR_ASSERT(fds[1].fd == 30);
  GPR_ASSERT(fds[2].fd == 50);
  GPR_ASSERT(fds[0].events == (POLLIN | POLLHUP));
  GPR_ASSERT(fds[1].events == (POLLIN | POLLHUP));
  GPR_ASSERT(fds[2].events == POLLIN);

  if (timeout < 0) {
    poll_time = gpr_inf_future(GPR_CLOCK_REALTIME);
  } else {
    poll_time = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                             gpr_time_from_millis(timeout, GPR_TIMESPAN));
  }

  if (socket_event || !gpr_cv_wait(&poll_cv, &poll_mu, poll_time)) {
    fds[0].revents = POLLIN;
    res = 1;
  }
  gpr_mu_unlock(&poll_mu);
  return res;
}
// Reads and verifies the specified number of records. Reader can also be
// stopped via gpr_cv_signal(&args->stop). Sleeps for 'read_interval_in_msec'
// between read iterations.
static void reader_thread(void* arg) {
  reader_thread_args* args = (reader_thread_args*)arg;
  if (VERBOSE) {
    printf(" Reader starting\n");
  }
  gpr_timespec interval = gpr_time_from_micros(
      args->read_iteration_interval_in_msec * 1000, GPR_TIMESPAN);
  gpr_mu_lock(args->mu);
  int records_read = 0;
  int num_iterations = 0;
  int counter = 0;
  while (!args->stop_flag && records_read < args->total_records) {
    gpr_cv_wait(&args->stop, args->mu, interval);
    if (!args->stop_flag) {
      records_read += perform_read_iteration(args->record_size);
      GPR_ASSERT(records_read <= args->total_records);
      if (VERBOSE && (counter++ == 100000)) {
        printf(" Reader: %d out of %d read\n", records_read,
               args->total_records);
        counter = 0;
      }
      ++num_iterations;
    }
  }
  // Done
  args->running = 0;
  gpr_cv_signal(args->done);
  if (VERBOSE) {
    printf(" Reader: records: %d, iterations: %d\n", records_read,
           num_iterations);
  }
  gpr_mu_unlock(args->mu);
}
/* Wait until all threads report done. */
static void test_wait(struct test *m) {
  gpr_mu_lock(&m->mu);
  while (m->done != 0) {
    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&m->mu);
}
static void* wait_for_watch_state_op_complete_without_gvl(void* arg) {
  watch_state_stack* stack = (watch_state_stack*)arg;
  watch_state_op* op = NULL;
  void* success = (void*)0;

  gpr_mu_lock(&global_connection_polling_mu);
  // It's unsafe to do a "watch" after "channel polling abort" because the cq
  // has been shut down.
  if (abort_channel_polling || stack->bg_wrapped->channel_destroyed) {
    gpr_mu_unlock(&global_connection_polling_mu);
    return (void*)0;
  }
  op = gpr_zalloc(sizeof(watch_state_op));
  op->op_type = WATCH_STATE_API;
  grpc_channel_watch_connectivity_state(stack->bg_wrapped->channel,
                                        stack->last_state, stack->deadline,
                                        channel_polling_cq, op);

  while (!op->op.api_callback_args.called_back) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (op->op.api_callback_args.success) {
    success = (void*)1;
  }
  gpr_free(op);
  gpr_mu_unlock(&global_connection_polling_mu);

  return success;
}
/* Reads and verifies the specified number of records. Reader can also be
   stopped via gpr_cv_signal(&args->stop). Sleeps for 'read_interval_in_msec'
   between read iterations. */
static void reader_thread(void* arg) {
  gpr_int32 records_read = 0;
  reader_thread_args* args = (reader_thread_args*)arg;
  gpr_int32 num_iterations = 0;
  gpr_timespec interval;
  int counter = 0;
  printf(" Reader starting\n");
  interval =
      gpr_time_from_micros(args->read_iteration_interval_in_msec * 1000);
  gpr_mu_lock(args->mu);
  while (!args->stop_flag && records_read < args->total_records) {
    gpr_cv_wait(&args->stop, args->mu, interval);
    if (!args->stop_flag) {
      records_read += perform_read_iteration(args->record_size);
      GPR_ASSERT(records_read <= args->total_records);
      if (counter++ == 100000) {
        printf(" Reader: %d out of %d read\n", records_read,
               args->total_records);
        counter = 0;
      }
      ++num_iterations;
    }
  }
  /* Done */
  args->running = 0;
  gpr_cv_broadcast(args->done);
  printf(" Reader: records: %d, iterations: %d\n", records_read,
         num_iterations);
  gpr_mu_unlock(args->mu);
}
/* cancel handshaking: cancel all requests, and shutdown (the caller promises
   not to initiate again) */
static void setup_cancel(grpc_transport_setup *sp) {
  grpc_client_setup *s = (grpc_client_setup *)sp;
  int cancel_alarm = 0;

  gpr_mu_lock(&s->mu);
  s->cancelled = 1;
  while (s->in_cb) {
    gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future);
  }

  GPR_ASSERT(s->refs > 0);
  /* effectively cancels the current request (if any) */
  s->active_request = NULL;
  if (s->in_alarm) {
    cancel_alarm = 1;
  }
  if (--s->refs == 0) {
    gpr_mu_unlock(&s->mu);
    destroy_setup(s);
  } else {
    gpr_mu_unlock(&s->mu);
  }
  if (cancel_alarm) {
    grpc_alarm_cancel(&s->backoff_alarm);
  }
}
/* Wait for the signal to shutdown a client. */
static void client_wait_and_shutdown(client *cl) {
  gpr_mu_lock(&cl->mu);
  while (!cl->done) gpr_cv_wait(&cl->done_cv, &cl->mu, gpr_inf_future);
  gpr_mu_unlock(&cl->mu);
  gpr_mu_destroy(&cl->mu);
  gpr_cv_destroy(&cl->done_cv);
}
/* Wait and shutdown a server. */
static void server_wait_and_shutdown(server *sv) {
  gpr_mu_lock(&sv->mu);
  while (!sv->done) gpr_cv_wait(&sv->done_cv, &sv->mu, gpr_inf_future);
  gpr_mu_unlock(&sv->mu);
  gpr_mu_destroy(&sv->mu);
  gpr_cv_destroy(&sv->done_cv);
}
void grpc_iomgr_shutdown(void) {
  grpc_iomgr_object *obj;
  grpc_iomgr_closure *closure;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_root_object.next != &g_root_object) {
    size_t nobjs = count_objects();
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", nobjs,
            g_cbs_head ? " and executing final callbacks" : "");
    if (g_cbs_head) {
      do {
        closure = g_cbs_head;
        g_cbs_head = closure->next;
        if (!g_cbs_head) g_cbs_tail = NULL;
        gpr_mu_unlock(&g_mu);
        closure->cb(closure->cb_arg, 0);
        gpr_mu_lock(&g_mu);
      } while (g_cbs_head);
      continue;
    }
    if (nobjs > 0) {
      int timeout = 0;
      gpr_timespec short_deadline =
          gpr_time_add(gpr_now(), gpr_time_from_millis(100));
      while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
        if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
          timeout = 1;
          break;
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                count_objects());
        for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
          gpr_log(GPR_DEBUG, "LEAKED OBJECT: %s", obj->name);
        }
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable = pollset->current_pollable;

  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
  }

  worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
  if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    if (worker->pollable != &pollset->pollable) {
      gpr_mu_unlock(&pollset->pollable.po.mu);
    }
    if (GRPC_TRACER_ON(grpc_polling_trace) &&
        worker->pollable->root_worker != worker) {
      gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable, worker,
              poll_deadline_to_millis_timeout(deadline, *now));
    }
    while (do_poll && worker->pollable->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
                  worker);
        }
        do_poll = false;
      } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
                 worker->pollable->root_worker != worker) {
        gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable, worker);
      }
    }
    if (worker->pollable != &pollset->pollable) {
      gpr_mu_unlock(&worker->pollable->po.mu);
      gpr_mu_lock(&pollset->pollable.po.mu);
      gpr_mu_lock(&worker->pollable->po.mu);
    }
    *now = gpr_now(now->clock_type);
  }

  return do_poll && pollset->shutdown_closure == NULL &&
         pollset->current_pollable == worker->pollable;
}
void *gpr_event_wait(gpr_event *ev, gpr_timespec abs_deadline) {
  void *result = (void *)gpr_atm_acq_load(&ev->state);
  if (result == NULL) {
    struct sync_array_s *s = hash(ev);
    gpr_mu_lock(&s->mu);
    do {
      result = (void *)gpr_atm_acq_load(&ev->state);
    } while (result == NULL && !gpr_cv_wait(&s->cv, &s->mu, abs_deadline));
    gpr_mu_unlock(&s->mu);
  }
  return result;
}
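gpr_event_wait above layers a one-shot event on top of the same cv wait. A minimal usage sketch, assuming the companion gpr_event_init/gpr_event_set API from <grpc/support/sync.h>; the names signal_ready and wait_ready_with_timeout_ms are illustrative, not from the gRPC tree:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

static gpr_event g_ready; /* call gpr_event_init(&g_ready) once before use */

/* Signalling side: publish a non-NULL value exactly once. */
static void signal_ready(void) { gpr_event_set(&g_ready, (void *)1); }

/* Waiting side: gpr_event_wait() returns the published value, or NULL if the
   deadline passes first. */
static int wait_ready_with_timeout_ms(int64_t ms) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(ms, GPR_TIMESPAN));
  return gpr_event_wait(&g_ready, deadline) != NULL; /* 0 on timeout */
}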
// Note: requires wrapper->wrapped and wrapper->channel_mu/cv to be initialized
static void grpc_rb_channel_safe_destroy(grpc_rb_channel *wrapper) {
  gpr_mu_lock(&wrapper->channel_mu);
  wrapper->request_safe_destroy = 1;
  while (!wrapper->safe_to_destroy) {
    gpr_cv_wait(&wrapper->channel_cv, &wrapper->channel_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  GPR_ASSERT(wrapper->safe_to_destroy);
  gpr_mu_unlock(&wrapper->channel_mu);

  grpc_channel_destroy(wrapper->wrapped);
}
static void* wait_until_channel_polling_thread_started_no_gil(void* arg) {
  int* stop_waiting = (int*)arg;
  gpr_log(GPR_DEBUG, "GRPC_RUBY: wait for channel polling thread to start");
  gpr_mu_lock(&global_connection_polling_mu);
  while (!channel_polling_thread_started && !abort_channel_polling &&
         !*stop_waiting) {
    gpr_cv_wait(&global_connection_polling_cv, &global_connection_polling_mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&global_connection_polling_mu);

  return NULL;
}
void grpc_iomgr_shutdown(void) {
  delayed_callback *cb;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_refs) {
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", g_refs,
            g_cbs_head ? " and executing final callbacks" : "");
    while (g_cbs_head) {
      cb = g_cbs_head;
      g_cbs_head = cb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      cb->cb(cb->cb_arg, 0);
      gpr_free(cb);
      gpr_mu_lock(&g_mu);
    }
    if (g_refs) {
      int timeout = 0;
      gpr_timespec short_deadline =
          gpr_time_add(gpr_now(), gpr_time_from_millis(100));
      while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
        if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
          timeout = 1;
          break;
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                g_refs);
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_cv);
  gpr_cv_destroy(&g_rcv);
}
void grpc_child_channel_destroy(grpc_child_channel *channel,
                                int wait_for_callbacks) {
  grpc_channel_element *lbelem = LINK_BACK_ELEM_FROM_CHANNEL(channel);
  lb_channel_data *chand = lbelem->channel_data;

  gpr_mu_lock(&chand->mu);
  while (wait_for_callbacks && chand->calling_back) {
    gpr_cv_wait(&chand->cv, &chand->mu, gpr_inf_future);
  }
  chand->back = NULL;
  chand->destroyed = 1;
  maybe_destroy_channel(channel);
  gpr_mu_unlock(&chand->mu);
}
/* Wait a millisecond and increment counter on each iteration; then mark
   thread as done. */
static void inc_with_1ms_delay(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  for (i = 0; i != m->iterations; i++) {
    gpr_timespec deadline;
    gpr_mu_lock(&m->mu);
    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                            gpr_time_from_micros(1000, GPR_TIMESPAN));
    while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
    }
    m->counter++;
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
/* Increment counter only when (m->counter%m->threads)==m->thread_id; then mark
   thread as done. */
static void inc_by_turns(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 i;
  int id = thread_id(m);
  for (i = 0; i != m->iterations; i++) {
    gpr_mu_lock(&m->mu);
    while ((m->counter % m->threads) != id) {
      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    m->counter++;
    gpr_cv_broadcast(&m->cv);
    gpr_mu_unlock(&m->mu);
  }
  mark_thread_done(m);
}
static void test_connect(int n) {
  struct sockaddr_storage addr;
  socklen_t addr_len = sizeof(addr);
  int svrfd, clifd;
  grpc_tcp_server *s = grpc_tcp_server_create();
  int nconnects_before;
  gpr_timespec deadline;
  int i;
  LOG_TEST();
  gpr_log(GPR_INFO, "clients=%d", n);

  gpr_mu_lock(&mu);

  memset(&addr, 0, sizeof(addr));
  addr.ss_family = AF_INET;
  GPR_ASSERT(grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, addr_len));

  svrfd = grpc_tcp_server_get_fd(s, 0);
  GPR_ASSERT(svrfd >= 0);
  GPR_ASSERT(getsockname(svrfd, (struct sockaddr *)&addr, &addr_len) == 0);
  GPR_ASSERT(addr_len <= sizeof(addr));

  grpc_tcp_server_start(s, NULL, 0, on_connect, NULL);

  for (i = 0; i < n; i++) {
    deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);

    nconnects_before = nconnects;
    clifd = socket(addr.ss_family, SOCK_STREAM, 0);
    GPR_ASSERT(clifd >= 0);
    GPR_ASSERT(connect(clifd, (struct sockaddr *)&addr, addr_len) == 0);

    while (nconnects == nconnects_before) {
      GPR_ASSERT(gpr_cv_wait(&cv, &mu, deadline) == 0);
    }

    GPR_ASSERT(nconnects == nconnects_before + 1);
    close(clifd);

    if (i != n - 1) {
      sleep(1);
    }
  }

  gpr_mu_unlock(&mu);

  grpc_tcp_server_destroy(s);
}
static void *grpc_rb_wait_for_event_no_gil(void *param) {
  grpc_rb_event *event = NULL;
  (void)param;
  gpr_mu_lock(&event_queue.mu);
  while ((event = grpc_rb_event_queue_dequeue()) == NULL) {
    gpr_cv_wait(&event_queue.cv, &event_queue.mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
    if (event_queue.abort) {
      gpr_mu_unlock(&event_queue.mu);
      return NULL;
    }
  }
  gpr_mu_unlock(&event_queue.mu);
  return event;
}
void grpc_server_destroy(grpc_server *server) {
  channel_data *c;
  listener *l;
  size_t i;
  call_data *calld;

  gpr_mu_lock(&server->mu);
  if (!server->shutdown) {
    gpr_mu_unlock(&server->mu);
    grpc_server_shutdown(server);
    gpr_mu_lock(&server->mu);
  }

  while (server->listeners_destroyed != num_listeners(server)) {
    for (i = 0; i < server->cq_count; i++) {
      gpr_mu_unlock(&server->mu);
      grpc_cq_hack_spin_pollset(server->cqs[i]);
      gpr_mu_lock(&server->mu);
    }

    gpr_cv_wait(&server->cv, &server->mu,
                gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
  }

  while (server->listeners) {
    l = server->listeners;
    server->listeners = l->next;
    gpr_free(l);
  }

  while ((calld = call_list_remove_head(&server->lists[PENDING_START],
                                        PENDING_START)) != NULL) {
    gpr_log(GPR_DEBUG, "server destroys call %p", calld->call);
    calld->state = ZOMBIED;
    grpc_iomgr_add_callback(
        kill_zombie,
        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
  }

  for (c = server->root_channel_data.next; c != &server->root_channel_data;
       c = c->next) {
    shutdown_channel(c);
  }

  gpr_mu_unlock(&server->mu);

  server_unref(server);
}
int grpc_pollset_work(grpc_pollset *pollset, gpr_timespec deadline) {
  gpr_timespec now;
  now = gpr_now(GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(now, deadline) > 0) {
    return 0 /* GPR_FALSE */;
  }
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
    return 1 /* GPR_TRUE */;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    return 1 /* GPR_TRUE */;
  }
  if (!pollset->shutting_down) {
    gpr_cv_wait(&pollset->cv, &pollset->mu, deadline);
  }
  return 1 /* GPR_TRUE */;
}
grpc_event *grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
  event *ev = NULL;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->queue != NULL) {
      gpr_uintptr bucket;
      ev = cc->queue;
      bucket = ((gpr_uintptr)ev->base.tag) % NUM_TAG_BUCKETS;
      cc->queue = ev->queue_next;
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        cc->queue = NULL;
      }
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}
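Every call site above shares one shape: take the mutex, re-check a predicate in a loop, and treat a non-zero return from gpr_cv_wait() as a deadline expiry (as the mock_poll and inc_with_1ms_delay listings make explicit). A condensed, self-contained sketch of that pattern follows; the wait_state type and function names are illustrative only, and the two-argument gpr_time_from_millis follows the newer-style call sites in this section.

#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include <stdint.h>

typedef struct {
  gpr_mu mu;
  gpr_cv cv;
  int done; /* predicate, protected by mu */
} wait_state;

static void wait_state_init(wait_state *ws) {
  gpr_mu_init(&ws->mu);
  gpr_cv_init(&ws->cv);
  ws->done = 0;
}

/* Called by the signalling thread once its work is finished. */
static void wait_state_mark_done(wait_state *ws) {
  gpr_mu_lock(&ws->mu);
  ws->done = 1;
  gpr_cv_broadcast(&ws->cv);
  gpr_mu_unlock(&ws->mu);
}

/* Returns 1 if 'done' became true before timeout_ms elapsed, 0 otherwise.
   gpr_cv_wait() returns non-zero on deadline expiry, so the loop re-checks
   the predicate after every wakeup, spurious or otherwise. */
static int wait_state_wait(wait_state *ws, int64_t timeout_ms) {
  gpr_timespec deadline =
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                   gpr_time_from_millis(timeout_ms, GPR_TIMESPAN));
  int ok = 1;
  gpr_mu_lock(&ws->mu);
  while (!ws->done) {
    if (gpr_cv_wait(&ws->cv, &ws->mu, deadline)) { /* timed out */
      ok = ws->done;
      break;
    }
  }
  gpr_mu_unlock(&ws->mu);
  return ok;
}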