/* Free storage associated with *m. */
static void test_destroy(struct test *m) {
  gpr_mu_destroy(&m->mu);
  gpr_cv_destroy(&m->cv);
  gpr_cv_destroy(&m->done_cv);
  queue_destroy(&m->q);
  gpr_free(m);
}
void grpc_iomgr_shutdown(void) {
  delayed_callback *cb;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_refs) {
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", g_refs,
            g_cbs_head ? " and executing final callbacks" : "");
    while (g_cbs_head) {
      cb = g_cbs_head;
      g_cbs_head = cb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);

      cb->cb(cb->cb_arg, 0);
      gpr_free(cb);
      gpr_mu_lock(&g_mu);
    }
    if (g_refs) {
      int timeout = 0;
      gpr_timespec short_deadline =
          gpr_time_add(gpr_now(), gpr_time_from_millis(100));
      while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
        if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
          timeout = 1;
          break;
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                g_refs);
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_cv);
  gpr_cv_destroy(&g_rcv);
}
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
                                           grpc_pollset *pollset,
                                           grpc_pollset_worker **worker,
                                           gpr_timespec now,
                                           gpr_timespec deadline) {
  non_polling_poller *npp = (non_polling_poller *)pollset;
  if (npp->shutdown) return GRPC_ERROR_NONE;
  non_polling_worker w;
  gpr_cv_init(&w.cv);
  if (worker != NULL) *worker = (grpc_pollset_worker *)&w;
  if (npp->root == NULL) {
    npp->root = w.next = w.prev = &w;
  } else {
    w.next = npp->root;
    w.prev = w.next->prev;
    w.next->prev = w.prev->next = &w;
  }
  w.kicked = false;
  while (!npp->shutdown && !w.kicked &&
         !gpr_cv_wait(&w.cv, &npp->mu, deadline))
    ;
  if (&w == npp->root) {
    npp->root = w.next;
    if (&w == npp->root) {
      if (npp->shutdown) {
        GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
      }
      npp->root = NULL;
    }
  }
  w.next->prev = w.prev;
  w.prev->next = w.next;
  gpr_cv_destroy(&w.cv);
  if (worker != NULL) *worker = NULL;
  return GRPC_ERROR_NONE;
}
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
                      gpr_timespec deadline) {
  gpr_timespec now;
  int added_worker = 0;
  now = gpr_now(GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(now, deadline) > 0) {
    return 0 /* GPR_FALSE */;
  }
  worker->next = worker->prev = NULL;
  gpr_cv_init(&worker->cv);
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
    goto done;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  gpr_cv_destroy(&worker->cv);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  return 1 /* GPR_TRUE */;
}
static int is_stack_running_on_compute_engine(void) {
  compute_engine_detector detector;
  grpc_httpcli_request request;

  /* The http call is local. If it takes more than one sec, it is for sure not
     on compute engine. */
  gpr_timespec max_detection_delay = {1, 0};

  gpr_mu_init(&detector.mu);
  gpr_cv_init(&detector.cv);
  detector.is_done = 0;
  detector.success = 0;

  memset(&request, 0, sizeof(grpc_httpcli_request));
  request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
  request.path = "/";

  grpc_httpcli_get(&request, gpr_time_add(gpr_now(), max_detection_delay),
                   on_compute_engine_detection_http_response, &detector);

  /* Block until we get the response. This is not ideal but this should only be
     called once for the lifetime of the process by the default credentials. */
  gpr_mu_lock(&detector.mu);
  while (!detector.is_done) {
    gpr_cv_wait(&detector.cv, &detector.mu, gpr_inf_future);
  }
  gpr_mu_unlock(&detector.mu);

  gpr_mu_destroy(&detector.mu);
  gpr_cv_destroy(&detector.cv);
  return detector.success;
}
static void server_unref(grpc_server *server) {
  registered_method *rm;
  size_t i;
  if (gpr_unref(&server->internal_refcount)) {
    grpc_channel_args_destroy(server->channel_args);
    gpr_mu_destroy(&server->mu);
    gpr_cv_destroy(&server->cv);
    gpr_free(server->channel_filters);
    requested_call_array_destroy(&server->requested_calls);
    while ((rm = server->registered_methods) != NULL) {
      server->registered_methods = rm->next;
      gpr_free(rm->method);
      gpr_free(rm->host);
      requested_call_array_destroy(&rm->requested);
      gpr_free(rm);
    }
    for (i = 0; i < server->cq_count; i++) {
      grpc_cq_internal_unref(server->cqs[i]);
    }
    gpr_free(server->cqs);
    gpr_free(server->pollsets);
    gpr_free(server->shutdown_tags);
    gpr_free(server);
  }
}
/* Wait for the signal to shutdown a client. */
static void client_wait_and_shutdown(client *cl) {
  gpr_mu_lock(&cl->mu);
  while (!cl->done) gpr_cv_wait(&cl->done_cv, &cl->mu, gpr_inf_future);
  gpr_mu_unlock(&cl->mu);
  gpr_mu_destroy(&cl->mu);
  gpr_cv_destroy(&cl->done_cv);
}
/* Wait for the signal to shutdown a server. */
static void server_wait_and_shutdown(server *sv) {
  gpr_mu_lock(&sv->mu);
  while (!sv->done) gpr_cv_wait(&sv->done_cv, &sv->mu, gpr_inf_future);
  gpr_mu_unlock(&sv->mu);
  gpr_mu_destroy(&sv->mu);
  gpr_cv_destroy(&sv->done_cv);
}
static void destroy_setup(grpc_client_setup *s) {
  gpr_mu_destroy(&s->mu);
  gpr_cv_destroy(&s->cv);
  s->done(s->user_data);
  grpc_channel_args_destroy(s->args);
  grpc_pollset_set_destroy(&s->interested_parties);
  gpr_free(s);
}
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
  grpc_exec_ctx_enqueue(exec_ctx, s->shutdown_complete, 1);

  gpr_mu_destroy(&s->mu);
  gpr_cv_destroy(&s->cv);

  gpr_free(s->ports);
  gpr_free(s);
}
void grpc_iomgr_shutdown(void) {
  grpc_iomgr_object *obj;
  grpc_iomgr_closure *closure;
  gpr_timespec shutdown_deadline =
      gpr_time_add(gpr_now(), gpr_time_from_seconds(10));

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head || g_root_object.next != &g_root_object) {
    size_t nobjs = count_objects();
    gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed%s", nobjs,
            g_cbs_head ? " and executing final callbacks" : "");
    if (g_cbs_head) {
      do {
        closure = g_cbs_head;
        g_cbs_head = closure->next;
        if (!g_cbs_head) g_cbs_tail = NULL;
        gpr_mu_unlock(&g_mu);
        closure->cb(closure->cb_arg, 0);
        gpr_mu_lock(&g_mu);
      } while (g_cbs_head);
      continue;
    }
    if (nobjs > 0) {
      int timeout = 0;
      gpr_timespec short_deadline =
          gpr_time_add(gpr_now(), gpr_time_from_millis(100));
      while (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
        if (gpr_time_cmp(gpr_now(), shutdown_deadline) > 0) {
          timeout = 1;
          break;
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                count_objects());
        for (obj = g_root_object.next; obj != &g_root_object; obj = obj->next) {
          gpr_log(GPR_DEBUG, "LEAKED OBJECT: %s", obj->name);
        }
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done, gpr_inf_future);

  grpc_iomgr_platform_shutdown();
  grpc_alarm_list_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
static void finish_shutdown(grpc_udp_server *s) {
  s->shutdown_complete(s->shutdown_complete_arg);

  gpr_mu_destroy(&s->mu);
  gpr_cv_destroy(&s->cv);

  gpr_free(s->ports);
  gpr_free(s);
}
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT ==
      worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    gpr_cv_signal(&worker->pollable->root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}
int main(int argc, char **argv) {
  grpc_test_init(argc, argv);
  grpc_iomgr_init();
  gpr_mu_init(&mu);
  gpr_cv_init(&cv);
  test_no_op();
  test_no_op_with_start();
  test_no_op_with_port();
  test_no_op_with_port_and_start();
  test_connect(1);
  test_connect(10);
  grpc_iomgr_shutdown();
  gpr_mu_destroy(&mu);
  gpr_cv_destroy(&cv);
  return 0;
}
/* Destroys Channel instances. */
static void grpc_rb_channel_free(void *p) {
  grpc_rb_channel *ch = NULL;
  if (p == NULL) {
    return;
  }
  ch = (grpc_rb_channel *)p;

  if (ch->wrapped != NULL) {
    grpc_rb_channel_safe_destroy(ch);
    ch->wrapped = NULL;
  }

  if (ch->mu_init_done) {
    gpr_mu_destroy(&ch->channel_mu);
    gpr_cv_destroy(&ch->channel_cv);
  }

  xfree(p);
}
static void multiple_writers_single_reader(int circular_log) {
  /* Sleep interval between read iterations. */
  static const int32_t READ_ITERATION_INTERVAL_IN_MSEC = 10;
  /* Number of records written by each writer. */
  static const int32_t NUM_RECORDS_PER_WRITER = 10 * 1024 * 1024;
  /* Maximum record size. */
  static const size_t MAX_RECORD_SIZE = 10;
  int ix;
  gpr_thd_id id;
  gpr_cv writers_done;
  int writers_count = NUM_WRITERS;
  gpr_mu writers_mu; /* protects writers_done and writers_count */
  writer_thread_args writers[NUM_WRITERS];
  gpr_cv reader_done;
  gpr_mu reader_mu; /* protects reader_done and reader.running */
  reader_thread_args reader;
  int32_t record_size = 1 + rand() % MAX_RECORD_SIZE;
  printf(" Record size: %d\n", record_size);
  /* Create and start writers. */
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  for (ix = 0; ix < NUM_WRITERS; ++ix) {
    writers[ix].index = ix;
    writers[ix].record_size = record_size;
    writers[ix].num_records = NUM_RECORDS_PER_WRITER;
    writers[ix].done = &writers_done;
    writers[ix].count = &writers_count;
    writers[ix].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[ix], NULL);
  }
  /* Start reader. */
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  /* Wait for writers to finish. */
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    /* Stop reader. */
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  /* Wait for reader to finish. */
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    /* Assert that there were no out-of-space errors. */
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  printf(" Reader: finished\n");
}
/* Destructor for channel_data */
static void lb_destroy_channel_elem(grpc_channel_element *elem) {
  lb_channel_data *chand = elem->channel_data;
  gpr_mu_destroy(&chand->mu);
  gpr_cv_destroy(&chand->cv);
}
void grpc_iomgr_shutdown(void) {
  gpr_timespec shutdown_deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
  gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  grpc_iomgr_platform_flush();

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_root_object.next != &g_root_object) {
    if (gpr_time_cmp(
            gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
            gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      if (g_root_object.next != &g_root_object) {
        gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
                count_objects());
      }
      last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
    }
    if (grpc_timer_check(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC),
                         NULL)) {
      gpr_mu_unlock(&g_mu);
      grpc_exec_ctx_flush(&exec_ctx);
      gpr_mu_lock(&g_mu);
      continue;
    }
    if (g_root_object.next != &g_root_object) {
      gpr_timespec short_deadline = gpr_time_add(
          gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
      if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
        if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
          if (g_root_object.next != &g_root_object) {
            gpr_log(GPR_DEBUG,
                    "Failed to free %d iomgr objects before shutdown deadline: "
                    "memory leaks are likely",
                    count_objects());
            dump_objects("LEAKED");
            if (grpc_iomgr_abort_on_leaks()) {
              abort();
            }
          }
          break;
        }
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_timer_list_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);

  /* ensure all threads have left g_mu */
  gpr_mu_lock(&g_mu);
  gpr_mu_unlock(&g_mu);

  grpc_pollset_global_shutdown();
  grpc_iomgr_platform_shutdown();
  grpc_exec_ctx_global_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
/* Free storage associated with *q. */
void queue_destroy(queue *q) {
  gpr_mu_destroy(&q->mu);
  gpr_cv_destroy(&q->non_empty);
  gpr_cv_destroy(&q->non_full);
}
/* Do both reading and writing using the grpc_endpoint API.
   This also includes a test of the shutdown behavior. */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_endpoint_test_fixture f = begin_test(config, __FUNCTION__, slice_size);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d",
            num_bytes, slice_size);
  }

  gpr_mu_init(&state.mu);
  gpr_cv_init(&state.cv);

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK);

  grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler,
                               &state);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(state.read_ep);
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(state.write_ep);
  }

  gpr_mu_lock(&state.mu);
  while (!state.read_done || !state.write_done) {
    if (gpr_cv_wait(&state.cv, &state.mu, deadline)) {
      gpr_log(GPR_ERROR, "timeout: read_done=%d, write_done=%d",
              state.read_done, state.write_done);
      abort();
    }
  }
  gpr_mu_unlock(&state.mu);

  grpc_endpoint_destroy(state.read_ep);
  grpc_endpoint_destroy(state.write_ep);
  gpr_mu_destroy(&state.mu);
  gpr_cv_destroy(&state.cv);
  end_test(config);
}
/* Test grpc_alarm add and cancel. */
static void test_grpc_alarm(void) {
  grpc_alarm alarm;
  grpc_alarm alarm_to_cancel;
  /* Timeout on the alarm cond. var, so make big enough to absorb time
     deviations. Otherwise, operations after wait will not be properly
     ordered. */
  gpr_timespec alarm_deadline;
  gpr_timespec followup_deadline;

  alarm_arg arg;
  alarm_arg arg2;
  void *fdone;

  grpc_iomgr_init();

  arg.counter = 0;
  arg.success = SUCCESS_NOT_SET;
  arg.done_success_ctr = 0;
  arg.done_cancel_ctr = 0;
  arg.done = 0;
  gpr_mu_init(&arg.mu);
  gpr_cv_init(&arg.cv);
  gpr_event_init(&arg.fcb_arg);

  grpc_alarm_init(&alarm, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100), alarm_cb, &arg,
                  gpr_now());

  alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
  gpr_mu_lock(&arg.mu);
  while (arg.done == 0) {
    if (gpr_cv_wait(&arg.cv, &arg.mu, alarm_deadline)) {
      gpr_log(GPR_ERROR, "alarm deadline exceeded");
      break;
    }
  }
  gpr_mu_unlock(&arg.mu);

  followup_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
  fdone = gpr_event_wait(&arg.fcb_arg, followup_deadline);

  if (arg.counter != 1) {
    gpr_log(GPR_ERROR, "Alarm callback not called");
    GPR_ASSERT(0);
  } else if (arg.done_success_ctr != 1) {
    gpr_log(GPR_ERROR, "Alarm done callback not called with success");
    GPR_ASSERT(0);
  } else if (arg.done_cancel_ctr != 0) {
    gpr_log(GPR_ERROR, "Alarm done callback called with cancel");
    GPR_ASSERT(0);
  } else if (arg.success == SUCCESS_NOT_SET) {
    gpr_log(GPR_ERROR, "Alarm callback without status");
    GPR_ASSERT(0);
  } else {
    gpr_log(GPR_INFO, "Alarm callback called successfully");
  }

  if (fdone != (void *)&arg.fcb_arg) {
    gpr_log(GPR_ERROR, "Followup callback #1 not invoked properly %p %p", fdone,
            &arg.fcb_arg);
    GPR_ASSERT(0);
  }
  gpr_cv_destroy(&arg.cv);
  gpr_mu_destroy(&arg.mu);

  arg2.counter = 0;
  arg2.success = SUCCESS_NOT_SET;
  arg2.done_success_ctr = 0;
  arg2.done_cancel_ctr = 0;
  arg2.done = 0;
  gpr_mu_init(&arg2.mu);
  gpr_cv_init(&arg2.cv);
  gpr_event_init(&arg2.fcb_arg);

  grpc_alarm_init(&alarm_to_cancel, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100),
                  alarm_cb, &arg2, gpr_now());
  grpc_alarm_cancel(&alarm_to_cancel);

  alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
  gpr_mu_lock(&arg2.mu);
  while (arg2.done == 0) {
    gpr_cv_wait(&arg2.cv, &arg2.mu, alarm_deadline);
  }
  gpr_mu_unlock(&arg2.mu);
  gpr_log(GPR_INFO, "alarm done = %d", arg2.done);

  followup_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
  fdone = gpr_event_wait(&arg2.fcb_arg, followup_deadline);

  if (arg2.counter != arg2.done_success_ctr) {
    gpr_log(GPR_ERROR, "Alarm callback called but didn't lead to done success");
    GPR_ASSERT(0);
  } else if (arg2.done_success_ctr && arg2.done_cancel_ctr) {
    gpr_log(GPR_ERROR, "Alarm done callback called with success and cancel");
    GPR_ASSERT(0);
  } else if (arg2.done_cancel_ctr + arg2.done_success_ctr != 1) {
    gpr_log(GPR_ERROR, "Alarm done callback called incorrect number of times");
    GPR_ASSERT(0);
  } else if (arg2.success == SUCCESS_NOT_SET) {
    gpr_log(GPR_ERROR, "Alarm callback without status");
    GPR_ASSERT(0);
  } else if (arg2.done_success_ctr) {
    gpr_log(GPR_INFO, "Alarm callback executed before cancel");
    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
            alarm_to_cancel.triggered);
  } else if (arg2.done_cancel_ctr) {
    gpr_log(GPR_INFO, "Alarm callback canceled");
    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
            alarm_to_cancel.triggered);
  } else {
    gpr_log(GPR_ERROR, "Alarm cancel test should not be here");
    GPR_ASSERT(0);
  }

  if (fdone != (void *)&arg2.fcb_arg) {
    gpr_log(GPR_ERROR, "Followup callback #2 not invoked properly %p %p", fdone,
            &arg2.fcb_arg);
    GPR_ASSERT(0);
  }
  gpr_cv_destroy(&arg2.cv);
  gpr_mu_destroy(&arg2.mu);

  grpc_iomgr_shutdown();
}
void destroy_change_data(fd_change_data *fdc) {
  gpr_mu_destroy(&fdc->mu);
  gpr_cv_destroy(&fdc->cv);
}
void grpc_pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_destroy(&pollset->mu);
  gpr_cv_destroy(&pollset->cv);
}
void grpc_iomgr_shutdown(void) {
  grpc_iomgr_closure *closure;
  gpr_timespec shutdown_deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
  gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_cbs_head != NULL || g_root_object.next != &g_root_object) {
    if (gpr_time_cmp(
            gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
            gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      if (g_cbs_head != NULL && g_root_object.next != &g_root_object) {
        gpr_log(GPR_DEBUG,
                "Waiting for %d iomgr objects to be destroyed and executing "
                "final callbacks",
                count_objects());
      } else if (g_cbs_head != NULL) {
        gpr_log(GPR_DEBUG, "Executing final iomgr callbacks");
      } else {
        gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
                count_objects());
      }
      last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
    }
    if (g_cbs_head) {
      do {
        closure = g_cbs_head;
        g_cbs_head = closure->next;
        if (!g_cbs_head) g_cbs_tail = NULL;
        gpr_mu_unlock(&g_mu);
        closure->cb(closure->cb_arg, 0);
        gpr_mu_lock(&g_mu);
      } while (g_cbs_head);
      continue;
    }
    if (grpc_alarm_check(&g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL)) {
      continue;
    }
    if (g_root_object.next != &g_root_object) {
      int timeout = 0;
      while (g_cbs_head == NULL) {
        gpr_timespec short_deadline =
            gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                         gpr_time_from_millis(100, GPR_TIMESPAN));
        if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline) && g_cbs_head == NULL) {
          if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) >
              0) {
            timeout = 1;
            break;
          }
        }
      }
      if (timeout) {
        gpr_log(GPR_DEBUG,
                "Failed to free %d iomgr objects before shutdown deadline: "
                "memory leaks are likely",
                count_objects());
        dump_objects("LEAKED");
        break;
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_kick_poller();
  gpr_event_wait(&g_background_callback_executor_done,
                 gpr_inf_future(GPR_CLOCK_REALTIME));

  grpc_alarm_list_shutdown();

  grpc_iomgr_platform_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  int added_worker = 0;
  worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
      worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
          worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
              worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
  worker->kicked = 0;
  worker->pollset = pollset;
  gpr_cv_init(&worker->cv);
  if (grpc_timer_check(exec_ctx, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    if (g_active_poller == NULL) {
      grpc_pollset_worker *next_worker;
      /* become poller */
      pollset->is_iocp_worker = 1;
      g_active_poller = worker;
      gpr_mu_unlock(&grpc_polling_mu);
      grpc_iocp_work(exec_ctx, deadline);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&grpc_polling_mu);
      pollset->is_iocp_worker = 0;
      g_active_poller = NULL;
      /* try to get a worker from this pollset's worker list */
      next_worker = pop_front_worker(&pollset->root_worker,
                                     GRPC_POLLSET_WORKER_LINK_POLLSET);
      if (next_worker == NULL) {
        /* try to get a worker from the global list */
        next_worker = pop_front_worker(&g_global_root_worker,
                                       GRPC_POLLSET_WORKER_LINK_GLOBAL);
      }
      if (next_worker != NULL) {
        next_worker->kicked = 1;
        gpr_cv_signal(&next_worker->cv);
      }
      if (pollset->shutting_down && pollset->on_shutdown != NULL) {
        grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
        pollset->on_shutdown = NULL;
      }
      goto done;
    }
    push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
                      worker);
    push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
                      worker);
    added_worker = 1;
    while (!worker->kicked) {
      if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
        break;
      }
    }
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    gpr_mu_unlock(&grpc_polling_mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&grpc_polling_mu);
  }
  if (added_worker) {
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
  }
  gpr_cv_destroy(&worker->cv);
}
static void test(void) {
  int i;
  gpr_thd_id thd;
  struct test t;
  int n = 1;
  gpr_timespec interval;

  gpr_mu_init(&t.mu);
  gpr_cv_init(&t.cv);
  gpr_event_init(&t.ev);
  gpr_event_init(&t.done);
  gpr_cancellable_init(&t.cancel);

  /* A gpr_cancellable starts not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Test timeout on event wait for uncancelled gpr_cancellable */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);

  /* Test timeout on cv wait for uncancelled gpr_cancellable */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(interval, gpr_time_from_micros(500000, GPR_TIMESPAN)) >= 0);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(2000000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Create some threads.  They all wait until cancelled; the last to finish
     sets t.done. */
  t.n = n;
  for (i = 0; i != n; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
  }
  /* Check that t.cancel still is not cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait a second, and check that no threads have finished waiting. */
  gpr_mu_lock(&t.mu);
  gpr_cv_wait(&t.cv, &t.mu,
              gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                           gpr_time_from_micros(1000000, GPR_TIMESPAN)));
  GPR_ASSERT(t.n == n);
  gpr_mu_unlock(&t.mu);

  /* Check that t.cancel still is not cancelled, but that once cancelled it
     reports that it is cancelled. */
  GPR_ASSERT(!gpr_cancellable_is_cancelled(&t.cancel));
  gpr_cancellable_cancel(&t.cancel);
  GPR_ASSERT(gpr_cancellable_is_cancelled(&t.cancel));

  /* Wait for threads to finish. */
  gpr_event_wait(&t.done, gpr_inf_future(GPR_CLOCK_REALTIME));
  GPR_ASSERT(t.n == 0);

  /* Test timeout on cv wait for cancelled gpr_cancellable */
  gpr_mu_lock(&t.mu);
  interval = gpr_now(GPR_CLOCK_REALTIME);
  while (!gpr_cv_cancellable_wait(
      &t.cv, &t.mu, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                 gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel)) {
  }
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);
  gpr_mu_unlock(&t.mu);

  /* Test timeout on event wait for cancelled gpr_cancellable */
  interval = gpr_now(GPR_CLOCK_REALTIME);
  gpr_event_cancellable_wait(
      &t.ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_micros(1000000, GPR_TIMESPAN)),
      &t.cancel);
  interval = gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), interval);
  GPR_ASSERT(
      gpr_time_cmp(gpr_time_from_micros(100000, GPR_TIMESPAN), interval) >= 0);

  gpr_mu_destroy(&t.mu);
  gpr_cv_destroy(&t.cv);
  gpr_cancellable_destroy(&t.cancel);
}
static void grpc_rb_event_queue_destroy() {
  gpr_mu_destroy(&event_queue.mu);
  gpr_cv_destroy(&event_queue.cv);
}
static void multiple_writers_single_reader(int circular_log) {
  // Sleep interval between read iterations.
  static const int READ_ITERATION_INTERVAL_IN_MSEC = 10;
  // Maximum record size.
  static const size_t MAX_RECORD_SIZE = 20;
  // Number of records written by each writer. This is sized such that we
  // will write through the entire log ~10 times.
  const int NUM_RECORDS_PER_WRITER =
      (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) /
      NUM_WRITERS;
  size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1;
  // Create and start writers.
  writer_thread_args writers[NUM_WRITERS];
  int writers_count = NUM_WRITERS;
  gpr_cv writers_done;
  gpr_mu writers_mu;  // protects writers_done and writers_count
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  gpr_thd_id id;
  for (int i = 0; i < NUM_WRITERS; ++i) {
    writers[i].index = i;
    writers[i].record_size = record_size;
    writers[i].num_records = NUM_RECORDS_PER_WRITER;
    writers[i].done = &writers_done;
    writers[i].count = &writers_count;
    writers[i].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[i], NULL);
  }
  // Start reader.
  gpr_cv reader_done;
  gpr_mu reader_mu;  // protects reader_done and reader.running
  reader_thread_args reader;
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  // Wait for writers to finish.
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    // Stop reader.
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  // Wait for reader to finish.
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    // Assert that there were no out-of-space errors.
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  if (VERBOSE) {
    printf("   Reader: finished\n");
  }
}