static void consumer_thread(void *arg) { test_thread_options *opt = arg; grpc_event *ev; gpr_log(GPR_INFO, "consumer %d started", opt->id); gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 1", opt->id); gpr_log(GPR_INFO, "consumer %d phase 1 done", opt->id); gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); for (;;) { ev = grpc_completion_queue_next(opt->cc, ten_seconds_time()); GPR_ASSERT(ev); switch (ev->type) { case GRPC_WRITE_ACCEPTED: GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK); opt->events_triggered++; grpc_event_finish(ev); break; case GRPC_QUEUE_SHUTDOWN: gpr_log(GPR_INFO, "consumer %d phase 2 done", opt->id); gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); grpc_event_finish(ev); return; default: gpr_log(GPR_ERROR, "Invalid event received: %d", ev->type); abort(); } } }
static void producer_thread(void *arg) { test_thread_options *opt = arg; int i; gpr_log(GPR_INFO, "producer %d started", opt->id); gpr_event_set(&opt->on_started, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 1", opt->id); for (i = 0; i < TEST_THREAD_EVENTS; i++) { grpc_cq_begin_op(opt->cc); } gpr_log(GPR_INFO, "producer %d phase 1 done", opt->id); gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 2", opt->id); for (i = 0; i < TEST_THREAD_EVENTS; i++) { grpc_cq_end_op(opt->cc, (void *)(gpr_intptr)1, 1, free_completion, NULL, gpr_malloc(sizeof(grpc_cq_completion))); opt->events_triggered++; } gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id); gpr_event_set(&opt->on_finished, (void *)(gpr_intptr)1); }
static void producer_thread(void *arg) { test_thread_options *opt = arg; int i; gpr_log(GPR_INFO, "producer %d started", opt->id); gpr_event_set(&opt->on_started, (void *)(gpr_intptr) 1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 1", opt->id); for (i = 0; i < TEST_THREAD_EVENTS; i++) { grpc_cq_begin_op(opt->cc, NULL, GRPC_WRITE_ACCEPTED); } gpr_log(GPR_INFO, "producer %d phase 1 done", opt->id); gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr) 1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "producer %d phase 2", opt->id); for (i = 0; i < TEST_THREAD_EVENTS; i++) { grpc_cq_end_write_accepted(opt->cc, (void *)(gpr_intptr) 1, NULL, NULL, NULL, GRPC_OP_OK); opt->events_triggered++; } gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id); gpr_event_set(&opt->on_finished, (void *)(gpr_intptr) 1); }
static void consumer_thread(void *arg) { test_thread_options *opt = arg; grpc_event ev; gpr_log(GPR_INFO, "consumer %d started", opt->id); gpr_event_set(&opt->on_started, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase1, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 1", opt->id); gpr_log(GPR_INFO, "consumer %d phase 1 done", opt->id); gpr_event_set(&opt->on_phase1_done, (void *)(gpr_intptr)1); GPR_ASSERT(gpr_event_wait(opt->phase2, ten_seconds_time())); gpr_log(GPR_INFO, "consumer %d phase 2", opt->id); for (;;) { ev = grpc_completion_queue_next(opt->cc, ten_seconds_time(), NULL); switch (ev.type) { case GRPC_OP_COMPLETE: GPR_ASSERT(ev.success); opt->events_triggered++; break; case GRPC_QUEUE_SHUTDOWN: gpr_log(GPR_INFO, "consumer %d phase 2 done", opt->id); gpr_event_set(&opt->on_finished, (void *)(gpr_intptr)1); return; case GRPC_QUEUE_TIMEOUT: gpr_log(GPR_ERROR, "Invalid timeout received"); abort(); } } }
static void actually_poll(void *argsp) { args_struct *args = argsp; gpr_timespec deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&args->done_atm) != 0; if (done) { break; } gpr_timespec time_left = gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, time_left.tv_sec, time_left.tv_nsec); GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); grpc_pollset_worker *worker = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(args->mu); GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, args->pollset, &worker, gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); gpr_mu_unlock(args->mu); grpc_exec_ctx_finish(&exec_ctx); } gpr_event_set(&args->ev, (void *)1); }
/* Background IOCP thread: keep servicing completions and delayed callbacks
   until shutdown has been requested AND no orphaned handles remain. */
static void iocp_loop(void *p) {
  for (;;) {
    if (!gpr_atm_acq_load(&g_orphans) && gpr_event_get(&g_shutdown_iocp)) {
      break;
    }
    grpc_maybe_call_delayed_callbacks(NULL, 1);
    do_iocp_work();
  }
  gpr_event_set(&g_iocp_done, (void *)1);
}
/* Ask the IOCP loop to exit, wait until it has drained, then close the
   completion-port handle. */
void grpc_iocp_shutdown(void) {
  BOOL closed;
  gpr_event_set(&g_shutdown_iocp, (void *)1);
  /* Wake the loop so it observes the shutdown flag promptly. */
  grpc_iocp_kick();
  gpr_event_wait(&g_iocp_done, gpr_inf_future(GPR_CLOCK_REALTIME));
  closed = CloseHandle(g_iocp);
  GPR_ASSERT(closed);
}
/* Thread body: increment m->refcount m->iterations times; the last thread
   to finish (thread_refcount reaching zero) signals m->event, and every
   thread marks itself done on exit. */
static void refinc(void *v /*=m*/) {
  struct test *m = v;
  gpr_int64 k;
  for (k = 0; k != m->iterations; k++) {
    gpr_ref(&m->refcount);
  }
  if (gpr_unref(&m->thread_refcount)) {
    /* we were the last worker alive */
    gpr_event_set(&m->event, (void *)1);
  }
  mark_thread_done(m);
}
static void child_thread(void *arg) { child_events *ce = arg; grpc_event ev; gpr_event_set(&ce->started, (void *)1); gpr_log(GPR_DEBUG, "verifying"); ev = grpc_completion_queue_next(ce->cq, gpr_inf_future(GPR_CLOCK_MONOTONIC), NULL); GPR_ASSERT(ev.type == GRPC_OP_COMPLETE); GPR_ASSERT(ev.tag == tag(1)); GPR_ASSERT(ev.success == 0); }
/* Request IOCP loop shutdown, kick it awake via a posted completion, wait
   for the loop to finish, then close the port handle. */
void grpc_iocp_shutdown(void) {
  BOOL ok;
  gpr_event_set(&g_shutdown_iocp, (void *)1);
  /* Post the kick token so the blocked loop wakes and sees the flag. */
  ok = PostQueuedCompletionStatus(g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token,
                                  &g_iocp_custom_overlap);
  GPR_ASSERT(ok);
  gpr_event_wait(&g_iocp_done, gpr_inf_future);
  ok = CloseHandle(g_iocp);
  GPR_ASSERT(ok);
}
/* A thread body: wait until t->cancel is cancelled, then decrement t->n.
   The thread that takes t->n to zero sets t->done. */
static void thd_body(void *v) {
  struct test *t = v;
  gpr_mu_lock(&t->mu);
  while (!gpr_cv_cancellable_wait(&t->cv, &t->mu,
                                  gpr_inf_future(GPR_CLOCK_REALTIME),
                                  &t->cancel)) {
    /* spurious wakeup: keep waiting until cancellation */
  }
  t->n--;
  if (t->n == 0) {
    gpr_event_set(&t->done, (void *)1);
  }
  gpr_mu_unlock(&t->mu);
}
static void shutdown_during_write_test_write_handler( void *user_data, grpc_endpoint_cb_status error) { shutdown_during_write_test_state *st = user_data; gpr_log(GPR_INFO, "shutdown_during_write_test_write_handler: error = %d", error); if (error == 0) { /* This happens about 0.5% of the time when run under TSAN, and is entirely legitimate, but means we aren't testing the path we think we are. */ /* TODO(klempner): Change this test to retry the write in that case */ gpr_log(GPR_ERROR, "shutdown_during_write_test_write_handler completed unexpectedly"); } gpr_event_set(&st->ev, (void *)(gpr_intptr) 1); }
static void actually_poll_server(void *arg) { poll_args *pa = arg; gpr_timespec deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&state.done_atm) != 0; gpr_timespec time_left = gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, time_left.tv_sec, time_left.tv_nsec); if (done || gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) < 0) { break; } test_tcp_server_poll(pa->server, 1); } gpr_event_set(pa->signal_when_done, (void *)1); gpr_free(pa); }
static void shutdown_during_write_test_read_handler( void *user_data, gpr_slice *slices, size_t nslices, grpc_endpoint_cb_status error) { size_t i; shutdown_during_write_test_state *st = user_data; for (i = 0; i < nslices; i++) { gpr_slice_unref(slices[i]); } if (error != GRPC_ENDPOINT_CB_OK) { grpc_endpoint_destroy(st->ep); gpr_event_set(&st->ev, (void *)(gpr_intptr) error); } else { grpc_endpoint_notify_on_read( st->ep, shutdown_during_write_test_read_handler, user_data); } }
/* "Bad server" thread: binds a TCP server on an ephemeral localhost port,
   publishes the address via args->addr and signals args->ready, then polls
   until the main thread raises args->stop. */
void bad_server_thread(void *vargs) {
  struct server_thread_args *args = (struct server_thread_args *)vargs;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resolved_address resolved_addr;
  struct sockaddr_storage *addr =
      (struct sockaddr_storage *)resolved_addr.addr;
  int port;
  grpc_tcp_server *s;
  grpc_error *error = grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s);
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  memset(&resolved_addr, 0, sizeof(resolved_addr));
  addr->ss_family = AF_INET; /* port left 0: kernel picks a free port */
  error = grpc_tcp_server_add_port(s, &resolved_addr, &port);
  GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_tcp_server_add_port", error));
  GPR_ASSERT(port > 0);
  gpr_asprintf(&args->addr, "localhost:%d", port);
  grpc_tcp_server_start(&exec_ctx, s, &args->pollset, 1, on_connect, args);
  gpr_event_set(&args->ready, (void *)1);
  gpr_mu_lock(args->mu);
  /* Poll in 100ms slices so a stop request is observed promptly. */
  while (gpr_atm_acq_load(&args->stop) == 0) {
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec deadline =
        gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN));
    grpc_pollset_worker *worker = NULL;
    /* A pollset failure also terminates the loop, via the stop flag. */
    if (!GRPC_LOG_IF_ERROR("pollset_work",
                           grpc_pollset_work(&exec_ctx, args->pollset,
                                             &worker, now, deadline))) {
      gpr_atm_rel_store(&args->stop, 1);
    }
    /* Drop the lock while flushing closures; callbacks may need it. */
    gpr_mu_unlock(args->mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(args->mu);
  }
  gpr_mu_unlock(args->mu);
  grpc_tcp_server_unref(&exec_ctx, s);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_free(args->addr);
}
/* Execute followup callbacks continuously.
   Other threads may check in and help during pollset_work() */
static void background_callback_executor(void *ignored) {
  gpr_mu_lock(&g_mu);
  while (!g_shutdown) {
    gpr_timespec deadline = gpr_inf_future;
    if (g_cbs_head) {
      /* Pop one queued callback and run it outside the lock. */
      delayed_callback *cb = g_cbs_head;
      g_cbs_head = cb->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      cb->cb(cb->cb_arg, cb->success);
      gpr_free(cb);
      gpr_mu_lock(&g_mu);
    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
      /* an alarm fired; loop around to look for more work */
    } else {
      /* nothing to do: sleep until signalled or the next alarm deadline */
      gpr_cv_wait(&g_cv, &g_mu, deadline);
    }
  }
  gpr_mu_unlock(&g_mu);
  gpr_event_set(&g_background_callback_executor_done, (void *)1);
}
static void test_mt_multipop(void) { gpr_log(GPR_DEBUG, "test_mt_multipop"); gpr_event start; gpr_event_init(&start); gpr_thd_id thds[100]; gpr_thd_id pull_thds[100]; thd_args ta[GPR_ARRAY_SIZE(thds)]; gpr_mpscq q; gpr_mpscq_init(&q); for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) { gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); ta[i].ctr = 0; ta[i].q = &q; ta[i].start = &start; GPR_ASSERT(gpr_thd_new(&thds[i], test_thread, &ta[i], &options)); } pull_args pa; pa.ta = ta; pa.num_thds = GPR_ARRAY_SIZE(thds); pa.spins = 0; pa.num_done = 0; pa.q = &q; pa.start = &start; gpr_mu_init(&pa.mu); for (size_t i = 0; i < GPR_ARRAY_SIZE(pull_thds); i++) { gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); GPR_ASSERT(gpr_thd_new(&pull_thds[i], pull_thread, &pa, &options)); } gpr_event_set(&start, (void *)1); for (size_t i = 0; i < GPR_ARRAY_SIZE(pull_thds); i++) { gpr_thd_join(pull_thds[i]); } gpr_log(GPR_DEBUG, "spins: %" PRIdPTR, pa.spins); for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) { gpr_thd_join(thds[i]); } gpr_mpscq_destroy(&q); }
/* Execute followup callbacks continuously.
   Other threads may check in and help during pollset_work() */
static void background_callback_executor(void *ignored) {
  gpr_mu_lock(&g_mu);
  while (!g_shutdown) {
    gpr_timespec deadline = gpr_inf_future;
    /* Cap each idle sleep at 100ms so shutdown is noticed promptly. */
    gpr_timespec short_deadline =
        gpr_time_add(gpr_now(), gpr_time_from_millis(100));
    if (g_cbs_head) {
      /* Pop one queued closure and run it outside the lock. */
      grpc_iomgr_closure *closure = g_cbs_head;
      g_cbs_head = closure->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      closure->cb(closure->cb_arg, closure->success);
      gpr_mu_lock(&g_mu);
    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
      /* an alarm fired; loop around to look for more work */
    } else {
      /* idle: sleep until the sooner of 100ms or the next alarm */
      gpr_mu_unlock(&g_mu);
      gpr_sleep_until(gpr_time_min(short_deadline, deadline));
      gpr_mu_lock(&g_mu);
    }
  }
  gpr_mu_unlock(&g_mu);
  gpr_event_set(&g_background_callback_executor_done, (void *)1);
}
/* "Bad server" thread: opens a TCP server on an ephemeral localhost port,
   publishes the address via args->addr and signals args->ready, then polls
   until the main thread raises args->stop. */
void bad_server_thread(void *vargs) {
  struct server_thread_args *args = (struct server_thread_args *)vargs;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  struct sockaddr_storage addr;
  socklen_t addr_len = sizeof(addr);
  int port;
  grpc_tcp_server *s = grpc_tcp_server_create(NULL);
  memset(&addr, 0, sizeof(addr));
  addr.ss_family = AF_INET; /* port left 0: kernel picks a free port */
  port = grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, addr_len);
  GPR_ASSERT(port > 0);
  gpr_asprintf(&args->addr, "localhost:%d", port);
  grpc_tcp_server_start(&exec_ctx, s, &args->pollset, 1, on_connect, args);
  gpr_event_set(&args->ready, (void *)1);
  gpr_mu_lock(args->mu);
  /* Poll in 100ms slices so a stop request is observed promptly. */
  while (gpr_atm_acq_load(&args->stop) == 0) {
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec deadline =
        gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN));
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, args->pollset, &worker, now, deadline);
    /* Drop the lock while flushing closures; callbacks may need it. */
    gpr_mu_unlock(args->mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(args->mu);
  }
  gpr_mu_unlock(args->mu);
  grpc_tcp_server_unref(&exec_ctx, s);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_free(args->addr);
}
static void test_mt(void) { gpr_log(GPR_DEBUG, "test_mt"); gpr_event start; gpr_event_init(&start); gpr_thd_id thds[100]; thd_args ta[GPR_ARRAY_SIZE(thds)]; gpr_mpscq q; gpr_mpscq_init(&q); for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) { gpr_thd_options options = gpr_thd_options_default(); gpr_thd_options_set_joinable(&options); ta[i].ctr = 0; ta[i].q = &q; ta[i].start = &start; GPR_ASSERT(gpr_thd_new(&thds[i], test_thread, &ta[i], &options)); } size_t num_done = 0; size_t spins = 0; gpr_event_set(&start, (void *)1); while (num_done != GPR_ARRAY_SIZE(thds)) { gpr_mpscq_node *n; while ((n = gpr_mpscq_pop(&q)) == NULL) { spins++; } test_node *tn = (test_node *)n; GPR_ASSERT(*tn->ctr == tn->i - 1); *tn->ctr = tn->i; if (tn->i == THREAD_ITERATIONS) num_done++; gpr_free(tn); } gpr_log(GPR_DEBUG, "spins: %" PRIdPTR, spins); for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) { gpr_thd_join(thds[i]); } gpr_mpscq_destroy(&q); }
/* Endpoint write callback: record that the write completed. */
static void done_write(void *arg, grpc_endpoint_cb_status status) {
  gpr_event_set(&((thd_args *)arg)->done_write, (void *)1);
}
/* Run the configured validator against the server/queue, then signal
   the waiting main thread that this worker is done. */
static void thd_func(void *arg) {
  thd_args *args = arg;
  args->validator(args->server, args->cq);
  gpr_event_set(&args->done_thd, (void *)1);
}
/* Spin up `producers` producer threads and `consumers` consumer threads
   against a single completion queue and run them through two synchronized
   phases: phase 1 pre-declares operations, phase 2 completes and consumes
   them while the queue is concurrently shut down.  Finally verifies that
   every produced event was consumed exactly once. */
static void test_threading(int producers, int consumers) {
  test_thread_options *options =
      gpr_malloc((producers + consumers) * sizeof(test_thread_options));
  gpr_event phase1 = GPR_EVENT_INIT;
  gpr_event phase2 = GPR_EVENT_INIT;
  grpc_completion_queue *cc = grpc_completion_queue_create(NULL);
  int i;
  int total_consumed = 0;
  static int optid = 101; /* distinct thread ids across repeated calls */

  gpr_log(GPR_INFO, "%s: %d producers, %d consumers", "test_threading",
          producers, consumers);

  /* start all threads: they will wait for phase1 */
  for (i = 0; i < producers + consumers; i++) {
    gpr_thd_id id;
    gpr_event_init(&options[i].on_started);
    gpr_event_init(&options[i].on_phase1_done);
    gpr_event_init(&options[i].on_finished);
    options[i].phase1 = &phase1;
    options[i].phase2 = &phase2;
    options[i].events_triggered = 0;
    options[i].cc = cc;
    options[i].id = optid++;
    /* first `producers` slots become producers, the rest consumers */
    GPR_ASSERT(gpr_thd_new(&id,
                           i < producers ? producer_thread : consumer_thread,
                           options + i, NULL));
    gpr_event_wait(&options[i].on_started, ten_seconds_time());
  }

  /* start phase1: producers will pre-declare all operations they will
     complete */
  gpr_log(GPR_INFO, "start phase 1");
  gpr_event_set(&phase1, (void *)(gpr_intptr)1);
  gpr_log(GPR_INFO, "wait phase 1");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_phase1_done,
                              ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 1");

  /* start phase2: operations will complete, and consumers will consume
     them */
  gpr_log(GPR_INFO, "start phase 2");
  gpr_event_set(&phase2, (void *)(gpr_intptr)1);
  /* in parallel, we shutdown the completion channel - all events should
     still be consumed */
  grpc_completion_queue_shutdown(cc);

  /* join all threads */
  gpr_log(GPR_INFO, "wait phase 2");
  for (i = 0; i < producers + consumers; i++) {
    GPR_ASSERT(gpr_event_wait(&options[i].on_finished, ten_seconds_time()));
  }
  gpr_log(GPR_INFO, "done phase 2");

  /* destroy the completion channel */
  grpc_completion_queue_destroy(cc);

  /* verify that everything was produced and consumed */
  for (i = 0; i < producers + consumers; i++) {
    if (i < producers) {
      GPR_ASSERT(options[i].events_triggered == TEST_THREAD_EVENTS);
    } else {
      total_consumed += options[i].events_triggered;
    }
  }
  GPR_ASSERT(total_consumed == producers * TEST_THREAD_EVENTS);

  gpr_free(options);
}
static void done_write(void *arg, int success) { thd_args *a = arg; gpr_event_set(&a->done_write, (void *)1); }
/* Connect callback that must yield a live endpoint; tear the endpoint
   down immediately and signal the waiting event (arg). */
static void must_succeed(void *arg, grpc_endpoint *tcp) {
  GPR_ASSERT(tcp != NULL);
  grpc_endpoint_shutdown(tcp);
  grpc_endpoint_destroy(tcp);
  gpr_event_set(arg, (void *)1);
}
/* Connect callback that must NOT yield an endpoint; signal the event. */
static void must_fail(void *arg, grpc_endpoint *tcp) {
  GPR_ASSERT(tcp == NULL);
  gpr_event_set(arg, (void *)1);
}
static void followup_cb(void *arg, int success) { gpr_event_set((gpr_event *)arg, arg); }
static void on_done(grpc_exec_ctx *exec_ctx, void *ev, grpc_error *error) { gpr_event_set(ev, (void *)1); }
/* Write callback (exec_ctx variant): record that the write completed. */
static void done_write(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  thd_args *args = arg;
  gpr_event_set(&args->done_write, (void *)1);
}
/* HTTP response callback: check the sentinel arg and a 200 response, then
   signal test completion via g_done. */
static void on_finish(void *arg, const grpc_httpcli_response *response) {
  GPR_ASSERT(arg == (void *)42);
  GPR_ASSERT(response != NULL);
  GPR_ASSERT(response->status == 200);
  gpr_event_set(&g_done, (void *)1);
}