static void shutdown_during_write_test(grpc_endpoint_test_config config, size_t slice_size) { /* test that shutdown with a pending write creates no leaks */ gpr_timespec deadline; size_t size; size_t nblocks; int current_data = 1; shutdown_during_write_test_state read_st; shutdown_during_write_test_state write_st; gpr_slice *slices; grpc_endpoint_test_fixture f = begin_test(config, "shutdown_during_write_test", slice_size); gpr_log(GPR_INFO, "testing shutdown during a write"); read_st.ep = f.client_ep; write_st.ep = f.server_ep; read_st.done = 0; write_st.done = 0; grpc_endpoint_notify_on_read( read_st.ep, shutdown_during_write_test_read_handler, &read_st); for (size = 1;; size *= 2) { slices = allocate_blocks(size, 1, &nblocks, &current_data); switch (grpc_endpoint_write(write_st.ep, slices, nblocks, shutdown_during_write_test_write_handler, &write_st)) { case GRPC_ENDPOINT_WRITE_DONE: break; case GRPC_ENDPOINT_WRITE_ERROR: gpr_log(GPR_ERROR, "error writing"); abort(); case GRPC_ENDPOINT_WRITE_PENDING: grpc_endpoint_shutdown(write_st.ep); deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); while (!write_st.done) { GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0); grpc_pollset_work(g_pollset, deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); grpc_endpoint_destroy(write_st.ep); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); while (!read_st.done) { GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0); grpc_pollset_work(g_pollset, deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); gpr_free(slices); end_test(config); return; } gpr_free(slices); } gpr_log(GPR_ERROR, "should never reach here"); abort(); }
static void test_flush(void) { grpc_closure c; int done = 0; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_workqueue *wq = grpc_workqueue_create(&exec_ctx); gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5); grpc_pollset_worker *worker = NULL; grpc_closure_init(&c, must_succeed, &done); grpc_exec_ctx_enqueue(&exec_ctx, &c, true, NULL); grpc_workqueue_flush(&exec_ctx, wq); grpc_workqueue_add_to_pollset(&exec_ctx, wq, g_pollset); gpr_mu_lock(g_mu); while (!done) { grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(deadline.clock_type), deadline); } gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); GPR_ASSERT(done); GRPC_WORKQUEUE_UNREF(&exec_ctx, wq, "destroy"); grpc_exec_ctx_finish(&exec_ctx); }
static void test_post(int port) { grpc_httpcli_request req; char *host; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; g_done = 0; gpr_log(GPR_INFO, "test_post"); gpr_asprintf(&host, "localhost:%d", port); gpr_log(GPR_INFO, "posting to %s", host); memset(&req, 0, sizeof(req)); req.host = host; req.ssl_host_override = "foo.test.google.fr"; req.path = "/post"; req.handshaker = &grpc_httpcli_ssl; grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5, n_seconds_time(15), on_finish, (void *)42); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (!g_done) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20)); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_free(host); }
static void test_get(int port) { grpc_httpcli_request req; char *host; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; g_done = 0; gpr_log(GPR_INFO, "test_get"); gpr_asprintf(&host, "localhost:%d", port); gpr_log(GPR_INFO, "requesting from %s", host); memset(&req, 0, sizeof(req)); req.host = host; req.path = "/get"; req.handshaker = &grpc_httpcli_plaintext; grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15), on_finish, (void *)42); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (!g_done) { grpc_pollset_worker worker; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20)); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_free(host); }
static grpc_connected_subchannel *connect_subchannel(grpc_subchannel *c) { grpc_pollset pollset; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_init(&pollset); grpc_pollset_set_init(&g_interested_parties); grpc_pollset_set_add_pollset(&exec_ctx, &g_interested_parties, &pollset); grpc_subchannel_notify_on_state_change(&exec_ctx, c, &g_interested_parties, &g_state, grpc_closure_create(state_changed, c)); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&pollset)); while (g_state != GRPC_CHANNEL_READY) { grpc_pollset_worker worker; grpc_pollset_work(&exec_ctx, &pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)); gpr_mu_unlock(GRPC_POLLSET_MU(&pollset)); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&pollset)); } grpc_pollset_shutdown(&exec_ctx, &pollset, grpc_closure_create(destroy_pollset, &pollset)); grpc_pollset_set_destroy(&g_interested_parties); gpr_mu_unlock(GRPC_POLLSET_MU(&pollset)); grpc_exec_ctx_finish(&exec_ctx); return grpc_subchannel_get_connected_subchannel(c); }
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) { gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); grpc_pollset_kick(&cc->pollset); grpc_pollset_work(&cc->pollset, gpr_time_add(gpr_now(), gpr_time_from_millis(100))); gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); }
grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag, gpr_timespec deadline) { event *ev = NULL; gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); for (;;) { if ((ev = pluck_event(cc, tag))) { break; } if (cc->shutdown) { ev = create_shutdown_event(); break; } if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) { continue; } if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset), GRPC_POLLSET_MU(&cc->pollset), deadline)) { gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); return NULL; } } gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base); return &ev->base; }
static void tcp_connect(grpc_exec_ctx *exec_ctx, const struct sockaddr *remote, socklen_t remote_len, on_connect_result *result) { gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); int clifd = socket(remote->sa_family, SOCK_STREAM, 0); int nconnects_before; gpr_mu_lock(g_mu); nconnects_before = g_nconnects; on_connect_result_init(&g_result); GPR_ASSERT(clifd >= 0); gpr_log(GPR_DEBUG, "start connect"); GPR_ASSERT(connect(clifd, remote, remote_len) == 0); gpr_log(GPR_DEBUG, "wait"); while (g_nconnects == nconnects_before && gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(exec_ctx); gpr_mu_lock(g_mu); } gpr_log(GPR_DEBUG, "wait done"); GPR_ASSERT(g_nconnects == nconnects_before + 1); close(clifd); *result = g_result; gpr_mu_unlock(g_mu); }
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) { unsigned char *buf = gpr_malloc(read_size); ssize_t bytes_read; size_t bytes_left = num_bytes; int flags; int current = 0; int i; flags = fcntl(fd, F_GETFL, 0); GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0); for (;;) { grpc_pollset_worker worker; gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10)); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); do { bytes_read = read(fd, buf, bytes_left > read_size ? read_size : bytes_left); } while (bytes_read < 0 && errno == EINTR); GPR_ASSERT(bytes_read >= 0); for (i = 0; i < bytes_read; ++i) { GPR_ASSERT(buf[i] == current); current = (current + 1) % 256; } bytes_left -= (size_t)bytes_read; if (bytes_left == 0) break; } flags = fcntl(fd, F_GETFL, 0); GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0); gpr_free(buf); }
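/* drain_socket_blocking above temporarily clears O_NONBLOCK so its plain
   read() calls block, then restores the flag before returning. A minimal
   sketch of that F_GETFL/F_SETFL toggle in isolation (hypothetical helper,
   not part of the test; assumes fd is a valid descriptor): */
#include <fcntl.h>
/* Clear O_NONBLOCK on fd; returns the previous flags so the caller can
   restore them later with fcntl(fd, F_SETFL, prev), or -1 on error. */
static int set_blocking(int fd) {
  int flags = fcntl(fd, F_GETFL, 0);
  if (flags < 0) return -1;
  if (fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) != 0) return -1;
  return flags;
}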
static void actually_poll(void *argsp) { args_struct *args = argsp; gpr_timespec deadline = n_sec_deadline(10); while (true) { bool done = gpr_atm_acq_load(&args->done_atm) != 0; if (done) { break; } gpr_timespec time_left = gpr_time_sub(deadline, gpr_now(GPR_CLOCK_REALTIME)); gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRId64 ".%09d", done, time_left.tv_sec, time_left.tv_nsec); GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0); grpc_pollset_worker *worker = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(args->mu); GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, args->pollset, &worker, gpr_now(GPR_CLOCK_REALTIME), n_sec_deadline(1))); gpr_mu_unlock(args->mu); grpc_exec_ctx_finish(&exec_ctx); } gpr_event_set(&args->ev, (void *)1); }
char *grpc_test_fetch_oauth2_token_with_credentials( grpc_call_credentials *creds) { oauth2_request request; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure do_nothing_closure; grpc_auth_metadata_context null_ctx = {"", "", NULL, NULL}; grpc_pollset_init(&request.pollset); request.is_done = 0; grpc_closure_init(&do_nothing_closure, do_nothing, NULL); grpc_call_credentials_get_request_metadata(&exec_ctx, creds, &request.pollset, null_ctx, on_oauth2_response, &request); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset)); while (!request.is_done) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(&exec_ctx, &request.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), gpr_inf_future(GPR_CLOCK_MONOTONIC)); } gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset)); grpc_pollset_shutdown(&exec_ctx, &request.pollset, &do_nothing_closure); grpc_exec_ctx_finish(&exec_ctx); grpc_pollset_destroy(&request.pollset); return request.token; }
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; size_t num_blocks; grpc_slice *slices; uint8_t current_data = 0; grpc_slice_buffer outgoing; grpc_closure write_done_closure; gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR, num_bytes, slice_size); create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("write_test"); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); grpc_slice_buffer_init(&outgoing); grpc_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_closure_init(&write_done_closure, write_done, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure); drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(g_mu); for (;;) { grpc_pollset_worker *worker = NULL; if (state.write_done) { break; } GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); } gpr_mu_unlock(g_mu); grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing); grpc_endpoint_destroy(&exec_ctx, ep); gpr_free(slices); grpc_exec_ctx_finish(&exec_ctx); }
int main(int argc, char **argv) { int result = 0; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; synchronizer sync; grpc_channel_credentials *creds = NULL; char *service_url = "https://test.foo.google.com/Foo"; grpc_auth_metadata_context context; gpr_cmdline *cl = gpr_cmdline_create("print_google_default_creds_token"); gpr_cmdline_add_string(cl, "service_url", "Service URL for the token request.", &service_url); gpr_cmdline_parse(cl, argc, argv); memset(&context, 0, sizeof(context)); context.service_url = service_url; grpc_init(); creds = grpc_google_default_credentials_create(); if (creds == NULL) { fprintf(stderr, "\nCould not find default credentials.\n\n"); result = 1; goto end; } grpc_pollset *pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(pollset, &sync.mu); sync.pops = grpc_polling_entity_create_from_pollset(pollset); sync.is_done = 0; grpc_call_credentials_get_request_metadata( &exec_ctx, ((grpc_composite_channel_credentials *)creds)->call_creds, &sync.pops, context, on_metadata_response, &sync); gpr_mu_lock(sync.mu); while (!sync.is_done) { grpc_pollset_worker *worker = NULL; if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&sync.pops), &worker, gpr_now(GPR_CLOCK_MONOTONIC), gpr_inf_future(GPR_CLOCK_MONOTONIC)))) sync.is_done = 1; gpr_mu_unlock(sync.mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(sync.mu); } gpr_mu_unlock(sync.mu); grpc_exec_ctx_finish(&exec_ctx); grpc_channel_credentials_release(creds); gpr_free(grpc_polling_entity_pollset(&sync.pops)); end: gpr_cmdline_destroy(cl); grpc_shutdown(); return result; }
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(ssize_t num_bytes, ssize_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; ssize_t read_bytes; size_t num_blocks; gpr_slice *slices; int current_data = 0; gpr_slice_buffer outgoing; grpc_iomgr_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_endpoint_add_to_pollset(ep, &g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); gpr_slice_buffer_init(&outgoing); gpr_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_iomgr_closure_init(&write_done_closure, write_done, &state); switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) { case GRPC_ENDPOINT_DONE: /* Write completed immediately */ read_bytes = drain_socket(sv[0]); GPR_ASSERT(read_bytes == num_bytes); break; case GRPC_ENDPOINT_PENDING: drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { grpc_pollset_worker worker; if (state.write_done) { break; } grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); break; case GRPC_ENDPOINT_ERROR: gpr_log(GPR_ERROR, "endpoint got error"); abort(); } gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(ep); gpr_free(slices); }
void test_succeeds(void) { struct sockaddr_in addr; socklen_t addr_len = sizeof(addr); int svr_fd; int r; int connections_complete_before; grpc_closure done; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_succeeds"); memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; /* create a dummy server */ svr_fd = socket(AF_INET, SOCK_STREAM, 0); GPR_ASSERT(svr_fd >= 0); GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len)); GPR_ASSERT(0 == listen(svr_fd, 1)); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); connections_complete_before = g_connections_complete; gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); /* connect to it */ GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0); grpc_closure_init(&done, must_succeed, NULL); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set, (struct sockaddr *)&addr, addr_len, gpr_inf_future(GPR_CLOCK_REALTIME)); /* await the connection */ do { addr_len = sizeof(addr); r = accept(svr_fd, (struct sockaddr *)&addr, &addr_len); } while (r == -1 && errno == EINTR); GPR_ASSERT(r >= 0); close(r); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (g_connections_complete == connections_complete_before) { grpc_pollset_worker *worker = NULL; grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5)); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); }
/* Do both reading and writing using the grpc_endpoint API. This also includes a test of the shutdown behavior. */ static void read_and_write_test(grpc_endpoint_test_config config, size_t num_bytes, size_t write_size, size_t slice_size, int shutdown) { struct read_and_write_test_state state; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); grpc_endpoint_test_fixture f = begin_test(config, "read_and_write_test", slice_size); if (shutdown) { gpr_log(GPR_INFO, "Start read and write shutdown test"); } else { gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d", num_bytes, slice_size); } state.read_ep = f.client_ep; state.write_ep = f.server_ep; state.target_bytes = num_bytes; state.bytes_read = 0; state.current_write_size = write_size; state.bytes_written = 0; state.read_done = 0; state.write_done = 0; state.current_read_data = 0; state.current_write_data = 0; /* Get started by pretending an initial write completed */ /* NOTE: Sets up initial conditions so we can have the same write handler for the first iteration as for later iterations. It does the right thing even when bytes_written is unsigned. */ state.bytes_written -= state.current_write_size; read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK); grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler, &state); if (shutdown) { gpr_log(GPR_DEBUG, "shutdown read"); grpc_endpoint_shutdown(state.read_ep); gpr_log(GPR_DEBUG, "shutdown write"); grpc_endpoint_shutdown(state.write_ep); } gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); while (!state.read_done || !state.write_done) { GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0); grpc_pollset_work(g_pollset, deadline); } gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); grpc_endpoint_destroy(state.read_ep); grpc_endpoint_destroy(state.write_ep); end_test(config); }
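/* Aside on the "pretend an initial write completed" trick in the function
   above: subtracting current_write_size before the first write-handler call
   is safe even though bytes_written is unsigned, because the handler
   immediately adds the same amount back and unsigned arithmetic is modular,
   so the wraparound cancels. A minimal standalone sketch of just that
   property (illustrative names, not part of the test): */
#include <assert.h>
#include <stddef.h>
int main(void) {
  size_t bytes_written = 0;
  size_t current_write_size = 4096;
  bytes_written -= current_write_size; /* wraps to SIZE_MAX - 4095 */
  bytes_written += current_write_size; /* handler re-adds it: back to 0 */
  assert(bytes_written == 0);
  return 0;
}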
static grpc_error *tcp_connect(grpc_exec_ctx *exec_ctx, const test_addr *remote, on_connect_result *result) { gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10); int clifd; int nconnects_before; const struct sockaddr *remote_addr = (const struct sockaddr *)remote->addr.addr; gpr_log(GPR_INFO, "Connecting to %s", remote->str); gpr_mu_lock(g_mu); nconnects_before = g_nconnects; on_connect_result_init(&g_result); clifd = socket(remote_addr->sa_family, SOCK_STREAM, 0); if (clifd < 0) { gpr_mu_unlock(g_mu); return GRPC_OS_ERROR(errno, "Failed to create socket"); } gpr_log(GPR_DEBUG, "start connect to %s", remote->str); if (connect(clifd, remote_addr, (socklen_t)remote->addr.len) != 0) { gpr_mu_unlock(g_mu); close(clifd); return GRPC_OS_ERROR(errno, "connect"); } gpr_log(GPR_DEBUG, "wait"); while (g_nconnects == nconnects_before && gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0) { grpc_pollset_worker *worker = NULL; grpc_error *err; if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline)) != GRPC_ERROR_NONE) { gpr_mu_unlock(g_mu); close(clifd); return err; } gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(exec_ctx); gpr_mu_lock(g_mu); } gpr_log(GPR_DEBUG, "wait done"); if (g_nconnects != nconnects_before + 1) { gpr_mu_unlock(g_mu); close(clifd); return GRPC_ERROR_CREATE_FROM_STATIC_STRING("Didn't connect"); } close(clifd); *result = g_result; gpr_mu_unlock(g_mu); gpr_log(GPR_INFO, "Result (%d, %d) fd %d", result->port_index, result->fd_index, result->server_fd); grpc_tcp_server_unref(exec_ctx, result->server); return GRPC_ERROR_NONE; }
void test_tcp_server_poll(test_tcp_server *server, int seconds) { grpc_pollset_worker *worker = NULL; gpr_timespec deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_seconds(seconds, GPR_TIMESPAN)); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset)); grpc_pollset_work(&exec_ctx, &server->pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset)); grpc_exec_ctx_finish(&exec_ctx); }
int grpc_pick_port_using_server(char *server) { grpc_httpcli_context context; grpc_httpcli_request req; portreq pr; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure *shutdown_closure; grpc_init(); memset(&pr, 0, sizeof(pr)); memset(&req, 0, sizeof(req)); grpc_pollset *pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(pollset, &pr.mu); pr.pops = grpc_polling_entity_create_from_pollset(pollset); shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops); pr.port = -1; pr.server = server; pr.ctx = &context; req.host = server; req.http.path = "/get"; grpc_httpcli_context_init(&context); grpc_resource_quota *resource_quota = grpc_resource_quota_create("port_server_client/pick"); grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), grpc_closure_create(got_port_from_server, &pr), &pr.response); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(pr.mu); while (pr.port == -1) { grpc_pollset_worker *worker = NULL; if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), &worker, gpr_now(GPR_CLOCK_MONOTONIC), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)))) { pr.port = 0; } } gpr_mu_unlock(pr.mu); grpc_http_response_destroy(&pr.response); grpc_httpcli_context_destroy(&context); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); grpc_exec_ctx_finish(&exec_ctx); return pr.port; }
void grpc_free_port_using_server(char *server, int port) { grpc_httpcli_context context; grpc_httpcli_request req; grpc_httpcli_response rsp; freereq pr; char *path; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_closure *shutdown_closure; grpc_init(); memset(&pr, 0, sizeof(pr)); memset(&req, 0, sizeof(req)); memset(&rsp, 0, sizeof(rsp)); grpc_pollset *pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(pollset, &pr.mu); pr.pops = grpc_polling_entity_create_from_pollset(pollset); shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops); req.host = server; gpr_asprintf(&path, "/drop/%d", port); req.http.path = path; grpc_httpcli_context_init(&context); grpc_resource_quota *resource_quota = grpc_resource_quota_create("port_server_client/free"); grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), grpc_closure_create(freed_port_from_server, &pr), &rsp); grpc_resource_quota_internal_unref(&exec_ctx, resource_quota); gpr_mu_lock(pr.mu); while (!pr.done) { grpc_pollset_worker *worker = NULL; if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), &worker, gpr_now(GPR_CLOCK_MONOTONIC), GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)))) { pr.done = 1; } } gpr_mu_unlock(pr.mu); grpc_httpcli_context_destroy(&context); grpc_exec_ctx_finish(&exec_ctx); grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops), shutdown_closure); grpc_exec_ctx_finish(&exec_ctx); gpr_free(path); grpc_http_response_destroy(&rsp); }
void test_succeeds(void) { grpc_resolved_address resolved_addr; struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr; uv_tcp_t *svr_handle = gpr_malloc(sizeof(uv_tcp_t)); int connections_complete_before; grpc_closure done; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_succeeds"); memset(&resolved_addr, 0, sizeof(resolved_addr)); resolved_addr.len = sizeof(struct sockaddr_in); addr->sin_family = AF_INET; /* create a dummy server */ GPR_ASSERT(0 == uv_tcp_init(uv_default_loop(), svr_handle)); GPR_ASSERT(0 == uv_tcp_bind(svr_handle, (struct sockaddr *)addr, 0)); GPR_ASSERT(0 == uv_listen((uv_stream_t *)svr_handle, 1, connection_cb)); gpr_mu_lock(g_mu); connections_complete_before = g_connections_complete; gpr_mu_unlock(g_mu); /* connect to it */ GPR_ASSERT(uv_tcp_getsockname(svr_handle, (struct sockaddr *)addr, (int *)&resolved_addr.len) == 0); GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL, &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_mu_lock(g_mu); while (g_connections_complete == connections_complete_before) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), grpc_timeout_seconds_to_deadline(5)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); } // This will get cleaned up when the pollset runs again or gets shutdown uv_close((uv_handle_t *)svr_handle, close_cb); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); }
/* Write to a socket using the grpc_tcp API, then drain it directly. Note that if the write does not complete immediately we need to drain the socket in parallel with the read. */ static void write_test(size_t num_bytes, size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct write_socket_state state; size_t num_blocks; gpr_slice *slices; gpr_uint8 current_data = 0; gpr_slice_buffer outgoing; grpc_closure write_done_closure; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes, slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test"); grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset); state.ep = ep; state.write_done = 0; slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data); gpr_slice_buffer_init(&outgoing); gpr_slice_buffer_addn(&outgoing, slices, num_blocks); grpc_closure_init(&write_done_closure, write_done, &state); grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure); drain_socket_blocking(sv[0], num_bytes, num_bytes); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (;;) { grpc_pollset_worker worker; if (state.write_done) { break; } grpc_pollset_work(&exec_ctx, &g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&outgoing); grpc_endpoint_destroy(&exec_ctx, ep); gpr_free(slices); grpc_exec_ctx_finish(&exec_ctx); }
int main(int argc, char **argv) { synchronizer sync; grpc_jwt_verifier *verifier; gpr_cmdline *cl; char *jwt = NULL; char *aud = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_init(); cl = gpr_cmdline_create("JWT verifier tool"); gpr_cmdline_add_string(cl, "jwt", "JSON web token to verify", &jwt); gpr_cmdline_add_string(cl, "aud", "Audience for the JWT", &aud); gpr_cmdline_parse(cl, argc, argv); if (jwt == NULL || aud == NULL) { print_usage_and_exit(cl, argv[0]); } verifier = grpc_jwt_verifier_create(NULL, 0); grpc_init(); sync.pollset = gpr_malloc(grpc_pollset_size()); grpc_pollset_init(sync.pollset, &sync.mu); sync.is_done = 0; grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud, on_jwt_verification_done, &sync); gpr_mu_lock(sync.mu); while (!sync.is_done) { grpc_pollset_worker *worker = NULL; if (!GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, sync.pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), gpr_inf_future(GPR_CLOCK_MONOTONIC)))) sync.is_done = true; gpr_mu_unlock(sync.mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(sync.mu); } gpr_mu_unlock(sync.mu); gpr_free(sync.pollset); grpc_jwt_verifier_destroy(&exec_ctx, verifier); grpc_exec_ctx_finish(&exec_ctx); gpr_cmdline_destroy(cl); grpc_shutdown(); return !sync.success; }
static void test_connect(int n) { struct sockaddr_storage addr; socklen_t addr_len = sizeof(addr); int svrfd, clifd; grpc_tcp_server *s = grpc_tcp_server_create(); int nconnects_before; gpr_timespec deadline; grpc_pollset *pollsets[1]; int i; LOG_TEST("test_connect"); gpr_log(GPR_INFO, "clients=%d", n); memset(&addr, 0, sizeof(addr)); addr.ss_family = AF_INET; GPR_ASSERT(grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, addr_len)); svrfd = grpc_tcp_server_get_fd(s, 0); GPR_ASSERT(svrfd >= 0); GPR_ASSERT(getsockname(svrfd, (struct sockaddr *)&addr, &addr_len) == 0); GPR_ASSERT(addr_len <= sizeof(addr)); pollsets[0] = &g_pollset; grpc_tcp_server_start(s, pollsets, 1, on_connect, NULL); gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); for (i = 0; i < n; i++) { deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10); nconnects_before = g_nconnects; clifd = socket(addr.ss_family, SOCK_STREAM, 0); GPR_ASSERT(clifd >= 0); gpr_log(GPR_DEBUG, "start connect"); GPR_ASSERT(connect(clifd, (struct sockaddr *)&addr, addr_len) == 0); gpr_log(GPR_DEBUG, "wait"); while (g_nconnects == nconnects_before && gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) { grpc_pollset_work(&g_pollset, deadline); } gpr_log(GPR_DEBUG, "wait done"); GPR_ASSERT(g_nconnects == nconnects_before + 1); close(clifd); } gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); grpc_tcp_server_destroy(s, NULL, NULL); }
void test_fails(void) { grpc_resolved_address resolved_addr; struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr; int connections_complete_before; grpc_closure done; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_DEBUG, "test_fails"); memset(&resolved_addr, 0, sizeof(resolved_addr)); resolved_addr.len = sizeof(struct sockaddr_in); addr->sin_family = AF_INET; gpr_mu_lock(g_mu); connections_complete_before = g_connections_complete; gpr_mu_unlock(g_mu); /* connect to a broken address */ GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx); grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL, &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME)); gpr_mu_lock(g_mu); /* wait for the connection callback to finish */ while (g_connections_complete == connections_complete_before) { grpc_pollset_worker *worker = NULL; gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC); gpr_timespec polling_deadline = test_deadline(); switch (grpc_timer_check(&exec_ctx, now, &polling_deadline)) { case GRPC_TIMERS_FIRED: break; case GRPC_TIMERS_NOT_CHECKED: polling_deadline = now; /* fall through */ case GRPC_TIMERS_CHECKED_AND_EMPTY: GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline))); break; } gpr_mu_unlock(g_mu); grpc_exec_ctx_flush(&exec_ctx); gpr_mu_lock(g_mu); } gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); }
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc, gpr_timespec deadline, void *reserved) { grpc_event ret; grpc_pollset_worker worker; int first_loop = 1; gpr_timespec now; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; GPR_ASSERT(!reserved); deadline = gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC); GRPC_CQ_INTERNAL_REF(cc, "next"); gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); for (;;) { if (cc->completed_tail != &cc->completed_head) { grpc_cq_completion *c = (grpc_cq_completion *)cc->completed_head.next; cc->completed_head.next = c->next & ~(gpr_uintptr)1; if (c == cc->completed_tail) { cc->completed_tail = &cc->completed_head; } gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); ret.type = GRPC_OP_COMPLETE; ret.success = c->next & 1u; ret.tag = c->tag; c->done(&exec_ctx, c->done_arg, c); break; } if (cc->shutdown) { gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_SHUTDOWN; break; } now = gpr_now(GPR_CLOCK_MONOTONIC); if (!first_loop && gpr_time_cmp(now, deadline) >= 0) { gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); memset(&ret, 0, sizeof(ret)); ret.type = GRPC_QUEUE_TIMEOUT; break; } first_loop = 0; grpc_pollset_work(&exec_ctx, &cc->pollset, &worker, now, deadline); } GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret); GRPC_CQ_INTERNAL_UNREF(cc, "next"); grpc_exec_ctx_finish(&exec_ctx); return ret; }
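/* Note on the completed-event list walked above: the low bit of each
   completion's next pointer doubles as the success flag, which is why the
   code masks with ~(gpr_uintptr)1 when following the link and reads
   c->next & 1u for ret.success. A self-contained sketch of that low-bit
   pointer-tagging idiom (illustrative types, not the gRPC ones): */
#include <assert.h>
#include <stdint.h>
typedef struct node {
  uintptr_t next; /* successor pointer with a flag stored in bit 0 */
} node;
int main(void) {
  node a, b;
  /* Nodes are at least pointer-aligned, so bit 0 of a real address is free:
     store the "success" flag there when linking a -> b. */
  a.next = (uintptr_t)&b | 1u;
  int success = (int)(a.next & 1u);              /* recover the flag */
  node *next = (node *)(a.next & ~(uintptr_t)1); /* recover the pointer */
  assert(success == 1);
  assert(next == &b);
  return 0;
}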
static void test_threading_loop(void *arg) { threading_shared *shared = arg; while (thread_wakeups < 1000000) { grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; grpc_pollset_worker *worker; gpr_mu_lock(shared->mu); GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, shared->pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), gpr_inf_future(GPR_CLOCK_MONOTONIC)))); gpr_mu_unlock(shared->mu); grpc_exec_ctx_finish(&exec_ctx); } }
/* Write to a socket until it fills up, then read from it using the grpc_tcp API. */ static void large_read_test(size_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20); grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size); create_sockets(sv); grpc_resource_quota *resource_quota = grpc_resource_quota_create("large_read_test"); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota, slice_size, "test"); grpc_resource_quota_unref_internal(&exec_ctx, resource_quota); grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset); written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = (size_t)written_bytes; grpc_slice_buffer_init(&state.incoming); grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx); grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb); gpr_mu_lock(g_mu); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker *worker = NULL; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(g_mu); grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming); grpc_endpoint_destroy(&exec_ctx, ep); grpc_exec_ctx_finish(&exec_ctx); }
/* Wait for the signal to shutdown a client. */ static void client_wait_and_shutdown(client *cl) { gpr_mu_lock(g_mu); while (!cl->done) { grpc_pollset_worker *worker = NULL; grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT; GPR_ASSERT(GRPC_LOG_IF_ERROR( "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), gpr_inf_future(GPR_CLOCK_MONOTONIC)))); gpr_mu_unlock(g_mu); grpc_exec_ctx_finish(&exec_ctx); gpr_mu_lock(g_mu); } gpr_mu_unlock(g_mu); }
/* Write to a socket until it fills up, then read from it using the grpc_tcp API. */ static void large_read_test(ssize_t slice_size) { int sv[2]; grpc_endpoint *ep; struct read_socket_state state; ssize_t written_bytes; gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20); gpr_log(GPR_INFO, "Start large read test, slice size %d", slice_size); create_sockets(sv); ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), slice_size, "test"); grpc_endpoint_add_to_pollset(ep, &g_pollset); written_bytes = fill_socket(sv[0]); gpr_log(GPR_INFO, "Wrote %d bytes", written_bytes); state.ep = ep; state.read_bytes = 0; state.target_read_bytes = written_bytes; gpr_slice_buffer_init(&state.incoming); grpc_iomgr_closure_init(&state.read_cb, read_cb, &state); switch (grpc_endpoint_read(ep, &state.incoming, &state.read_cb)) { case GRPC_ENDPOINT_DONE: read_cb(&state, 1); break; case GRPC_ENDPOINT_ERROR: read_cb(&state, 0); break; case GRPC_ENDPOINT_PENDING: break; } gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset)); while (state.read_bytes < state.target_read_bytes) { grpc_pollset_worker worker; grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC), deadline); } GPR_ASSERT(state.read_bytes == state.target_read_bytes); gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset)); gpr_slice_buffer_destroy(&state.incoming); grpc_endpoint_destroy(ep); }