static void read_and_write_test_read_handler(void *data, gpr_slice *slices, size_t nslices, grpc_endpoint_cb_status error) { struct read_and_write_test_state *state = data; GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR); if (error == GRPC_ENDPOINT_CB_SHUTDOWN) { gpr_log(GPR_INFO, "Read handler shutdown"); gpr_mu_lock(&state->mu); state->read_done = 1; gpr_cv_signal(&state->cv); gpr_mu_unlock(&state->mu); return; } state->bytes_read += count_and_unref_slices(slices, nslices, &state->current_read_data); if (state->bytes_read == state->target_bytes) { gpr_log(GPR_INFO, "Read handler done"); gpr_mu_lock(&state->mu); state->read_done = 1; gpr_cv_signal(&state->cv); gpr_mu_unlock(&state->mu); } else { grpc_endpoint_notify_on_read(state->read_ep, read_and_write_test_read_handler, data); } }
/* Endpoint read callback: feeds received slices to the HTTP parser, re-arms
   the read on success, and advances to the next address or finishes the
   request on EOF/error/shutdown.  All slices are unreffed before returning. */
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status status) {
  internal_request *req = user_data;
  int parse_ok = 1;
  size_t i;

  /* Parse every non-empty slice; stop at the first parser failure. */
  for (i = 0; i < nslices && parse_ok; i++) {
    if (GPR_SLICE_LENGTH(slices[i])) {
      req->have_read_byte = 1;
      if (!grpc_httpcli_parser_parse(&req->parser, slices[i])) {
        finish(req, 0);
        parse_ok = 0;
      }
    }
  }

  if (parse_ok) {
    switch (status) {
      case GRPC_ENDPOINT_CB_OK:
        /* More data may follow: keep reading. */
        grpc_endpoint_notify_on_read(req->ep, on_read, req);
        break;
      case GRPC_ENDPOINT_CB_EOF:
      case GRPC_ENDPOINT_CB_ERROR:
      case GRPC_ENDPOINT_CB_SHUTDOWN:
        if (!req->have_read_byte) {
          /* Connection yielded nothing: try the next resolved address. */
          next_address(req);
        } else {
          finish(req, grpc_httpcli_parser_eof(&req->parser));
        }
        break;
    }
  }

  /* Ownership of the slices ends here regardless of the outcome. */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }
}
/* Test that shutdown with a pending write creates no leaks: write
   exponentially larger payloads until one does not complete inline, shut the
   endpoint down mid-write, then wait for both sides to observe it. */
static void shutdown_during_write_test(grpc_endpoint_test_config config,
                                       size_t slice_size) {
  gpr_timespec deadline;
  size_t size;
  size_t nblocks;
  int current_data = 1;
  shutdown_during_write_test_state read_st;
  shutdown_during_write_test_state write_st;
  gpr_slice *slices;
  grpc_endpoint_test_fixture f =
      begin_test(config, "shutdown_during_write_test", slice_size);

  gpr_log(GPR_INFO, "testing shutdown during a write");

  read_st.ep = f.client_ep;
  write_st.ep = f.server_ep;
  read_st.done = 0;
  write_st.done = 0;

  grpc_endpoint_notify_on_read(
      read_st.ep, shutdown_during_write_test_read_handler, &read_st);
  for (size = 1;; size *= 2) {
    /* FIX: the source contained the mojibake "¤t_data" — an HTML-entity
       corruption ("&curren;") of the intended "&current_data". */
    slices = allocate_blocks(size, 1, &nblocks, &current_data);
    switch (grpc_endpoint_write(write_st.ep, slices, nblocks,
                                shutdown_during_write_test_write_handler,
                                &write_st)) {
      case GRPC_ENDPOINT_WRITE_DONE:
        /* Completed inline: double the payload and try again. */
        break;
      case GRPC_ENDPOINT_WRITE_ERROR:
        gpr_log(GPR_ERROR, "error writing");
        abort();
      case GRPC_ENDPOINT_WRITE_PENDING:
        /* Write is in flight: shut down now and wait for both handlers. */
        grpc_endpoint_shutdown(write_st.ep);
        deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
        gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
        while (!write_st.done) {
          GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
          grpc_pollset_work(g_pollset, deadline);
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
        grpc_endpoint_destroy(write_st.ep);
        gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
        while (!read_st.done) {
          GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
          grpc_pollset_work(g_pollset, deadline);
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
        gpr_free(slices);
        end_test(config);
        return;
    }
    gpr_free(slices);
  }

  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
/* Do both reading and writing using the grpc_endpoint API. This also
   includes a test of the shutdown behavior.
   FIX: num_bytes and slice_size are size_t; passing size_t to a %d
   conversion is undefined behavior — they are now cast to int for logging
   (test sizes comfortably fit in int). */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d",
            (int)num_bytes, (int)slice_size);
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK);

  grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler,
                               &state);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(state.read_ep);
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(state.write_ep);
  }

  /* Drive the pollset until both handlers report completion (or deadline). */
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  while (!state.read_done || !state.write_done) {
    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
    grpc_pollset_work(g_pollset, deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));

  grpc_endpoint_destroy(state.read_ep);
  grpc_endpoint_destroy(state.write_ep);
  end_test(config);
}
/* Arm a read on the secure endpoint.  If leftover handshake bytes are
   buffered, deliver them synchronously via on_read; otherwise wait on the
   wrapped endpoint.  A ref is taken and released when the read completes. */
static void endpoint_notify_on_read(grpc_endpoint *secure_ep,
                                    grpc_endpoint_read_cb cb,
                                    void *user_data) {
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  size_t buffered;

  ep->read_cb = cb;
  ep->read_user_data = user_data;

  /* Keep the endpoint alive for the duration of the read. */
  secure_endpoint_ref(ep);

  buffered = ep->leftover_bytes.count;
  if (buffered == 0) {
    /* Nothing stashed: ask the wrapped endpoint for bytes. */
    grpc_endpoint_notify_on_read(ep->wrapped_ep, on_read, ep);
    return;
  }

  /* Hand the stashed slices straight to the read path (count is cleared
     first so on_read sees a consistent state). */
  ep->leftover_bytes.count = 0;
  on_read(ep, ep->leftover_bytes.slices, buffered, GRPC_ENDPOINT_CB_OK);
}
static void shutdown_during_write_test_read_handler( void *user_data, gpr_slice *slices, size_t nslices, grpc_endpoint_cb_status error) { size_t i; shutdown_during_write_test_state *st = user_data; for (i = 0; i < nslices; i++) { gpr_slice_unref(slices[i]); } if (error != GRPC_ENDPOINT_CB_OK) { grpc_endpoint_destroy(st->ep); gpr_event_set(&st->ev, (void *)(gpr_intptr) error); } else { grpc_endpoint_notify_on_read( st->ep, shutdown_during_write_test_read_handler, user_data); } }
/* If setup is NULL, the setup is done. */
/* Write-completion callback for handshake bytes sent to the peer.  On
   success, either waits for the peer's next handshake bytes or, if the
   handshaker has finished, proceeds to peer validation. */
static void on_handshake_data_sent_to_peer(void *setup,
                                           grpc_endpoint_cb_status error) {
  grpc_secure_transport_setup *s = setup;

  /* Make sure that write is OK. */
  if (error != GRPC_ENDPOINT_CB_OK) {
    gpr_log(GPR_ERROR, "Write failed with error %d.", error);
    /* NOTE(review): per the comment above, a NULL setup means setup already
       completed, so the failure is only logged in that case. */
    if (setup != NULL) secure_transport_setup_done(s, 0);
    return;
  }

  /* We may be done. */
  if (tsi_handshaker_is_in_progress(s->handshaker)) {
    /* TODO(klempner,jboeuf): This should probably use the client setup
       deadline */
    grpc_endpoint_notify_on_read(s->endpoint,
                                 on_handshake_data_received_from_peer, setup);
  } else {
    check_peer(s);
  }
}
static void shutdown_during_write_test_read_handler( void *user_data, gpr_slice *slices, size_t nslices, grpc_endpoint_cb_status error) { size_t i; shutdown_during_write_test_state *st = user_data; for (i = 0; i < nslices; i++) { gpr_slice_unref(slices[i]); } if (error != GRPC_ENDPOINT_CB_OK) { grpc_endpoint_destroy(st->ep); gpr_mu_lock(GRPC_POLLSET_MU(g_pollset)); st->done = error; grpc_pollset_kick(g_pollset); gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset)); } else { grpc_endpoint_notify_on_read( st->ep, shutdown_during_write_test_read_handler, user_data); } }
/* iomgr closure: (re)arm the transport's read notification; the success
   flag is intentionally ignored. */
static void reading_action(void *pt, int iomgr_success_ignored) {
  grpc_chttp2_transport *transport = pt;
  grpc_endpoint_notify_on_read(transport->ep, recv_data, transport);
}
/* Write-completion continuation: log entry (debug) and start reading the
   server's response. */
static void on_written(internal_request *req) {
  gpr_log(GPR_DEBUG, "%s", __FUNCTION__);
  grpc_endpoint_notify_on_read(req->ep, on_read, req);
}
/* Write-completion continuation: once the request has been written, start
   reading the server's response. */
static void on_written(internal_request *req) {
  grpc_endpoint_notify_on_read(req->ep, on_read, req);
}
/* Do both reading and writing using the grpc_endpoint API. This also
   includes a test of the shutdown behavior.
   FIX: num_bytes and slice_size are size_t; passing size_t to a %d
   conversion is undefined behavior — they are now cast to int for logging
   (test sizes comfortably fit in int). */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_endpoint_test_fixture f = begin_test(config, __FUNCTION__, slice_size);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d",
            (int)num_bytes, (int)slice_size);
  }

  gpr_mu_init(&state.mu);
  gpr_cv_init(&state.cv);
  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK);

  grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler,
                               &state);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(state.read_ep);
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(state.write_ep);
  }

  /* Wait for both handlers; gpr_cv_wait returns non-zero on timeout. */
  gpr_mu_lock(&state.mu);
  while (!state.read_done || !state.write_done) {
    if (gpr_cv_wait(&state.cv, &state.mu, deadline)) {
      gpr_log(GPR_ERROR, "timeout: read_done=%d, write_done=%d",
              state.read_done, state.write_done);
      abort();
    }
  }
  gpr_mu_unlock(&state.mu);

  grpc_endpoint_destroy(state.read_ep);
  grpc_endpoint_destroy(state.write_ep);
  gpr_mu_destroy(&state.mu);
  gpr_cv_destroy(&state.cv);
  end_test(config);
}
/* Read callback during the security handshake: feeds received bytes to the
   TSI handshaker, continues the handshake when more data is needed, and on
   successful completion stashes any unconsumed bytes in s->left_overs
   before validating the peer.  All slices are cleaned up on every path. */
static void on_handshake_data_received_from_peer(
    void *setup, gpr_slice *slices, size_t nslices,
    grpc_endpoint_cb_status error) {
  grpc_secure_transport_setup *s = setup;
  size_t consumed_slice_size = 0;
  tsi_result result = TSI_OK;
  size_t i;
  size_t num_left_overs;
  int has_left_overs_in_current_slice = 0;

  if (error != GRPC_ENDPOINT_CB_OK) {
    gpr_log(GPR_ERROR, "Read failed.");
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  /* Feed each slice to the handshaker; consumed_slice_size is updated in
     place to the number of bytes actually consumed.  Stop early once the
     handshake is no longer in progress (finished or failed). */
  for (i = 0; i < nslices; i++) {
    consumed_slice_size = GPR_SLICE_LENGTH(slices[i]);
    result = tsi_handshaker_process_bytes_from_peer(
        s->handshaker, GPR_SLICE_START_PTR(slices[i]), &consumed_slice_size);
    if (!tsi_handshaker_is_in_progress(s->handshaker)) break;
  }

  if (tsi_handshaker_is_in_progress(s->handshaker)) {
    /* We may need more data. */
    if (result == TSI_INCOMPLETE_DATA) {
      /* TODO(klempner,jboeuf): This should probably use the client setup
         deadline */
      grpc_endpoint_notify_on_read(s->endpoint,
                                   on_handshake_data_received_from_peer,
                                   setup);
      cleanup_slices(slices, nslices);
      return;
    } else {
      /* Handshaker wants to speak next: send its pending bytes. */
      send_handshake_bytes_to_peer(s);
      cleanup_slices(slices, nslices);
      return;
    }
  }

  if (result != TSI_OK) {
    gpr_log(GPR_ERROR, "Handshake failed with error %s",
            tsi_result_to_string(result));
    cleanup_slices(slices, nslices);
    secure_transport_setup_done(s, 0);
    return;
  }

  /* Handshake is done and successful this point. */
  /* NOTE(review): assumes nslices > 0 on this path; slices[i] below would be
     out of bounds for an empty read — confirm callers never deliver one. */
  has_left_overs_in_current_slice =
      (consumed_slice_size < GPR_SLICE_LENGTH(slices[i]));
  num_left_overs = (has_left_overs_in_current_slice ? 1 : 0) + nslices - i - 1;
  if (num_left_overs == 0) {
    cleanup_slices(slices, nslices);
    check_peer(s);
    return;
  }
  /* Release only the fully-consumed leading slices. */
  cleanup_slices(slices, nslices - num_left_overs);
  /* Put the leftovers in our buffer (ownership transfered). */
  if (has_left_overs_in_current_slice) {
    gpr_slice_buffer_add(&s->left_overs,
                         gpr_slice_split_tail(&slices[i], consumed_slice_size));
    gpr_slice_unref(slices[i]); /* split_tail above increments refcount. */
  }
  gpr_slice_buffer_addn(&s->left_overs, &slices[i + 1],
                        num_left_overs - has_left_overs_in_current_slice);
  check_peer(s);
}