// Writes the given number of records of random size (up to kMaxRecordSize) and
// random data to the specified log.
static void writer_thread(void* arg) {
  writer_thread_args* args = (writer_thread_args*)arg;
  // Maximum number of times to spin between writes.
  static const int MAX_SPIN_COUNT = 50;
  int records_written = 0;
  if (VERBOSE) {
    printf("   Writer %d starting\n", args->index);
  }
  while (records_written < args->num_records) {
    records_written += write_records_to_log(args->index, args->record_size,
                                            args->num_records - records_written,
                                            MAX_SPIN_COUNT);
    if (records_written < args->num_records) {
      // Ran out of log space. Sleep for a bit and let the reader catch up.
      // This should never happen for circular logs.
      if (VERBOSE) {
        printf("   Writer %d stalled due to out-of-space: %d out of %d written\n",
               args->index, records_written, args->num_records);
      }
      gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    }
  }
  // Done. Decrement count and signal.
  gpr_mu_lock(args->mu);
  (*args->count)--;
  gpr_cv_signal(args->done);
  if (VERBOSE) {
    printf("   Writer %d done\n", args->index);
  }
  gpr_mu_unlock(args->mu);
}
/* Reads num_bytes from fd in blocking mode, read_size bytes at a time,
   verifying that the payload is the expected repeating 0..255 pattern, then
   restores O_NONBLOCK on the fd. */
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
  unsigned char *buf = gpr_malloc(read_size);
  ssize_t bytes_read;
  size_t bytes_left = num_bytes;
  int flags;
  int current = 0;
  int i;
  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0);
  for (;;) {
    grpc_pollset_worker worker;
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    do {
      bytes_read =
          read(fd, buf, bytes_left > read_size ? read_size : bytes_left);
    } while (bytes_read < 0 && errno == EINTR);
    GPR_ASSERT(bytes_read >= 0);
    for (i = 0; i < bytes_read; ++i) {
      GPR_ASSERT(buf[i] == current);
      current = (current + 1) % 256;
    }
    bytes_left -= (size_t)bytes_read;
    if (bytes_left == 0) break;
  }
  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
  gpr_free(buf);
}
/* Polls the completion queue in 20ms slices, expecting only timeouts, until
   the server reports no open connections. */
static void verifier(grpc_server *server, grpc_completion_queue *cq) {
  while (grpc_server_has_open_connections(server)) {
    GPR_ASSERT(grpc_completion_queue_next(
                   cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(20), NULL)
                   .type == GRPC_QUEUE_TIMEOUT);
  }
}
/* Writes the given number of records of random size (up to kMaxRecordSize)
   and random data to the specified log. */
static void writer_thread(void *arg) {
  writer_thread_args *args = (writer_thread_args *)arg;
  /* Maximum number of times to spin between writes. */
  static const int32_t MAX_SPIN_COUNT = 50;
  int records_written = 0;
  printf("   Writer: %d\n", args->index);
  while (records_written < args->num_records) {
    records_written += write_records_to_log(args->index, args->record_size,
                                            args->num_records - records_written,
                                            MAX_SPIN_COUNT);
    if (records_written < args->num_records) {
      /* Ran out of log space. Sleep for a bit and let the reader catch up.
         This should never happen for circular logs. */
      printf("   Writer stalled due to out-of-space: %d out of %d written\n",
             records_written, args->num_records);
      gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    }
  }
  /* Done. Decrement count and signal. */
  gpr_mu_lock(args->mu);
  (*args->count)--;
  gpr_cv_broadcast(args->done);
  printf("   Writer done: %d\n", args->index);
  gpr_mu_unlock(args->mu);
}
/* Repeatedly creates a channel and completion queue, issues short-deadline
   connectivity-state watches, and tears everything down again. */
void create_loop_destroy(void *addr) {
  for (int i = 0; i < NUM_OUTER_LOOPS; ++i) {
    grpc_completion_queue *cq = grpc_completion_queue_create(NULL);
    grpc_channel *chan = grpc_insecure_channel_create((char *)addr, NULL, NULL);
    for (int j = 0; j < NUM_INNER_LOOPS; ++j) {
      gpr_timespec later_time = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(DELAY_MILLIS);
      grpc_connectivity_state state =
          grpc_channel_check_connectivity_state(chan, 1);
      grpc_channel_watch_connectivity_state(chan, state, later_time, cq, NULL);
      gpr_timespec poll_time = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(POLL_MILLIS);
      GPR_ASSERT(grpc_completion_queue_next(cq, poll_time, NULL).type ==
                 GRPC_OP_COMPLETE);
    }
    grpc_channel_destroy(chan);
    grpc_completion_queue_destroy(cq);
  }
}
void test_times_out(void) {
  struct sockaddr_in addr;
  socklen_t addr_len = sizeof(addr);
  int svr_fd;
#define NUM_CLIENT_CONNECTS 10
  int client_fd[NUM_CLIENT_CONNECTS];
  int i;
  int r;
  gpr_event ev;
  gpr_timespec connect_deadline;

  gpr_event_init(&ev);

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;

  /* create a dummy server */
  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
  GPR_ASSERT(svr_fd >= 0);
  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
  GPR_ASSERT(0 == listen(svr_fd, 1));
  /* Get its address */
  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);

  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
    grpc_set_socket_nonblocking(client_fd[i], 1);
    do {
      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
    } while (r == -1 && errno == EINTR);
    GPR_ASSERT(r < 0);
    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
  }

  /* connect to dummy server address */
  connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
  grpc_tcp_client_connect(must_fail, &ev, (struct sockaddr *)&addr, addr_len,
                          connect_deadline);

  /* Make sure the event doesn't trigger early */
  GPR_ASSERT(!gpr_event_wait(&ev, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(500)));
  /* Now wait until it should have triggered */
  sleep(1);

  /* wait for the connection callback to finish */
  GPR_ASSERT(gpr_event_wait(&ev, test_deadline()));

  close(svr_fd);
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    close(client_fd[i]);
  }
}
static void test_too_many_plucks(void) {
  grpc_event ev;
  grpc_completion_queue *cc;
  void *tags[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
  grpc_cq_completion completions[GPR_ARRAY_SIZE(tags)];
  gpr_thd_id thread_ids[GPR_ARRAY_SIZE(tags)];
  struct thread_state thread_states[GPR_ARRAY_SIZE(tags)];
  gpr_thd_options thread_options = gpr_thd_options_default();
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  unsigned i, j;

  LOG_TEST("test_too_many_plucks");

  cc = grpc_completion_queue_create(NULL);
  gpr_thd_options_set_joinable(&thread_options);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    tags[i] = create_test_tag();
    for (j = 0; j < i; j++) {
      GPR_ASSERT(tags[i] != tags[j]);
    }
    thread_states[i].cc = cc;
    thread_states[i].tag = tags[i];
    gpr_thd_new(thread_ids + i, pluck_one, thread_states + i, &thread_options);
  }

  /* wait until all other threads are plucking */
  gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(1000));

  ev = grpc_completion_queue_pluck(cc, create_test_tag(),
                                   gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    grpc_cq_begin_op(cc, tags[i]);
    grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
                   do_nothing_end_completion, NULL, &completions[i]);
  }

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    gpr_thd_join(thread_ids[i]);
  }

  shutdown_and_destroy(cc);
  grpc_exec_ctx_finish(&exec_ctx);
}
static void test_invoke_request_with_flags(
    grpc_end2end_test_config config, uint32_t *flags_for_op,
    grpc_call_error call_start_batch_expected_result) {
  grpc_call *c;
  gpr_slice request_payload_slice = gpr_slice_from_copied_string("hello world");
  grpc_byte_buffer *request_payload =
      grpc_raw_byte_buffer_create(&request_payload_slice, 1);
  gpr_timespec deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
  grpc_end2end_test_fixture f =
      begin_test(config, "test_invoke_request_with_flags", NULL, NULL);
  cq_verifier *cqv = cq_verifier_create(f.cq);
  grpc_op ops[6];
  grpc_op *op;
  grpc_metadata_array initial_metadata_recv;
  grpc_metadata_array trailing_metadata_recv;
  grpc_metadata_array request_metadata_recv;
  grpc_byte_buffer *request_payload_recv = NULL;
  grpc_call_details call_details;
  grpc_status_code status;
  grpc_call_error error;
  char *details = NULL;
  size_t details_capacity = 0;
  grpc_call_error expectation;

  c = grpc_channel_create_call(f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
                               "/foo", "foo.test.google.fr", deadline, NULL);
  GPR_ASSERT(c);

  grpc_metadata_array_init(&initial_metadata_recv);
  grpc_metadata_array_init(&trailing_metadata_recv);
  grpc_metadata_array_init(&request_metadata_recv);
  grpc_call_details_init(&call_details);

  op = ops;
  op->op = GRPC_OP_SEND_INITIAL_METADATA;
  op->data.send_initial_metadata.count = 0;
  op->flags = flags_for_op[op->op];
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_MESSAGE;
  op->data.send_message = request_payload;
  op->flags = flags_for_op[op->op];
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
  op->flags = flags_for_op[op->op];
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_INITIAL_METADATA;
  op->data.recv_initial_metadata = &initial_metadata_recv;
  op->flags = flags_for_op[op->op];
  op->reserved = NULL;
  op++;
  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
  op->data.recv_status_on_client.status = &status;
  op->data.recv_status_on_client.status_details = &details;
  op->data.recv_status_on_client.status_details_capacity = &details_capacity;
  op->flags = flags_for_op[op->op];
  op->reserved = NULL;
  op++;
  expectation = call_start_batch_expected_result;
  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
  GPR_ASSERT(expectation == error);

  if (expectation == GRPC_CALL_OK) {
    cq_expect_completion(cqv, tag(1), 1);
    cq_verify(cqv);
  }

  gpr_free(details);
  grpc_metadata_array_destroy(&initial_metadata_recv);
  grpc_metadata_array_destroy(&trailing_metadata_recv);
  grpc_metadata_array_destroy(&request_metadata_recv);
  grpc_call_details_destroy(&call_details);

  grpc_call_destroy(c);

  cq_verifier_destroy(cqv);

  grpc_byte_buffer_destroy(request_payload);
  grpc_byte_buffer_destroy(request_payload_recv);

  end_test(&f);
  config.tear_down_data(&f);
}
/** Returns connection sequence (server indices), which must be freed */
static int *perform_request(servers_fixture *f, grpc_channel *client,
                            request_data *rdata, const test_spec *spec) {
  grpc_call *c;
  int s_idx;
  int *s_valid;
  grpc_op ops[6];
  grpc_op *op;
  int was_cancelled;
  size_t i, iter_num;
  grpc_event ev;
  int read_tag;
  int *connection_sequence;
  int completed_client;

  s_valid = gpr_malloc(sizeof(int) * f->num_servers);
  connection_sequence = gpr_malloc(sizeof(int) * spec->num_iters);

  for (iter_num = 0; iter_num < spec->num_iters; iter_num++) {
    cq_verifier *cqv = cq_verifier_create(f->cq);
    rdata->details = NULL;
    rdata->details_capacity = 0;
    was_cancelled = 2;

    for (i = 0; i < f->num_servers; i++) {
      if (spec->kill_at[iter_num][i] != 0) {
        kill_server(f, i);
      } else if (spec->revive_at[iter_num][i] != 0) {
        /* killing takes precedence */
        revive_server(f, rdata, i);
      }
    }

    connection_sequence[iter_num] = -1;
    grpc_metadata_array_init(&rdata->initial_metadata_recv);
    grpc_metadata_array_init(&rdata->trailing_metadata_recv);

    for (i = 0; i < f->num_servers; i++) {
      grpc_call_details_init(&rdata->call_details[i]);
    }
    memset(s_valid, 0, f->num_servers * sizeof(int));

    c = grpc_channel_create_call(client, NULL, GRPC_PROPAGATE_DEFAULTS, f->cq,
                                 "/foo", "foo.test.google.fr",
                                 gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
    GPR_ASSERT(c);
    completed_client = 0;

    op = ops;
    op->op = GRPC_OP_SEND_INITIAL_METADATA;
    op->data.send_initial_metadata.count = 0;
    op->flags = 0;
    op->reserved = NULL;
    op++;
    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
    op->flags = 0;
    op->reserved = NULL;
    op++;
    op->op = GRPC_OP_RECV_INITIAL_METADATA;
    op->data.recv_initial_metadata = &rdata->initial_metadata_recv;
    op->flags = 0;
    op->reserved = NULL;
    op++;
    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
    op->data.recv_status_on_client.trailing_metadata =
        &rdata->trailing_metadata_recv;
    op->data.recv_status_on_client.status = &rdata->status;
    op->data.recv_status_on_client.status_details = &rdata->details;
    op->data.recv_status_on_client.status_details_capacity =
        &rdata->details_capacity;
    op->flags = 0;
    op->reserved = NULL;
    op++;
    GPR_ASSERT(GRPC_CALL_OK ==
               grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL));

    s_idx = -1;
    while ((ev = grpc_completion_queue_next(
                f->cq, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1), NULL))
               .type != GRPC_QUEUE_TIMEOUT) {
      GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
      read_tag = ((int)(gpr_intptr)ev.tag);
      gpr_log(GPR_DEBUG, "EVENT: success:%d, type:%d, tag:%d iter:%d",
              ev.success, ev.type, read_tag, (int)iter_num);
      if (ev.success && read_tag >= 1000) {
        GPR_ASSERT(s_idx == -1); /* only one server must reply */
        /* only server notifications for non-shutdown events */
        s_idx = read_tag - 1000;
        s_valid[s_idx] = 1;
        connection_sequence[iter_num] = s_idx;
        break;
      } else if (read_tag == 1) {
        gpr_log(GPR_DEBUG, "client timed out");
        GPR_ASSERT(ev.success);
        completed_client = 1;
      }
    }

    if (s_idx >= 0) {
      op = ops;
      op->op = GRPC_OP_SEND_INITIAL_METADATA;
      op->data.send_initial_metadata.count = 0;
      op->flags = 0;
      op->reserved = NULL;
      op++;
      op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
      op->data.send_status_from_server.trailing_metadata_count = 0;
      op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
      op->data.send_status_from_server.status_details = "xyz";
      op->flags = 0;
      op->reserved = NULL;
      op++;
      op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
      op->data.recv_close_on_server.cancelled = &was_cancelled;
      op->flags = 0;
      op->reserved = NULL;
      op++;
      GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(f->server_calls[s_idx],
                                                       ops, (size_t)(op - ops),
                                                       tag(102), NULL));

      cq_expect_completion(cqv, tag(102), 1);
      if (!completed_client) {
        cq_expect_completion(cqv, tag(1), 1);
      }
      cq_verify(cqv);

      gpr_log(GPR_DEBUG, "status=%d; %s", rdata->status, rdata->details);
      GPR_ASSERT(rdata->status == GRPC_STATUS_UNIMPLEMENTED);
      GPR_ASSERT(0 == strcmp(rdata->details, "xyz"));
      GPR_ASSERT(0 == strcmp(rdata->call_details[s_idx].method, "/foo"));
      GPR_ASSERT(0 ==
                 strcmp(rdata->call_details[s_idx].host, "foo.test.google.fr"));
      GPR_ASSERT(was_cancelled == 1);

      grpc_call_destroy(f->server_calls[s_idx]);

      /* ask for the next request on this server */
      GPR_ASSERT(GRPC_CALL_OK == grpc_server_request_call(
                                     f->servers[s_idx], &f->server_calls[s_idx],
                                     &rdata->call_details[s_idx],
                                     &f->request_metadata_recv[s_idx], f->cq,
                                     f->cq, tag(1000 + (int)s_idx)));
    } else { /* no response from server */
      grpc_call_cancel(c, NULL);
      if (!completed_client) {
        cq_expect_completion(cqv, tag(1), 1);
        cq_verify(cqv);
      }
    }

    GPR_ASSERT(grpc_completion_queue_next(
                   f->cq, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(200), NULL)
                   .type == GRPC_QUEUE_TIMEOUT);

    grpc_metadata_array_destroy(&rdata->initial_metadata_recv);
    grpc_metadata_array_destroy(&rdata->trailing_metadata_recv);

    cq_verifier_destroy(cqv);

    grpc_call_destroy(c);

    for (i = 0; i < f->num_servers; i++) {
      grpc_call_details_destroy(&rdata->call_details[i]);
    }
    gpr_free(rdata->details);
  }

  gpr_free(s_valid);

  return connection_sequence;
}
static void test_connectivity(grpc_end2end_test_config config) {
  grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL);
  grpc_connectivity_state state;
  cq_verifier *cqv = cq_verifier_create(f.cq);
  child_events ce;
  gpr_thd_options thdopt = gpr_thd_options_default();
  gpr_thd_id thdid;

  config.init_client(&f, NULL);

  ce.channel = f.client;
  ce.cq = f.cq;
  gpr_event_init(&ce.started);
  gpr_thd_options_set_joinable(&thdopt);
  GPR_ASSERT(gpr_thd_new(&thdid, child_thread, &ce, &thdopt));

  gpr_event_wait(&ce.started, gpr_inf_future(GPR_CLOCK_MONOTONIC));

  /* channels should start life in IDLE, and stay there */
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 0) ==
             GRPC_CHANNEL_IDLE);
  gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100));
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 0) ==
             GRPC_CHANNEL_IDLE);

  /* start watching for a change */
  gpr_log(GPR_DEBUG, "watching");
  grpc_channel_watch_connectivity_state(
      f.client, GRPC_CHANNEL_IDLE, gpr_now(GPR_CLOCK_MONOTONIC), f.cq, tag(1));

  /* eventually the child thread completion should trigger */
  gpr_thd_join(thdid);

  /* check that we're still in idle, and start connecting */
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 1) ==
             GRPC_CHANNEL_IDLE);
  /* start watching for a change */
  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_IDLE,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(2));

  /* and now the watch should trigger */
  cq_expect_completion(cqv, tag(2), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING);

  /* quickly followed by a transition to TRANSIENT_FAILURE */
  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_CONNECTING,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(3));
  cq_expect_completion(cqv, tag(3), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING);

  gpr_log(GPR_DEBUG, "*** STARTING SERVER ***");

  /* now let's bring up a server to connect to */
  config.init_server(&f, NULL);

  gpr_log(GPR_DEBUG, "*** STARTED SERVER ***");

  /* we'll go through some set of transitions (some might be missed), until
     READY is reached */
  while (state != GRPC_CHANNEL_READY) {
    grpc_channel_watch_connectivity_state(
        f.client, state, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), f.cq, tag(4));
    cq_expect_completion(cqv, tag(4), 1);
    cq_verify(cqv);
    state = grpc_channel_check_connectivity_state(f.client, 0);
    GPR_ASSERT(state == GRPC_CHANNEL_READY ||
               state == GRPC_CHANNEL_CONNECTING ||
               state == GRPC_CHANNEL_TRANSIENT_FAILURE);
  }

  /* bring down the server again */
  /* we should go immediately to TRANSIENT_FAILURE */
  gpr_log(GPR_DEBUG, "*** SHUTTING DOWN SERVER ***");

  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_READY,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(5));

  grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead));

  cq_expect_completion(cqv, tag(5), 1);
  cq_expect_completion(cqv, tag(0xdead), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING || state == GRPC_CHANNEL_IDLE);

  /* cleanup server */
  grpc_server_destroy(f.server);

  gpr_log(GPR_DEBUG, "*** SHUTDOWN SERVER ***");

  grpc_channel_destroy(f.client);
  grpc_completion_queue_shutdown(f.cq);
  grpc_completion_queue_destroy(f.cq);
  config.tear_down_data(&f);

  cq_verifier_destroy(cqv);
}
static gpr_timespec ms_from_now(int ms) {
  return GRPC_TIMEOUT_MILLIS_TO_DEADLINE(ms);
}
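/* Illustrative sketch, not taken from any of the snippets above: a
   hypothetical helper (the name drain_cq_until_deadline is an assumption)
   showing how a millisecond deadline built with
   GRPC_TIMEOUT_MILLIS_TO_DEADLINE, or via a wrapper such as ms_from_now,
   is typically consumed: polling a completion queue until the absolute
   deadline expires or the queue is shut down. */
static void drain_cq_until_deadline(grpc_completion_queue *cq, int ms) {
  gpr_timespec deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(ms);
  grpc_event ev;
  do {
    /* GRPC_OP_COMPLETE events are discarded; the loop ends on
       GRPC_QUEUE_TIMEOUT (deadline passed) or GRPC_QUEUE_SHUTDOWN. */
    ev = grpc_completion_queue_next(cq, deadline, NULL);
  } while (ev.type == GRPC_OP_COMPLETE);
}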
void test_times_out(void) {
  struct sockaddr_in addr;
  socklen_t addr_len = sizeof(addr);
  int svr_fd;
#define NUM_CLIENT_CONNECTS 100
  int client_fd[NUM_CLIENT_CONNECTS];
  int i;
  int r;
  int connections_complete_before;
  gpr_timespec connect_deadline;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_times_out");

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;

  /* create a dummy server */
  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
  GPR_ASSERT(svr_fd >= 0);
  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
  GPR_ASSERT(0 == listen(svr_fd, 1));
  /* Get its address */
  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);

  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
    grpc_set_socket_nonblocking(client_fd[i], 1);
    do {
      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
    } while (r == -1 && errno == EINTR);
    GPR_ASSERT(r < 0);
    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
  }

  /* connect to dummy server address */
  connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  grpc_closure_init(&done, must_fail, NULL);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, &g_pollset_set,
                          (struct sockaddr *)&addr, addr_len, connect_deadline);

  /* Make sure the event doesn't trigger early */
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    gpr_timespec now = gpr_now(connect_deadline.clock_type);
    gpr_timespec continue_verifying_time =
        gpr_time_from_seconds(5, GPR_TIMESPAN);
    gpr_timespec grace_time = gpr_time_from_seconds(3, GPR_TIMESPAN);
    gpr_timespec finish_time =
        gpr_time_add(connect_deadline, continue_verifying_time);
    gpr_timespec restart_verifying_time =
        gpr_time_add(connect_deadline, grace_time);
    int is_after_deadline = gpr_time_cmp(now, connect_deadline) > 0;
    if (gpr_time_cmp(now, finish_time) > 0) {
      break;
    }
    gpr_log(GPR_DEBUG, "now=%lld.%09d connect_deadline=%lld.%09d",
            (long long)now.tv_sec, (int)now.tv_nsec,
            (long long)connect_deadline.tv_sec, (int)connect_deadline.tv_nsec);
    if (is_after_deadline &&
        gpr_time_cmp(now, restart_verifying_time) <= 0) {
      /* allow some slack before insisting that things be done */
    } else {
      GPR_ASSERT(g_connections_complete ==
                 connections_complete_before + is_after_deadline);
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  close(svr_fd);
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    close(client_fd[i]);
  }
}
/* Test grpc_alarm add and cancel. */
static void test_grpc_alarm(void) {
  grpc_alarm alarm;
  grpc_alarm alarm_to_cancel;
  /* Timeout on the alarm cond. var, so make big enough to absorb time
     deviations. Otherwise, operations after wait will not be properly
     ordered */
  gpr_timespec alarm_deadline;
  gpr_timespec followup_deadline;

  alarm_arg arg;
  alarm_arg arg2;
  void *fdone;

  grpc_iomgr_init();

  arg.counter = 0;
  arg.success = SUCCESS_NOT_SET;
  arg.done_success_ctr = 0;
  arg.done_cancel_ctr = 0;
  arg.done = 0;
  gpr_mu_init(&arg.mu);
  gpr_cv_init(&arg.cv);
  gpr_event_init(&arg.fcb_arg);

  grpc_alarm_init(&alarm, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100), alarm_cb, &arg,
                  gpr_now());

  alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
  gpr_mu_lock(&arg.mu);
  while (arg.done == 0) {
    if (gpr_cv_wait(&arg.cv, &arg.mu, alarm_deadline)) {
      gpr_log(GPR_ERROR, "alarm deadline exceeded");
      break;
    }
  }
  gpr_mu_unlock(&arg.mu);

  followup_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
  fdone = gpr_event_wait(&arg.fcb_arg, followup_deadline);

  if (arg.counter != 1) {
    gpr_log(GPR_ERROR, "Alarm callback not called");
    GPR_ASSERT(0);
  } else if (arg.done_success_ctr != 1) {
    gpr_log(GPR_ERROR, "Alarm done callback not called with success");
    GPR_ASSERT(0);
  } else if (arg.done_cancel_ctr != 0) {
    gpr_log(GPR_ERROR, "Alarm done callback called with cancel");
    GPR_ASSERT(0);
  } else if (arg.success == SUCCESS_NOT_SET) {
    gpr_log(GPR_ERROR, "Alarm callback without status");
    GPR_ASSERT(0);
  } else {
    gpr_log(GPR_INFO, "Alarm callback called successfully");
  }

  if (fdone != (void *)&arg.fcb_arg) {
    gpr_log(GPR_ERROR, "Followup callback #1 not invoked properly %p %p", fdone,
            &arg.fcb_arg);
    GPR_ASSERT(0);
  }
  gpr_cv_destroy(&arg.cv);
  gpr_mu_destroy(&arg.mu);

  arg2.counter = 0;
  arg2.success = SUCCESS_NOT_SET;
  arg2.done_success_ctr = 0;
  arg2.done_cancel_ctr = 0;
  arg2.done = 0;
  gpr_mu_init(&arg2.mu);
  gpr_cv_init(&arg2.cv);
  gpr_event_init(&arg2.fcb_arg);

  grpc_alarm_init(&alarm_to_cancel, GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100),
                  alarm_cb, &arg2, gpr_now());
  grpc_alarm_cancel(&alarm_to_cancel);

  alarm_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);
  gpr_mu_lock(&arg2.mu);
  while (arg2.done == 0) {
    gpr_cv_wait(&arg2.cv, &arg2.mu, alarm_deadline);
  }
  gpr_mu_unlock(&arg2.mu);

  gpr_log(GPR_INFO, "alarm done = %d", arg2.done);

  followup_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(5);
  fdone = gpr_event_wait(&arg2.fcb_arg, followup_deadline);

  if (arg2.counter != arg2.done_success_ctr) {
    gpr_log(GPR_ERROR, "Alarm callback called but didn't lead to done success");
    GPR_ASSERT(0);
  } else if (arg2.done_success_ctr && arg2.done_cancel_ctr) {
    gpr_log(GPR_ERROR, "Alarm done callback called with success and cancel");
    GPR_ASSERT(0);
  } else if (arg2.done_cancel_ctr + arg2.done_success_ctr != 1) {
    gpr_log(GPR_ERROR, "Alarm done callback called incorrect number of times");
    GPR_ASSERT(0);
  } else if (arg2.success == SUCCESS_NOT_SET) {
    gpr_log(GPR_ERROR, "Alarm callback without status");
    GPR_ASSERT(0);
  } else if (arg2.done_success_ctr) {
    gpr_log(GPR_INFO, "Alarm callback executed before cancel");
    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
            alarm_to_cancel.triggered);
  } else if (arg2.done_cancel_ctr) {
    gpr_log(GPR_INFO, "Alarm callback canceled");
    gpr_log(GPR_INFO, "Current value of triggered is %d\n",
            alarm_to_cancel.triggered);
  } else {
    gpr_log(GPR_ERROR, "Alarm cancel test should not be here");
    GPR_ASSERT(0);
  }

  if (fdone != (void *)&arg2.fcb_arg) {
    gpr_log(GPR_ERROR, "Followup callback #2 not invoked properly %p %p", fdone,
            &arg2.fcb_arg);
    GPR_ASSERT(0);
  }
  gpr_cv_destroy(&arg2.cv);
  gpr_mu_destroy(&arg2.mu);

  grpc_iomgr_shutdown();
}