/* Start a client to send a stream of bytes. */
static void client_start(grpc_exec_ctx *exec_ctx, client *cl, int port) {
  int fd;
  struct sockaddr_in sin;
  create_test_socket(port, &fd, &sin);
  if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
    if (errno == EINPROGRESS) {
      struct pollfd pfd;
      pfd.fd = fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;
      if (poll(&pfd, 1, -1) == -1) {
        gpr_log(GPR_ERROR, "poll() failed during connect; errno=%d", errno);
        abort();
      }
    } else {
      gpr_log(GPR_ERROR, "Failed to connect to the server (errno=%d)", errno);
      abort();
    }
  }

  cl->em_fd = grpc_fd_create(fd, "client");
  grpc_pollset_add_fd(exec_ctx, g_pollset, cl->em_fd);

  client_session_write(exec_ctx, cl, GRPC_ERROR_NONE);
}
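/* Aside: a plain-POSIX sketch (not gRPC code) of how a non-blocking connect()
   is normally completed. After poll() reports POLLOUT, SO_ERROR distinguishes
   success from an asynchronous connect failure; the test above can skip that
   check because any later write on a failed socket would surface the error
   and abort the test anyway. */
#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

static int finish_nonblocking_connect(int fd) {
  struct pollfd pfd;
  pfd.fd = fd;
  pfd.events = POLLOUT;
  pfd.revents = 0;
  if (poll(&pfd, 1, -1) == -1) return -1; /* poll itself failed */
  int so_error = 0;
  socklen_t len = sizeof(so_error);
  if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &len) == -1) return -1;
  if (so_error != 0) {
    errno = so_error; /* the deferred connect() error */
    return -1;
  }
  return 0; /* connection established */
}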
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
                           grpc_pollset **pollsets, size_t pollset_count,
                           grpc_tcp_server_cb on_accept_cb,
                           void *on_accept_cb_arg) {
  size_t i, j;
  GPR_ASSERT(on_accept_cb);
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(!s->on_accept_cb);
  GPR_ASSERT(s->active_ports == 0);
  s->on_accept_cb = on_accept_cb;
  s->on_accept_cb_arg = on_accept_cb_arg;
  s->pollsets = pollsets;
  s->pollset_count = pollset_count;
  for (i = 0; i < s->nports; i++) {
    for (j = 0; j < pollset_count; j++) {
      grpc_pollset_add_fd(exec_ctx, pollsets[j], s->ports[i].emfd);
    }
    s->ports[i].read_closure.cb = on_read;
    s->ports[i].read_closure.cb_arg = &s->ports[i];
    grpc_fd_notify_on_read(exec_ctx, s->ports[i].emfd,
                           &s->ports[i].read_closure);
    s->active_ports++;
  }
  gpr_mu_unlock(&s->mu);
}
/* Called when a new TCP connection request arrives on the listening port. */
static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
                      grpc_error *error) {
  server *sv = arg;
  int fd;
  int flags;
  session *se;
  struct sockaddr_storage ss;
  socklen_t slen = sizeof(ss);
  grpc_fd *listen_em_fd = sv->em_fd;

  if (error != GRPC_ERROR_NONE) {
    listen_shutdown_cb(exec_ctx, arg, 1);
    return;
  }

  fd = accept(grpc_fd_wrapped_fd(listen_em_fd), (struct sockaddr *)&ss, &slen);
  GPR_ASSERT(fd >= 0);
  GPR_ASSERT(fd < FD_SETSIZE);
  flags = fcntl(fd, F_GETFL, 0);
  fcntl(fd, F_SETFL, flags | O_NONBLOCK);
  se = gpr_malloc(sizeof(*se));
  se->sv = sv;
  se->em_fd = grpc_fd_create(fd, "listener");
  grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
  GRPC_CLOSURE_INIT(&se->session_read_closure, session_read_cb, se,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);

  grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
                           grpc_pollset **pollsets, size_t pollset_count,
                           grpc_tcp_server_cb on_accept_cb,
                           void *on_accept_cb_arg) {
  size_t i;
  grpc_tcp_listener *sp;
  GPR_ASSERT(on_accept_cb);
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(!s->on_accept_cb);
  GPR_ASSERT(s->active_ports == 0);
  s->on_accept_cb = on_accept_cb;
  s->on_accept_cb_arg = on_accept_cb_arg;
  s->pollsets = pollsets;
  s->pollset_count = pollset_count;
  for (sp = s->head; sp; sp = sp->next) {
    for (i = 0; i < pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
    }
    sp->read_closure.cb = on_read;
    sp->read_closure.cb_arg = sp;
    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
    s->active_ports++;
  }
  gpr_mu_unlock(&s->mu);
}
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
                           grpc_pollset **pollsets, size_t pollset_count,
                           void *user_data) {
  size_t i;
  gpr_mu_lock(&s->mu);
  grpc_udp_listener *sp;
  GPR_ASSERT(s->active_ports == 0);
  s->pollsets = pollsets;
  s->user_data = user_data;

  sp = s->head;
  while (sp != NULL) {
    for (i = 0; i < pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
    }
    GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);

    GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);

    /* Registered for both read and write callbacks: increment active_ports
     * twice to account for this, and delay freeing of memory until both
     * on_read and on_write have fired. */
    s->active_ports += 2;

    sp = sp->next;
  }

  gpr_mu_unlock(&s->mu);
}
void grpc_tcp_server_start(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s,
                           grpc_pollset **pollsets, size_t pollset_count,
                           grpc_tcp_server_cb on_accept_cb,
                           void *on_accept_cb_arg) {
  size_t i;
  grpc_tcp_listener *sp;
  GPR_ASSERT(on_accept_cb);
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(!s->on_accept_cb);
  GPR_ASSERT(s->active_ports == 0);
  s->on_accept_cb = on_accept_cb;
  s->on_accept_cb_arg = on_accept_cb_arg;
  s->pollsets = pollsets;
  s->pollset_count = pollset_count;
  sp = s->head;
  while (sp != NULL) {
    if (s->so_reuseport && !grpc_is_unix_socket(&sp->addr) &&
        pollset_count > 1) {
      GPR_ASSERT(GRPC_LOG_IF_ERROR(
          "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
      for (i = 0; i < pollset_count; i++) {
        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
        sp->read_closure.cb = on_read;
        sp->read_closure.cb_arg = sp;
        grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
        s->active_ports++;
        sp = sp->next;
      }
    } else {
      for (i = 0; i < pollset_count; i++) {
        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
      }
      sp->read_closure.cb = on_read;
      sp->read_closure.cb_arg = sp;
      grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
      s->active_ports++;
      sp = sp->next;
    }
  }
  gpr_mu_unlock(&s->mu);
}
void grpc_udp_server_start(grpc_udp_server *s, grpc_pollset **pollsets,
                           size_t pollset_count) {
  size_t i, j;
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(s->active_ports == 0);
  s->pollsets = pollsets;
  for (i = 0; i < s->nports; i++) {
    for (j = 0; j < pollset_count; j++) {
      grpc_pollset_add_fd(pollsets[j], s->ports[i].emfd);
    }
    s->ports[i].read_closure.cb = on_read;
    s->ports[i].read_closure.cb_arg = &s->ports[i];
    grpc_fd_notify_on_read(s->ports[i].emfd, &s->ports[i].read_closure);
    s->active_ports++;
  }
  gpr_mu_unlock(&s->mu);
}
static void test_threading(void) {
  threading_shared shared;
  shared.pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(shared.pollset, &shared.mu);

  gpr_thd_id thds[10];
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&thds[i], test_threading_loop, &shared, &opt);
  }
  grpc_wakeup_fd fd;
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&fd)));
  shared.wakeup_fd = &fd;
  shared.wakeup_desc = grpc_fd_create(fd.read_fd, "wakeup");
  shared.wakeups = 0;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
    grpc_fd_notify_on_read(
        &exec_ctx, shared.wakeup_desc,
        GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                          grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_first",
                               grpc_wakeup_fd_wakeup(shared.wakeup_fd)));
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_join(thds[i]);
  }
  fd.read_fd = 0;
  grpc_wakeup_fd_destroy(&fd);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
    grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, NULL, NULL,
                   false /* already_closed */, "done");
    grpc_pollset_shutdown(&exec_ctx, shared.pollset,
                          GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                              grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  gpr_free(shared.pollset);
}
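/* Aside: the grpc_wakeup_fd exercised above is typically implemented as the
   classic self-pipe trick (gRPC also has an eventfd-based variant on Linux).
   A minimal standalone sketch of the idea, illustrative only and not the
   gRPC implementation: */
#include <fcntl.h>
#include <unistd.h>

static int wakeup_pipe[2]; /* [0] = read end (polled), [1] = write end */

static int wakeup_init(void) {
  if (pipe(wakeup_pipe) != 0) return -1;
  /* Non-blocking, so draining stops cleanly once the pipe is empty. */
  fcntl(wakeup_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl(wakeup_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

static void wakeup(void) {
  char byte = 0;
  (void)write(wakeup_pipe[1], &byte, 1); /* makes the read end readable */
}

static void wakeup_consume(void) {
  char buf[64];
  while (read(wakeup_pipe[0], buf, sizeof(buf)) > 0) {
    /* drain all pending wakeup bytes */
  }
}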
/* Start a test server; return the TCP listening port bound to listen_fd.
   listen_cb() is registered to be interested in reading from listen_fd.
   When a connection request arrives, listen_cb() is called to accept it. */
static int server_start(grpc_exec_ctx *exec_ctx, server *sv) {
  int port = 0;
  int fd;
  struct sockaddr_in sin;
  socklen_t addr_len;

  create_test_socket(port, &fd, &sin);
  addr_len = sizeof(sin);
  GPR_ASSERT(bind(fd, (struct sockaddr *)&sin, addr_len) == 0);
  GPR_ASSERT(getsockname(fd, (struct sockaddr *)&sin, &addr_len) == 0);
  port = ntohs(sin.sin_port);
  GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);

  sv->em_fd = grpc_fd_create(fd, "server");
  grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
  /* Register to be interested in reading from listen_fd. */
  GRPC_CLOSURE_INIT(&sv->listen_closure, listen_cb, sv,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);

  return port;
}
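/* Aside: the ephemeral-port idiom used above, reduced to plain POSIX (a
   minimal sketch with no gRPC dependencies): bind to port 0 and read back
   the kernel-chosen port with getsockname(). */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int bind_ephemeral_port(int fd) {
  struct sockaddr_in sin;
  socklen_t addr_len = sizeof(sin);
  memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  sin.sin_port = htons(0); /* 0 = let the kernel pick a free port */
  if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) != 0) return -1;
  if (getsockname(fd, (struct sockaddr *)&sin, &addr_len) != 0) return -1;
  return ntohs(sin.sin_port); /* the port that was actually bound */
}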
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                  grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {
  size_t i, j;
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
    pollset_set->pollset_capacity =
        GPR_MAX(8, 2 * pollset_set->pollset_capacity);
    pollset_set->pollsets = gpr_realloc(
        pollset_set->pollsets,
        pollset_set->pollset_capacity * sizeof(*pollset_set->pollsets));
  }
  pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
  for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
    if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
      GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
    } else {
      grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
      pollset_set->fds[j++] = pollset_set->fds[i];
    }
  }
  pollset_set->fd_count = j;
  gpr_mu_unlock(&pollset_set->mu);
}
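/* Aside: the capacity-doubling growth above (GPR_MAX(8, 2 * capacity)
   followed by gpr_realloc) is a generic dynamic-array idiom. A standalone
   sketch in plain C, with illustrative names that are not gRPC API: */
#include <stdlib.h>

typedef struct {
  void **items;
  size_t count;
  size_t capacity;
} ptr_array;

static void ptr_array_push(ptr_array *a, void *item) {
  if (a->count == a->capacity) {
    /* Start at 8 slots, then double, amortizing realloc cost to O(1) per
       push. (gpr_realloc aborts on failure; plain realloc would need a
       NULL check in production code.) */
    a->capacity = a->capacity < 8 ? 8 : 2 * a->capacity;
    a->items = realloc(a->items, a->capacity * sizeof(*a->items));
  }
  a->items[a->count++] = item;
}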
static void test_add_fd_to_pollset() {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  test_fd tfds[NUM_FDS];
  int fds[NUM_FDS];
  test_pollset pollsets[NUM_POLLSETS];
  void *expected_pi = NULL;
  int i;

  test_fd_init(tfds, fds, NUM_FDS);
  test_pollset_init(pollsets, NUM_POLLSETS);

  /* Step 1.
   * Create three polling islands (this exercises test cases 1 and 2) with
   * the following configuration:
   *   polling island 0 = { fds: 0,1,2, pollsets: 0 }
   *   polling island 1 = { fds: 3,4, pollsets: 1 }
   *   polling island 2 = { fds: 5,6,7, pollsets: 2 }
   *
   * Step 2.
   * Add pollset 3 to polling island 0 by adding fds 0 and 1 to pollset 3
   * (this exercises test cases 3 and 4.1). The configuration becomes:
   *   polling island 0 = { fds: 0,1,2, pollsets: 0,3 }  <<< pollset 3 added
   *   polling island 1 = { fds: 3,4, pollsets: 1 }
   *   polling island 2 = { fds: 5,6,7, pollsets: 2 }
   *
   * Step 3.
   * Merge polling islands 0 and 1 by adding fd 0 to pollset 1 (this
   * exercises test case 4.2). The configuration becomes:
   *   polling island (merged) = { fds: 0,1,2,3,4, pollsets: 0,1,3 }
   *   polling island 2 = { fds: 5,6,7, pollsets: 2 }
   *
   * Step 4.
   * Finally do one more merge by adding fd 3 to pollset 2:
   *   polling island (merged) = { fds: 0,1,2,3,4,5,6,7, pollsets: 0,1,2,3 }
   */

  /* == Step 1 == */
  for (i = 0; i <= 2; i++) {
    grpc_pollset_add_fd(&exec_ctx, pollsets[0].pollset, tfds[i].fd);
    grpc_exec_ctx_flush(&exec_ctx);
  }
  for (i = 3; i <= 4; i++) {
    grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[i].fd);
    grpc_exec_ctx_flush(&exec_ctx);
  }
  for (i = 5; i <= 7; i++) {
    grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[i].fd);
    grpc_exec_ctx_flush(&exec_ctx);
  }

  /* == Step 2 == */
  for (i = 0; i <= 1; i++) {
    grpc_pollset_add_fd(&exec_ctx, pollsets[3].pollset, tfds[i].fd);
    grpc_exec_ctx_flush(&exec_ctx);
  }

  /* == Step 3 == */
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[0].fd);
  grpc_exec_ctx_flush(&exec_ctx);

  /* == Step 4 == */
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[3].fd);
  grpc_exec_ctx_flush(&exec_ctx);

  /* All polling islands are merged at this point. */

  /* Compare fd 0's polling island with that of all other fds. */
  expected_pi = grpc_fd_get_polling_island(tfds[0].fd);
  for (i = 1; i < NUM_FDS; i++) {
    GPR_ASSERT(grpc_are_polling_islands_equal(
        expected_pi, grpc_fd_get_polling_island(tfds[i].fd)));
  }

  /* Compare fd 0's polling island with that of all pollsets. */
  for (i = 0; i < NUM_POLLSETS; i++) {
    GPR_ASSERT(grpc_are_polling_islands_equal(
        expected_pi, grpc_pollset_get_polling_island(pollsets[i].pollset)));
  }

  test_fd_cleanup(&exec_ctx, tfds, NUM_FDS);
  test_pollset_cleanup(&exec_ctx, pollsets, NUM_POLLSETS);
  grpc_exec_ctx_finish(&exec_ctx);
}
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
                                       sp->fd_index};
  grpc_pollset *read_notifier_pollset = NULL;
  grpc_fd *fdobj;

  if (err != GRPC_ERROR_NONE) {
    goto error;
  }

  read_notifier_pollset =
      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
                               &sp->server->next_pollset_to_assign, 1) %
                           sp->server->pollset_count];

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);

    if (read_notifier_pollset == NULL) {
      gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
      goto error;
    }

    grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);

    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
        read_notifier_pollset, &acceptor);

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_tcp_listener *sp = arg;
  grpc_fd *fdobj;
  size_t i;

  if (!success) {
    goto error;
  }

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);
    /* TODO(ctiller): revise this when we have server-side sharding of
       channels -- we certainly should not be automatically adding every
       incoming channel to every pollset owned by the server */
    for (i = 0; i < sp->server->pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
    }
    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str));

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
grpc_fd *grpc_fd_create(int fd, const char *name) {
  grpc_fd *r = alloc_fd(fd);
  grpc_iomgr_register_object(&r->iomgr_object, name);
  grpc_pollset_add_fd(grpc_backup_pollset(), r);
  return r;
}
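/* Note the contrast with the exec_ctx-based snippets elsewhere in this
   section: this legacy grpc_fd_create registers every new fd with a shared
   backup pollset itself, whereas the newer code leaves the
   grpc_pollset_add_fd call to each caller. */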
/* Test some typical scenarios in pollset_set */
static void pollset_set_test_basic() {
  /* We construct the following structure for this test:
   *
   *        +---> FD0 (Added before PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        +---> FD5 (Added after PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        |
   *        |           +---> FD1 (Added before PSS1 is added to PSS0)
   *        |           |
   *        |           +---> FD6 (Added after PSS1 is added to PSS0)
   *        |           |
   *        +---> PSS1--+           +--> FD2 (Added before PS0 is added to PSS1)
   *        |           |           |
   *        |           +---> PS0---+
   *        |                       |
   * PSS0---+                       +--> FD7 (Added after PS0 is added to PSS1)
   *        |
   *        |
   *        |           +---> FD3 (Added before PS1 is added to PSS0)
   *        |           |
   *        +---> PS1---+
   *        |           |
   *        |           +---> FD8 (Added after PS1 is added to PSS0)
   *        |
   *        |
   *        |           +---> FD4 (Added before PS2 is added to PSS0)
   *        |           |
   *        +---> PS2---+
   *                    |
   *                    +---> FD9 (Added after PS2 is added to PSS0)
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[10];
  test_pollset pollsets[3];
  test_pollset_set pollset_sets[2];
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = GPR_ARRAY_SIZE(pollsets);
  const int num_pss = GPR_ARRAY_SIZE(pollset_sets);

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(pollsets, num_ps);
  init_test_pollset_sets(pollset_sets, num_pss);

  /* Construct the pollset_set/pollset/fd tree (see diagram above) */
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[2].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[3].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[4].fd);

  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[7].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[8].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[9].fd);

  grpc_exec_ctx_flush(&exec_ctx);

  /* Test that if any FD in the above structure is readable, it is observable
   * by doing grpc_pollset_work() on any pollset.
   *
   * For every pollset, do the following:
   *  - (Ensure that all FDs are in the reset state.)
   *  - Make all FDs readable.
   *  - Call grpc_pollset_work() on the pollset.
   *  - Flush the exec_ctx.
   *  - Verify that the on_readable callback was called for all FDs (and
   *    reset the FDs).
   */
  for (int i = 0; i < num_ps; i++) {
    make_test_fds_readable(tfds, num_fds);

    gpr_mu_lock(pollsets[i].mu);
    deadline = grpc_timeout_milliseconds_to_deadline(2);
    GPR_ASSERT(GRPC_ERROR_NONE ==
               grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker,
                                 gpr_now(GPR_CLOCK_MONOTONIC), deadline));
    gpr_mu_unlock(pollsets[i].mu);

    grpc_exec_ctx_flush(&exec_ctx);

    verify_readable_and_reset(&exec_ctx, tfds, num_fds);
    grpc_exec_ctx_flush(&exec_ctx);
  }

  /* Test tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);
  grpc_exec_ctx_flush(&exec_ctx);

  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}
static void tcp_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                               grpc_pollset *pollset) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
}
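/* Aside: tcp_add_to_pollset is the TCP endpoint's vtable hook; callers reach
   it through the generic endpoint API rather than invoking it directly. A
   hedged sketch of a call site (assuming the grpc_endpoint_add_to_pollset
   wrapper of this era of the codebase): */
void attach_endpoint(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_pollset *pollset) {
  /* Dispatches through ep->vtable, reaching tcp_add_to_pollset when ep is a
     TCP endpoint. */
  grpc_endpoint_add_to_pollset(exec_ctx, ep, pollset);
}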
/* Test that changing the callback we use for notify_on_read actually works.
   Note that we have two different but almost identical callbacks above -- the
   point is to have two different function pointers and two different data
   pointers and make sure that changing both really works. */
static void test_grpc_fd_change(void) {
  grpc_fd *em_fd;
  fd_change_data a, b;
  int flags;
  int sv[2];
  char data;
  ssize_t result;
  grpc_closure first_closure;
  grpc_closure second_closure;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_CLOSURE_INIT(&first_closure, first_read_callback, &a,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&second_closure, second_read_callback, &b,
                    grpc_schedule_on_exec_ctx);

  init_change_data(&a);
  init_change_data(&b);

  GPR_ASSERT(socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0);
  flags = fcntl(sv[0], F_GETFL, 0);
  GPR_ASSERT(fcntl(sv[0], F_SETFL, flags | O_NONBLOCK) == 0);
  flags = fcntl(sv[1], F_GETFL, 0);
  GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);

  em_fd = grpc_fd_create(sv[0], "test_grpc_fd_change");
  grpc_pollset_add_fd(&exec_ctx, g_pollset, em_fd);

  /* Register the first callback, then make its FD readable */
  grpc_fd_notify_on_read(&exec_ctx, em_fd, &first_closure);
  data = 0;
  result = write(sv[1], &data, 1);
  GPR_ASSERT(result == 1);

  /* And now wait for it to run. */
  gpr_mu_lock(g_mu);
  while (a.cb_that_ran == NULL) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_inf_future(GPR_CLOCK_MONOTONIC))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(a.cb_that_ran == first_read_callback);
  gpr_mu_unlock(g_mu);

  /* And drain the socket so we can generate a new read edge */
  result = read(sv[0], &data, 1);
  GPR_ASSERT(result == 1);

  /* Now register a second callback with distinct change data, and do the same
     thing again. */
  grpc_fd_notify_on_read(&exec_ctx, em_fd, &second_closure);
  data = 0;
  result = write(sv[1], &data, 1);
  GPR_ASSERT(result == 1);

  gpr_mu_lock(g_mu);
  while (b.cb_that_ran == NULL) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          gpr_inf_future(GPR_CLOCK_MONOTONIC))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  /* Except now we verify that second_read_callback ran instead */
  GPR_ASSERT(b.cb_that_ran == second_read_callback);
  gpr_mu_unlock(g_mu);

  grpc_fd_orphan(&exec_ctx, em_fd, NULL, NULL, "d");
  grpc_exec_ctx_finish(&exec_ctx);
  destroy_change_data(&a);
  destroy_change_data(&b);
  close(sv[1]);
}
/* Pollset_set with an empty pollset */
void pollset_set_test_empty_pollset() {
  /* We construct the following structure for this test:
   *
   *        +---> PS0 (EMPTY)
   *        |
   *        +---> FD0
   *        |
   * PSS0---+
   *        |          +---> FD1
   *        |          |
   *        +---> PS1--+
   *                   |
   *                   +---> FD2
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[3];
  test_pollset pollsets[2];
  test_pollset_set pollset_set;
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = GPR_ARRAY_SIZE(pollsets);
  const int num_pss = 1;

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(pollsets, num_ps);
  init_test_pollset_sets(&pollset_set, num_pss);

  /* Construct the structure */
  grpc_pollset_set_add_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[1].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[2].fd);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);

  /* Test. Make all FDs readable and make sure that this can be observed by
   * doing grpc_pollset_work() on the empty pollset 'PS0'. */
  make_test_fds_readable(tfds, num_fds);

  gpr_mu_lock(pollsets[0].mu);
  deadline = grpc_timeout_milliseconds_to_deadline(2);
  GPR_ASSERT(GRPC_ERROR_NONE ==
             grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker,
                               gpr_now(GPR_CLOCK_MONOTONIC), deadline));
  gpr_mu_unlock(pollsets[0].mu);

  grpc_exec_ctx_flush(&exec_ctx);

  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
  grpc_exec_ctx_flush(&exec_ctx);

  /* Tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, &pollset_set, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}