Example 1
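/* Kick the worker currently polling the watcher's pollset, under the pollset
   mutex, asking it to re-evaluate its polling set when it wakes up. */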
static void pollset_kick_locked(grpc_fd_watcher *watcher) {
  gpr_mu_lock(GRPC_POLLSET_MU(watcher->pollset));
  GPR_ASSERT(watcher->worker);
  grpc_pollset_kick_ext(watcher->pollset, watcher->worker,
                        GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
  gpr_mu_unlock(GRPC_POLLSET_MU(watcher->pollset));
}
Example 2
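/* Synchronously fetch an OAuth2 token: start the metadata request, then
   drive the pollset until on_oauth2_response marks the request done. */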
char *grpc_test_fetch_oauth2_token_with_credentials(
    grpc_call_credentials *creds) {
  oauth2_request request;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure do_nothing_closure;
  grpc_auth_metadata_context null_ctx = {"", "", NULL, NULL};

  grpc_pollset_init(&request.pollset);
  request.is_done = 0;

  grpc_closure_init(&do_nothing_closure, do_nothing, NULL);

  grpc_call_credentials_get_request_metadata(&exec_ctx, creds, &request.pollset,
                                             null_ctx, on_oauth2_response,
                                             &request);

  grpc_exec_ctx_finish(&exec_ctx);

  gpr_mu_lock(GRPC_POLLSET_MU(&request.pollset));
  while (!request.is_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, &request.pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      gpr_inf_future(GPR_CLOCK_MONOTONIC));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&request.pollset));

  grpc_pollset_shutdown(&exec_ctx, &request.pollset, &do_nothing_closure);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_pollset_destroy(&request.pollset);
  return request.token;
}
Example 3
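/* Issue a plaintext HTTP GET against a local test server and poll until
   on_finish sets g_done. */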
static void test_get(int port) {
  grpc_httpcli_request req;
  char *host;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  g_done = 0;
  gpr_log(GPR_INFO, "test_get");

  gpr_asprintf(&host, "localhost:%d", port);
  gpr_log(GPR_INFO, "requesting from %s", host);

  memset(&req, 0, sizeof(req));
  req.host = host;
  req.path = "/get";
  req.handshaker = &grpc_httpcli_plaintext;

  grpc_httpcli_get(&exec_ctx, &g_context, &g_pollset, &req, n_seconds_time(15),
                   on_finish, (void *)42);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (!g_done) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
  gpr_free(host);
}
Example 4
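/* Issue an HTTPS POST with a small body against a local test server and
   poll until on_finish sets g_done. */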
static void test_post(int port) {
  grpc_httpcli_request req;
  char *host;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  g_done = 0;
  gpr_log(GPR_INFO, "test_post");

  gpr_asprintf(&host, "localhost:%d", port);
  gpr_log(GPR_INFO, "posting to %s", host);

  memset(&req, 0, sizeof(req));
  req.host = host;
  req.ssl_host_override = "foo.test.google.fr";
  req.path = "/post";
  req.handshaker = &grpc_httpcli_ssl;

  grpc_httpcli_post(&exec_ctx, &g_context, &g_pollset, &req, "hello", 5,
                    n_seconds_time(15), on_finish, (void *)42);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  while (!g_done) {
    grpc_pollset_worker *worker = NULL;
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), n_seconds_time(20));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
  gpr_free(host);
}
Example 5
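/* Read handler: count the bytes read; on shutdown, or once the target byte
   count is reached, mark the read side done and kick the pollset, otherwise
   re-register for the next read. */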
static void read_and_write_test_read_handler(void *data, gpr_slice *slices,
                                             size_t nslices,
                                             grpc_endpoint_cb_status error) {
  struct read_and_write_test_state *state = data;
  GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR);
  if (error == GRPC_ENDPOINT_CB_SHUTDOWN) {
    gpr_log(GPR_INFO, "Read handler shutdown");
    gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
    state->read_done = 1;
    grpc_pollset_kick(g_pollset);
    gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
    return;
  }

  state->bytes_read +=
      count_and_unref_slices(slices, nslices, &state->current_read_data);
  if (state->bytes_read == state->target_bytes) {
    gpr_log(GPR_INFO, "Read handler done");
    gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
    state->read_done = 1;
    grpc_pollset_kick(g_pollset);
    gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
  } else {
    grpc_endpoint_notify_on_read(state->read_ep,
                                 read_and_write_test_read_handler, data);
  }
}
Example 6
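/* Temporarily switch fd to blocking mode and read num_bytes from it,
   verifying the expected rotating byte pattern; poll briefly between reads
   so the writing side can make progress. */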
void drain_socket_blocking(int fd, size_t num_bytes, size_t read_size) {
  unsigned char *buf = malloc(read_size);
  ssize_t bytes_read;
  size_t bytes_left = num_bytes;
  int flags;
  int current = 0;
  int i;

  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0);

  for (;;) {
    grpc_pollset_worker worker;
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
    grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                      GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    do {
      bytes_read =
          read(fd, buf, bytes_left > read_size ? read_size : bytes_left);
    } while (bytes_read < 0 && errno == EINTR);
    GPR_ASSERT(bytes_read >= 0);
    for (i = 0; i < bytes_read; ++i) {
      GPR_ASSERT(buf[i] == current);
      current = (current + 1) % 256;
    }
    bytes_left -= bytes_read;
    if (bytes_left == 0) break;
  }
  flags = fcntl(fd, F_GETFL, 0);
  GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);

  gpr_free(buf);
}
Example 7
/* Queue a GRPC_OP_COMPLETED operation */
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, int success,
                    void (*done)(void *done_arg, grpc_cq_completion *storage),
                    void *done_arg, grpc_cq_completion *storage) {
  int shutdown;

  storage->tag = tag;
  storage->done = done;
  storage->done_arg = done_arg;
  storage->next =
      ((gpr_uintptr)&cc->completed_head) | ((gpr_uintptr)(success != 0));

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  shutdown = gpr_unref(&cc->pending_events);
  if (!shutdown) {
    cc->completed_tail->next =
        ((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
    cc->completed_tail = storage;
    grpc_pollset_kick(&cc->pollset);
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  } else {
    cc->completed_tail->next =
        ((gpr_uintptr)storage) | (1u & (gpr_uintptr)cc->completed_tail->next);
    cc->completed_tail = storage;
    GPR_ASSERT(!cc->shutdown);
    GPR_ASSERT(cc->shutdown_called);
    cc->shutdown = 1;
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
    grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
  }
}
Example 8
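/* Force some activity on the completion queue's pollset: kick it and poll
   for up to 100ms while holding its mutex. */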
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  grpc_pollset_kick(&cc->pollset);
  grpc_pollset_work(&cc->pollset,
                    gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example 9
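/* Connect a subchannel synchronously: register for connectivity state
   changes and poll until the channel reports READY, then return the
   connected subchannel. */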
static grpc_connected_subchannel *connect_subchannel(grpc_subchannel *c) {
  grpc_pollset pollset;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_init(&pollset);
  grpc_pollset_set_init(&g_interested_parties);
  grpc_pollset_set_add_pollset(&exec_ctx, &g_interested_parties, &pollset);
  grpc_subchannel_notify_on_state_change(&exec_ctx, c, &g_interested_parties,
                                         &g_state,
                                         grpc_closure_create(state_changed, c));
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
  while (g_state != GRPC_CHANNEL_READY) {
    grpc_pollset_worker worker;
    grpc_pollset_work(&exec_ctx, &pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC),
                      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1));
    gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&pollset));
  }
  grpc_pollset_shutdown(&exec_ctx, &pollset,
                        grpc_closure_create(destroy_pollset, &pollset));
  grpc_pollset_set_destroy(&g_interested_parties);
  gpr_mu_unlock(GRPC_POLLSET_MU(&pollset));
  grpc_exec_ctx_finish(&exec_ctx);
  return grpc_subchannel_get_connected_subchannel(c);
}
Example 10
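/* Block until an event matching the given tag is available, polling or
   waiting on the pollset's condition variable; returns a shutdown event if
   the queue shuts down and NULL if the deadline expires first. */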
grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                        gpr_timespec deadline) {
  event *ev = NULL;

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if ((ev = pluck_event(cc, tag))) {
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}
Example 11
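/* Read handler: count the slices just read; when the target byte count is
   reached or the read failed, record the result and kick the pollset,
   otherwise issue the next read, looping on synchronous completions. */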
static void read_and_write_test_read_handler(void *data, int success) {
  struct read_and_write_test_state *state = data;

loop:
  state->bytes_read += count_slices(
      state->incoming.slices, state->incoming.count, &state->current_read_data);
  if (state->bytes_read == state->target_bytes || !success) {
    gpr_log(GPR_INFO, "Read handler done");
    gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
    state->read_done = 1 + success;
    grpc_pollset_kick(g_pollset, NULL);
    gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
  } else if (success) {
    switch (grpc_endpoint_read(state->read_ep, &state->incoming,
                               &state->done_read)) {
      case GRPC_ENDPOINT_ERROR:
        success = 0;
        goto loop;
      case GRPC_ENDPOINT_DONE:
        success = 1;
        goto loop;
      case GRPC_ENDPOINT_PENDING:
        break;
    }
  }
}
Example 12
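/* Callback that asserts the operation succeeded, sets the flag it was given,
   and kicks the pollset to wake the waiting test thread. */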
static void must_succeed(grpc_exec_ctx *exec_ctx, void *p, int success) {
  GPR_ASSERT(success == 1);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  *(int *)p = 1;
  grpc_pollset_kick(&g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
Example 13
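/* Write handler: after a successful write, queue the next block of data
   until the target byte count is reached; once nothing is left to write (or
   on failure), mark the write side done and kick the pollset. */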
static void read_and_write_test_write_handler(grpc_exec_ctx *exec_ctx,
                                              void *data, bool success) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;

  if (success) {
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size != 0) {
      slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                               &state->current_write_data);
      gpr_slice_buffer_reset_and_unref(&state->outgoing);
      gpr_slice_buffer_addn(&state->outgoing, slices, nslices);
      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
                          &state->done_write);
      free(slices);
      return;
    }
  }

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1 + success;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example 14
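/* Read callback: validate and count the bytes received; if more data is
   expected, issue the next read, re-invoking this callback directly when the
   read completes (or fails) synchronously. */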
static void read_cb(void *user_data, int success) {
  struct read_socket_state *state = (struct read_socket_state *)user_data;
  ssize_t read_bytes;
  int current_data;

  GPR_ASSERT(success);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  current_data = state->read_bytes % 256;
  read_bytes = count_slices(state->incoming.slices, state->incoming.count,
                            &current_data);
  state->read_bytes += read_bytes;
  gpr_log(GPR_INFO, "Read %d bytes of %d", read_bytes,
          state->target_read_bytes);
  if (state->read_bytes >= state->target_read_bytes) {
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
  } else {
    switch (grpc_endpoint_read(state->ep, &state->incoming, &state->read_cb)) {
      case GRPC_ENDPOINT_DONE:
        gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
        read_cb(user_data, 1);
        break;
      case GRPC_ENDPOINT_ERROR:
        gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
        read_cb(user_data, 0);
        break;
      case GRPC_ENDPOINT_PENDING:
        gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
        break;
    }
  }
}
Example 15
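/* Parse the port number from the port server's HTTP response (retrying the
   GET on failure), store it in the request state, and kick the pollset to
   wake the waiter. */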
static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
                                 const grpc_httpcli_response *response) {
  size_t i;
  int port = 0;
  portreq *pr = arg;

  if (!response || response->status != 200) {
    grpc_httpcli_request req;
    memset(&req, 0, sizeof(req));
    GPR_ASSERT(pr->retries < 10);
    pr->retries++;
    req.host = pr->server;
    req.path = "/get";
    gpr_log(GPR_DEBUG, "failed port pick from server: retrying");
    sleep(1);
    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pollset, &req,
                     GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
                     pr);
    return;
  }
  GPR_ASSERT(response);
  GPR_ASSERT(response->status == 200);
  for (i = 0; i < response->body_length; i++) {
    GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
    port = port * 10 + response->body[i] - '0';
  }
  GPR_ASSERT(port > 1024);
  gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
  pr->port = port;
  grpc_pollset_kick(&pr->pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
}
Example 16
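/* JWT verification callback: print the claims on success or the error
   status on failure, then mark the synchronizer done and kick its pollset. */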
static void on_jwt_verification_done(void *user_data,
                                     grpc_jwt_verifier_status status,
                                     grpc_jwt_claims *claims) {
  synchronizer *sync = user_data;

  sync->success = (status == GRPC_JWT_VERIFIER_OK);
  if (sync->success) {
    char *claims_str;
    GPR_ASSERT(claims != NULL);
    claims_str =
        grpc_json_dump_to_string((grpc_json *)grpc_jwt_claims_json(claims), 2);
    printf("Claims: \n\n%s\n", claims_str);
    gpr_free(claims_str);
    grpc_jwt_claims_destroy(claims);
  } else {
    GPR_ASSERT(claims == NULL);
    fprintf(stderr, "Verification failed with error %s\n",
            grpc_jwt_verifier_status_to_string(status));
  }

  gpr_mu_lock(GRPC_POLLSET_MU(&sync->pollset));
  sync->is_done = 1;
  grpc_pollset_kick(&sync->pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&sync->pollset));
}
Example 17
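/* Completion callback for a port-freeing request: mark it done and kick the
   pollset. */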
static void freed_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
                                   const grpc_httpcli_response *response) {
  freereq *pr = arg;
  gpr_mu_lock(GRPC_POLLSET_MU(&pr->pollset));
  pr->done = 1;
  grpc_pollset_kick(&pr->pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&pr->pollset));
}
Example 18
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(ssize_t num_bytes, ssize_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  ssize_t read_bytes;
  size_t num_blocks;
  gpr_slice *slices;
  int current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_iomgr_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_iomgr_closure_init(&write_done_closure, write_done, &state);

  switch (grpc_endpoint_write(ep, &outgoing, &write_done_closure)) {
    case GRPC_ENDPOINT_DONE:
      /* Write completed immediately */
      read_bytes = drain_socket(sv[0]);
      GPR_ASSERT(read_bytes == num_bytes);
      break;
    case GRPC_ENDPOINT_PENDING:
      drain_socket_blocking(sv[0], num_bytes, num_bytes);
      gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
      for (;;) {
        grpc_pollset_worker worker;
        if (state.write_done) {
          break;
        }
        grpc_pollset_work(&g_pollset, &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          deadline);
      }
      gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "endpoint got error");
      abort();
  }

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(ep);
  gpr_free(slices);
}
Example 19
static void shutdown_during_write_test(grpc_endpoint_test_config config,
                                       size_t slice_size) {
  /* test that shutdown with a pending write creates no leaks */
  gpr_timespec deadline;
  size_t size;
  size_t nblocks;
  int current_data = 1;
  shutdown_during_write_test_state read_st;
  shutdown_during_write_test_state write_st;
  gpr_slice *slices;
  grpc_endpoint_test_fixture f =
      begin_test(config, "shutdown_during_write_test", slice_size);

  gpr_log(GPR_INFO, "testing shutdown during a write");

  read_st.ep = f.client_ep;
  write_st.ep = f.server_ep;
  read_st.done = 0;
  write_st.done = 0;

  grpc_endpoint_notify_on_read(
      read_st.ep, shutdown_during_write_test_read_handler, &read_st);
  for (size = 1;; size *= 2) {
    slices = allocate_blocks(size, 1, &nblocks, &current_data);
    switch (grpc_endpoint_write(write_st.ep, slices, nblocks,
                                shutdown_during_write_test_write_handler,
                                &write_st)) {
      case GRPC_ENDPOINT_WRITE_DONE:
        break;
      case GRPC_ENDPOINT_WRITE_ERROR:
        gpr_log(GPR_ERROR, "error writing");
        abort();
      case GRPC_ENDPOINT_WRITE_PENDING:
        grpc_endpoint_shutdown(write_st.ep);
        deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);
        gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
        while (!write_st.done) {
          GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
          grpc_pollset_work(g_pollset, deadline);
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
        grpc_endpoint_destroy(write_st.ep);
        gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
        while (!read_st.done) {
          GPR_ASSERT(gpr_time_cmp(gpr_now(deadline.clock_type), deadline) < 0);
          grpc_pollset_work(g_pollset, deadline);
        }
        gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
        gpr_free(slices);
        end_test(config);
        return;
    }
    gpr_free(slices);
  }

  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
Example 20
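/* Write completion callback: record that the write finished and kick the
   pollset so the waiting test loop can observe it. */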
static void write_done(void *user_data /* write_socket_state */, int success) {
  struct write_socket_state *state = (struct write_socket_state *)user_data;
  gpr_log(GPR_INFO, "Write done callback called");
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  gpr_log(GPR_INFO, "Signalling write done");
  state->write_done = 1;
  grpc_pollset_kick(&g_pollset, NULL);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
Example 21
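/* Accept callback: shut down and destroy the new endpoint immediately, then
   count the connection and kick the pollset. */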
static void on_connect(void *arg, grpc_endpoint *tcp) {
  grpc_endpoint_shutdown(tcp);
  grpc_endpoint_destroy(tcp);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  g_nconnects++;
  grpc_pollset_kick(&g_pollset);
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
}
Example 22
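/* Queue a GRPC_READ event on the completion queue under its pollset mutex. */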
void grpc_cq_end_read(grpc_completion_queue *cc, void *tag, grpc_call *call,
                      grpc_event_finish_func on_finish, void *user_data,
                      grpc_byte_buffer *read) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_READ, tag, call, on_finish, user_data);
  ev->base.data.read = read;
  end_op_locked(cc, GRPC_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example 23
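/* Queue a GRPC_OP_COMPLETE event carrying the operation's error status. */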
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
                    grpc_event_finish_func on_finish, void *user_data,
                    grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_OP_COMPLETE);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example 24
/* Do both reading and writing using the grpc_endpoint API.

   This also includes a test of the shutdown behavior.
 */
static void read_and_write_test(grpc_endpoint_test_config config,
                                size_t num_bytes, size_t write_size,
                                size_t slice_size, int shutdown) {
  struct read_and_write_test_state state;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_endpoint_test_fixture f =
      begin_test(config, "read_and_write_test", slice_size);

  if (shutdown) {
    gpr_log(GPR_INFO, "Start read and write shutdown test");
  } else {
    gpr_log(GPR_INFO, "Start read and write test with %d bytes, slice size %d",
            num_bytes, slice_size);
  }

  state.read_ep = f.client_ep;
  state.write_ep = f.server_ep;
  state.target_bytes = num_bytes;
  state.bytes_read = 0;
  state.current_write_size = write_size;
  state.bytes_written = 0;
  state.read_done = 0;
  state.write_done = 0;
  state.current_read_data = 0;
  state.current_write_data = 0;

  /* Get started by pretending an initial write completed */
  /* NOTE: Sets up initial conditions so we can have the same write handler
     for the first iteration as for later iterations. It does the right thing
     even when bytes_written is unsigned. */
  state.bytes_written -= state.current_write_size;
  read_and_write_test_write_handler(&state, GRPC_ENDPOINT_CB_OK);

  grpc_endpoint_notify_on_read(state.read_ep, read_and_write_test_read_handler,
                               &state);

  if (shutdown) {
    gpr_log(GPR_DEBUG, "shutdown read");
    grpc_endpoint_shutdown(state.read_ep);
    gpr_log(GPR_DEBUG, "shutdown write");
    grpc_endpoint_shutdown(state.write_ep);
  }

  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  while (!state.read_done || !state.write_done) {
    GPR_ASSERT(gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), deadline) < 0);
    grpc_pollset_work(g_pollset, deadline);
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));

  grpc_endpoint_destroy(state.read_ep);
  grpc_endpoint_destroy(state.write_ep);
  end_test(config);
}
Example 25
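/* Run the test server's pollset for up to the given number of seconds. */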
void test_tcp_server_poll(test_tcp_server *server, int seconds) {
  grpc_pollset_worker *worker = NULL;
  gpr_timespec deadline =
      gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                   gpr_time_from_seconds(seconds, GPR_TIMESPAN));
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_mu_lock(GRPC_POLLSET_MU(&server->pollset));
  grpc_pollset_work(&exec_ctx, &server->pollset, &worker,
                    gpr_now(GPR_CLOCK_MONOTONIC), deadline);
  gpr_mu_unlock(GRPC_POLLSET_MU(&server->pollset));
  grpc_exec_ctx_finish(&exec_ctx);
}
Example 26
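/* Write handler: perform inline writes until one returns pending or all
   target bytes have been written; on shutdown or completion, mark the write
   side done and kick the pollset. */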
static void read_and_write_test_write_handler(void *data,
                                              grpc_endpoint_cb_status error) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;
  grpc_endpoint_write_status write_status;

  GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR);

  gpr_log(GPR_DEBUG, "%s: error=%d", "read_and_write_test_write_handler",
          error);

  if (error == GRPC_ENDPOINT_CB_SHUTDOWN) {
    gpr_log(GPR_INFO, "Write handler shutdown");
    gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
    state->write_done = 1;
    grpc_pollset_kick(g_pollset);
    gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
    return;
  }

  for (;;) {
    /* Need to do inline writes until they don't succeed synchronously or we
       finish writing */
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size == 0) {
      break;
    }

    slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                             &state->current_write_data);
    write_status =
        grpc_endpoint_write(state->write_ep, slices, nslices,
                            read_and_write_test_write_handler, state);
    gpr_log(GPR_DEBUG, "write_status=%d", write_status);
    GPR_ASSERT(write_status != GRPC_ENDPOINT_WRITE_ERROR);
    free(slices);
    if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
      return;
    }
  }
  GPR_ASSERT(state->bytes_written == state->target_bytes);

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(GRPC_POLLSET_MU(g_pollset));
  state->write_done = 1;
  grpc_pollset_kick(g_pollset);
  gpr_mu_unlock(GRPC_POLLSET_MU(g_pollset));
}
Example 27
/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  cc->shutdown_called = 1;
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));

  if (gpr_unref(&cc->refs)) {
    gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  }
}
Example 28
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  gpr_slice *slices;
  gpr_uint8 current_data = 0;
  gpr_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start write test with %d bytes, slice size %d", num_bytes,
          slice_size);

  create_sockets(sv);

  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"),
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, &g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  gpr_slice_buffer_init(&outgoing);
  gpr_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  for (;;) {
    grpc_pollset_worker worker;
    if (state.write_done) {
      break;
    }
    grpc_pollset_work(&exec_ctx, &g_pollset, &worker,
                      gpr_now(GPR_CLOCK_MONOTONIC), deadline);
    gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  gpr_slice_buffer_destroy(&outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example 29
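/* Queue a GRPC_CLIENT_METADATA_READ event carrying the received metadata
   elements. */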
void grpc_cq_end_client_metadata_read(grpc_completion_queue *cc, void *tag,
                                      grpc_call *call,
                                      grpc_event_finish_func on_finish,
                                      void *user_data, size_t count,
                                      grpc_metadata *elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_CLIENT_METADATA_READ, tag, call, on_finish,
                  user_data);
  ev->base.data.client_metadata_read.count = count;
  ev->base.data.client_metadata_read.elements = elements;
  end_op_locked(cc, GRPC_CLIENT_METADATA_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example 30
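/* Start a TCP server on an ephemeral port, make n client connections, and
   poll until each one has been accepted by on_connect. */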
static void test_connect(int n) {
  struct sockaddr_storage addr;
  socklen_t addr_len = sizeof(addr);
  int svrfd, clifd;
  grpc_tcp_server *s = grpc_tcp_server_create();
  int nconnects_before;
  gpr_timespec deadline;
  grpc_pollset *pollsets[1];
  int i;
  LOG_TEST("test_connect");
  gpr_log(GPR_INFO, "clients=%d", n);

  memset(&addr, 0, sizeof(addr));
  addr.ss_family = AF_INET;
  GPR_ASSERT(grpc_tcp_server_add_port(s, (struct sockaddr *)&addr, addr_len));

  svrfd = grpc_tcp_server_get_fd(s, 0);
  GPR_ASSERT(svrfd >= 0);
  GPR_ASSERT(getsockname(svrfd, (struct sockaddr *)&addr, &addr_len) == 0);
  GPR_ASSERT(addr_len <= sizeof(addr));

  pollsets[0] = &g_pollset;
  grpc_tcp_server_start(s, pollsets, 1, on_connect, NULL);

  gpr_mu_lock(GRPC_POLLSET_MU(&g_pollset));

  for (i = 0; i < n; i++) {
    deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10);

    nconnects_before = g_nconnects;
    clifd = socket(addr.ss_family, SOCK_STREAM, 0);
    GPR_ASSERT(clifd >= 0);
    gpr_log(GPR_DEBUG, "start connect");
    GPR_ASSERT(connect(clifd, (struct sockaddr *)&addr, addr_len) == 0);

    gpr_log(GPR_DEBUG, "wait");
    while (g_nconnects == nconnects_before &&
           gpr_time_cmp(deadline, gpr_now(GPR_CLOCK_REALTIME)) > 0) {
      grpc_pollset_work(&g_pollset, deadline);
    }
    gpr_log(GPR_DEBUG, "wait done");

    GPR_ASSERT(g_nconnects == nconnects_before + 1);
    close(clifd);
  }

  gpr_mu_unlock(GRPC_POLLSET_MU(&g_pollset));

  grpc_tcp_server_destroy(s, NULL, NULL);
}