Example #1
static void test_scavenge(void) {
  gpr_log(GPR_INFO, "** test_scavenge **");
  grpc_resource_quota *q = grpc_resource_quota_create("test_scavenge");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user *usr1 = grpc_resource_user_create(q, "usr1");
  grpc_resource_user *usr2 = grpc_resource_user_create(q, "usr2");
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, usr1, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, usr2, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(usr1);
  destroy_user(usr2);
}
Example #2
grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
                               grpc_channel_args *channel_args,
                               char *peer_string) {
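  /* Wrap a winsocket in a grpc_endpoint. A default resource quota is created
     up front and replaced by the one carried in GRPC_ARG_RESOURCE_QUOTA if
     the caller supplied it in the channel args. */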
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_ref_internal(
            channel_args->args[i].value.pointer.p);
      }
    }
  }
  grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
  memset(tcp, 0, sizeof(grpc_tcp));
  tcp->base.vtable = &vtable;
  tcp->socket = socket;
  gpr_mu_init(&tcp->mu);
  gpr_ref_init(&tcp->refcount, 1);
  GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  /* Tell network status tracking code about the new endpoint */
  grpc_network_status_register_endpoint(&tcp->base);

  return &tcp->base;
}
Example #3
static void test_resize_then_destroy(void) {
  gpr_log(GPR_INFO, "** test_resize_then_destroy **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_resize_then_destroy");
  grpc_resource_quota_resize(q, 1024 * 1024);
  grpc_resource_quota_unref(q);
}
Example #4
static void test_one_slice(void) {
  gpr_log(GPR_INFO, "** test_one_slice **");

  grpc_resource_quota *q = grpc_resource_quota_create("test_one_slice");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, &usr, inc_int_cb,
                                          &num_allocs);

  gpr_slice_buffer buffer;
  gpr_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  gpr_slice_buffer_destroy(&buffer);
  destroy_user(&usr);
  grpc_resource_quota_unref(q);
}
Example #5
grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *shutdown_complete,
                                   const grpc_channel_args *args,
                                   grpc_tcp_server **server) {
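  /* Allocate the server and pick up GRPC_ARG_RESOURCE_QUOTA from the channel
     args; a non-pointer value for that key frees the server again and is
     rejected with an error. */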
  grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
  s->resource_quota = grpc_resource_quota_create(NULL);
  for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
    if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
      if (args->args[i].type == GRPC_ARG_POINTER) {
        grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
        s->resource_quota =
            grpc_resource_quota_ref_internal(args->args[i].value.pointer.p);
      } else {
        grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
        gpr_free(s);
        return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
            GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
      }
    }
  }
  gpr_ref_init(&s->refs, 1);
  s->on_accept_cb = NULL;
  s->on_accept_cb_arg = NULL;
  s->open_ports = 0;
  s->head = NULL;
  s->tail = NULL;
  s->shutdown_starting.head = NULL;
  s->shutdown_starting.tail = NULL;
  s->shutdown_complete = shutdown_complete;
  *server = s;
  return GRPC_ERROR_NONE;
}
Example #6
static void test_resource_user_stays_allocated_until_memory_released(void) {
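  /* A resource user with memory still allocated must not finish shutting
     down until that memory is returned via grpc_resource_user_free. */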
  gpr_log(GPR_INFO,
          "** test_resource_user_stays_allocated_until_memory_released **");
  grpc_resource_quota *q = grpc_resource_quota_create(
      "test_resource_user_stays_allocated_until_memory_released");
  grpc_resource_quota_resize(q, 1024 * 1024);
  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "usr");
  bool done = false;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr, 1024, NULL);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_quota_unref(q);
    grpc_resource_user_shutdown(&exec_ctx, &usr, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(!done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_destroy(&exec_ctx, &usr);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #7
grpc_endpoint *grpc_tcp_client_create_from_fd(
    grpc_exec_ctx *exec_ctx, grpc_fd *fd, const grpc_channel_args *channel_args,
    const char *addr_str) {
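  /* Build an endpoint from an already-connected fd, honoring the
     GRPC_ARG_TCP_READ_CHUNK_SIZE and GRPC_ARG_RESOURCE_QUOTA channel args. */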
  size_t tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 ==
          strcmp(channel_args->args[i].key, GRPC_ARG_TCP_READ_CHUNK_SIZE)) {
        grpc_integer_options options = {(int)tcp_read_chunk_size, 1,
                                        8 * 1024 * 1024};
        tcp_read_chunk_size = (size_t)grpc_channel_arg_get_integer(
            &channel_args->args[i], options);
      } else if (0 ==
                 strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_internal_ref(
            channel_args->args[i].value.pointer.p);
      }
    }
  }

  grpc_endpoint *ep =
      grpc_tcp_create(fd, resource_quota, tcp_read_chunk_size, addr_str);
  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
  return ep;
}
Example #8
static void test_unused_reclaim_is_cancelled(void) {
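  /* Post benign and destructive reclaimers without ever creating memory
     pressure; their closures should only run (as cancellations) once the
     resource user is torn down. */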
  gpr_log(GPR_INFO, "** test_unused_reclaim_is_cancelled **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_unused_reclaim_is_cancelled");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "usr");
  bool benign_done = false;
  bool destructive_done = false;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_post_reclaimer(
        &exec_ctx, &usr, false, make_unused_reclaimer(set_bool(&benign_done)));
    grpc_resource_user_post_reclaimer(
        &exec_ctx, &usr, true,
        make_unused_reclaimer(set_bool(&destructive_done)));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(!benign_done);
    GPR_ASSERT(!destructive_done);
  }
  grpc_resource_quota_unref(q);
  destroy_user(&usr);
  GPR_ASSERT(benign_done);
  GPR_ASSERT(destructive_done);
}
Example #9
static void test_resource_user_no_op(void) {
  gpr_log(GPR_INFO, "** test_resource_user_no_op **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_resource_user_no_op");
  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
  grpc_resource_quota_unref(q);
  destroy_user(usr);
}
Example #10
/* Write to a socket using the grpc_tcp API, then drain it directly.
   Note that if the write does not complete immediately we need to drain the
   socket in parallel with the read. */
static void write_test(size_t num_bytes, size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct write_socket_state state;
  size_t num_blocks;
  grpc_slice *slices;
  uint8_t current_data = 0;
  grpc_slice_buffer outgoing;
  grpc_closure write_done_closure;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO,
          "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
          num_bytes, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("write_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), resource_quota,
                       GRPC_TCP_DEFAULT_READ_SLICE_SIZE, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  state.ep = ep;
  state.write_done = 0;

  slices = allocate_blocks(num_bytes, slice_size, &num_blocks, &current_data);

  grpc_slice_buffer_init(&outgoing);
  grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
  grpc_closure_init(&write_done_closure, write_done, &state,
                    grpc_schedule_on_exec_ctx);

  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
  drain_socket_blocking(sv[0], num_bytes, num_bytes);
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    if (state.write_done) {
      break;
    }
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
  grpc_endpoint_destroy(&exec_ctx, ep);
  gpr_free(slices);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #11
static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
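  /* HTTP callback for the port server: on failure, retry /get with
     randomized exponential backoff; on success, parse the decimal port from
     the response body and kick the waiting pollset. */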
  size_t i;
  int port = 0;
  portreq *pr = arg;
  int failed = 0;
  grpc_httpcli_response *response = &pr->response;

  if (error != GRPC_ERROR_NONE) {
    failed = 1;
    const char *msg = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "failed port pick from server: retrying [%s]", msg);
    grpc_error_free_string(msg);
  } else if (response->status != 200) {
    failed = 1;
    gpr_log(GPR_DEBUG, "failed port pick from server: status=%d",
            response->status);
  }

  if (failed) {
    grpc_httpcli_request req;
    memset(&req, 0, sizeof(req));
    GPR_ASSERT(pr->retries < 10);
    gpr_sleep_until(gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME),
        gpr_time_from_millis(
            (int64_t)(1000.0 * (1 + pow(1.3, pr->retries) * rand() / RAND_MAX)),
            GPR_TIMESPAN)));
    pr->retries++;
    req.host = pr->server;
    req.http.path = "/get";
    grpc_http_response_destroy(&pr->response);
    memset(&pr->response, 0, sizeof(pr->response));
    grpc_resource_quota *resource_quota =
        grpc_resource_quota_create("port_server_client/pick_retry");
    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
                     GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                     grpc_closure_create(got_port_from_server, pr),
                     &pr->response);
    grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
    return;
  }
  GPR_ASSERT(response);
  GPR_ASSERT(response->status == 200);
  for (i = 0; i < response->body_length; i++) {
    GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
    port = port * 10 + response->body[i] - '0';
  }
  GPR_ASSERT(port > 1024);
  gpr_mu_lock(pr->mu);
  pr->port = port;
  GRPC_LOG_IF_ERROR(
      "pollset_kick",
      grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), NULL));
  gpr_mu_unlock(pr->mu);
}
Example #12
static void
test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_released(
    void) {
  gpr_log(GPR_INFO,
          "** "
          "test_resource_user_stays_allocated_and_reclaimers_unrun_until_"
          "memory_released **");
  grpc_resource_quota *q = grpc_resource_quota_create(
      "test_resource_user_stays_allocated_and_reclaimers_unrun_until_memory_"
      "released");
  grpc_resource_quota_resize(q, 1024);
  for (int i = 0; i < 10; i++) {
    grpc_resource_user usr;
    grpc_resource_user_init(&usr, q, "usr");
    bool done = false;
    bool reclaimer_cancelled = false;
    {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_post_reclaimer(
          &exec_ctx, &usr, false,
          make_unused_reclaimer(set_bool(&reclaimer_cancelled)));
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(!reclaimer_cancelled);
    }
    {
      bool allocated = false;
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(allocated);
      GPR_ASSERT(!reclaimer_cancelled);
    }
    {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_shutdown(&exec_ctx, &usr, set_bool(&done));
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(!done);
      GPR_ASSERT(!reclaimer_cancelled);
    }
    {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_free(&exec_ctx, &usr, 1024);
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(done);
      GPR_ASSERT(reclaimer_cancelled);
    }
    {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_destroy(&exec_ctx, &usr);
      grpc_exec_ctx_finish(&exec_ctx);
    }
  }
  grpc_resource_quota_unref(q);
}
Example #13
void grpc_free_port_using_server(char *server, int port) {
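  /* Ask the port server to release a previously picked port by issuing an
     HTTP GET to /drop/<port>, then poll until the request completes. */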
  grpc_httpcli_context context;
  grpc_httpcli_request req;
  grpc_httpcli_response rsp;
  freereq pr;
  char *path;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure *shutdown_closure;

  grpc_init();

  memset(&pr, 0, sizeof(pr));
  memset(&req, 0, sizeof(req));
  memset(&rsp, 0, sizeof(rsp));

  grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(pollset, &pr.mu);
  pr.pops = grpc_polling_entity_create_from_pollset(pollset);
  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);

  req.host = server;
  gpr_asprintf(&path, "/drop/%d", port);
  req.http.path = path;

  grpc_httpcli_context_init(&context);
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("port_server_client/free");
  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                   grpc_closure_create(freed_port_from_server, &pr), &rsp);
  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
  gpr_mu_lock(pr.mu);
  while (!pr.done) {
    grpc_pollset_worker *worker = NULL;
    if (!GRPC_LOG_IF_ERROR(
            "pollset_work",
            grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
                              &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                              GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)))) {
      pr.done = 1;
    }
  }
  gpr_mu_unlock(pr.mu);

  grpc_httpcli_context_destroy(&context);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
                        shutdown_closure);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_free(path);
  grpc_http_response_destroy(&rsp);
}
Example #14
int grpc_pick_port_using_server(char *server) {
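  /* Request a free port from the port server via HTTP GET /get, then poll
     until got_port_from_server has stored the result in pr.port. */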
  grpc_httpcli_context context;
  grpc_httpcli_request req;
  portreq pr;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure *shutdown_closure;

  grpc_init();

  memset(&pr, 0, sizeof(pr));
  memset(&req, 0, sizeof(req));
  grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(pollset, &pr.mu);
  pr.pops = grpc_polling_entity_create_from_pollset(pollset);
  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);
  pr.port = -1;
  pr.server = server;
  pr.ctx = &context;

  req.host = server;
  req.http.path = "/get";

  grpc_httpcli_context_init(&context);
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("port_server_client/pick");
  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                   grpc_closure_create(got_port_from_server, &pr),
                   &pr.response);
  grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
  grpc_exec_ctx_finish(&exec_ctx);
  gpr_mu_lock(pr.mu);
  while (pr.port == -1) {
    grpc_pollset_worker *worker = NULL;
    if (!GRPC_LOG_IF_ERROR(
            "pollset_work",
            grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
                              &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                              GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1)))) {
      pr.port = 0;
    }
  }
  gpr_mu_unlock(pr.mu);

  grpc_http_response_destroy(&pr.response);
  grpc_httpcli_context_destroy(&context);
  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
                        shutdown_closure);
  grpc_exec_ctx_finish(&exec_ctx);

  return pr.port;
}
Example #15
static void on_openid_config_retrieved(grpc_exec_ctx *exec_ctx, void *user_data,
                                       grpc_error *error) {
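  /* Callback for the OpenID discovery document: extract jwks_uri, require an
     https scheme, then fetch the key set using a fresh resource quota. */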
  const grpc_json *cur;
  verifier_cb_ctx *ctx = (verifier_cb_ctx *)user_data;
  const grpc_http_response *response = &ctx->responses[HTTP_RESPONSE_OPENID];
  grpc_json *json = json_from_http(response);
  grpc_httpcli_request req;
  const char *jwks_uri;

  /* TODO(jboeuf): Cache the jwks_uri in order to avoid this hop next time. */
  if (json == NULL) goto error;
  cur = find_property_by_name(json, "jwks_uri");
  if (cur == NULL) {
    gpr_log(GPR_ERROR, "Could not find jwks_uri in openid config.");
    goto error;
  }
  jwks_uri = validate_string_field(cur, "jwks_uri");
  if (jwks_uri == NULL) goto error;
  if (strstr(jwks_uri, "https://") != jwks_uri) {
    gpr_log(GPR_ERROR, "Invalid non https jwks_uri: %s.", jwks_uri);
    goto error;
  }
  jwks_uri += 8;
  req.handshaker = &grpc_httpcli_ssl;
  req.host = gpr_strdup(jwks_uri);
  req.http.path = strchr(jwks_uri, '/');
  if (req.http.path == NULL) {
    req.http.path = "";
  } else {
    *(req.host + (req.http.path - jwks_uri)) = '\0';
  }

  /* TODO(ctiller): Carry the resource_quota in ctx and share it with the host
     channel. This would allow us to cancel an authentication query when under
     extreme memory pressure. */
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("jwt_verifier");
  grpc_httpcli_get(
      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
      gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
      grpc_closure_create(on_keys_retrieved, ctx),
      &ctx->responses[HTTP_RESPONSE_KEYS]);
  grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
  grpc_json_destroy(json);
  gpr_free(req.host);
  return;

error:
  if (json != NULL) grpc_json_destroy(json);
  ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, NULL);
  verifier_cb_ctx_destroy(ctx);
}
Example #16
grpc_error *grpc_tcp_server_create(grpc_exec_ctx *exec_ctx,
                                   grpc_closure *shutdown_complete,
                                   const grpc_channel_args *args,
                                   grpc_tcp_server **server) {
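  /* Variant of the server constructor that also honors
     GRPC_ARG_ALLOW_REUSEPORT (integer) alongside GRPC_ARG_RESOURCE_QUOTA
     (pointer); a bad argument type frees the server and returns an error. */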
  gpr_once_init(&check_init, init);

  grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
  s->so_reuseport = has_so_reuseport;
  s->resource_quota = grpc_resource_quota_create(NULL);
  for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
    if (0 == strcmp(GRPC_ARG_ALLOW_REUSEPORT, args->args[i].key)) {
      if (args->args[i].type == GRPC_ARG_INTEGER) {
        s->so_reuseport =
            has_so_reuseport && (args->args[i].value.integer != 0);
      } else {
        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
        gpr_free(s);
        return GRPC_ERROR_CREATE(GRPC_ARG_ALLOW_REUSEPORT
                                 " must be an integer");
      }
    } else if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
      if (args->args[i].type == GRPC_ARG_POINTER) {
        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
        s->resource_quota =
            grpc_resource_quota_internal_ref(args->args[i].value.pointer.p);
      } else {
        grpc_resource_quota_internal_unref(exec_ctx, s->resource_quota);
        gpr_free(s);
        return GRPC_ERROR_CREATE(GRPC_ARG_RESOURCE_QUOTA
                                 " must be a pointer to a buffer pool");
      }
    }
  }
  gpr_ref_init(&s->refs, 1);
  gpr_mu_init(&s->mu);
  s->active_ports = 0;
  s->destroyed_ports = 0;
  s->shutdown = false;
  s->shutdown_starting.head = NULL;
  s->shutdown_starting.tail = NULL;
  s->shutdown_complete = shutdown_complete;
  s->on_accept_cb = NULL;
  s->on_accept_cb_arg = NULL;
  s->head = NULL;
  s->tail = NULL;
  s->nports = 0;
  gpr_atm_no_barrier_store(&s->next_pollset_to_assign, 0);
  *server = s;
  return GRPC_ERROR_NONE;
}
Example #17
grpc_resource_quota *grpc_resource_quota_from_channel_args(
    const grpc_channel_args *channel_args) {
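  /* Return the quota stored under GRPC_ARG_RESOURCE_QUOTA (taking a ref),
     or create a fresh anonymous quota when the args do not carry one. */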
  for (size_t i = 0; i < channel_args->num_args; i++) {
    if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
      if (channel_args->args[i].type == GRPC_ARG_POINTER) {
        return grpc_resource_quota_internal_ref(
            channel_args->args[i].value.pointer.p);
      } else {
        gpr_log(GPR_DEBUG, GRPC_ARG_RESOURCE_QUOTA " should be a pointer");
      }
    }
  }
  return grpc_resource_quota_create(NULL);
}
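The helper above is the lookup half of a pattern used by most constructors in this listing: they check the channel args for GRPC_ARG_RESOURCE_QUOTA. For orientation, the snippet below sketches how a caller could attach a quota to channel args in the first place. It is not taken from this listing; it assumes the public grpc_resource_quota_arg_vtable() accessor and grpc_insecure_channel_create() from the C core of the same era, and the exact plumbing may differ between releases.

/* Hedged sketch: attach a resource quota to channel args so helpers such as
   grpc_resource_quota_from_channel_args() can find it. Assumes
   grpc_resource_quota_arg_vtable() and grpc_insecure_channel_create(). */
grpc_resource_quota *rq = grpc_resource_quota_create("my_quota");
grpc_resource_quota_resize(rq, 16 * 1024 * 1024);

grpc_arg quota_arg;
quota_arg.type = GRPC_ARG_POINTER;
quota_arg.key = GRPC_ARG_RESOURCE_QUOTA;
quota_arg.value.pointer.p = rq;
quota_arg.value.pointer.vtable = grpc_resource_quota_arg_vtable();

grpc_channel_args args = {1, &quota_arg};
grpc_channel *channel =
    grpc_insecure_channel_create("localhost:50051", &args, NULL);

/* The channel copies the args (taking its own ref through the arg vtable),
   so our reference can be dropped once the channel exists. */
grpc_resource_quota_unref(rq);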
Example #18
static void test_instant_alloc_free_pair(void) {
  gpr_log(GPR_INFO, "** test_instant_alloc_free_pair **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_instant_alloc_free_pair");
  grpc_resource_quota_resize(q, 1024 * 1024);
  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr, 1024, NULL);
    grpc_resource_user_free(&exec_ctx, usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(usr);
}
Example #19
static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
    grpc_channel_args *client_args, grpc_channel_args *server_args) {
  grpc_endpoint_pair *sfd = gpr_malloc(sizeof(grpc_endpoint_pair));

  grpc_end2end_test_fixture f;
  memset(&f, 0, sizeof(f));
  f.fixture_data = sfd;
  f.cq = grpc_completion_queue_create(NULL);

  grpc_resource_quota *resource_quota = grpc_resource_quota_create("fixture");
  *sfd = grpc_iomgr_create_endpoint_pair("fixture", resource_quota, 1);
  grpc_resource_quota_unref(resource_quota);

  return f;
}
Example #20
/* Write to a socket until it fills up, then read from it using the grpc_tcp
   API. */
static void large_read_test(size_t slice_size) {
  int sv[2];
  grpc_endpoint *ep;
  struct read_socket_state state;
  ssize_t written_bytes;
  gpr_timespec deadline = grpc_timeout_seconds_to_deadline(20);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);

  create_sockets(sv);

  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("large_read_test");
  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), resource_quota,
                       slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);

  written_bytes = fill_socket(sv[0]);
  gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);

  state.ep = ep;
  state.read_bytes = 0;
  state.target_read_bytes = (size_t)written_bytes;
  grpc_slice_buffer_init(&state.incoming);
  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);

  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);

  gpr_mu_lock(g_mu);
  while (state.read_bytes < state.target_read_bytes) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC), deadline)));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  GPR_ASSERT(state.read_bytes == state.target_read_bytes);
  gpr_mu_unlock(g_mu);

  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
  grpc_endpoint_destroy(&exec_ctx, ep);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #21
static void test_multiple_reclaims_can_be_triggered(void) {
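  /* With the quota fully allocated, a second allocation should trigger both
     the benign and the destructive reclaimer in order to be satisfied. */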
  gpr_log(GPR_INFO, "** test_multiple_reclaims_can_be_triggered **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_multiple_reclaims_can_be_triggered");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "usr");
  bool benign_done = false;
  bool destructive_done = false;
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_post_reclaimer(
        &exec_ctx, &usr, false,
        make_reclaimer(&usr, 512, set_bool(&benign_done)));
    grpc_resource_user_post_reclaimer(
        &exec_ctx, &usr, true,
        make_reclaimer(&usr, 512, set_bool(&destructive_done)));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(!benign_done);
    GPR_ASSERT(!destructive_done);
  }
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(benign_done);
    GPR_ASSERT(destructive_done);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(&usr);
  GPR_ASSERT(benign_done);
  GPR_ASSERT(destructive_done);
}
Example #22
static void test_negative_rq_free_pool(void) {
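  /* Shrinking the quota below the amount already allocated leaves the free
     pool negative, so reported memory pressure should sit at 1.0. */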
  gpr_log(GPR_INFO, "** test_negative_rq_free_pool **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_negative_rq_free_pool");
  grpc_resource_quota_resize(q, 1024);

  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");

  grpc_resource_user_slice_allocator alloc;
  int num_allocs = 0;
  grpc_resource_user_slice_allocator_init(&alloc, usr, inc_int_cb, &num_allocs);

  grpc_slice_buffer buffer;
  grpc_slice_buffer_init(&buffer);

  {
    const int start_allocs = num_allocs;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(num_allocs == start_allocs + 1);
  }

  grpc_resource_quota_resize(q, 512);

  double eps = 0.0001;
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) < 1 + eps);
  GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) > 1 - eps);

  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_unref(&exec_ctx, usr);
    grpc_exec_ctx_finish(&exec_ctx);
  }

  grpc_resource_quota_unref(q);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
    grpc_exec_ctx_finish(&exec_ctx);
  }
}
Example #23
static void test_post(int port) {
  grpc_httpcli_request req;
  char *host;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  g_done = 0;
  gpr_log(GPR_INFO, "test_post");

  gpr_asprintf(&host, "localhost:%d", port);
  gpr_log(GPR_INFO, "posting to %s", host);

  memset(&req, 0, sizeof(req));
  req.host = host;
  req.ssl_host_override = "foo.test.google.fr";
  req.http.path = "/post";
  req.handshaker = &grpc_httpcli_ssl;

  grpc_http_response response;
  memset(&response, 0, sizeof(response));
  grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
  grpc_httpcli_post(
      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
      n_seconds_time(15),
      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
      &response);
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  gpr_mu_lock(g_mu);
  while (!g_done) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                          &worker, gpr_now(GPR_CLOCK_MONOTONIC),
                          n_seconds_time(20))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);
  gpr_free(host);
  grpc_http_response_destroy(&response);
}
Example #24
static void test_reclaimers_can_be_posted_repeatedly(void) {
  gpr_log(GPR_INFO, "** test_reclaimers_can_be_posted_repeatedly **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_reclaimers_can_be_posted_repeatedly");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user usr;
  grpc_resource_user_init(&usr, q, "usr");
  {
    bool allocated = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(allocated);
  }
  for (int i = 0; i < 10; i++) {
    bool reclaimer_done = false;
    {
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_post_reclaimer(
          &exec_ctx, &usr, false,
          make_reclaimer(&usr, 1024, set_bool(&reclaimer_done)));
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(!reclaimer_done);
    }
    {
      bool allocated = false;
      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      grpc_resource_user_alloc(&exec_ctx, &usr, 1024, set_bool(&allocated));
      grpc_exec_ctx_finish(&exec_ctx);
      GPR_ASSERT(allocated);
      GPR_ASSERT(reclaimer_done);
    }
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  destroy_user(&usr);
  grpc_resource_quota_unref(q);
}
Example #25
static void test_blocked_until_scheduled_reclaim_and_scavenge(void) {
  gpr_log(GPR_INFO, "** test_blocked_until_scheduled_reclaim_and_scavenge **");
  grpc_resource_quota *q = grpc_resource_quota_create(
      "test_blocked_until_scheduled_reclaim_and_scavenge");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user usr1;
  grpc_resource_user usr2;
  grpc_resource_user_init(&usr1, q, "usr1");
  grpc_resource_user_init(&usr2, q, "usr2");
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr1, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  bool reclaim_done = false;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_post_reclaimer(
        &exec_ctx, &usr1, false,
        make_reclaimer(&usr1, 1024, set_bool(&reclaim_done)));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr2, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(reclaim_done);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr2, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(&usr1);
  destroy_user(&usr2);
}
Example #26
static grpc_endpoint_test_fixture create_fixture_tcp_socketpair(
    size_t slice_size) {
  int sv[2];
  grpc_endpoint_test_fixture f;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  create_sockets(sv);
  grpc_resource_quota *resource_quota =
      grpc_resource_quota_create("tcp_posix_test_socketpair");
  f.client_ep = grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"),
                                resource_quota, slice_size, "test");
  f.server_ep = grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"),
                                resource_quota, slice_size, "test");
  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);

  grpc_exec_ctx_finish(&exec_ctx);

  return f;
}
Example #27
static void test_simple_async_alloc(void) {
  gpr_log(GPR_INFO, "** test_simple_async_alloc **");
  grpc_resource_quota *q =
      grpc_resource_quota_create("test_simple_async_alloc");
  grpc_resource_quota_resize(q, 1024 * 1024);
  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(usr);
}
Example #28
static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *closure, grpc_endpoint **ep,
                                    grpc_pollset_set *interested_parties,
                                    const grpc_channel_args *channel_args,
                                    const grpc_resolved_address *resolved_addr,
                                    gpr_timespec deadline) {
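  /* libuv connect path: resolve the resource quota from the channel args,
     start the asynchronous uv_tcp connect, and arm a timer for the
     connection deadline. */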
  grpc_uv_tcp_connect *connect;
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  (void)channel_args;
  (void)interested_parties;

  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_ref_internal(
            channel_args->args[i].value.pointer.p);
      }
    }
  }

  connect = gpr_malloc(sizeof(grpc_uv_tcp_connect));
  memset(connect, 0, sizeof(grpc_uv_tcp_connect));
  connect->closure = closure;
  connect->endpoint = ep;
  connect->tcp_handle = gpr_malloc(sizeof(uv_tcp_t));
  connect->addr_name = grpc_sockaddr_to_uri(resolved_addr);
  connect->resource_quota = resource_quota;
  uv_tcp_init(uv_default_loop(), connect->tcp_handle);
  connect->connect_req.data = connect;
  // TODO(murgatroid99): figure out what the return value here means
  uv_tcp_connect(&connect->connect_req, connect->tcp_handle,
                 (const struct sockaddr *)resolved_addr->addr,
                 uv_tc_on_connect);
  grpc_closure_init(&connect->on_alarm, uv_tc_on_alarm, connect,
                    grpc_schedule_on_exec_ctx);
  grpc_timer_init(exec_ctx, &connect->alarm,
                  gpr_convert_clock_type(deadline, GPR_CLOCK_MONOTONIC),
                  &connect->on_alarm, gpr_now(GPR_CLOCK_MONOTONIC));
}
Example #29
static void test_scavenge_blocked(void) {
  gpr_log(GPR_INFO, "** test_scavenge_blocked **");
  grpc_resource_quota *q = grpc_resource_quota_create("test_scavenge_blocked");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user usr1;
  grpc_resource_user usr2;
  grpc_resource_user_init(&usr1, q, "usr1");
  grpc_resource_user_init(&usr2, q, "usr2");
  bool done;
  {
    done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr1, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, &usr2, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(!done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr1, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, &usr2, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(&usr1);
  destroy_user(&usr2);
}
Example #30
static void test_blocked_until_scheduled_destructive_reclaim(void) {
  gpr_log(GPR_INFO, "** test_blocked_until_scheduled_destructive_reclaim **");
  grpc_resource_quota *q = grpc_resource_quota_create(
      "test_blocked_until_scheduled_destructive_reclaim");
  grpc_resource_quota_resize(q, 1024);
  grpc_resource_user *usr = grpc_resource_user_create(q, "usr");
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(done);
  }
  bool reclaim_done = false;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_post_reclaimer(
        &exec_ctx, usr, true,
        make_reclaimer(usr, 1024, set_bool(&reclaim_done)));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  {
    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_bool(&done));
    grpc_exec_ctx_finish(&exec_ctx);
    GPR_ASSERT(reclaim_done);
    GPR_ASSERT(done);
  }
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_resource_user_free(&exec_ctx, usr, 1024);
    grpc_exec_ctx_finish(&exec_ctx);
  }
  grpc_resource_quota_unref(q);
  destroy_user(usr);
}