Code Example #1
File: mlog_test.c Project: github188/grpc
// Writes the given number of records of random size (up to kMaxRecordSize) and
// random data to the specified log.
static void writer_thread(void* arg) {
  writer_thread_args* args = (writer_thread_args*)arg;
  // Maximum number of times to spin between writes.
  static const int MAX_SPIN_COUNT = 50;
  int records_written = 0;
  if (VERBOSE) {
    printf("   Writer %d starting\n", args->index);
  }
  while (records_written < args->num_records) {
    records_written += write_records_to_log(args->index, args->record_size,
                                            args->num_records - records_written,
                                            MAX_SPIN_COUNT);
    if (records_written < args->num_records) {
      // Ran out of log space. Sleep for a bit and let the reader catch up.
      // This should never happen for circular logs.
      if (VERBOSE) {
        printf(
            "   Writer %d stalled due to out-of-space: %d out of %d "
            "written\n",
            args->index, records_written, args->num_records);
      }
      gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    }
  }
  // Done. Decrement count and signal.
  gpr_mu_lock(args->mu);
  (*args->count)--;
  gpr_cv_signal(args->done);
  if (VERBOSE) {
    printf("   Writer %d done\n", args->index);
  }
  gpr_mu_unlock(args->mu);
}
Code Example #2
File: census_log_tests.c Project: An-mol/grpc
/* Writes the given number of records of random size (up to kMaxRecordSize) and
   random data to the specified log. */
static void writer_thread(void *arg) {
  writer_thread_args *args = (writer_thread_args *)arg;
  /* Maximum number of times to spin between writes. */
  static const int32_t MAX_SPIN_COUNT = 50;
  int records_written = 0;
  printf("   Writer: %d\n", args->index);
  while (records_written < args->num_records) {
    records_written += write_records_to_log(args->index, args->record_size,
                                            args->num_records - records_written,
                                            MAX_SPIN_COUNT);
    if (records_written < args->num_records) {
      /* Ran out of log space. Sleep for a bit and let the reader catch up.
         This should never happen for circular logs. */
      printf("   Writer stalled due to out-of-space: %d out of %d written\n",
             records_written, args->num_records);
      gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    }
  }
  /* Done. Decrement count and signal. */
  gpr_mu_lock(args->mu);
  (*args->count)--;
  gpr_cv_broadcast(args->done);
  printf("   Writer done: %d\n", args->index);
  gpr_mu_unlock(args->mu);
}
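Both versions of writer_thread above report completion through a shared mutex, counter, and condition variable. A minimal sketch of what the launching side of that handshake typically looks like, assuming the same gpr sync primitives (the helper name wait_for_writers is ours, not taken from the tests):

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* Block until every writer thread has decremented the shared counter. */
static void wait_for_writers(gpr_mu *mu, gpr_cv *done, int *count) {
  gpr_mu_lock(mu);
  while (*count != 0) {
    gpr_cv_wait(done, mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(mu);
}

Note that example #1 wakes a single waiter with gpr_cv_signal while example #2 uses gpr_cv_broadcast; with one thread waiting on the counter, either is sufficient.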
Code Example #3
File: port_server_client.c Project: gnirodi/grpc
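/* HTTP-response callback for a port-server query: on error or a non-200
   status it sleeps with randomized exponential backoff and reissues the GET;
   otherwise it parses the decimal port from the response body and kicks the
   pollset to wake the waiter. */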
static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
                                 grpc_error *error) {
  size_t i;
  int port = 0;
  portreq *pr = arg;
  int failed = 0;
  grpc_httpcli_response *response = &pr->response;

  if (error != GRPC_ERROR_NONE) {
    failed = 1;
    const char *msg = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "failed port pick from server: retrying [%s]", msg);
    grpc_error_free_string(msg);
  } else if (response->status != 200) {
    failed = 1;
    gpr_log(GPR_DEBUG, "failed port pick from server: status=%d",
            response->status);
  }

  if (failed) {
    grpc_httpcli_request req;
    memset(&req, 0, sizeof(req));
    GPR_ASSERT(pr->retries < 10);
    gpr_sleep_until(gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME),
        gpr_time_from_millis(
            (int64_t)(1000.0 * (1 + pow(1.3, pr->retries) * rand() / RAND_MAX)),
            GPR_TIMESPAN)));
    pr->retries++;
    req.host = pr->server;
    req.http.path = "/get";
    grpc_http_response_destroy(&pr->response);
    memset(&pr->response, 0, sizeof(pr->response));
    grpc_resource_quota *resource_quota =
        grpc_resource_quota_create("port_server_client/pick_retry");
    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
                     GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
                     grpc_closure_create(got_port_from_server, pr),
                     &pr->response);
    grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
    return;
  }
  GPR_ASSERT(response);
  GPR_ASSERT(response->status == 200);
  for (i = 0; i < response->body_length; i++) {
    GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
    port = port * 10 + response->body[i] - '0';
  }
  GPR_ASSERT(port > 1024);
  gpr_mu_lock(pr->mu);
  pr->port = port;
  GRPC_LOG_IF_ERROR(
      "pollset_kick",
      grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), NULL));
  gpr_mu_unlock(pr->mu);
}
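The retry path above sleeps for a randomized, exponentially growing interval before querying the port server again: one second plus a uniform random fraction of 1.3^retries seconds. Pulled out into a standalone helper, the deadline computation looks roughly like this (backoff_sleep is a hypothetical name, not part of gRPC):

#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <grpc/support/time.h>

/* Sleep between 1 and 1 + 1.3^retries seconds before the next retry. */
static void backoff_sleep(int retries) {
  double jitter_secs = pow(1.3, retries) * rand() / RAND_MAX; /* 0 .. 1.3^retries */
  gpr_sleep_until(gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME),
      gpr_time_from_millis((int64_t)(1000.0 * (1 + jitter_secs)), GPR_TIMESPAN)));
}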
Code Example #4
File: httpscli_test.c Project: sanatgersappa/grpc
int main(int argc, char **argv) {
  grpc_closure destroyed;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_subprocess *server;
  char *me = argv[0];
  char *lslash = strrchr(me, '/');
  char *args[5];
  int port = grpc_pick_unused_port_or_die();

  GPR_ASSERT(argc <= 2);
  if (argc == 2) {
    args[0] = gpr_strdup(argv[1]);
  } else {
    /* figure out where we are */
    char *root;
    if (lslash) {
      root = gpr_malloc((size_t)(lslash - me + 1));
      memcpy(root, me, (size_t)(lslash - me));
      root[lslash - me] = 0;
    } else {
      root = gpr_strdup(".");
    }
    gpr_asprintf(&args[0], "%s/../../test/core/httpcli/test_server.py", root);
    gpr_free(root);
  }

  /* start the server */
  args[1] = "--port";
  gpr_asprintf(&args[2], "%d", port);
  args[3] = "--ssl";
  server = gpr_subprocess_create(4, (const char **)args);
  GPR_ASSERT(server);
  gpr_free(args[0]);
  gpr_free(args[2]);

  gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                               gpr_time_from_seconds(5, GPR_TIMESPAN)));

  grpc_test_init(argc, argv);
  grpc_init();
  grpc_httpcli_context_init(&g_context);
  grpc_pollset_init(&g_pollset);

  test_get(port);
  test_post(port);

  grpc_httpcli_context_destroy(&g_context);
  grpc_closure_init(&destroyed, destroy_pollset, &g_pollset);
  grpc_pollset_shutdown(&exec_ctx, &g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_shutdown();

  gpr_subprocess_destroy(server);

  return 0;
}
Code Example #5
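/* Spawns GRPC_MAX_COMPLETION_QUEUE_PLUCKERS threads that each pluck a distinct
   tag, then verifies that one additional pluck fails with GRPC_QUEUE_TIMEOUT
   instead of blocking, before completing every tag and joining the threads. */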
static void test_too_many_plucks(void) {
  grpc_event ev;
  grpc_completion_queue *cc;
  void *tags[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
  grpc_cq_completion completions[GPR_ARRAY_SIZE(tags)];
  gpr_thd_id thread_ids[GPR_ARRAY_SIZE(tags)];
  struct thread_state thread_states[GPR_ARRAY_SIZE(tags)];
  gpr_thd_options thread_options = gpr_thd_options_default();
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  unsigned i, j;

  LOG_TEST("test_too_many_plucks");

  cc = grpc_completion_queue_create(NULL);
  gpr_thd_options_set_joinable(&thread_options);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    tags[i] = create_test_tag();
    for (j = 0; j < i; j++) {
      GPR_ASSERT(tags[i] != tags[j]);
    }
    thread_states[i].cc = cc;
    thread_states[i].tag = tags[i];
    gpr_thd_new(thread_ids + i, pluck_one, thread_states + i, &thread_options);
  }

  /* wait until all other threads are plucking */
  gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(1000));

  ev = grpc_completion_queue_pluck(cc, create_test_tag(),
                                   gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
  GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT);

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    grpc_cq_begin_op(cc, tags[i]);
    grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
                   do_nothing_end_completion, NULL, &completions[i]);
  }

  for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
    gpr_thd_join(thread_ids[i]);
  }

  shutdown_and_destroy(cc);
  grpc_exec_ctx_finish(&exec_ctx);
}
Code Example #6
File: port_server_client.c Project: NaughtyCode/grpc
static void got_port_from_server(grpc_exec_ctx *exec_ctx, void *arg,
                                 const grpc_httpcli_response *response) {
  size_t i;
  int port = 0;
  portreq *pr = arg;
  int failed = 0;

  if (!response) {
    failed = 1;
    gpr_log(GPR_DEBUG,
            "failed port pick from server: retrying [response=NULL]");
  } else if (response->status != 200) {
    failed = 1;
    gpr_log(GPR_DEBUG, "failed port pick from server: status=%d",
            response->status);
  }

  if (failed) {
    grpc_httpcli_request req;
    memset(&req, 0, sizeof(req));
    GPR_ASSERT(pr->retries < 10);
    gpr_sleep_until(gpr_time_add(
        gpr_now(GPR_CLOCK_REALTIME),
        gpr_time_from_millis(
            (int64_t)(1000.0 * (1 + pow(1.3, pr->retries) * rand() / RAND_MAX)),
            GPR_TIMESPAN)));
    pr->retries++;
    req.host = pr->server;
    req.http.path = "/get";
    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, &req,
                     GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10), got_port_from_server,
                     pr);
    return;
  }
  GPR_ASSERT(response);
  GPR_ASSERT(response->status == 200);
  for (i = 0; i < response->body_length; i++) {
    GPR_ASSERT(response->body[i] >= '0' && response->body[i] <= '9');
    port = port * 10 + response->body[i] - '0';
  }
  GPR_ASSERT(port > 1024);
  gpr_mu_lock(pr->mu);
  pr->port = port;
  grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), NULL);
  gpr_mu_unlock(pr->mu);
}
Code Example #7
File: trace_test.c Project: Abioy/kythe
static void mimic_trace_op_sequences(void* arg) {
  census_op_id id;
  const char* method_name = "service_foo/method_bar";
  int i = 0;
  const int num_iter = 200;
  thd_arg* args = (thd_arg*)arg;
  GPR_ASSERT(args != NULL);
  gpr_log(GPR_INFO, "Start trace op sequence thread.");
  for (i = 0; i < num_iter; i++) {
    id = census_tracing_start_op();
    census_add_method_tag(id, method_name);
    /* pretend doing 1us work. */
    gpr_sleep_until(GRPC_TIMEOUT_MICROS_TO_DEADLINE(1));
    census_tracing_end_op(id);
  }
  gpr_log(GPR_INFO, "End trace op sequence thread.");
  gpr_mu_lock(&args->mu);
  args->num_done += 1;
  gpr_cv_broadcast(&args->done);
  gpr_mu_unlock(&args->mu);
}
Code Example #8
File: iomgr.c Project: Infixz/grpc
/* Execute followup callbacks continuously.
   Other threads may check in and help during pollset_work() */
static void background_callback_executor(void *ignored) {
  gpr_mu_lock(&g_mu);
  while (!g_shutdown) {
    gpr_timespec deadline = gpr_inf_future;
    gpr_timespec short_deadline =
        gpr_time_add(gpr_now(), gpr_time_from_millis(100));
    if (g_cbs_head) {
      grpc_iomgr_closure *closure = g_cbs_head;
      g_cbs_head = closure->next;
      if (!g_cbs_head) g_cbs_tail = NULL;
      gpr_mu_unlock(&g_mu);
      closure->cb(closure->cb_arg, closure->success);
      gpr_mu_lock(&g_mu);
    } else if (grpc_alarm_check(&g_mu, gpr_now(), &deadline)) {
    } else {
      gpr_mu_unlock(&g_mu);
      gpr_sleep_until(gpr_time_min(short_deadline, deadline));
      gpr_mu_lock(&g_mu);
    }
  }
  gpr_mu_unlock(&g_mu);
  gpr_event_set(&g_background_callback_executor_done, (void *)1);
}
Code Example #9
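/* End-to-end connectivity test: confirms (with a gpr_sleep_until pause) that a
   channel stays IDLE until explicitly asked to connect, then follows its state
   through CONNECTING/TRANSIENT_FAILURE to READY once a server starts, and back
   to failure after the server shuts down. */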
static void test_connectivity(grpc_end2end_test_config config) {
  grpc_end2end_test_fixture f = config.create_fixture(NULL, NULL);
  grpc_connectivity_state state;
  cq_verifier *cqv = cq_verifier_create(f.cq);
  child_events ce;
  gpr_thd_options thdopt = gpr_thd_options_default();
  gpr_thd_id thdid;

  config.init_client(&f, NULL);

  ce.channel = f.client;
  ce.cq = f.cq;
  gpr_event_init(&ce.started);
  gpr_thd_options_set_joinable(&thdopt);
  GPR_ASSERT(gpr_thd_new(&thdid, child_thread, &ce, &thdopt));

  gpr_event_wait(&ce.started, gpr_inf_future(GPR_CLOCK_MONOTONIC));

  /* channels should start life in IDLE, and stay there */
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 0) ==
             GRPC_CHANNEL_IDLE);
  gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(100));
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 0) ==
             GRPC_CHANNEL_IDLE);

  /* start watching for a change */
  gpr_log(GPR_DEBUG, "watching");
  grpc_channel_watch_connectivity_state(
      f.client, GRPC_CHANNEL_IDLE, gpr_now(GPR_CLOCK_MONOTONIC), f.cq, tag(1));

  /* eventually the child thread completion should trigger */
  gpr_thd_join(thdid);

  /* check that we're still in idle, and start connecting */
  GPR_ASSERT(grpc_channel_check_connectivity_state(f.client, 1) ==
             GRPC_CHANNEL_IDLE);
  /* start watching for a change */
  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_IDLE,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(2));

  /* and now the watch should trigger */
  cq_expect_completion(cqv, tag(2), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING);

  /* quickly followed by a transition to TRANSIENT_FAILURE */
  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_CONNECTING,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(3));
  cq_expect_completion(cqv, tag(3), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING);

  gpr_log(GPR_DEBUG, "*** STARTING SERVER ***");

  /* now let's bring up a server to connect to */
  config.init_server(&f, NULL);

  gpr_log(GPR_DEBUG, "*** STARTED SERVER ***");

  /* we'll go through some set of transitions (some might be missed), until
     READY is reached */
  while (state != GRPC_CHANNEL_READY) {
    grpc_channel_watch_connectivity_state(
        f.client, state, GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3), f.cq, tag(4));
    cq_expect_completion(cqv, tag(4), 1);
    cq_verify(cqv);
    state = grpc_channel_check_connectivity_state(f.client, 0);
    GPR_ASSERT(state == GRPC_CHANNEL_READY ||
               state == GRPC_CHANNEL_CONNECTING ||
               state == GRPC_CHANNEL_TRANSIENT_FAILURE);
  }

  /* bring down the server again */
  /* we should go immediately to TRANSIENT_FAILURE */
  gpr_log(GPR_DEBUG, "*** SHUTTING DOWN SERVER ***");

  grpc_channel_watch_connectivity_state(f.client, GRPC_CHANNEL_READY,
                                        GRPC_TIMEOUT_SECONDS_TO_DEADLINE(3),
                                        f.cq, tag(5));

  grpc_server_shutdown_and_notify(f.server, f.cq, tag(0xdead));

  cq_expect_completion(cqv, tag(5), 1);
  cq_expect_completion(cqv, tag(0xdead), 1);
  cq_verify(cqv);
  state = grpc_channel_check_connectivity_state(f.client, 0);
  GPR_ASSERT(state == GRPC_CHANNEL_TRANSIENT_FAILURE ||
             state == GRPC_CHANNEL_CONNECTING || state == GRPC_CHANNEL_IDLE);

  /* cleanup server */
  grpc_server_destroy(f.server);

  gpr_log(GPR_DEBUG, "*** SHUTDOWN SERVER ***");

  grpc_channel_destroy(f.client);
  grpc_completion_queue_shutdown(f.cq);
  grpc_completion_queue_destroy(f.cq);
  config.tear_down_data(&f);

  cq_verifier_destroy(cqv);
}
Code Example #10
File: timeval.c Project: mdhheydari/grpc-php7
/**
 * Sleep until this time, interpreted as an absolute timeout
 * @return void
 */
PHP_METHOD(Timeval, sleepUntil) {
  wrapped_grpc_timeval *this = Z_WRAPPED_GRPC_TIMEVAL_P(getThis());
  gpr_sleep_until(this->wrapped);
}
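Every example above passes gpr_sleep_until an absolute gpr_timespec deadline rather than a duration; the GRPC_TIMEOUT_*_TO_DEADLINE test macros and the explicit gpr_time_add(gpr_now(...), ...) calls are just two ways of building one. A minimal self-contained sketch of that pattern (the helper name sleep_for_millis is ours, not part of the gpr API):

#include <stdint.h>
#include <grpc/support/time.h>

/* Convert a relative duration into an absolute realtime deadline and sleep. */
static void sleep_for_millis(int64_t millis) {
  gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                               gpr_time_from_millis(millis, GPR_TIMESPAN)));
}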