Example #1
File: sync_test.c Project: madongfly/grpc
/* Initialize *q. */
void queue_init(queue *q) {
  gpr_mu_init(&q->mu);
  gpr_cv_init(&q->non_empty);
  gpr_cv_init(&q->non_full);
  q->head = 0;
  q->length = 0;
}
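The two condition variables initialized here drive a classic bounded-buffer protocol. Below is a minimal sketch of a blocking append, assuming the queue struct also carries a fixed-capacity circular buffer (an elem array of some size N) alongside the head and length fields initialized above:

/* Hypothetical blocking append; elem and N are assumed fields/constants
   of the queue struct, not shown in the snippet above. */
void queue_append(queue *q, int x) {
  gpr_mu_lock(&q->mu);
  /* Wait, with no deadline, until there is room. */
  while (q->length == N) {
    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (q->length == 0) {
    /* Empty-to-non-empty transition: wake any waiting readers. */
    gpr_cv_broadcast(&q->non_empty);
  }
  q->elem[(q->head + q->length) % N] = x;
  q->length++;
  gpr_mu_unlock(&q->mu);
}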
Example #2
File: iomgr.c Project: Abioy/kythe
void grpc_iomgr_init(void) {
  gpr_thd_id id;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_cv);
  gpr_cv_init(&g_rcv);
  grpc_alarm_list_init(gpr_now());
  g_refs = 0;
  grpc_iomgr_platform_init();
  gpr_event_init(&g_background_callback_executor_done);
  gpr_thd_new(&id, background_callback_executor, NULL, NULL);
}
Example #3
int grpc_pollset_work(grpc_pollset *pollset, grpc_pollset_worker *worker,
                      gpr_timespec deadline) {
  gpr_timespec now;
  int added_worker = 0;
  now = gpr_now(GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(now, deadline) > 0) {
    return 0 /* GPR_FALSE */;
  }
  worker->next = worker->prev = NULL;
  gpr_cv_init(&worker->cv);
  if (grpc_maybe_call_delayed_callbacks(&pollset->mu, 1 /* GPR_TRUE */)) {
    goto done;
  }
  if (grpc_alarm_check(&pollset->mu, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    push_front_worker(pollset, worker);
    added_worker = 1;
    gpr_cv_wait(&worker->cv, &pollset->mu, deadline);
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  gpr_cv_destroy(&worker->cv);
  if (added_worker) {
    remove_worker(pollset, worker);
  }
  return 1 /* GPR_TRUE */;
}
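grpc_pollset_work bounds its block by handing the caller's absolute deadline straight to gpr_cv_wait, which returns non-zero once that deadline expires. The same idiom in isolation, as a self-contained sketch (the wait_until_done name and 100ms budget are illustrative, not from the example):

/* Illustrative timed wait: block up to 100ms for a flag to become true.
   gpr_cv_wait returns non-zero when the absolute deadline has passed. */
static int wait_until_done(gpr_mu *mu, gpr_cv *cv, const int *done) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
  int ok;
  gpr_mu_lock(mu);
  while (!*done) {
    if (gpr_cv_wait(cv, mu, deadline)) break; /* deadline expired */
  }
  ok = *done;
  gpr_mu_unlock(mu);
  return ok;
}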
Example #4
File: thd_test.c Project: rootusr/grpc
/* Test that we can create a number of threads and wait for them. */
static void test(void) {
  int i;
  gpr_thd_id thd;
  gpr_thd_id thds[1000];
  struct test t;
  int n = 1000;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_mu_init(&t.mu);
  gpr_cv_init(&t.done_cv);
  t.n = n;
  t.is_done = 0;
  for (i = 0; i != n; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, NULL));
  }
  gpr_mu_lock(&t.mu);
  while (!t.is_done) {
    gpr_cv_wait(&t.done_cv, &t.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&t.mu);
  GPR_ASSERT(t.n == 0);
  gpr_thd_options_set_joinable(&options);
  for (i = 0; i < n; i++) {
    GPR_ASSERT(gpr_thd_new(&thds[i], &thd_body_joinable, NULL, &options));
  }
  for (i = 0; i < n; i++) {
    gpr_thd_join(thds[i]);
  }
}
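The snippet relies on a thd_body that is not shown. A plausible version consistent with the assertions above would decrement t->n under the mutex and signal done_cv exactly once, when the last thread finishes:

/* Hypothetical thd_body for the test above: the last thread to finish
   flips is_done and wakes the waiter, making GPR_ASSERT(t.n == 0) hold. */
static void thd_body(void *v) {
  struct test *t = v;
  gpr_mu_lock(&t->mu);
  t->n--;
  if (t->n == 0) {
    t->is_done = 1;
    gpr_cv_signal(&t->done_cv);
  }
  gpr_mu_unlock(&t->mu);
}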
Example #5
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
                                           grpc_pollset *pollset,
                                           grpc_pollset_worker **worker,
                                           gpr_timespec now,
                                           gpr_timespec deadline) {
  non_polling_poller *npp = (non_polling_poller *)pollset;
  if (npp->shutdown) return GRPC_ERROR_NONE;
  non_polling_worker w;
  gpr_cv_init(&w.cv);
  if (worker != NULL) *worker = (grpc_pollset_worker *)&w;
  if (npp->root == NULL) {
    npp->root = w.next = w.prev = &w;
  } else {
    w.next = npp->root;
    w.prev = w.next->prev;
    w.next->prev = w.prev->next = &w;
  }
  w.kicked = false;
  while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline))
    ;
  if (&w == npp->root) {
    npp->root = w.next;
    if (&w == npp->root) {
      if (npp->shutdown) {
        GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
      }
      npp->root = NULL;
    }
  }
  w.next->prev = w.prev;
  w.prev->next = w.next;
  gpr_cv_destroy(&w.cv);
  if (worker != NULL) *worker = NULL;
  return GRPC_ERROR_NONE;
}
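The wait loop above exits when the poller shuts down, the worker is kicked, or gpr_cv_wait reports deadline expiry. A hedged sketch of the shape the matching kick path would need (the real kick function is not shown in this example):

/* Hypothetical kick, called with npp->mu held: mark the worker and wake
   its cv so the loop in non_polling_poller_work sees w->kicked. */
static void kick_worker(non_polling_worker *w) {
  if (!w->kicked) {
    w->kicked = true;
    gpr_cv_signal(&w->cv);
  }
}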
Example #6
void grpc_client_setup_create_and_attach(
    grpc_channel_stack *newly_minted_channel, const grpc_channel_args *args,
    grpc_mdctx *mdctx,
    void (*initiate)(void *user_data, grpc_client_setup_request *request),
    void (*done)(void *user_data), void *user_data) {
  grpc_client_setup *s = gpr_malloc(sizeof(grpc_client_setup));

  s->base.vtable = &setup_vtable;
  gpr_mu_init(&s->mu);
  gpr_cv_init(&s->cv);
  s->refs = 1;
  s->mdctx = mdctx;
  s->initiate = initiate;
  s->done = done;
  s->user_data = user_data;
  s->active_request = NULL;
  s->args = grpc_channel_args_copy(args);
  s->current_backoff_interval = gpr_time_from_micros(1000000);
  s->in_alarm = 0;
  s->in_cb = 0;
  s->cancelled = 0;
  grpc_pollset_set_init(&s->interested_parties);

  grpc_client_channel_set_transport_setup(newly_minted_channel, &s->base);
}
Example #7
static int is_stack_running_on_compute_engine(void) {
  compute_engine_detector detector;
  grpc_httpcli_request request;

  /* The http call is local. If it takes more than one sec, it is for sure not
     on compute engine. */
  gpr_timespec max_detection_delay = {1, 0};

  gpr_mu_init(&detector.mu);
  gpr_cv_init(&detector.cv);
  detector.is_done = 0;
  detector.success = 0;

  memset(&request, 0, sizeof(grpc_httpcli_request));
  request.host = GRPC_COMPUTE_ENGINE_DETECTION_HOST;
  request.path = "/";

  grpc_httpcli_get(&request, gpr_time_add(gpr_now(), max_detection_delay),
                   on_compute_engine_detection_http_response, &detector);

  /* Block until we get the response. This is not ideal but this should only be
     called once for the lifetime of the process by the default credentials. */
  gpr_mu_lock(&detector.mu);
  while (!detector.is_done) {
    gpr_cv_wait(&detector.cv, &detector.mu, gpr_inf_future);
  }
  gpr_mu_unlock(&detector.mu);

  gpr_mu_destroy(&detector.mu);
  gpr_cv_destroy(&detector.cv);
  return detector.success;
}
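The blocking loop depends on the HTTP callback flipping is_done and signaling the cv. A plausible sketch of that callback (the real on_compute_engine_detection_http_response is not shown, and its actual success check may differ, e.g. inspecting response headers rather than only the status):

/* Hypothetical response callback: record the outcome under the mutex,
   then wake the thread blocked in is_stack_running_on_compute_engine. */
static void on_compute_engine_detection_http_response(
    void *user_data, const grpc_httpcli_response *response) {
  compute_engine_detector *detector = user_data;
  gpr_mu_lock(&detector->mu);
  detector->success = (response != NULL && response->status == 200);
  detector->is_done = 1;
  gpr_cv_signal(&detector->cv);
  gpr_mu_unlock(&detector->mu);
}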
Example #8
static void event_initialize(void) {
  int i;
  for (i = 0; i != event_sync_partitions; i++) {
    gpr_mu_init(&sync_array[i].mu);
    gpr_cv_init(&sync_array[i].cv);
  }
}
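Partitioning the mutex/cv pairs keeps unrelated events from contending on one lock. A sketch of how an event might be mapped to its partition (the address-hash scheme below is an assumption, not taken from the source; uintptr_t needs <stdint.h>):

/* Hypothetical partition lookup: hash the event's address to select the
   sync_array[] entry that guards it. */
static struct sync_array_s *partition_for(gpr_event *ev) {
  return &sync_array[((uintptr_t)ev) % event_sync_partitions];
}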
Example #9
static void cpu_test(void) {
  uint32_t i;
  int cores_seen = 0;
  struct cpu_test ct;
  gpr_thd_id thd;
  ct.ncores = gpr_cpu_num_cores();
  GPR_ASSERT(ct.ncores > 0);
  ct.nthreads = (int)ct.ncores * 3;
  ct.used = gpr_malloc(ct.ncores * sizeof(int));
  memset(ct.used, 0, ct.ncores * sizeof(int));
  gpr_mu_init(&ct.mu);
  gpr_cv_init(&ct.done_cv);
  ct.is_done = 0;
  for (i = 0; i < ct.ncores * 3; i++) {
    GPR_ASSERT(gpr_thd_new(&thd, &worker_thread, &ct, NULL));
  }
  gpr_mu_lock(&ct.mu);
  while (!ct.is_done) {
    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&ct.mu);
  fprintf(stderr, "Saw cores [");
  for (i = 0; i < ct.ncores; i++) {
    if (ct.used[i]) {
      fprintf(stderr, "%d,", i);
      cores_seen++;
    }
  }
  fprintf(stderr, "] (%d/%d)\n", cores_seen, ct.ncores);
  gpr_free(ct.used);
}
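Here too the worker body is not shown. A plausible worker_thread consistent with the bookkeeping above records which core it ran on and signals done_cv when the last of the ncores * 3 threads exits:

/* Hypothetical worker_thread: mark the current core as seen; the last
   thread to exit wakes the main thread waiting on done_cv. */
static void worker_thread(void *arg) {
  struct cpu_test *ct = arg;
  unsigned cpu = gpr_cpu_current_cpu();
  gpr_mu_lock(&ct->mu);
  GPR_ASSERT(cpu < ct->ncores);
  ct->used[cpu] = 1;
  if (--ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}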
Example #10
File: fd_posix_test.c Project: Infixz/grpc
static void client_init(client *cl) {
  memset(cl->write_buf, 0, sizeof(cl->write_buf));
  cl->write_bytes_total = 0;
  cl->client_write_cnt = 0;
  gpr_mu_init(&cl->mu);
  gpr_cv_init(&cl->done_cv);
  cl->done = 0;
}
Example #11
File: sync_test.c Project: madongfly/grpc
/* Return pointer to a new struct test. */
static struct test *test_new(int threads, gpr_int64 iterations) {
  struct test *m = gpr_malloc(sizeof(*m));
  m->threads = threads;
  m->iterations = iterations;
  m->counter = 0;
  m->thread_count = 0;
  m->done = threads;
  gpr_mu_init(&m->mu);
  gpr_cv_init(&m->cv);
  gpr_cv_init(&m->done_cv);
  queue_init(&m->q);
  gpr_stats_init(&m->stats_counter, 0);
  gpr_ref_init(&m->refcount, 0);
  gpr_ref_init(&m->thread_refcount, threads);
  gpr_event_init(&m->event);
  return m;
}
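Everything test_new initializes needs a matching teardown. A hedged sketch of the counterpart, assuming a queue_destroy that mirrors queue_init from Example #1 (gpr stats and refcounts are plain counters and need no destroy call):

/* Hypothetical counterpart to test_new. */
static void test_destroy(struct test *m) {
  gpr_mu_destroy(&m->mu);
  gpr_cv_destroy(&m->cv);
  gpr_cv_destroy(&m->done_cv);
  queue_destroy(&m->q);
  gpr_free(m);
}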
Example #12
void grpc_rb_event_queue_thread_start() {
  event_queue.head = event_queue.tail = NULL;
  event_queue.abort = false;
  gpr_mu_init(&event_queue.mu);
  gpr_cv_init(&event_queue.cv);

  rb_thread_create(grpc_rb_event_thread, NULL);
}
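The event thread started here would block on event_queue.cv until work arrives. A hedged sketch of the matching producer, assuming a singly linked node type (called grpc_rb_event here) with a next pointer:

/* Hypothetical producer: append a node under the mutex and signal the cv
   so the consumer thread wakes up. The grpc_rb_event type is assumed. */
static void event_queue_enqueue(grpc_rb_event *event) {
  gpr_mu_lock(&event_queue.mu);
  event->next = NULL;
  if (event_queue.tail == NULL) {
    event_queue.head = event_queue.tail = event;
  } else {
    event_queue.tail->next = event;
    event_queue.tail = event;
  }
  gpr_cv_signal(&event_queue.cv);
  gpr_mu_unlock(&event_queue.mu);
}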
Example #13
/* Return true if this thread should poll */
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable = pollset->current_pollable;

  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    REF_BY((grpc_fd *)worker->pollable, 2, "one_poll");
  }

  worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
  if (!worker_insert(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    if (worker->pollable != &pollset->pollable) {
      gpr_mu_unlock(&pollset->pollable.po.mu);
    }
    if (GRPC_TRACER_ON(grpc_polling_trace) &&
        worker->pollable->root_worker != worker) {
      gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable, worker,
              poll_deadline_to_millis_timeout(deadline, *now));
    }
    while (do_poll && worker->pollable->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable->po.mu, deadline)) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset, worker->pollable,
                  worker);
        }
        do_poll = false;
      } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
                 worker->pollable->root_worker != worker) {
        gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable, worker);
      }
    }
    if (worker->pollable != &pollset->pollable) {
      gpr_mu_unlock(&worker->pollable->po.mu);
      gpr_mu_lock(&pollset->pollable.po.mu);
      gpr_mu_lock(&worker->pollable->po.mu);
    }
    *now = gpr_now(now->clock_type);
  }

  return do_poll && pollset->shutdown_closure == NULL &&
         pollset->current_pollable == worker->pollable;
}
Example #14
File: iomgr.c Project: Infixz/grpc
void grpc_iomgr_init(void) {
  gpr_thd_id id;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_rcv);
  grpc_alarm_list_init(gpr_now());
  g_root_object.next = g_root_object.prev = &g_root_object;
  g_root_object.name = "root";
  grpc_iomgr_platform_init();
  gpr_event_init(&g_background_callback_executor_done);
  gpr_thd_new(&id, background_callback_executor, NULL, NULL);
}
Example #15
File: iomgr.c Project: Saviio/grpc
void grpc_iomgr_init(void) {
  g_shutdown = 0;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_rcv);
  grpc_exec_ctx_global_init();
  grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
  g_root_object.next = g_root_object.prev = &g_root_object;
  g_root_object.name = "root";
  grpc_iomgr_platform_init();
  grpc_pollset_global_init();
}
Example #16
File: iomgr.c Project: aaronjheng/grpc
void grpc_iomgr_init(grpc_exec_ctx *exec_ctx) {
  g_shutdown = 0;
  gpr_mu_init(&g_mu);
  gpr_cv_init(&g_rcv);
  grpc_exec_ctx_global_init();
  grpc_executor_init(exec_ctx);
  grpc_timer_list_init(gpr_now(GPR_CLOCK_MONOTONIC));
  g_root_object.next = g_root_object.prev = &g_root_object;
  g_root_object.name = "root";
  grpc_network_status_init();
  grpc_iomgr_platform_init();
}
Example #17
/* Public function. Allocates the proper data structures to hold a
   grpc_tcp_server. */
grpc_tcp_server *grpc_tcp_server_create(void) {
  grpc_tcp_server *s = gpr_malloc(sizeof(grpc_tcp_server));
  gpr_mu_init(&s->mu);
  gpr_cv_init(&s->cv);
  s->active_ports = 0;
  s->cb = NULL;
  s->cb_arg = NULL;
  s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
  s->nports = 0;
  s->port_capacity = INIT_PORT_CAP;
  return s;
}
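The cv created here typically exists so teardown can wait for active_ports to drain before freeing the server. A hypothetical wait-and-free sketch (not the real grpc_tcp_server_destroy, which would also have to close the listening fds):

/* Hypothetical teardown: block until every port has shut down, then
   release the sync primitives and allocations. */
static void tcp_server_wait_and_free(grpc_tcp_server *s) {
  gpr_mu_lock(&s->mu);
  while (s->active_ports != 0) {
    gpr_cv_wait(&s->cv, &s->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&s->mu);
  gpr_mu_destroy(&s->mu);
  gpr_cv_destroy(&s->cv);
  gpr_free(s->ports);
  gpr_free(s);
}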
Example #18
grpc_udp_server *grpc_udp_server_create(void) {
  grpc_udp_server *s = gpr_malloc(sizeof(grpc_udp_server));
  gpr_mu_init(&s->mu);
  gpr_cv_init(&s->cv);
  s->active_ports = 0;
  s->destroyed_ports = 0;
  s->shutdown = 0;
  s->ports = gpr_malloc(sizeof(server_port) * INIT_PORT_CAP);
  s->nports = 0;
  s->port_capacity = INIT_PORT_CAP;

  return s;
}
Example #19
int main(int argc, char **argv) {
  gpr_setenv("GRPC_POLL_STRATEGY", "poll-cv");
  grpc_poll_function = &mock_poll;
  gpr_mu_init(&poll_mu);
  gpr_cv_init(&poll_cv);

  grpc_iomgr_platform_init();
  test_many_fds();
  grpc_iomgr_platform_shutdown();

  grpc_iomgr_platform_init();
  test_poll_cv_trigger();
  grpc_iomgr_platform_shutdown();
  return 0;
}
Example #20
int main(int argc, char **argv) {
  grpc_test_init(argc, argv);
  grpc_iomgr_init();
  gpr_mu_init(&mu);
  gpr_cv_init(&cv);

  test_no_op();
  test_no_op_with_start();
  test_no_op_with_port();
  test_no_op_with_port_and_start();
  test_connect(1);
  test_connect(10);

  grpc_iomgr_shutdown();
  gpr_mu_destroy(&mu);
  gpr_cv_destroy(&cv);
  return 0;
}
Example #21
File: child_channel.c Project: Infixz/grpc
/* Constructor for channel_data */
static void lb_init_channel_elem(grpc_channel_element *elem,
                                 const grpc_channel_args *args,
                                 grpc_mdctx *metadata_context, int is_first,
                                 int is_last) {
  lb_channel_data *chand = elem->channel_data;
  GPR_ASSERT(is_first);
  GPR_ASSERT(!is_last);
  gpr_mu_init(&chand->mu);
  gpr_cv_init(&chand->cv);
  chand->back = NULL;
  chand->destroyed = 0;
  chand->disconnected = 0;
  chand->active_calls = 0;
  chand->sent_goaway = 0;
  chand->calling_back = 0;
  chand->sending_farewell = 0;
  chand->sent_farewell = 0;
}
Example #22
File: server.c Project: penser/grpc
grpc_server *grpc_server_create_from_filters(grpc_completion_queue *cq,
                                             grpc_channel_filter **filters,
                                             size_t filter_count,
                                             const grpc_channel_args *args) {
  size_t i;
  int census_enabled = grpc_channel_args_is_census_enabled(args);

  grpc_server *server = gpr_malloc(sizeof(grpc_server));

  GPR_ASSERT(grpc_is_initialized() && "call grpc_init()");

  memset(server, 0, sizeof(grpc_server));
  if (cq) addcq(server, cq);

  gpr_mu_init(&server->mu);
  gpr_cv_init(&server->cv);

  server->unregistered_cq = cq;
  /* decremented by grpc_server_destroy */
  gpr_ref_init(&server->internal_refcount, 1);
  server->root_channel_data.next = server->root_channel_data.prev =
      &server->root_channel_data;

  /* Server filter stack is:

     server_surface_filter - for making surface API calls
     grpc_server_census_filter (optional) - for stats collection and tracing
     {passed in filter stack}
     grpc_connected_channel_filter - for interfacing with transports */
  server->channel_filter_count = filter_count + 1 + census_enabled;
  server->channel_filters =
      gpr_malloc(server->channel_filter_count * sizeof(grpc_channel_filter *));
  server->channel_filters[0] = &server_surface_filter;
  if (census_enabled) {
    server->channel_filters[1] = &grpc_server_census_filter;
  }
  for (i = 0; i < filter_count; i++) {
    server->channel_filters[i + 1 + census_enabled] = filters[i];
  }

  server->channel_args = grpc_channel_args_copy(args);

  return server;
}
Example #23
/* Temporary fix for
 * https://github.com/GoogleCloudPlatform/google-cloud-ruby/issues/899.
 * Transports in idle channels can get destroyed. Normally c-core re-connects,
 * but in grpc-ruby, core never gets a thread until an RPC is made, because
 * Ruby only calls c-core's "completion_queue_pluck" API.
 * This uses a global background thread that calls
 * "completion_queue_next" on registered "watch_channel_connectivity_state"
 * calls - so that c-core can reconnect if needed, when there aren't any RPCs.
 * TODO(apolcyn) remove this when core handles new RPCs on dead connections.
 */
void grpc_rb_channel_polling_thread_start() {
  VALUE background_thread = Qnil;

  GPR_ASSERT(!abort_channel_polling);
  GPR_ASSERT(!channel_polling_thread_started);
  GPR_ASSERT(channel_polling_cq == NULL);

  gpr_mu_init(&global_connection_polling_mu);
  gpr_cv_init(&global_connection_polling_cv);

  channel_polling_cq = grpc_completion_queue_create_for_next(NULL);
  background_thread = rb_thread_create(run_poll_channels_loop, NULL);

  if (!RTEST(background_thread)) {
    gpr_log(GPR_DEBUG, "GRPC_RUBY: failed to spawn channel polling thread");
    rb_thread_call_without_gvl(set_abort_channel_polling_without_gil, NULL,
                               NULL, NULL);
  }
}
Example #24
File: trace_test.c Project: Abioy/kythe
static void test_concurrency(void) {
#define NUM_THREADS 1000
  gpr_thd_id tid[NUM_THREADS];
  int i = 0;
  thd_arg arg;
  arg.num_done = 0;
  gpr_mu_init(&arg.mu);
  gpr_cv_init(&arg.done);
  census_tracing_init();
  for (i = 0; i < NUM_THREADS; ++i) {
    gpr_thd_new(tid + i, mimic_trace_op_sequences, &arg, NULL);
  }
  gpr_mu_lock(&arg.mu);
  while (arg.num_done < NUM_THREADS) {
    gpr_log(GPR_INFO, "num done %d", arg.num_done);
    gpr_cv_wait(&arg.done, &arg.mu, gpr_inf_future);
  }
  gpr_mu_unlock(&arg.mu);
  census_tracing_shutdown();
#undef NUM_THREADS
}
Example #25
void grpc_pollset_init(grpc_pollset *pollset) {
  gpr_mu_init(&pollset->mu);
  gpr_cv_init(&pollset->cv);
}
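The presumed counterpart is symmetric (not shown in the source):

/* Presumed counterpart to the init above. */
void grpc_pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_destroy(&pollset->mu);
  gpr_cv_destroy(&pollset->cv);
}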
Example #26
File: mlog_test.c Project: github188/grpc
static void multiple_writers_single_reader(int circular_log) {
  // Sleep interval between read iterations.
  static const int READ_ITERATION_INTERVAL_IN_MSEC = 10;
  // Maximum record size.
  static const size_t MAX_RECORD_SIZE = 20;
  // Number of records written by each writer. This is sized such that we
  // will write through the entire log ~10 times.
  const int NUM_RECORDS_PER_WRITER =
      (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) /
      NUM_WRITERS;
  size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1;
  // Create and start writers.
  writer_thread_args writers[NUM_WRITERS];
  int writers_count = NUM_WRITERS;
  gpr_cv writers_done;
  gpr_mu writers_mu;  // protects writers_done and writers_count
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  gpr_thd_id id;
  for (int i = 0; i < NUM_WRITERS; ++i) {
    writers[i].index = i;
    writers[i].record_size = record_size;
    writers[i].num_records = NUM_RECORDS_PER_WRITER;
    writers[i].done = &writers_done;
    writers[i].count = &writers_count;
    writers[i].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[i], NULL);
  }
  // Start reader.
  gpr_cv reader_done;
  gpr_mu reader_mu;  // protects reader_done and reader.running
  reader_thread_args reader;
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  // Wait for writers to finish.
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    // Stop reader.
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  // wait for reader to finish
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    // Assert that there were no out-of-space errors.
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  if (VERBOSE) {
    printf("   Reader: finished\n");
  }
}
Example #27
static void multiple_writers_single_reader(int circular_log) {
  /* Sleep interval between read iterations. */
  static const int32_t READ_ITERATION_INTERVAL_IN_MSEC = 10;
  /* Number of records written by each writer. */
  static const int32_t NUM_RECORDS_PER_WRITER = 10 * 1024 * 1024;
  /* Maximum record size. */
  static const size_t MAX_RECORD_SIZE = 10;
  int ix;
  gpr_thd_id id;
  gpr_cv writers_done;
  int writers_count = NUM_WRITERS;
  gpr_mu writers_mu; /* protects writers_done and writers_count */
  writer_thread_args writers[NUM_WRITERS];
  gpr_cv reader_done;
  gpr_mu reader_mu; /* protects reader_done and reader.running */
  reader_thread_args reader;
  int32_t record_size = 1 + rand() % MAX_RECORD_SIZE;
  printf("   Record size: %d\n", record_size);
  /* Create and start writers. */
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  for (ix = 0; ix < NUM_WRITERS; ++ix) {
    writers[ix].index = ix;
    writers[ix].record_size = record_size;
    writers[ix].num_records = NUM_RECORDS_PER_WRITER;
    writers[ix].done = &writers_done;
    writers[ix].count = &writers_count;
    writers[ix].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[ix], NULL);
  }
  /* Start reader. */
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  /* Wait for writers to finish. */
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    /* Stop reader. */
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  /* wait for reader to finish */
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    /* Assert that there were no out-of-space errors. */
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  printf("   Reader: finished\n");
}
Example #28
File: fd_posix_test.c Project: Infixz/grpc
void init_change_data(fd_change_data *fdc) {
  gpr_mu_init(&fdc->mu);
  gpr_cv_init(&fdc->cv);
  fdc->cb_that_ran = NULL;
}
Example #29
File: fd_posix_test.c Project: Infixz/grpc
static void server_init(server *sv) {
  sv->read_bytes_total = 0;
  gpr_mu_init(&sv->mu);
  gpr_cv_init(&sv->done_cv);
  sv->done = 0;
}
Example #30
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  int added_worker = 0;
  worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
      worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
          worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
              worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
  worker->kicked = 0;
  worker->pollset = pollset;
  gpr_cv_init(&worker->cv);
  if (grpc_timer_check(exec_ctx, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    if (g_active_poller == NULL) {
      grpc_pollset_worker *next_worker;
      /* become poller */
      pollset->is_iocp_worker = 1;
      g_active_poller = worker;
      gpr_mu_unlock(&grpc_polling_mu);
      grpc_iocp_work(exec_ctx, deadline);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&grpc_polling_mu);
      pollset->is_iocp_worker = 0;
      g_active_poller = NULL;
      /* try to get a worker from this pollset's worker list */
      next_worker = pop_front_worker(&pollset->root_worker,
                                     GRPC_POLLSET_WORKER_LINK_POLLSET);
      if (next_worker == NULL) {
        /* try to get a worker from the global list */
        next_worker = pop_front_worker(&g_global_root_worker,
                                       GRPC_POLLSET_WORKER_LINK_GLOBAL);
      }
      if (next_worker != NULL) {
        next_worker->kicked = 1;
        gpr_cv_signal(&next_worker->cv);
      }

      if (pollset->shutting_down && pollset->on_shutdown != NULL) {
        grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
        pollset->on_shutdown = NULL;
      }
      goto done;
    }
    push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
                      worker);
    push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
                      worker);
    added_worker = 1;
    while (!worker->kicked) {
      if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
        break;
      }
    }
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    gpr_mu_unlock(&grpc_polling_mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&grpc_polling_mu);
  }
  if (added_worker) {
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
  }
  gpr_cv_destroy(&worker->cv);
}