Example #1
static void read_and_write_test_read_handler(void *data, gpr_slice *slices,
                                             size_t nslices,
                                             grpc_endpoint_cb_status error) {
  struct read_and_write_test_state *state = data;
  GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR);
  if (error == GRPC_ENDPOINT_CB_SHUTDOWN) {
    gpr_log(GPR_INFO, "Read handler shutdown");
    gpr_mu_lock(&state->mu);
    state->read_done = 1;
    gpr_cv_signal(&state->cv);
    gpr_mu_unlock(&state->mu);
    return;
  }

  state->bytes_read +=
      count_and_unref_slices(slices, nslices, &state->current_read_data);
  if (state->bytes_read == state->target_bytes) {
    gpr_log(GPR_INFO, "Read handler done");
    gpr_mu_lock(&state->mu);
    state->read_done = 1;
    gpr_cv_signal(&state->cv);
    gpr_mu_unlock(&state->mu);
  } else {
    grpc_endpoint_notify_on_read(state->read_ep,
                                 read_and_write_test_read_handler, data);
  }
}
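The handler above only flips read_done under the lock and signals; some other thread must consume that signal. A minimal sketch of the waiting side, with a hypothetical stand-in struct carrying the same mu/cv/read_done fields as the example (illustration only, not the real test harness):

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

/* Minimal stand-in for the fields of read_and_write_test_state that the
   signalling pattern touches (hypothetical, for illustration). */
struct done_flag_state {
  gpr_mu mu;
  gpr_cv cv;
  int read_done;
};

/* Waiting side of the pattern: gpr_cv_wait atomically releases the mutex
   while sleeping and re-acquires it before returning, so the flag is only
   ever read under the lock; the while loop guards against spurious wakeups. */
static void wait_for_read_done(struct done_flag_state *state) {
  gpr_mu_lock(&state->mu);
  while (!state->read_done) {
    gpr_cv_wait(&state->cv, &state->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&state->mu);
}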
Example #2
static void read_and_write_test_write_handler(void *data,
                                              grpc_endpoint_cb_status error) {
  struct read_and_write_test_state *state = data;
  gpr_slice *slices = NULL;
  size_t nslices;
  grpc_endpoint_write_status write_status;

  GPR_ASSERT(error != GRPC_ENDPOINT_CB_ERROR);

  gpr_log(GPR_DEBUG, "%s: error=%d", __FUNCTION__, error);

  if (error == GRPC_ENDPOINT_CB_SHUTDOWN) {
    gpr_log(GPR_INFO, "Write handler shutdown");
    gpr_mu_lock(&state->mu);
    state->write_done = 1;
    gpr_cv_signal(&state->cv);
    gpr_mu_unlock(&state->mu);
    return;
  }

  for (;;) {
    /* Need to do inline writes until they don't succeed synchronously or we
       finish writing */
    state->bytes_written += state->current_write_size;
    if (state->target_bytes - state->bytes_written <
        state->current_write_size) {
      state->current_write_size = state->target_bytes - state->bytes_written;
    }
    if (state->current_write_size == 0) {
      break;
    }

    slices = allocate_blocks(state->current_write_size, 8192, &nslices,
                             &state->current_write_data);
    write_status =
        grpc_endpoint_write(state->write_ep, slices, nslices,
                            read_and_write_test_write_handler, state);
    gpr_log(GPR_DEBUG, "write_status=%d", write_status);
    GPR_ASSERT(write_status != GRPC_ENDPOINT_WRITE_ERROR);
    free(slices);
    if (write_status == GRPC_ENDPOINT_WRITE_PENDING) {
      return;
    }
  }
  GPR_ASSERT(state->bytes_written == state->target_bytes);

  gpr_log(GPR_INFO, "Write handler done");
  gpr_mu_lock(&state->mu);
  state->write_done = 1;
  gpr_cv_signal(&state->cv);
  gpr_mu_unlock(&state->mu);
}
Example #3
void grpc_client_setup_cb_end(grpc_client_setup_request *r,
                              const char *reason) {
  gpr_mu_lock(&r->setup->mu);
  r->setup->in_cb--;
  if (r->setup->cancelled) gpr_cv_signal(&r->setup->cv);
  gpr_mu_unlock(&r->setup->mu);
}
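The in_cb counter and cancelled flag imply a canceller that drains in-flight callbacks before tearing the setup down. A hedged sketch of that side, reusing the field names from the example and assuming the setup struct is named grpc_client_setup (the function itself is hypothetical):

/* Hypothetical canceller: mark the setup cancelled, then wait until every
   in-flight callback has called grpc_client_setup_cb_end. Field names
   (mu, cv, cancelled, in_cb) follow the example above. */
static void cancel_setup_and_drain(grpc_client_setup *setup) {
  gpr_mu_lock(&setup->mu);
  setup->cancelled = 1;
  while (setup->in_cb > 0) {
    gpr_cv_wait(&setup->cv, &setup->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&setup->mu);
}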
Example #4
File: cpu_test.c Project: makdharma/grpc
static void worker_thread(void *arg) {
  struct cpu_test *ct = (struct cpu_test *)arg;
  uint32_t cpu;
  unsigned r = 12345678;
  unsigned i, j;
  /* Avoid repetitive division calculations */
  int64_t max_i = 1000 / grpc_test_slowdown_factor();
  int64_t max_j = 1000000 / grpc_test_slowdown_factor();
  for (i = 0; i < max_i; i++) {
    /* run for a bit - just calculate something random. */
    for (j = 0; j < max_j; j++) {
      r = (r * 17) & ((r - i) | (r * i));
    }
    cpu = gpr_cpu_current_cpu();
    GPR_ASSERT(cpu < ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    for (j = 0; j < ct->ncores; j++) {
      if (!ct->used[j]) break;
    }
    gpr_mu_unlock(&ct->mu);
    if (j == ct->ncores) {
      break; /* all cpus have been used - no further use in running this test */
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = r; /* make it look like we care about r's value... */
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}
Example #5
// Writes the given number of records of random size (up to kMaxRecordSize) and
// random data to the specified log.
static void writer_thread(void* arg) {
  writer_thread_args* args = (writer_thread_args*)arg;
  // Maximum number of times to spin between writes.
  static const int MAX_SPIN_COUNT = 50;
  int records_written = 0;
  if (VERBOSE) {
    printf("   Writer %d starting\n", args->index);
  }
  while (records_written < args->num_records) {
    records_written += write_records_to_log(args->index, args->record_size,
                                            args->num_records - records_written,
                                            MAX_SPIN_COUNT);
    if (records_written < args->num_records) {
      // Ran out of log space. Sleep for a bit and let the reader catch up.
      // This should never happen for circular logs.
      if (VERBOSE) {
        printf(
            "   Writer %d stalled due to out-of-space: %d out of %d "
            "written\n",
            args->index, records_written, args->num_records);
      }
      gpr_sleep_until(GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10));
    }
  }
  // Done. Decrement count and signal.
  gpr_mu_lock(args->mu);
  (*args->count)--;
  gpr_cv_signal(args->done);
  if (VERBOSE) {
    printf("   Writer %d done\n", args->index);
  }
  gpr_mu_unlock(args->mu);
}
Example #6
static void do_kick_all(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error_unused) {
  grpc_error *error = GRPC_ERROR_NONE;
  grpc_pollset *pollset = arg;
  gpr_mu_lock(&pollset->pollable.po.mu);
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->pollable != &pollset->pollable) {
        gpr_mu_lock(&worker->pollable->po.mu);
      }
      if (worker->initialized_cv) {
        worker->kicked = true;
        gpr_cv_signal(&worker->cv);
      } else {
        append_error(&error, grpc_wakeup_fd_wakeup(&worker->pollable->wakeup),
                     "pollset_shutdown");
      }
      if (worker->pollable != &pollset->pollable) {
        gpr_mu_unlock(&worker->pollable->po.mu);
      }

      worker = worker->links[PWL_POLLSET].next;
    } while (worker != pollset->root_worker);
  }
  pollset->kick_alls_pending--;
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
  gpr_mu_unlock(&pollset->pollable.po.mu);
  GRPC_LOG_IF_ERROR("kick_all", error);
}
Example #7
File: server.c Project: penser/grpc
void grpc_server_listener_destroy_done(void *s) {
  grpc_server *server = s;
  gpr_mu_lock(&server->mu);
  server->listeners_destroyed++;
  gpr_cv_signal(&server->cv);
  gpr_mu_unlock(&server->mu);
}
Example #8
File: iomgr.c Project: Abioy/kythe
void grpc_iomgr_unref(void) {
  gpr_mu_lock(&g_mu);
  if (0 == --g_refs) {
    gpr_cv_signal(&g_rcv);
  }
  gpr_mu_unlock(&g_mu);
}
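The unref only signals when the count reaches zero, which implies a shutdown path blocked until the last ref is gone. A sketch of that waiting side under the same g_mu/g_refs/g_rcv globals (the function name is invented; this is not the real iomgr shutdown code):

/* Hypothetical waiter for the refcount pattern above: sleep on g_rcv until
   grpc_iomgr_unref drops g_refs to zero and signals. */
static void wait_for_refs_to_drain(void) {
  gpr_mu_lock(&g_mu);
  while (g_refs != 0) {
    gpr_cv_wait(&g_rcv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&g_mu);
}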
Example #9
static void worker_thread(void *arg) {
  struct cpu_test *ct = (struct cpu_test *)arg;
  uint32_t cpu;
  unsigned r = 12345678;
  unsigned i, j;
  for (i = 0; i < 1000 / GRPC_TEST_SLOWDOWN_FACTOR; i++) {
    /* run for a bit - just calculate something random. */
    for (j = 0; j < 1000000 / GRPC_TEST_SLOWDOWN_FACTOR; j++) {
      r = (r * 17) & ((r - i) | (r * i));
    }
    cpu = gpr_cpu_current_cpu();
    GPR_ASSERT(cpu < ct->ncores);
    gpr_mu_lock(&ct->mu);
    ct->used[cpu] = 1;
    for (j = 0; j < ct->ncores; j++) {
      if (!ct->used[j]) break;
    }
    gpr_mu_unlock(&ct->mu);
    if (j == ct->ncores) {
      break; /* all cpus have been used - no further use in running this test */
    }
  }
  gpr_mu_lock(&ct->mu);
  ct->r = r; /* make it look like we care about r's value... */
  ct->nthreads--;
  if (ct->nthreads == 0) {
    ct->is_done = 1;
    gpr_cv_signal(&ct->done_cv);
  }
  gpr_mu_unlock(&ct->mu);
}
Example #10
static void grpc_rb_event_unblocking_func(void *arg) {
  (void)arg;
  gpr_mu_lock(&event_queue.mu);
  event_queue.abort = true;
  gpr_cv_signal(&event_queue.cv);
  gpr_mu_unlock(&event_queue.mu);
}
Example #11
// Reads and verifies the specified number of records. Reader can also be
// stopped via gpr_cv_signal(&args->stop). Sleeps for 'read_interval_in_msec'
// between read iterations.
static void reader_thread(void* arg) {
  reader_thread_args* args = (reader_thread_args*)arg;
  if (VERBOSE) {
    printf("   Reader starting\n");
  }
  gpr_timespec interval = gpr_time_from_micros(
      args->read_iteration_interval_in_msec * 1000, GPR_TIMESPAN);
  gpr_mu_lock(args->mu);
  int records_read = 0;
  int num_iterations = 0;
  int counter = 0;
  while (!args->stop_flag && records_read < args->total_records) {
    gpr_cv_wait(&args->stop, args->mu, interval);
    if (!args->stop_flag) {
      records_read += perform_read_iteration(args->record_size);
      GPR_ASSERT(records_read <= args->total_records);
      if (VERBOSE && (counter++ == 100000)) {
        printf("   Reader: %d out of %d read\n", records_read,
               args->total_records);
        counter = 0;
      }
      ++num_iterations;
    }
  }
  // Done
  args->running = 0;
  gpr_cv_signal(args->done);
  if (VERBOSE) {
    printf("   Reader: records: %d, iterations: %d\n", records_read,
           num_iterations);
  }
  gpr_mu_unlock(args->mu);
}
Example #12
void grpc_iomgr_unregister_object(grpc_iomgr_object *obj) {
  gpr_mu_lock(&g_mu);
  obj->next->prev = obj->prev;
  obj->prev->next = obj->next;
  gpr_cv_signal(&g_rcv);
  gpr_mu_unlock(&g_mu);
  gpr_free(obj->name);
}
Example #13
static void second_read_callback(void *arg /* fd_change_data */, int success) {
  fd_change_data *fdc = arg;

  gpr_mu_lock(&fdc->mu);
  fdc->cb_that_ran = second_read_callback;
  gpr_cv_signal(&fdc->cv);
  gpr_mu_unlock(&fdc->mu);
}
Example #14
/* Indicate that a thread is done, by decrementing m->done
   and signalling done_cv if m->done==0. */
static void mark_thread_done(struct test *m) {
  gpr_mu_lock(&m->mu);
  GPR_ASSERT(m->done != 0);
  m->done--;
  if (m->done == 0) {
    gpr_cv_signal(&m->done_cv);
  }
  gpr_mu_unlock(&m->mu);
}
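The countdown above pairs with a main thread blocked until m->done hits zero, mirroring the wait loops seen in Examples #29 and #30. A minimal sketch of that wait, assuming the same struct test fields (the helper name is hypothetical):

/* Hypothetical main-thread side of mark_thread_done: block until every
   worker thread has decremented m->done and the last one has signalled. */
static void wait_threads_done(struct test *m) {
  gpr_mu_lock(&m->mu);
  while (m->done != 0) {
    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&m->mu);
}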
Example #15
File: thd_test.c Project: rootusr/grpc
/* A thread body. Decrement t->n, and if it becomes zero, set t->is_done. */
static void thd_body(void *v) {
  struct test *t = v;
  gpr_mu_lock(&t->mu);
  t->n--;
  if (t->n == 0) {
    t->is_done = 1;
    gpr_cv_signal(&t->done_cv);
  }
  gpr_mu_unlock(&t->mu);
}
Example #16
/* Called when the listen FD can be safely shut down.
   Close the listen FD and signal that the server can be shut down. */
static void listen_shutdown_cb(void *arg /*server*/, int success) {
  server *sv = arg;

  grpc_fd_orphan(sv->em_fd, NULL, NULL);

  gpr_mu_lock(&sv->mu);
  sv->done = 1;
  gpr_cv_signal(&sv->done_cv);
  gpr_mu_unlock(&sv->mu);
}
Example #17
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
  if (specific_worker != NULL) {
    if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
      for (specific_worker = p->root_worker.next;
           specific_worker != &p->root_worker;
           specific_worker = specific_worker->next) {
        gpr_cv_signal(&specific_worker->cv);
      }
      p->kicked_without_pollers = 1;
    } else {
      gpr_cv_signal(&specific_worker->cv);
    }
  } else {
    specific_worker = pop_front_worker(p);
    if (specific_worker != NULL) {
      push_back_worker(p, specific_worker);
      gpr_cv_signal(&specific_worker->cv);
    } else {
      p->kicked_without_pollers = 1;
    }
  }
}
Example #18
static grpc_error *non_polling_poller_kick(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
    grpc_pollset_worker *specific_worker) {
  non_polling_poller *p = (non_polling_poller *)pollset;
  if (specific_worker == NULL) specific_worker = (grpc_pollset_worker *)p->root;
  if (specific_worker != NULL) {
    non_polling_worker *w = (non_polling_worker *)specific_worker;
    if (!w->kicked) {
      w->kicked = true;
      gpr_cv_signal(&w->cv);
    }
  }
  return GRPC_ERROR_NONE;
}
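The kicked flag matters because a kick can arrive before the worker starts waiting; the flag records it so the wake-up is not lost. The consuming loop looks like the one in Example #28; a sketch against the non_polling_worker fields above (mutex and deadline handling are assumptions, illustration only):

/* Hypothetical wait loop consuming the kicked flag set by
   non_polling_poller_kick; mu is the pollset mutex the caller holds.
   gpr_cv_wait returns non-zero on deadline expiry. */
static void wait_until_kicked(non_polling_worker *w, gpr_mu *mu,
                              gpr_timespec deadline) {
  while (!w->kicked) {
    if (gpr_cv_wait(&w->cv, mu, deadline)) break;
  }
}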
Example #19
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
  if (specific_worker != NULL) {
    if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
      for (specific_worker =
               p->root_worker.links[GRPC_POLLSET_WORKER_LINK_POLLSET].next;
           specific_worker != &p->root_worker;
           specific_worker =
               specific_worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next) {
        specific_worker->kicked = 1;
        gpr_cv_signal(&specific_worker->cv);
      }
      p->kicked_without_pollers = 1;
      if (p->is_iocp_worker) {
        grpc_iocp_kick();
      }
    } else {
      if (p->is_iocp_worker) {
        if (g_active_poller == specific_worker) {
          grpc_iocp_kick();
        }
      } else {
        specific_worker->kicked = 1;
        gpr_cv_signal(&specific_worker->cv);
      }
    }
  } else {
    specific_worker =
        pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
    if (specific_worker != NULL) {
      grpc_pollset_kick(p, specific_worker);
    } else if (p->is_iocp_worker) {
      grpc_iocp_kick();
    } else {
      p->kicked_without_pollers = 1;
    }
  }
}
Example #20
/* Currently we assume all channel operations should just be pushed up. */
static void lb_channel_op(grpc_channel_element *elem,
                          grpc_channel_element *from_elem,
                          grpc_channel_op *op) {
  lb_channel_data *chand = elem->channel_data;
  grpc_channel_element *back;
  int calling_back = 0;

  switch (op->dir) {
    case GRPC_CALL_UP:
      gpr_mu_lock(&chand->mu);
      back = chand->back;
      if (back) {
        chand->calling_back++;
        calling_back = 1;
      }
      gpr_mu_unlock(&chand->mu);
      if (back) {
        back->filter->channel_op(chand->back, elem, op);
      } else if (op->type == GRPC_TRANSPORT_GOAWAY) {
        gpr_slice_unref(op->data.goaway.message);
      }
      break;
    case GRPC_CALL_DOWN:
      grpc_channel_next_op(elem, op);
      break;
  }

  gpr_mu_lock(&chand->mu);
  switch (op->type) {
    case GRPC_TRANSPORT_CLOSED:
      chand->disconnected = 1;
      maybe_destroy_channel(grpc_channel_stack_from_top_element(elem));
      break;
    case GRPC_CHANNEL_GOAWAY:
      chand->sent_goaway = 1;
      break;
    case GRPC_CHANNEL_DISCONNECT:
    case GRPC_TRANSPORT_GOAWAY:
    case GRPC_ACCEPT_CALL:
      break;
  }

  if (calling_back) {
    chand->calling_back--;
    gpr_cv_signal(&chand->cv);
    maybe_destroy_channel(grpc_channel_stack_from_top_element(elem));
  }
  gpr_mu_unlock(&chand->mu);
}
Example #21
File: alarm_test.c Project: Abioy/kythe
/* Called when an alarm expires. */
static void alarm_cb(void *arg /* alarm_arg */, int success) {
  alarm_arg *a = arg;
  gpr_mu_lock(&a->mu);
  if (success) {
    a->counter++;
    a->done_success_ctr++;
  } else {
    a->done_cancel_ctr++;
  }
  a->done = 1;
  a->success = success;
  gpr_cv_signal(&a->cv);
  gpr_mu_unlock(&a->mu);
  grpc_iomgr_add_callback(followup_cb, &a->fcb_arg);
}
Example #22
static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset *pollset,
                                        grpc_closure *closure) {
  non_polling_poller *p = (non_polling_poller *)pollset;
  GPR_ASSERT(closure != NULL);
  p->shutdown = closure;
  if (p->root == NULL) {
    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
  } else {
    non_polling_worker *w = p->root;
    do {
      gpr_cv_signal(&w->cv);
      w = w->next;
    } while (w != p->root);
  }
}
Example #23
void grpc_rb_event_queue_enqueue(void (*callback)(void*),
                                 void *argument) {
  grpc_rb_event *event = gpr_malloc(sizeof(grpc_rb_event));
  event->callback = callback;
  event->argument = argument;
  event->next = NULL;
  gpr_mu_lock(&event_queue.mu);
  if (event_queue.tail == NULL) {
    event_queue.head = event_queue.tail = event;
  } else {
    event_queue.tail->next = event;
    event_queue.tail = event;
  }
  gpr_cv_signal(&event_queue.cv);
  gpr_mu_unlock(&event_queue.mu);
}
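The enqueue side signals once per pushed event; the consumer that pairs with it (and with the abort flag set in Example #10) pops under the same lock. A hedged sketch of that dequeue, reusing the event_queue fields from the example (not the actual Ruby extension code):

/* Hypothetical consumer: block until an event is queued or the queue is
   aborted (see grpc_rb_event_unblocking_func in Example #10); returns NULL
   on abort. Field names follow the enqueue side above. */
static grpc_rb_event *dequeue_event_sketch(void) {
  grpc_rb_event *event;
  gpr_mu_lock(&event_queue.mu);
  while (event_queue.head == NULL && !event_queue.abort) {
    gpr_cv_wait(&event_queue.cv, &event_queue.mu,
                gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (event_queue.abort) {
    event = NULL;
  } else {
    event = event_queue.head;
    event_queue.head = event->next;
    if (event_queue.head == NULL) event_queue.tail = NULL;
  }
  gpr_mu_unlock(&event_queue.mu);
  return event;
}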
Example #24
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT ==
      worker_remove(&worker->pollable->root_worker, PWL_POLLABLE, worker)) {
    gpr_cv_signal(&worker->pollable->root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (pollset_is_pollable_fd(pollset, worker->pollable)) {
    UNREF_BY(exec_ctx, (grpc_fd *)worker->pollable, 2, "one_poll");
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}
Example #25
File: iomgr.c Project: Abioy/kythe
void grpc_iomgr_add_delayed_callback(grpc_iomgr_cb_func cb, void *cb_arg,
                                     int success) {
  delayed_callback *dcb = gpr_malloc(sizeof(delayed_callback));
  dcb->cb = cb;
  dcb->cb_arg = cb_arg;
  dcb->success = success;
  gpr_mu_lock(&g_mu);
  dcb->next = NULL;
  if (!g_cbs_tail) {
    g_cbs_head = g_cbs_tail = dcb;
  } else {
    g_cbs_tail->next = dcb;
    g_cbs_tail = dcb;
  }
  gpr_cv_signal(&g_cv);
  gpr_mu_unlock(&g_mu);
}
Example #26
void grpc_iomgr_add_delayed_callback(grpc_iomgr_closure *closure, int success) {
  closure->success = success;
  GPR_ASSERT(closure->cb);
  gpr_mu_lock(&g_mu);
  assert_not_scheduled_locked(closure);
  closure->next = NULL;
  if (!g_cbs_tail) {
    g_cbs_head = g_cbs_tail = closure;
  } else {
    g_cbs_tail->next = closure;
    g_cbs_tail = closure;
  }
  if (g_shutdown) {
    gpr_cv_signal(&g_rcv);
  }
  gpr_mu_unlock(&g_mu);
}
Example #27
static void on_compute_engine_detection_http_response(
    void *user_data, const grpc_httpcli_response *response) {
  compute_engine_detector *detector = (compute_engine_detector *)user_data;
  if (response != NULL && response->status == 200 && response->hdr_count > 0) {
    /* Internet providers can return a generic response to all requests, so
       it is also necessary to check that the metadata header is present. */
    size_t i;
    for (i = 0; i < response->hdr_count; i++) {
      grpc_httpcli_header *header = &response->hdrs[i];
      if (strcmp(header->key, "Metadata-Flavor") == 0 &&
          strcmp(header->value, "Google") == 0) {
        detector->success = 1;
        break;
      }
    }
  }
  gpr_mu_lock(&detector->mu);
  detector->is_done = 1;
  gpr_mu_unlock(&detector->mu);
  gpr_cv_signal(&detector->cv);
}
Example #28
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  int added_worker = 0;
  worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].next =
      worker->links[GRPC_POLLSET_WORKER_LINK_POLLSET].prev =
          worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].next =
              worker->links[GRPC_POLLSET_WORKER_LINK_GLOBAL].prev = NULL;
  worker->kicked = 0;
  worker->pollset = pollset;
  gpr_cv_init(&worker->cv);
  if (grpc_timer_check(exec_ctx, now, &deadline)) {
    goto done;
  }
  if (!pollset->kicked_without_pollers && !pollset->shutting_down) {
    if (g_active_poller == NULL) {
      grpc_pollset_worker *next_worker;
      /* become poller */
      pollset->is_iocp_worker = 1;
      g_active_poller = worker;
      gpr_mu_unlock(&grpc_polling_mu);
      grpc_iocp_work(exec_ctx, deadline);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&grpc_polling_mu);
      pollset->is_iocp_worker = 0;
      g_active_poller = NULL;
      /* try to get a worker from this pollset's worker list */
      next_worker = pop_front_worker(&pollset->root_worker,
                                     GRPC_POLLSET_WORKER_LINK_POLLSET);
      if (next_worker == NULL) {
        /* try to get a worker from the global list */
        next_worker = pop_front_worker(&g_global_root_worker,
                                       GRPC_POLLSET_WORKER_LINK_GLOBAL);
      }
      if (next_worker != NULL) {
        next_worker->kicked = 1;
        gpr_cv_signal(&next_worker->cv);
      }

      if (pollset->shutting_down && pollset->on_shutdown != NULL) {
        grpc_exec_ctx_enqueue(exec_ctx, pollset->on_shutdown, 1);
        pollset->on_shutdown = NULL;
      }
      goto done;
    }
    push_front_worker(&g_global_root_worker, GRPC_POLLSET_WORKER_LINK_GLOBAL,
                      worker);
    push_front_worker(&pollset->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET,
                      worker);
    added_worker = 1;
    while (!worker->kicked) {
      if (gpr_cv_wait(&worker->cv, &grpc_polling_mu, deadline)) {
        break;
      }
    }
  } else {
    pollset->kicked_without_pollers = 0;
  }
done:
  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
    gpr_mu_unlock(&grpc_polling_mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&grpc_polling_mu);
  }
  if (added_worker) {
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_GLOBAL);
    remove_worker(worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
  }
  gpr_cv_destroy(&worker->cv);
}
Example #29
static void multiple_writers_single_reader(int circular_log) {
  /* Sleep interval between read iterations. */
  static const int32_t READ_ITERATION_INTERVAL_IN_MSEC = 10;
  /* Number of records written by each writer. */
  static const int32_t NUM_RECORDS_PER_WRITER = 10 * 1024 * 1024;
  /* Maximum record size. */
  static const size_t MAX_RECORD_SIZE = 10;
  int ix;
  gpr_thd_id id;
  gpr_cv writers_done;
  int writers_count = NUM_WRITERS;
  gpr_mu writers_mu; /* protects writers_done and writers_count */
  writer_thread_args writers[NUM_WRITERS];
  gpr_cv reader_done;
  gpr_mu reader_mu; /* protects reader_done and reader.running */
  reader_thread_args reader;
  int32_t record_size = 1 + rand() % MAX_RECORD_SIZE;
  printf("   Record size: %d\n", record_size);
  /* Create and start writers. */
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  for (ix = 0; ix < NUM_WRITERS; ++ix) {
    writers[ix].index = ix;
    writers[ix].record_size = record_size;
    writers[ix].num_records = NUM_RECORDS_PER_WRITER;
    writers[ix].done = &writers_done;
    writers[ix].count = &writers_count;
    writers[ix].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[ix], NULL);
  }
  /* Start reader. */
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  /* Wait for writers to finish. */
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    /* Stop reader. */
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  /* wait for reader to finish */
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    /* Assert that there were no out-of-space errors. */
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  printf("   Reader: finished\n");
}
Example #30
static void multiple_writers_single_reader(int circular_log) {
  // Sleep interval between read iterations.
  static const int READ_ITERATION_INTERVAL_IN_MSEC = 10;
  // Maximum record size.
  static const size_t MAX_RECORD_SIZE = 20;
  // Number of records written by each writer. This is sized such that we
  // will write through the entire log ~10 times.
  const int NUM_RECORDS_PER_WRITER =
      (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) /
      NUM_WRITERS;
  size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1;
  // Create and start writers.
  writer_thread_args writers[NUM_WRITERS];
  int writers_count = NUM_WRITERS;
  gpr_cv writers_done;
  gpr_mu writers_mu;  // protects writers_done and writers_count
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  gpr_thd_id id;
  for (int i = 0; i < NUM_WRITERS; ++i) {
    writers[i].index = i;
    writers[i].record_size = record_size;
    writers[i].num_records = NUM_RECORDS_PER_WRITER;
    writers[i].done = &writers_done;
    writers[i].count = &writers_count;
    writers[i].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[i], NULL);
  }
  // Start reader.
  gpr_cv reader_done;
  gpr_mu reader_mu;  // protects reader_done and reader.running
  reader_thread_args reader;
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  // Wait for writers to finish.
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    // Stop reader.
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  // wait for reader to finish
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
  }
  if (circular_log) {
    // Assert that there were no out-of-space errors.
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  if (VERBOSE) {
    printf("   Reader: finished\n");
  }
}