Example #1
grpc_resource_user *grpc_resource_user_create(
    grpc_resource_quota *resource_quota, const char *name) {
  grpc_resource_user *resource_user = gpr_malloc(sizeof(*resource_user));
  resource_user->resource_quota =
      grpc_resource_quota_internal_ref(resource_quota);
  grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
                    resource_user);
  grpc_closure_init(&resource_user->add_to_free_pool_closure,
                    &ru_add_to_free_pool, resource_user);
  grpc_closure_init(&resource_user->post_reclaimer_closure[0],
                    &ru_post_benign_reclaimer, resource_user);
  grpc_closure_init(&resource_user->post_reclaimer_closure[1],
                    &ru_post_destructive_reclaimer, resource_user);
  grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
                    resource_user);
  gpr_mu_init(&resource_user->mu);
  gpr_atm_rel_store(&resource_user->refs, 1);
  gpr_atm_rel_store(&resource_user->shutdown, 0);
  resource_user->free_pool = 0;
  grpc_closure_list_init(&resource_user->on_allocated);
  resource_user->allocating = false;
  resource_user->added_to_free_pool = false;
  resource_user->reclaimers[0] = NULL;
  resource_user->reclaimers[1] = NULL;
  for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
    resource_user->links[i].next = resource_user->links[i].prev = NULL;
  }
  if (name != NULL) {
    resource_user->name = gpr_strdup(name);
  } else {
    gpr_asprintf(&resource_user->name, "anonymous_resource_user_%" PRIxPTR,
                 (intptr_t)resource_user);
  }
  return resource_user;
}
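The release store that seeds resource_user->refs above pairs with acquire semantics on the final unref. A minimal sketch of that ref/unref discipline, using C11 atomics instead of the gpr_atm wrappers (all names here are illustrative, not from the gRPC source):

#include <stdatomic.h>
#include <stdlib.h>

typedef struct {
  atomic_long refs;
  /* ... payload fields initialized before the object is published ... */
} obj;

obj *obj_create(void) {
  obj *o = malloc(sizeof(*o));
  /* Release store: the initialization above becomes visible to any thread
     that later observes the refcount with acquire semantics. */
  atomic_store_explicit(&o->refs, 1, memory_order_release);
  return o;
}

void obj_ref(obj *o) {
  /* Taking an extra reference needs no ordering of its own. */
  atomic_fetch_add_explicit(&o->refs, 1, memory_order_relaxed);
}

void obj_unref(obj *o) {
  /* acq_rel: the thread that drops the last reference must see every
     write made by threads that released their references earlier. */
  if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1) {
    free(o);
  }
}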
Example #2
static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->set_state_mu);
    gpr_mu_init(&r->watcher_mu);
  }

  gpr_atm_rel_store(&r->refst, 1);
  gpr_atm_rel_store(&r->readst, NOT_READY);
  gpr_atm_rel_store(&r->writest, NOT_READY);
  gpr_atm_rel_store(&r->shutdown, 0);
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  return r;
}
Example #3
static grpc_server_retry_throttle_data* grpc_server_retry_throttle_data_create(
    int max_milli_tokens, int milli_token_ratio,
    grpc_server_retry_throttle_data* old_throttle_data) {
  grpc_server_retry_throttle_data* throttle_data =
      gpr_malloc(sizeof(*throttle_data));
  memset(throttle_data, 0, sizeof(*throttle_data));
  gpr_ref_init(&throttle_data->refs, 1);
  throttle_data->max_milli_tokens = max_milli_tokens;
  throttle_data->milli_token_ratio = milli_token_ratio;
  int initial_milli_tokens = max_milli_tokens;
  // If there was a pre-existing entry for this server name, initialize
  // the token count by scaling proportionately to the old data.  This
  // ensures that if we're already throttling retries on the old scale,
  // we will start out doing the same thing on the new one.
  if (old_throttle_data != NULL) {
    double token_fraction =
        (int)gpr_atm_acq_load(&old_throttle_data->milli_tokens) /
        (double)old_throttle_data->max_milli_tokens;
    initial_milli_tokens = (int)(token_fraction * max_milli_tokens);
  }
  gpr_atm_rel_store(&throttle_data->milli_tokens,
                    (gpr_atm)initial_milli_tokens);
  // If there was a pre-existing entry, mark it as stale and give it a
  // pointer to the new entry, which is its replacement.
  if (old_throttle_data != NULL) {
    grpc_server_retry_throttle_data_ref(throttle_data);
    gpr_atm_rel_store(&old_throttle_data->replacement, (gpr_atm)throttle_data);
  }
  return throttle_data;
}
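A worked instance of the proportional scaling above, with assumed numbers: if the old entry allowed 10000 milli-tokens and 2500 remain (25%), a new limit of 20000 starts at 5000, so the throttling level carries over. A self-contained sketch (the helper is hypothetical, not part of the gRPC API):

#include <assert.h>

static int scaled_initial_milli_tokens(int old_tokens, int old_max,
                                       int new_max) {
  double token_fraction = old_tokens / (double)old_max;
  return (int)(token_fraction * new_max);
}

int main(void) {
  /* 2500 of 10000 left (25%) => the new 20000 limit starts at 5000. */
  assert(scaled_initial_milli_tokens(2500, 10000, 20000) == 5000);
  return 0;
}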
Example #4
static void cl_block_initialize(cl_block* block, char* buffer) {
  block->buffer = buffer;
  gpr_atm_rel_store(&block->writer_lock, 0);
  gpr_atm_rel_store(&block->reader_lock, 0);
  gpr_atm_rel_store(&block->bytes_committed, 0);
  block->bytes_read = 0;
  cl_block_list_struct_initialize(&block->link, block);
}
Example #5
/* Gets the next block to read and tries to free the 'prev' block (if not
   NULL). Returns NULL when the end has been reached. */
static cl_block *cl_next_block_to_read(cl_block *prev) {
  cl_block *block = NULL;
  if (g_log.read_iterator_state == g_log.num_cores) {
    /* We are traversing the dirty list; find the next dirty block. */
    if (prev != NULL) {
      /* Try to free the previous block if there is no unread data. This block
         may have unread data if a previously incomplete record was completed
         between read_next() calls. */
      block = prev->link.next->block;
      if (cl_block_try_disable_access(prev, 0 /* do not discard data */)) {
        cl_block_list_remove(&g_log.dirty_block_list, prev);
        cl_block_list_insert_at_head(&g_log.free_block_list, prev);
        gpr_atm_rel_store(&g_log.is_full, 0);
      }
    } else {
      block = cl_block_list_head(&g_log.dirty_block_list);
    }
    if (block != NULL) {
      return block;
    }
    /* We are done with the dirty list; moving on to core-local blocks. */
  }
  while (g_log.read_iterator_state > 0) {
    g_log.read_iterator_state--;
    block = cl_core_local_block_get_block(
        &g_log.core_local_blocks[g_log.read_iterator_state]);
    if (block != NULL) {
      return block;
    }
  }
  return NULL;
}
Example #6
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  grpc_subchannel_call_holder *holder = arg;
  gpr_mu_lock(&holder->mu);
  GPR_ASSERT(holder->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (holder->connected_subchannel == NULL) {
    gpr_atm_no_barrier_store(&holder->subchannel_call, 1);
    fail_locked(exec_ctx, holder,
                GRPC_ERROR_CREATE_REFERENCING("Failed to create subchannel",
                                              &error, 1));
  } else if (1 == gpr_atm_acq_load(&holder->subchannel_call)) {
    /* already cancelled before subchannel became ready */
    fail_locked(exec_ctx, holder,
                GRPC_ERROR_CREATE_REFERENCING(
                    "Cancelled before creating subchannel", &error, 1));
  } else {
    gpr_atm_rel_store(
        &holder->subchannel_call,
        (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
            exec_ctx, holder->connected_subchannel, holder->pollent));
    retry_waiting_locked(exec_ctx, holder);
  }
  gpr_mu_unlock(&holder->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
Example #7
static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->mu);
  }

  r->shutdown = 0;
  r->read_closure = CLOSURE_NOT_READY;
  r->write_closure = CLOSURE_NOT_READY;
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  r->on_done_closure = NULL;
  r->closed = 0;
  r->released = 0;
  // The last operation on r before returning it should be a release-store
  // so that all the above fields are globally visible before the value of
  // r can escape to another thread. Our refcount itself needs a
  // release-store, so we use that here.
  gpr_atm_rel_store(&r->refst, 1);
  return r;
}
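The comment above describes the publish/consume idiom in general terms. A minimal sketch of both sides with C11 atomics (the struct and names are invented for illustration): the writer initializes fields and then does a release store; a reader that observes the pointer via an acquire load is guaranteed to see those fields.

#include <stdatomic.h>
#include <stddef.h>

typedef struct {
  int fd;
  int shutdown;
} fd_obj;

static _Atomic(fd_obj *) g_published;

void publisher(fd_obj *r) {
  r->fd = 42;     /* plain stores... */
  r->shutdown = 0;
  /* ...made visible by the release store that publishes the pointer. */
  atomic_store_explicit(&g_published, r, memory_order_release);
}

fd_obj *consumer(void) {
  /* Acquire load pairs with the release store above: a non-NULL result
     implies r->fd and r->shutdown are fully initialized. */
  return atomic_load_explicit(&g_published, memory_order_acquire);
}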
Example #8
static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->mu);
  }

  gpr_atm_rel_store(&r->refst, 1);
  r->shutdown = 0;
  r->read_closure = CLOSURE_NOT_READY;
  r->write_closure = CLOSURE_NOT_READY;
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  r->on_done_closure = NULL;
  r->closed = 0;
  r->released = 0;
  return r;
}
Example #9
/* Convert a timespec to milliseconds:
   - Very small or negative poll times are clamped to zero to do a
     non-blocking poll (which becomes spin polling)
   - Other small values are rounded up to one millisecond
   - Polls longer than a millisecond are rounded up to the next nearest
     millisecond to avoid spinning
   - Infinite timeouts are converted to -1 */
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0;
  }

  static const gpr_timespec round_up = {
      .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
}
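The rounding rules reduce to "clamp to zero, else round nanoseconds up to the next millisecond". A hedged, self-contained restatement with plain integers (the constant and names are assumed for illustration):

#include <assert.h>
#include <stdint.h>

#define NS_PER_MS_DEMO 1000000LL /* assumed; mirrors GPR_NS_PER_MS */

static int ns_to_millis_timeout(int64_t ns_until_deadline) {
  if (ns_until_deadline <= 0) return 0; /* non-blocking poll */
  /* Round up so we never wake before the deadline and spin. */
  int64_t ms = (ns_until_deadline + NS_PER_MS_DEMO - 1) / NS_PER_MS_DEMO;
  return ms >= 1 ? (int)ms : 1;
}

int main(void) {
  assert(ns_to_millis_timeout(-5) == 0);      /* past deadline */
  assert(ns_to_millis_timeout(500000) == 1);  /* 0.5 ms -> 1 ms */
  assert(ns_to_millis_timeout(2500000) == 3); /* 2.5 ms -> 3 ms */
  return 0;
}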

static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (this can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (they would both point to the same polling island
     anyway). */
  /* Use a release store to match the acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
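For reference, the acquire side that the comment refers to would look like the following sketch (the real fd_get_read_notifier in gRPC may differ in name and signature):

static grpc_pollset *fd_get_read_notifier_sketch(grpc_fd *fd) {
  /* Acquire load pairs with the release store in fd_become_readable, so
     the returned pollset pointer is safe to dereference. */
  return (grpc_pollset *)gpr_atm_acq_load(&fd->read_notifier_pollset);
}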
Example #10
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  call_data *calld = arg;
  gpr_mu_lock(&calld->mu);
  GPR_ASSERT(calld->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
    gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
                                     "Failed to create subchannel", &error, 1));
  } else if (1 == gpr_atm_acq_load(&calld->subchannel_call)) {
    /* already cancelled before subchannel became ready */
    fail_locked(exec_ctx, calld,
                GRPC_ERROR_CREATE_REFERENCING(
                    "Cancelled before creating subchannel", &error, 1));
  } else {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *new_error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent,
        &subchannel_call);
    if (new_error != GRPC_ERROR_NONE) {
      new_error = grpc_error_add_child(new_error, error);
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, new_error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
  }
  gpr_mu_unlock(&calld->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
Example #11
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
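The other half of this state machine is the notify_on path that parks a closure in the same atomic. A hedged sketch of its shape (schedule() is a hypothetical stand-in for however the closure gets dispatched; the real gRPC code differs in detail):

static void notify_on_sketch(gpr_atm *st, grpc_iomgr_closure *closure) {
  gpr_intptr state = gpr_atm_acq_load(st);
  for (;;) {
    if (state == READY) {
      /* Event already fired: consume it and run the closure now. */
      if (gpr_atm_rel_cas(st, READY, NOT_READY)) {
        schedule(closure); /* hypothetical dispatch helper */
        return;
      }
    } else if (state == NOT_READY) {
      /* Park the closure pointer for a later set_ready_locked to find. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        return;
      }
    } else {
      abort(); /* a closure is already parked; double notify_on is a bug */
    }
    state = gpr_atm_acq_load(st); /* CAS lost a race: reload and retry */
  }
}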
Example #12
static void must_succeed(grpc_exec_ctx *exec_ctx, void *argsp,
                         grpc_error *err) {
  args_struct *args = argsp;
  GPR_ASSERT(err == GRPC_ERROR_NONE);
  GPR_ASSERT(args->addrs != NULL);
  GPR_ASSERT(args->addrs->naddrs > 0);
  gpr_atm_rel_store(&args->done_atm, 1);
}
Example #13
static void poll_server_until_read_done(test_tcp_server *server,
                                        gpr_event *signal_when_done) {
  gpr_atm_rel_store(&state.done_atm, 0);
  gpr_thd_id id;
  poll_args *pa = gpr_malloc(sizeof(*pa));
  pa->server = server;
  pa->signal_when_done = signal_when_done;
  gpr_thd_new(&id, actually_poll_server, pa, NULL);
}
Example #14
void gpr_event_set(gpr_event *ev, void *value) {
  struct sync_array_s *s = hash(ev);
  gpr_mu_lock(&s->mu);
  GPR_ASSERT(gpr_atm_acq_load(&ev->state) == 0);
  gpr_atm_rel_store(&ev->state, (gpr_atm)value);
  gpr_cv_broadcast(&s->cv);
  gpr_mu_unlock(&s->mu);
  GPR_ASSERT(value != NULL);
}
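The matching read side is a single acquire load; a sketch (the actual gpr_event_get is essentially this):

void *gpr_event_get_sketch(gpr_event *ev) {
  /* Acquire load pairs with the release store in gpr_event_set: a
     non-NULL result means the setter's prior writes are visible. */
  return (void *)gpr_atm_acq_load(&ev->state);
}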
Example #15
void grpc_server_shutdown_and_notify(grpc_server *server,
                                     grpc_completion_queue *cq, void *tag) {
  listener *l;
  shutdown_tag *sdt;
  channel_broadcaster broadcaster;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
                 (server, cq, tag));

  /* lock, and gather up some stuff to do */
  gpr_mu_lock(&server->mu_global);
  grpc_cq_begin_op(cq, tag);
  if (server->shutdown_published) {
    grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
                   NULL, gpr_malloc(sizeof(grpc_cq_completion)));
    gpr_mu_unlock(&server->mu_global);
    goto done;
  }
  server->shutdown_tags =
      gpr_realloc(server->shutdown_tags,
                  sizeof(shutdown_tag) * (server->num_shutdown_tags + 1));
  sdt = &server->shutdown_tags[server->num_shutdown_tags++];
  sdt->tag = tag;
  sdt->cq = cq;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    gpr_mu_unlock(&server->mu_global);
    goto done;
  }

  server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);

  channel_broadcaster_init(server, &broadcaster);

  gpr_atm_rel_store(&server->shutdown_flag, 1);

  /* collect all unregistered then registered calls */
  gpr_mu_lock(&server->mu_call);
  kill_pending_work_locked(&exec_ctx, server,
                           GRPC_ERROR_CREATE("Server Shutdown"));
  gpr_mu_unlock(&server->mu_call);

  maybe_finish_shutdown(&exec_ctx, server);
  gpr_mu_unlock(&server->mu_global);

  /* Shutdown listeners */
  for (l = server->listeners; l; l = l->next) {
    grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
    l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
  }

  channel_broadcaster_shutdown(&exec_ctx, &broadcaster, 1, 0);

done:
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #16
void grpc_fd_shutdown(grpc_fd *fd) {
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
  set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  GPR_ASSERT(ncb <= 2);
  process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
                    0 /* GPR_FALSE */);
}
Example #17
void gpr_cancellable_cancel(gpr_cancellable *c) {
  if (!gpr_cancellable_is_cancelled(c)) {
    int failures;
    int backoff = 1;
    do {
      struct gpr_cancellable_list_ *l;
      struct gpr_cancellable_list_ *nl;
      gpr_mu *omu = 0; /* one-element cache of a processed gpr_mu */
      gpr_cv *ocv = 0; /* one-element cache of a processed gpr_cv */
      gpr_mu_lock(&c->mu);
      gpr_atm_rel_store(&c->cancelled, 1);
      failures = 0;
      for (l = c->waiters.next; l != &c->waiters; l = nl) {
        nl = l->next;
        if (omu != l->mu) {
          omu = l->mu;
          if (gpr_mu_trylock(l->mu)) {
            gpr_mu_unlock(l->mu);
            l->next->prev = l->prev; /* remove *l from list */
            l->prev->next = l->next;
            /* allow unconditional dequeue in gpr_cv_cancellable_wait() */
            l->next = l;
            l->prev = l;
            ocv = 0; /* force broadcast */
          } else {
            failures++;
          }
        }
        if (ocv != l->cv) {
          ocv = l->cv;
          gpr_cv_broadcast(l->cv);
        }
      }
      gpr_mu_unlock(&c->mu);
      if (failures != 0) {
        if (backoff < 10) {
          volatile int i;
          for (i = 0; i != (1 << backoff); i++) {
          }
          backoff++;
        } else {
          gpr_event ev;
          gpr_event_init(&ev);
          gpr_event_wait(
              &ev, gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                gpr_time_from_micros(1000, GPR_TIMESPAN)));
        }
      }
    } while (failures != 0);
  }
}
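The retry loop's backoff, in isolation: attempts 1 through 9 spin 2^k empty iterations each, after which every failed pass sleeps roughly a millisecond. A small illustration of the schedule:

#include <stdio.h>

int main(void) {
  int total = 0;
  for (int backoff = 1; backoff < 10; backoff++) {
    total += 1 << backoff; /* 2, 4, 8, ..., 512 iterations per attempt */
  }
  /* prints 1022: total empty spins before switching to 1 ms sleeps */
  printf("empty spins before sleeping: %d\n", total);
  return 0;
}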
Example #18
void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
  gpr_atm_rel_store(&proxy->shutdown, 1);  // Signal proxy thread to shut down.
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  gpr_thd_join(proxy->thd);
  grpc_tcp_server_shutdown_listeners(&exec_ctx, proxy->server);
  grpc_tcp_server_unref(&exec_ctx, proxy->server);
  gpr_free(proxy->proxy_name);
  grpc_channel_args_destroy(proxy->channel_args);
  grpc_closure destroyed;
  grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset);
  grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed);
  gpr_free(proxy);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #19
void grpc_subchannel_call_holder_init(
    grpc_subchannel_call_holder *holder,
    grpc_subchannel_call_holder_pick_subchannel pick_subchannel,
    void *pick_subchannel_arg) {
  gpr_atm_rel_store(&holder->subchannel_call, 0);
  holder->pick_subchannel = pick_subchannel;
  holder->pick_subchannel_arg = pick_subchannel_arg;
  gpr_mu_init(&holder->mu);
  holder->subchannel = NULL;
  holder->waiting_ops = NULL;
  holder->waiting_ops_count = 0;
  holder->waiting_ops_capacity = 0;
  holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
}
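Elsewhere the holder's atomic is read back with acquire semantics and compared against a sentinel; a hedged sketch of those definitions (the real GET_CALL/CANCELLED_CALL macros in gRPC may differ):

/* Sentinel meaning "cancelled before a real call was created". */
#define CANCELLED_CALL_SKETCH ((grpc_subchannel_call *)1)

/* Acquire load pairs with the gpr_atm_rel_store publications above. */
#define GET_CALL_SKETCH(holder) \
  ((grpc_subchannel_call *)gpr_atm_acq_load(&(holder)->subchannel_call))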
Example #20
static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  gpr_slice_buffer_move_into(&state.temp_incoming_buffer,
                             &state.incoming_buffer);
  gpr_log(GPR_DEBUG, "got %" PRIuPTR " bytes, magic is %" PRIuPTR " bytes",
          state.incoming_buffer.length, strlen(magic_connect_string));
  if (state.incoming_buffer.length > strlen(magic_connect_string)) {
    gpr_atm_rel_store(&state.done_atm, 1);
    grpc_endpoint_shutdown(exec_ctx, state.tcp);
    grpc_endpoint_destroy(exec_ctx, state.tcp);
  } else {
    grpc_endpoint_read(exec_ctx, state.tcp, &state.temp_incoming_buffer,
                       &on_read);
  }
}
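The done_atm flag written here is consumed elsewhere with an acquire load; a hedged sketch of that consuming loop (names assumed from the surrounding test):

static void wait_until_read_done_sketch(void) {
  while (gpr_atm_acq_load(&state.done_atm) == 0) {
    /* drive the poller / server here until handle_read stores 1 */
  }
}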
Example #21
/* Constructor for call_data */
static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
                                     grpc_call_element *elem,
                                     grpc_call_element_args *args) {
  call_data *calld = elem->call_data;
  gpr_atm_rel_store(&calld->subchannel_call, 0);
  gpr_mu_init(&calld->mu);
  calld->connected_subchannel = NULL;
  calld->waiting_ops = NULL;
  calld->waiting_ops_count = 0;
  calld->waiting_ops_capacity = 0;
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  calld->owning_call = args->call_stack;
  calld->pollent = NULL;
  return GRPC_ERROR_NONE;
}
Example #22
/* External functions: primary stats_log interface */
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  gpr_int32 ix;
  /* Check cacheline alignment. */
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  /* Ensure at least as many blocks as there are cores. */
  g_log.num_blocks = GPR_MAX(
      g_log.num_cores, (size_in_mb << 20) >> CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  gpr_atm_rel_store(&g_log.is_full, 0);
  g_log.core_local_blocks = (cl_core_local_block *)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block *)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (ix = 0; ix < g_log.num_blocks; ++ix) {
    cl_block *block = g_log.blocks + ix;
    cl_block_initialize(block,
                        g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * ix));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}
Example #23
static void handle_read(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
  GPR_ASSERT(success);
  gpr_slice_buffer_move_into(&state.temp_incoming_buffer,
                             &state.incoming_buffer);
  gpr_log(GPR_DEBUG, "got %" PRIuPTR " bytes, magic is %" PRIuPTR " bytes",
          state.incoming_buffer.length, strlen(magic_connect_string));
  if (state.incoming_buffer.length > strlen(magic_connect_string)) {
    gpr_atm_rel_store(&state.done_atm, 1);
    grpc_endpoint_shutdown(exec_ctx, state.tcp);
    grpc_endpoint_destroy(exec_ctx, state.tcp);
  } else {
    grpc_endpoint_read(exec_ctx, state.tcp, &state.temp_incoming_buffer,
                       &on_read);
  }
}
Example #24
// Returns an error if the call has been cancelled.  Otherwise, sets the
// cancellation function to be called upon cancellation.
static grpc_error *set_cancel_func(grpc_call_element *elem,
                                   grpc_iomgr_cb_func func) {
  call_data *calld = (call_data *)elem->call_data;
  // Decode original state.
  gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
  grpc_error *original_error = GRPC_ERROR_NONE;
  grpc_closure *original_func = NULL;
  decode_cancel_state(original_state, &original_func, &original_error);
  // If error is set, return it.
  if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
  // Otherwise, store func.
  GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
                    grpc_schedule_on_exec_ctx);
  GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
  gpr_atm_rel_store(&calld->cancellation_state,
                    (gpr_atm)&calld->cancel_closure);
  return GRPC_ERROR_NONE;
}
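The assertion above relies on closures being at least 2-byte aligned, so bit 0 of the word can tag a grpc_error* while an untagged non-zero value is a grpc_closure*. A hedged sketch of the decode side (the real decode_cancel_state may differ):

static void decode_cancel_state_sketch(gpr_atm state, grpc_closure **func,
                                       grpc_error **error) {
  *func = NULL;
  *error = GRPC_ERROR_NONE;
  if (state & (gpr_atm)1) {
    /* Tagged value: strip bit 0 to recover the error pointer. */
    *error = (grpc_error *)(state & ~(gpr_atm)1);
  } else if (state != 0) {
    /* Untagged non-zero value: a parked cancellation closure. */
    *func = (grpc_closure *)state;
  }
}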
Example #25
void bad_server_thread(void *vargs) {
  struct server_thread_args *args = (struct server_thread_args *)vargs;

  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_resolved_address resolved_addr;
  struct sockaddr_storage *addr = (struct sockaddr_storage *)resolved_addr.addr;
  int port;
  grpc_tcp_server *s;
  grpc_error *error = grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s);
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  memset(&resolved_addr, 0, sizeof(resolved_addr));
  addr->ss_family = AF_INET;
  error = grpc_tcp_server_add_port(s, &resolved_addr, &port);
  GPR_ASSERT(GRPC_LOG_IF_ERROR("grpc_tcp_server_add_port", error));
  GPR_ASSERT(port > 0);
  gpr_asprintf(&args->addr, "localhost:%d", port);

  grpc_tcp_server_start(&exec_ctx, s, &args->pollset, 1, on_connect, args);
  gpr_event_set(&args->ready, (void *)1);

  gpr_mu_lock(args->mu);
  while (gpr_atm_acq_load(&args->stop) == 0) {
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec deadline =
        gpr_time_add(now, gpr_time_from_millis(100, GPR_TIMESPAN));

    grpc_pollset_worker *worker = NULL;
    if (!GRPC_LOG_IF_ERROR("pollset_work",
                           grpc_pollset_work(&exec_ctx, args->pollset, &worker,
                                             now, deadline))) {
      gpr_atm_rel_store(&args->stop, 1);
    }
    gpr_mu_unlock(args->mu);
    grpc_exec_ctx_finish(&exec_ctx);
    gpr_mu_lock(args->mu);
  }
  gpr_mu_unlock(args->mu);

  grpc_tcp_server_unref(&exec_ctx, s);

  grpc_exec_ctx_finish(&exec_ctx);

  gpr_free(args->addr);
}
Example #26
/* Allocates a new block and updates core id => block mapping. 'old_block'
   points to the block that the caller thinks is attached to
   'core_id'. 'old_block' may be NULL. Returns non-zero if:
   - allocated a new block OR
   - 'core_id' => 'old_block' mapping changed (another thread allocated a
     block before the lock was acquired). */
static int cl_allocate_core_local_block(gpr_int32 core_id,
                                        cl_block *old_block) {
  /* Now that we have the lock, check if core-local mapping has changed. */
  cl_core_local_block *core_local_block = &g_log.core_local_blocks[core_id];
  cl_block *block = cl_core_local_block_get_block(core_local_block);
  if ((block != NULL) && (block != old_block)) {
    return 1;
  }
  if (block != NULL) {
    cl_core_local_block_set_block(core_local_block, NULL);
    cl_block_list_insert_at_tail(&g_log.dirty_block_list, block);
  }
  block = cl_allocate_block();
  if (block == NULL) {
    gpr_atm_rel_store(&g_log.is_full, 1);
    return 0;
  }
  cl_core_local_block_set_block(core_local_block, block);
  cl_block_enable_access(block);
  return 1;
}
Example #27
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_subchannel_call_holder *holder = arg;
  grpc_subchannel_call *call;
  gpr_mu_lock(&holder->mu);
  GPR_ASSERT(holder->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  call = GET_CALL(holder);
  GPR_ASSERT(call == NULL || call == CANCELLED_CALL);
  holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (holder->connected_subchannel == NULL) {
    fail_locked(exec_ctx, holder);
  } else {
    gpr_atm_rel_store(
        &holder->subchannel_call,
        (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
            exec_ctx, holder->connected_subchannel, holder->pollset));
    retry_waiting_locked(exec_ctx, holder);
  }
  gpr_mu_unlock(&holder->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
}
Example #28
static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  grpc_call_element *elem = arg;
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  gpr_mu_lock(&calld->mu);
  GPR_ASSERT(calld->creation_phase ==
             GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL);
  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
                                           chand->interested_parties);
  calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
  if (calld->connected_subchannel == NULL) {
    gpr_atm_no_barrier_store(&calld->subchannel_call, 1);
    fail_locked(exec_ctx, calld, GRPC_ERROR_CREATE_REFERENCING(
                                     "Failed to create subchannel", &error, 1));
  } else if (GET_CALL(calld) == CANCELLED_CALL) {
    /* already cancelled before subchannel became ready */
    fail_locked(exec_ctx, calld,
                GRPC_ERROR_CREATE_REFERENCING(
                    "Cancelled before creating subchannel", &error, 1));
  } else {
    /* Create call on subchannel. */
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *new_error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
        calld->deadline, &subchannel_call);
    if (new_error != GRPC_ERROR_NONE) {
      new_error = grpc_error_add_child(new_error, error);
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, new_error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
  }
  gpr_mu_unlock(&calld->mu);
  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
}
Example #29
static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

  pollable_init(&new_fd->pollable, PO_FD);

  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
  gpr_mu_init(&new_fd->orphaned_mu);
  new_fd->orphaned = false;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);
  return new_fd;
}
Example #30
// External functions: primary stats_log interface
void census_log_initialize(size_t size_in_mb, int discard_old_records) {
  // Check cacheline alignment.
  GPR_ASSERT(sizeof(cl_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(sizeof(cl_core_local_block) % GPR_CACHELINE_SIZE == 0);
  GPR_ASSERT(!g_log.initialized);
  g_log.discard_old_records = discard_old_records;
  g_log.num_cores = gpr_cpu_num_cores();
  // Ensure that we will not get any overflow when calculating num_blocks.
  GPR_ASSERT(CL_LOG_2_MB >= CENSUS_LOG_2_MAX_RECORD_SIZE);
  GPR_ASSERT(size_in_mb < 1000);
  // Ensure at least 2x as many blocks as there are cores.
  g_log.num_blocks =
      (uint32_t)GPR_MAX(2 * g_log.num_cores, (size_in_mb << CL_LOG_2_MB) >>
                                                 CENSUS_LOG_2_MAX_RECORD_SIZE);
  gpr_mu_init(&g_log.lock);
  g_log.read_iterator_state = 0;
  g_log.block_being_read = NULL;
  g_log.core_local_blocks = (cl_core_local_block*)gpr_malloc_aligned(
      g_log.num_cores * sizeof(cl_core_local_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.core_local_blocks, 0,
         g_log.num_cores * sizeof(cl_core_local_block));
  g_log.blocks = (cl_block*)gpr_malloc_aligned(
      g_log.num_blocks * sizeof(cl_block), GPR_CACHELINE_SIZE_LOG);
  memset(g_log.blocks, 0, g_log.num_blocks * sizeof(cl_block));
  g_log.buffer = gpr_malloc(g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  memset(g_log.buffer, 0, g_log.num_blocks * CENSUS_LOG_MAX_RECORD_SIZE);
  cl_block_list_initialize(&g_log.free_block_list);
  cl_block_list_initialize(&g_log.dirty_block_list);
  for (uint32_t i = 0; i < g_log.num_blocks; ++i) {
    cl_block* block = g_log.blocks + i;
    cl_block_initialize(block, g_log.buffer + (CENSUS_LOG_MAX_RECORD_SIZE * i));
    cl_block_try_disable_access(block, 1 /* discard data */);
    cl_block_list_insert_at_tail(&g_log.free_block_list, block);
  }
  gpr_atm_rel_store(&g_log.out_of_space_count, 0);
  g_log.initialized = 1;
}
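A worked instance of the num_blocks computation above, with an assumed record size (the real CENSUS_LOG_2_MAX_RECORD_SIZE lives in the census headers; the constants here are for illustration only):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CL_LOG_2_MB_DEMO 20      /* 1 MB == 1 << 20 bytes */
#define LOG_2_MAX_RECORD_DEMO 13 /* 8 KB records, assumed */

int main(void) {
  uint32_t num_cores = 8;
  size_t size_in_mb = 2;
  uint32_t by_size =
      (uint32_t)((size_in_mb << CL_LOG_2_MB_DEMO) >> LOG_2_MAX_RECORD_DEMO);
  uint32_t num_blocks = by_size > 2 * num_cores ? by_size : 2 * num_cores;
  assert(by_size == 256);    /* (2 << 20) >> 13 */
  assert(num_blocks == 256); /* 256 > 2 * 8, so the size term wins */
  return 0;
}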