Example #1
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
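The excerpt above is one half of a three-state readiness protocol: the atomic word holds NOT_READY, READY, or a parked closure pointer. Below is a minimal sketch of the companion notify_on step this state machine pairs with, written against C11 atomics instead of gpr_atm; the closure type and every name in it are illustrative, not the gRPC API.

#include <stdatomic.h>
#include <stdint.h>

#define NOT_READY ((uintptr_t)0)
#define READY ((uintptr_t)1)

typedef struct closure {
  void (*cb)(void *arg);
  void *arg;
} closure;

/* Park a closure to run on the next readiness event. If READY was already
   signalled, consume it and run the closure immediately. Only one waiter
   per event is allowed, matching the excerpt's three-state protocol. */
static void notify_on(_Atomic uintptr_t *st, closure *c) {
  uintptr_t expected = NOT_READY;
  if (atomic_compare_exchange_strong(st, &expected, (uintptr_t)c)) {
    return; /* parked; set_ready will collect and run it later */
  }
  if (expected == READY &&
      atomic_compare_exchange_strong(st, &expected, NOT_READY)) {
    c->cb(c->arg); /* readiness already pending: consume and fire now */
  }
}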
Example #2
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p   ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
          fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
Example #3
#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
  unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p   ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
Example #4
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_unref(grpc_exec_ctx *exec_ctx, grpc_stream_refcount *refcount,
                       const char *reason) {
  gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
  gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
          refcount->object_type, refcount, refcount->destroy.cb_arg, val,
          val - 1, reason);
#else
void grpc_stream_unref(grpc_exec_ctx *exec_ctx,
                       grpc_stream_refcount *refcount) {
#endif
  if (gpr_unref(&refcount->refs)) {
    grpc_exec_ctx_enqueue(exec_ctx, &refcount->destroy, true, NULL);
  }
}

#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void *cb_arg,
                          const char *object_type) {
  refcount->object_type = object_type;
#else
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void *cb_arg) {
#endif
  gpr_ref_init(&refcount->refs, initial_refs);
  grpc_closure_init(&refcount->destroy, cb, cb_arg);
}
Example #5
int gpr_stack_lockfree_pop(gpr_stack_lockfree *stack) {
  lockfree_node head;
  lockfree_node newhead;

  do {
    head.atm = gpr_atm_acq_load(&(stack->head.atm));
    if (head.contents.index == INVALID_ENTRY_INDEX) {
      return -1;
    }
    newhead.atm =
        gpr_atm_no_barrier_load(&(stack->entries[head.contents.index].atm));

  } while (!gpr_atm_no_barrier_cas(&(stack->head.atm), head.atm, newhead.atm));
#ifndef NDEBUG
  /* Check for valid pop */
  {
    int pushed_index = head.contents.index / (8 * sizeof(gpr_atm));
    int pushed_bit = head.contents.index % (8 * sizeof(gpr_atm));
    gpr_atm old_val;

    old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
                                           -((gpr_atm)1 << pushed_bit));
    GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) != 0);
  }
#endif

  return head.contents.index;
}
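The #ifndef NDEBUG block above maintains a side bitmap recording which entries are currently on the stack. A small self-contained sketch of that bookkeeping, using C11 atomics; all names here are illustrative:

#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

#define BITS_PER_WORD (8 * sizeof(unsigned long))

/* Entry k maps to bit (k % BITS_PER_WORD) of word (k / BITS_PER_WORD).
   fetch_add of +bit sets the bit on push, fetch_sub of the same bit clears
   it on pop, and the returned previous word lets the caller assert the
   prior state, exactly as the excerpt's NDEBUG check does. */
static void debug_mark_pushed(_Atomic unsigned long *pushed, size_t entry) {
  unsigned long bit = 1UL << (entry % BITS_PER_WORD);
  unsigned long old = atomic_fetch_add(&pushed[entry / BITS_PER_WORD], bit);
  assert((old & bit) == 0); /* a double push trips this */
}

static void debug_mark_popped(_Atomic unsigned long *pushed, size_t entry) {
  unsigned long bit = 1UL << (entry % BITS_PER_WORD);
  unsigned long old = atomic_fetch_sub(&pushed[entry / BITS_PER_WORD], bit);
  assert((old & bit) != 0); /* popping an entry that was never pushed */
}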
Example #6
static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  gpr_mpscq_destroy(&lock->queue);
  GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
  gpr_free(lock);
}
Example #7
void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
                             grpc_resource_user *resource_user, size_t size) {
  gpr_mu_lock(&resource_user->mu);
  GPR_ASSERT(resource_user->allocated >= (int64_t)size);
  bool was_zero_or_negative = resource_user->free_pool <= 0;
  resource_user->free_pool += (int64_t)size;
  resource_user->allocated -= (int64_t)size;
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: free %" PRIdPTR "; allocated -> %" PRId64
                       ", free_pool -> %" PRId64,
            resource_user->resource_quota->name, resource_user->name, size,
            resource_user->allocated, resource_user->free_pool);
  }
  bool is_bigger_than_zero = resource_user->free_pool > 0;
  if (is_bigger_than_zero && was_zero_or_negative &&
      !resource_user->added_to_free_pool) {
    resource_user->added_to_free_pool = true;
    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                          &resource_user->add_to_free_pool_closure,
                          GRPC_ERROR_NONE, false);
  }
  grpc_closure *on_done_destroy = (grpc_closure *)gpr_atm_no_barrier_load(
      &resource_user->on_done_destroy_closure);
  if (on_done_destroy != NULL && resource_user->allocated == 0) {
    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                          &resource_user->destroy_closure, GRPC_ERROR_NONE,
                          false);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #8
void grpc_fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  gpr_mu_lock(&fd->mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  fd->shutdown = 1;
  set_ready_locked(exec_ctx, fd, &fd->read_closure);
  set_ready_locked(exec_ctx, fd, &fd->write_closure);
  gpr_mu_unlock(&fd->mu);
}
Example #9
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
          fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    freelist_fd(fd);
  } else {
    GPR_ASSERT(old > n);
  }
}
Example #10
#ifdef GRPC_STREAM_REFCOUNT_DEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
  gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
  gpr_log(GPR_DEBUG, "STREAM %p:%p   REF %" PRIdPTR "->%" PRIdPTR " %s",
          refcount, refcount->destroy.cb_arg, val, val + 1, reason);
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
  gpr_ref(&refcount->refs);
}
Example #11
static void tcp_ref(grpc_tcp *tcp, const char *reason, const char *file,
                    int line) {
  if (GRPC_TRACER_ON(grpc_tcp_trace)) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP   ref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val + 1);
  }
  gpr_ref(&tcp->refcount);
}
Example #12
static void secure_endpoint_ref(secure_endpoint *ep, const char *reason,
                                const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
    gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "SECENDP   ref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
            val + 1);
  }
  gpr_ref(&ep->ref);
}
Example #13
int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
  lockfree_node head;
  lockfree_node newhead;
  lockfree_node current;
  lockfree_node newent;

  /* First fill in the entry's index and aba ctr for new head */
  newhead.contents.index = (uint16_t)entry;
#ifdef GPR_ARCH_64
  /* Fill in the pad to avoid confusing memcheck tools */
  newhead.contents.pad = 0;
#endif

  /* Also bump the entry's aba_ctr */
  current.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
  newhead.contents.aba_ctr = ++current.contents.aba_ctr;
  gpr_atm_no_barrier_store(&stack->entries[entry].atm, current.atm);

#ifndef NDEBUG
  /* Check for double push */
  {
    int pushed_index = entry / (int)(8 * sizeof(gpr_atm));
    int pushed_bit = entry % (int)(8 * sizeof(gpr_atm));
    gpr_atm old_val;

    old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
                                           ((gpr_atm)1 << pushed_bit));
    GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) == 0);
  }
#endif

  do {
    /* Atomically get the existing head value for use */
    head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
    /* Point to it */
    newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
    newent.contents.index = head.contents.index;
    gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
  } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
  /* Use rel_cas above to make sure that entry index is set properly */
  return head.contents.index == INVALID_ENTRY_INDEX;
}
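Both the pop in Example #5 and this push hinge on the node layout: an entry index packed next to a generation (ABA) counter, so the CAS on the head fails if the same index was popped and re-pushed in between, because the counter will have advanced. One plausible packing, consistent with the fields the excerpts touch but not necessarily the library's exact definition:

#include <stdint.h>

#define INVALID_ENTRY_INDEX 0xffff /* assumed sentinel for "stack empty" */

typedef union lockfree_node {
  uint64_t atm; /* the whole node, loaded and CASed as one atomic word */
  struct {
    uint16_t index;   /* which entry, or INVALID_ENTRY_INDEX */
    uint16_t aba_ctr; /* bumped on every push of this entry */
    uint32_t pad;     /* zeroed so memcheck tools see no uninitialized bits */
  } contents;
} lockfree_node;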
Example #14
static void alarm_unref_dbg(grpc_alarm *alarm, const char *reason,
                            const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
    gpr_atm val = gpr_atm_no_barrier_load(&alarm->refs.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "Alarm:%p  Unref %" PRIdPTR " -> %" PRIdPTR " %s", alarm, val,
            val - 1, reason);
  }

  alarm_unref(alarm);
}
Example #15
grpc_connectivity_state grpc_connectivity_state_check(
    grpc_connectivity_state_tracker *tracker) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
            grpc_connectivity_state_name(cur));
  }
  return cur;
}
Example #16
#ifndef NDEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
                     const char *reason, const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
  }
#else
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
#endif
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
                                                     grpc_schedule_on_exec_ctx),
                       GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}
Example #17
void grpc_fd_shutdown(grpc_fd *fd) {
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
  set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  GPR_ASSERT(ncb <= 2);
  process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
                    0 /* GPR_FALSE */);
}
Example #18
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
          fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
    grpc_iomgr_unregister_object(&fd->iomgr_object);
    freelist_fd(fd);
  } else {
    GPR_ASSERT(old > n);
  }
}
Example #19
static void tcp_unref(grpc_exec_ctx *exec_ctx, grpc_tcp *tcp,
                      const char *reason, const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_tcp_trace)) {
    gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "TCP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, tcp, reason, val,
            val - 1);
  }
  if (gpr_unref(&tcp->refcount)) {
    tcp_free(exec_ctx, tcp);
  }
}
Example #20
#ifndef NDEBUG
void grpc_stream_ref(grpc_stream_refcount *refcount, const char *reason) {
  if (GRPC_TRACER_ON(grpc_trace_stream_refcount)) {
    gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
    gpr_log(GPR_DEBUG, "%s %p:%p   REF %" PRIdPTR "->%" PRIdPTR " %s",
            refcount->object_type, refcount, refcount->destroy.cb_arg, val,
            val + 1, reason);
  }
#else
void grpc_stream_ref(grpc_stream_refcount *refcount) {
#endif
  gpr_ref_non_zero(&refcount->refs);
}
Example #21
size_t grpc_channel_get_call_size_estimate(grpc_channel *channel) {
#define ROUND_UP_SIZE 256
  /* We round up our current estimate to the NEXT value of ROUND_UP_SIZE.
     This ensures:
      1. a consistent size allocation when our estimate is drifting slowly
         (which is common) - which tends to help most allocators reuse memory
      2. a small amount of allowed growth over the estimate without hitting
         the arena size doubling case, reducing overall memory usage */
  return ((size_t)gpr_atm_no_barrier_load(&channel->call_size_estimate) +
          2 * ROUND_UP_SIZE) &
         ~(size_t)(ROUND_UP_SIZE - 1);
}
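The return expression adds two increments and then masks down to a multiple of ROUND_UP_SIZE (a power of two), so the result is always a multiple of 256 with between one and two increments of headroom above the estimate. A quick standalone check of that arithmetic; the helper function is hypothetical and only mirrors the excerpt's expression:

#include <assert.h>
#include <stddef.h>

#define ROUND_UP_SIZE 256

/* Same arithmetic as the excerpt: add almost two increments, then mask
   down to a multiple of ROUND_UP_SIZE. */
static size_t round_up_estimate(size_t estimate) {
  return (estimate + 2 * ROUND_UP_SIZE) & ~(size_t)(ROUND_UP_SIZE - 1);
}

int main(void) {
  assert(round_up_estimate(0) == 512);   /* minimum allocation */
  assert(round_up_estimate(1) == 512);   /* small drift: same bucket */
  assert(round_up_estimate(300) == 768); /* next multiple, with headroom */
  assert(round_up_estimate(512) == 1024);
  return 0;
}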
Example #22
#ifndef NDEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cq, const char *reason,
                          const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_trace_cq_refcount)) {
    gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "CQ:%p   ref %" PRIdPTR " -> %" PRIdPTR " %s", cq, val, val + 1,
            reason);
  }
#else
void grpc_cq_internal_ref(grpc_completion_queue *cq) {
#endif
  gpr_ref(&cq->owning_refs);
}
Example #23
static void secure_endpoint_unref(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
                                  const char *reason, const char *file,
                                  int line) {
  if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
    gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
    gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
            "SECENDP unref %p : %s %" PRIdPTR " -> %" PRIdPTR, ep, reason, val,
            val - 1);
  }
  if (gpr_unref(&ep->ref)) {
    destroy(exec_ctx, ep);
  }
}
Example #24
bool grpc_connectivity_state_notify_on_state_change(
    grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
    grpc_connectivity_state *current, grpc_closure *notify) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    if (current == NULL) {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
              tracker->name, notify);
    } else {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
              tracker->name, grpc_connectivity_state_name(*current),
              grpc_connectivity_state_name(cur), notify);
    }
  }
  if (current == NULL) {
    grpc_connectivity_state_watcher *w = tracker->watchers;
    if (w != NULL && w->notify == notify) {
      GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
      tracker->watchers = w->next;
      gpr_free(w);
      return false;
    }
    while (w != NULL) {
      grpc_connectivity_state_watcher *rm_candidate = w->next;
      if (rm_candidate != NULL && rm_candidate->notify == notify) {
        GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
        w->next = w->next->next;
        gpr_free(rm_candidate);
        return false;
      }
      w = w->next;
    }
    return false;
  } else {
    if (cur != *current) {
      *current = cur;
      GRPC_CLOSURE_SCHED(exec_ctx, notify,
                         GRPC_ERROR_REF(tracker->current_error));
    } else {
      grpc_connectivity_state_watcher *w =
          (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
      w->current = current;
      w->notify = notify;
      w->next = tracker->watchers;
      tracker->watchers = w;
    }
    return cur == GRPC_CHANNEL_IDLE;
  }
}
Example #25
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              grpc_pollset_worker *worker, gpr_uint32 read_mask,
                              gpr_uint32 write_mask, grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  grpc_closure *cur;
  int requested;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->mu);

  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load(&fd->shutdown)) {
    watcher->fd = NULL;
    watcher->pollset = NULL;
    watcher->worker = NULL;
    gpr_mu_unlock(&fd->mu);
    GRPC_FD_UNREF(fd, "poll");
    return 0;
  }

  /* if there is nobody polling for read, but we need to, then start doing so */
  cur = fd->read_closure;
  requested = cur != CLOSURE_READY;
  if (read_mask && fd->read_watcher == NULL && requested) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  cur = fd->write_closure;
  requested = cur != CLOSURE_READY;
  if (write_mask && fd->write_watcher == NULL && requested) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0 && worker != NULL) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->worker = worker;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->mu);

  return mask;
}
Example #26
grpc_connectivity_state grpc_connectivity_state_get(
    grpc_connectivity_state_tracker *tracker, grpc_error **error) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    gpr_log(GPR_DEBUG, "CONWATCH: %p %s: get %s", tracker, tracker->name,
            grpc_connectivity_state_name(cur));
  }
  if (error != NULL) {
    *error = GRPC_ERROR_REF(tracker->current_error);
  }
  return cur;
}
Example #27
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.on_readable", 0);

  grpc_workqueue *workqueue = arg;

  if (error != GRPC_ERROR_NONE) {
    /* HACK: let wakeup_fd code know that we stole the fd */
    workqueue->wakeup_fd.read_fd = 0;
    grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
    grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
    GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
    gpr_free(workqueue);
  } else {
    error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
    gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
    if (error == GRPC_ERROR_NONE) {
      grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
                             &workqueue->read_closure);
    } else {
      /* recurse to get error handling */
      on_readable(exec_ctx, arg, error);
    }
    if (n == NULL) {
      /* try again - queue in an inconsistent state */
      wakeup(exec_ctx, workqueue);
    } else {
      switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
        case 3:  // had one count, one unorphaned --> done, unorphaned
          break;
        case 2:  // had one count, one orphaned --> done, orphaned
          workqueue_destroy(exec_ctx, workqueue);
          break;
        case 1:
        case 0:
          // these values are illegal - representing an already done or
          // deleted workqueue
          GPR_UNREACHABLE_CODE(break);
        default:
          // schedule a wakeup since there's more to do
          wakeup(exec_ctx, workqueue);
      }
      grpc_closure *cl = (grpc_closure *)n;
      grpc_error *clerr = cl->error;
      cl->cb(exec_ctx, cl->cb_arg, clerr);
      GRPC_ERROR_UNREF(clerr);
    }
  }

  GPR_TIMER_END("workqueue.on_readable", 0);
}
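The switch over gpr_atm_full_fetch_add(&workqueue->state, -2) implies an encoding where bit 0 means "not yet orphaned" and each pending item adds 2 to the upper bits. A minimal sketch of that encoding in C11 atomics; the helper names are invented for illustration, and the orphan-time destroy path is omitted:

#include <stdatomic.h>

/* state = 2 * pending_items + (owner still alive ? 1 : 0) */
static void item_enqueued(_Atomic long *state) {
  atomic_fetch_add(state, 2); /* one more pending item */
}

static void owner_orphaned(_Atomic long *state) {
  atomic_fetch_sub(state, 1); /* clear the "unorphaned" bit */
}

/* After finishing an item: returns nonzero when that was the last pending
   item and the owner is already gone, i.e. the excerpt's "case 2". A
   previous value of 3 (last item, owner alive) matches "case 3": done,
   but nothing to destroy. */
static int item_done_must_destroy(_Atomic long *state) {
  return atomic_fetch_sub(state, 2) == 2;
}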
Example #28
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
                                 grpc_resource_user *resource_user,
                                 grpc_closure *on_done) {
  gpr_mu_lock(&resource_user->mu);
  GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->on_done_destroy_closure) ==
             0);
  gpr_atm_no_barrier_store(&resource_user->on_done_destroy_closure,
                           (gpr_atm)on_done);
  if (resource_user->allocated == 0) {
    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                          &resource_user->destroy_closure, GRPC_ERROR_NONE,
                          false);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #29
static void interned_slice_destroy(interned_slice_refcount *s) {
  slice_shard *shard = &g_shards[SHARD_IDX(s->hash)];
  gpr_mu_lock(&shard->mu);
  GPR_ASSERT(0 == gpr_atm_no_barrier_load(&s->refcnt));
  interned_slice_refcount **prev_next;
  interned_slice_refcount *cur;
  for (prev_next = &shard->strs[TABLE_IDX(s->hash, shard->capacity)],
      cur = *prev_next;
       cur != s; prev_next = &cur->bucket_next, cur = cur->bucket_next)
    ;
  *prev_next = cur->bucket_next;
  shard->count--;
  gpr_free(s);
  gpr_mu_unlock(&shard->mu);
}
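The removal loop above walks the hash bucket with a pointer to the incoming link (prev_next), so unlinking needs no head-versus-interior special case. The same idiom on a bare list type; the node struct here is hypothetical:

typedef struct node {
  struct node *next;
} node;

/* Unlink target from the list, assuming it is present; the excerpt makes
   the same assumption, since its loop has no not-found exit. */
static void list_remove(node **head, node *target) {
  node **prev_next = head;
  while (*prev_next != target) {
    prev_next = &(*prev_next)->next;
  }
  *prev_next = target->next; /* works for the head and interior nodes alike */
}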
Example #30
void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
                                 grpc_connectivity_state_tracker *tracker,
                                 grpc_connectivity_state state,
                                 grpc_error *error, const char *reason) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  grpc_connectivity_state_watcher *w;
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    const char *error_string = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
            tracker->name, grpc_connectivity_state_name(cur),
            grpc_connectivity_state_name(state), reason, error, error_string);
  }
  switch (state) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(error == GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_SHUTDOWN:
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
      GPR_ASSERT(error != GRPC_ERROR_NONE);
      break;
  }
  GRPC_ERROR_UNREF(tracker->current_error);
  tracker->current_error = error;
  if (cur == state) {
    return;
  }
  GPR_ASSERT(cur != GRPC_CHANNEL_SHUTDOWN);
  gpr_atm_no_barrier_store(&tracker->current_state_atm, state);
  while ((w = tracker->watchers) != NULL) {
    *w->current = state;
    tracker->watchers = w->next;
    if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
      gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
              w->notify);
    }
    GRPC_CLOSURE_SCHED(exec_ctx, w->notify,
                       GRPC_ERROR_REF(tracker->current_error));
    gpr_free(w);
  }
}