Example #1
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
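The function above drives one half of a three-state atomic cell: the word holds NOT_READY, READY, or a closure pointer parked by notify_on (Example #3). A minimal self-contained sketch of the same state machine in portable C11, where stdatomic stands in for the gpr_atm API and all names are illustrative, not gRPC's (success ordering is acq_rel here for C11 portability, where gRPC's rel_cas is release-only):

#include <stdatomic.h>
#include <stdint.h>

#define NOT_READY ((intptr_t)0)
#define READY ((intptr_t)1)

typedef struct closure {
  void (*cb)(void *arg);
  void *arg;
} closure;

/* Mark the cell ready; if a closure was parked by notify_on, hand it
   back so the caller can run it.  Per the invariant asserted in the
   code above, the only write that can race with us is notify_on
   installing a closure, so a failed CAS always leaves a pointer. */
static closure *set_ready_sketch(_Atomic intptr_t *st) {
  intptr_t state = atomic_load_explicit(st, memory_order_acquire);
  if (state == READY) return NULL; /* duplicate ready: ignore */
  if (state == NOT_READY) {
    intptr_t expected = NOT_READY;
    if (atomic_compare_exchange_strong_explicit(
            st, &expected, READY, memory_order_acq_rel,
            memory_order_acquire)) {
      return NULL; /* the closure will run after the next notify_on */
    }
    state = expected; /* lost the race: notify_on parked a closure */
  }
  /* state is a parked closure: consume it and reset the cell */
  atomic_store_explicit(st, NOT_READY, memory_order_release);
  return (closure *)state;
}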
Example #2
int gpr_stack_lockfree_push(gpr_stack_lockfree *stack, int entry) {
  lockfree_node head;
  lockfree_node newhead;
  lockfree_node current;
  lockfree_node newent;

  /* First fill in the entry's index and aba ctr for new head */
  newhead.contents.index = (uint16_t)entry;
#ifdef GPR_ARCH_64
  /* Fill in the pad to avoid confusing memcheck tools */
  newhead.contents.pad = 0;
#endif

  /* Also bump the entry's aba_ctr so a CAS against a stale head fails */
  current.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
  newhead.contents.aba_ctr = ++current.contents.aba_ctr;
  gpr_atm_no_barrier_store(&stack->entries[entry].atm, current.atm);

#ifndef NDEBUG
  /* Check for double push */
  {
    int pushed_index = entry / (int)(8 * sizeof(gpr_atm));
    int pushed_bit = entry % (int)(8 * sizeof(gpr_atm));
    gpr_atm old_val;

    old_val = gpr_atm_no_barrier_fetch_add(&stack->pushed[pushed_index],
                                           ((gpr_atm)1 << pushed_bit));
    GPR_ASSERT((old_val & (((gpr_atm)1) << pushed_bit)) == 0);
  }
#endif

  do {
    /* Atomically get the existing head value for use */
    head.atm = gpr_atm_no_barrier_load(&(stack->head.atm));
    /* Point to it */
    newent.atm = gpr_atm_no_barrier_load(&stack->entries[entry].atm);
    newent.contents.index = head.contents.index;
    gpr_atm_no_barrier_store(&stack->entries[entry].atm, newent.atm);
  } while (!gpr_atm_rel_cas(&(stack->head.atm), head.atm, newhead.atm));
  /* Use rel_cas above to make sure that entry index is set properly */
  return head.contents.index == INVALID_ENTRY_INDEX;
}
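The push hinges on a union that packs the entry index and an ABA counter into one CAS-able word: because aba_ctr changes on every push, the rel_cas on head fails even if the same index has since been popped and re-pushed by another thread. Reconstructed from how the fields are used above; the real definition in gpr_stack_lockfree.c may differ in detail:

typedef union lockfree_node {
  gpr_atm atm; /* the whole node, CAS-able as a single word */
  struct {
    uint16_t index; /* entry index, or INVALID_ENTRY_INDEX */
#ifdef GPR_ARCH_64
    uint16_t pad;     /* zeroed to keep memcheck tools quiet */
    uint32_t aba_ctr; /* bumped on every push */
#else
    uint16_t aba_ctr;
#endif
  } contents;
} lockfree_node;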
Example #3
static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case.  As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY.
         We don't check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call.  NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
    /* swap was unsuccessful due to an intervening set_ready call.
       Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure.  This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback still "
              "pending");
      abort();
  }
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
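This is the counterpart to Example #1: it either parks the closure for a later set_ready, or consumes an already-pending READY and runs the closure itself. A sketch in the same C11 stand-ins as the sketch after Example #1 (maybe_wake_one_watcher and the shutdown check are omitted; return values are illustrative):

/* Returns 0 if c was parked, 1 if the event already fired and the
   caller should run c now, -1 on the user error the real code aborts
   on (two closures pending on the same cell at once). */
static int notify_on_sketch(_Atomic intptr_t *st, closure *c) {
  intptr_t expected = NOT_READY;
  if (atomic_compare_exchange_strong_explicit(
          st, &expected, (intptr_t)c, memory_order_acq_rel,
          memory_order_acquire)) {
    return 0; /* parked: the next set_ready will collect it */
  }
  if (expected == READY) {
    atomic_store_explicit(st, NOT_READY, memory_order_release);
    return 1; /* event already happened: run c immediately */
  }
  return -1; /* a different closure is already pending */
}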
Example #4
// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         grpc_transport_stream_op *op) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  grpc_deadline_state_client_start_transport_stream_op(exec_ctx, elem, op);
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(calld);
  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&calld->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(calld);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&calld->mu);
    grpc_transport_stream_op_finish_with_failure(
        exec_ctx, op, GRPC_ERROR_REF(calld->cancel_error));
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&calld->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_error != GRPC_ERROR_NONE) {
    if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
                         (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
      goto retry;
    } else {
      // Stash a copy of cancel_error in our call data, so that we can use
      // it for subsequent operations.  This ensures that if the call is
      // cancelled before any ops are passed down (e.g., if the deadline
      // is in the past when the call starts), we can return the right
      // error to the caller when the first op does get passed down.
      calld->cancel_error = GRPC_ERROR_REF(op->cancel_error);
      switch (calld->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
                          NULL, GRPC_ERROR_REF(op->cancel_error));
          break;
      }
      gpr_mu_unlock(&calld->mu);
      grpc_transport_stream_op_finish_with_failure(
          exec_ctx, op, GRPC_ERROR_REF(op->cancel_error));
      GPR_TIMER_END("cc_start_transport_stream_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
    /* If a subchannel is not available immediately, the polling entity from
       call_data should be added to channel_data's interested_parties, so
       that the lb_policy and the resolver can do their I/O under it. */
    if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                        op->send_initial_metadata_flags,
                        &calld->connected_subchannel, &calld->next_step,
                        GRPC_ERROR_NONE)) {
      calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
    } else {
      grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
                                             chand->interested_parties);
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel != NULL) {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent, calld->path,
        calld->deadline, &subchannel_call);
    if (error != GRPC_ERROR_NONE) {
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(calld, op);
  gpr_mu_unlock(&calld->mu);
  GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
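GET_CALL and CANCELLED_CALL are not shown in this snippet. From the acquire load on the fast path, the CAS against 0 in the cancellation branch, and the bare 1 used for the same CAS in Example #6 below, plausible reconstructions are (hedged; the real definitions live in client_channel.c):

/* Sentinel: non-NULL, but never a valid call pointer. */
#define CANCELLED_CALL ((grpc_subchannel_call *)1)

/* Lock-free fast-path read of the call, pairing with the rel_store /
   rel_cas writers in the slow path. */
#define GET_CALL(calld) \
  ((grpc_subchannel_call *)gpr_atm_acq_load(&(calld)->subchannel_call))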
Example #5
// The logic here is fairly complicated, due to (a) the fact that we
// need to handle the case where we receive the send op before the
// initial metadata op, and (b) the need for efficiency, especially in
// the streaming case.
// TODO(ctiller): Explain this more thoroughly.
static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         grpc_transport_stream_op *op) {
  call_data *calld = elem->call_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(calld);
  GPR_TIMER_BEGIN("cc_start_transport_stream_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
                                                 GRPC_ERROR_CANCELLED);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&calld->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(calld);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&calld->mu);
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
                                                 GRPC_ERROR_CANCELLED);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&calld->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("cc_start_transport_stream_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_error != GRPC_ERROR_NONE) {
    if (!gpr_atm_rel_cas(&calld->subchannel_call, 0,
                         (gpr_atm)(uintptr_t)CANCELLED_CALL)) {
      goto retry;
    } else {
      switch (calld->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          fail_locked(exec_ctx, calld, GRPC_ERROR_REF(op->cancel_error));
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          pick_subchannel(exec_ctx, elem, NULL, 0, &calld->connected_subchannel,
                          NULL);
          break;
      }
      gpr_mu_unlock(&calld->mu);
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op,
                                                   GRPC_ERROR_CANCELLED);
      GPR_TIMER_END("cc_start_transport_stream_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&calld->next_step, subchannel_ready, calld);
    GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
    if (pick_subchannel(exec_ctx, elem, op->send_initial_metadata,
                        op->send_initial_metadata_flags,
                        &calld->connected_subchannel, &calld->next_step)) {
      calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_subchannel");
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (calld->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      calld->connected_subchannel != NULL) {
    grpc_subchannel_call *subchannel_call = NULL;
    grpc_error *error = grpc_connected_subchannel_create_call(
        exec_ctx, calld->connected_subchannel, calld->pollent,
        &subchannel_call);
    if (error != GRPC_ERROR_NONE) {
      subchannel_call = CANCELLED_CALL;
      fail_locked(exec_ctx, calld, GRPC_ERROR_REF(error));
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op, error);
    }
    gpr_atm_rel_store(&calld->subchannel_call,
                      (gpr_atm)(uintptr_t)subchannel_call);
    retry_waiting_locked(exec_ctx, calld);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(calld, op);
  gpr_mu_unlock(&calld->mu);
  GPR_TIMER_END("cc_start_transport_stream_op", 0);
}
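Examples #4 through #6 share one concurrency shape: a lock-free fast path over a single atomic word, then a mutex-guarded slow path that re-checks the word before committing to anything. A minimal self-contained sketch of that idiom in portable C, with pthreads and stdatomic standing in for gpr_mu and gpr_atm and purely illustrative names:

#include <pthread.h>
#include <stdatomic.h>

typedef struct holder {
  _Atomic(void *) call; /* NULL until the call is created */
  pthread_mutex_t mu;
} holder;

static void process(void *call, void *op) { (void)call; (void)op; }
static void park(holder *h, void *op) { (void)h; (void)op; }

static void perform_op_sketch(holder *h, void *op) {
  /* fast path: one acquire load, no lock taken */
  void *call = atomic_load_explicit(&h->call, memory_order_acquire);
  if (call != NULL) {
    process(call, op);
    return;
  }
  /* slow path: lock, then re-check in case another thread won the race */
  pthread_mutex_lock(&h->mu);
  call = atomic_load_explicit(&h->call, memory_order_acquire);
  if (call != NULL) {
    pthread_mutex_unlock(&h->mu);
    process(call, op);
    return;
  }
  park(h, op); /* nothing to be done but wait, as in the code above */
  pthread_mutex_unlock(&h->mu);
}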
Example #6
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
                                            grpc_subchannel_call_holder *holder,
                                            grpc_transport_stream_op *op) {
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(holder);
  GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&holder->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(holder);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&holder->mu);
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&holder->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_with_status != GRPC_STATUS_OK) {
    if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
      goto retry;
    } else {
      switch (holder->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          fail_locked(exec_ctx, holder);
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
                                  &holder->connected_subchannel, NULL);
          break;
      }
      gpr_mu_unlock(&holder->mu);
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
      GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      holder->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&holder->next_step, subchannel_ready, holder);
    GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
    if (holder->pick_subchannel(
            exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
            &holder->connected_subchannel, &holder->next_step)) {
      holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      holder->connected_subchannel != NULL) {
    gpr_atm_rel_store(
        &holder->subchannel_call,
        (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
            exec_ctx, holder->connected_subchannel, holder->pollset));
    retry_waiting_locked(exec_ctx, holder);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(holder, op);
  gpr_mu_unlock(&holder->mu);
  GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
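add_waiting_locked and retry_waiting_locked are the other half of the slow path: ops that arrive before the call exists are parked under the mutex, then replayed once the atomic word is finally published, which is why every rel_store is followed by a jump back to the retry label. A sketch of what such helpers must do, with illustrative names and types:

#include <stdlib.h>

typedef struct wait_queue {
  void **ops;
  size_t count;
  size_t capacity;
} wait_queue;

/* Called with the holder's mutex held: park an op until the call exists. */
static void add_waiting_sketch(wait_queue *q, void *op) {
  if (q->count == q->capacity) {
    q->capacity = q->capacity ? 2 * q->capacity : 4;
    q->ops = realloc(q->ops, q->capacity * sizeof(*q->ops));
  }
  q->ops[q->count++] = op;
}

/* Once the call (or the cancelled sentinel) is published, drain the
   queue and push each parked op through the normal dispatch path. */
static void retry_waiting_sketch(wait_queue *q, void (*dispatch)(void *op)) {
  for (size_t i = 0; i < q->count; i++) dispatch(q->ops[i]);
  q->count = 0;
}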
Example #7
static void compress_start_transport_stream_op_batch(
    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
    grpc_transport_stream_op_batch *op) {
  call_data *calld = elem->call_data;

  GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);

  if (op->cancel_stream) {
    GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error);
    gpr_atm cur = gpr_atm_full_xchg(
        &calld->send_initial_metadata_state,
        CANCELLED_BIT | (gpr_atm)op->payload->cancel_stream.cancel_error);
    switch (cur) {
      case HAS_COMPRESSION_ALGORITHM:
      case NO_COMPRESSION_ALGORITHM:
      case INITIAL_METADATA_UNSEEN:
        break;
      default:
        if ((cur & CANCELLED_BIT) == 0) {
          grpc_transport_stream_op_batch_finish_with_failure(
              exec_ctx, (grpc_transport_stream_op_batch *)cur,
              GRPC_ERROR_REF(op->payload->cancel_stream.cancel_error));
        } else {
          GRPC_ERROR_UNREF((grpc_error *)(cur & ~CANCELLED_BIT));
        }
        break;
    }
  }

  if (op->send_initial_metadata) {
    bool has_compression_algorithm;
    grpc_error *error = process_send_initial_metadata(
        exec_ctx, elem,
        op->payload->send_initial_metadata.send_initial_metadata,
        &has_compression_algorithm);
    if (error != GRPC_ERROR_NONE) {
      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error);
      GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
      return;
    }
    gpr_atm cur;
  retry_send_im:
    cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
    GPR_ASSERT(cur != HAS_COMPRESSION_ALGORITHM &&
               cur != NO_COMPRESSION_ALGORITHM);
    if ((cur & CANCELLED_BIT) == 0) {
      if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
                           has_compression_algorithm
                               ? HAS_COMPRESSION_ALGORITHM
                               : NO_COMPRESSION_ALGORITHM)) {
        goto retry_send_im;
      }
      if (cur != INITIAL_METADATA_UNSEEN) {
        grpc_call_next_op(exec_ctx, elem,
                          (grpc_transport_stream_op_batch *)cur);
      }
    }
  }
  if (op->send_message) {
    gpr_atm cur;
  retry_send:
    cur = gpr_atm_acq_load(&calld->send_initial_metadata_state);
    switch (cur) {
      case INITIAL_METADATA_UNSEEN:
        if (!gpr_atm_rel_cas(&calld->send_initial_metadata_state, cur,
                             (gpr_atm)op)) {
          goto retry_send;
        }
        break;
      case HAS_COMPRESSION_ALGORITHM:
      case NO_COMPRESSION_ALGORITHM:
        if (!skip_compression(elem,
                              op->payload->send_message.send_message->flags,
                              cur == HAS_COMPRESSION_ALGORITHM)) {
          calld->send_op = op;
          calld->send_length = op->payload->send_message.send_message->length;
          calld->send_flags = op->payload->send_message.send_message->flags;
          continue_send_message(exec_ctx, elem);
        } else {
          /* pass control down the stack */
          grpc_call_next_op(exec_ctx, elem, op);
        }
        break;
      default:
        if (cur & CANCELLED_BIT) {
          grpc_transport_stream_op_batch_finish_with_failure(
              exec_ctx, op,
              GRPC_ERROR_REF((grpc_error *)(cur & ~CANCELLED_BIT)));
        } else {
          /* >1 send_message concurrently */
          GPR_UNREACHABLE_CODE(break);
        }
    }
  } else {
    /* pass control down the stack */
    grpc_call_next_op(exec_ctx, elem, op);
  }

  GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
}
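The state word here multiplexes four kinds of value in a single gpr_atm, using a low tag bit. Reconstructed from the usage above (hedged; the real definitions live in message_compress_filter.c): the named constants must have the low bit clear, and batch pointers are aligned, so bit 0 is free to mark cancellation, where the word becomes CANCELLED_BIT | (gpr_atm)grpc_error*.

#define INITIAL_METADATA_UNSEEN ((gpr_atm)0)   /* nothing arrived yet      */
#define HAS_COMPRESSION_ALGORITHM ((gpr_atm)2) /* metadata seen, compress  */
#define NO_COMPRESSION_ALGORITHM ((gpr_atm)4)  /* metadata seen, plaintext */
#define CANCELLED_BIT ((gpr_atm)1) /* low bit set: word carries an error   */
/* Any other even value is a parked grpc_transport_stream_op_batch*,
   stored by the send_message path when initial metadata is still unseen. */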
Example #8
grpc_slice grpc_slice_intern(grpc_slice slice) {
  GPR_TIMER_BEGIN("grpc_slice_intern", 0);
  if (GRPC_IS_STATIC_METADATA_STRING(slice)) {
    GPR_TIMER_END("grpc_slice_intern", 0);
    return slice;
  }

  uint32_t hash = grpc_slice_hash(slice);
  for (uint32_t i = 0; i <= max_static_metadata_hash_probe; i++) {
    static_metadata_hash_ent ent =
        static_metadata_hash[(hash + i) % GPR_ARRAY_SIZE(static_metadata_hash)];
    if (ent.hash == hash && ent.idx < GRPC_STATIC_MDSTR_COUNT &&
        grpc_slice_eq(grpc_static_slice_table[ent.idx], slice)) {
      GPR_TIMER_END("grpc_slice_intern", 0);
      return grpc_static_slice_table[ent.idx];
    }
  }

  interned_slice_refcount *s;
  slice_shard *shard = &g_shards[SHARD_IDX(hash)];

  gpr_mu_lock(&shard->mu);

  /* search for an existing string */
  size_t idx = TABLE_IDX(hash, shard->capacity);
  for (s = shard->strs[idx]; s; s = s->bucket_next) {
    if (s->hash == hash && grpc_slice_eq(slice, materialize(s))) {
      if (gpr_atm_no_barrier_fetch_add(&s->refcnt, 1) == 0) {
        /* If we get here, we've added a ref to something that was about to
         * die - drop it immediately.
         * The *only* possible path here (given the shard mutex) should be to
         * drop from one ref back to zero - assert that with a CAS */
        GPR_ASSERT(gpr_atm_rel_cas(&s->refcnt, 1, 0));
        /* and treat this as if we were never here... sshhh */
      } else {
        gpr_mu_unlock(&shard->mu);
        GPR_TIMER_END("grpc_slice_intern", 0);
        return materialize(s);
      }
    }
  }

  /* not found: create a new string */
  /* string data goes after the internal_string header */
  s = (interned_slice_refcount *)gpr_malloc(sizeof(*s) +
                                            GRPC_SLICE_LENGTH(slice));
  gpr_atm_rel_store(&s->refcnt, 1);
  s->length = GRPC_SLICE_LENGTH(slice);
  s->hash = hash;
  s->base.vtable = &interned_slice_vtable;
  s->base.sub_refcount = &s->sub;
  s->sub.vtable = &interned_slice_sub_vtable;
  s->sub.sub_refcount = &s->sub;
  s->bucket_next = shard->strs[idx];
  shard->strs[idx] = s;
  memcpy(s + 1, GRPC_SLICE_START_PTR(slice), GRPC_SLICE_LENGTH(slice));

  shard->count++;

  if (shard->count > shard->capacity * 2) {
    grow_shard(shard);
  }

  gpr_mu_unlock(&shard->mu);

  GPR_TIMER_END("grpc_slice_intern", 0);
  return materialize(s);
}
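The subtle step above is the refcount resurrection guard: fetch_add returning 0 means the thread just revived an entry whose final unref had already happened and whose destruction is pending, so the ref must be undone and the entry treated as missing. A minimal sketch of the try-ref idiom, with stdatomic standing in for the gpr_atm calls (illustrative, not the gRPC API):

#include <stdatomic.h>
#include <stdlib.h>

/* Returns 1 if a genuine reference was acquired, 0 if the entry was
   already dying and the caller should create a fresh one instead. */
static int try_ref(atomic_long *refcnt) {
  if (atomic_fetch_add(refcnt, 1) == 0) {
    /* We raced with the final unref.  Holding the shard mutex, the only
       legal transition is our own 1 -> 0, so undo the ref and pretend
       we were never here, as the comment in the code above puts it. */
    long expected = 1;
    if (!atomic_compare_exchange_strong(refcnt, &expected, 0)) abort();
    return 0;
  }
  return 1; /* a real reference: safe to hand the entry out */
}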