Example #1
// Stores elem_to_add in the caller-provided link node and appends that node to
// the tail of the batch; the batch itself allocates nothing.
void grpc_metadata_batch_add_tail(grpc_metadata_batch *batch,
                                  grpc_linked_mdelem *storage,
                                  grpc_mdelem *elem_to_add) {
  GPR_ASSERT(elem_to_add);
  storage->md = elem_to_add;
  grpc_metadata_batch_link_tail(batch, storage);
}
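
The point of this helper is the intrusive-list pattern: the caller owns the link storage (typically arena-backed, as in the next example) and the batch only wires it in, so adding an element never allocates. Below is a minimal self-contained sketch of that pattern; mini_link and mini_batch are hypothetical stand-ins, not the real gRPC structs.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for grpc_linked_mdelem / grpc_metadata_batch. */
typedef struct mini_link {
  const char *md;        /* payload slot (grpc_mdelem in the real code) */
  struct mini_link *next;
  struct mini_link *prev;
} mini_link;

typedef struct {
  mini_link *head;
  mini_link *tail;
} mini_batch;

/* Mirrors grpc_metadata_batch_add_tail: the caller supplies the link node,
 * so the batch performs no allocation of its own. */
static void mini_batch_add_tail(mini_batch *batch, mini_link *storage,
                                const char *elem_to_add) {
  assert(elem_to_add != NULL);
  storage->md = elem_to_add;
  storage->next = NULL;
  storage->prev = batch->tail;
  if (batch->tail != NULL) {
    batch->tail->next = storage;
  } else {
    batch->head = storage;
  }
  batch->tail = storage;
}

int main(void) {
  mini_batch b = {NULL, NULL};
  mini_link l1, l2; /* caller-owned storage, e.g. arena-backed in gRPC */
  mini_batch_add_tail(&b, &l1, "content-type: application/grpc");
  mini_batch_add_tail(&b, &l2, "grpc-status: 0");
  for (mini_link *e = b.head; e != NULL; e = e->next) {
    printf("%s\n", e->md);
  }
  return 0;
}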
Example #2
// Copies `metadata` into `out_md`, allocating the new link nodes from the
// stream's arena; records `flags` in `*outflags` and sets `*markfilled` when
// those out-params are provided.
static grpc_error *fill_in_metadata(grpc_exec_ctx *exec_ctx, inproc_stream *s,
                                    const grpc_metadata_batch *metadata,
                                    uint32_t flags, grpc_metadata_batch *out_md,
                                    uint32_t *outflags, bool *markfilled) {
  if (GRPC_TRACER_ON(grpc_inproc_trace)) {
    log_metadata(metadata, s->t->is_client, outflags != NULL);
  }

  if (outflags != NULL) {
    *outflags = flags;
  }
  if (markfilled != NULL) {
    *markfilled = true;
  }
  grpc_error *error = GRPC_ERROR_NONE;
  // Deep-copy each element into out_md: intern the key and value slices and
  // draw the new link node from the stream arena, stopping at the first error.
  for (grpc_linked_mdelem *elem = metadata->list.head;
       (elem != NULL) && (error == GRPC_ERROR_NONE); elem = elem->next) {
    grpc_linked_mdelem *nelem = gpr_arena_alloc(s->arena, sizeof(*nelem));
    nelem->md = grpc_mdelem_from_slices(
        exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
        grpc_slice_intern(GRPC_MDVALUE(elem->md)));

    error = grpc_metadata_batch_link_tail(exec_ctx, out_md, nelem);
  }
  return error;
}
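
What fill_in_metadata adds on top of Example #1 is that every copied link node is drawn from the stream's arena, and the loop short-circuits on the first linking error. The sketch below mirrors that copy loop with a hypothetical bump arena and plain string key/value pairs standing in for gpr_arena and interned slices; the names are illustrative only, not the real API.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical bump arena standing in for gpr_arena. */
typedef struct {
  char buf[1024];
  size_t used;
} arena_t;

static void *arena_alloc(arena_t *a, size_t n) {
  if (a->used + n > sizeof(a->buf)) return NULL;
  void *p = a->buf + a->used;
  a->used += n;
  return p;
}

/* Hypothetical link node / batch standing in for grpc_linked_mdelem and
 * grpc_metadata_batch. */
typedef struct mdlink {
  const char *key;
  const char *value;
  struct mdlink *next;
} mdlink;

typedef struct {
  mdlink *head;
  mdlink *tail;
} mdbatch;

static int batch_link_tail(mdbatch *b, mdlink *l) {
  l->next = NULL;
  if (b->tail != NULL) b->tail->next = l; else b->head = l;
  b->tail = l;
  return 0; /* 0 plays the role of GRPC_ERROR_NONE */
}

/* Mirrors fill_in_metadata's core loop: deep-copy src into dst, allocating
 * each new node from the arena and stopping at the first error. */
static int copy_batch(arena_t *a, const mdbatch *src, mdbatch *dst) {
  int error = 0;
  for (mdlink *e = src->head; e != NULL && error == 0; e = e->next) {
    mdlink *n = arena_alloc(a, sizeof(*n));
    if (n == NULL) return -1; /* arena exhausted */
    n->key = e->key;          /* the real code interns key/value slices here */
    n->value = e->value;
    error = batch_link_tail(dst, n);
  }
  return error;
}

int main(void) {
  arena_t a = {{0}, 0};
  mdlink path = {":path", "/service/Method", NULL};
  mdbatch src = {&path, &path};
  mdbatch dst = {NULL, NULL};
  if (copy_batch(&a, &src, &dst) == 0) {
    for (mdlink *e = dst.head; e != NULL; e = e->next) {
      printf("%s -> %s\n", e->key, e->value);
    }
  }
  return 0;
}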
Example #3
// Fails this side of an inproc stream: sends (possibly empty) trailing
// metadata to the other side if it has not gone out yet, completes any pending
// receive ops with the error, and closes the stream.
static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
                               grpc_error *error) {
  INPROC_LOG(GPR_DEBUG, "read_state_machine %p fail_helper", s);
  // If we're failing this side, we need to make sure that
  // we also send or have already sent trailing metadata
  if (!s->trailing_md_sent) {
    // Send trailing md to the other side indicating cancellation
    s->trailing_md_sent = true;

    grpc_metadata_batch fake_md;
    grpc_metadata_batch_init(&fake_md);

    inproc_stream *other = s->other_side;
    grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
                                                : &other->to_read_trailing_md;
    bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
                                       : &other->to_read_trailing_md_filled;
    fill_in_metadata(exec_ctx, s, &fake_md, 0, dest, NULL, destfilled);
    grpc_metadata_batch_destroy(exec_ctx, &fake_md);

    if (other != NULL) {
      if (other->cancel_other_error == GRPC_ERROR_NONE) {
        other->cancel_other_error = GRPC_ERROR_REF(error);
      }
      if (other->reads_needed) {
        if (!other->read_closure_scheduled) {
          GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
                             GRPC_ERROR_REF(error));
          other->read_closure_scheduled = true;
        }
        other->reads_needed = false;
      }
    } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
      s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
    }
  }
  // Complete any receive ops still pending on this stream, handing the error
  // to their callbacks (a server still gets fake initial metadata first).
  if (s->recv_initial_md_op) {
    grpc_error *err;
    if (!s->t->is_client) {
      // If this is a server, provide initial metadata with a path and authority
      // since it expects that as well as no error yet
      grpc_metadata_batch fake_md;
      grpc_metadata_batch_init(&fake_md);
      grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
      path_md->md =
          grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
                 GRPC_ERROR_NONE);
      grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
      auth_md->md =
          grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
                 GRPC_ERROR_NONE);

      fill_in_metadata(
          exec_ctx, s, &fake_md, 0,
          s->recv_initial_md_op->payload->recv_initial_metadata
              .recv_initial_metadata,
          s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
          NULL);
      grpc_metadata_batch_destroy(exec_ctx, &fake_md);
      err = GRPC_ERROR_NONE;
    } else {
      err = GRPC_ERROR_REF(error);
    }
    INPROC_LOG(GPR_DEBUG,
               "fail_helper %p scheduling initial-metadata-ready %p %p", s,
               error, err);
    GRPC_CLOSURE_SCHED(exec_ctx,
                       s->recv_initial_md_op->payload->recv_initial_metadata
                           .recv_initial_metadata_ready,
                       err);
    // Last use of err so no need to REF and then UNREF it

    if ((s->recv_initial_md_op != s->recv_message_op) &&
        (s->recv_initial_md_op != s->recv_trailing_md_op)) {
      INPROC_LOG(GPR_DEBUG,
                 "fail_helper %p scheduling initial-metadata-on-complete %p", s,
                 error);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
                         GRPC_ERROR_REF(error));
    }
    s->recv_initial_md_op = NULL;
  }
  if (s->recv_message_op) {
    INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
               error);
    GRPC_CLOSURE_SCHED(
        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
        GRPC_ERROR_REF(error));
    if (s->recv_message_op != s->recv_trailing_md_op) {
      INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-on-complete %p",
                 s, error);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
                         GRPC_ERROR_REF(error));
    }
    s->recv_message_op = NULL;
  }
  if (s->recv_trailing_md_op) {
    INPROC_LOG(GPR_DEBUG,
               "fail_helper %p scheduling trailing-md-on-complete %p", s,
               error);
    GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
                       GRPC_ERROR_REF(error));
    s->recv_trailing_md_op = NULL;
  }
  close_other_side_locked(exec_ctx, s, "fail_helper:other_side");
  close_stream_locked(exec_ctx, s);

  GRPC_ERROR_UNREF(error);
}
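
One detail worth noting in fail_helper_locked is the error ownership discipline: every closure that gets scheduled receives its own GRPC_ERROR_REF, and the helper drops the reference it was handed with the final GRPC_ERROR_UNREF. The sketch below models that hand-off with a hypothetical refcounted err_t in place of grpc_error; it is illustrative only, not the gRPC API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted error object standing in for grpc_error. */
typedef struct {
  int refs;
  const char *msg;
} err_t;

static err_t *err_create(const char *msg) {
  err_t *e = malloc(sizeof(*e));
  e->refs = 1;
  e->msg = msg;
  return e;
}

static err_t *err_ref(err_t *e) {
  e->refs++;
  return e;
}

static void err_unref(err_t *e) {
  if (--e->refs == 0) free(e);
}

/* A "closure" that consumes (unrefs) the error it was scheduled with, the way
 * GRPC_CLOSURE_SCHED hands ownership of a ref to the callback. */
static void run_closure(const char *name, err_t *e) {
  printf("%s ran with error: %s\n", name, e->msg);
  err_unref(e);
}

/* Mirrors fail_helper_locked's pattern: take one ref per consumer, then drop
 * the reference this function itself was given. */
static void fail_all(err_t *error) {
  run_closure("recv_initial_metadata_ready", err_ref(error));
  run_closure("recv_message_ready", err_ref(error));
  run_closure("on_complete", err_ref(error));
  err_unref(error); /* like GRPC_ERROR_UNREF(error) at the end of the helper */
}

int main(void) {
  fail_all(err_create("endpoint failed"));
  return 0;
}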