Example #1
// Called when we are done talking/listening to the other side
static void close_other_side_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
                                    const char *reason) {
  if (s->other_side != NULL) {
    // First release the metadata that came from the other side's arena
    grpc_metadata_batch_destroy(exec_ctx, &s->to_read_initial_md);
    grpc_metadata_batch_destroy(exec_ctx, &s->to_read_trailing_md);

    unref_stream(exec_ctx, s->other_side, reason);
    s->other_side_closed = true;
    s->other_side = NULL;
  } else if (!s->other_side_closed) {
    s->write_buffer_other_side_closed = true;
  }
}
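
A minimal sketch (not from the gRPC tree) of the lifecycle these examples share: a grpc_metadata_batch is always paired init/destroy, and destroying it releases any mdelems still linked into it. The header path, the helper name, and the exec_ctx-taking destroy signature below are assumptions matching the newer examples (1, 6, 9, 10); the older examples (2-5, 7, 8) call grpc_metadata_batch_destroy with only the batch.

#include "src/core/lib/transport/metadata_batch.h"

// Sketch only: initialize a batch, let the transport/test fill it, then
// destroy it exactly once to unref anything still owned by the batch.
static void metadata_batch_lifecycle_sketch(grpc_exec_ctx *exec_ctx) {
  grpc_metadata_batch md;
  grpc_metadata_batch_init(&md);
  /* ... link grpc_linked_mdelem entries or hand the batch to a transport op ... */
  grpc_metadata_batch_destroy(exec_ctx, &md);
}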
Example #2
static void destroy_call(grpc_exec_ctx *exec_ctx, void *call, int success) {
  size_t i;
  int ii;
  grpc_call *c = call;
  GPR_TIMER_BEGIN("destroy_call", 0);
  for (i = 0; i < 2; i++) {
    grpc_metadata_batch_destroy(
        &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
  }
  if (c->receiving_stream != NULL) {
    grpc_byte_stream_destroy(c->receiving_stream);
  }
  grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c));
  GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, c->channel, "call");
  gpr_mu_destroy(&c->mu);
  for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
    if (c->status[i].details) {
      GRPC_MDSTR_UNREF(c->status[i].details);
    }
  }
  for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
    GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
  }
  for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
    if (c->context[i].destroy) {
      c->context[i].destroy(c->context[i].value);
    }
  }
  if (c->cq) {
    GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
  }
  gpr_free(c);
  GPR_TIMER_END("destroy_call", 0);
}
Example #3
static void verify_table_size_change_match_elem_size(const char *key,
                                                     const char *value) {
  grpc_slice_buffer output;
  grpc_mdelem *elem = grpc_mdelem_from_strings(key, value);
  size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem);
  size_t initial_table_size = g_compressor.table_size;
  grpc_linked_mdelem *e = gpr_malloc(sizeof(*e));
  grpc_metadata_batch b;
  grpc_metadata_batch_init(&b);
  e[0].md = elem;
  e[0].prev = NULL;
  e[0].next = NULL;
  b.list.head = &e[0];
  b.list.tail = &e[0];
  grpc_slice_buffer_init(&output);

  grpc_transport_one_way_stats stats;
  memset(&stats, 0, sizeof(stats));
  grpc_chttp2_encode_header(&g_compressor, 0xdeadbeef, &b, 0, 16384, &stats,
                            &output);
  grpc_slice_buffer_destroy(&output);
  grpc_metadata_batch_destroy(&b);

  GPR_ASSERT(g_compressor.table_size == elem_size + initial_table_size);
  gpr_free(e);
}
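
A hypothetical driver (not the actual test file) for the check above: each call adds one header element through the shared g_compressor and asserts that the HPACK table grew by exactly that element's size. The function name and the key/value pairs are illustrative.

// Sketch only: exercise the table-size check with a couple of headers.
static void test_table_size_change_sketch(void) {
  verify_table_size_change_match_elem_size("hello", "world");
  verify_table_size_change_match_elem_size("hello-bin", "goodbye");
}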
Example #4
/* verify that the output generated by encoding the stream matches the
   hexstring passed in */
static void verify(size_t window_available, int eof, size_t expect_window_used,
                   const char *expected, size_t nheaders, ...) {
  grpc_slice_buffer output;
  grpc_slice merged;
  grpc_slice expect = parse_hexstring(expected);
  size_t i;
  va_list l;
  grpc_linked_mdelem *e = gpr_malloc(sizeof(*e) * nheaders);
  grpc_metadata_batch b;

  grpc_metadata_batch_init(&b);

  va_start(l, nheaders);
  for (i = 0; i < nheaders; i++) {
    char *key = va_arg(l, char *);
    char *value = va_arg(l, char *);
    if (i) {
      e[i - 1].next = &e[i];
      e[i].prev = &e[i - 1];
    }
    e[i].md = grpc_mdelem_from_strings(key, value);
  }
  e[0].prev = NULL;
  e[nheaders - 1].next = NULL;
  va_end(l);

  b.list.head = &e[0];
  b.list.tail = &e[nheaders - 1];

  if (cap_to_delete == num_to_delete) {
    cap_to_delete = GPR_MAX(2 * cap_to_delete, 1000);
    to_delete = gpr_realloc(to_delete, sizeof(*to_delete) * cap_to_delete);
  }
  to_delete[num_to_delete++] = e;

  grpc_slice_buffer_init(&output);

  grpc_transport_one_way_stats stats;
  memset(&stats, 0, sizeof(stats));
  grpc_chttp2_encode_header(&g_compressor, 0xdeadbeef, &b, eof, 16384, &stats,
                            &output);
  merged = grpc_slice_merge(output.slices, output.count);
  grpc_slice_buffer_destroy(&output);
  grpc_metadata_batch_destroy(&b);

  if (0 != grpc_slice_cmp(merged, expect)) {
    char *expect_str = grpc_dump_slice(expect, GPR_DUMP_HEX | GPR_DUMP_ASCII);
    char *got_str = grpc_dump_slice(merged, GPR_DUMP_HEX | GPR_DUMP_ASCII);
    gpr_log(GPR_ERROR, "mismatched output for %s", expected);
    gpr_log(GPR_ERROR, "EXPECT: %s", expect_str);
    gpr_log(GPR_ERROR, "GOT:    %s", got_str);
    gpr_free(expect_str);
    gpr_free(got_str);
    g_failure = 1;
  }

  grpc_slice_unref(merged);
  grpc_slice_unref(expect);
}
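
A hypothetical call to verify() (the expected hexstring and header values are illustrative, not a verified encoding): encode a single "a: a" header on stream 0xdeadbeef and compare the emitted frame bytes against the hex dump.

// Sketch only: a 5-byte HEADERS frame (type 01, flags 04, stream 0xdeadbeef)
// carrying the literal header "a: a"; the hexstring is illustrative.
static void verify_single_header_sketch(void) {
  verify(0 /* window_available */, 0 /* eof */, 0 /* expect_window_used */,
         "000005 0104 deadbeef 40 0161 0161", 1 /* nheaders */, "a", "a");
}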
Example #5
/* Called either:
     - in response to an API call (or similar) from above, to send something
     - in response to a network event (or similar) from below, to receive something
   op contains type and call direction information, in addition to the data
   that is being sent or received. */
static void call_op(grpc_call_element *elem, grpc_call_element *from_elem,
                    grpc_call_op *op) {
  /* grab pointers to our data from the call element */
  call_data *calld = elem->call_data;
  channel_data *channeld = elem->channel_data;
  GRPC_CALL_LOG_OP(GPR_INFO, elem, op);

  switch (op->type) {
    case GRPC_RECV_METADATA:
      grpc_metadata_batch_filter(&op->data.metadata, server_filter, elem);
      if (!calld->got_initial_metadata) {
        calld->got_initial_metadata = 1;
        /* Have we seen the required http2 transport headers?
           (:method, :scheme, content-type, with :path and :authority covered
           at the channel level right now) */
        if (calld->seen_post && calld->seen_scheme && calld->seen_te_trailers &&
            calld->seen_path) {
          grpc_call_next_op(elem, op);
        } else {
          if (!calld->seen_path) {
            gpr_log(GPR_ERROR, "Missing :path header");
          }
          if (!calld->seen_post) {
            gpr_log(GPR_ERROR, "Missing :method header");
          }
          if (!calld->seen_scheme) {
            gpr_log(GPR_ERROR, "Missing :scheme header");
          }
          if (!calld->seen_te_trailers) {
            gpr_log(GPR_ERROR, "Missing te trailers header");
          }
          /* Error this call out */
          grpc_metadata_batch_destroy(&op->data.metadata);
          op->done_cb(op->user_data, GRPC_OP_OK);
          grpc_call_element_send_cancel(elem);
        }
      } else {
        grpc_call_next_op(elem, op);
      }
      break;
    case GRPC_SEND_METADATA:
      /* If we haven't sent status 200 yet, we need to do so because it needs
         to come before any non-":"-prefixed metadata. */
      if (!calld->sent_status) {
        calld->sent_status = 1;
        grpc_metadata_batch_add_head(&op->data.metadata, &calld->status,
                                     grpc_mdelem_ref(channeld->status_ok));
      }
      grpc_call_next_op(elem, op);
      break;
    default:
      /* pass control up or down the stack depending on op->dir */
      grpc_call_next_op(elem, op);
      break;
  }
}
Example #6
static void close_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
  if (!s->closed) {
    // Release the metadata that we would have written out
    grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_initial_md);
    grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_trailing_md);

    if (s->listed) {
      inproc_stream *p = s->stream_list_prev;
      inproc_stream *n = s->stream_list_next;
      if (p != NULL) {
        p->stream_list_next = n;
      } else {
        s->t->stream_list = n;
      }
      if (n != NULL) {
        n->stream_list_prev = p;
      }
      s->listed = false;
      unref_stream(exec_ctx, s, "close_stream:list");
    }
    s->closed = true;
    unref_stream(exec_ctx, s, "close_stream:closing");
  }
}
Example #7
void grpc_stream_ops_unref_owned_objects(grpc_stream_op *ops, size_t nops) {
  size_t i;
  for (i = 0; i < nops; i++) {
    switch (ops[i].type) {
      case GRPC_OP_SLICE:
        gpr_slice_unref(ops[i].data.slice);
        break;
      case GRPC_OP_METADATA:
        grpc_metadata_batch_destroy(&ops[i].data.metadata);
        break;
      case GRPC_NO_OP:
      case GRPC_OP_BEGIN_MESSAGE:
        break;
    }
  }
}
Example #8
static void finish_batch(grpc_exec_ctx *exec_ctx, void *bctlp, int success) {
  batch_control *bctl = bctlp;
  grpc_call *call = bctl->call;
  grpc_call *child_call;
  grpc_call *next_child_call;

  gpr_mu_lock(&call->mu);
  if (bctl->send_initial_metadata) {
    grpc_metadata_batch_destroy(
        &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
  }
  if (bctl->send_message) {
    call->sending_message = 0;
  }
  if (bctl->send_final_op) {
    grpc_metadata_batch_destroy(
        &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
  }
  if (bctl->recv_initial_metadata) {
    grpc_metadata_batch *md =
        &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
    grpc_metadata_batch_filter(md, recv_initial_filter, call);

    if (gpr_time_cmp(md->deadline, gpr_inf_future(md->deadline.clock_type)) !=
            0 &&
        !call->is_client) {
      GPR_TIMER_BEGIN("set_deadline_alarm", 0);
      set_deadline_alarm(exec_ctx, call, md->deadline);
      GPR_TIMER_END("set_deadline_alarm", 0);
    }
  }
  if (bctl->recv_final_op) {
    grpc_metadata_batch *md =
        &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
    grpc_metadata_batch_filter(md, recv_trailing_filter, call);

    if (call->have_alarm) {
      grpc_timer_cancel(exec_ctx, &call->alarm);
    }
    /* propagate cancellation to any interested children */
    child_call = call->first_child;
    if (child_call != NULL) {
      do {
        next_child_call = child_call->sibling_next;
        if (child_call->cancellation_is_inherited) {
          GRPC_CALL_INTERNAL_REF(child_call, "propagate_cancel");
          grpc_call_cancel(child_call, NULL);
          GRPC_CALL_INTERNAL_UNREF(exec_ctx, child_call, "propagate_cancel");
        }
        child_call = next_child_call;
      } while (child_call != call->first_child);
    }

    if (call->is_client) {
      get_final_status(call, set_status_value_directly,
                       call->final_op.client.status);
      get_final_details(call, call->final_op.client.status_details,
                        call->final_op.client.status_details_capacity);
    } else {
      get_final_status(call, set_cancelled_value,
                       call->final_op.server.cancelled);
    }

    success = 1;
  }
  bctl->success = success != 0;
  gpr_mu_unlock(&call->mu);
  if (gpr_unref(&bctl->steps_to_complete)) {
    post_batch_completion(exec_ctx, bctl);
  }
}
Example #9
static bool cancel_stream_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
                                 grpc_error *error) {
  bool ret = false;  // was the cancel accepted
  INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
             grpc_error_string(error));
  if (s->cancel_self_error == GRPC_ERROR_NONE) {
    ret = true;
    s->cancel_self_error = GRPC_ERROR_REF(error);
    if (s->reads_needed) {
      if (!s->read_closure_scheduled) {
        GRPC_CLOSURE_SCHED(exec_ctx, &s->read_closure,
                           GRPC_ERROR_REF(s->cancel_self_error));
        s->read_closure_scheduled = true;
      }
      s->reads_needed = false;
    }
    // Send trailing md to the other side indicating cancellation, even if we
    // have already done so
    s->trailing_md_sent = true;

    grpc_metadata_batch cancel_md;
    grpc_metadata_batch_init(&cancel_md);

    inproc_stream *other = s->other_side;
    grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
                                                : &other->to_read_trailing_md;
    bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
                                       : &other->to_read_trailing_md_filled;
    fill_in_metadata(exec_ctx, s, &cancel_md, 0, dest, NULL, destfilled);
    grpc_metadata_batch_destroy(exec_ctx, &cancel_md);

    if (other != NULL) {
      if (other->cancel_other_error == GRPC_ERROR_NONE) {
        other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error);
      }
      if (other->reads_needed) {
        if (!other->read_closure_scheduled) {
          GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
                             GRPC_ERROR_REF(other->cancel_other_error));
          other->read_closure_scheduled = true;
        }
        other->reads_needed = false;
      }
    } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
      s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error);
    }

    // if we are a server and already received trailing md but
    // couldn't complete that because we hadn't yet sent out trailing
    // md, now's the chance
    if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
      INPROC_LOG(GPR_DEBUG,
                 "cancel_stream %p scheduling trailing-md-on-complete %p", s,
                 s->cancel_self_error);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
                         GRPC_ERROR_REF(s->cancel_self_error));
      s->recv_trailing_md_op = NULL;
    }
  }

  close_other_side_locked(exec_ctx, s, "cancel_stream:other_side");
  close_stream_locked(exec_ctx, s);

  GRPC_ERROR_UNREF(error);
  return ret;
}
Example #10
static void fail_helper_locked(grpc_exec_ctx *exec_ctx, inproc_stream *s,
                               grpc_error *error) {
  INPROC_LOG(GPR_DEBUG, "read_state_machine %p fail_helper", s);
  // If we're failing this side, we need to make sure that
  // we also send or have already sent trailing metadata
  if (!s->trailing_md_sent) {
    // Send trailing md to the other side indicating cancellation
    s->trailing_md_sent = true;

    grpc_metadata_batch fake_md;
    grpc_metadata_batch_init(&fake_md);

    inproc_stream *other = s->other_side;
    grpc_metadata_batch *dest = (other == NULL) ? &s->write_buffer_trailing_md
                                                : &other->to_read_trailing_md;
    bool *destfilled = (other == NULL) ? &s->write_buffer_trailing_md_filled
                                       : &other->to_read_trailing_md_filled;
    fill_in_metadata(exec_ctx, s, &fake_md, 0, dest, NULL, destfilled);
    grpc_metadata_batch_destroy(exec_ctx, &fake_md);

    if (other != NULL) {
      if (other->cancel_other_error == GRPC_ERROR_NONE) {
        other->cancel_other_error = GRPC_ERROR_REF(error);
      }
      if (other->reads_needed) {
        if (!other->read_closure_scheduled) {
          GRPC_CLOSURE_SCHED(exec_ctx, &other->read_closure,
                             GRPC_ERROR_REF(error));
          other->read_closure_scheduled = true;
        }
        other->reads_needed = false;
      }
    } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
      s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
    }
  }
  if (s->recv_initial_md_op) {
    grpc_error *err;
    if (!s->t->is_client) {
      // If this is a server, provide initial metadata with a path and authority,
      // since the server expects those, and report no error for this op yet
      grpc_metadata_batch fake_md;
      grpc_metadata_batch_init(&fake_md);
      grpc_linked_mdelem *path_md = gpr_arena_alloc(s->arena, sizeof(*path_md));
      path_md->md =
          grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
                 GRPC_ERROR_NONE);
      grpc_linked_mdelem *auth_md = gpr_arena_alloc(s->arena, sizeof(*auth_md));
      auth_md->md =
          grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
                 GRPC_ERROR_NONE);

      fill_in_metadata(
          exec_ctx, s, &fake_md, 0,
          s->recv_initial_md_op->payload->recv_initial_metadata
              .recv_initial_metadata,
          s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
          NULL);
      grpc_metadata_batch_destroy(exec_ctx, &fake_md);
      err = GRPC_ERROR_NONE;
    } else {
      err = GRPC_ERROR_REF(error);
    }
    INPROC_LOG(GPR_DEBUG,
               "fail_helper %p scheduling initial-metadata-ready %p %p", s,
               error, err);
    GRPC_CLOSURE_SCHED(exec_ctx,
                       s->recv_initial_md_op->payload->recv_initial_metadata
                           .recv_initial_metadata_ready,
                       err);
    // Last use of err so no need to REF and then UNREF it

    if ((s->recv_initial_md_op != s->recv_message_op) &&
        (s->recv_initial_md_op != s->recv_trailing_md_op)) {
      INPROC_LOG(GPR_DEBUG,
                 "fail_helper %p scheduling initial-metadata-on-complete %p",
                 s, error);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_initial_md_op->on_complete,
                         GRPC_ERROR_REF(error));
    }
    s->recv_initial_md_op = NULL;
  }
  if (s->recv_message_op) {
    INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
               error);
    GRPC_CLOSURE_SCHED(
        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
        GRPC_ERROR_REF(error));
    if (s->recv_message_op != s->recv_trailing_md_op) {
      INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-on-complete %p",
                 s, error);
      GRPC_CLOSURE_SCHED(exec_ctx, s->recv_message_op->on_complete,
                         GRPC_ERROR_REF(error));
    }
    s->recv_message_op = NULL;
  }
  if (s->recv_trailing_md_op) {
    INPROC_LOG(GPR_DEBUG,
               "fail_helper %p scheduling trailing-md-on-complete %p", s,
               error);
    GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
                       GRPC_ERROR_REF(error));
    s->recv_trailing_md_op = NULL;
  }
  close_other_side_locked(exec_ctx, s, "fail_helper:other_side");
  close_stream_locked(exec_ctx, s);

  GRPC_ERROR_UNREF(error);
}