Example #1
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  if (tcp->shutting_down) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, cb,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
            "TCP socket is shutting down", &tcp->shutdown_error, 1));
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);

  tcp->read_slice = GRPC_SLICE_MALLOC(8192);

  buffer.len = (ULONG)GRPC_SLICE_LENGTH(
      tcp->read_slice);  // we know slice size fits in 32bit.
  buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);

  TCP_REF(tcp, "read");

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately ? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transfered = bytes_read;
    GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
    return;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
                         GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
      return;
    }
  }

  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
Example #2
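/* Watch for a change away from *current. Passing current == NULL instead
   unsubscribes `notify`: the matching watcher is removed and its closure is
   scheduled with GRPC_ERROR_CANCELLED. If the tracked state already differs
   from *current, *current is updated and notify fires immediately; otherwise
   a watcher is queued. The return value is true when the tracker is currently
   IDLE (the unsubscribe path always returns false). */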
bool grpc_connectivity_state_notify_on_state_change(
    grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
    grpc_connectivity_state *current, grpc_closure *notify) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    if (current == NULL) {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
              tracker->name, notify);
    } else {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
              tracker->name, grpc_connectivity_state_name(*current),
              grpc_connectivity_state_name(cur), notify);
    }
  }
  if (current == NULL) {
    grpc_connectivity_state_watcher *w = tracker->watchers;
    if (w != NULL && w->notify == notify) {
      GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
      tracker->watchers = w->next;
      gpr_free(w);
      return false;
    }
    while (w != NULL) {
      grpc_connectivity_state_watcher *rm_candidate = w->next;
      if (rm_candidate != NULL && rm_candidate->notify == notify) {
        GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
        w->next = w->next->next;
        gpr_free(rm_candidate);
        return false;
      }
      w = w->next;
    }
    return false;
  } else {
    if (cur != *current) {
      *current = cur;
      GRPC_CLOSURE_SCHED(exec_ctx, notify,
                         GRPC_ERROR_REF(tracker->current_error));
    } else {
      grpc_connectivity_state_watcher *w =
          (grpc_connectivity_state_watcher *)gpr_malloc(sizeof(*w));
      w->current = current;
      w->notify = notify;
      w->next = tracker->watchers;
      tracker->watchers = w;
    }
    return cur == GRPC_CHANNEL_IDLE;
  }
}
Example #3
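/* Handle transport-wide ops for the in-process transport under the shared
   mutex: register connectivity watches and the accept-stream callback, ack
   op->on_consumed, and close the transport if a goaway or disconnect error
   was supplied. */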
static void perform_transport_op(grpc_exec_ctx *exec_ctx, grpc_transport *gt,
                                 grpc_transport_op *op) {
  inproc_transport *t = (inproc_transport *)gt;
  INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
  gpr_mu_lock(&t->mu->mu);
  if (op->on_connectivity_state_change) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &t->connectivity, op->connectivity_state,
        op->on_connectivity_state_change);
  }
  if (op->set_accept_stream) {
    t->accept_stream_cb = op->set_accept_stream_fn;
    t->accept_stream_data = op->set_accept_stream_user_data;
  }
  if (op->on_consumed) {
    GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
  }

  bool do_close = false;
  if (op->goaway_error != GRPC_ERROR_NONE) {
    do_close = true;
    GRPC_ERROR_UNREF(op->goaway_error);
  }
  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    do_close = true;
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }

  if (do_close) {
    close_transport_locked(exec_ctx, t);
  }
  gpr_mu_unlock(&t->mu->mu);
}
Example #4
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  grpc_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  GRPC_ERROR_REF(error);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
      gpr_free(utf8_message);
      grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        grpc_slice_buffer_add(tcp->read_slices, sub);
      } else {
        grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
        error = tcp->shutting_down
                    ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                          "TCP stream shutting down", &tcp->shutdown_error, 1)
                    : GRPC_ERROR_CREATE_FROM_STATIC_STRING("End of TCP stream");
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(exec_ctx, tcp, "read");
  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
Example #5
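/* Read from one half of an in-memory endpoint pair: fail at once if the pair
   is shut down, complete immediately if data is already buffered, otherwise
   park the closure until the other half writes. */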
static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                    grpc_slice_buffer *slices, grpc_closure *cb) {
  half *m = (half *)ep;
  gpr_mu_lock(&m->parent->mu);
  if (m->parent->shutdown) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already shutdown"));
  } else if (m->read_buffer.count > 0) {
    grpc_slice_buffer_swap(&m->read_buffer, slices);
    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
  } else {
    m->on_read = cb;
    m->on_read_out = slices;
  }
  gpr_mu_unlock(&m->parent->mu);
}
Example #6
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
                                             security_handshaker *h,
                                             grpc_error *error) {
  if (error == GRPC_ERROR_NONE) {
    // If we were shut down after the handshake succeeded but before an
    // endpoint callback was invoked, we need to generate our own error.
    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
  }
  const char *msg = grpc_error_string(error);
  gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);

  if (!h->shutdown) {
    // TODO(ctiller): It is currently necessary to shutdown endpoints
    // before destroying them, even if we know that there are no
    // pending read/write callbacks.  This should be fixed, at which
    // point this can be removed.
    grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(error));
    // Not shutting down, so the write failed.  Clean up before
    // invoking the callback.
    cleanup_args_for_failure_locked(exec_ctx, h);
    // Set shutdown to true so that subsequent calls to
    // security_handshaker_shutdown() do nothing.
    h->shutdown = true;
  }
  // Invoke callback.
  GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
}
Example #7
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
                                          grpc_chttp2_transport *t,
                                          grpc_chttp2_stream *s,
                                          grpc_slice slice, int is_last) {
  /* grpc_error *error = parse_inner_buffer(exec_ctx, p, t, s, slice); */
  if (!s->pending_byte_stream) {
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->frame_storage, slice);
    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
  } else if (s->on_next) {
    GPR_ASSERT(s->frame_storage.length == 0);
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice);
    GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_NONE);
    s->on_next = NULL;
  } else {
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->frame_storage, slice);
  }

  if (is_last && s->received_last_frame) {
    grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
                                   GRPC_ERROR_NONE);
  }

  return GRPC_ERROR_NONE;
}
Example #8
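/* "Work" for a poller that never polls: link this worker into a circular
   doubly-linked list and wait on its condition variable until it is kicked,
   the deadline passes, or the pollset shuts down. The last worker to leave
   schedules the pending shutdown closure, if one was set. */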
static grpc_error *non_polling_poller_work(grpc_exec_ctx *exec_ctx,
                                           grpc_pollset *pollset,
                                           grpc_pollset_worker **worker,
                                           gpr_timespec now,
                                           gpr_timespec deadline) {
  non_polling_poller *npp = (non_polling_poller *)pollset;
  if (npp->shutdown) return GRPC_ERROR_NONE;
  non_polling_worker w;
  gpr_cv_init(&w.cv);
  if (worker != NULL) *worker = (grpc_pollset_worker *)&w;
  if (npp->root == NULL) {
    npp->root = w.next = w.prev = &w;
  } else {
    w.next = npp->root;
    w.prev = w.next->prev;
    w.next->prev = w.prev->next = &w;
  }
  w.kicked = false;
  while (!npp->shutdown && !w.kicked && !gpr_cv_wait(&w.cv, &npp->mu, deadline))
    ;
  if (&w == npp->root) {
    npp->root = w.next;
    if (&w == npp->root) {
      if (npp->shutdown) {
        GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
      }
      npp->root = NULL;
    }
  }
  w.next->prev = w.prev;
  w.prev->next = w.next;
  gpr_cv_destroy(&w.cv);
  if (worker != NULL) *worker = NULL;
  return GRPC_ERROR_NONE;
}
Example #9
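/* Schedule the pollset's shutdown closure once the last worker has left and
   no kick_all callbacks remain pending. */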
static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
      pollset->kick_alls_pending == 0) {
    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}
Example #10
static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *slices, grpc_closure *cb) {
  grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
  for (size_t i = 0; i < slices->count; i++) {
    m->on_write(slices->slices[i]);
  }
  GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
}
Example #11
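/* A handshaker implementation that always fails: it simply schedules
   on_handshake_done with a static error. */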
static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
                                         grpc_handshaker *handshaker,
                                         grpc_tcp_server_acceptor *acceptor,
                                         grpc_closure *on_handshake_done,
                                         grpc_handshaker_args *args) {
  GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                         "Failed to create security handshaker"));
}
Example #12
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  pollset->shutting_down = 1;
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  if (!pollset->is_iocp_worker) {
    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
  } else {
    pollset->on_shutdown = closure;
  }
}
Example #13
static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
                                          grpc_resolver* resolver) {
  fake_resolver* r = (fake_resolver*)resolver;
  if (r->next_completion != NULL) {
    *r->target_result = NULL;
    GRPC_CLOSURE_SCHED(
        exec_ctx, r->next_completion,
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
    r->next_completion = NULL;
  }
}
Example #14
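/* Complete a pending next() call at most once: publish the configured
   addresses as a channel arg on *target_result and schedule the waiting
   closure. */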
static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
                                              sockaddr_resolver *r) {
  if (r->next_completion != NULL && !r->published) {
    r->published = true;
    grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
    *r->target_result =
        grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
    r->next_completion = NULL;
  }
}
Example #15
static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
                                                   fake_resolver* r) {
  if (r->next_completion != NULL && r->next_results != NULL) {
    *r->target_result =
        grpc_channel_args_union(r->next_results, r->channel_args);
    grpc_channel_args_destroy(exec_ctx, r->next_results);
    r->next_results = NULL;
    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
    r->next_completion = NULL;
  }
}
Example #16
void grpc_fake_resolver_response_generator_set_response(
    grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
    grpc_channel_args* next_response) {
  GPR_ASSERT(generator->resolver != NULL);
  generator->next_response = grpc_channel_args_copy(next_response);
  GRPC_CLOSURE_SCHED(
      exec_ctx, GRPC_CLOSURE_CREATE(set_response_cb, generator,
                                    grpc_combiner_scheduler(
                                        generator->resolver->base.combiner)),
      GRPC_ERROR_NONE);
}
Example #17
static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  security_handshaker *h = arg;
  gpr_mu_lock(&h->mu);
  if (error != GRPC_ERROR_NONE || h->shutdown) {
    security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
    goto done;
  }
  // Create frame protector.
  tsi_frame_protector *protector;
  tsi_result result = tsi_handshaker_result_create_frame_protector(
      h->handshaker_result, NULL, &protector);
  if (result != TSI_OK) {
    error = grpc_set_tsi_error_result(
        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Frame protector creation failed"),
        result);
    security_handshake_failed_locked(exec_ctx, h, error);
    goto done;
  }
  // Get unused bytes.
  unsigned char *unused_bytes = NULL;
  size_t unused_bytes_size = 0;
  result = tsi_handshaker_result_get_unused_bytes(
      h->handshaker_result, &unused_bytes, &unused_bytes_size);
  // Create secure endpoint.
  if (unused_bytes_size > 0) {
    grpc_slice slice =
        grpc_slice_from_copied_buffer((char *)unused_bytes, unused_bytes_size);
    h->args->endpoint =
        grpc_secure_endpoint_create(protector, h->args->endpoint, &slice, 1);
    grpc_slice_unref_internal(exec_ctx, slice);
  } else {
    h->args->endpoint =
        grpc_secure_endpoint_create(protector, h->args->endpoint, NULL, 0);
  }
  tsi_handshaker_result_destroy(h->handshaker_result);
  h->handshaker_result = NULL;
  // Clear out the read buffer before it gets passed to the transport.
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, h->args->read_buffer);
  // Add auth context to channel args.
  grpc_arg auth_context_arg = grpc_auth_context_to_arg(h->auth_context);
  grpc_channel_args *tmp_args = h->args->args;
  h->args->args =
      grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
  grpc_channel_args_destroy(exec_ctx, tmp_args);
  // Invoke callback.
  GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
  // Set shutdown to true so that subsequent calls to
  // security_handshaker_shutdown() do nothing.
  h->shutdown = true;
done:
  gpr_mu_unlock(&h->mu);
  security_handshaker_unref(exec_ctx, h);
}
Example #18
static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                    grpc_slice_buffer *slices, grpc_closure *cb) {
  grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
  gpr_mu_lock(&m->mu);
  if (m->read_buffer.count > 0) {
    grpc_slice_buffer_swap(&m->read_buffer, slices);
    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
  } else {
    m->on_read = cb;
    m->on_read_out = slices;
  }
  gpr_mu_unlock(&m->mu);
}
Example #19
static void call_resolver_next_after_locking(grpc_exec_ctx *exec_ctx,
                                             grpc_resolver *resolver,
                                             grpc_channel_args **result,
                                             grpc_closure *on_complete) {
  next_args *a = gpr_malloc(sizeof(*a));
  a->resolver = resolver;
  a->result = result;
  a->on_complete = on_complete;
  GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(
                                   call_resolver_next_now_lock_taken, a,
                                   grpc_combiner_scheduler(resolver->combiner)),
                     GRPC_ERROR_NONE);
}
Example #20
static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                        grpc_error *why) {
  half *m = (half *)ep;
  gpr_mu_lock(&m->parent->mu);
  m->parent->shutdown = true;
  if (m->on_read) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, m->on_read,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
    m->on_read = NULL;
  }
  m = other_half(m);
  if (m->on_read) {
    GRPC_CLOSURE_SCHED(
        exec_ctx, m->on_read,
        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
    m->on_read = NULL;
  }
  gpr_mu_unlock(&m->parent->mu);
  grpc_resource_user_shutdown(exec_ctx, m->resource_user);
  GRPC_ERROR_UNREF(why);
}
Example #21
void grpc_mock_endpoint_put_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                                 grpc_slice slice) {
  grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
  gpr_mu_lock(&m->mu);
  if (m->on_read != NULL) {
    grpc_slice_buffer_add(m->on_read_out, slice);
    GRPC_CLOSURE_SCHED(exec_ctx, m->on_read, GRPC_ERROR_NONE);
    m->on_read = NULL;
  } else {
    grpc_slice_buffer_add(&m->read_buffer, slice);
  }
  gpr_mu_unlock(&m->mu);
}
Example #22
/* Callback to be passed to grpc_executor to asynch-ify
 * grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
                              grpc_error *error) {
  request *r = rp;
  if (error == GRPC_ERROR_NONE) {
    error =
        grpc_blocking_resolve_address(r->name, r->default_port, r->addresses);
  } else {
    GRPC_ERROR_REF(error);
  }
  GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error);
  gpr_free(r->name);
  gpr_free(r->default_port);
  gpr_free(r);
}
Example #23
File: transport.c  Project: mdsteele/grpc
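/* Fail an entire stream op batch: destroy any outgoing message and schedule
   every pending completion (recv_message, recv_initial_metadata, on_complete)
   with the supplied error. */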
void grpc_transport_stream_op_batch_finish_with_failure(
    grpc_exec_ctx *exec_ctx, grpc_transport_stream_op_batch *batch,
    grpc_error *error) {
  if (batch->send_message) {
    grpc_byte_stream_destroy(exec_ctx,
                             batch->payload->send_message.send_message);
  }
  if (batch->recv_message) {
    GRPC_CLOSURE_SCHED(exec_ctx,
                       batch->payload->recv_message.recv_message_ready,
                       GRPC_ERROR_REF(error));
  }
  if (batch->recv_initial_metadata) {
    GRPC_CLOSURE_SCHED(
        exec_ctx,
        batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
        GRPC_ERROR_REF(error));
  }
  GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
  if (batch->cancel_stream) {
    GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
  }
}
Example #24
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
                                 const char *default_port,
                                 grpc_pollset_set *interested_parties,
                                 grpc_closure *on_done,
                                 grpc_resolved_addresses **addresses) {
  request *r = gpr_malloc(sizeof(request));
  GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  r->name = gpr_strdup(name);
  r->default_port = gpr_strdup(default_port);
  r->on_done = on_done;
  r->addresses = addresses;
  GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
Example #25
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  GPR_ASSERT(!pollset->shutting_down);
  GRPC_UV_ASSERT_SAME_THREAD();
  pollset->shutting_down = 1;
  if (grpc_pollset_work_run_loop) {
    // Drain any pending UV callbacks without blocking
    uv_run(uv_default_loop(), UV_RUN_NOWAIT);
  } else {
    // kick the loop once
    uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
  }
  GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
}
Example #26
static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *slices, grpc_closure *cb) {
  half *m = other_half((half *)ep);
  gpr_mu_lock(&m->parent->mu);
  grpc_error *error = GRPC_ERROR_NONE;
  m->parent->stats->num_writes++;
  if (m->parent->shutdown) {
    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Endpoint already shutdown");
  } else if (m->on_read != NULL) {
    for (size_t i = 0; i < slices->count; i++) {
      grpc_slice_buffer_add(m->on_read_out, grpc_slice_copy(slices->slices[i]));
    }
    GRPC_CLOSURE_SCHED(exec_ctx, m->on_read, GRPC_ERROR_NONE);
    m->on_read = NULL;
  } else {
    for (size_t i = 0; i < slices->count; i++) {
      grpc_slice_buffer_add(&m->read_buffer,
                            grpc_slice_copy(slices->slices[i]));
    }
  }
  gpr_mu_unlock(&m->parent->mu);
  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
}
Example #27
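/* Deliver a completed read on a secure endpoint: dump the decrypted slices
   when secure-endpoint tracing is enabled, then schedule the stored read
   callback and drop the "read" ref. */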
static void call_read_cb(grpc_exec_ctx *exec_ctx, secure_endpoint *ep,
                         grpc_error *error) {
  if (GRPC_TRACER_ON(grpc_trace_secure_endpoint)) {
    size_t i;
    for (i = 0; i < ep->read_buffer->count; i++) {
      char *data = grpc_dump_slice(ep->read_buffer->slices[i],
                                   GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "READ %p: %s", ep, data);
      gpr_free(data);
    }
  }
  ep->read_buffer = NULL;
  GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error);
  SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
}
Example #28
static void really_destroy_stream(grpc_exec_ctx *exec_ctx, inproc_stream *s) {
  INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);

  slice_buffer_list_destroy(exec_ctx, &s->to_read_message);
  slice_buffer_list_destroy(exec_ctx, &s->write_buffer_message);
  GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
  GRPC_ERROR_UNREF(s->cancel_self_error);
  GRPC_ERROR_UNREF(s->cancel_other_error);

  unref_transport(exec_ctx, s->t);

  if (s->closure_at_destroy) {
    GRPC_CLOSURE_SCHED(exec_ctx, s->closure_at_destroy, GRPC_ERROR_NONE);
  }
}
Example #29
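/* Begin shutdown of the non-polling pollset: if no worker is blocked, the
   closure runs immediately; otherwise every waiting worker is signalled and
   the last one to leave schedules it (see non_polling_poller_work in
   Example #8). */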
static void non_polling_poller_shutdown(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset *pollset,
                                        grpc_closure *closure) {
  non_polling_poller *p = (non_polling_poller *)pollset;
  GPR_ASSERT(closure != NULL);
  p->shutdown = closure;
  if (p->root == NULL) {
    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
  } else {
    non_polling_worker *w = p->root;
    do {
      gpr_cv_signal(&w->cv);
      w = w->next;
    } while (w != p->root);
  }
}
Example #30
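/* Record a connectivity state transition: replace the tracker's current
   error and, if the state actually changed, store the new state and notify
   (then free) every registered watcher with a ref to that error. */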
void grpc_connectivity_state_set(grpc_exec_ctx *exec_ctx,
                                 grpc_connectivity_state_tracker *tracker,
                                 grpc_connectivity_state state,
                                 grpc_error *error, const char *reason) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  grpc_connectivity_state_watcher *w;
  if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
    const char *error_string = grpc_error_string(error);
    gpr_log(GPR_DEBUG, "SET: %p %s: %s --> %s [%s] error=%p %s", tracker,
            tracker->name, grpc_connectivity_state_name(cur),
            grpc_connectivity_state_name(state), reason, error, error_string);
  }
  switch (state) {
    case GRPC_CHANNEL_INIT:
    case GRPC_CHANNEL_CONNECTING:
    case GRPC_CHANNEL_IDLE:
    case GRPC_CHANNEL_READY:
      GPR_ASSERT(error == GRPC_ERROR_NONE);
      break;
    case GRPC_CHANNEL_SHUTDOWN:
    case GRPC_CHANNEL_TRANSIENT_FAILURE:
      GPR_ASSERT(error != GRPC_ERROR_NONE);
      break;
  }
  GRPC_ERROR_UNREF(tracker->current_error);
  tracker->current_error = error;
  if (cur == state) {
    return;
  }
  GPR_ASSERT(cur != GRPC_CHANNEL_SHUTDOWN);
  gpr_atm_no_barrier_store(&tracker->current_state_atm, state);
  while ((w = tracker->watchers) != NULL) {
    *w->current = state;
    tracker->watchers = w->next;
    if (GRPC_TRACER_ON(grpc_connectivity_state_trace)) {
      gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
              w->notify);
    }
    GRPC_CLOSURE_SCHED(exec_ctx, w->notify,
                       GRPC_ERROR_REF(tracker->current_error));
    gpr_free(w);
  }
}