Code example #1
File: tcp_windows.c Project: Indifer/grpc
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  if (tcp->shutting_down) {
    grpc_closure_sched(exec_ctx, cb,
                       GRPC_ERROR_CREATE("TCP socket is shutting down"));
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);

  tcp->read_slice = grpc_slice_malloc(8192);

  buffer.len = (ULONG)GRPC_SLICE_LENGTH(
      tcp->read_slice);  // we know slice size fits in 32bit.
  buffer.buf = (char *)GRPC_SLICE_START_PTR(tcp->read_slice);

  TCP_REF(tcp, "read");

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately ? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transfered = bytes_read;
    grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
    return;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      grpc_closure_sched(exec_ctx, &tcp->on_read,
                         GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
      return;
    }
  }

  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
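The on_read closure scheduled above is not set up in this function; a minimal sketch of the likely initialization done when the endpoint is created (assuming the grpc_closure_init pattern used in the other examples on this page, with on_read as shown in code example #11):

  grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);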
Code example #2
File: tcp_uv.c Project: yugui/grpc
static void uv_endpoint_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                              grpc_slice_buffer *write_slices,
                              grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  uv_buf_t *buffers;
  unsigned int buffer_count;
  unsigned int i;
  grpc_slice *slice;
  uv_write_t *write_req;

  if (grpc_tcp_trace) {
    size_t j;

    for (j = 0; j < write_slices->count; j++) {
      char *data = grpc_dump_slice(write_slices->slices[j],
                                   GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
      gpr_free(data);
    }
  }

  if (tcp->shutting_down) {
    grpc_closure_sched(exec_ctx, cb,
                       GRPC_ERROR_CREATE("TCP socket is shutting down"));
    return;
  }

  GPR_ASSERT(tcp->write_cb == NULL);
  tcp->write_slices = write_slices;
  GPR_ASSERT(tcp->write_slices->count <= UINT_MAX);
  if (tcp->write_slices->count == 0) {
    // No slices means we don't have to do anything,
    // and libuv doesn't like empty writes
    grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
    return;
  }

  tcp->write_cb = cb;
  buffer_count = (unsigned int)tcp->write_slices->count;
  buffers = gpr_malloc(sizeof(uv_buf_t) * buffer_count);
  grpc_resource_user_alloc(exec_ctx, tcp->resource_user,
                           sizeof(uv_buf_t) * buffer_count, NULL);
  for (i = 0; i < buffer_count; i++) {
    slice = &tcp->write_slices->slices[i];
    buffers[i].base = (char *)GRPC_SLICE_START_PTR(*slice);
    buffers[i].len = GRPC_SLICE_LENGTH(*slice);
  }
  tcp->write_buffers = buffers;
  write_req = &tcp->write_req;
  write_req->data = tcp;
  TCP_REF(tcp, "write");
  // TODO(murgatroid99): figure out what the return value here means
  uv_write(write_req, (uv_stream_t *)tcp->handle, buffers, buffer_count,
           write_callback);
}
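write_callback itself is not shown on this page. The following is a rough, hypothetical sketch of what it has to do given the state set up above, not the project's actual code: unref the endpoint, release the buffer array, and schedule the stored write_cb with an error derived from libuv's status (the matching resource-user release for the accounting done above is omitted).

static void write_callback(uv_write_t *req, int status) {
  grpc_tcp *tcp = (grpc_tcp *)req->data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_closure *cb = tcp->write_cb;
  grpc_error *error =
      status == 0 ? GRPC_ERROR_NONE : GRPC_ERROR_CREATE("TCP Write failed");
  tcp->write_cb = NULL;
  gpr_free(tcp->write_buffers);
  tcp->write_buffers = NULL;
  TCP_UNREF(&exec_ctx, tcp, "write");
  grpc_closure_sched(&exec_ctx, cb, error);
  grpc_exec_ctx_finish(&exec_ctx);
}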
Code example #3
File: resource_quota.c Project: yugui/grpc
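/* Scheduled from grpc_resource_user_shutdown (code example #22): cancel any
   pending reclaimers and drop this resource user from both reclaimer lists. */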
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
  grpc_resource_user *resource_user = ru;
  grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
                     GRPC_ERROR_CANCELLED);
  grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
                     GRPC_ERROR_CANCELLED);
  resource_user->reclaimers[0] = NULL;
  resource_user->reclaimers[1] = NULL;
  rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
  rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
}
Code example #4
bool grpc_connectivity_state_notify_on_state_change(
    grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
    grpc_connectivity_state *current, grpc_closure *notify) {
  grpc_connectivity_state cur =
      (grpc_connectivity_state)gpr_atm_no_barrier_load(
          &tracker->current_state_atm);
  if (grpc_connectivity_state_trace) {
    if (current == NULL) {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
              tracker->name, notify);
    } else {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
              tracker->name, grpc_connectivity_state_name(*current),
              grpc_connectivity_state_name(cur), notify);
    }
  }
  if (current == NULL) {
    grpc_connectivity_state_watcher *w = tracker->watchers;
    if (w != NULL && w->notify == notify) {
      grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
      tracker->watchers = w->next;
      gpr_free(w);
      return false;
    }
    while (w != NULL) {
      grpc_connectivity_state_watcher *rm_candidate = w->next;
      if (rm_candidate != NULL && rm_candidate->notify == notify) {
        grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
        w->next = w->next->next;
        gpr_free(rm_candidate);
        return false;
      }
      w = w->next;
    }
    return false;
  } else {
    if (cur != *current) {
      *current = cur;
      grpc_closure_sched(exec_ctx, notify,
                         GRPC_ERROR_REF(tracker->current_error));
    } else {
      grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
      w->current = current;
      w->notify = notify;
      w->next = tracker->watchers;
      tracker->watchers = w;
    }
    return cur == GRPC_CHANNEL_IDLE;
  }
}
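A minimal caller-side sketch (on_connectivity_changed and watcher_arg are made-up names): passing a state pointer subscribes the closure, while passing NULL for current cancels a previously registered one.

  grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
  grpc_closure on_change;
  grpc_closure_init(&on_change, on_connectivity_changed, watcher_arg,
                    grpc_schedule_on_exec_ctx);
  if (grpc_connectivity_state_notify_on_state_change(exec_ctx, tracker, &state,
                                                     &on_change)) {
    /* true means the tracked entity is currently IDLE; callers typically use
       this as the cue to start connecting. */
  }
  /* Later, to unsubscribe the same closure: */
  grpc_connectivity_state_notify_on_state_change(exec_ctx, tracker, NULL,
                                                 &on_change);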
Code example #5
File: resolve_address_uv.c Project: royalharsh/grpc
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
                                 const char *default_port,
                                 grpc_pollset_set *interested_parties,
                                 grpc_closure *on_done,
                                 grpc_resolved_addresses **addrs) {
  uv_getaddrinfo_t *req;
  request *r;
  struct addrinfo *hints;
  char *host;
  char *port;
  grpc_error *err;
  int s;
  err = try_split_host_port(name, default_port, &host, &port);
  if (err != GRPC_ERROR_NONE) {
    grpc_closure_sched(exec_ctx, on_done, err);
    return;
  }
  r = gpr_malloc(sizeof(request));
  r->on_done = on_done;
  r->addresses = addrs;
  r->host = host;
  r->port = port;
  req = gpr_malloc(sizeof(uv_getaddrinfo_t));
  req->data = r;

  /* Call getaddrinfo */
  hints = gpr_malloc(sizeof(struct addrinfo));
  memset(hints, 0, sizeof(struct addrinfo));
  hints->ai_family = AF_UNSPEC;     /* ipv4 or ipv6 */
  hints->ai_socktype = SOCK_STREAM; /* stream socket */
  hints->ai_flags = AI_PASSIVE;     /* for wildcard IP address */
  r->hints = hints;

  s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port,
                     hints);

  if (s != 0) {
    *addrs = NULL;
    err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
    err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
                             grpc_slice_from_static_string(uv_strerror(s)));
    grpc_closure_sched(exec_ctx, on_done, err);
    gpr_free(r);
    gpr_free(req);
    gpr_free(hints);
    gpr_free(host);
    gpr_free(port);
  }
}
Code example #6
File: round_robin.c Project: Indifer/grpc
static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
  pending_pick *pp;
  size_t i;

  gpr_mu_lock(&p->mu);
  if (grpc_lb_round_robin_trace) {
    gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
  }

  p->shutdown = 1;
  while ((pp = p->pending_picks)) {
    p->pending_picks = pp->next;
    *pp->target = NULL;
    grpc_closure_sched(exec_ctx, pp->on_complete,
                       GRPC_ERROR_CREATE("Channel Shutdown"));
    gpr_free(pp);
  }
  grpc_connectivity_state_set(
      exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
  for (i = 0; i < p->num_subchannels; i++) {
    subchannel_data *sd = p->subchannels[i];
    grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
                                           &sd->connectivity_changed_closure);
  }
  gpr_mu_unlock(&p->mu);
}
Code example #7
File: frame_data.c Project: BattleDrive/grpc
grpc_error *grpc_chttp2_data_parser_parse(grpc_exec_ctx *exec_ctx, void *parser,
                                          grpc_chttp2_transport *t,
                                          grpc_chttp2_stream *s,
                                          grpc_slice slice, int is_last) {
  /* grpc_error *error = parse_inner_buffer(exec_ctx, p, t, s, slice); */
  s->stats.incoming.framing_bytes += GRPC_SLICE_LENGTH(slice);
  if (!s->pending_byte_stream) {
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->frame_storage, slice);
    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
  } else if (s->on_next) {
    GPR_ASSERT(s->frame_storage.length == 0);
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice);
    grpc_closure_sched(exec_ctx, s->on_next, GRPC_ERROR_NONE);
    s->on_next = NULL;
  } else {
    grpc_slice_ref_internal(slice);
    grpc_slice_buffer_add(&s->frame_storage, slice);
  }

  if (is_last && s->received_last_frame) {
    grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
                                   GRPC_ERROR_NONE);
  }

  return GRPC_ERROR_NONE;
}
Code example #8
File: tcp_client_uv.c Project: Indifer/grpc
static void uv_tc_on_connect(uv_connect_t *req, int status) {
  grpc_uv_tcp_connect *connect = req->data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_error *error = GRPC_ERROR_NONE;
  int done;
  grpc_closure *closure = connect->closure;
  grpc_timer_cancel(&exec_ctx, &connect->alarm);
  if (status == 0) {
    *connect->endpoint = grpc_tcp_create(
        connect->tcp_handle, connect->resource_quota, connect->addr_name);
  } else {
    error = GRPC_ERROR_CREATE("Failed to connect to remote host");
    error = grpc_error_set_int(error, GRPC_ERROR_INT_ERRNO, -status);
    error =
        grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
    if (status == UV_ECANCELED) {
      error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
                                 "Timeout occurred");
      // This should only happen if the handle is already closed
    } else {
      error = grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR,
                                 uv_strerror(status));
      uv_close((uv_handle_t *)connect->tcp_handle, tcp_close_callback);
    }
  }
  done = (--connect->refs == 0);
  if (done) {
    uv_tcp_connect_cleanup(&exec_ctx, connect);
  }
  grpc_closure_sched(&exec_ctx, closure, error);
  grpc_exec_ctx_finish(&exec_ctx);
}
Code example #9
File: round_robin.c Project: Indifer/grpc
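/* Cancel every pending pick whose initial_metadata_flags match the given
   mask/eq pair; non-matching picks are pushed back onto the pending list. */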
static void rr_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                            uint32_t initial_metadata_flags_mask,
                            uint32_t initial_metadata_flags_eq,
                            grpc_error *error) {
  round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      *pp->target = NULL;
      grpc_closure_sched(
          exec_ctx, pp->on_complete,
          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
  GRPC_ERROR_UNREF(error);
}
Code example #10
File: chttp2_connector.c Project: BattleDrive/grpc
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  chttp2_connector *c = arg;
  gpr_mu_lock(&c->mu);
  GPR_ASSERT(c->connecting);
  c->connecting = false;
  if (error != GRPC_ERROR_NONE || c->shutdown) {
    if (error == GRPC_ERROR_NONE) {
      error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("connector shutdown");
    } else {
      error = GRPC_ERROR_REF(error);
    }
    memset(c->result, 0, sizeof(*c->result));
    grpc_closure *notify = c->notify;
    c->notify = NULL;
    grpc_closure_sched(exec_ctx, notify, error);
    if (c->endpoint != NULL) {
      grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
    }
    gpr_mu_unlock(&c->mu);
    chttp2_connector_unref(exec_ctx, arg);
  } else {
    GPR_ASSERT(c->endpoint != NULL);
    start_handshake_locked(exec_ctx, c);
    gpr_mu_unlock(&c->mu);
  }
}
Code example #11
File: tcp_windows.c Project: Indifer/grpc
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  grpc_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  GRPC_ERROR_REF(error);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      error = GRPC_ERROR_CREATE(utf8_message);
      gpr_free(utf8_message);
      grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        grpc_slice_buffer_add(tcp->read_slices, sub);
      } else {
        grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
        error = GRPC_ERROR_CREATE("End of TCP stream");
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(exec_ctx, tcp, "read");
  grpc_closure_sched(exec_ctx, cb, error);
}
Code example #12
File: round_robin.c Project: Indifer/grpc
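/* Cancel the pending pick (if any) whose target pointer matches; all other
   picks are pushed back onto the pending list. */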
static void rr_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           grpc_connected_subchannel **target,
                           grpc_error *error) {
  round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if (pp->target == target) {
      *target = NULL;
      grpc_closure_sched(
          exec_ctx, pp->on_complete,
          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
  GRPC_ERROR_UNREF(error);
}
Code example #13
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
                                    http_connect_handshaker* handshaker,
                                    grpc_error* error) {
  if (error == GRPC_ERROR_NONE) {
    // If we were shut down after an endpoint operation succeeded but
    // before the endpoint callback was invoked, we need to generate our
    // own error.
    error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshaker shutdown");
  }
  if (!handshaker->shutdown) {
    // TODO(ctiller): It is currently necessary to shutdown endpoints
    // before destroying them, even if we know that there are no
    // pending read/write callbacks.  This should be fixed, at which
    // point this can be removed.
    grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint,
                           GRPC_ERROR_REF(error));
    // Not shutting down, so the handshake failed.  Clean up before
    // invoking the callback.
    cleanup_args_for_failure_locked(exec_ctx, handshaker);
    // Set shutdown to true so that subsequent calls to
    // http_connect_handshaker_shutdown() do nothing.
    handshaker->shutdown = true;
  }
  // Invoke callback.
  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
}
Code example #14
File: resource_quota.c Project: yugui/grpc
static void rq_step_sched(grpc_exec_ctx *exec_ctx,
                          grpc_resource_quota *resource_quota) {
  if (resource_quota->step_scheduled) return;
  resource_quota->step_scheduled = true;
  grpc_resource_quota_ref_internal(resource_quota);
  grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
                     GRPC_ERROR_NONE);
}
Code example #15
File: resource_quota.c Project: yugui/grpc
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
  grpc_resource_user *resource_user = ru;
  GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
  for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
    rulist_remove(resource_user, (grpc_rulist)i);
  }
  grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
                     GRPC_ERROR_CANCELLED);
  grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
                     GRPC_ERROR_CANCELLED);
  if (resource_user->free_pool != 0) {
    resource_user->resource_quota->free_pool += resource_user->free_pool;
    rq_step_sched(exec_ctx, resource_user->resource_quota);
  }
  grpc_resource_quota_unref_internal(exec_ctx, resource_user->resource_quota);
  gpr_mu_destroy(&resource_user->mu);
  gpr_free(resource_user->name);
  gpr_free(resource_user);
}
Code example #16
File: resource_quota.c Project: yugui/grpc
/* Public API */
void grpc_resource_quota_resize(grpc_resource_quota *resource_quota,
                                size_t size) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  rq_resize_args *a = gpr_malloc(sizeof(*a));
  a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
  a->size = (int64_t)size;
  grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
  grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
  grpc_exec_ctx_finish(&exec_ctx);
}
Code example #17
File: resource_quota.c Project: yugui/grpc
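/* Drop 'amount' references; once the count reaches zero, schedule the
   destroy closure (presumably ru_destroy, code example #15). */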
static void ru_unref_by(grpc_exec_ctx *exec_ctx,
                        grpc_resource_user *resource_user, gpr_atm amount) {
  GPR_ASSERT(amount > 0);
  gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
  GPR_ASSERT(old >= amount);
  if (old == amount) {
    grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
                       GRPC_ERROR_NONE);
  }
}
Code example #18
File: resource_quota.c Project: yugui/grpc
void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
                                           grpc_resource_user *resource_user) {
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
            resource_user->resource_quota->name, resource_user->name);
  }
  grpc_closure_sched(
      exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
      GRPC_ERROR_NONE);
}
Code example #19
File: resource_quota.c Project: yugui/grpc
void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
                                       grpc_resource_user *resource_user,
                                       bool destructive,
                                       grpc_closure *closure) {
  GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
  resource_user->new_reclaimers[destructive] = closure;
  grpc_closure_sched(exec_ctx,
                     &resource_user->post_reclaimer_closure[destructive],
                     GRPC_ERROR_NONE);
}
Code example #20
File: resolve_address_posix.c Project: baylabs/grpc
/* Callback to be passed to grpc_executor to asynch-ify
 * grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
                              grpc_error *error) {
  request *r = rp;
  grpc_closure_sched(
      exec_ctx, r->on_done,
      grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
  gpr_free(r->name);
  gpr_free(r->default_port);
  gpr_free(r);
}
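The request struct shared by this callback and resolve_address_impl (code example #29) is not shown on this page; judging from the fields both functions touch, it presumably looks roughly like this:

typedef struct {
  char *name;
  char *default_port;
  grpc_closure request_closure;
  grpc_closure *on_done;
  grpc_resolved_addresses **addrs_out;
} request;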
Code example #21
File: fake_resolver.c Project: yugui/grpc
static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
                                                   fake_resolver* r) {
  if (r->next_completion != NULL && !r->published) {
    r->published = true;
    grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
    *r->target_result =
        grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
    r->next_completion = NULL;
  }
}
Code example #22
File: resource_quota.c Project: yugui/grpc
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
                                 grpc_resource_user *resource_user) {
  if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
    grpc_closure_sched(exec_ctx,
                       grpc_closure_create(
                           ru_shutdown, resource_user,
                           grpc_combiner_scheduler(
                               resource_user->resource_quota->combiner, false)),
                       GRPC_ERROR_NONE);
  }
}
Code example #23
File: filter_causes_close.c Project: royalharsh/grpc
static void recv_im_ready(grpc_exec_ctx *exec_ctx, void *arg,
                          grpc_error *error) {
  grpc_call_element *elem = arg;
  call_data *calld = elem->call_data;
  grpc_closure_sched(
      exec_ctx, calld->recv_im_ready,
      grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                             "Failure that's not preventable.", &error, 1),
                         GRPC_ERROR_INT_GRPC_STATUS,
                         GRPC_STATUS_PERMISSION_DENIED));
}
Code example #24
File: fake_resolver.c Project: yugui/grpc
static void fake_resolver_shutdown(grpc_exec_ctx* exec_ctx,
                                   grpc_resolver* resolver) {
  fake_resolver* r = (fake_resolver*)resolver;
  gpr_mu_lock(&r->mu);
  if (r->next_completion != NULL) {
    *r->target_result = NULL;
    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
    r->next_completion = NULL;
  }
  gpr_mu_unlock(&r->mu);
}
Code example #25
File: socket_windows.c Project: Indifer/grpc
/* Calling notify_on_read or write means either of two things:
-) The IOCP already completed in the background, and we need to call
the callback now.
-) The IOCP hasn't completed yet, and we're queuing it for later. */
static void socket_notify_on_iocp(grpc_exec_ctx *exec_ctx,
                                  grpc_winsocket *socket, grpc_closure *closure,
                                  grpc_winsocket_callback_info *info) {
  GPR_ASSERT(info->closure == NULL);
  gpr_mu_lock(&socket->state_mu);
  if (info->has_pending_iocp) {
    info->has_pending_iocp = 0;
    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
  } else {
    info->closure = closure;
  }
  gpr_mu_unlock(&socket->state_mu);
}
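The public grpc_socket_notify_on_read/write entry points used elsewhere on this page (for example at the end of code example #1) are presumably thin wrappers that select the right callback_info slot; a sketch under that assumption:

void grpc_socket_notify_on_read(grpc_exec_ctx *exec_ctx,
                                grpc_winsocket *socket, grpc_closure *closure) {
  socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
}

void grpc_socket_notify_on_write(grpc_exec_ctx *exec_ctx,
                                 grpc_winsocket *socket, grpc_closure *closure) {
  socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
}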
Code example #26
File: resolve_address_uv.c Project: Indifer/grpc
static void getaddrinfo_callback(uv_getaddrinfo_t *req, int status,
                                 struct addrinfo *res) {
  request *r = (request *)req->data;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_error *error;
  error = handle_addrinfo_result(status, res, r->addresses);
  grpc_closure_sched(&exec_ctx, r->on_done, error);
  grpc_exec_ctx_finish(&exec_ctx);

  gpr_free(r->hints);
  gpr_free(r);
  gpr_free(req);
  uv_freeaddrinfo(res);
}
Code example #27
File: resource_quota.c Project: yugui/grpc
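/* Install the reclaimer closure posted via new_reclaimers; if the resource
   user is already shutting down, cancel it instead and report that nothing
   was installed. */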
static bool ru_post_reclaimer(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user,
                              bool destructive) {
  grpc_closure *closure = resource_user->new_reclaimers[destructive];
  GPR_ASSERT(closure != NULL);
  resource_user->new_reclaimers[destructive] = NULL;
  GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
  if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
    return false;
  }
  resource_user->reclaimers[destructive] = closure;
  return true;
}
Code example #28
File: socket_windows.c Project: Indifer/grpc
void grpc_socket_become_ready(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
                              grpc_winsocket_callback_info *info) {
  GPR_ASSERT(!info->has_pending_iocp);
  gpr_mu_lock(&socket->state_mu);
  if (info->closure) {
    grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
    info->closure = NULL;
  } else {
    info->has_pending_iocp = 1;
  }
  bool should_destroy = check_destroyable(socket);
  gpr_mu_unlock(&socket->state_mu);
  if (should_destroy) destroy(socket);
}
Code example #29
File: resolve_address_posix.c Project: baylabs/grpc
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
                                 const char *default_port,
                                 grpc_pollset_set *interested_parties,
                                 grpc_closure *on_done,
                                 grpc_resolved_addresses **addrs) {
  request *r = gpr_malloc(sizeof(request));
  grpc_closure_init(&r->request_closure, do_request_thread, r,
                    grpc_executor_scheduler);
  r->name = gpr_strdup(name);
  r->default_port = gpr_strdup(default_port);
  r->on_done = on_done;
  r->addrs_out = addrs;
  grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
Code example #30
File: tcp_server_uv.c Project: baylabs/grpc
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
  if (s->shutdown_complete != NULL) {
    grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
  }

  while (s->head) {
    grpc_tcp_listener *sp = s->head;
    s->head = sp->next;
    sp->next = NULL;
    gpr_free(sp->handle);
    gpr_free(sp);
  }
  grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
  gpr_free(s);
}