Example #1
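/* Client-channel transport op handler: acks op->on_consumed, binds the
   pollset, registers connectivity-state watches, routes pings to the LB
   policy, and tears down the resolver and LB policy on disconnect. */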
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = elem->channel_data;

  grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);

  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  gpr_mu_lock(&chand->mu);
  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      grpc_exec_ctx_sched(exec_ctx, op->send_ping,
                          GRPC_ERROR_CREATE("Ping with no load balancing"),
                          NULL);
    } else {
      grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect_with_error != GRPC_ERROR_NONE) {
    if (chand->resolver != NULL) {
      set_channel_connectivity_state_locked(
          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
          GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
      if (!chand->started_resolving) {
        grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                   GRPC_ERROR_REF(op->disconnect_with_error));
        grpc_exec_ctx_enqueue_list(exec_ctx,
                                   &chand->waiting_for_config_closures, NULL);
      }
      if (chand->lb_policy != NULL) {
        grpc_pollset_set_del_pollset_set(exec_ctx,
                                         chand->lb_policy->interested_parties,
                                         chand->interested_parties);
        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
        chand->lb_policy = NULL;
      }
    }
    GRPC_ERROR_UNREF(op->disconnect_with_error);
  }
  gpr_mu_unlock(&chand->mu);
}
Example #2
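/* Resource-user shutdown: cancels both pending reclaimer closures and clears
   their slots. */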
static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
  grpc_resource_user *resource_user = ru;
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
                      GRPC_ERROR_CANCELLED, NULL);
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
                      GRPC_ERROR_CANCELLED, NULL);
  resource_user->reclaimers[0] = NULL;
  resource_user->reclaimers[1] = NULL;
}
Example #3
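/* Windows TCP read: tries a synchronous, non-blocking WSARecv first and falls
   back to an overlapped (asynchronous) read if the socket would block. */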
static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     gpr_slice_buffer *read_slices, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_winsocket *handle = tcp->socket;
  grpc_winsocket_callback_info *info = &handle->read_info;
  int status;
  DWORD bytes_read = 0;
  DWORD flags = 0;
  WSABUF buffer;

  if (tcp->shutting_down) {
    grpc_exec_ctx_sched(exec_ctx, cb,
                        GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
    return;
  }

  tcp->read_cb = cb;
  tcp->read_slices = read_slices;
  gpr_slice_buffer_reset_and_unref(read_slices);

  tcp->read_slice = gpr_slice_malloc(8192);

  buffer.len = (ULONG)GPR_SLICE_LENGTH(
      tcp->read_slice);  // we know slice size fits in 32bit.
  buffer.buf = (char *)GPR_SLICE_START_PTR(tcp->read_slice);

  TCP_REF(tcp, "read");

  /* First let's try a synchronous, non-blocking read. */
  status =
      WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags, NULL, NULL);
  info->wsa_error = status == 0 ? 0 : WSAGetLastError();

  /* Did we get data immediately ? Yay. */
  if (info->wsa_error != WSAEWOULDBLOCK) {
    info->bytes_transfered = bytes_read;
    grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
    return;
  }

  /* Otherwise, let's retry, by queuing a read. */
  memset(&tcp->socket->read_info.overlapped, 0, sizeof(OVERLAPPED));
  status = WSARecv(tcp->socket->socket, &buffer, 1, &bytes_read, &flags,
                   &info->overlapped, NULL);

  if (status != 0) {
    int wsa_error = WSAGetLastError();
    if (wsa_error != WSA_IO_PENDING) {
      info->wsa_error = wsa_error;
      grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
                          GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
      return;
    }
  }

  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
}
Example #4
File: tcp_posix.c  Project: izouxv/grpc
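/* POSIX TCP write: completes the callback immediately when the buffer is
   empty or flushes in one go; otherwise parks the callback until the fd
   becomes writable. */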
static void tcp_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                      grpc_slice_buffer *buf, grpc_closure *cb) {
  grpc_tcp *tcp = (grpc_tcp *)ep;
  grpc_error *error = GRPC_ERROR_NONE;

  if (grpc_tcp_trace) {
    size_t i;

    for (i = 0; i < buf->count; i++) {
      char *data =
          grpc_dump_slice(buf->slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p (peer=%s): %s", tcp, tcp->peer_string, data);
      gpr_free(data);
    }
  }

  GPR_TIMER_BEGIN("tcp_write", 0);
  GPR_ASSERT(tcp->write_cb == NULL);

  if (buf->length == 0) {
    GPR_TIMER_END("tcp_write", 0);
    grpc_exec_ctx_sched(exec_ctx, cb,
                        grpc_fd_is_shutdown(tcp->em_fd)
                            ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
                            : GRPC_ERROR_NONE,
                        NULL);
    return;
  }
  tcp->outgoing_buffer = buf;
  tcp->outgoing_slice_idx = 0;
  tcp->outgoing_byte_idx = 0;

  if (!tcp_flush(tcp, &error)) {
    TCP_REF(tcp, "write");
    tcp->write_cb = cb;
    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "write: delayed");
    }
    grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_closure);
  } else {
    if (grpc_tcp_trace) {
      const char *str = grpc_error_string(error);
      gpr_log(GPR_DEBUG, "write: %s", str);
      grpc_error_free_string(str);
    }
    grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
  }

  GPR_TIMER_END("tcp_write", 0);
}
Example #5
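/* Resumes a deferred subchannel pick: does nothing if the pick was cancelled,
   propagates errors, and signals on_ready when pick_subchannel completes
   synchronously. */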
static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg,
                             grpc_error *error) {
  continue_picking_args *cpa = arg;
  if (cpa->connected_subchannel == NULL) {
    /* cancelled, do nothing */
  } else if (error != GRPC_ERROR_NONE) {
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
  } else if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
                             cpa->initial_metadata_flags,
                             cpa->connected_subchannel, cpa->on_ready)) {
    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
  }
  gpr_free(cpa);
}
Example #6
File: grpclb.c  Project: andrewpollock/grpc
/* The \a on_complete closure passed as part of the pick requires keeping a
 * reference to its associated round robin instance. We wrap this closure in
 * order to unref the round robin instance upon its invocation */
static void wrapped_rr_closure(grpc_exec_ctx *exec_ctx, void *arg,
                               grpc_error *error) {
  wrapped_rr_closure_arg *wc_arg = arg;
  if (wc_arg->rr_policy != NULL) {
    if (grpc_lb_glb_trace) {
      gpr_log(GPR_INFO, "Unreffing RR (0x%" PRIxPTR ")",
              (intptr_t)wc_arg->rr_policy);
    }
    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");

    /* if target is NULL, no pick has been made by the RR policy (eg, all
     * addresses failed to connect). There won't be any user_data/token
     * available */
    if (wc_arg->target != NULL) {
      initial_metadata_add_lb_token(wc_arg->initial_metadata,
                                    wc_arg->lb_token_mdelem_storage,
                                    GRPC_MDELEM_REF(wc_arg->lb_token));
    }
  }
  GPR_ASSERT(wc_arg->wrapped_closure != NULL);
  grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
                      NULL);
  GPR_ASSERT(wc_arg->free_when_done != NULL);
  gpr_free(wc_arg->free_when_done);
}
Example #7
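/* Read half of an in-process endpoint pair: fails immediately after shutdown,
   hands over any buffered slices, and otherwise parks the callback until data
   arrives. */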
static void me_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                    grpc_slice_buffer *slices, grpc_closure *cb) {
  half *m = (half *)ep;
  gpr_mu_lock(&m->parent->mu);
  if (m->parent->shutdown) {
    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"),
                        NULL);
  } else if (m->read_buffer.count > 0) {
    grpc_slice_buffer_swap(&m->read_buffer, slices);
    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
  } else {
    m->on_read = cb;
    m->on_read_out = slices;
  }
  gpr_mu_unlock(&m->parent->mu);
}
Example #8
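/* Takes size bytes from the resource user's free pool.  If the pool goes
   negative, the completion is queued and the quota's combiner is asked to
   rebalance; otherwise the callback is scheduled right away. */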
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user, size_t size,
                              grpc_closure *optional_on_done) {
  gpr_mu_lock(&resource_user->mu);
  ru_ref_by(resource_user, (gpr_atm)size);
  resource_user->free_pool -= (int64_t)size;
  if (grpc_resource_quota_trace) {
    gpr_log(GPR_DEBUG, "RQ %s %s: alloc %" PRIdPTR "; free_pool -> %" PRId64,
            resource_user->resource_quota->name, resource_user->name, size,
            resource_user->free_pool);
  }
  if (resource_user->free_pool < 0) {
    grpc_closure_list_append(&resource_user->on_allocated, optional_on_done,
                             GRPC_ERROR_NONE);
    if (!resource_user->allocating) {
      resource_user->allocating = true;
      grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
                            &resource_user->allocate_closure, GRPC_ERROR_NONE,
                            false);
    }
  } else {
    grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
  }
  gpr_mu_unlock(&resource_user->mu);
}
Example #9
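/* Cancels every pending pick whose initial-metadata flags match the given
   mask/value pair: matching picks complete with "Pick Cancelled"; the rest
   are re-queued. */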
static void pf_cancel_picks(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                            uint32_t initial_metadata_flags_mask,
                            uint32_t initial_metadata_flags_eq,
                            grpc_error *error) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
        initial_metadata_flags_eq) {
      grpc_exec_ctx_sched(
          exec_ctx, pp->on_complete,
          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
  GRPC_ERROR_UNREF(error);
}
Example #10
File: server.c  Project: PiotrSikora/grpc
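/* Routes a newly started RPC: zombifies the call if the server is shutting
   down; otherwise publishes it immediately or after reading the initial
   message, depending on the registered payload handling. */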
static void finish_start_new_rpc(
    grpc_exec_ctx *exec_ctx, grpc_server *server, grpc_call_element *elem,
    request_matcher *rm,
    grpc_server_register_method_payload_handling payload_handling) {
  call_data *calld = elem->call_data;

  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    gpr_mu_lock(&calld->mu_state);
    calld->state = ZOMBIED;
    gpr_mu_unlock(&calld->mu_state);
    grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
    grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
                        NULL);
    return;
  }

  calld->request_matcher = rm;

  switch (payload_handling) {
    case GRPC_SRM_PAYLOAD_NONE:
      publish_new_rpc(exec_ctx, elem, GRPC_ERROR_NONE);
      break;
    case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
      grpc_op op;
      memset(&op, 0, sizeof(op));
      op.op = GRPC_OP_RECV_MESSAGE;
      op.data.recv_message = &calld->payload;
      grpc_closure_init(&calld->publish, publish_new_rpc, elem);
      grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
                                        &calld->publish);
      break;
    }
  }
}
Example #11
File: server.c  Project: PiotrSikora/grpc
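/* Queues a requested call: fails fast on shutdown or when request ids are
   exhausted, then pushes the request onto the matcher's lock-free stack and,
   if it was the first entry, drains any calls already pending on that
   matcher. */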
static grpc_call_error queue_call_request(grpc_exec_ctx *exec_ctx,
                                          grpc_server *server, size_t cq_idx,
                                          requested_call *rc) {
  call_data *calld = NULL;
  request_matcher *rm = NULL;
  int request_id;
  if (gpr_atm_acq_load(&server->shutdown_flag)) {
    fail_call(exec_ctx, server, cq_idx, rc,
              GRPC_ERROR_CREATE("Server Shutdown"));
    return GRPC_CALL_OK;
  }
  request_id = gpr_stack_lockfree_pop(server->request_freelist_per_cq[cq_idx]);
  if (request_id == -1) {
    /* out of request ids: just fail this one */
    fail_call(exec_ctx, server, cq_idx, rc,
              grpc_error_set_int(GRPC_ERROR_CREATE("Out of request ids"),
                                 GRPC_ERROR_INT_LIMIT,
                                 server->max_requested_calls_per_cq));
    return GRPC_CALL_OK;
  }
  switch (rc->type) {
    case BATCH_CALL:
      rm = &server->unregistered_request_matcher;
      break;
    case REGISTERED_CALL:
      rm = &rc->data.registered.registered_method->request_matcher;
      break;
  }
  server->requested_calls_per_cq[cq_idx][request_id] = *rc;
  gpr_free(rc);
  if (gpr_stack_lockfree_push(rm->requests_per_cq[cq_idx], request_id)) {
    /* this was the first queued request: we need to lock and start
       matching calls */
    gpr_mu_lock(&server->mu_call);
    while ((calld = rm->pending_head) != NULL) {
      request_id = gpr_stack_lockfree_pop(rm->requests_per_cq[cq_idx]);
      if (request_id == -1) break;
      rm->pending_head = calld->pending_next;
      gpr_mu_unlock(&server->mu_call);
      gpr_mu_lock(&calld->mu_state);
      if (calld->state == ZOMBIED) {
        gpr_mu_unlock(&calld->mu_state);
        grpc_closure_init(
            &calld->kill_zombie_closure, kill_zombie,
            grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
        grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
                            GRPC_ERROR_NONE, NULL);
      } else {
        GPR_ASSERT(calld->state == PENDING);
        calld->state = ACTIVATED;
        gpr_mu_unlock(&calld->mu_state);
        publish_call(exec_ctx, server, calld, cq_idx,
                     &server->requested_calls_per_cq[cq_idx][request_id]);
      }
      gpr_mu_lock(&server->mu_call);
    }
    gpr_mu_unlock(&server->mu_call);
  }
  return GRPC_CALL_OK;
}
Example #12
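/* Security handshake completion: on success, wraps the secure endpoint in a
   chttp2 transport and attaches the auth context to the channel args; in
   every case the stored notify closure is scheduled with the resulting
   error. */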
static void on_secure_handshake_done(grpc_exec_ctx *exec_ctx, void *arg,
                                     grpc_security_status status,
                                     grpc_endpoint *secure_endpoint,
                                     grpc_auth_context *auth_context) {
  connector *c = arg;
  gpr_mu_lock(&c->mu);
  grpc_error *error = GRPC_ERROR_NONE;
  if (c->connecting_endpoint == NULL) {
    memset(c->result, 0, sizeof(*c->result));
    gpr_mu_unlock(&c->mu);
  } else if (status != GRPC_SECURITY_OK) {
    error = grpc_error_set_int(GRPC_ERROR_CREATE("Secure handshake failed"),
                               GRPC_ERROR_INT_SECURITY_STATUS, status);
    memset(c->result, 0, sizeof(*c->result));
    c->connecting_endpoint = NULL;
    gpr_mu_unlock(&c->mu);
  } else {
    grpc_arg auth_context_arg;
    c->connecting_endpoint = NULL;
    gpr_mu_unlock(&c->mu);
    c->result->transport = grpc_create_chttp2_transport(
        exec_ctx, c->args.channel_args, secure_endpoint, 1);
    grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport, NULL);
    auth_context_arg = grpc_auth_context_to_arg(auth_context);
    c->result->channel_args =
        grpc_channel_args_copy_and_add(c->tmp_args, &auth_context_arg, 1);
  }
  grpc_closure *notify = c->notify;
  c->notify = NULL;
  grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
}
Example #13
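/* TCP connect callback for the secure connector: on success, records the
   endpoint and either sends the initial connect string or starts the
   handshake; on failure, reports the error through the stored notify
   closure. */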
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  connector *c = arg;
  grpc_endpoint *tcp = c->newly_connecting_endpoint;
  if (tcp != NULL) {
    gpr_mu_lock(&c->mu);
    GPR_ASSERT(c->connecting_endpoint == NULL);
    c->connecting_endpoint = tcp;
    gpr_mu_unlock(&c->mu);
    if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
      grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
                        c);
      grpc_slice_buffer_init(&c->initial_string_buffer);
      grpc_slice_buffer_add(&c->initial_string_buffer,
                            c->args.initial_connect_string);
      grpc_endpoint_write(exec_ctx, tcp, &c->initial_string_buffer,
                          &c->initial_string_sent);
    } else {
      grpc_handshake_manager_do_handshake(
          exec_ctx, c->handshake_mgr, tcp, c->args.channel_args,
          c->args.deadline, NULL /* acceptor */, on_handshake_done, c);
    }
  } else {
    memset(c->result, 0, sizeof(*c->result));
    grpc_closure *notify = c->notify;
    c->notify = NULL;
    grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_REF(error), NULL);
  }
}
Example #14
File: round_robin.c  Project: pquerna/grpc
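/* Shuts down the round_robin policy: fails all pending picks, publishes
   GRPC_CHANNEL_SHUTDOWN, and unsubscribes from each subchannel's connectivity
   notifications. */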
static void rr_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  round_robin_lb_policy *p = (round_robin_lb_policy *)pol;
  pending_pick *pp;
  size_t i;

  gpr_mu_lock(&p->mu);
  if (grpc_lb_round_robin_trace) {
    gpr_log(GPR_DEBUG, "Shutting down Round Robin policy at %p", (void *)pol);
  }

  p->shutdown = 1;
  while ((pp = p->pending_picks)) {
    p->pending_picks = pp->next;
    *pp->target = NULL;
    grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
                        GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
    gpr_free(pp);
  }
  grpc_connectivity_state_set(
      exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE("Channel Shutdown"), "rr_shutdown");
  for (i = 0; i < p->num_subchannels; i++) {
    subchannel_data *sd = p->subchannels[i];
    grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, NULL, NULL,
                                           &sd->connectivity_changed_closure);
  }
  gpr_mu_unlock(&p->mu);
}
Example #15
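/* TCP connect callback for the chttp2 connector: on error or shutdown, the
   notify closure is completed with the failure and the endpoint is shut down;
   otherwise the initial connect string (if any) is written before the
   handshake starts. */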
static void connected(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  chttp2_connector *c = arg;
  gpr_mu_lock(&c->mu);
  GPR_ASSERT(c->connecting);
  c->connecting = false;
  if (error != GRPC_ERROR_NONE || c->shutdown) {
    if (error == GRPC_ERROR_NONE) {
      error = GRPC_ERROR_CREATE("connector shutdown");
    } else {
      error = GRPC_ERROR_REF(error);
    }
    memset(c->result, 0, sizeof(*c->result));
    grpc_closure *notify = c->notify;
    c->notify = NULL;
    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
    if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
    gpr_mu_unlock(&c->mu);
    chttp2_connector_unref(exec_ctx, arg);
  } else {
    GPR_ASSERT(c->endpoint != NULL);
    if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
      grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
                        c);
      grpc_slice_buffer_init(&c->initial_string_buffer);
      grpc_slice_buffer_add(&c->initial_string_buffer,
                            c->args.initial_connect_string);
      grpc_endpoint_write(exec_ctx, c->endpoint, &c->initial_string_buffer,
                          &c->initial_string_sent);
    } else {
      start_handshake_locked(exec_ctx, c);
    }
    gpr_mu_unlock(&c->mu);
  }
}
Example #16
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
                                    http_connect_handshaker* handshaker,
                                    grpc_error* error) {
  if (error == GRPC_ERROR_NONE) {
    // If we were shut down after an endpoint operation succeeded but
    // before the endpoint callback was invoked, we need to generate our
    // own error.
    error = GRPC_ERROR_CREATE("Handshaker shutdown");
  }
  if (!handshaker->shutdown) {
    // TODO(ctiller): It is currently necessary to shutdown endpoints
    // before destroying them, even if we know that there are no
    // pending read/write callbacks.  This should be fixed, at which
    // point this can be removed.
    grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint);
    // Not shutting down, so the handshake failed.  Clean up before
    // invoking the callback.
    cleanup_args_for_failure_locked(handshaker);
    // Set shutdown to true so that subsequent calls to
    // http_connect_handshaker_shutdown() do nothing.
    handshaker->shutdown = true;
  }
  // Invoke callback.
  grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
}
Example #17
/* Asynchronous callback from the IOCP, or the background thread. */
static void on_read(grpc_exec_ctx *exec_ctx, void *tcpp, grpc_error *error) {
  grpc_tcp *tcp = tcpp;
  grpc_closure *cb = tcp->read_cb;
  grpc_winsocket *socket = tcp->socket;
  gpr_slice sub;
  grpc_winsocket_callback_info *info = &socket->read_info;

  GRPC_ERROR_REF(error);

  if (error == GRPC_ERROR_NONE) {
    if (info->wsa_error != 0 && !tcp->shutting_down) {
      char *utf8_message = gpr_format_message(info->wsa_error);
      error = GRPC_ERROR_CREATE(utf8_message);
      gpr_free(utf8_message);
      gpr_slice_unref(tcp->read_slice);
    } else {
      if (info->bytes_transfered != 0 && !tcp->shutting_down) {
        sub = gpr_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
        gpr_slice_buffer_add(tcp->read_slices, sub);
      } else {
        gpr_slice_unref(tcp->read_slice);
        error = GRPC_ERROR_CREATE("End of TCP stream");
      }
    }
  }

  tcp->read_cb = NULL;
  TCP_UNREF(tcp, "read");
  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
}
Example #18
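/* Cancels a single pending pick: the entry whose target matches completes
   with "Pick Cancelled"; all other pending picks are re-queued. */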
static void pf_cancel_pick(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                           grpc_connected_subchannel **target,
                           grpc_error *error) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  pending_pick *pp;
  gpr_mu_lock(&p->mu);
  pp = p->pending_picks;
  p->pending_picks = NULL;
  while (pp != NULL) {
    pending_pick *next = pp->next;
    if (pp->target == target) {
      *target = NULL;
      grpc_exec_ctx_sched(
          exec_ctx, pp->on_complete,
          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
      gpr_free(pp);
    } else {
      pp->next = p->pending_picks;
      p->pending_picks = pp;
    }
    pp = next;
  }
  gpr_mu_unlock(&p->mu);
  GRPC_ERROR_UNREF(error);
}
Example #19
void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
                              grpc_call_element_args* args) {
  grpc_deadline_state* deadline_state = elem->call_data;
  memset(deadline_state, 0, sizeof(*deadline_state));
  deadline_state->call_stack = args->call_stack;
  gpr_mu_init(&deadline_state->timer_mu);
  // Deadline will always be infinite on servers, so the timer will only be
  // set on clients with a finite deadline.
  const gpr_timespec deadline =
      gpr_convert_clock_type(args->deadline, GPR_CLOCK_MONOTONIC);
  if (gpr_time_cmp(deadline, gpr_inf_future(GPR_CLOCK_MONOTONIC)) != 0) {
    // When the deadline passes, we indicate the failure by sending down
    // an op with cancel_error set.  However, we can't send down any ops
    // until after the call stack is fully initialized.  If we start the
    // timer here, we have no guarantee that the timer won't pop before
    // call stack initialization is finished.  To avoid that problem, we
    // create a closure to start the timer, and we schedule that closure
    // to be run after call stack initialization is done.
    struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
    state->elem = elem;
    state->deadline = deadline;
    grpc_closure_init(&state->closure, start_timer_after_init, state);
    grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
  }
}
Example #20
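/* Shuts down the pick_first policy: publishes GRPC_CHANNEL_SHUTDOWN, cancels
   the connectivity subscription on the selected (or currently probed)
   subchannel, and completes every pending pick with a NULL target. */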
static void pf_shutdown(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  pending_pick *pp;
  grpc_connected_subchannel *selected;
  gpr_mu_lock(&p->mu);
  selected = GET_SELECTED(p);
  p->shutdown = 1;
  pp = p->pending_picks;
  p->pending_picks = NULL;
  grpc_connectivity_state_set(
      exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
      GRPC_ERROR_CREATE("Channel shutdown"), "shutdown");
  /* cancel subscription */
  if (selected != NULL) {
    grpc_connected_subchannel_notify_on_state_change(
        exec_ctx, selected, NULL, NULL, &p->connectivity_changed);
  } else if (p->num_subchannels > 0) {
    grpc_subchannel_notify_on_state_change(
        exec_ctx, p->subchannels[p->checking_subchannel], NULL, NULL,
        &p->connectivity_changed);
  }
  gpr_mu_unlock(&p->mu);
  while (pp != NULL) {
    pending_pick *next = pp->next;
    *pp->target = NULL;
    grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
    gpr_free(pp);
    pp = next;
  }
}
Example #21
// If the handshake failed or we're shutting down, clean up and invoke the
// callback with the error.
static void security_handshake_failed_locked(grpc_exec_ctx *exec_ctx,
                                             security_handshaker *h,
                                             grpc_error *error) {
  if (error == GRPC_ERROR_NONE) {
    // If we were shut down after the handshake succeeded but before an
    // endpoint callback was invoked, we need to generate our own error.
    error = GRPC_ERROR_CREATE("Handshaker shutdown");
  }
  const char *msg = grpc_error_string(error);
  gpr_log(GPR_DEBUG, "Security handshake failed: %s", msg);
  grpc_error_free_string(msg);
  if (!h->shutdown) {
    // TODO(ctiller): It is currently necessary to shutdown endpoints
    // before destroying them, even if we know that there are no
    // pending read/write callbacks.  This should be fixed, at which
    // point this can be removed.
    grpc_endpoint_shutdown(exec_ctx, h->args->endpoint);
    // Not shutting down, so the write failed.  Clean up before
    // invoking the callback.
    cleanup_args_for_failure_locked(h);
    // Set shutdown to true so that subsequent calls to
    // security_handshaker_shutdown() do nothing.
    h->shutdown = true;
  }
  // Invoke callback.
  grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL);
}
Example #22
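/* Subscribes to (current != NULL) or unsubscribes from (current == NULL)
   connectivity-state changes.  A subscription fires immediately when the
   tracked state already differs from *current; an unsubscription cancels the
   matching watcher with GRPC_ERROR_CANCELLED. */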
int grpc_connectivity_state_notify_on_state_change(
    grpc_exec_ctx *exec_ctx, grpc_connectivity_state_tracker *tracker,
    grpc_connectivity_state *current, grpc_closure *notify) {
  if (grpc_connectivity_state_trace) {
    if (current == NULL) {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: unsubscribe notify=%p", tracker,
              tracker->name, notify);
    } else {
      gpr_log(GPR_DEBUG, "CONWATCH: %p %s: from %s [cur=%s] notify=%p", tracker,
              tracker->name, grpc_connectivity_state_name(*current),
              grpc_connectivity_state_name(tracker->current_state), notify);
    }
  }
  if (current == NULL) {
    grpc_connectivity_state_watcher *w = tracker->watchers;
    if (w != NULL && w->notify == notify) {
      grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
      tracker->watchers = w->next;
      gpr_free(w);
      return 0;
    }
    while (w != NULL) {
      grpc_connectivity_state_watcher *rm_candidate = w->next;
      if (rm_candidate != NULL && rm_candidate->notify == notify) {
        grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
        w->next = w->next->next;
        gpr_free(rm_candidate);
        return 0;
      }
      w = w->next;
    }
    return 0;
  } else {
    if (tracker->current_state != *current) {
      *current = tracker->current_state;
      grpc_exec_ctx_sched(exec_ctx, notify,
                          GRPC_ERROR_REF(tracker->current_error), NULL);
    } else {
      grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
      w->current = current;
      w->notify = notify;
      w->next = tracker->watchers;
      tracker->watchers = w;
    }
    return tracker->current_state == GRPC_CHANNEL_IDLE;
  }
}
Example #23
File: mock_endpoint.c  Project: izouxv/grpc
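/* Mock endpoint write: forwards each slice to the registered on_write hook
   and completes the callback immediately with success. */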
static void me_write(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
                     grpc_slice_buffer *slices, grpc_closure *cb) {
  grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
  for (size_t i = 0; i < slices->count; i++) {
    m->on_write(slices->slices[i]);
  }
  grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
}
Example #24
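/* A handshaker that always fails: immediately completes on_handshake_done
   with an error. */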
static void fail_handshaker_do_handshake(grpc_exec_ctx *exec_ctx,
                                         grpc_handshaker *handshaker,
                                         grpc_tcp_server_acceptor *acceptor,
                                         grpc_closure *on_handshake_done,
                                         grpc_handshaker_args *args) {
  grpc_exec_ctx_sched(exec_ctx, on_handshake_done,
                      GRPC_ERROR_CREATE("Failed to create security handshaker"),
                      NULL);
}
Example #25
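/* Destroys a resource user: cancels any pending reclaimers, runs the
   registered on_done_destroy closure, and returns the remaining free pool to
   the quota. */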
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
  grpc_resource_user *resource_user = ru;
  GPR_ASSERT(resource_user->allocated == 0);
  for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
    rulist_remove(resource_user, (grpc_rulist)i);
  }
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
                      GRPC_ERROR_CANCELLED, NULL);
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
                      GRPC_ERROR_CANCELLED, NULL);
  grpc_exec_ctx_sched(exec_ctx, (grpc_closure *)gpr_atm_no_barrier_load(
                                    &resource_user->on_done_destroy_closure),
                      GRPC_ERROR_NONE, NULL);
  if (resource_user->free_pool != 0) {
    resource_user->resource_quota->free_pool += resource_user->free_pool;
    rq_step_sched(exec_ctx, resource_user->resource_quota);
  }
}
Example #26
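/* Shuts down both halves of the endpoint pair: marks the pair as shut down,
   fails any parked read on either half, and shuts down the resource user. */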
static void me_shutdown(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep) {
  half *m = (half *)ep;
  gpr_mu_lock(&m->parent->mu);
  m->parent->shutdown = true;
  if (m->on_read) {
    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
                        NULL);
    m->on_read = NULL;
  }
  m = other_half(m);
  if (m->on_read) {
    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
                        NULL);
    m->on_read = NULL;
  }
  gpr_mu_unlock(&m->parent->mu);
  grpc_resource_user_shutdown(exec_ctx, m->resource_user);
}
Example #27
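/* libuv-based asynchronous address resolution: splits host/port, submits
   uv_getaddrinfo, and, if the request cannot be submitted at all, completes
   on_done with the error and frees the request state. */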
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
                                 const char *default_port,
                                 grpc_closure *on_done,
                                 grpc_resolved_addresses **addrs) {
  uv_getaddrinfo_t *req;
  request *r;
  struct addrinfo *hints;
  char *host;
  char *port;
  grpc_error *err;
  int s;
  err = try_split_host_port(name, default_port, &host, &port);
  if (err != GRPC_ERROR_NONE) {
    grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
    return;
  }
  r = gpr_malloc(sizeof(request));
  r->on_done = on_done;
  r->addresses = addrs;
  req = gpr_malloc(sizeof(uv_getaddrinfo_t));
  req->data = r;

  /* Call getaddrinfo */
  hints = gpr_malloc(sizeof(struct addrinfo));
  memset(hints, 0, sizeof(struct addrinfo));
  hints->ai_family = AF_UNSPEC;     /* ipv4 or ipv6 */
  hints->ai_socktype = SOCK_STREAM; /* stream socket */
  hints->ai_flags = AI_PASSIVE;     /* for wildcard IP address */
  r->hints = hints;

  s = uv_getaddrinfo(uv_default_loop(), req, getaddrinfo_callback, host, port,
                     hints);

  if (s != 0) {
    *addrs = NULL;
    err = GRPC_ERROR_CREATE("getaddrinfo failed");
    err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
    grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
    gpr_free(r);
    gpr_free(req);
    gpr_free(hints);
  }
}
Example #28
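/* Final destruction of a resource user once its refcount reaches zero:
   cancels pending reclaimers, returns the free pool to the quota, and frees
   the resource user itself. */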
static void ru_destroy(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
  grpc_resource_user *resource_user = ru;
  GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
  for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
    rulist_remove(resource_user, (grpc_rulist)i);
  }
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
                      GRPC_ERROR_CANCELLED, NULL);
  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
                      GRPC_ERROR_CANCELLED, NULL);
  if (resource_user->free_pool != 0) {
    resource_user->resource_quota->free_pool += resource_user->free_pool;
    rq_step_sched(exec_ctx, resource_user->resource_quota);
  }
  grpc_resource_quota_internal_unref(exec_ctx, resource_user->resource_quota);
  gpr_mu_destroy(&resource_user->mu);
  gpr_free(resource_user->name);
  gpr_free(resource_user);
}
Example #29
/* Callback to be passed to grpc_executor to asynch-ify
 * grpc_blocking_resolve_address */
static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
                              grpc_error *error) {
  request *r = rp;
  grpc_exec_ctx_sched(
      exec_ctx, r->on_done,
      grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out),
      NULL);
  gpr_free(r->name);
  gpr_free(r->default_port);
  gpr_free(r);
}
Example #30
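/* Pings via the currently selected subchannel, or fails the closure with
   "Not connected" when pick_first has nothing connected yet. */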
static void pf_ping_one(grpc_exec_ctx *exec_ctx, grpc_lb_policy *pol,
                        grpc_closure *closure) {
  pick_first_lb_policy *p = (pick_first_lb_policy *)pol;
  grpc_connected_subchannel *selected = GET_SELECTED(p);
  if (selected) {
    grpc_connected_subchannel_ping(exec_ctx, selected, closure);
  } else {
    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
                        NULL);
  }
}