Example #1
File: udp_server.c  Project: mdsteele/grpc
/* called when all listening endpoints have been shutdown, so no further
   events will be received on them - at this point it's safe to destroy
   things */
static void deactivated_all_ports(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
  /* delete ALL the things */
  gpr_mu_lock(&s->mu);

  GPR_ASSERT(s->shutdown);

  if (s->head) {
    grpc_udp_listener *sp;
    for (sp = s->head; sp; sp = sp->next) {
      grpc_unlink_if_unix_domain_socket(&sp->addr);

      GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
                        grpc_schedule_on_exec_ctx);
      if (!sp->orphan_notified) {
        /* Call the orphan_cb to signal that the FD is about to be closed and
         * should no longer be used. Because at this point, all listening ports
         * have been shutdown already, no need to shutdown again.*/
        GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
                          grpc_schedule_on_exec_ctx);
        GPR_ASSERT(sp->orphan_cb);
        sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
                      sp->server->user_data);
      }
      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                     false /* already_closed */, "udp_listener_shutdown");
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    finish_shutdown(exec_ctx, s);
  }
}
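Every example on this page follows the same shape: GRPC_CLOSURE_INIT(closure, callback, callback_arg, scheduler) binds a callback with the grpc_iomgr_cb_func signature to caller-owned closure storage, which is then scheduled or handed to an I/O primitive. A minimal sketch of the pattern, with hypothetical names (my_state, done_cb, schedule_done) that are not taken from the examples:

/* Minimal sketch, hypothetical names: bind a grpc_iomgr_cb_func callback to
 * caller-owned closure storage and schedule it on the current exec_ctx
 * (GRPC_CLOSURE_SCHED is the same helper used in Example #20). */
typedef struct {
  grpc_closure done_closure; /* must outlive the callback invocation */
  int succeeded;
} my_state;

static void done_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  my_state *st = (my_state *)arg; /* the third GRPC_CLOSURE_INIT argument */
  st->succeeded = (error == GRPC_ERROR_NONE);
}

static void schedule_done(grpc_exec_ctx *exec_ctx, my_state *st) {
  GRPC_CLOSURE_INIT(&st->done_closure, done_cb, st, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(exec_ctx, &st->done_closure, GRPC_ERROR_NONE);
}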
Example #2
File: udp_server.c  Project: mdsteele/grpc
void grpc_udp_server_start(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
                           grpc_pollset **pollsets, size_t pollset_count,
                           void *user_data) {
  size_t i;
  gpr_mu_lock(&s->mu);
  grpc_udp_listener *sp;
  GPR_ASSERT(s->active_ports == 0);
  s->pollsets = pollsets;
  s->user_data = user_data;

  sp = s->head;
  while (sp != NULL) {
    for (i = 0; i < pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
    }
    GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);

    GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
                      grpc_schedule_on_exec_ctx);
    grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);

    /* Registered for both read and write callbacks: increment active_ports
     * twice to account for this, and delay free-ing of memory until both
     * on_read and on_write have fired. */
    s->active_ports += 2;

    sp = sp->next;
  }

  gpr_mu_unlock(&s->mu);
}
Example #3
grpc_endpoint *grpc_tcp_create(grpc_exec_ctx *exec_ctx, grpc_winsocket *socket,
                               grpc_channel_args *channel_args,
                               char *peer_string) {
  grpc_resource_quota *resource_quota = grpc_resource_quota_create(NULL);
  if (channel_args != NULL) {
    for (size_t i = 0; i < channel_args->num_args; i++) {
      if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
        resource_quota = grpc_resource_quota_ref_internal(
            channel_args->args[i].value.pointer.p);
      }
    }
  }
  grpc_tcp *tcp = (grpc_tcp *)gpr_malloc(sizeof(grpc_tcp));
  memset(tcp, 0, sizeof(grpc_tcp));
  tcp->base.vtable = &vtable;
  tcp->socket = socket;
  gpr_mu_init(&tcp->mu);
  gpr_ref_init(&tcp->refcount, 1);
  GRPC_CLOSURE_INIT(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
  tcp->peer_string = gpr_strdup(peer_string);
  tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
  /* Tell network status tracking code about the new endpoint */
  grpc_network_status_register_endpoint(&tcp->base);

  return &tcp->base;
}
Example #4
void grpc_channel_watch_connectivity_state(
    grpc_channel *channel, grpc_connectivity_state last_observed_state,
    gpr_timespec deadline, grpc_completion_queue *cq, void *tag) {
  grpc_channel_element *client_channel_elem =
      grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  state_watcher *w = (state_watcher *)gpr_malloc(sizeof(*w));

  GRPC_API_TRACE(
      "grpc_channel_watch_connectivity_state("
      "channel=%p, last_observed_state=%d, "
      "deadline=gpr_timespec { tv_sec: %" PRId64
      ", tv_nsec: %d, clock_type: %d }, "
      "cq=%p, tag=%p)",
      7, (channel, (int)last_observed_state, deadline.tv_sec, deadline.tv_nsec,
          (int)deadline.clock_type, cq, tag));

  GPR_ASSERT(grpc_cq_begin_op(cq, tag));

  gpr_mu_init(&w->mu);
  GRPC_CLOSURE_INIT(&w->on_complete, watch_complete, w,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&w->on_timeout, timeout_complete, w,
                    grpc_schedule_on_exec_ctx);
  w->phase = WAITING;
  w->state = last_observed_state;
  w->cq = cq;
  w->tag = tag;
  w->channel = channel;
  w->error = NULL;

  watcher_timer_init_arg *wa =
      (watcher_timer_init_arg *)gpr_malloc(sizeof(watcher_timer_init_arg));
  wa->w = w;
  wa->deadline = deadline;
  GRPC_CLOSURE_INIT(&w->watcher_timer_init, watcher_timer_init, wa,
                    grpc_schedule_on_exec_ctx);

  if (client_channel_elem->filter == &grpc_client_channel_filter) {
    GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
    grpc_client_channel_watch_connectivity_state(
        &exec_ctx, client_channel_elem,
        grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &w->state,
        &w->on_complete, &w->watcher_timer_init);
  } else {
    abort();
  }

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #5
/* Write as much as possible, then register notify_on_write. */
static void client_session_write(grpc_exec_ctx *exec_ctx, void *arg, /*client */
                                 grpc_error *error) {
  client *cl = arg;
  int fd = grpc_fd_wrapped_fd(cl->em_fd);
  ssize_t write_once = 0;

  if (error != GRPC_ERROR_NONE) {
    gpr_mu_lock(g_mu);
    client_session_shutdown_cb(exec_ctx, arg, 1);
    gpr_mu_unlock(g_mu);
    return;
  }

  do {
    write_once = write(fd, cl->write_buf, CLIENT_WRITE_BUF_SIZE);
    if (write_once > 0) cl->write_bytes_total += write_once;
  } while (write_once > 0);
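  /* The loop exits once write() returns 0 or -1. EAGAIN means the socket
     buffer is full, so re-register for write readiness (up to
     CLIENT_TOTAL_WRITE_CNT rounds); any other errno is treated as fatal. */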

  if (errno == EAGAIN) {
    gpr_mu_lock(g_mu);
    if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
      GRPC_CLOSURE_INIT(&cl->write_closure, client_session_write, cl,
                        grpc_schedule_on_exec_ctx);
      grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
      cl->client_write_cnt++;
    } else {
      client_session_shutdown_cb(exec_ctx, arg, 1);
    }
    gpr_mu_unlock(g_mu);
  } else {
    gpr_log(GPR_ERROR, "unknown errno %s", strerror(errno));
    abort();
  }
}
Example #6
/* Called when a new TCP connection request arrives in the listening port. */
static void listen_cb(grpc_exec_ctx *exec_ctx, void *arg, /*=sv_arg*/
                      grpc_error *error) {
  server *sv = arg;
  int fd;
  int flags;
  session *se;
  struct sockaddr_storage ss;
  socklen_t slen = sizeof(ss);
  grpc_fd *listen_em_fd = sv->em_fd;

  if (error != GRPC_ERROR_NONE) {
    listen_shutdown_cb(exec_ctx, arg, 1);
    return;
  }

  fd = accept(grpc_fd_wrapped_fd(listen_em_fd), (struct sockaddr *)&ss, &slen);
  GPR_ASSERT(fd >= 0);
  GPR_ASSERT(fd < FD_SETSIZE);
  flags = fcntl(fd, F_GETFL, 0);
  fcntl(fd, F_SETFL, flags | O_NONBLOCK);
  se = gpr_malloc(sizeof(*se));
  se->sv = sv;
  se->em_fd = grpc_fd_create(fd, "listener");
  grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
  GRPC_CLOSURE_INIT(&se->session_read_closure, session_read_cb, se,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);

  grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
}
Example #7
grpc_endpoint *grpc_secure_endpoint_create(
    struct tsi_frame_protector *protector,
    struct tsi_zero_copy_grpc_protector *zero_copy_protector,
    grpc_endpoint *transport, grpc_slice *leftover_slices,
    size_t leftover_nslices) {
  size_t i;
  secure_endpoint *ep = (secure_endpoint *)gpr_malloc(sizeof(secure_endpoint));
  ep->base.vtable = &vtable;
  ep->wrapped_ep = transport;
  ep->protector = protector;
  ep->zero_copy_protector = zero_copy_protector;
  grpc_slice_buffer_init(&ep->leftover_bytes);
  for (i = 0; i < leftover_nslices; i++) {
    grpc_slice_buffer_add(&ep->leftover_bytes,
                          grpc_slice_ref_internal(leftover_slices[i]));
  }
  ep->write_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
  ep->read_staging_buffer = GRPC_SLICE_MALLOC(STAGING_BUFFER_SIZE);
  grpc_slice_buffer_init(&ep->output_buffer);
  grpc_slice_buffer_init(&ep->source_buffer);
  ep->read_buffer = NULL;
  GRPC_CLOSURE_INIT(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
  gpr_mu_init(&ep->protector_mu);
  gpr_ref_init(&ep->ref, 1);
  return &ep->base;
}
Example #8
static void reset_test_fd(grpc_exec_ctx *exec_ctx, test_fd *tfd) {
  tfd->is_on_readable_called = false;

  GRPC_CLOSURE_INIT(&tfd->on_readable, on_readable, tfd,
                    grpc_schedule_on_exec_ctx);
  grpc_fd_notify_on_read(exec_ctx, tfd->fd, &tfd->on_readable);
}
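The on_readable callback registered by reset_test_fd is not part of this excerpt; a plausible counterpart, assuming it only records that the notification fired, is:

/* Hypothetical on_readable for the test above: just note that it ran. */
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  test_fd *tfd = (test_fd *)arg;
  tfd->is_on_readable_called = true;
}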
Example #9
File: udp_server.c  Project: mdsteele/grpc
void grpc_udp_server_destroy(grpc_exec_ctx *exec_ctx, grpc_udp_server *s,
                             grpc_closure *on_done) {
  grpc_udp_listener *sp;
  gpr_mu_lock(&s->mu);

  GPR_ASSERT(!s->shutdown);
  s->shutdown = 1;

  s->shutdown_complete = on_done;

  /* shutdown all fd's */
  if (s->active_ports) {
    for (sp = s->head; sp; sp = sp->next) {
      GPR_ASSERT(sp->orphan_cb);
      struct shutdown_fd_args *args = gpr_malloc(sizeof(*args));
      args->fd = sp->emfd;
      args->server_mu = &s->mu;
      GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
                        grpc_schedule_on_exec_ctx);
      sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
                    sp->server->user_data);
      sp->orphan_notified = true;
    }
    gpr_mu_unlock(&s->mu);
  } else {
    gpr_mu_unlock(&s->mu);
    deactivated_all_ports(exec_ctx, s);
  }
}
Example #10
// Constructor for call_data.
static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
                                  grpc_call_element* elem,
                                  const grpc_call_element_args* args) {
  channel_data* chand = (channel_data*)elem->channel_data;
  call_data* calld = (call_data*)elem->call_data;
  calld->call_combiner = args->call_combiner;
  calld->next_recv_message_ready = NULL;
  GRPC_CLOSURE_INIT(&calld->recv_message_ready, recv_message_ready, elem,
                    grpc_schedule_on_exec_ctx);
  // Get max sizes from channel data, then merge in per-method config values.
  // Note: Per-method config is only available on the client, so we
  // apply the max request size to the send limit and the max response
  // size to the receive limit.
  calld->limits = chand->limits;
  if (chand->method_limit_table != NULL) {
    message_size_limits* limits =
        (message_size_limits*)grpc_method_config_table_get(
            exec_ctx, chand->method_limit_table, args->path);
    if (limits != NULL) {
      if (limits->max_send_size >= 0 &&
          (limits->max_send_size < calld->limits.max_send_size ||
           calld->limits.max_send_size < 0)) {
        calld->limits.max_send_size = limits->max_send_size;
      }
      if (limits->max_recv_size >= 0 &&
          (limits->max_recv_size < calld->limits.max_recv_size ||
           calld->limits.max_recv_size < 0)) {
        calld->limits.max_recv_size = limits->max_recv_size;
      }
    }
  }
  return GRPC_ERROR_NONE;
}
Example #11
File: transport.c  Project: mdsteele/grpc
grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
  made_transport_op *op = gpr_malloc(sizeof(*op));
  GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_op, op,
                    grpc_schedule_on_exec_ctx);
  op->inner_on_complete = on_complete;
  memset(&op->op, 0, sizeof(op->op));
  op->op.on_consumed = &op->outer_on_complete;
  return &op->op;
}
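A caller of this helper appears in Example #29 (grpc_channel_ping): it asks for an op, fills in the fields it needs, and starts it on the top channel element; the outer_on_complete wrapper installed here presumably cleans up the op once the transport runs on_consumed. A condensed, hypothetical caller sketch:

/* Hypothetical caller sketch following Example #29: the ping closure and
 * pollset come from the caller; grpc_make_transport_op wires up cleanup. */
static void start_ping_op(grpc_exec_ctx *exec_ctx,
                          grpc_channel_element *top_elem,
                          grpc_closure *ping_done, grpc_pollset *pollset) {
  grpc_transport_op *op = grpc_make_transport_op(NULL);
  op->send_ping = ping_done;
  op->bind_pollset = pollset;
  top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
}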
Example #12
static void send_security_metadata(grpc_exec_ctx *exec_ctx,
                                   grpc_call_element *elem,
                                   grpc_transport_stream_op_batch *batch) {
  call_data *calld = elem->call_data;
  channel_data *chand = elem->channel_data;
  grpc_client_security_context *ctx =
      (grpc_client_security_context *)batch->payload
          ->context[GRPC_CONTEXT_SECURITY]
          .value;
  grpc_call_credentials *channel_call_creds =
      chand->security_connector->request_metadata_creds;
  int call_creds_has_md = (ctx != NULL) && (ctx->creds != NULL);

  if (channel_call_creds == NULL && !call_creds_has_md) {
    /* Skip sending metadata altogether. */
    grpc_call_next_op(exec_ctx, elem, batch);
    return;
  }

  if (channel_call_creds != NULL && call_creds_has_md) {
    calld->creds = grpc_composite_call_credentials_create(channel_call_creds,
                                                          ctx->creds, NULL);
    if (calld->creds == NULL) {
      grpc_transport_stream_op_batch_finish_with_failure(
          exec_ctx, batch,
          grpc_error_set_int(
              GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                  "Incompatible credentials set on channel and call."),
              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAUTHENTICATED));
      return;
    }
  } else {
    calld->creds = grpc_call_credentials_ref(
        call_creds_has_md ? ctx->creds : channel_call_creds);
  }

  build_auth_metadata_context(&chand->security_connector->base,
                              chand->auth_context, calld);

  grpc_error *cancel_error = set_cancel_func(elem, cancel_get_request_metadata);
  if (cancel_error != GRPC_ERROR_NONE) {
    grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch,
                                                       cancel_error);
    return;
  }
  GPR_ASSERT(calld->pollent != NULL);
  GRPC_CLOSURE_INIT(&calld->closure, on_credentials_metadata, batch,
                    grpc_schedule_on_exec_ctx);
  grpc_error *error = GRPC_ERROR_NONE;
  if (grpc_call_credentials_get_request_metadata(
          exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
          &calld->md_array, &calld->closure, &error)) {
    // Synchronous return; invoke on_credentials_metadata() directly.
    on_credentials_metadata(exec_ctx, batch, error);
    GRPC_ERROR_UNREF(error);
  }
}
Example #13
File: transport.c  Project: mdsteele/grpc
grpc_transport_stream_op_batch *grpc_make_transport_stream_op(
    grpc_closure *on_complete) {
  made_transport_stream_op *op = gpr_zalloc(sizeof(*op));
  op->op.payload = &op->payload;
  GRPC_CLOSURE_INIT(&op->outer_on_complete, destroy_made_transport_stream_op,
                    op, grpc_schedule_on_exec_ctx);
  op->inner_on_complete = on_complete;
  op->op.on_complete = &op->outer_on_complete;
  return &op->op;
}
Example #14
File: grpc_filter.c  Project: endobson/grpc
static grpc_error *server_init_call_elem(grpc_exec_ctx *exec_ctx,
                                         grpc_call_element *elem,
                                         const grpc_call_element_args *args) {
  call_data *d = (call_data *)elem->call_data;
  GPR_ASSERT(d != NULL);
  memset(d, 0, sizeof(*d));
  d->start_ts = args->start_time;
  /* TODO(hongyu): call census_tracing_start_op here. */
  GRPC_CLOSURE_INIT(&d->finish_recv, server_on_done_recv, elem,
                    grpc_schedule_on_exec_ctx);
  return GRPC_ERROR_NONE;
}
Example #15
File: transport.c  Project: mdsteele/grpc
#ifndef NDEBUG
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void *cb_arg,
                          const char *object_type) {
  refcount->object_type = object_type;
#else
void grpc_stream_ref_init(grpc_stream_refcount *refcount, int initial_refs,
                          grpc_iomgr_cb_func cb, void *cb_arg) {
#endif
  gpr_ref_init(&refcount->refs, initial_refs);
  GRPC_CLOSURE_INIT(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
  refcount->slice_refcount.vtable = &stream_ref_slice_vtable;
  refcount->slice_refcount.sub_refcount = &refcount->slice_refcount;
}
Example #16
static grpc_handshaker *security_handshaker_create(
    grpc_exec_ctx *exec_ctx, tsi_handshaker *handshaker,
    grpc_security_connector *connector) {
  security_handshaker *h = gpr_zalloc(sizeof(security_handshaker));
  grpc_handshaker_init(&security_handshaker_vtable, &h->base);
  h->handshaker = handshaker;
  h->connector = GRPC_SECURITY_CONNECTOR_REF(connector, "handshake");
  gpr_mu_init(&h->mu);
  gpr_ref_init(&h->refs, 1);
  h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
  h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
  GRPC_CLOSURE_INIT(&h->on_handshake_data_sent_to_peer,
                    on_handshake_data_sent_to_peer, h,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&h->on_handshake_data_received_from_peer,
                    on_handshake_data_received_from_peer, h,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&h->on_peer_checked, on_peer_checked, h,
                    grpc_schedule_on_exec_ctx);
  grpc_slice_buffer_init(&h->outgoing);
  return &h->base;
}
Example #17
void test_succeeds(void) {
  grpc_resolved_address resolved_addr;
  struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr;
  uv_tcp_t *svr_handle = gpr_malloc(sizeof(uv_tcp_t));
  int connections_complete_before;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_succeeds");

  memset(&resolved_addr, 0, sizeof(resolved_addr));
  resolved_addr.len = sizeof(struct sockaddr_in);
  addr->sin_family = AF_INET;

  /* create a dummy server */
  GPR_ASSERT(0 == uv_tcp_init(uv_default_loop(), svr_handle));
  GPR_ASSERT(0 == uv_tcp_bind(svr_handle, (struct sockaddr *)addr, 0));
  GPR_ASSERT(0 == uv_listen((uv_stream_t *)svr_handle, 1, connection_cb));

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  /* connect to it */
  GPR_ASSERT(uv_tcp_getsockname(svr_handle, (struct sockaddr *)addr,
                                (int *)&resolved_addr.len) == 0);
  GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
                          &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));

  gpr_mu_lock(g_mu);

  while (g_connections_complete == connections_complete_before) {
    grpc_pollset_worker *worker = NULL;
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "pollset_work",
        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                          gpr_now(GPR_CLOCK_MONOTONIC),
                          grpc_timeout_seconds_to_deadline(5))));
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }

  // This will get cleaned up when the pollset runs again or gets shutdown
  uv_close((uv_handle_t *)svr_handle, close_cb);

  gpr_mu_unlock(g_mu);

  grpc_exec_ctx_finish(&exec_ctx);
}
Example #18
static void test_pollset_cleanup(grpc_exec_ctx *exec_ctx,
                                 test_pollset *pollsets, int num_pollsets) {
  grpc_closure destroyed;
  int i;

  for (i = 0; i < num_pollsets; i++) {
    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, pollsets[i].pollset,
                      grpc_schedule_on_exec_ctx);
    grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);

    grpc_exec_ctx_flush(exec_ctx);
    gpr_free(pollsets[i].pollset);
  }
}
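destroy_pollset is not shown in this excerpt; assuming it simply destroys the pollset passed as the closure argument once shutdown has completed, it could look like:

/* Hypothetical destroy_pollset callback (not part of the excerpt). */
static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *arg,
                            grpc_error *error) {
  grpc_pollset_destroy(exec_ctx, (grpc_pollset *)arg);
}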
Example #19
File: combiner.c  Project: aaronjheng/grpc
grpc_combiner *grpc_combiner_create(void) {
  grpc_combiner *lock = gpr_malloc(sizeof(*lock));
  gpr_ref_init(&lock->refs, 1);
  lock->next_combiner_on_this_exec_ctx = NULL;
  lock->time_to_execute_final_list = false;
  lock->scheduler.vtable = &scheduler;
  lock->finally_scheduler.vtable = &finally_scheduler;
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  gpr_mpscq_init(&lock->queue);
  grpc_closure_list_init(&lock->final_list);
  GRPC_CLOSURE_INIT(&lock->offload, offload, lock, grpc_executor_scheduler);
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
  return lock;
}
Example #20
static void resolve_address_impl(grpc_exec_ctx *exec_ctx, const char *name,
                                 const char *default_port,
                                 grpc_pollset_set *interested_parties,
                                 grpc_closure *on_done,
                                 grpc_resolved_addresses **addresses) {
  request *r = gpr_malloc(sizeof(request));
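  /* Note the scheduler argument below: the executor's short-job scheduler
     rather than grpc_schedule_on_exec_ctx, so do_request_thread runs on an
     executor thread instead of inline on the caller's exec_ctx. */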
  GRPC_CLOSURE_INIT(&r->request_closure, do_request_thread, r,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  r->name = gpr_strdup(name);
  r->default_port = gpr_strdup(default_port);
  r->on_done = on_done;
  r->addresses = addresses;
  GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
}
Example #21
void args_finish(grpc_exec_ctx *exec_ctx, args_struct *args) {
  GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline()));
  grpc_resolved_addresses_destroy(args->addrs);
  grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
  grpc_pollset_set_destroy(exec_ctx, args->pollset_set);
  grpc_closure do_nothing_cb;
  GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, NULL,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
  // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
  grpc_exec_ctx_flush(exec_ctx);
  grpc_pollset_destroy(exec_ctx, args->pollset);
  gpr_free(args->pollset);
}
Example #22
static void cleanup_test_pollsets(grpc_exec_ctx *exec_ctx,
                                  test_pollset *pollsets,
                                  const int num_pollsets) {
  grpc_closure destroyed;
  for (int i = 0; i < num_pollsets; i++) {
    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, pollsets[i].ps,
                      grpc_schedule_on_exec_ctx);
    grpc_pollset_shutdown(exec_ctx, pollsets[i].ps, &destroyed);

    grpc_exec_ctx_flush(exec_ctx);
    gpr_free(pollsets[i].ps);
    pollsets[i].ps = NULL;
  }
}
Example #23
void test_fails(void) {
  grpc_resolved_address resolved_addr;
  struct sockaddr_in *addr = (struct sockaddr_in *)resolved_addr.addr;
  int connections_complete_before;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_fails");

  memset(&resolved_addr, 0, sizeof(resolved_addr));
  resolved_addr.len = sizeof(struct sockaddr_in);
  addr->sin_family = AF_INET;

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  /* connect to a broken address */
  GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
                          &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));

  gpr_mu_lock(g_mu);

  /* wait for the connection callback to finish */
  while (g_connections_complete == connections_complete_before) {
    grpc_pollset_worker *worker = NULL;
    gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
    gpr_timespec polling_deadline = test_deadline();
    switch (grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
      case GRPC_TIMERS_FIRED:
        break;
      case GRPC_TIMERS_NOT_CHECKED:
        polling_deadline = now;
      /* fall through */
      case GRPC_TIMERS_CHECKED_AND_EMPTY:
        GPR_ASSERT(GRPC_LOG_IF_ERROR(
            "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
                                              now, polling_deadline)));
        break;
    }
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }

  gpr_mu_unlock(g_mu);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #24
File: alarm.c  Project: endobson/grpc
grpc_alarm *grpc_alarm_create(void *reserved) {
  grpc_alarm *alarm = (grpc_alarm *)gpr_malloc(sizeof(grpc_alarm));

#ifndef NDEBUG
  if (GRPC_TRACER_ON(grpc_trace_alarm_refcount)) {
    gpr_log(GPR_DEBUG, "Alarm:%p created (ref: 1)", alarm);
  }
#endif

  gpr_ref_init(&alarm->refs, 1);
  grpc_timer_init_unset(&alarm->alarm);
  alarm->cq = NULL;
  GRPC_CLOSURE_INIT(&alarm->on_alarm, alarm_cb, alarm,
                    grpc_schedule_on_exec_ctx);
  return alarm;
}
Example #25
// Returns an error if the call has been cancelled.  Otherwise, sets the
// cancellation function to be called upon cancellation.
static grpc_error *set_cancel_func(grpc_call_element *elem,
                                   grpc_iomgr_cb_func func) {
  call_data *calld = (call_data *)elem->call_data;
  // Decode original state.
  gpr_atm original_state = gpr_atm_acq_load(&calld->cancellation_state);
  grpc_error *original_error = GRPC_ERROR_NONE;
  grpc_closure *original_func = NULL;
  decode_cancel_state(original_state, &original_func, &original_error);
  // If error is set, return it.
  if (original_error != GRPC_ERROR_NONE) return GRPC_ERROR_REF(original_error);
  // Otherwise, store func.
  GRPC_CLOSURE_INIT(&calld->cancel_closure, func, elem,
                    grpc_schedule_on_exec_ctx);
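  // Not shown in this excerpt: cancellation_state apparently stores either a
  // grpc_error* or a grpc_closure*, distinguished by the low tag bit, which
  // is why the closure address must have its low bit clear (asserted below).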
  GPR_ASSERT(((gpr_atm)&calld->cancel_closure & (gpr_atm)1) == 0);
  gpr_atm_rel_store(&calld->cancellation_state,
                    (gpr_atm)&calld->cancel_closure);
  return GRPC_ERROR_NONE;
}
Example #26
static void test_threading(void) {
  threading_shared shared;
  shared.pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(shared.pollset, &shared.mu);

  gpr_thd_id thds[10];
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&thds[i], test_threading_loop, &shared, &opt);
  }
  grpc_wakeup_fd fd;
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&fd)));
  shared.wakeup_fd = &fd;
  shared.wakeup_desc = grpc_fd_create(fd.read_fd, "wakeup");
  shared.wakeups = 0;
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
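    /* GRPC_CLOSURE_INIT evaluates to the initialized closure, so it can be
       passed inline to grpc_fd_notify_on_read. */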
    grpc_fd_notify_on_read(
        &exec_ctx, shared.wakeup_desc,
        GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                          grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_first",
                               grpc_wakeup_fd_wakeup(shared.wakeup_fd)));
  for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
    gpr_thd_join(thds[i]);
  }
  fd.read_fd = 0;
  grpc_wakeup_fd_destroy(&fd);
  {
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
    grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, NULL, NULL,
                   false /* already_closed */, "done");
    grpc_pollset_shutdown(&exec_ctx, shared.pollset,
                          GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                              grpc_schedule_on_exec_ctx));
    grpc_exec_ctx_finish(&exec_ctx);
  }
  gpr_free(shared.pollset);
}
Example #27
void grpc_inproc_transport_init(void) {
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, NULL,
                    grpc_schedule_on_exec_ctx);
  g_empty_slice = grpc_slice_from_static_buffer(NULL, 0);

  grpc_slice key_tmp = grpc_slice_from_static_string(":path");
  g_fake_path_key = grpc_slice_intern(key_tmp);
  grpc_slice_unref_internal(&exec_ctx, key_tmp);

  g_fake_path_value = grpc_slice_from_static_string("/");

  grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
  g_fake_auth_key = grpc_slice_intern(auth_tmp);
  grpc_slice_unref_internal(&exec_ctx, auth_tmp);

  g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #28
int main(int argc, char **argv) {
  grpc_closure destroyed;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_test_init(argc, argv);
  grpc_init();
  g_pollset = gpr_malloc(grpc_pollset_size());
  grpc_pollset_init(g_pollset, &g_mu);
  grpc_exec_ctx_finish(&exec_ctx);
  test_succeeds();
  gpr_log(GPR_ERROR, "End of first test");
  test_fails();
  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_finish(&exec_ctx);
  grpc_shutdown();
  gpr_free(g_pollset);
  return 0;
}
Example #29
void grpc_channel_ping(grpc_channel *channel, grpc_completion_queue *cq,
                       void *tag, void *reserved) {
  GRPC_API_TRACE("grpc_channel_ping(channel=%p, cq=%p, tag=%p, reserved=%p)", 4,
                 (channel, cq, tag, reserved));
  grpc_transport_op *op = grpc_make_transport_op(NULL);
  ping_result *pr = (ping_result *)gpr_malloc(sizeof(*pr));
  grpc_channel_element *top_elem =
      grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  GPR_ASSERT(reserved == NULL);
  pr->tag = tag;
  pr->cq = cq;
  GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
  op->send_ping = &pr->closure;
  op->bind_pollset = grpc_cq_pollset(cq);
  GPR_ASSERT(grpc_cq_begin_op(cq, tag));
  top_elem->filter->start_transport_op(&exec_ctx, top_elem, op);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #30
int main(int argc, char **argv) {
  grpc_closure destroyed;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_test_init(argc, argv);
  grpc_iomgr_init(&exec_ctx);
  grpc_iomgr_start(&exec_ctx);
  g_pollset = gpr_zalloc(grpc_pollset_size());
  grpc_pollset_init(g_pollset, &g_mu);
  test_grpc_fd();
  test_grpc_fd_change();
  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                    grpc_schedule_on_exec_ctx);
  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
  grpc_exec_ctx_flush(&exec_ctx);
  gpr_free(g_pollset);
  grpc_iomgr_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);
  return 0;
}