Example #1
static void rr_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error) {
  subchannel_data *sd = arg;
  round_robin_lb_policy *p = sd->policy;
  pending_pick *pp;
  ready_list *selected;

  int unref = 0;

  GRPC_ERROR_REF(error);
  gpr_mu_lock(&p->mu);

  if (p->shutdown) {
    unref = 1;
  } else {
    switch (sd->connectivity_state) {
      case GRPC_CHANNEL_READY:
        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                    GRPC_CHANNEL_READY, GRPC_ERROR_REF(error),
                                    "connecting_ready");
        /* add the newly connected subchannel to the list of connected ones.
         * Note that it goes to the "end of the line". */
        sd->ready_list_node = add_connected_sc_locked(p, sd);
        /* at this point we know there's at least one suitable subchannel. Go
         * ahead and pick one and notify the pending suitors in
         * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
        selected = peek_next_connected_locked(p);
        if (p->pending_picks != NULL) {
          /* if the selected subchannel is going to be used for the pending
           * picks, update the last picked pointer */
          advance_last_picked_locked(p);
        }
        while ((pp = p->pending_picks)) {
          p->pending_picks = pp->next;

          *pp->target =
              grpc_subchannel_get_connected_subchannel(selected->subchannel);
          if (pp->user_data != NULL) {
            *pp->user_data = selected->user_data;
          }
          if (grpc_lb_round_robin_trace) {
            gpr_log(GPR_DEBUG,
                    "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
                    (void *)selected->subchannel, (void *)selected);
          }
          grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
          gpr_free(pp);
        }
        grpc_subchannel_notify_on_state_change(
            exec_ctx, sd->subchannel, p->base.interested_parties,
            &sd->connectivity_state, &sd->connectivity_changed_closure);
        break;
      case GRPC_CHANNEL_CONNECTING:
      case GRPC_CHANNEL_IDLE:
        grpc_connectivity_state_set(
            exec_ctx, &p->state_tracker, sd->connectivity_state,
            GRPC_ERROR_REF(error), "connecting_changed");
        grpc_subchannel_notify_on_state_change(
            exec_ctx, sd->subchannel, p->base.interested_parties,
            &sd->connectivity_state, &sd->connectivity_changed_closure);
        break;
      case GRPC_CHANNEL_TRANSIENT_FAILURE:
        /* renew state notification */
        grpc_subchannel_notify_on_state_change(
            exec_ctx, sd->subchannel, p->base.interested_parties,
            &sd->connectivity_state, &sd->connectivity_changed_closure);

        /* remove from ready list if still present */
        if (sd->ready_list_node != NULL) {
          remove_disconnected_sc_locked(p, sd->ready_list_node);
          sd->ready_list_node = NULL;
        }
        grpc_connectivity_state_set(
            exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
            GRPC_ERROR_REF(error), "connecting_transient_failure");
        break;
      case GRPC_CHANNEL_SHUTDOWN:
        if (sd->ready_list_node != NULL) {
          remove_disconnected_sc_locked(p, sd->ready_list_node);
          sd->ready_list_node = NULL;
        }

        p->num_subchannels--;
        GPR_SWAP(subchannel_data *, p->subchannels[sd->index],
                 p->subchannels[p->num_subchannels]);
        GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, "round_robin");
        p->subchannels[sd->index]->index = sd->index;
        gpr_free(sd);

        unref = 1;
        if (p->num_subchannels == 0) {
          grpc_connectivity_state_set(
              exec_ctx, &p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
              GRPC_ERROR_CREATE_REFERENCING("Round Robin Channels Exhausted",
                                            &error, 1),
              "no_more_channels");
          while ((pp = p->pending_picks)) {
            p->pending_picks = pp->next;
            *pp->target = NULL;
            grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
                                NULL);
            gpr_free(pp);
          }
        } else {
          grpc_connectivity_state_set(
              exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
              GRPC_ERROR_REF(error), "subchannel_failed");
        }
    } /* switch */
  }   /* !shutdown */

  gpr_mu_unlock(&p->mu);

  if (unref) {
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "round_robin_connectivity");
  }

  GRPC_ERROR_UNREF(error);
}
Example #2
static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}
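Example #2 illustrates a convention used throughout these snippets: a public function acquires the mutex and delegates to a `_locked` variant that assumes the lock is already held, so locked helpers can call one another without deadlocking. A self-contained sketch of that split, with plain pthreads standing in for gpr_mu:

#include <pthread.h>

static pthread_mutex_t counter_mu = PTHREAD_MUTEX_INITIALIZER;
static int counter = 0;

/* By convention, `_locked` functions require counter_mu to be held. */
static void bump_locked(void) { counter++; }

/* Public entry point: lock, delegate, unlock. */
static void bump(void) {
  pthread_mutex_lock(&counter_mu);
  bump_locked();
  pthread_mutex_unlock(&counter_mu);
}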
Example #3
/* Test some typical scenarios in pollset_set */
static void pollset_set_test_basic() {
  /* We construct the following structure for this test:
   *
   *        +---> FD0 (Added before PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        +---> FD5 (Added after PSS1, PS1 and PS2 are added to PSS0)
   *        |
   *        |
   *        |           +---> FD1 (Added before PSS1 is added to PSS0)
   *        |           |
   *        |           +---> FD6 (Added after PSS1 is added to PSS0)
   *        |           |
   *        +---> PSS1--+           +--> FD2 (Added before PS0 is added to PSS1)
   *        |           |           |
   *        |           +---> PS0---+
   *        |                       |
   * PSS0---+                       +--> FD7 (Added after PS0 is added to PSS1)
   *        |
   *        |
   *        |           +---> FD3 (Added before PS1 is added to PSS0)
   *        |           |
   *        +---> PS1---+
   *        |           |
   *        |           +---> FD8 (Added after PS1 added to PSS0)
   *        |
   *        |
   *        |           +---> FD4 (Added before PS2 is added to PSS0)
   *        |           |
   *        +---> PS2---+
   *                    |
   *                    +---> FD9 (Added after PS2 is added to PSS0)
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[10];
  test_pollset pollsets[3];
  test_pollset_set pollset_sets[2];
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = GPR_ARRAY_SIZE(pollsets);
  const int num_pss = GPR_ARRAY_SIZE(pollset_sets);

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(pollsets, num_ps);
  init_test_pollset_sets(pollset_sets, num_pss);

  /* Construct the pollset_set/pollset/fd tree (see diagram above) */

  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[2].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[3].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[4].fd);

  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);

  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[7].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[8].fd);
  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[9].fd);

  grpc_exec_ctx_flush(&exec_ctx);

  /* Test that if any FD in the above structure is readable, it is observable by
   * doing grpc_pollset_work on any pollset
   *
   *   For every pollset, do the following:
   *     - (Ensure that all FDs are in reset state)
   *     - Make all FDs readable
   *     - Call grpc_pollset_work() on the pollset
   *     - Flush the exec_ctx
   *     - Verify that the on_readable callback was called for all FDs (and
   *       reset the FDs)
   */
  for (int i = 0; i < num_ps; i++) {
    make_test_fds_readable(tfds, num_fds);

    gpr_mu_lock(pollsets[i].mu);
    deadline = grpc_timeout_milliseconds_to_deadline(2);
    GPR_ASSERT(GRPC_ERROR_NONE ==
               grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker,
                                 gpr_now(GPR_CLOCK_MONOTONIC), deadline));
    gpr_mu_unlock(pollsets[i].mu);

    grpc_exec_ctx_flush(&exec_ctx);

    verify_readable_and_reset(&exec_ctx, tfds, num_fds);
    grpc_exec_ctx_flush(&exec_ctx);
  }

  /* Test tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);
  grpc_exec_ctx_flush(&exec_ctx);

  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);

  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example #4
static void finish_connection() {
  gpr_mu_lock(g_mu);
  g_connections_complete++;
  grpc_pollset_kick(g_pollset, NULL);
  gpr_mu_unlock(g_mu);
}
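finish_connection signals completion by bumping a counter under g_mu and kicking the pollset so a thread blocked in grpc_pollset_work re-checks the counter (Example #21 below shows such a waiter loop in full). Assuming only the counter handshake matters, the same shape can be sketched self-contained with a condition variable in place of the pollset kick:

#include <pthread.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int connections_complete = 0;

/* Producer side, mirroring finish_connection() above. */
static void finish_connection_sketch(void) {
  pthread_mutex_lock(&mu);
  connections_complete++;
  pthread_cond_signal(&cv); /* plays the role of grpc_pollset_kick */
  pthread_mutex_unlock(&mu);
}

/* Consumer side: block until at least n connections have completed. */
static void wait_for_connections(int n) {
  pthread_mutex_lock(&mu);
  while (connections_complete < n) pthread_cond_wait(&cv, &mu);
  pthread_mutex_unlock(&mu);
}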
Example #5
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
                              grpc_metadata_batch *initial_metadata,
                              uint32_t initial_metadata_flags,
                              grpc_connected_subchannel **connected_subchannel,
                              grpc_closure *on_ready) {
  grpc_call_element *elem = elemp;
  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu_config);
  if (initial_metadata == NULL) {
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel);
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = grpc_closure_next(closure)) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
      }
    }
    gpr_mu_unlock(&chand->mu_config);
    return 1;
  }
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    int r;
    GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
    gpr_mu_unlock(&chand->mu_config);
    r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollent,
                            initial_metadata, initial_metadata_flags,
                            connected_subchannel, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
    return r;
  }
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = 1;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver,
                       &chand->incoming_configuration,
                       &chand->on_config_changed);
  }
  if (chand->resolver != NULL) {
    cpa = gpr_malloc(sizeof(*cpa));
    cpa->initial_metadata = initial_metadata;
    cpa->initial_metadata_flags = initial_metadata_flags;
    cpa->connected_subchannel = connected_subchannel;
    cpa->on_ready = on_ready;
    cpa->elem = elem;
    grpc_closure_init(&cpa->closure, continue_picking, cpa);
    grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure,
                          1);
  } else {
    grpc_exec_ctx_enqueue(exec_ctx, on_ready, false, NULL);
  }
  gpr_mu_unlock(&chand->mu_config);
  return 0;
}
Example #6
static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
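freelist_fd pushes an fd onto an intrusive singly linked free list under fd_freelist_mu. The matching allocation path is not shown here, but the push implies a pop under the same mutex with an allocation fallback when the list is empty; a self-contained sketch of that pairing:

#include <pthread.h>
#include <stdlib.h>

typedef struct node { struct node *freelist_next; } node;

static pthread_mutex_t freelist_mu = PTHREAD_MUTEX_INITIALIZER;
static node *freelist = NULL;

/* Push: same shape as freelist_fd above. */
static void freelist_put(node *n) {
  pthread_mutex_lock(&freelist_mu);
  n->freelist_next = freelist;
  freelist = n;
  pthread_mutex_unlock(&freelist_mu);
}

/* Pop: reuse a cached node if one is available, else allocate fresh. */
static node *freelist_get(void) {
  node *n;
  pthread_mutex_lock(&freelist_mu);
  n = freelist;
  if (n != NULL) freelist = n->freelist_next;
  pthread_mutex_unlock(&freelist_mu);
  return n != NULL ? n : malloc(sizeof(node));
}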
Example #7
File: iomgr.c Project: Saviio/grpc
void grpc_iomgr_shutdown(void) {
  gpr_timespec shutdown_deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
  gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  grpc_iomgr_platform_flush();

  gpr_mu_lock(&g_mu);
  g_shutdown = 1;
  while (g_root_object.next != &g_root_object) {
    if (gpr_time_cmp(
            gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
            gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
      if (g_root_object.next != &g_root_object) {
        gpr_log(GPR_DEBUG, "Waiting for %d iomgr objects to be destroyed",
                count_objects());
      }
      last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
    }
    if (grpc_timer_check(&exec_ctx, gpr_inf_future(GPR_CLOCK_MONOTONIC),
                         NULL)) {
      gpr_mu_unlock(&g_mu);
      grpc_exec_ctx_flush(&exec_ctx);
      gpr_mu_lock(&g_mu);
      continue;
    }
    if (g_root_object.next != &g_root_object) {
      gpr_timespec short_deadline = gpr_time_add(
          gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
      if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
        if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
          if (g_root_object.next != &g_root_object) {
            gpr_log(GPR_DEBUG,
                    "Failed to free %d iomgr objects before shutdown deadline: "
                    "memory leaks are likely",
                    count_objects());
            dump_objects("LEAKED");
            if (grpc_iomgr_abort_on_leaks()) {
              abort();
            }
          }
          break;
        }
      }
    }
  }
  gpr_mu_unlock(&g_mu);

  grpc_timer_list_shutdown(&exec_ctx);
  grpc_exec_ctx_finish(&exec_ctx);

  /* ensure all threads have left g_mu */
  gpr_mu_lock(&g_mu);
  gpr_mu_unlock(&g_mu);

  grpc_pollset_global_shutdown();
  grpc_iomgr_platform_shutdown();
  grpc_exec_ctx_global_shutdown();
  gpr_mu_destroy(&g_mu);
  gpr_cv_destroy(&g_rcv);
}
Example #8
/* tcp read callback */
static int recv_data_loop(grpc_chttp2_transport *t, int *success) {
  size_t i;
  int keep_reading = 0;

  lock(t);
  i = 0;
  GPR_ASSERT(!t->parsing_active);
  if (!t->closed) {
    t->parsing_active = 1;
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    grpc_chttp2_prepare_to_read(&t->global, &t->parsing);
    gpr_mu_unlock(&t->mu);
    for (; i < t->read_buffer.count &&
           grpc_chttp2_perform_read(&t->parsing, t->read_buffer.slices[i]);
         i++)
      ;
    gpr_mu_lock(&t->mu);
    if (i != t->read_buffer.count) {
      drop_connection(t);
    }
    /* merge stream lists */
    grpc_chttp2_stream_map_move_into(&t->new_stream_map,
                                     &t->parsing_stream_map);
    t->global.concurrent_stream_count =
        grpc_chttp2_stream_map_size(&t->parsing_stream_map);
    if (t->parsing.initial_window_update != 0) {
      grpc_chttp2_stream_map_for_each(&t->parsing_stream_map,
                                      update_global_window, t);
      t->parsing.initial_window_update = 0;
    }
    /* handle higher level things */
    grpc_chttp2_publish_reads(&t->global, &t->parsing);
    t->parsing_active = 0;
  }
  if (!*success || i != t->read_buffer.count) {
    drop_connection(t);
    read_error_locked(t);
  } else if (!t->closed) {
    keep_reading = 1;
    REF_TRANSPORT(t, "keep_reading");
    prevent_endpoint_shutdown(t);
  }
  gpr_slice_buffer_reset_and_unref(&t->read_buffer);
  unlock(t);

  if (keep_reading) {
    int ret = -1;
    switch (grpc_endpoint_read(t->ep, &t->read_buffer, &t->recv_data)) {
      case GRPC_ENDPOINT_DONE:
        *success = 1;
        ret = 1;
        break;
      case GRPC_ENDPOINT_ERROR:
        *success = 0;
        ret = 1;
        break;
      case GRPC_ENDPOINT_PENDING:
        ret = 0;
        break;
    }
    allow_endpoint_shutdown_unlocked(t);
    UNREF_TRANSPORT(t, "keep_reading");
    return ret;
  } else {
    UNREF_TRANSPORT(t, "recv_data");
    return 0;
  }

  gpr_log(GPR_ERROR, "should never reach here");
  abort();
}
Example #9
static void lock(grpc_chttp2_transport *t) { gpr_mu_lock(&t->mu); }
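The matching unlock is not part of this listing. At its simplest it just releases t->mu, though Example #8 above suggests the real unlock(t) in this transport is also the point where work deferred while the lock was held gets flushed. A minimal sketch under that caveat:

/* Minimal counterpart (assumed): the real chttp2 unlock may also run
   actions that were deferred while t->mu was held. */
static void unlock(grpc_chttp2_transport *t) { gpr_mu_unlock(&t->mu); }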
Example #10
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *err) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
                                       sp->fd_index};
  grpc_pollset *read_notifier_pollset = NULL;
  grpc_fd *fdobj;

  if (err != GRPC_ERROR_NONE) {
    goto error;
  }

  read_notifier_pollset =
      sp->server->pollsets[(size_t)gpr_atm_no_barrier_fetch_add(
                               &sp->server->next_pollset_to_assign, 1) %
                           sp->server->pollset_count];

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);

    if (read_notifier_pollset == NULL) {
      gpr_log(GPR_ERROR, "Read notifier pollset is not set on the fd");
      goto error;
    }

    grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);

    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
        read_notifier_pollset, &acceptor);

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
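The read_notifier_pollset computation above spreads incoming connections across the server's pollsets round-robin using a single atomic fetch-add, so the accept path needs no lock. A self-contained sketch of that indexing scheme, with C11 atomics standing in for gpr_atm:

#include <stdatomic.h>
#include <stddef.h>

static atomic_size_t next_pollset = 0;

/* Round-robin pollset selection; safe to call from any thread. */
static size_t pick_pollset(size_t pollset_count) {
  return atomic_fetch_add_explicit(&next_pollset, 1, memory_order_relaxed) %
         pollset_count;
}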
Example #11
void grpc_tcp_server_shutdown_starting_add(grpc_tcp_server *s,
                                           grpc_closure *shutdown_starting) {
  gpr_mu_lock(&s->mu);
  grpc_closure_list_add(&s->shutdown_starting, shutdown_starting, 1);
  gpr_mu_unlock(&s->mu);
}
Example #12
/* event manager callback when reads are ready */
static void on_read(grpc_exec_ctx *exec_ctx, void *arg, int success) {
  grpc_tcp_listener *sp = arg;
  grpc_tcp_server_acceptor acceptor = {sp->server, sp->port_index,
                                       sp->fd_index};
  grpc_fd *fdobj;
  size_t i;

  if (!success) {
    goto error;
  }

  /* loop until accept4 returns EAGAIN, and then re-arm notification */
  for (;;) {
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(addr);
    char *addr_str;
    char *name;
    /* Note: If we ever decide to return this address to the user, remember to
       strip off the ::ffff:0.0.0.0/96 prefix first. */
    int fd = grpc_accept4(sp->fd, (struct sockaddr *)&addr, &addrlen, 1, 1);
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;
        case EAGAIN:
          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
          return;
        default:
          gpr_log(GPR_ERROR, "Failed accept4: %s", strerror(errno));
          goto error;
      }
    }

    grpc_set_socket_no_sigpipe_if_possible(fd);

    addr_str = grpc_sockaddr_to_uri((struct sockaddr *)&addr);
    gpr_asprintf(&name, "tcp-server-connection:%s", addr_str);

    if (grpc_tcp_trace) {
      gpr_log(GPR_DEBUG, "SERVER_CONNECT: incoming connection: %s", addr_str);
    }

    fdobj = grpc_fd_create(fd, name);
    /* TODO(ctiller): revise this when we have server-side sharding
       of channels -- we certainly should not be automatically adding every
       incoming channel to every pollset owned by the server */
    for (i = 0; i < sp->server->pollset_count; i++) {
      grpc_pollset_add_fd(exec_ctx, sp->server->pollsets[i], fdobj);
    }
    sp->server->on_accept_cb(
        exec_ctx, sp->server->on_accept_cb_arg,
        grpc_tcp_create(fdobj, GRPC_TCP_DEFAULT_READ_SLICE_SIZE, addr_str),
        &acceptor);

    gpr_free(name);
    gpr_free(addr_str);
  }

  GPR_UNREACHABLE_CODE(return );

error:
  gpr_mu_lock(&sp->server->mu);
  if (0 == --sp->server->active_ports) {
    gpr_mu_unlock(&sp->server->mu);
    deactivated_all_ports(exec_ctx, sp->server);
  } else {
    gpr_mu_unlock(&sp->server->mu);
  }
}
Example #13
static void multiple_writers_single_reader(int circular_log) {
    // Sleep interval between read iterations.
    static const int READ_ITERATION_INTERVAL_IN_MSEC = 10;
    // Maximum record size.
    static const size_t MAX_RECORD_SIZE = 20;
    // Number of records written by each writer. This is sized such that we
    // will write through the entire log ~10 times.
    const int NUM_RECORDS_PER_WRITER =
        (int)((10 * census_log_remaining_space()) / (MAX_RECORD_SIZE / 2)) /
        NUM_WRITERS;
    size_t record_size = ((size_t)rand() % MAX_RECORD_SIZE) + 1;
    // Create and start writers.
    writer_thread_args writers[NUM_WRITERS];
    int writers_count = NUM_WRITERS;
    gpr_cv writers_done;
    gpr_mu writers_mu;  // protects writers_done and writers_count
    gpr_cv_init(&writers_done);
    gpr_mu_init(&writers_mu);
    gpr_thd_id id;
    for (int i = 0; i < NUM_WRITERS; ++i) {
        writers[i].index = i;
        writers[i].record_size = record_size;
        writers[i].num_records = NUM_RECORDS_PER_WRITER;
        writers[i].done = &writers_done;
        writers[i].count = &writers_count;
        writers[i].mu = &writers_mu;
        gpr_thd_new(&id, &writer_thread, &writers[i], NULL);
    }
    // Start reader.
    gpr_cv reader_done;
    gpr_mu reader_mu;  // protects reader_done and reader.running
    reader_thread_args reader;
    reader.record_size = record_size;
    reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
    reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
    reader.stop_flag = 0;
    gpr_cv_init(&reader.stop);
    gpr_cv_init(&reader_done);
    reader.done = &reader_done;
    gpr_mu_init(&reader_mu);
    reader.mu = &reader_mu;
    reader.running = 1;
    gpr_thd_new(&id, &reader_thread, &reader, NULL);
    // Wait for writers to finish.
    gpr_mu_lock(&writers_mu);
    while (writers_count != 0) {
        gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    gpr_mu_unlock(&writers_mu);
    gpr_mu_destroy(&writers_mu);
    gpr_cv_destroy(&writers_done);
    gpr_mu_lock(&reader_mu);
    if (circular_log) {
        // Stop reader.
        reader.stop_flag = 1;
        gpr_cv_signal(&reader.stop);
    }
    // wait for reader to finish
    while (reader.running) {
        gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    if (circular_log) {
        // Assert that there were no out-of-space errors.
        GPR_ASSERT(0 == census_log_out_of_space_count());
    }
    gpr_mu_unlock(&reader_mu);
    gpr_mu_destroy(&reader_mu);
    gpr_cv_destroy(&reader_done);
    if (VERBOSE) {
        printf("   Reader: finished\n");
    }
}
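The wait loop above only works if each writer decrements *args->count under args->mu and signals args->done on its way out; that side is not included in the listing. A hypothetical sketch of the writer's exit path, using only the fields visible in writer_thread_args above:

/* Hypothetical writer exit path matching the wait loop above. */
static void writer_thread_done(writer_thread_args *args) {
  gpr_mu_lock(args->mu);
  (*args->count)--;
  if (*args->count == 0) gpr_cv_signal(args->done);
  gpr_mu_unlock(args->mu);
}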
Example #14
void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  add_locked(cc, GRPC_SERVER_SHUTDOWN, tag, NULL, NULL, NULL);
  end_op_locked(cc, GRPC_SERVER_SHUTDOWN);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
Example #15
static void multipoll_with_epoll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int ep_rv;
  int poll_rv;
  pollset_hdr *h = pollset->data.ptr;
  int timeout_ms;
  struct pollfd pfds[2];

  /* If you want to ignore epoll's ability to sanely handle parallel pollers,
   * for a more apples-to-apples performance comparison with poll, add a
   * if (pollset->counter != 0) { return 0; }
   * here.
   */

  gpr_mu_unlock(&pollset->mu);

  timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);

  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = h->epoll_fd;
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;

  poll_rv = grpc_poll_function(pfds, 2, timeout_ms);

  if (poll_rv < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (poll_rv == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (pfds[1].revents) {
      do {
        ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
        if (ep_rv < 0) {
          if (errno != EINTR) {
            gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
          }
        } else {
          int i;
          for (i = 0; i < ep_rv; ++i) {
            grpc_fd *fd = ep_ev[i].data.ptr;
            /* TODO(klempner): We might want to consider making err and pri
             * separate events */
            int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
            int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
            int write = ep_ev[i].events & EPOLLOUT;
            if (read || cancel) {
              grpc_fd_become_readable(fd, allow_synchronous_callback);
            }
            if (write || cancel) {
              grpc_fd_become_writable(fd, allow_synchronous_callback);
            }
          }
        }
      } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
    }
  }

  gpr_mu_lock(&pollset->mu);
}
Example #16
grpc_transport_setup_result grpc_server_setup_transport(
    grpc_server *s, grpc_transport *transport,
    grpc_channel_filter const **extra_filters, size_t num_extra_filters,
    grpc_mdctx *mdctx) {
  size_t num_filters = s->channel_filter_count + num_extra_filters + 1;
  grpc_channel_filter const **filters =
      gpr_malloc(sizeof(grpc_channel_filter *) * num_filters);
  size_t i;
  size_t num_registered_methods;
  size_t alloc;
  registered_method *rm;
  channel_registered_method *crm;
  grpc_channel *channel;
  channel_data *chand;
  grpc_mdstr *host;
  grpc_mdstr *method;
  gpr_uint32 hash;
  gpr_uint32 slots;
  gpr_uint32 probes;
  gpr_uint32 max_probes = 0;
  grpc_transport_setup_result result;

  for (i = 0; i < s->channel_filter_count; i++) {
    filters[i] = s->channel_filters[i];
  }
  for (; i < s->channel_filter_count + num_extra_filters; i++) {
    filters[i] = extra_filters[i - s->channel_filter_count];
  }
  filters[i] = &grpc_connected_channel_filter;

  for (i = 0; i < s->cq_count; i++) {
    grpc_transport_add_to_pollset(transport, grpc_cq_pollset(s->cqs[i]));
  }

  channel = grpc_channel_create_from_filters(filters, num_filters,
                                             s->channel_args, mdctx, 0);
  chand = (channel_data *)grpc_channel_stack_element(
              grpc_channel_get_channel_stack(channel), 0)
              ->channel_data;
  chand->server = s;
  server_ref(s);
  chand->channel = channel;

  num_registered_methods = 0;
  for (rm = s->registered_methods; rm; rm = rm->next) {
    num_registered_methods++;
  }
  /* build a lookup table phrased in terms of mdstr's in this channel's
     context to quickly find registered methods */
  if (num_registered_methods > 0) {
    slots = 2 * num_registered_methods;
    alloc = sizeof(channel_registered_method) * slots;
    chand->registered_methods = gpr_malloc(alloc);
    memset(chand->registered_methods, 0, alloc);
    for (rm = s->registered_methods; rm; rm = rm->next) {
      host = rm->host ? grpc_mdstr_from_string(mdctx, rm->host) : NULL;
      method = grpc_mdstr_from_string(mdctx, rm->method);
      hash = GRPC_MDSTR_KV_HASH(host ? host->hash : 0, method->hash);
      for (probes = 0; chand->registered_methods[(hash + probes) % slots]
                           .server_registered_method != NULL;
           probes++)
        ;
      if (probes > max_probes) max_probes = probes;
      crm = &chand->registered_methods[(hash + probes) % slots];
      crm->server_registered_method = rm;
      crm->host = host;
      crm->method = method;
    }
    chand->registered_method_slots = slots;
    chand->registered_method_max_probes = max_probes;
  }

  result = grpc_connected_channel_bind_transport(
      grpc_channel_get_channel_stack(channel), transport);

  gpr_mu_lock(&s->mu);
  chand->next = &s->root_channel_data;
  chand->prev = chand->next->prev;
  chand->next->prev = chand->prev->next = chand;
  gpr_mu_unlock(&s->mu);

  gpr_free(filters);

  return result;
}
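The registration loop above fills an open-addressed table using linear probing and remembers the longest probe sequence, which bounds the work any lookup has to do. The lookup itself is not in this listing; a hypothetical sketch of its shape, relying on mdstrs being interned and therefore comparable by pointer:

/* Hypothetical lookup mirroring the probing insert above: scan at most
   max_probes + 1 slots starting at hash % slots. */
static channel_registered_method *lookup_registered_method(
    channel_registered_method *table, gpr_uint32 slots, gpr_uint32 max_probes,
    gpr_uint32 hash, grpc_mdstr *host, grpc_mdstr *method) {
  gpr_uint32 probes;
  for (probes = 0; probes <= max_probes; probes++) {
    channel_registered_method *crm = &table[(hash + probes) % slots];
    if (crm->server_registered_method == NULL) return NULL; /* empty slot */
    if (crm->host == host && crm->method == method) return crm;
  }
  return NULL;
}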
Example #17
static int do_iocp_work() {
  BOOL success;
  DWORD bytes = 0;
  DWORD flags = 0;
  ULONG_PTR completion_key;
  LPOVERLAPPED overlapped;
  gpr_timespec wait_time = gpr_inf_future;
  grpc_winsocket *socket;
  grpc_winsocket_callback_info *info;
  void (*f)(void *, int) = NULL;
  void *opaque = NULL;
  success = GetQueuedCompletionStatus(g_iocp, &bytes,
                                      &completion_key, &overlapped,
                                      gpr_time_to_millis(wait_time));
  if (!success && !overlapped) {
    /* The deadline got attained. */
    return 0;
  }
  GPR_ASSERT(completion_key && overlapped);
  if (overlapped == &g_iocp_custom_overlap) {
    if (completion_key == (ULONG_PTR) &g_iocp_kick_token) {
      /* We were awoken from a kick. */
      gpr_log(GPR_DEBUG, "do_iocp_work - got a kick");
      return 1;
    }
    gpr_log(GPR_ERROR, "Unknown custom completion key.");
    abort();
  }

  socket = (grpc_winsocket*) completion_key;
  if (overlapped == &socket->write_info.overlapped) {
    gpr_log(GPR_DEBUG, "do_iocp_work - got write packet");
    info = &socket->write_info;
  } else if (overlapped == &socket->read_info.overlapped) {
    gpr_log(GPR_DEBUG, "do_iocp_work - got read packet");
    info = &socket->read_info;
  } else {
    gpr_log(GPR_ERROR, "Unknown IOCP operation");
    abort();
  }
  success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
                                   FALSE, &flags);
  gpr_log(GPR_DEBUG, "bytes: %u, flags: %u - op %s", bytes, flags,
          success ? "succeeded" : "failed");
  info->bytes_transfered = bytes;
  info->wsa_error = success ? 0 : WSAGetLastError();
  GPR_ASSERT(overlapped == &info->overlapped);
  gpr_mu_lock(&socket->state_mu);
  GPR_ASSERT(!info->has_pending_iocp);
  if (info->cb) {
    f = info->cb;
    opaque = info->opaque;
    info->cb = NULL;
  } else {
    info->has_pending_iocp = 1;
  }
  gpr_mu_unlock(&socket->state_mu);
  if (f) f(opaque, 1);

  return 1;
}
Example #18
static void shutdown_internal(grpc_server *server, gpr_uint8 have_shutdown_tag,
                              void *shutdown_tag) {
  listener *l;
  requested_call_array requested_calls;
  channel_data **channels;
  channel_data *c;
  size_t nchannels;
  size_t i, j;
  grpc_channel_op op;
  grpc_channel_element *elem;
  registered_method *rm;

  /* lock, and gather up some stuff to do */
  gpr_mu_lock(&server->mu);
  if (have_shutdown_tag) {
    for (i = 0; i < server->cq_count; i++) {
      grpc_cq_begin_op(server->cqs[i], NULL);
    }
    server->shutdown_tags =
        gpr_realloc(server->shutdown_tags,
                    sizeof(void *) * (server->num_shutdown_tags + 1));
    server->shutdown_tags[server->num_shutdown_tags++] = shutdown_tag;
  }
  if (server->shutdown) {
    gpr_mu_unlock(&server->mu);
    return;
  }

  nchannels = 0;
  for (c = server->root_channel_data.next; c != &server->root_channel_data;
       c = c->next) {
    nchannels++;
  }
  channels = gpr_malloc(sizeof(channel_data *) * nchannels);
  i = 0;
  for (c = server->root_channel_data.next; c != &server->root_channel_data;
       c = c->next) {
    grpc_channel_internal_ref(c->channel);
    channels[i] = c;
    i++;
  }

  /* collect all unregistered then registered calls */
  requested_calls = server->requested_calls;
  memset(&server->requested_calls, 0, sizeof(server->requested_calls));
  for (rm = server->registered_methods; rm; rm = rm->next) {
    if (requested_calls.count + rm->requested.count >
        requested_calls.capacity) {
      requested_calls.capacity =
          GPR_MAX(requested_calls.count + rm->requested.count,
                  2 * requested_calls.capacity);
      requested_calls.calls =
          gpr_realloc(requested_calls.calls, sizeof(*requested_calls.calls) *
                                                 requested_calls.capacity);
    }
    memcpy(requested_calls.calls + requested_calls.count, rm->requested.calls,
           sizeof(*requested_calls.calls) * rm->requested.count);
    requested_calls.count += rm->requested.count;
    gpr_free(rm->requested.calls);
    memset(&rm->requested, 0, sizeof(rm->requested));
  }

  server->shutdown = 1;
  if (server->lists[ALL_CALLS] == NULL) {
    for (i = 0; i < server->num_shutdown_tags; i++) {
      for (j = 0; j < server->cq_count; j++) {
        grpc_cq_end_op(server->cqs[j], server->shutdown_tags[i], NULL, 1);
      }
    }
  }
  gpr_mu_unlock(&server->mu);

  for (i = 0; i < nchannels; i++) {
    c = channels[i];
    elem = grpc_channel_stack_element(
        grpc_channel_get_channel_stack(c->channel), 0);

    op.type = GRPC_CHANNEL_GOAWAY;
    op.dir = GRPC_CALL_DOWN;
    op.data.goaway.status = GRPC_STATUS_OK;
    op.data.goaway.message = gpr_slice_from_copied_string("Server shutdown");
    elem->filter->channel_op(elem, NULL, &op);

    grpc_channel_internal_unref(c->channel);
  }
  gpr_free(channels);

  /* terminate all the requested calls */
  for (i = 0; i < requested_calls.count; i++) {
    fail_call(server, &requested_calls.calls[i]);
  }
  gpr_free(requested_calls.calls);

  /* Shutdown listeners */
  for (l = server->listeners; l; l = l->next) {
    l->destroy(server, l->arg);
  }
}
Example #19
static void multiple_writers_single_reader(int circular_log) {
  /* Sleep interval between read iterations. */
  static const gpr_int32 READ_ITERATION_INTERVAL_IN_MSEC = 10;
  /* Number of records written by each writer. */
  static const gpr_int32 NUM_RECORDS_PER_WRITER = 10 * 1024 * 1024;
  /* Maximum record size. */
  static const size_t MAX_RECORD_SIZE = 10;
  int ix;
  gpr_thd_id id;
  gpr_cv writers_done;
  int writers_count = NUM_WRITERS;
  gpr_mu writers_mu; /* protects writers_done and writers_count */
  writer_thread_args writers[NUM_WRITERS];
  gpr_cv reader_done;
  gpr_mu reader_mu; /* protects reader_done and reader.running */
  reader_thread_args reader;
  gpr_int32 record_size = 1 + rand() % MAX_RECORD_SIZE;
  printf("   Record size: %d\n", record_size);
  /* Create and start writers. */
  gpr_cv_init(&writers_done);
  gpr_mu_init(&writers_mu);
  for (ix = 0; ix < NUM_WRITERS; ++ix) {
    writers[ix].index = ix;
    writers[ix].record_size = record_size;
    writers[ix].num_records = NUM_RECORDS_PER_WRITER;
    writers[ix].done = &writers_done;
    writers[ix].count = &writers_count;
    writers[ix].mu = &writers_mu;
    gpr_thd_new(&id, &writer_thread, &writers[ix], NULL);
  }
  /* Start reader. */
  reader.record_size = record_size;
  reader.read_iteration_interval_in_msec = READ_ITERATION_INTERVAL_IN_MSEC;
  reader.total_records = NUM_WRITERS * NUM_RECORDS_PER_WRITER;
  reader.stop_flag = 0;
  gpr_cv_init(&reader.stop);
  gpr_cv_init(&reader_done);
  reader.done = &reader_done;
  gpr_mu_init(&reader_mu);
  reader.mu = &reader_mu;
  reader.running = 1;
  gpr_thd_new(&id, &reader_thread, &reader, NULL);
  /* Wait for writers to finish. */
  gpr_mu_lock(&writers_mu);
  while (writers_count != 0) {
    gpr_cv_wait(&writers_done, &writers_mu, gpr_inf_future);
  }
  gpr_mu_unlock(&writers_mu);
  gpr_mu_destroy(&writers_mu);
  gpr_cv_destroy(&writers_done);
  gpr_mu_lock(&reader_mu);
  if (circular_log) {
    /* Stop reader. */
    reader.stop_flag = 1;
    gpr_cv_signal(&reader.stop);
  }
  /* wait for reader to finish */
  while (reader.running) {
    gpr_cv_wait(&reader_done, &reader_mu, gpr_inf_future);
  }
  if (circular_log) {
    /* Assert that there were no out-of-space errors. */
    GPR_ASSERT(0 == census_log_out_of_space_count());
  }
  gpr_mu_unlock(&reader_mu);
  gpr_mu_destroy(&reader_mu);
  gpr_cv_destroy(&reader_done);
  printf("   Reader: finished\n");
}
Example #20
static void pf_connectivity_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                    bool iomgr_success) {
  pick_first_lb_policy *p = arg;
  grpc_subchannel *selected_subchannel;
  pending_pick *pp;
  grpc_connected_subchannel *selected;

  gpr_mu_lock(&p->mu);

  selected = GET_SELECTED(p);

  if (p->shutdown) {
    gpr_mu_unlock(&p->mu);
    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
    return;
  } else if (selected != NULL) {
    if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
      /* if the selected channel goes bad, we're done */
      p->checking_connectivity = GRPC_CHANNEL_FATAL_FAILURE;
    }
    grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                p->checking_connectivity, "selected_changed");
    if (p->checking_connectivity != GRPC_CHANNEL_FATAL_FAILURE) {
      grpc_connected_subchannel_notify_on_state_change(
          exec_ctx, selected, p->base.interested_parties,
          &p->checking_connectivity, &p->connectivity_changed);
    } else {
      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base, "pick_first_connectivity");
    }
  } else {
  loop:
    switch (p->checking_connectivity) {
      case GRPC_CHANNEL_READY:
        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                    GRPC_CHANNEL_READY, "connecting_ready");
        selected_subchannel = p->subchannels[p->checking_subchannel];
        selected =
            grpc_subchannel_get_connected_subchannel(selected_subchannel);
        GPR_ASSERT(selected != NULL);
        GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked_first");
        /* drop the pick list: we are connected now */
        GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
        gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
        grpc_exec_ctx_enqueue(
            exec_ctx, grpc_closure_create(destroy_subchannels, p), true, NULL);
        /* update any calls that were waiting for a pick */
        while ((pp = p->pending_picks)) {
          p->pending_picks = pp->next;
          *pp->target = selected;
          grpc_pollset_set_del_pollset(exec_ctx, p->base.interested_parties,
                                       pp->pollset);
          grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
          gpr_free(pp);
        }
        grpc_connected_subchannel_notify_on_state_change(
            exec_ctx, selected, p->base.interested_parties,
            &p->checking_connectivity, &p->connectivity_changed);
        break;
      case GRPC_CHANNEL_TRANSIENT_FAILURE:
        p->checking_subchannel =
            (p->checking_subchannel + 1) % p->num_subchannels;
        if (p->checking_subchannel == 0) {
          /* only trigger transient failure when we've tried all alternatives */
          grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                      GRPC_CHANNEL_TRANSIENT_FAILURE,
                                      "connecting_transient_failure");
        }
        p->checking_connectivity = grpc_subchannel_check_connectivity(
            p->subchannels[p->checking_subchannel]);
        if (p->checking_connectivity == GRPC_CHANNEL_TRANSIENT_FAILURE) {
          grpc_subchannel_notify_on_state_change(
              exec_ctx, p->subchannels[p->checking_subchannel],
              p->base.interested_parties, &p->checking_connectivity,
              &p->connectivity_changed);
        } else {
          goto loop;
        }
        break;
      case GRPC_CHANNEL_CONNECTING:
      case GRPC_CHANNEL_IDLE:
        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                    GRPC_CHANNEL_CONNECTING,
                                    "connecting_changed");
        grpc_subchannel_notify_on_state_change(
            exec_ctx, p->subchannels[p->checking_subchannel],
            p->base.interested_parties, &p->checking_connectivity,
            &p->connectivity_changed);
        break;
      case GRPC_CHANNEL_FATAL_FAILURE:
        p->num_subchannels--;
        GPR_SWAP(grpc_subchannel *, p->subchannels[p->checking_subchannel],
                 p->subchannels[p->num_subchannels]);
        GRPC_SUBCHANNEL_UNREF(exec_ctx, p->subchannels[p->num_subchannels],
                              "pick_first");
        if (p->num_subchannels == 0) {
          grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                      GRPC_CHANNEL_FATAL_FAILURE,
                                      "no_more_channels");
          while ((pp = p->pending_picks)) {
            p->pending_picks = pp->next;
            *pp->target = NULL;
            grpc_exec_ctx_enqueue(exec_ctx, pp->on_complete, true, NULL);
            gpr_free(pp);
          }
          GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
                                    "pick_first_connectivity");
        } else {
          grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
                                      GRPC_CHANNEL_TRANSIENT_FAILURE,
                                      "subchannel_failed");
          p->checking_subchannel %= p->num_subchannels;
          p->checking_connectivity = grpc_subchannel_check_connectivity(
              p->subchannels[p->checking_subchannel]);
          goto loop;
        }
    }
  }

  gpr_mu_unlock(&p->mu);
}
Example #21
void test_times_out(void) {
  struct sockaddr_in addr;
  socklen_t addr_len = sizeof(addr);
  int svr_fd;
#define NUM_CLIENT_CONNECTS 100
  int client_fd[NUM_CLIENT_CONNECTS];
  int i;
  int r;
  int connections_complete_before;
  gpr_timespec connect_deadline;
  grpc_closure done;
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

  gpr_log(GPR_DEBUG, "test_times_out");

  memset(&addr, 0, sizeof(addr));
  addr.sin_family = AF_INET;

  /* create a dummy server */
  svr_fd = socket(AF_INET, SOCK_STREAM, 0);
  GPR_ASSERT(svr_fd >= 0);
  GPR_ASSERT(0 == bind(svr_fd, (struct sockaddr *)&addr, addr_len));
  GPR_ASSERT(0 == listen(svr_fd, 1));
  /* Get its address */
  GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)&addr, &addr_len) == 0);

  /* tie up the listen buffer, which is somewhat arbitrarily sized. */
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    client_fd[i] = socket(AF_INET, SOCK_STREAM, 0);
    grpc_set_socket_nonblocking(client_fd[i], 1);
    do {
      r = connect(client_fd[i], (struct sockaddr *)&addr, addr_len);
    } while (r == -1 && errno == EINTR);
    GPR_ASSERT(r < 0);
    GPR_ASSERT(errno == EWOULDBLOCK || errno == EINPROGRESS);
  }

  /* connect to dummy server address */

  connect_deadline = GRPC_TIMEOUT_SECONDS_TO_DEADLINE(1);

  gpr_mu_lock(g_mu);
  connections_complete_before = g_connections_complete;
  gpr_mu_unlock(g_mu);

  grpc_closure_init(&done, must_fail, NULL);
  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
                          (struct sockaddr *)&addr, addr_len, connect_deadline);

  /* Make sure the event doesn't trigger early */
  gpr_mu_lock(g_mu);
  for (;;) {
    grpc_pollset_worker *worker = NULL;
    gpr_timespec now = gpr_now(connect_deadline.clock_type);
    gpr_timespec continue_verifying_time =
        gpr_time_from_seconds(5, GPR_TIMESPAN);
    gpr_timespec grace_time = gpr_time_from_seconds(3, GPR_TIMESPAN);
    gpr_timespec finish_time =
        gpr_time_add(connect_deadline, continue_verifying_time);
    gpr_timespec restart_verifying_time =
        gpr_time_add(connect_deadline, grace_time);
    int is_after_deadline = gpr_time_cmp(now, connect_deadline) > 0;
    if (gpr_time_cmp(now, finish_time) > 0) {
      break;
    }
    gpr_log(GPR_DEBUG, "now=%lld.%09d connect_deadline=%lld.%09d",
            (long long)now.tv_sec, (int)now.tv_nsec,
            (long long)connect_deadline.tv_sec, (int)connect_deadline.tv_nsec);
    if (is_after_deadline && gpr_time_cmp(now, restart_verifying_time) <= 0) {
      /* allow some slack before insisting that things be done */
    } else {
      GPR_ASSERT(g_connections_complete ==
                 connections_complete_before + is_after_deadline);
    }
    gpr_timespec polling_deadline = GRPC_TIMEOUT_MILLIS_TO_DEADLINE(10);
    if (!grpc_timer_check(&exec_ctx, now, &polling_deadline)) {
      grpc_pollset_work(&exec_ctx, g_pollset, &worker, now, polling_deadline);
    }
    gpr_mu_unlock(g_mu);
    grpc_exec_ctx_flush(&exec_ctx);
    gpr_mu_lock(g_mu);
  }
  gpr_mu_unlock(g_mu);

  grpc_exec_ctx_finish(&exec_ctx);

  close(svr_fd);
  for (i = 0; i < NUM_CLIENT_CONNECTS; ++i) {
    close(client_fd[i]);
  }
}
Example #22
void grpc_subchannel_call_holder_perform_op(grpc_exec_ctx *exec_ctx,
                                            grpc_subchannel_call_holder *holder,
                                            grpc_transport_stream_op *op) {
  /* try to (atomically) get the call */
  grpc_subchannel_call *call = GET_CALL(holder);
  GPR_TIMER_BEGIN("grpc_subchannel_call_holder_perform_op", 0);
  if (call == CANCELLED_CALL) {
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  if (call != NULL) {
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  /* we failed; lock and figure out what to do */
  gpr_mu_lock(&holder->mu);
retry:
  /* need to recheck that another thread hasn't set the call */
  call = GET_CALL(holder);
  if (call == CANCELLED_CALL) {
    gpr_mu_unlock(&holder->mu);
    grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  if (call != NULL) {
    gpr_mu_unlock(&holder->mu);
    grpc_subchannel_call_process_op(exec_ctx, call, op);
    GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
    return;
  }
  /* if this is a cancellation, then we can raise our cancelled flag */
  if (op->cancel_with_status != GRPC_STATUS_OK) {
    if (!gpr_atm_rel_cas(&holder->subchannel_call, 0, 1)) {
      goto retry;
    } else {
      switch (holder->creation_phase) {
        case GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING:
          fail_locked(exec_ctx, holder);
          break;
        case GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL:
          holder->pick_subchannel(exec_ctx, holder->pick_subchannel_arg, NULL,
                                  0, &holder->connected_subchannel, NULL);
          break;
      }
      gpr_mu_unlock(&holder->mu);
      grpc_transport_stream_op_finish_with_failure(exec_ctx, op);
      GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
      return;
    }
  }
  /* if we don't have a subchannel, try to get one */
  if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      holder->connected_subchannel == NULL &&
      op->send_initial_metadata != NULL) {
    holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
    grpc_closure_init(&holder->next_step, subchannel_ready, holder);
    GRPC_CALL_STACK_REF(holder->owning_call, "pick_subchannel");
    if (holder->pick_subchannel(
            exec_ctx, holder->pick_subchannel_arg, op->send_initial_metadata,
            op->send_initial_metadata_flags, &holder->connected_subchannel,
            &holder->next_step)) {
      holder->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING;
      GRPC_CALL_STACK_UNREF(exec_ctx, holder->owning_call, "pick_subchannel");
    }
  }
  /* if we've got a subchannel, then let's ask it to create a call */
  if (holder->creation_phase == GRPC_SUBCHANNEL_CALL_HOLDER_NOT_CREATING &&
      holder->connected_subchannel != NULL) {
    gpr_atm_rel_store(
        &holder->subchannel_call,
        (gpr_atm)(uintptr_t)grpc_connected_subchannel_create_call(
            exec_ctx, holder->connected_subchannel, holder->pollent));
    retry_waiting_locked(exec_ctx, holder);
    goto retry;
  }
  /* nothing to be done but wait */
  add_waiting_locked(holder, op);
  gpr_mu_unlock(&holder->mu);
  GPR_TIMER_END("grpc_subchannel_call_holder_perform_op", 0);
}
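GET_CALL and CANCELLED_CALL are not defined in this listing. Given that the code CASes holder->subchannel_call from 0 to 1 on cancellation and publishes the created call with a release store, they are presumably an acquire load plus a sentinel value of 1; a sketch under that assumption:

/* Assumed definitions, inferred from the atomics used above: the
   holder's subchannel_call is 0 (no call yet), 1 (cancelled), or a
   pointer to the live call, published with release semantics. */
#define CANCELLED_CALL ((grpc_subchannel_call *)1)
#define GET_CALL(holder) \
  ((grpc_subchannel_call *)gpr_atm_acq_load(&(holder)->subchannel_call))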
Example #23
static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                 bool iomgr_success) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  int exit_idle = 0;

  if (chand->incoming_configuration != NULL) {
    lb_policy = grpc_client_config_get_lb_policy(chand->incoming_configuration);
    if (lb_policy != NULL) {
      GRPC_LB_POLICY_REF(lb_policy, "channel");
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      state = grpc_lb_policy_check_connectivity(exec_ctx, lb_policy);
    }

    grpc_client_config_unref(exec_ctx, chand->incoming_configuration);
  }

  chand->incoming_configuration = NULL;

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu_config);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  if (lb_policy != NULL) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  } else if (chand->resolver == NULL /* disconnected */) {
    grpc_closure_list_fail_all(&chand->waiting_for_config_closures);
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = 1;
    chand->exit_idle_when_lb_policy_arrives = 0;
  }

  if (iomgr_success && chand->resolver) {
    set_channel_connectivity_state_locked(exec_ctx, chand, state,
                                          "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver,
                       &chand->incoming_configuration,
                       &chand->on_config_changed);
    gpr_mu_unlock(&chand->mu_config);
  } else {
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    set_channel_connectivity_state_locked(
        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN, "resolver_gone");
    gpr_mu_unlock(&chand->mu_config);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
Example #24
static void on_read(void *user_data, gpr_slice *slices, size_t nslices,
                    grpc_endpoint_cb_status error) {
  unsigned i;
  gpr_uint8 keep_looping = 0;
  size_t input_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)user_data;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->read_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->read_staging_buffer);

  /* TODO(yangg) check error, maybe bail out early */
  for (i = 0; i < nslices; i++) {
    gpr_slice encrypted = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(encrypted);
    size_t message_size = GPR_SLICE_LENGTH(encrypted);

    while (message_size > 0 || keep_looping) {
      size_t unprotected_buffer_size_written = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_unprotect(ep->protector, message_bytes,
                                             &processed_message_size, cur,
                                             &unprotected_buffer_size_written);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Decryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += unprotected_buffer_size_written;

      if (cur == end) {
        flush_read_staging_buffer(ep, &cur, &end);
        /* Force to enter the loop again to extract buffered bytes in protector.
           The bytes could be buffered because of running out of staging_buffer.
           If this happens at the end of all slices, doing another unprotect
           avoids leaving data in the protector. */
        keep_looping = 1;
      } else if (unprotected_buffer_size_written > 0) {
        keep_looping = 1;
      } else {
        keep_looping = 0;
      }
    }
    if (result != TSI_OK) break;
  }

  if (cur != GPR_SLICE_START_PTR(ep->read_staging_buffer)) {
    gpr_slice_buffer_add(
        &ep->input_buffer,
        gpr_slice_split_head(
            &ep->read_staging_buffer,
            (size_t)(cur - GPR_SLICE_START_PTR(ep->read_staging_buffer))));
  }

  /* TODO(yangg) experiment with moving this block after read_cb to see if it
     helps latency */
  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    gpr_slice_buffer_reset_and_unref(&ep->input_buffer);
    call_read_cb(ep, NULL, 0, GRPC_ENDPOINT_CB_ERROR);
    return;
  }
  /* The upper level will unref the slices. */
  input_buffer_count = ep->input_buffer.count;
  ep->input_buffer.count = 0;
  call_read_cb(ep, ep->input_buffer.slices, input_buffer_count, error);
}
Example #25
static int multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, np, nf, nd;
  pollset_hdr *h;
  grpc_kick_fd_info *kfd;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  if (h->pfd_capacity < h->fd_count + 1) {
    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
    gpr_free(h->pfds);
    gpr_free(h->watchers);
    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
  }
  nf = 0;
  np = 1;
  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
  if (kfd == NULL) {
    /* Already kicked */
    return 1;
  }
  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  h->pfds[0].events = POLLIN;
  h->pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (nd = 0; nd < h->del_count; nd++) {
      if (h->fds[i] == h->dels[nd]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[nf++] = h->fds[i];
      h->watchers[np].fd = h->fds[i];
      h->pfds[np].fd = h->fds[i]->fd;
      h->pfds[np].revents = 0;
      np++;
    }
  }
  h->pfd_count = np;
  h->fd_count = nf;
  for (nd = 0; nd < h->del_count; nd++) {
    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
  }
  h->del_count = 0;
  if (h->pfd_count == 0) {
    end_polling(pollset);
    return 0;
  }
  pollset->counter++;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < np; i++) {
    h->pfds[i].events = grpc_fd_begin_poll(h->watchers[i].fd, pollset, POLLIN,
                                           POLLOUT, &h->watchers[i]);
  }

  r = poll(h->pfds, h->pfd_count, timeout);

  end_polling(pollset);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (h->pfds[0].revents & POLLIN) {
      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
    }
    for (i = 1; i < np; i++) {
      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
      }
      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
      }
    }
  }
  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

  gpr_mu_lock(&pollset->mu);
  pollset->counter--;

  return 1;
}
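Slot 0 of the pollfd array above is reserved for the kick fd: another thread can interrupt a blocked poll() by making that fd readable (the classic self-pipe trick). Here is a self-contained sketch of that mechanism, independent of the grpc_pollset_kick machinery; all names are illustrative:

#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static int kick_fds[2]; /* [0] = read end polled in slot 0, [1] = write end */

static void *kicker(void *arg) {
  (void)arg;
  usleep(100 * 1000);               /* let the main thread block in poll() */
  (void)write(kick_fds[1], "k", 1); /* the "kick": slot 0 becomes readable */
  return NULL;
}

int main(void) {
  struct pollfd pfds[1];
  pthread_t thr;
  char buf[1];
  int r;

  if (pipe(kick_fds) != 0) return 1;
  pfds[0].fd = kick_fds[0];
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;

  pthread_create(&thr, NULL, kicker, NULL);
  /* Without the kick, this poll() would sleep the full five seconds. */
  r = poll(pfds, 1, 5000);
  if (r > 0 && (pfds[0].revents & POLLIN)) {
    (void)read(kick_fds[0], buf, 1); /* consume the kick byte */
    printf("kicked awake\n");
  }
  pthread_join(thr, NULL);
  return 0;
}

grpc's kick state adds pipe caching and the pre_poll/consume/post_poll hooks seen above, but the wake-up path is roughly this.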
Example No. 26
static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
                                                 gpr_slice *slices,
                                                 size_t nslices,
                                                 grpc_endpoint_write_cb cb,
                                                 void *user_data) {
  unsigned i;
  size_t output_buffer_count = 0;
  tsi_result result = TSI_OK;
  secure_endpoint *ep = (secure_endpoint *)secure_ep;
  gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
  gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
  grpc_endpoint_write_status status;
  GPR_ASSERT(ep->output_buffer.count == 0);

  if (grpc_trace_secure_endpoint) {
    for (i = 0; i < nslices; i++) {
      char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
      gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
      gpr_free(data);
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice plain = slices[i];
    gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
    size_t message_size = GPR_SLICE_LENGTH(plain);
    while (message_size > 0) {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      size_t processed_message_size = message_size;
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect(ep->protector, message_bytes,
                                           &processed_message_size, cur,
                                           &protected_buffer_size_to_send);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) {
        gpr_log(GPR_ERROR, "Encryption error: %s",
                tsi_result_to_string(result));
        break;
      }
      message_bytes += processed_message_size;
      message_size -= processed_message_size;
      cur += protected_buffer_size_to_send;

      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    }
    if (result != TSI_OK) break;
  }
  if (result == TSI_OK) {
    size_t still_pending_size;
    do {
      size_t protected_buffer_size_to_send = (size_t)(end - cur);
      gpr_mu_lock(&ep->protector_mu);
      result = tsi_frame_protector_protect_flush(ep->protector, cur,
                                                 &protected_buffer_size_to_send,
                                                 &still_pending_size);
      gpr_mu_unlock(&ep->protector_mu);
      if (result != TSI_OK) break;
      cur += protected_buffer_size_to_send;
      if (cur == end) {
        flush_write_staging_buffer(ep, &cur, &end);
      }
    } while (still_pending_size > 0);
    if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
      gpr_slice_buffer_add(
          &ep->output_buffer,
          gpr_slice_split_head(
              &ep->write_staging_buffer,
              (size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
    }
  }

  for (i = 0; i < nslices; i++) {
    gpr_slice_unref(slices[i]);
  }

  if (result != TSI_OK) {
    /* TODO(yangg) do different things according to the error type? */
    gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
    return GRPC_ENDPOINT_WRITE_ERROR;
  }

  /* clear output_buffer and let the lower level handle its slices. */
  output_buffer_count = ep->output_buffer.count;
  ep->output_buffer.count = 0;
  ep->write_cb = cb;
  ep->write_user_data = user_data;
  /* Keep the endpoint alive until on_write runs and releases this ref. */
  secure_endpoint_ref(ep);
  status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
                               output_buffer_count, on_write, ep);
  if (status != GRPC_ENDPOINT_WRITE_PENDING) {
    secure_endpoint_unref(ep);
  }
  return status;
}
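The write path has two phases: protect full frames while input remains, then drain whatever the protector still buffers via the flush do/while. A minimal sketch of that shape follows, with hypothetical toy_protect / toy_protect_flush standing in for the TSI calls (illustrative only, not the real framing or API):

#include <stdio.h>
#include <string.h>

#define FRAME 8 /* toy frame size */

static char pending[FRAME];
static size_t pending_len = 0;

/* Toy stand-in for tsi_frame_protector_protect: buffers input until a
   full FRAME is available, then emits it into `out`. */
static void toy_protect(const char *in, size_t *in_size, char *out,
                        size_t *out_size) {
  size_t take = *in_size;
  if (take > FRAME - pending_len) take = FRAME - pending_len;
  memcpy(pending + pending_len, in, take);
  pending_len += take;
  *in_size = take;
  if (pending_len == FRAME && *out_size >= FRAME) {
    memcpy(out, pending, FRAME);
    pending_len = 0;
    *out_size = FRAME;
  } else {
    *out_size = 0;
  }
}

/* Toy stand-in for tsi_frame_protector_protect_flush: emits the partial
   frame and reports how many bytes remain buffered afterwards. */
static void toy_protect_flush(char *out, size_t *out_size,
                              size_t *still_pending) {
  size_t give = pending_len < *out_size ? pending_len : *out_size;
  memcpy(out, pending, give);
  memmove(pending, pending + give, pending_len - give);
  pending_len -= give;
  *out_size = give;
  *still_pending = pending_len;
}

int main(void) {
  const char *msg = "hello, staged world";
  const char *bytes = msg;
  size_t remaining = strlen(msg);
  char staging[16];
  char *cur = staging;
  char *end = staging + sizeof(staging);
  size_t still_pending;

  while (remaining > 0) {
    size_t out = (size_t)(end - cur);
    size_t processed = remaining;
    toy_protect(bytes, &processed, cur, &out);
    bytes += processed;
    remaining -= processed;
    cur += out;
    if (cur == end) { /* staging full: ship it and reset */
      printf("send: %.*s\n", (int)sizeof(staging), staging);
      cur = staging;
    }
  }
  /* Drain whatever the protector still buffers, exactly like the
     do/while around tsi_frame_protector_protect_flush above. */
  do {
    size_t out = (size_t)(end - cur);
    toy_protect_flush(cur, &out, &still_pending);
    cur += out;
    if (cur == end) {
      printf("send: %.*s\n", (int)sizeof(staging), staging);
      cur = staging;
    }
  } while (still_pending > 0);
  if (cur != staging) {
    printf("send tail: %.*s\n", (int)(cur - staging), staging);
  }
  return 0;
}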
Example No. 27
/*
  call-seq:
    insecure_channel = Channel.new("myhost:8080", {'arg1': 'value1'},
                                   :this_channel_is_insecure)
    creds = ...
    secure_channel = Channel.new("myhost:443", {'arg1': 'value1'}, creds)

  Creates channel instances. */
static VALUE grpc_rb_channel_init(int argc, VALUE *argv, VALUE self) {
  VALUE channel_args = Qnil;
  VALUE credentials = Qnil;
  VALUE target = Qnil;
  grpc_rb_channel *wrapper = NULL;
  grpc_channel *ch = NULL;
  grpc_channel_credentials *creds = NULL;
  char *target_chars = NULL;
  grpc_channel_args args;
  MEMZERO(&args, grpc_channel_args, 1);

  /* "3" == 3 mandatory args */
  rb_scan_args(argc, argv, "3", &target, &channel_args, &credentials);

  TypedData_Get_Struct(self, grpc_rb_channel, &grpc_channel_data_type, wrapper);
  wrapper->mu_init_done = 0;
  target_chars = StringValueCStr(target);
  grpc_rb_hash_convert_to_channel_args(channel_args, &args);
  if (TYPE(credentials) == T_SYMBOL) {
    if (id_insecure_channel != SYM2ID(credentials)) {
      rb_raise(rb_eTypeError,
               "bad creds symbol, want :this_channel_is_insecure");
      return Qnil;
    }
    ch = grpc_insecure_channel_create(target_chars, &args, NULL);
  } else {
    wrapper->credentials = credentials;
    creds = grpc_rb_get_wrapped_channel_credentials(credentials);
    ch = grpc_secure_channel_create(creds, target_chars, &args, NULL);
  }

  if (ch == NULL) {
    if (args.args != NULL) {
      xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
    }
    rb_raise(rb_eRuntimeError, "could not create an rpc channel to target:%s",
             target_chars);
    return Qnil;
  }

  wrapper->wrapped = ch;

  gpr_mu_init(&wrapper->channel_mu);
  gpr_cv_init(&wrapper->channel_cv);
  wrapper->mu_init_done = 1;

  gpr_mu_lock(&wrapper->channel_mu);
  wrapper->abort_watch_connectivity_state = 0;
  wrapper->current_connectivity_state =
      grpc_channel_check_connectivity_state(wrapper->wrapped, 0);
  wrapper->safe_to_destroy = 0;
  wrapper->request_safe_destroy = 0;

  gpr_cv_broadcast(&wrapper->channel_cv);
  gpr_mu_unlock(&wrapper->channel_mu);

  grpc_rb_channel_try_register_connection_polling(wrapper);

  if (args.args != NULL) {
    xfree(args.args); /* Allocated by grpc_rb_hash_convert_to_channel_args */
  }
  rb_ivar_set(self, id_target, target);
  return self;
}
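The wrapper access above goes through Ruby's TypedData API (TypedData_Get_Struct and friends). Here is a minimal, self-contained extension skeleton using the same pattern; the ToyExt/toy_wrapper names are hypothetical, not part of the grpc gem:

#include <ruby/ruby.h>

typedef struct { int value; } toy_wrapper;

static void toy_wrapper_free(void *p) { xfree(p); }

static const rb_data_type_t toy_wrapper_data_type = {
    "toy_wrapper",
    {NULL /* mark */, toy_wrapper_free, NULL /* size */},
    NULL, NULL, RUBY_TYPED_FREE_IMMEDIATELY};

static VALUE toy_alloc(VALUE klass) {
  toy_wrapper *w = ALLOC(toy_wrapper);
  w->value = 0;
  return TypedData_Wrap_Struct(klass, &toy_wrapper_data_type, w);
}

static VALUE toy_init(int argc, VALUE *argv, VALUE self) {
  VALUE target = Qnil, args = Qnil, creds = Qnil;
  toy_wrapper *w = NULL;
  /* "3" == 3 mandatory arguments, as in grpc_rb_channel_init */
  rb_scan_args(argc, argv, "3", &target, &args, &creds);
  TypedData_Get_Struct(self, toy_wrapper, &toy_wrapper_data_type, w);
  w->value = 1;
  return self;
}

void Init_toy_ext(void) {
  VALUE klass = rb_define_class("ToyExt", rb_cObject);
  rb_define_alloc_func(klass, toy_alloc);
  rb_define_method(klass, "initialize", toy_init, -1);
}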
Example No. 28
void grpc_fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                             grpc_closure *closure) {
  gpr_mu_lock(&fd->mu);
  notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
  gpr_mu_unlock(&fd->mu);
}
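notify_on_locked (not shown here) is a small state machine guarded by fd->mu: if the fd is already write-ready the closure runs immediately, otherwise it is parked until the readiness event fires. A self-contained sketch of that pattern with pthreads, using illustrative names (this is the general idea only; grpc's version also handles shutdown and closure scheduling via the exec_ctx):

#include <pthread.h>
#include <stdio.h>

typedef void (*closure_fn)(void *arg);

typedef struct {
  pthread_mutex_t mu;
  int ready;     /* has the event already fired with no waiter? */
  closure_fn cb; /* parked closure, if any */
  void *cb_arg;
} toy_fd;

/* Mirrors grpc_fd_notify_on_write: run now if already ready, else park. */
static void toy_notify_on_ready(toy_fd *fd, closure_fn cb, void *arg) {
  pthread_mutex_lock(&fd->mu);
  if (fd->ready) {
    fd->ready = 0; /* consume the readiness edge */
    pthread_mutex_unlock(&fd->mu);
    cb(arg);
    return;
  }
  fd->cb = cb;
  fd->cb_arg = arg;
  pthread_mutex_unlock(&fd->mu);
}

/* Mirrors the become-writable side: run the parked closure if present. */
static void toy_set_ready(toy_fd *fd) {
  closure_fn cb = NULL;
  void *arg = NULL;
  pthread_mutex_lock(&fd->mu);
  if (fd->cb != NULL) {
    cb = fd->cb;
    arg = fd->cb_arg;
    fd->cb = NULL;
  } else {
    fd->ready = 1; /* remember the edge for a later notify */
  }
  pthread_mutex_unlock(&fd->mu);
  if (cb != NULL) cb(arg);
}

static void on_writable(void *arg) { printf("writable: %s\n", (char *)arg); }

int main(void) {
  toy_fd fd = {PTHREAD_MUTEX_INITIALIZER, 0, NULL, NULL};
  toy_notify_on_ready(&fd, on_writable, "parked then fired");
  toy_set_ready(&fd); /* runs the parked closure */
  toy_set_ready(&fd); /* no waiter: remembered as ready */
  toy_notify_on_ready(&fd, on_writable, "already ready");
  return 0;
}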
Example No. 29
/* Same FD added multiple times to the pollset_set tree */
void pollset_set_test_dup_fds() {
  /* We construct the following structure for this test:
   *
   *        +---> FD0
   *        |
   *        |
   * PSS0---+
   *        |           +---> FD0 (also under PSS0)
   *        |           |
   *        +---> PSS1--+           +--> FD1 (also under PSS1)
   *                    |           |
   *                    +---> PS ---+
   *                    |           |
   *                    |           +--> FD2
   *                    +---> FD1
   */
  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
  grpc_pollset_worker *worker;
  gpr_timespec deadline;

  test_fd tfds[3];
  test_pollset pollset;
  test_pollset_set pollset_sets[2];
  const int num_fds = GPR_ARRAY_SIZE(tfds);
  const int num_ps = 1;
  const int num_pss = GPR_ARRAY_SIZE(pollset_sets);

  init_test_fds(&exec_ctx, tfds, num_fds);
  init_test_pollsets(&pollset, num_ps);
  init_test_pollset_sets(pollset_sets, num_pss);

  /* Construct the structure */
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[1].fd);
  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[2].fd);

  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);

  /* Test: make all FDs readable and verify that this can be observed by
   * doing a grpc_pollset_work on the pollset 'PS' */
  make_test_fds_readable(tfds, num_fds);

  gpr_mu_lock(pollset.mu);
  deadline = grpc_timeout_milliseconds_to_deadline(2);
  GPR_ASSERT(GRPC_ERROR_NONE ==
             grpc_pollset_work(&exec_ctx, pollset.ps, &worker,
                               gpr_now(GPR_CLOCK_MONOTONIC), deadline));
  gpr_mu_unlock(pollset.mu);
  grpc_exec_ctx_flush(&exec_ctx);

  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
  grpc_exec_ctx_flush(&exec_ctx);

  /* Tear down */
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);

  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
                                   pollset_sets[1].pss);
  grpc_exec_ctx_flush(&exec_ctx);

  cleanup_test_fds(&exec_ctx, tfds, num_fds);
  cleanup_test_pollsets(&exec_ctx, &pollset, num_ps);
  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
  grpc_exec_ctx_finish(&exec_ctx);
}
Example No. 30
static void http_connect_handshaker_do_handshake(
    grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker_in,
    grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done,
    grpc_handshaker_args* args) {
  http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
  // Check for HTTP CONNECT channel arg.
  // If not found, invoke on_handshake_done without doing anything.
  const grpc_arg* arg =
      grpc_channel_args_find(args->args, GRPC_ARG_HTTP_CONNECT_SERVER);
  if (arg == NULL) {
    // Set shutdown to true so that subsequent calls to
    // http_connect_handshaker_shutdown() do nothing.
    gpr_mu_lock(&handshaker->mu);
    handshaker->shutdown = true;
    gpr_mu_unlock(&handshaker->mu);
    grpc_closure_sched(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
    return;
  }
  GPR_ASSERT(arg->type == GRPC_ARG_STRING);
  char* server_name = arg->value.string;
  // Get headers from channel args.
  arg = grpc_channel_args_find(args->args, GRPC_ARG_HTTP_CONNECT_HEADERS);
  grpc_http_header* headers = NULL;
  size_t num_headers = 0;
  char** header_strings = NULL;
  size_t num_header_strings = 0;
  if (arg != NULL) {
    GPR_ASSERT(arg->type == GRPC_ARG_STRING);
    gpr_string_split(arg->value.string, "\n", &header_strings,
                     &num_header_strings);
    headers = gpr_malloc(sizeof(grpc_http_header) * num_header_strings);
    for (size_t i = 0; i < num_header_strings; ++i) {
      char* sep = strchr(header_strings[i], ':');
      if (sep == NULL) {
        gpr_log(GPR_ERROR, "skipping unparseable HTTP CONNECT header: %s",
                header_strings[i]);
        continue;
      }
      *sep = '\0';
      headers[num_headers].key = header_strings[i];
      headers[num_headers].value = sep + 1;
      ++num_headers;
    }
  }
  // Save state in the handshaker object.
  gpr_mu_lock(&handshaker->mu);
  handshaker->args = args;
  handshaker->on_handshake_done = on_handshake_done;
  // Log connection via proxy.
  char* proxy_name = grpc_endpoint_get_peer(args->endpoint);
  gpr_log(GPR_INFO, "Connecting to server %s via HTTP proxy %s", server_name,
          proxy_name);
  gpr_free(proxy_name);
  // Construct HTTP CONNECT request.
  grpc_httpcli_request request;
  memset(&request, 0, sizeof(request));
  request.host = server_name;
  request.http.method = "CONNECT";
  request.http.path = server_name;
  request.http.hdrs = headers;
  request.http.hdr_count = num_headers;
  request.handshaker = &grpc_httpcli_plaintext;
  grpc_slice request_slice = grpc_httpcli_format_connect_request(&request);
  grpc_slice_buffer_add(&handshaker->write_buffer, request_slice);
  // Clean up.
  gpr_free(headers);
  for (size_t i = 0; i < num_header_strings; ++i) {
    gpr_free(header_strings[i]);
  }
  gpr_free(header_strings);
  // Take a new ref to be held by the write callback.
  gpr_ref(&handshaker->refcount);
  grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer,
                      &handshaker->request_done_closure);
  gpr_mu_unlock(&handshaker->mu);
}
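grpc_httpcli_format_connect_request (not shown) serializes the request assembled above; the result is, roughly, a CONNECT request line followed by the parsed headers and the blank line that ends the head. A minimal sketch under that assumption; format_connect_request here is a hypothetical stand-in, and the exact request line grpc emits (HTTP version in particular) may differ:

#include <stdio.h>

typedef struct { const char *key; const char *value; } toy_header;

/* Prints the shape of the CONNECT request the handshaker sends: request
   line, one "key: value" line per header, then the terminating blank line. */
static void format_connect_request(const char *server, const toy_header *hdrs,
                                   size_t nhdrs) {
  size_t i;
  printf("CONNECT %s HTTP/1.1\r\n", server);
  for (i = 0; i < nhdrs; i++) {
    printf("%s: %s\r\n", hdrs[i].key, hdrs[i].value);
  }
  printf("\r\n");
}

int main(void) {
  toy_header hdrs[] = {{"Proxy-Authorization", "Basic abc123"}};
  format_connect_request("example.com:443", hdrs, 1);
  return 0;
}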