Example #1
int kqueue_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  struct timespec ts, *ts_p;
  int sock_err = 0;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */


  if (gh_list_count(&nsp->active_iods) > kinfo->evlen) {
    kinfo->evlen = gh_list_count(&nsp->active_iods) * 2;
    kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
  }

  do {
    msevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to kevent() */
    memset(&ts, 0, sizeof(struct timespec));
    if (combined_msecs >= 0) {
      ts.tv_sec = combined_msecs / 1000;
      ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
      ts_p = &ts;
    } else {
      ts_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp, results_left);

  return 1;
}
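
As an aside, the unsigned-cast trick that merges event_msecs and msec_timeout above can be shown in isolation. The sketch below is not nsock code: combine_msecs() and the MIN macro are defined locally for the demonstration, and converting the unsigned result back to int relies on the same two's-complement behaviour that the engine code itself assumes.

#include <assert.h>

/* Standalone illustration of the timeout-merging idiom above: casting -1 to
 * unsigned yields UINT_MAX, so MIN() treats "no timeout" as larger than any
 * real timeout. Converting back to int turns UINT_MAX into -1 again on the
 * usual two's-complement platforms, as the engine code assumes. */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static int combine_msecs(int event_msecs, int msec_timeout) {
  return (int)MIN((unsigned)event_msecs, (unsigned)msec_timeout);
}

int main(void) {
  assert(combine_msecs(-1, -1) == -1);    /* both "infinite" stays infinite */
  assert(combine_msecs(250, -1) == 250);  /* a real timeout beats "infinite" */
  assert(combine_msecs(250, 100) == 100); /* otherwise the smaller one wins */
  return 0;
}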
Example #2
int select_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct timeval select_tv;
  struct timeval *select_tv_p;
  struct select_engine_info *sinfo = (struct select_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events))
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to select() */
    memset(&select_tv, 0, sizeof(select_tv));
    if (combined_msecs > 0) {
      select_tv.tv_sec = combined_msecs / 1000;
      select_tv.tv_usec = (combined_msecs % 1000) * 1000;
      select_tv_p = &select_tv;
    } else if (combined_msecs == 0) {
      /* we want tv_sec and tv_usec to be zero, but they already are from the memset above */
      select_tv_p = &select_tv;
    } else {
      assert(combined_msecs == -1);
      select_tv_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* Set up the descriptors for select */
      sinfo->fds_results_r = sinfo->fds_master_r;
      sinfo->fds_results_w = sinfo->fds_master_w;
      sinfo->fds_results_x = sinfo->fds_master_x;

      results_left = fselect(sinfo->max_sd + 1, &sinfo->fds_results_r,
                             &sinfo->fds_results_w, &sinfo->fds_results_x, select_tv_p);

      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to select delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
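
The select engine keeps its descriptor sets inside engine_data; the copy from fds_master_* into fds_results_* above exists because select() overwrites the sets it is given. The standalone sketch below is not nsock code (stdin is used purely as a stand-in descriptor); it shows that master/working-copy pattern with the plain POSIX API.

#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

/* Minimal sketch of the master/working fd_set pattern: keep a master set of
 * registered descriptors and hand select() a scratch copy each iteration,
 * since select() clobbers the sets passed to it. */
int main(void) {
  fd_set master_r, work_r;
  struct timeval tv;
  int maxfd = STDIN_FILENO;     /* highest descriptor registered */

  FD_ZERO(&master_r);
  FD_SET(STDIN_FILENO, &master_r);

  work_r = master_r;            /* copy, because select() modifies its input */
  tv.tv_sec = 1;
  tv.tv_usec = 0;

  if (select(maxfd + 1, &work_r, NULL, NULL, &tv) > 0 &&
      FD_ISSET(STDIN_FILENO, &work_r))
    printf("stdin is readable\n");

  return 0;
}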
Example #3
int poll_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
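
Poll() in this engine looks like a portability wrapper over the platform's polling primitive; the standalone sketch below shows the plain POSIX poll(2) call such a loop boils down to, using a single illustrative descriptor (stdin) rather than the engine's pinfo->events array.

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Minimal poll(2) sketch: one pollfd per descriptor, and a negative timeout
 * means "wait forever", matching combined_msecs == -1 in the loop above. */
int main(void) {
  struct pollfd fds[1];
  int n;

  fds[0].fd = STDIN_FILENO;   /* illustration only */
  fds[0].events = POLLIN;
  fds[0].revents = 0;

  n = poll(fds, 1, 1000 /* ms */);
  if (n > 0 && (fds[0].revents & POLLIN))
    printf("stdin is readable\n");
  else if (n == 0)
    printf("timed out\n");

  return 0;
}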
/* If nsp_new returned success, you must free the nsp when you are done with it
 * to conserve memory (and in some cases, sockets).  After this call, nsp may no
 * longer be used.  Any pending events are sent an NSE_STATUS_KILL callback and
 * all outstanding iods are deleted. */
void nsp_delete(nsock_pool ms_pool) {
  mspool *nsp = (mspool *)ms_pool;
  msevent *nse;
  msiod *nsi;
  int i;
  gh_lnode_t *current, *next;
  gh_list_t *event_lists[] = {
    &nsp->connect_events,
    &nsp->read_events,
    &nsp->write_events,
#if HAVE_PCAP
    &nsp->pcap_read_events,
#endif
    NULL
  };

  assert(nsp);

  /* First I go through all the events sending NSE_STATUS_KILL */
  for (i = 0; event_lists[i] != NULL; i++) {
    while (gh_list_count(event_lists[i]) > 0) {
      gh_lnode_t *lnode = gh_list_pop(event_lists[i]);

      assert(lnode);

#if HAVE_PCAP
      if (event_lists[i] == &nsp->pcap_read_events)
        nse = lnode_msevent2(lnode);
      else
#endif
        nse = lnode_msevent(lnode);

      assert(nse);

      nse->status = NSE_STATUS_KILL;
      nsock_trace_handler_callback(nsp, nse);
      nse->handler(nsp, nse, nse->userdata);

      if (nse->iod) {
        nse->iod->events_pending--;
        assert(nse->iod->events_pending >= 0);
      }
      msevent_delete(nsp, nse);
    }
    gh_list_free(event_lists[i]);
  }

  /* Kill timers too, they're not in event lists */
  while (gh_heap_count(&nsp->expirables) > 0) {
    gh_hnode_t *hnode;

    hnode = gh_heap_pop(&nsp->expirables);
    nse = container_of(hnode, msevent, expire);

    if (nse->type == NSE_TYPE_TIMER) {
      nse->status = NSE_STATUS_KILL;
      nsock_trace_handler_callback(nsp, nse);
      nse->handler(nsp, nse, nse->userdata);
      msevent_delete(nsp, nse);
      gh_list_append(&nsp->free_events, &nse->nodeq_io);
    }
  }

  gh_heap_free(&nsp->expirables);

  /* foreach msiod */
  for (current = gh_list_first_elem(&nsp->active_iods);
       current != NULL;
       current = next) {
    next = gh_lnode_next(current);
    nsi = container_of(current, msiod, nodeq);

    nsi_delete(nsi, NSOCK_PENDING_ERROR);

    gh_list_remove(&nsp->active_iods, current);
    gh_list_prepend(&nsp->free_iods, &nsi->nodeq);
  }

  /* Now we free all the memory in the free iod list */
  while ((current = gh_list_pop(&nsp->free_iods))) {
    nsi = container_of(current, msiod, nodeq);
    free(nsi);
  }

  while ((current = gh_list_pop(&nsp->free_events))) {
    nse = lnode_msevent(current);
    free(nse);
  }

  gh_list_free(&nsp->active_iods);
  gh_list_free(&nsp->free_iods);
  gh_list_free(&nsp->free_events);

  nsock_engine_destroy(nsp);

#if HAVE_OPENSSL
  if (nsp->sslctx != NULL)
    SSL_CTX_free(nsp->sslctx);
#endif

  free(nsp);
}
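
For context, the comment above describes the pool lifecycle from the caller's side. The sketch below is a hedged usage example, not code from the library, and it assumes the older nsock API names that match this example (nsp_new(), nsock_loop(), nsp_delete() from nsock.h); iod creation and event registration are omitted.

#include "nsock.h"

/* Hedged lifecycle sketch matching the contract described above: create a
 * pool, run the event loop, then delete the pool so pending events receive
 * NSE_STATUS_KILL and outstanding iods are freed. Assumes the older nsock
 * API (nsp_new/nsock_loop/nsp_delete). */
int main(void) {
  nsock_pool nsp = nsp_new(NULL);  /* NULL userdata for this sketch */

  if (nsp == NULL)
    return 1;

  /* ... create iods and schedule connect/read/write/timer events here ... */

  nsock_loop(nsp, -1);             /* block until no events remain */

  nsp_delete(nsp);                 /* must be called once done with the pool */
  return 0;
}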