Example #1
int poll_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
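
A side note on the combined_msecs computation above: casting -1 to unsigned turns it into the largest unsigned value, so MIN() always prefers a finite timeout over "no timeout". A minimal standalone sketch of that trick, using a local stand-in for the MIN macro (not nsock's):

#include <stdio.h>

#define MIN(a, b)  ((a) < (b) ? (a) : (b))

int main(void) {
  int event_msecs = -1;    /* -1 means "no event timeout" */
  int msec_timeout = 500;  /* caller asked to wait at most 500 ms */

  /* (unsigned)-1 is the maximum unsigned value, so the finite timeout wins. */
  int combined_msecs = (int)MIN((unsigned)event_msecs, (unsigned)msec_timeout);

  printf("combined_msecs = %d\n", combined_msecs);  /* prints 500 */
  return 0;
}
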
Example #2
/* Adds an event to the appropriate nsp event list, handles housekeeping such as
 * adjusting the descriptor select/poll lists, registering the timeout value,
 * etc. */
void nsock_pool_add_event(struct npool *nsp, struct nevent *nse) {
  nsock_log_debug("NSE #%lu: Adding event (timeout in %ldms)",
                  nse->id,
                  (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

  nsp->events_pending++;

  if (!nse->event_done && nse->timeout.tv_sec) {
    /* This event is expirable, add it to the queue */
    gh_heap_push(&nsp->expirables, &nse->expire);
  }

  /* Now we do the event type specific actions */
  switch (nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ|EV_WRITE|EV_EXCEPT, EV_NONE);
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_READ:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ, EV_NONE);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_WRITE:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_WRITE, EV_NONE);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_TIMER:
      /* nothing to do */
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ: {
      mspcap *mp = (mspcap *)nse->iod->pcap;

      assert(mp);
      if (mp->pcap_desc >= 0) { /* pcap descriptor present */
        if (!nse->event_done) {
          socket_count_readpcap_inc(nse->iod);
          update_events(nse->iod, nsp, EV_READ, EV_NONE);
        }
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id);

        #if PCAP_BSD_SELECT_HACK
        /* When using the BSD hack we must do pcap_next() after select().
         * Let's insert this pcap into both queues, the selectable and the
         * non-selectable one. This will result in doing pcap_next_ex() just
         * before select(). */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
        #endif
      } else {
        /* The pcap descriptor isn't selectable; add the event to the pcap-specific queue. */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
      }
      iod_add_event(nse->iod, nse);
      break;
    }
#endif

    default:
      fatal("Unknown nsock event type (%d)", nse->type);
  }

  /* It can happen that the event has already completed, in which case we can
   * deliver it right away, even though we're probably not inside nsock_loop(). */
  if (nse->event_done) {
    event_dispatch_and_delete(nsp, nse, 1);
    update_first_events(nse);
    nevent_unref(nsp, nse);
  }
}
Example #3
void process_event(struct npool *nsp, gh_list_t *evlist, struct nevent *nse, int ev) {
  int match_r = 0, match_w = 0;
#if HAVE_OPENSSL
  int desire_r = 0, desire_w = 0;
#endif

  nsock_log_debug_all("Processing event %lu (timeout in %ldms, done=%d)",
                      nse->id,
                      (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod),
                      nse->event_done);

  if (!nse->event_done) {
    switch (nse->type) {
      case NSE_TYPE_CONNECT:
      case NSE_TYPE_CONNECT_SSL:
        if (ev != EV_NONE)
          handle_connect_result(nsp, nse, NSE_STATUS_SUCCESS);
        if (event_timedout(nse))
          handle_connect_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_READ:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_r)
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_read_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_WRITE:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
          if (!nse->iod->ssl && match_w)
            handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_write_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_TIMER:
        if (event_timedout(nse))
          handle_timer_result(nsp, nse, NSE_STATUS_SUCCESS);
        break;

#if HAVE_PCAP
      case NSE_TYPE_PCAP_READ:{
        nsock_log_debug_all("PCAP iterating %lu", nse->id);

        if (ev & EV_READ) {
          /* buffer empty? check it! */
          if (fs_length(&(nse->iobuf)) == 0)
            do_actual_pcap_read(nse);
        }

        /* if already received something */
        if (fs_length(&(nse->iobuf)) > 0)
          handle_pcap_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_pcap_read_result(nsp, nse, NSE_STATUS_TIMEOUT);

        #if PCAP_BSD_SELECT_HACK
        /* If the event occurred and we're in BSD_HACK mode, then this event was
         * added to two queues: read_events and pcap_read_events.
         * Of course we should destroy it only once.
         * Assume we're now in read_events, so just unlink this event from
         * pcap_read_events. */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0
            && nse->event_done
            && evlist == &nsp->read_events) {
          /* event is done, list is read_events and we're in BSD_HACK mode.
           * So unlink event from pcap_read_events */
          update_first_events(nse);
          gh_list_remove(&nsp->pcap_read_events, &nse->nodeq_pcap);

          nsock_log_debug_all("PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS",
                              nse->id);
        }
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0
            && nse->event_done
            && evlist == &nsp->pcap_read_events) {
          update_first_events(nse);
          gh_list_remove(&nsp->read_events, &nse->nodeq_io);
          nsock_log_debug_all("PCAP NSE #%lu: Removing event from READ_EVENTS",
                              nse->id);
        }
        #endif
        break;
      }
#endif
      default:
        fatal("Event has unknown type (%d)", nse->type);
    }
  }

  if (nse->event_done) {
    /* Security sanity check: don't return a functional SSL iod without
     * setting an SSL data structure. */
    if (nse->type == NSE_TYPE_CONNECT_SSL && nse->status == NSE_STATUS_SUCCESS)
      assert(nse->iod->ssl != NULL);

    nsock_log_debug_all("NSE #%lu: Sending event", nse->id);

    /* WooHoo!  The event is ready to be sent */
    event_dispatch_and_delete(nsp, nse, 1);
  }
}
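
The ssl_desire checks above mirror how non-blocking OpenSSL I/O behaves: an SSL_read() can require the socket to become writable (for example during renegotiation), and an SSL_write() can require readability. A hedged sketch of the caller side that records which readiness the next attempt needs; try_ssl_read() and the desire out-parameter are illustrative, only SSL_read()/SSL_get_error() are real OpenSSL calls:

#include <openssl/ssl.h>

/* Sketch: attempt a non-blocking SSL read and record which socket readiness
 * the next attempt should wait for (a stand-in for nse->sslinfo.ssl_desire). */
static int try_ssl_read(SSL *ssl, void *buf, int len, int *desire) {
  int n = SSL_read(ssl, buf, len);
  if (n > 0)
    return n;                          /* got data */

  switch (SSL_get_error(ssl, n)) {
    case SSL_ERROR_WANT_READ:
      *desire = SSL_ERROR_WANT_READ;   /* retry when the socket is readable */
      return 0;
    case SSL_ERROR_WANT_WRITE:
      *desire = SSL_ERROR_WANT_WRITE;  /* retry when the socket is writable */
      return 0;
    default:
      return -1;                       /* real error or clean shutdown */
  }
}
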
Example #4
void process_iod_events(struct npool *nsp, struct niod *nsi, int ev) {
  int i = 0;
  /* store addresses of the pointers to the first elements of each kind instead
   * of storing the values, as a connect can add a read for instance */
  gh_lnode_t **start_elems[] = {
    &nsi->first_connect,
    &nsi->first_read,
    &nsi->first_write,
#if HAVE_PCAP
    &nsi->first_pcap_read,
#endif
    NULL
  };
  gh_list_t *evlists[] = {
    &nsp->connect_events,
    &nsp->read_events,
    &nsp->write_events,
#if HAVE_PCAP
    &nsp->pcap_read_events,
#endif
    NULL
  };

  assert(nsp == nsi->nsp);
  nsock_log_debug_all("Processing events on IOD %lu (ev=%d)", nsi->id, ev);

  /* We keep the events separate because we want to handle them in the
   * order: connect => read => write => timer, for several reasons:
   *
   *  1) Makes sure we have gone through all the net i/o events before
   *     a timer expires (it would be a shame to time out after the data was
   *     available but before we delivered the events).
   *
   *  2) The connect() results often lead to a read or write that can be
   *     processed in the same cycle.  In the same way, read() often
   *     leads to write().
   */
  for (i = 0; evlists[i] != NULL; i++) {
    gh_lnode_t *current, *next, *last;

    /* for each list, get the last event and don't look past it as an event
     * could add another event in the same list and so on... */
    last = gh_list_last_elem(evlists[i]);

    for (current = *start_elems[i];
         current != NULL && gh_lnode_prev(current) != last;
         current = next) {
      struct nevent *nse;

#if HAVE_PCAP
      if (evlists[i] == &nsi->nsp->pcap_read_events)
        nse = lnode_nevent2(current);
      else
#endif
        nse = lnode_nevent(current);

      /* events are grouped by IOD. Break if we're done with the events for the
       * current IOD */
      if (nse->iod != nsi)
        break;

      process_event(nsp, evlists[i], nse, ev);
      next = gh_lnode_next(current);

      if (nse->event_done) {
        /* event is done, remove it from the event list and update IOD pointers
         * to the first events of each kind */
        update_first_events(nse);
        gh_list_remove(evlists[i], current);
        gh_list_append(&nsp->free_events, &nse->nodeq_io);

        if (nse->timeout.tv_sec)
          gh_heap_remove(&nsp->expirables, &nse->expire);
      }
    }
  }
}
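
The loop above fetches the next node before the current one can be unlinked, so removal of a finished event does not break the traversal. A minimal sketch of the same pattern on a plain singly linked list (the node type here is illustrative, not the gh_list API):

struct node {
  struct node *next;
  int done;
};

/* Walk the list and unlink the nodes marked done, saving the next pointer
 * before the current node may be removed. */
static void walk_and_prune(struct node **head) {
  struct node *cur, *next, **link = head;

  for (cur = *head; cur != NULL; cur = next) {
    next = cur->next;       /* save before cur may be unlinked */
    if (cur->done)
      *link = next;         /* unlink cur; link stays in place */
    else
      link = &cur->next;    /* keep cur; advance the link */
  }
}
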
Example #5
int kqueue_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  struct timespec ts, *ts_p;
  int sock_err = 0;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */


  if (gh_list_count(&nsp->active_iods) > kinfo->evlen) {
    kinfo->evlen = gh_list_count(&nsp->active_iods) * 2;
    kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
  }

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to kevent() */
    memset(&ts, 0, sizeof(struct timespec));
    if (combined_msecs >= 0) {
      ts.tv_sec = combined_msecs / 1000;
      ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
      ts_p = &ts;
    } else {
      ts_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp, results_left);

  return 1;
}
Example #6
/* This does the actual logistics of requesting a connection.  It is shared
 * by nsock_connect_tcp and nsock_connect_ssl, among others */
void nsock_connect_internal(struct npool *ms, struct nevent *nse, int type, int proto, struct sockaddr_storage *ss, size_t sslen,
                            unsigned short port) {

  struct sockaddr_in *sin;
#if HAVE_IPV6
  struct sockaddr_in6 *sin6;
#endif
  struct niod *iod = nse->iod;

  if (iod->px_ctx   /* proxy enabled */
      && proto == IPPROTO_TCP   /* restrict proxying to TCP connections */
      && (nse->handler != nsock_proxy_ev_dispatch)) {   /* for reentrancy */
    struct proxy_node *current;

    nsock_log_debug_all("TCP connection request (EID %lu) redirected through proxy chain",
                        (long)nse->id);

    current = iod->px_ctx->px_current;
    assert(current != NULL);

    memcpy(&iod->px_ctx->target_ss, ss, sslen);
    iod->px_ctx->target_sslen = sslen;
    iod->px_ctx->target_port  = port;

    ss    = &current->ss;
    sslen = current->sslen;
    port  = current->port;

    iod->px_ctx->target_handler = nse->handler;
    nse->handler = nsock_proxy_ev_dispatch;

    iod->px_ctx->target_ev_type = nse->type;
    nse->type = NSE_TYPE_CONNECT;
  }

  sin = (struct sockaddr_in *)ss;
#if HAVE_IPV6
  sin6 = (struct sockaddr_in6 *)ss;
#endif

  /* Now it is time to actually attempt the connection */
  if (nsock_make_socket(ms, iod, ss->ss_family, type, proto) == -1) {
    nse->event_done = 1;
    nse->status = NSE_STATUS_ERROR;
    nse->errnum = socket_errno();
  } else {
    if (ss->ss_family == AF_INET) {
      sin->sin_port = htons(port);
    }
#if HAVE_IPV6
    else if (ss->ss_family == AF_INET6) {
      sin6->sin6_port = htons(port);
    }
#endif
#if HAVE_SYS_UN_H
    else if (ss->ss_family == AF_UNIX) {
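      /* Unix domain sockets have no port to set; nothing to do here. */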
    }
#endif
    else {
      fatal("Unknown address family %d\n", ss->ss_family);
    }

    assert(sslen <= sizeof(iod->peer));
    if (&iod->peer != ss)
      memcpy(&iod->peer, ss, sslen);
    iod->peerlen = sslen;

    if (ms->engine->io_operations->iod_connect(ms, iod->sd, (struct sockaddr *)ss, sslen) == -1) {
      int err = socket_errno();

      if ((proto == IPPROTO_UDP) || (err != EINPROGRESS && err != EAGAIN)) {
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = err;
      }
    }
    /* The callback handle_connect_result handles the connection once it completes. */
  }
}
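
The error handling above follows the usual non-blocking connect() semantics: for TCP, connect() on a non-blocking socket typically returns -1 with EINPROGRESS (or EAGAIN on some systems) and completes later, which the engine then reports as writability. A hedged standalone sketch of that pattern in plain POSIX, independent of the nsock io_operations layer:

#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

/* Start a non-blocking TCP connect. Returns 0 if it completed immediately,
 * 1 if it is in progress (wait for writability), -1 on a hard error. */
static int start_connect(int sd, const struct sockaddr *sa, socklen_t salen) {
  int flags = fcntl(sd, F_GETFL, 0);
  if (flags == -1 || fcntl(sd, F_SETFL, flags | O_NONBLOCK) == -1)
    return -1;

  if (connect(sd, sa, salen) == 0)
    return 0;                            /* connected right away */

  if (errno == EINPROGRESS || errno == EAGAIN)
    return 1;                            /* completion reported by poll/select */

  return -1;                             /* immediate failure */
}
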
Example #7
int select_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct timeval select_tv;
  struct timeval *select_tv_p;
  struct select_engine_info *sinfo = (struct select_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events))
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to select() */
    memset(&select_tv, 0, sizeof(select_tv));
    if (combined_msecs > 0) {
      select_tv.tv_sec = combined_msecs / 1000;
      select_tv.tv_usec = (combined_msecs % 1000) * 1000;
      select_tv_p = &select_tv;
    } else if (combined_msecs == 0) {
      /* tv_sec and tv_usec should be zero, and they already are from the memset above */
      select_tv_p = &select_tv;
    } else {
      assert(combined_msecs == -1);
      select_tv_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* Set up the descriptors for select */
      sinfo->fds_results_r = sinfo->fds_master_r;
      sinfo->fds_results_w = sinfo->fds_master_w;
      sinfo->fds_results_x = sinfo->fds_master_x;

      results_left = fselect(sinfo->max_sd + 1, &sinfo->fds_results_r,
                             &sinfo->fds_results_w, &sinfo->fds_results_x, select_tv_p);

      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to select delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
Example #8
/* An internal function for cancelling an event when you already have a pointer
 * to the struct nevent (use nsock_event_cancel if you just have an ID). The
 * event_list passed in should correspond to the type of the event. For example,
 * with NSE_TYPE_READ, you would pass in &nsp->read_events. elem is the list
 * element in event_list which holds the event.  Pass a nonzero value for notify
 * if you want the program owning the event to be notified that it has been
 * cancelled */
int nevent_delete(struct npool *nsp, struct nevent *nse, gh_list_t *event_list,
                   gh_lnode_t *elem, int notify) {
  if (nse->event_done) {
    /* This event has already been marked for death somewhere else -- it will be
     * gone soon (and if we try to kill it now, all hell will break loose due to
     * reentrancy). */
    return 0;
  }

  nsock_log_info("%s on event #%li (type %s)", __func__, nse->id,
                 nse_type2str(nse->type));

  /* Now that we found the event... we go through the motions of cleanly
   * cancelling it */
  switch (nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      handle_connect_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_READ:
      handle_read_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_WRITE:
      handle_write_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_TIMER:
      handle_timer_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ:
      handle_pcap_read_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;
#endif

    default:
      fatal("Invalid nsock event type (%d)", nse->type);
  }

  assert(nse->event_done);

  if (nse->timeout.tv_sec)
    gh_heap_remove(&nsp->expirables, &nse->expire);

  if (event_list) {
    update_first_events(nse);
    gh_list_remove(event_list, elem);
  }

  gh_list_append(&nsp->free_events, &nse->nodeq_io);

  nsock_log_debug_all("NSE #%lu: Removing event from list", nse->id);

#if HAVE_PCAP
#if PCAP_BSD_SELECT_HACK
  if (nse->type == NSE_TYPE_PCAP_READ) {
    nsock_log_debug_all("PCAP NSE #%lu: CANCEL TEST pcap=%p read=%p curr=%p sd=%i",
                        nse->id, &nsp->pcap_read_events, &nsp->read_events,
                        event_list,((mspcap *)nse->iod->pcap)->pcap_desc);

    /* If the event occurred and we're in BSD_HACK mode, then this event was
     * added to two queues: read_events and pcap_read_events. Of course we should
     * destroy it only once. Assume we're now in read_events, so just unlink this
     * event from pcap_read_events. */
    if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && event_list == &nsp->read_events) {
      /* event is done, list is read_events and we're in BSD_HACK mode. So unlink
       * event from pcap_read_events */
      gh_list_remove(&nsp->pcap_read_events, &nse->nodeq_pcap);
      nsock_log_debug_all("PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
    }

    if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && event_list == &nsp->pcap_read_events) {
      /* event is done, list is pcap_read_events and we're in BSD_HACK mode.
       * So unlink event from read_events */
      gh_list_remove(&nsp->read_events, &nse->nodeq_io);

      nsock_log_debug_all("PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
    }
  }
#endif
#endif
  event_dispatch_and_delete(nsp, nse, notify);
  return 1;
}