Example #1
static void
eventer_ports_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  const char *cbname;
  struct timeval __now;
  int fd, newmask;
  uint64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */

    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
       * This means our floated event completed before the current
       * event handler even exited.  So it retriggered recursively
       * from inside the event handler.
       *
       * Treat this special case the same as a cross thread trigger
       * and just queue this event to be picked up on the next loop
       */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    release_master_fd(fd, lockstate);
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);

  eventer_mark_callback_time();
  eventer_gettimeofcallback(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevL(eventer_deb, "ports: fire on %d/%x to %s(%p)\n",
        fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = eventer_run_callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_fd(e, 0);
      e->thr_owner = tgt;
      alter_fd(e, newmask);
      mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n", e, pthread_self(), tgt);
    }
    else {
      alter_fd(e, newmask);
      /* Set our mask */
      e->mask = newmask;
      mtevL(eventer_deb, "ports: complete on %d/(%x->%x) to %s(%p)\n",
            fd, mask, newmask, cbname?cbname:"???", e->callback);
    }
  }
  else {
    mtevL(eventer_deb, "ports: complete on %d/none to %s(%p)\n",
          fd, cbname?cbname:"???", e->callback);
    /*
     * Long story long:
     *  When integrating with a few external event systems, we find
     *  it difficult to treat their use of remove+add as an update,
     *  as it can recur within a single handler call and you cannot
     *  remove completely from the event system if you are going to
     *  just update (otherwise the eventer_t in your call stack could
     *  be stale).  What we do is perform a superficial remove, marking
     *  the mask as 0, but not eventer_remove_fd.  Then on an add, if
     *  we already have an event, we just update the mask (as we
     *  have not yet returned to the eventer's loop).
     *  This leaves us in a tricky situation: when a remove is called
     *  and the add doesn't roll in, we return 0 (mask == 0) and hit
     *  this spot.  We intended to remove the event, but it still
     *  resides at master_fds[fd].e -- even after we free it.
     *  So, in the event that we return 0 and
     *  master_fds[fd].e == the event we're about to free... we NULL
     *  it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
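
All of the trigger functions in this listing share the same contract with the callback: the integer the callback returns becomes the event's new interest mask, and a return of 0 means the event is finished, after which the trigger path frees it (and, per the warning in example #4 below, the callback should have called eventer_remove_fd first). A minimal sketch of a compliant handler follows, assuming the classic callback signature used above and that the eventer headers are included; the handler name and the plain close() teardown are illustrative, not part of this tree.

/* Hypothetical handler illustrating the return-mask contract.
 * Non-zero return: stay scheduled with that mask.
 * Zero return: deschedule first (eventer_remove_fd, as the comments
 * above require), then let the trigger path call eventer_free(e). */
static int
my_read_handler(eventer_t e, int mask, void *closure, struct timeval *now) {
  (void)closure;
  (void)now;
  if(mask & EVENTER_EXCEPTION) {
    eventer_remove_fd(e->fd);   /* deschedule before returning 0 */
    close(e->fd);               /* illustrative teardown only */
    return 0;                   /* trigger path frees the event */
  }
  /* ... consume readable data from e->fd ... */
  return EVENTER_READ | EVENTER_EXCEPTION;  /* keep watching */
}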
Example #2
static void eventer_kqueue_impl_trigger(eventer_t e, int mask) {
  ev_lock_state_t lockstate;
  struct timeval __now;
  int oldmask, newmask;
  const char *cbname;
  int fd;
  u_int64_t start, duration;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */

    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  /* We're going to lie to ourselves.  You'd think this should be:
   * oldmask = e->mask;  However, we just fired with masks[fd], so
   * kqueue is clearly looking for all of the events in masks[fd].
   * So, we combine them "just to be safe."
   */
  oldmask = e->mask | masks[fd];
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "kqueue: fire on %d/%x to %s(%p)\n",
         fd, masks[fd], cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    if(!pthread_equal(pthread_self(), e->thr_owner)) {
      pthread_t tgt = e->thr_owner;
      e->thr_owner = pthread_self();
      alter_kqueue_mask(e, oldmask, 0);
      e->thr_owner = tgt;
      mtevL(eventer_deb, "moved event[%p] from t@%llx to t@%llx\n", e, (vpsized_int)pthread_self(), (vpsized_int)tgt);
      if(newmask) eventer_cross_thread_trigger(e, newmask & ~(EVENTER_EXCEPTION));
    }
    else {
      if(master_fds[fd].e != e) {
        e = master_fds[fd].e;
        mtevL(eventer_deb, "%strigger complete [event switched] %d : %x->%x\n", cross_thread ? "[X]" : "", e->fd, master_fds[fd].e->mask, newmask);
      } else {
        mtevL(eventer_deb, "%strigger complete %d : %x->%x\n", cross_thread ? "[X]" : "", e->fd, oldmask, newmask);
      }
      alter_kqueue_mask(e, (e->mask == 0 || cross_thread) ? 0 : oldmask, newmask);
      /* Set our mask */
      e->mask = newmask;
    }
  }
  else {
    /*
     * Long story long:
     *  When integrating with a few external event systems, we find
     *  it difficult to treat their use of remove+add as an update,
     *  as it can recur within a single handler call and you cannot
     *  remove completely from the event system if you are going to
     *  just update (otherwise the eventer_t in your call stack could
     *  be stale).  What we do is perform a superficial remove, marking
     *  the mask as 0, but not eventer_remove_fd.  Then on an add, if
     *  we already have an event, we just update the mask (as we
     *  have not yet returned to the eventer's loop).
     *  This leaves us in a tricky situation: when a remove is called
     *  and the add doesn't roll in, we return 0 (mask == 0) and hit
     *  this spot.  We intended to remove the event, but it still
     *  resides at master_fds[fd].e -- even after we free it.
     *  So, in the event that we return 0 and
     *  master_fds[fd].e == the event we're about to free... we NULL
     *  it out.
     */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
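
Each backend also contains the same post-callback migration logic: if the callback changed e->thr_owner, the registration is torn down on the current thread and re-established under the new owner (the kqueue variant additionally cross-thread triggers so the target loop picks the event up). A rough sketch of a callback exploiting that mechanism, assuming eventer_choose_owner() is available in this tree for picking a loop thread; the handler name and the choice of hashing on the fd are illustrative.

/* Hypothetical handler that re-homes its event onto another loop thread.
 * It only changes e->thr_owner and returns a non-zero mask; the trigger
 * functions above notice the ownership change after the callback returns
 * and move the kernel-level registration to the new owner's queue. */
static int
my_rebalancing_handler(eventer_t e, int mask, void *closure, struct timeval *now) {
  (void)mask;
  (void)closure;
  (void)now;
  /* assumption: eventer_choose_owner(n) maps n onto one of the loop threads */
  e->thr_owner = eventer_choose_owner(e->fd);
  return EVENTER_READ | EVENTER_EXCEPTION;
}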
Example #3
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  int added_to_master_fds = 0;
  u_int64_t start, duration;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
    
    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    master_fds[fd].e = e;
    e->mask = 0;
    added_to_master_fds = 1;
  }
  if(e != master_fds[fd].e) return;
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) return;
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  mtevLT(eventer_deb, &__now, "epoll: fire on %d/%x to %s(%p)\n",
         fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    } else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(! added_to_master_fds && epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                    spec->epoll_fd, fd, errno, strerror(errno));
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "moved event[%p] from t@%d to t@%d\n", e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_cmd = added_to_master_fds ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        if(epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev) != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, %s, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    epoll_cmd == EPOLL_CTL_ADD ? "EPOLL_CTL_ADD" : "EPOLL_CTL_MOD",
                    spec->epoll_fd, fd, errno, strerror(errno), cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) master_fds[fd].e = NULL;
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
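
Both epoll variants translate the eventer interest mask into epoll flag bits inline each time they build a struct epoll_event. The mapping is small enough that a helper makes the intent explicit; a sketch of that mapping as a standalone function (purely illustrative, mirroring the inline code in examples #3 and #4, not a helper that exists in this tree):

/* Illustrative helper: eventer mask bits -> epoll event bits. */
static uint32_t
eventer_mask_to_epoll_events(int mask) {
  uint32_t events = 0;
  if(mask & EVENTER_READ)      events |= (EPOLLIN | EPOLLPRI);
  if(mask & EVENTER_WRITE)     events |= EPOLLOUT;
  if(mask & EVENTER_EXCEPTION) events |= (EPOLLERR | EPOLLHUP);
  return events;
}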
Example #4
static void eventer_epoll_impl_trigger(eventer_t e, int mask) {
  struct epoll_spec *spec;
  struct timeval __now;
  int fd, newmask, needs_add = 0;
  const char *cbname;
  ev_lock_state_t lockstate;
  int cross_thread = mask & EVENTER_CROSS_THREAD_TRIGGER;
  uint64_t start, duration;

  mask = mask & ~(EVENTER_RESERVED);
  fd = e->fd;
  if(cross_thread) {
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */
  }
  if(!pthread_equal(pthread_self(), e->thr_owner)) {
    /* If we're triggering across threads, it can't be registered yet */
    if(master_fds[fd].e != NULL) {
      mtevL(eventer_deb, "Attempting to trigger already-registered event fd: %d cross thread.\n", fd);
    }
    /* mtevAssert(master_fds[fd].e == NULL); */

    eventer_cross_thread_trigger(e,mask);
    return;
  }
  if(master_fds[fd].e == NULL) {
    lockstate = acquire_master_fd(fd);
    if (lockstate == EV_ALREADY_OWNED) {
      /* The incoming triggered event is already owned by this thread.
       * This means our floated event completed before the current
       * event handler even exited.  So it retriggered recursively
       * from inside the event handler.
       *
       * Treat this special case the same as a cross thread trigger
       * and just queue this event to be picked up on the next loop
       */
      eventer_cross_thread_trigger(e, mask);
      return;
    }
    /*
     * If we are re-adding the event to the master list here, also do the
     * corresponding work with epoll_ctl.
     *
     * This can happen in cases where some event was floated and the float
     * completed so fast that we finished the job in the same thread
     * that it started in.  Since we `eventer_remove_fd` before we float,
     * the re-add here should also re-register the fd via epoll_ctl.
     */
    master_fds[fd].e = e;
    e->mask = 0;
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    spec = eventer_get_spec_for_event(e);
    if(mask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(mask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(mask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);

    mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
    if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) != 0) {
      mtevL(mtev_error, "epoll_ctl(%d, add, %d, %d)\n", spec->epoll_fd, fd, errno);
    }
    release_master_fd(fd, lockstate);
  }
  if(e != master_fds[fd].e) {
    mtevL(mtev_error, "Incoming event: %p, does not match master list: %p\n", e, master_fds[fd].e);
    return;
  }
  lockstate = acquire_master_fd(fd);
  if(lockstate == EV_ALREADY_OWNED) {
    mtevL(eventer_deb, "Incoming event: %p already owned by this thread\n", e);
    return;
  }
  mtevAssert(lockstate == EV_OWNED);

  mtev_gettimeofday(&__now, NULL);
  cbname = eventer_name_for_callback_e(e->callback, e);
  spec = eventer_get_spec_for_event(e);
  mtevLT(eventer_deb, &__now, "epoll(%d): fire on %d/%x to %s(%p)\n",
         spec->epoll_fd, fd, mask, cbname?cbname:"???", e->callback);
  mtev_memory_begin();
  LIBMTEV_EVENTER_CALLBACK_ENTRY((void *)e, (void *)e->callback, (char *)cbname, fd, e->mask, mask);
  start = mtev_gethrtime();
  newmask = e->callback(e, mask, e->closure, &__now);
  duration = mtev_gethrtime() - start;
  LIBMTEV_EVENTER_CALLBACK_RETURN((void *)e, (void *)e->callback, (char *)cbname, newmask);
  mtev_memory_end();
  stats_set_hist_intscale(eventer_callback_latency, duration, -9, 1);
  stats_set_hist_intscale(eventer_latency_handle_for_callback(e->callback), duration, -9, 1);

  if(newmask) {
    struct epoll_event _ev;
    memset(&_ev, 0, sizeof(_ev));
    _ev.data.fd = fd;
    if(newmask & EVENTER_READ) _ev.events |= (EPOLLIN|EPOLLPRI);
    if(newmask & EVENTER_WRITE) _ev.events |= (EPOLLOUT);
    if(newmask & EVENTER_EXCEPTION) _ev.events |= (EPOLLERR|EPOLLHUP);
    if(master_fds[fd].e == NULL) {
      mtevL(mtev_debug, "eventer %s(%p) epoll asked to modify descheduled fd: %d\n",
            cbname?cbname:"???", e->callback, fd);
    } else {
      if(!pthread_equal(pthread_self(), e->thr_owner)) {
        pthread_t tgt = e->thr_owner;
        e->thr_owner = pthread_self();
        spec = eventer_get_spec_for_event(e);
        if(e->mask != 0 && !needs_add) {
          mtevL(eventer_deb, "epoll_ctl(%d, del, %d)\n", spec->epoll_fd, fd);
          if(epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) != 0) {
            mtevFatal(mtev_error,
                      "epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, fd, &_ev) failed; "
                      "spec->epoll_fd: %d; fd: %d; errno: %d (%s)\n",
                      spec->epoll_fd, fd, errno, strerror(errno));
          }
        }
        e->thr_owner = tgt;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, add, %d)\n", spec->epoll_fd, fd);
        mtevAssert(epoll_ctl(spec->epoll_fd, EPOLL_CTL_ADD, fd, &_ev) == 0);
        mtevL(eventer_deb, "epoll(%d) moved event[%p] from t@%d to t@%d\n", spec->epoll_fd, e, (int)pthread_self(), (int)tgt);
      }
      else {
        int epoll_rv;
        int epoll_cmd = (e->mask == 0 || needs_add) ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
        spec = eventer_get_spec_for_event(e);
        mtevL(eventer_deb, "epoll_ctl(%d, %s, %d)\n", spec->epoll_fd, epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
        epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        if(epoll_rv != 0 &&
           ((epoll_cmd == EPOLL_CTL_ADD && errno == EEXIST) ||
            (epoll_cmd == EPOLL_CTL_MOD && errno == ENOENT))) {
          /* try the other way */
          epoll_cmd = (epoll_cmd == EPOLL_CTL_ADD) ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
          mtevL(eventer_deb, "retry epoll_ctl(%d, %s, %d)\n", spec->epoll_fd, epoll_cmd == EPOLL_CTL_ADD ? "add" : "mod", fd);
          epoll_rv = epoll_ctl(spec->epoll_fd, epoll_cmd, fd, &_ev);
        }
        if(epoll_rv != 0) {
          const char *cb_name = eventer_name_for_callback_e(e->callback, e);
          mtevFatal(mtev_error,
                    "epoll_ctl(spec->epoll_fd, %s, fd, &_ev) failed; "
                    "spec->epoll_fd: %d; fd: %d; errno: %d (%s); callback: %s\n",
                    epoll_cmd == EPOLL_CTL_ADD ? "EPOLL_CTL_ADD" : "EPOLL_CTL_MOD",
                    spec->epoll_fd, fd, errno, strerror(errno), cb_name ? cb_name : "???");
        }
      }
    }
    /* Set our mask */
    e->mask = newmask;
  }
  else {
    /* see kqueue implementation for details on the next line */
    if(master_fds[fd].e == e) {

      /* If newmask == 0 the user has floated the connection.  If we get here
       * and they have not called `eventer_remove_fd`, that is a misuse of mtev.
       *
       * Check here whether they complied with the float contract; if they
       * forgot, remove the fd from epoll ourselves and warn in the log.
       */
      spec = eventer_get_spec_for_event(e);
      struct epoll_event _ev;
      memset(&_ev, 0, sizeof(_ev));
      _ev.data.fd = fd;
      if (epoll_ctl(spec->epoll_fd, EPOLL_CTL_DEL, e->fd, &_ev) == 0) {
        mtevL(mtev_error, "WARNING: You forgot to 'eventer_remove_fd()' before returning a mask of zero.\n");
      }
      master_fds[fd].e = NULL;
    }
    eventer_free(e);
  }
  release_master_fd(fd, lockstate);
}
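
Example #4's retry logic exists because the two epoll_ctl commands fail in complementary ways when the local bookkeeping (e->mask, master_fds) is stale: EPOLL_CTL_ADD on a descriptor already in the interest set fails with EEXIST, while EPOLL_CTL_MOD on a descriptor not in the set fails with ENOENT. Stripped of the eventer plumbing, the fallback pattern looks like the sketch below; epoll_mod_or_add is a standalone illustration, not a function in this tree.

#include <sys/epoll.h>
#include <errno.h>

/* Prefer MOD and fall back to ADD if the fd turned out not to be
 * registered (ENOENT); the symmetric retry handles the EEXIST case
 * when starting with ADD, as example #4 does. */
static int
epoll_mod_or_add(int epoll_fd, int fd, struct epoll_event *ev) {
  if(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, ev) == 0) return 0;
  if(errno != ENOENT) return -1;
  return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, ev);
}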