Example #1
File: event.c Project: sashka/uwsgi
int event_queue_wait_multi(int eq, int timeout, void *events, int nevents) {

        int ret;
	uint_t nget = 1;
	timespec_t ts;
	port_event_t *pe;

	if (timeout >= 0) {
                ts.tv_sec = timeout;
                ts.tv_nsec = 0;
                ret = port_getn(eq, events, nevents, &nget, &ts);
        }
        else {
                ret = port_getn(eq, events, nevents, &nget, NULL);
        }

	if (ret < 0) {
                if (errno != ETIME) {
                        uwsgi_error("port_getn()");
                        return -1;
                }
                return 0;
        }

	pe = (port_event_t *) events;

        return nget;
}
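All of the snippets collected here center on the same Solaris/illumos event-ports calls: port_create() builds the port, port_associate() registers a descriptor with it, and port_getn() dequeues up to a maximum number of events, blocking according to its timespec argument (NULL blocks indefinitely, a zeroed struct polls). Below is a minimal, self-contained sketch of that lifecycle; the helper name and the POLLIN-only interest are illustrative choices, not taken from any of the projects listed here.

#include <sys/types.h>
#include <port.h>
#include <poll.h>
#include <errno.h>
#include <stdio.h>

/* Minimal event-ports lifecycle sketch; error handling is trimmed. */
static int watch_fd(int fd)
{
	port_event_t pe;
	uint_t nget;
	int port;

	port = port_create();
	if (port < 0)
		return -1;

	/* The association is one-shot: it is consumed when the event is
	 * retrieved, hence the re-associate at the bottom of the loop. */
	if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN, NULL) < 0)
		return -1;

	for (;;) {
		nget = 1;	/* wait for at least one event */
		if (port_getn(port, &pe, 1, &nget, NULL) == -1) {
			if (errno == EINTR)
				continue;
			return -1;
		}

		printf("fd %d: events 0x%x\n",
		       (int) pe.portev_object, (unsigned) pe.portev_events);

		if (port_associate(port, PORT_SOURCE_FD, fd, POLLIN, NULL) < 0)
			return -1;
	}
}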
Example #2
static int
_watch (cherokee_fdpoll_port_t *fdp, int timeout_msecs)
{
	int          i, rc, fd;
	struct timespec  timeout;

	timeout.tv_sec  = timeout_msecs/1000L;
	timeout.tv_nsec = ( timeout_msecs % 1000L ) * 1000000L;

	for (i=0; i<FDPOLL(fdp)->system_nfiles; i++) {
		fdp->port_activefd[i] = -1;
	}

	/* First call to get the number of file descriptors with activity
	 */
	rc = port_getn (fdp->port, fdp->port_events, 0,
			(uint_t *)&fdp->port_readyfds,
		        &timeout);
	if ( rc < 0 ) {
		LOG_ERRNO_S (errno, cherokee_err_error, CHEROKEE_ERROR_FDPOLL_PORTS_GETN);
		return 0;
	}

	if ( fdp->port_readyfds == 0 ) {
		/* Get at least 1 fd to wait for activity
		 */
		fdp->port_readyfds = 1;
	}

	/* Second call to get the events of the file descriptors with
	 * activity
	 */
	rc = port_getn (fdp->port, fdp->port_events,FDPOLL(fdp)->nfiles,
			&fdp->port_readyfds, &timeout);
	if ( ( (rc < 0) && (errno != ETIME) ) || (fdp->port_readyfds == -1)) {
		LOG_ERRNO_S (errno, cherokee_err_error, CHEROKEE_ERROR_FDPOLL_PORTS_GETN);
		return 0;
	}

	for ( i = 0; i < fdp->port_readyfds; ++i ) {
		int nfd;

		nfd = fdp->port_events[i].portev_object;
		fdp->port_activefd[nfd] = fdp->port_events[i].portev_events;
		rc = fd_associate( fdp,
		                   nfd,
		                   fdp->port_events[i].portev_user);
		if ( rc < 0 ) {
			LOG_ERRNO (errno, cherokee_err_error,
				   CHEROKEE_ERROR_FDPOLL_PORTS_FD_ASSOCIATE, nfd);
		}
	}

	return fdp->port_readyfds;
}
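Example #2 relies on a property of port_getn() that is easy to overlook: when max is 0, nothing is dequeued; the call only stores the number of currently queued events in *nget and returns immediately, which is what makes the two-call pattern above work. A tiny sketch of that probe in isolation (the helper name is invented):

#include <sys/types.h>
#include <port.h>

/* Returns how many events are queued on `port` right now without dequeuing
 * any of them, or -1 on error.  With max == 0, port_getn() ignores the
 * timeout and returns immediately. */
static int pending_event_count(int port)
{
	uint_t nget = 0;

	if (port_getn(port, NULL, 0, &nget, NULL) < 0)
		return -1;
	return (int) nget;
}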
Example #3
int SelEpolKqEvPrt::getEvents()
{
	int numEvents = -1;
	#if defined(USE_MINGW_SELECT)
		readfds = master;
		numEvents = select(fdMax+1, &readfds, NULL, NULL, NULL);
		if(numEvents==-1)
		{
			perror("select()");
		}
		else
		{
			return fdMax+1;
		}
	#elif defined(USE_SELECT)
		for (int var = 0; var < fdsetSize; ++var) {
			readfds[var] = master[var];
		}
		numEvents = select(fdMax+1, readfds, NULL, NULL, NULL);
		if(numEvents==-1)
		{
			perror("select()");
		}
		else
		{
			return fdMax+1;
		}
	#elif defined USE_EPOLL
		numEvents = epoll_wait(epoll_handle, events, curfds,-1);
	#elif defined USE_KQUEUE
		numEvents = kevent(kq, NULL, 0, evlist, MAXDESCRIPTORS, NULL);
	#elif defined USE_DEVPOLL
		struct dvpoll pollit;
		pollit.dp_timeout = -1;
		pollit.dp_nfds = curfds;
		pollit.dp_fds = polled_fds;
		numEvents = ioctl(dev_poll_fd, DP_POLL, &pollit);
	#elif defined USE_EVPORT
		uint_t nevents, wevents = 0;
		//uint_t num = 0;
		if (port_getn(port, evlist, 0, &wevents, NULL) < 0) return 0;
		if (0 == wevents) wevents = 1;
		nevents = wevents;
		if (port_getn(port, evlist, (uint_t) MAXDESCRIPTORS, &nevents, NULL) < 0) return 0;
		numEvents = (int)nevents;
	#elif defined USE_POLL
		numEvents = poll(polled_fds,nfds,-1);
		if (numEvents == -1){
			perror ("poll");
			exit(0);
		}
	#endif
	return numEvents;
}
Example #4
File: port.c Project: Ga-vin/apache
static apr_status_t call_port_getn(int port, port_event_t list[], 
                                   unsigned int max, unsigned int *nget,
                                   apr_interval_time_t timeout)
{
    struct timespec tv, *tvptr;
    int ret;
    apr_status_t rv = APR_SUCCESS;

    if (timeout < 0) {
        tvptr = NULL;
    }
    else {
        tv.tv_sec = (long) apr_time_sec(timeout);
        tv.tv_nsec = (long) apr_time_usec(timeout) * 1000;
        tvptr = &tv;
    }

    list[0].portev_user = (void *)-1; /* so we can double check that an
                                       * event was returned
                                       */

    ret = port_getn(port, list, max, nget, tvptr);
    /* Note: 32-bit port_getn() on Solaris 10 x86 returns large negative 
     * values instead of 0 when returning immediately.
     */

    if (ret == -1) {
        rv = apr_get_netos_error();

        switch(rv) {
        case EINTR:
        case ETIME:
            if (*nget > 0 && list[0].portev_user != (void *)-1) {
                /* This confusing API can return an event at the same time
                 * that it reports EINTR or ETIME.  If that occurs, just
                 * report the event.  With EINTR, nget can be > 0 without
                 * any event, so check that portev_user was filled in.
                 *
                 * (Maybe it will be simplified; see thread
                 *   http://mail.opensolaris.org
                 *   /pipermail/networking-discuss/2009-August/011979.html
                 *  This code will still work afterwards.)
                 */
                rv = APR_SUCCESS;
                break;
            }
            if (rv == ETIME) {
                rv = APR_TIMEUP;
            }
        /* fall-through */
        default:
            *nget = 0;
        }
    }
    else if (*nget == 0) {
        rv = APR_TIMEUP;
    }

    return rv;
}
Example #5
int16_t
FdEventQueue::poll(
  FdEvent* fd_events,
  int16_t fd_events_len,
  const Time& timeout
) {
  if (fd_events_len > port_events.size()) {
    port_events.resize(fd_events_len);
  }

  uint_t max = fd_events_len, nget;
  timespec timeout_ts = timeout;

  int ret = port_getn(port, port_events, max, &nget, &timeout_ts);

  if (ret == 0) {
    int16_t event_i = 0;

    for (uint_t port_event_i = 0; port_event_i < nget; port_event_i++) {
      const port_event_t& port_event = port_events[port_event_i];

      if (port_event.portev_source != PORT_SOURCE_USER) {
        fd_events[event_i].set_events(port_event.portev_events);
        fd_events[event_i].set_fd(port_event.portev_object);
        if (++event_i == fd_events_len) {
          break;
        }
      }
    }

    return event_i;
  } else {
    return static_cast<int16_t>(ret);
  }
}
Example #6
static bool portfs_root_consume_notify(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_pending_collection *coll)
{
  struct portfs_root_state *state = root->watch;
  uint_t i, n;
  struct timeval now;
  unused_parameter(watcher);

  errno = 0;
  gettimeofday(&now, NULL); /* `now` is used when queueing pending paths below */

  n = 1;
  if (port_getn(state->port_fd, state->portevents,
        sizeof(state->portevents) / sizeof(state->portevents[0]), &n, NULL)) {
    if (errno == EINTR) {
      return false;
    }
    w_log(W_LOG_FATAL, "port_getn: %s\n",
        strerror(errno));
  }

  w_log(W_LOG_DBG, "port_getn: n=%u\n", n);

  if (n == 0) {
    return false;
  }

  for (i = 0; i < n; i++) {
    if (IS_DIR_BIT_SET(state->portevents[i].portev_user)) {
      struct watchman_dir *dir = DECODE_DIR(state->portevents[i].portev_user);
      uint32_t pe = state->portevents[i].portev_events;

      w_log(W_LOG_DBG, "port: dir %.*s [0x%x]\n",
          dir->path->len, dir->path->buf, pe);

      if ((pe & (FILE_RENAME_FROM|UNMOUNTED|MOUNTEDOVER|FILE_DELETE))
          && w_string_equal(dir->path, root->root_path)) {

        w_log(W_LOG_ERR,
          "root dir %s has been (re)moved (code 0x%x), canceling watch\n",
          root->root_path->buf, pe);

        w_root_cancel(root);
        return false;
      }
      w_pending_coll_add(coll, dir->path, false, now, true);

    } else {
      struct watchman_file *file = state->portevents[i].portev_user;
      w_string_t *path;

      path = w_string_path_cat(file->parent->path, file->name);
      w_pending_coll_add(coll, path, true, now, true);
      w_log(W_LOG_DBG, "port: file %.*s\n", path->len, path->buf);
      w_string_delref(path);
    }
  }

  return true;
}
Example #7
int my_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
	port_event_t ev_list[maxevents];
	timespec_t tsp, *tsp_tmp;
	unsigned nget, i;
	int ret_val, ev_val;

	if(0 > epfd) {
		errno = EBADF;
		return -1;
	}

	if(!events) {
		errno = EFAULT;
		return -1;
	} 

	if(-1 != timeout) {
		tsp.tv_sec = timeout / 1000;
		tsp.tv_nsec = (timeout % 1000) * 1000 * 1000;
		tsp_tmp = &tsp;
	} else
		tsp_tmp = NULL;

	nget = 1;
	ev_val = port_getn(epfd, ev_list, maxevents, &nget, tsp_tmp);
	/* _only_ compare to -1, sometimes the retval is broken */
	if(-1 == ev_val) {
		if(ETIME == errno)
			return 0;
// TODO: handle EINTR??
		return -1;
	}

	ret_val = 0;
	for(i = 0; i < nget; i++)
	{
		struct epoll_event *epv_tmp;

		if(ev_list[i].portev_source != PORT_SOURCE_FD)
		{
			logg_develd("Unknown port source! p: %p s: 0x%hx e: 0x%x o: %lu\n",
				ev_list[i].portev_user, ev_list[i].portev_source, ev_list[i].portev_events,
				(unsigned long)ev_list[i].portev_object);
			continue;
		}

// TODO: handle POLLNVAL cleanly, epoll does not deliver it
		if(ev_list[i].portev_events & POLLNVAL)
			continue; /* epoll discards nval; we misuse the event-port one-shot characteristics */

		epv_tmp = &events[ret_val++];
		epv_tmp->events = ev_list[i].portev_events;
		epv_tmp->data.ptr = ev_list[i].portev_user;
	}

	return ret_val;
}
Example #8
int PortsEngine::DispatchEvents()
{
	struct timespec poll_time;

	poll_time.tv_sec = 1;
	poll_time.tv_nsec = 0;

	unsigned int nget = 1; // used to denote a retrieve request.
	int ret = port_getn(EngineHandle, this->events, GetMaxFds() - 1, &nget, &poll_time);
	ServerInstance->UpdateTime();

	// first handle an error condition
	if (ret == -1)
		return -1;

	TotalEvents += nget;

	unsigned int i;
	for (i = 0; i < nget; i++)
	{
		switch (this->events[i].portev_source)
		{
			case PORT_SOURCE_FD:
			{
				int fd = this->events[i].portev_object;
				EventHandler* eh = ref[fd];
				if (eh)
				{
					int mask = eh->GetEventMask();
					if (events[i].portev_events & POLLWRNORM)
						mask &= ~(FD_WRITE_WILL_BLOCK | FD_WANT_FAST_WRITE | FD_WANT_SINGLE_WRITE);
					if (events[i].portev_events & POLLRDNORM)
						mask &= ~FD_READ_WILL_BLOCK;
					// reinsert port for next time around, pretending to be one-shot for writes
					SetEventMask(eh, mask);
					port_associate(EngineHandle, PORT_SOURCE_FD, fd, mask_to_events(mask), eh);
					if (events[i].portev_events & POLLRDNORM)
					{
						ReadEvents++;
						eh->HandleEvent(EVENT_READ);
						if (eh != ref[fd])
							continue;
					}
					if (events[i].portev_events & POLLWRNORM)
					{
						WriteEvents++;
						eh->HandleEvent(EVENT_WRITE);
					}
				}
			}
			default:
			break;
		}
	}

	return (int)i;
}
Example #9
static gboolean
port_dispatch(GSource *source, GSourceFunc callback, gpointer user_data)
{
    node_t *f;
	uint_t nget = 0;
	uint_t total = 0;

    FK_W ("%s 0x%p fd %d\n", __func__, source, PGPFD(source)->fd);

    G_LOCK (fen_lock);
    do {
        nget = 1;
        if (port_getn(PGPFD(source)->fd, pevents, PE_ALLOC, &nget, &zero_wait) == 0) {
            int i;
            for (i = 0; i < nget; i++) {
                f = (node_t *)pevents[i].portev_user;

                if (pevents[i].portev_source == PORT_SOURCE_FILE) {

                    NODE_CLE_STATE(f, NODE_STATE_ASSOCIATED);
                    NODE_SET_STATE(f, NODE_STATE_HAS_EVENTS);

                    if (HAS_NO_EXCEPTION_EVENTS(pevents[i].portev_events)) {
                        /* If the events do not show it's deleted, update
                         * file timestamp to avoid missing events next time.
                         */
                        if (node_lstat(f) != 0 /* || port_add(f) != 0 */) {
                            /* Included deleted event. */
                            pevents[i].portev_events |= FILE_DELETE;
                        }
                    }

                    /* Queue it and waiting for processing. */
                    g_queue_push_tail(g_eventq,
                      node_event_new(pevents[i].portev_events, (gpointer)f));

                } else {
                    FK_W ("[kernel] unknown portev_source %d\n", pevents[i].portev_source);
                }
            }

            total += nget;

        } else {
            FK_W ("[kernel] port_getn %s\n", g_strerror (errno));
            break;
        }
    } while (nget == PE_ALLOC);

    G_UNLOCK (fen_lock);

    if (total > 0 && callback) {
        FK_W ("[kernel] get total %ld events\n", total);
        return callback (user_data);
    }
    return TRUE;
}
Example #10
static void uv__fs_event_read(uv_loop_t* loop,
                              uv__io_t* w,
                              unsigned int revents) {
  uv_fs_event_t *handle = NULL;
  timespec_t timeout;
  port_event_t pe;
  int events;
  int r;

  (void) w;
  (void) revents;

  do {
    uint_t n = 1;

    /*
     * Note that our use of port_getn() here (and not port_get()) is deliberate:
     * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
     * causes port_get() to return success instead of ETIME when there aren't
     * actually any events (!); by using port_getn() in lieu of port_get(),
     * we can at least workaround the bug by checking for zero returned events
     * and treating it as we would ETIME.
     */
    do {
      memset(&timeout, 0, sizeof timeout);
      r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
    }
    while (r == -1 && errno == EINTR);

    if ((r == -1 && errno == ETIME) || n == 0)
      break;

    handle = (uv_fs_event_t*) pe.portev_user;
    assert((r == 0) && "unexpected port_get() error");

    events = 0;
    if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_CHANGE;
    if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
      events |= UV_RENAME;
    assert(events != 0);
    handle->fd = PORT_FIRED;
    handle->cb(handle, NULL, events, 0);

    if (handle->fd != PORT_DELETED) {
      r = uv__fs_event_rearm(handle);
      if (r != 0)
        handle->cb(handle, NULL, 0, r);
    }
  }
  while (handle->fd != PORT_DELETED);
}
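The workaround described in the comment above can be factored into a small helper: always fetch through port_getn() and map a zero event count to ETIME. The sketch below only illustrates that idea; the name port_get_one() is invented and is not part of libuv or of the event-ports API.

#include <sys/types.h>
#include <port.h>
#include <time.h>
#include <errno.h>

/* Fetch a single event through port_getn() and treat a zero count like
 * ETIME, since port_get() with a zeroed timeout may report success without
 * returning an event (Sun bug 6456558). */
static int port_get_one(int port, port_event_t *pe, struct timespec *ts)
{
	uint_t n = 1;

	if (port_getn(port, pe, 1, &n, ts) == -1)
		return -1;		/* errno is ETIME, EINTR, ... */
	if (n == 0) {
		errno = ETIME;		/* behave as if the wait timed out */
		return -1;
	}
	return 0;
}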
Example #11
int
rb_select_ports(long delay)
{
	int i, fd;
	unsigned int nget = 1;
	struct timespec poll_time;
	struct timespec *p = NULL;
	struct ev_entry *ev;

	if(delay >= 0)
	{
		poll_time.tv_sec = delay / 1000;
		poll_time.tv_nsec = (delay % 1000) * 1000000;
		p = &poll_time;
	}


	i = port_getn(pe, pelst, pemax, &nget, p);
	rb_set_time();

	if(i == -1)
		return RB_OK;

	for(i = 0; (unsigned)i < nget; i++)
	{
		if(pelst[i].portev_source == PORT_SOURCE_FD)
		{
			fd = pelst[i].portev_object;
			PF *hdl = NULL;
			rb_fde_t *F = pelst[i].portev_user;
			if((pelst[i].portev_events & (POLLIN | POLLHUP | POLLERR)) && (hdl = F->read_handler))
			{
				F->read_handler = NULL;
				hdl(F, F->read_data);
			}
			if((pelst[i].portev_events & (POLLOUT | POLLHUP | POLLERR)) && (hdl = F->write_handler))
			{
				F->write_handler = NULL;
				hdl(F, F->write_data);
			}
		} else if(pelst[i].portev_source == PORT_SOURCE_TIMER)
		{
			ev = (struct ev_entry *)pelst[i].portev_user;
			rb_run_event(ev);
		}
	}
	return RB_OK;
}
Example #12
int
ircd_select(unsigned long delay)
{
	int	 	 i, fd;
	uint	 	 nget = 1;
	struct	timespec 	 poll_time;
	struct	timer_data	*tdata;

	poll_time.tv_sec = delay / 1000;
	poll_time.tv_nsec = (delay % 1000) * 1000000;

	i = port_getn(pe, pelst, pemax, &nget, &poll_time);
	ircd_set_time();

	if (i == -1)
		return COMM_ERROR;

	for (i = 0; i < nget; i++) {
		switch(pelst[i].portev_source) {
		case PORT_SOURCE_FD:
			fd = pelst[i].portev_object;
			PF *hdl = NULL;
			fde_t *F = &fd_table[fd];

			if ((pelst[i].portev_events & POLLRDNORM) && (hdl = F->read_handler)) {
				F->read_handler = NULL;
				hdl(fd, F->read_data);
			}
			if ((pelst[i].portev_events & POLLWRNORM) && (hdl = F->write_handler)) {
				F->write_handler = NULL;
				hdl(fd, F->write_data);
			}
			break;

		case PORT_SOURCE_TIMER:
			tdata = pelst[i].portev_user;
			tdata->td_cb(tdata->td_udata);

			if (!tdata->td_repeat)
				free(tdata);

			break;
		}
	}
	return COMM_OK;
}
Example #13
static void
port_poll (EV_P_ ev_tstamp timeout)
{
  int res, i;
  struct timespec ts;
  uint_t nget = 1;

  EV_RELEASE_CB;
  ts.tv_sec  = (time_t)timeout;
  ts.tv_nsec = (long)(timeout - (ev_tstamp)ts.tv_sec) * 1e9;
  res = port_getn (backend_fd, port_events, port_eventmax, &nget, &ts);
  EV_ACQUIRE_CB;

  if (res == -1)
    { 
      if (errno != EINTR && errno != ETIME)
        ev_syserr ("(libev) port_getn");

      return;
    } 

  for (i = 0; i < nget; ++i)
    {
      if (port_events [i].portev_source == PORT_SOURCE_FD)
        {
          int fd = port_events [i].portev_object;

          fd_event (
            EV_A_
            fd,
            (port_events [i].portev_events & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
            | (port_events [i].portev_events & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
          );

          port_associate_and_check (EV_A_ fd, anfds [fd].events);
        }
    }

  if (expect_false (nget == port_eventmax))
    {
      ev_free (port_events);
      port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1);
      port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax);
    }
}
Example #14
int PortsEngine::DispatchEvents()
{
	struct timespec poll_time;

	poll_time.tv_sec = 1;
	poll_time.tv_nsec = 0;

	unsigned int nget = 1; // used to denote a retrieve request.
	int i = port_getn(EngineHandle, this->events, GetMaxFds() - 1, &nget, &poll_time);

	// first handle an error condition
	if (i == -1)
		return i;

	TotalEvents += nget;

	for (i = 0; i < nget; i++)
	{
		switch (this->events[i].portev_source)
		{
			case PORT_SOURCE_FD:
			{
				int fd = this->events[i].portev_object;
				if (ref[fd])
				{
					// reinsert port for next time around
					port_associate(EngineHandle, PORT_SOURCE_FD, fd, POLLRDNORM, ref[fd]);
					if ((this->events[i].portev_events & POLLRDNORM))
						ReadEvents++;
					else
						WriteEvents++;
					ref[fd]->HandleEvent((this->events[i].portev_events & POLLRDNORM) ? EVENT_READ : EVENT_WRITE);
				}
			}
			default:
			break;
		}
	}

	return i;
}
Example #15
/*
 * wait for events or timeout
 */
static int fpm_event_port_wait(struct fpm_event_queue_s *queue, unsigned long int timeout) /* {{{ */
{
	int ret, i, nget;
	timespec_t t;

	/* convert timeout into timespec_t */
	t.tv_sec = (int)(timeout / 1000);
	t.tv_nsec = (timeout % 1000) * 1000 * 1000;

	/* wait for an incoming event or a timeout. We want at least one event or a timeout */
	nget = 1;
	ret = port_getn(pfd, events, nevents, &nget, &t);
	if (ret < 0) {

		/* trigger error unless signal interrupt or timeout */
		if (errno != EINTR && errno != ETIME) {
			zlog(ZLOG_WARNING, "poll() returns %d", errno);
			return -1;
		}
	}

	for (i = 0; i < nget; i++) {

		/* do we have a ptr to the event ? */
		if (!events[i].portev_user) {
			continue;
		}

		/* fire the event */
		fpm_event_fire((struct fpm_event_s *)events[i].portev_user);

		/* sanity check */
		if (fpm_globals.parent_pid != getpid()) {
			return -2;
		}
	}
	return nget;
}
Example #16
static gboolean
port_check(GSource *source)
{
	PSource *pn = (PSource *)source;
    uint_t nget;
    
    if (pn->pending) {
        pn->pending = FALSE;
        g_source_add_poll(source, PGPFD(source));
        g_source_unref(source);
        return FALSE;
    }

    if (!(PGPFD(pn)->revents & G_IO_IN))
        return FALSE;

    if (port_getn(PGPFD(source)->fd, NULL, 0, &nget, 0) == 0) {
        if (nget - pn->pending_events > EXPECT_INC_EVENTS(pn)) {
            /* Sleep for a while. */
            pn->pending_events = nget;
            pn->event_growing_factor ++;

            pn->pending = TRUE;
            g_source_ref(source);
            g_source_remove_poll(source, PGPFD(source));
            g_timeout_add(SLEEP_BASE_TIME,
              (GSourceFunc)port_check,
              (gpointer)pn);
            return FALSE;
        }
    }

    pn->pending_events = 0;
    pn->event_growing_factor = 0;

    return TRUE;
}
Example #17
int ioevent_poll(IOEventPoller *ioevent)
{
#if IOEVENT_USE_EPOLL
  return epoll_wait(ioevent->poll_fd, ioevent->events, ioevent->size, ioevent->timeout);
#elif IOEVENT_USE_KQUEUE
  return kevent(ioevent->poll_fd, NULL, 0, ioevent->events, ioevent->size, &ioevent->timeout);
#elif IOEVENT_USE_PORT
  int result;
  int retval;
  unsigned int nget = 1;
  if((retval = port_getn(ioevent->poll_fd, ioevent->events,
          ioevent->size, &nget, &ioevent->timeout)) == 0)
  {
    result = (int)nget;
  } else {
    switch(errno) {
      case EINTR:
      case EAGAIN:
      case ETIME:
        if (nget > 0) {
          result = (int)nget;
        }
        else {
          result = 0;
        }
        break;
      default:
        result = -1;
        break;
    }
  }
  return result;
#else
#error port me
#endif
}
Example #18
int
event_wait(struct event_base *evb, int timeout)
{
    int evp = evb->evp;
    port_event_t *event = evb->event;
    int nevent = evb->nevent;
    struct timespec ts, *tsp;

    ASSERT(evp > 0);
    ASSERT(event != NULL);
    ASSERT(nevent > 0);

    /* port_getn should block indefinitely if timeout < 0 */
    if (timeout < 0) {
        tsp = NULL;
    } else {
        tsp = &ts;
        tsp->tv_sec = timeout / 1000LL;
        tsp->tv_nsec = (timeout % 1000LL) * 1000000LL;
    }

    for (;;) {
        int i, status;
        unsigned int nreturned = 1;

        /*
         * port_getn() retrieves multiple events from a port. A port_getn()
         * call will block until at least nreturned events are triggered. On
         * a successful return event[] is populated with triggered events
         * up to the maximum size allowed by nevent. The number of entries
         * actually placed in event[] is saved in nreturned, which may be
         * more than what we asked for but less than nevent.
         */
        status = port_getn(evp, event, nevent, &nreturned, tsp);
        if (status < 0) {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }

            /*
             * ETIME - The time interval expired before the expected number
             * of events have been posted to the port or nreturned is updated
             * with the number of returned port_event_t structures in event[]
             */
            if (errno != ETIME) {
                log_error("port getn on evp %d with %d events failed: %s", evp,
                          nevent, strerror(errno));
                return -1;
            }
        }

        if (nreturned > 0) {
            for (i = 0; i < nreturned; i++) {
                port_event_t *ev = &evb->event[i];
                uint32_t events = 0;

                log_verb("port %04"PRIX32" from source %d "
                          "triggered on conn %p", ev->portev_events,
                          ev->portev_source, ev->portev_user);

                if (ev->portev_events & POLLERR) {
                    events |= EVENT_ERR;
                }

                if (ev->portev_events & POLLIN) {
                    events |= EVENT_READ;
                }

                if (ev->portev_events & POLLOUT) {
                    events |= EVENT_WRITE;
                }

                if (evb->cb != NULL && events != 0) {
                    status = evb->cb(ev->portev_user, events);
                    if (status < 0) {
                        continue;
                    }

                    /*
                     * When an event for a PORT_SOURCE_FD object is retrieved,
                     * the object no longer has an association with the port.
                     * The event can be processed without the possibility that
                     * another thread can retrieve a subsequent event for the
                     * same object. After processing of the file descriptor
                     * is completed, the port_associate() function can be
                     * called to reassociate the object with the port.
                     *
                     * If the descriptor is still capable of accepting data,
                     * this reassociation is required for the reactivation of
                     * the data detection.
                     */
                    event_reassociate(evb, ev->portev_user);
                }
            }

            return nreturned;
        }

        if (timeout == -1) {
            log_error("port getn on evp %d with %d events and %d timeout "
                      "returned no events", evp, nevent, timeout);
            return -1;
        }

        return 0;
    }

    NOT_REACHED();
}
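As the long comment inside event_wait() explains, retrieving a PORT_SOURCE_FD event dissociates the descriptor from the port, so it must be re-associated before further activity can be reported (presumably what event_reassociate() does). A bare-bones sketch of such a re-arm step, with illustrative names only:

#include <port.h>

/* One-shot re-arm sketch: once an event for `fd` has been consumed, the
 * association is gone and must be re-established for the port to report
 * further activity on that descriptor. */
static int rearm_fd(int port, int fd, int events, void *user)
{
	if (port_associate(port, PORT_SOURCE_FD, fd, events, user) < 0)
		return -1;	/* e.g. the descriptor was closed meanwhile */
	return 0;
}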
Example #19
APR_DECLARE(apr_status_t) apr_pollset_poll(apr_pollset_t *pollset,
                                           apr_interval_time_t timeout,
                                           apr_int32_t *num,
                                           const apr_pollfd_t **descriptors)
{
    apr_os_sock_t fd;
    int ret, i;
    unsigned int nget;
    pfd_elem_t *ep;
    struct timespec tv, *tvptr;
    apr_status_t rv = APR_SUCCESS;

    if (timeout < 0) {
        tvptr = NULL;
    }
    else {
        tv.tv_sec = (long) apr_time_sec(timeout);
        tv.tv_nsec = (long) apr_time_usec(timeout) * 1000;
        tvptr = &tv;
    }

    nget = 1;

    pollset_lock_rings();

    while (!APR_RING_EMPTY(&(pollset->add_ring), pfd_elem_t, link)) {
        ep = APR_RING_FIRST(&(pollset->add_ring));
        APR_RING_REMOVE(ep, link);

        if (ep->pfd.desc_type == APR_POLL_SOCKET) {
            fd = ep->pfd.desc.s->socketdes;
        }
        else {
            fd = ep->pfd.desc.f->filedes;
        }

        port_associate(pollset->port_fd, PORT_SOURCE_FD, 
                           fd, get_event(ep->pfd.reqevents), ep);

        APR_RING_INSERT_TAIL(&(pollset->query_ring), ep, pfd_elem_t, link);

    }

    pollset_unlock_rings();

    ret = port_getn(pollset->port_fd, pollset->port_set, pollset->nalloc,
                    &nget, tvptr);

    (*num) = nget;

    if (ret == -1) {
        (*num) = 0;
        if (errno == ETIME || errno == EINTR) {
            rv = APR_TIMEUP;
        }
        else {
            rv = APR_EGENERAL;
        }
    }
    else if (nget == 0) {
        rv = APR_TIMEUP;
    }
    else {

        pollset_lock_rings();

        for (i = 0; i < nget; i++) {
            pollset->result_set[i] =
                (((pfd_elem_t*)(pollset->port_set[i].portev_user))->pfd);
            pollset->result_set[i].rtnevents =
                get_revent(pollset->port_set[i].portev_events);

            APR_RING_REMOVE((pfd_elem_t*)pollset->port_set[i].portev_user, link);

            APR_RING_INSERT_TAIL(&(pollset->add_ring), 
                                 (pfd_elem_t*)pollset->port_set[i].portev_user,
                                 pfd_elem_t, link);
        }

        pollset_unlock_rings();

        if (descriptors) {
            *descriptors = pollset->result_set;
        }
    }


    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to be Free Ring */
    APR_RING_CONCAT(&(pollset->free_ring), &(pollset->dead_ring), pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
Example #20
static void *
vca_main(void *arg)
{
	struct sess *sp;

	/*
	 * timeouts:
	 *
	 * min_ts : Minimum timeout for port_getn
	 * min_t  : ^ equivalent in floating point representation
	 *
	 * max_ts : Maximum timeout for port_getn
	 * max_t  : ^ equivalent in floating point representation
	 *
	 * with (nevents == 1), we should always choose the correct port_getn
	 * timeout to check session timeouts, so max is just a safety measure
	 * (if this implementation is correct, it could be set to an "infinite"
	 *  value)
	 *
	 * with (nevents > 1), min and max define the acceptable range for
	 * - additional latency of keep-alive connections and
	 * - additional tolerance for handling session timeouts
	 *
	 */
	static struct timespec min_ts = {0L,    100L /*ms*/  * 1000L /*us*/  * 1000L /*ns*/};
	static double          min_t  = 0.1; /* 100    ms*/
	static struct timespec max_ts = {1L, 0L}; 		/* 1 second */
	static double	       max_t  = 1.0;			/* 1 second */

	struct timespec ts;
	struct timespec *timeout;

	(void)arg;

	solaris_dport = port_create();
	assert(solaris_dport >= 0);

	timeout = &max_ts;

	while (1) {
		port_event_t ev[MAX_EVENTS];
		int nevents, ei, ret;
		double now, deadline;

		/*
		 * XXX Do we want to scale this up dynamically to increase
		 *     efficiency in high throughput situations? - would need to
		 *     start with one to keep latency low at any rate
		 *
		 *     Note: when increasing nevents, we must lower min_ts
		 *	     and max_ts
		 */
		nevents = 1;

		/*
		 * see discussion in
		 * - https://issues.apache.org/bugzilla/show_bug.cgi?id=47645
		 * - http://mail.opensolaris.org/pipermail/networking-discuss/2009-August/011979.html
		 *
		 * comment from apr/poll/unix/port.c :
		 *
		 * This confusing API can return an event at the same time
		 * that it reports EINTR or ETIME.
		 *
		 */

		ret = port_getn(solaris_dport, ev, MAX_EVENTS, &nevents, timeout);

		if (ret < 0)
			assert((errno == EINTR) || (errno == ETIME));

		for (ei=0; ei<nevents; ei++) {
			vca_port_ev(ev + ei);
		}

		/* check for timeouts */
		now = TIM_real();
		deadline = now - params->sess_timeout;

		/*
		 * This loop assumes that the oldest sessions are always at the
		 * beginning of the list (which is the case if we guarantee to
		 * enqueue at the tail only)
		 *
		 */

		for (;;) {
			sp = VTAILQ_FIRST(&sesshead);
			if (sp == NULL)
				break;
			if (sp->t_open > deadline) {
				break;
			}
			VTAILQ_REMOVE(&sesshead, sp, list);
			if(sp->fd != -1) {
				vca_del(sp->fd);
			}
			vca_close_session(sp, "timeout");
			SES_Delete(sp);
		}

		/*
		 * Calculate the timeout for the next port_getn
		 */

		if (sp) {
			double tmo = (sp->t_open + params->sess_timeout) - now;

			/* we should have removed all sps whose timeout has passed */
			assert(tmo > 0.0);

			if (tmo < min_t) {
				timeout = &min_ts;
			} else if (tmo > max_t) {
				timeout = &max_ts;
			} else {
				/* TIM_t2ts() ? see #630 */
				ts.tv_sec = (int)floor(tmo);
				ts.tv_nsec = 1e9 * (tmo - ts.tv_sec);
				timeout = &ts;
			}
		} else {
			timeout = &max_ts;
		}
	}
}
Example #21
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			int fd = epdp->ed_pending[i];
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			return (0);
		} else if (errno == ETIME) {
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		struct fd_info *fdi;
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & (POLLERR|POLLHUP)) {
			res = EV_READ | EV_WRITE;
		} else {
			if (pevt->portev_events & POLLIN)
				res |= EV_READ;
			if (pevt->portev_events & POLLOUT)
				res |= EV_WRITE;
		}

		/*
		 * Check for the error situations or a hangup situation
		 */
		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
			res |= EV_READ|EV_WRITE;

		EVUTIL_ASSERT(epdp->ed_nevents > fd);
		fdi = &(epdp->ed_fds[fd]);

		evmap_io_active(base, fd, res);
	} /* end of all events gotten */

	check_evportop(epdp);

	return (0);
}
Example #22
static int
evport_dispatch (struct event_base *base, void *arg, struct timeval *tv)
{
  int i, res;

  struct evport_data *epdp = arg;
  port_event_t pevtlist[EVENTS_PER_GETN];

  /*
   * port_getn will block until it has at least nevents events. It will
   * also return how many it's given us (which may be more than we asked
   * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
   * nevents.
   */
  int nevents = 1;

  /*
   * We have to convert a struct timeval to a struct timespec
   * (only difference is nanoseconds vs. microseconds). If no time-based
   * events are active, we should wait for I/O (and tv == NULL).
   */

  struct timespec ts;

  struct timespec *ts_p = NULL;

  if (tv != NULL)
    {
      ts.tv_sec = tv->tv_sec;
      ts.tv_nsec = tv->tv_usec * 1000;
      ts_p = &ts;
    }

  /*
   * Before doing anything else, we need to reassociate the events we hit
   * last time which need reassociation. See comment at the end of the
   * loop below.
   */

  for (i = 0; i < EVENTS_PER_GETN; ++i)
    {

      struct fd_info *fdi = NULL;

      if (epdp->ed_pending[i] != -1)
        {
          fdi = & (epdp->ed_fds[epdp->ed_pending[i]]);
        }

      if (fdi != NULL && FDI_HAS_EVENTS (fdi) )
        {
          int fd = FDI_HAS_READ (fdi) ? fdi->fdi_revt->ev_fd :
                   fdi->fdi_wevt->ev_fd;
          reassociate (epdp, fdi, fd);
          epdp->ed_pending[i] = -1;
        }
    }

  if ( (res = port_getn (epdp->ed_port, pevtlist, EVENTS_PER_GETN,
                         (unsigned int *) & nevents, ts_p) ) == -1)
    {
      if (errno == EINTR || errno == EAGAIN)
        {
          evsignal_process (base);
          return (0);
        }

      else if (errno == ETIME)
        {
          if (nevents == 0)
            return (0);
        }

      else
        {
          event_warn ("port_getn");
          return (-1);
        }
    }

  else if (base->sig.evsignal_caught)
    {
      evsignal_process (base);
    }

  event_debug ( ("%s: port_getn reports %d events", __func__, nevents) );

  for (i = 0; i < nevents; ++i)
    {

      struct event *ev;

      struct fd_info *fdi;
      port_event_t *pevt = &pevtlist[i];
      int fd = (int) pevt->portev_object;

      check_evportop (epdp);
      check_event (pevt);
      epdp->ed_pending[i] = fd;

      /*
       * Figure out what kind of event it was
       * (because we have to pass this to the callback)
       */
      res = 0;

      if (pevt->portev_events & POLLIN)
        res |= EV_READ;

      if (pevt->portev_events & POLLOUT)
        res |= EV_WRITE;

      assert (epdp->ed_nevents > fd);

      fdi = & (epdp->ed_fds[fd]);

      /*
       * We now check for each of the possible events (READ
       * or WRITE).  Then, we activate the event (which will
       * cause its callback to be executed).
       */

      if ( (res & EV_READ) && ( (ev = fdi->fdi_revt) != NULL) )
        {
          event_active (ev, res, 1);
        }

      if ( (res & EV_WRITE) && ( (ev = fdi->fdi_wevt) != NULL) )
        {
          event_active (ev, res, 1);
        }
    } /* end of all events gotten */

  check_evportop (epdp);

  return (0);
}
Example #23
File: sunos.c Project: 2saki/node
void uv__io_poll(uv_loop_t* loop, int timeout) {
  struct port_event events[1024];
  struct port_event* pe;
  struct timespec spec;
  QUEUE* q;
  uv__io_t* w;
  uint64_t base;
  uint64_t diff;
  unsigned int nfds;
  unsigned int i;
  int saved_errno;
  int nevents;
  int count;
  int fd;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);

    if (port_associate(loop->backend_fd, PORT_SOURCE_FD, w->fd, w->pevents, 0))
      abort();

    w->events = w->pevents;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */

  for (;;) {
    if (timeout != -1) {
      spec.tv_sec = timeout / 1000;
      spec.tv_nsec = (timeout % 1000) * 1000000;
    }

    /* Work around a kernel bug where nfds is not updated. */
    events[0].portev_source = 0;

    nfds = 1;
    saved_errno = 0;
    if (port_getn(loop->backend_fd,
                  events,
                  ARRAY_SIZE(events),
                  &nfds,
                  timeout == -1 ? NULL : &spec)) {
      /* Work around another kernel bug: port_getn() may return events even
       * on error.
       */
      if (errno == EINTR || errno == ETIME)
        saved_errno = errno;
      else
        abort();
    }

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (events[0].portev_source == 0) {
      if (timeout == 0)
        return;

      if (timeout == -1)
        continue;

      goto update_timeout;
    }

    if (nfds == 0) {
      assert(timeout != -1);
      return;
    }

    nevents = 0;

    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->portev_object;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      /* File descriptor that we've stopped watching, ignore. */
      if (w == NULL)
        continue;

      w->cb(loop, w, pe->portev_events);
      nevents++;

      if (w != loop->watchers[fd])
        continue;  /* Disabled by callback. */

      /* Events Ports operates in oneshot mode, rearm timer on next run. */
      if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
        QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
    }

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (saved_errno == ETIME) {
      assert(timeout != -1);
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    diff = loop->time - base;
    if (diff >= (uint64_t) timeout)
      return;

    timeout -= diff;
  }
}
Example #24
static int aeApiPoll(aeEventLoop *eventLoop, struct timeval *tvp) {
    aeApiState *state = eventLoop->apidata;
    struct timespec timeout, *tsp;
    int mask, i;
    uint_t nevents;
    port_event_t event[MAX_EVENT_BATCHSZ];

    /*
     * If we've returned fd events before, we must re-associate them with the
     * port now, before calling port_get().  See the block comment at the top of
     * this file for an explanation of why.
     */
    for (i = 0; i < state->npending; i++) {
        if (state->pending_fds[i] == -1)
            /* This fd has since been deleted. */
            continue;

        if (aeApiAssociate("aeApiPoll", state->portfd,
            state->pending_fds[i], state->pending_masks[i]) != 0) {
            /* See aeApiDelEvent for why this case is fatal. */
            abort();
        }

        state->pending_masks[i] = AE_NONE;
        state->pending_fds[i] = -1;
    }

    state->npending = 0;

    if (tvp != NULL) {
        timeout.tv_sec = tvp->tv_sec;
        timeout.tv_nsec = tvp->tv_usec * 1000;
        tsp = &timeout;
    } else {
        tsp = NULL;
    }

    /*
     * port_getn can return with errno == ETIME having returned some events (!).
     * So if we get ETIME, we check nevents, too.
     */
    nevents = 1;
    if (port_getn(state->portfd, event, MAX_EVENT_BATCHSZ, &nevents,
        tsp) == -1 && (errno != ETIME || nevents == 0)) {
        if (errno == ETIME || errno == EINTR)
            return 0;

        /* Any other error indicates a bug. */
        perror("aeApiPoll: port_get");
        abort();
    }

    state->npending = nevents;

    for (i = 0; i < nevents; i++) {
            mask = 0;
            if (event[i].portev_events & POLLIN)
                mask |= AE_READABLE;
            if (event[i].portev_events & POLLOUT)
                mask |= AE_WRITABLE;

            eventLoop->fired[i].fd = event[i].portev_object;
            eventLoop->fired[i].mask = mask;

            if (evport_debug)
                fprintf(stderr, "aeApiPoll: fd %d mask 0x%x\n",
                    (int)event[i].portev_object, mask);

            state->pending_fds[i] = event[i].portev_object;
            state->pending_masks[i] = (uintptr_t)event[i].portev_user;
    }

    return nevents;
}
Example #25
static int eventer_ports_impl_loop(int id) {
  struct timeval __dyna_sleep = { 0, 0 };
  struct ports_spec *spec;
  spec = eventer_get_spec_for_event(NULL);

  while(1) {
    struct timeval __sleeptime;
    struct timespec __ports_sleeptime;
    unsigned int fd_cnt = 0;
    int ret;
    port_event_t pevents[MAX_PORT_EVENTS];

    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;
 
    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__sleeptime);

    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent();

    /* Now we move on to our fd-based events */
    __ports_sleeptime.tv_sec = __sleeptime.tv_sec;
    __ports_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    fd_cnt = 1;

    pevents[0].portev_source = 65535; /* This is impossible */

    ret = port_getn(spec->port_fd, pevents, MAX_PORT_EVENTS, &fd_cnt,
                    &__ports_sleeptime);
    spec->wakeup_notify = 0; /* force unlock */
    /* The timeout case is a tad complex with ports.  -1/ETIME is clearly
     * a timeout.  However, it is possible that we got that and fd_cnt isn't
     * 0, which means we both timed out and got events... WTF?
     */
    if(fd_cnt == 0 ||
       (ret == -1 && errno == ETIME && pevents[0].portev_source == 65535))
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);

    if(ret == -1 && (errno != ETIME && errno != EINTR))
      mtevL(eventer_err, "port_getn: %s\n", strerror(errno));

    if(ret < 0)
      mtevL(eventer_deb, "port_getn: %s\n", strerror(errno));

    mtevL(eventer_deb, "debug: port_getn(%d, [], %d) => %d\n",
          spec->port_fd, fd_cnt, ret);

    if(pevents[0].portev_source == 65535) {
      /* the impossible still remains, which means our fd_cnt _must_ be 0 */
      fd_cnt = 0;
    }

    if(fd_cnt > 0) {
      int idx;
      /* Loop a last time to process */
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      for(idx = 0; idx < fd_cnt; idx++) {
        port_event_t *pe;
        eventer_t e;
        int fd, mask;

        pe = &pevents[idx];
        if(pe->portev_source != PORT_SOURCE_FD) continue;
        fd = (int)pe->portev_object;
        mtevAssert((intptr_t)pe->portev_user == fd);
        e = master_fds[fd].e;

        /* It's possible that someone removed the event and freed it
         * before we got here.... bail out if we're null.
         */
        if (!e) continue;

        mask = 0;
        if(pe->portev_events & (POLLIN | POLLHUP))
          mask |= EVENTER_READ;
        if(pe->portev_events & (POLLOUT))
          mask |= EVENTER_WRITE;
        if(pe->portev_events & (POLLERR | POLLHUP | POLLNVAL))
          mask |= EVENTER_EXCEPTION;

        eventer_ports_impl_trigger(e, mask);
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Example #26
static int iv_fd_port_poll(struct iv_state *st,
                           struct iv_list_head *active,
                           const struct timespec *abs)
{
    struct timespec _rel;
    struct timespec *rel;
    int run_timers;
    int run_events;
    unsigned int nget;
    port_event_t pe[PORTEV_NUM];
    int ret;
    int i;

    iv_fd_port_upload(st);

    rel = to_relative(st, &_rel, abs);

    run_timers = 0;
    if (rel != NULL && rel->tv_sec == 0 && rel->tv_nsec == 0)
        run_timers = 1;

    run_events = 0;

poll_more:
    nget = 1;

    /*
     * If we get EINTR from port_getn(), no events are returned
     * and nget will not have been updated, but if we get ETIME,
     * events may be returned, and nget will be set to the number
     * of events in the array, and we need to process those
     * events as usual.
     */
    ret = port_getn(st->u.port.port_fd, pe, PORTEV_NUM, &nget, rel);

    __iv_invalidate_now(st);

    if (ret < 0 && errno != ETIME) {
        if (errno == EINTR)
            return run_timers;

        iv_fatal("iv_fd_port_poll: got error %d[%s]", errno,
                 strerror(errno));
    }

    if (ret < 0 && errno == ETIME)
        run_timers = 1;

    for (i = 0; i < nget; i++) {
        int source;

        source = pe[i].portev_source;
        if (source == PORT_SOURCE_FD) {
            int revents;
            struct iv_fd_ *fd;

            revents = pe[i].portev_events;
            fd = pe[i].portev_user;

            if (revents & (POLLIN | POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKIN);

            if (revents & (POLLOUT | POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKOUT);

            if (revents & (POLLERR | POLLHUP))
                iv_fd_make_ready(active, fd, MASKERR);

            fd->registered_bands = 0;

            iv_list_del_init(&fd->list_notify);
            if (fd->wanted_bands) {
                iv_list_add_tail(&fd->list_notify,
                                 &st->u.port.notify);
            }
        } else if (source == PORT_SOURCE_TIMER) {
            run_timers = 1;
        } else if (source == PORT_SOURCE_USER) {
            run_events = 1;
        } else {
            iv_fatal("iv_fd_port_poll: received event "
                     "from unknown source %d", source);
        }
    }

    if (nget == PORTEV_NUM) {
        run_timers = 1;
        rel = &_rel;
        rel->tv_sec = 0;
        rel->tv_nsec = 0;
        goto poll_more;
    }

    if (run_events)
        iv_event_run_pending_events();

    return run_timers;
}
Example #27
int SelEpolKqEvPrt::getEvents()
{
	int numEvents = -1;
	#if defined(USE_WIN_IOCP)
		#ifdef OS_MINGW_W64
			IOOverlappedEntry entries[64];
			ULONG nEvents = 0;
			if(!GetQueuedCompletionStatusEx(iocpPort,
						(OVERLAPPED_ENTRY*)entries,
						64,
	                    &nEvents,
						(DWORD)this->timeoutMilis,
						FALSE)) {
				int errCd = WSAGetLastError();
				if(errCd != WAIT_TIMEOUT)
				{
					std::cout << "Error occurred during GetQueuedCompletionStatusEx " << WSAGetLastError() << std::endl;
				}
	           	return -1;
	        }
			psocks.clear();
			for(long i = 0; i < (long)nEvents; i++) {
				DWORD qty;
				DWORD flags;
				if(WSAGetOverlappedResult(entries[i].o->sock, (LPWSAOVERLAPPED)entries[i].o, &qty, FALSE, &flags))
				{
					psocks.push_back(entries[i].o);
				}
			}
			return (int)psocks.size();
		#else
			OVERLAPPED       *pOverlapped = NULL;
			IOOperation *lpContext = NULL;
			DWORD            dwBytesTransfered = 0;
			BOOL bReturn = GetQueuedCompletionStatus(iocpPort,
								&dwBytesTransfered,
								(LPDWORD)&lpContext,
								&pOverlapped,
								(DWORD)this->timeoutMilis);
			if (FALSE == bReturn)
			{
				return -1;
			}
			IOOperation* iops = (IOOperation*)lpContext;
			psocks.clear();
			psocks.push_back(iops);
			return 1;
		#endif
	#elif defined(USE_MINGW_SELECT)
		readfds = master;
		if(timeoutMilis>1)
		{
			struct timeval tv;
			tv.tv_sec = (timeoutMilis/1000);
			tv.tv_usec = (timeoutMilis%1000)*1000;
			numEvents = select(fdMax+1, &readfds, NULL, NULL, &tv);
		}
		else
		{
			numEvents = select(fdMax+1, &readfds, NULL, NULL, NULL);
		}
		if(numEvents==-1)
		{
			perror("select()");
		}
		else
		{
			if(fdMax>0)
				return fdMax+1;
		}
	#elif defined(USE_SELECT)
		for (int var = 0; var < fdsetSize; ++var) {
			readfds[var] = master[var];
		}
		if(timeoutMilis>1)
		{
			struct timeval tv;
			tv.tv_sec = (timeoutMilis/1000);
			tv.tv_usec = (timeoutMilis%1000)*1000;
			numEvents = select(fdMax+1, readfds, NULL, NULL, &tv);
		}
		else
		{
			numEvents = select(fdMax+1, readfds, NULL, NULL, NULL);
		}
		if(numEvents==-1)
		{
			perror("select()");
		}
		else
		{
			if(fdMax>0)
				return fdMax+1;
		}
	#elif defined USE_EPOLL
		int ccfds = curfds;
		if(curfds<=0) {
			ccfds = 1;
		}
		numEvents = epoll_wait(epoll_handle, events, ccfds+1, timeoutMilis);
	#elif defined USE_KQUEUE
		if(timeoutMilis>1)
		{
			struct timespec tv;
			tv.tv_sec = (timeoutMilis/1000);
			tv.tv_nsec = (timeoutMilis%1000)*1000000;
			numEvents = kevent(kq, NULL, 0, evlist, MAXDESCRIPTORS, &tv);
		}
		else
		{
			numEvents = kevent(kq, NULL, 0, evlist, MAXDESCRIPTORS, NULL);
		}
	#elif defined USE_DEVPOLL
		struct dvpoll pollit;
		pollit.dp_timeout = timeoutMilis;
		pollit.dp_nfds = curfds;
		pollit.dp_fds = polled_fds;
		numEvents = ioctl(dev_poll_fd, DP_POLL, &pollit);
	#elif defined USE_EVPORT
		uint_t nevents, wevents = 0;
		if(timeoutMilis>1)
		{
			struct timespec tv;
			tv.tv_sec = (timeoutMilis/1000);
			tv.tv_nsec = (timeoutMilis%1000)*1000000;
			//uint_t num = 0;
			if (port_getn(port, evlist, 0, &wevents, &tv) < 0) return 0;
			if (0 == wevents) wevents = 1;
			nevents = wevents;
			if (port_getn(port, evlist, (uint_t) MAXDESCRIPTORS, &nevents, &tv) < 0) return 0;
		}
		else
		{
			//uint_t num = 0;
			if (port_getn(port, evlist, 0, &wevents, NULL) < 0) return 0;
Example #28
void
event_loop_stats(event_stats_cb_t cb, void *arg)
{
    struct stats *st = arg;
    int status, evp;
    port_event_t event;
    struct timespec ts, *tsp;

    evp = port_create();
    if (evp < 0) {
        log_error("port create failed: %s", strerror(errno));
        return;
    }

    status = port_associate(evp, PORT_SOURCE_FD, st->sd, POLLIN, NULL);
    if (status < 0) {
        log_error("port associate on evp %d sd %d failed: %s", evp, st->sd,
                  strerror(errno));
        goto error;
    }

    /* port_getn should block indefinitely if st->interval < 0 */
    if (st->interval < 0) {
        tsp = NULL;
    } else {
        tsp = &ts;
        tsp->tv_sec = st->interval / 1000LL;
        tsp->tv_nsec = (st->interval % 1000LL) * 1000000LL;
    }


    for (;;) {
        unsigned int nreturned = 1;

        status = port_getn(evp, &event, 1, &nreturned, tsp);
        if (status != NC_OK) {
            if (errno == EINTR || errno == EAGAIN) {
                continue;
            }

            if (errno != ETIME) {
                log_error("port getn on evp %d with m %d failed: %s", evp,
                          st->sd, strerror(errno));
                goto error;
            }
        }

        ASSERT(nreturned <= 1);

        if (nreturned == 1) {
            /* re-associate monitoring descriptor with the port */
            status = port_associate(evp, PORT_SOURCE_FD, st->sd, POLLIN, NULL);
            if (status < 0) {
                log_error("port associate on evp %d sd %d failed: %s", evp, st->sd,
                          strerror(errno));
            }
        }

        cb(st, &nreturned);
    }

error:
    status = close(evp);
    if (status < 0) {
        log_error("close evp %d failed, ignored: %s", evp, strerror(errno));
    }
    evp = -1;
}
Example #29
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread)
{
  port_event_t *event;
  uint_t n, i, max_chunk, max_sleep;
  ph_job_t *job;
  ph_iomask_t mask;
  struct timespec ts;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
  max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);
  ts.tv_sec = max_sleep / 1000;
  ts.tv_nsec = (max_sleep - (ts.tv_sec * 1000)) * 1000000;
  event = malloc(max_chunk * sizeof(port_event_t));

  while (ck_pr_load_int(&_ph_run_loop)) {
    n = 1;
    memset(event, 0, sizeof(*event));

    if (port_getn(emitter->io_fd, event, max_chunk, &n, &ts)) {
      if (errno != EINTR && errno != ETIME) {
        ph_panic("port_getn: `Pe%d", errno);
      }
      n = 0;
    }

    if (!n) {
      ph_thread_epoch_poll();
      continue;
    }

    for (i = 0; i < n; i++) {
      ph_thread_epoch_begin();

      switch (event[i].portev_source) {
        case PORT_SOURCE_TIMER:
          gettimeofday(&thread->now, NULL);
          thread->refresh_time = false;
          ph_nbio_emitter_timer_tick(emitter);
          break;

        case PORT_SOURCE_USER:
          break;

        case PORT_SOURCE_FD:
          thread->refresh_time = true;
          job = event[i].portev_user;

          switch (event[i].portev_events & (POLLIN|POLLOUT|POLLERR|POLLHUP)) {
            case POLLIN:
              mask = PH_IOMASK_READ;
              break;
            case POLLOUT:
              mask = PH_IOMASK_WRITE;
              break;
            case POLLIN|POLLOUT:
              mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
              break;
            default:
              mask = PH_IOMASK_ERR;
          }
          job->kmask = 0;
          ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
          break;
      }

      if (ph_job_have_deferred_items(thread)) {
        ph_job_pool_apply_deferred_items(thread);
      }
      ph_thread_epoch_end();
      ph_thread_epoch_poll();
    }
  }

  free(event);
}
Example #30
static gboolean
port_fetch_event_cb (void *arg)
{
	pnode_t *pn = (pnode_t *)arg;
    _f* fo;
	uint_t nget = 0;
	port_event_t pe[PE_ALLOC];
    timespec_t timeout;
    gpointer f;
    gboolean ret = TRUE;
    
    /* FK_W ("IN <======== %s\n", __func__); */
    G_LOCK (fen_lock);
    
    memset (&timeout, 0, sizeof (timespec_t));
    do {
        nget = 1;
        if (port_getn (pn->port, pe, PE_ALLOC, &nget, &timeout) == 0) {
            int i;
            for (i = 0; i < nget; i++) {
                fo = (_f*)pe[i].portev_user;
                /* handle event */
                switch (pe[i].portev_source) {
                case PORT_SOURCE_FILE:
                    /* If got FILE_EXCEPTION or add to port failed,
                       delete the pnode */
                    fo->is_active = FALSE;
                    if (fo->user_data) {
                        FK_W("%s\n",
                          printevent(F_NAME(fo), pe[i].portev_events, "RAW"));
                        port_add_kevent (pe[i].portev_events, fo->user_data);
                    } else {
                        /* fnode is deleted */
                        goto L_delete;
                    }
                    if (pe[i].portev_events & FILE_EXCEPTION) {
                        g_hash_table_remove (_obj_fen_hash, fo->user_data);
                    L_delete:
                        FK_W ("[ FREE_FO ] [0x%p]\n", fo);
                        pnode_delete (fo->port);
                        g_free (fo);
                    }
                    break;
                default:
                    /* case PORT_SOURCE_TIMER: */
                    FK_W ("[kernel] unknown portev_source %d\n", pe[i].portev_source);
                }
            }
        } else {
            FK_W ("[kernel] port_getn %s\n", g_strerror (errno));
            nget = 0;
        }
    } while (nget == PE_ALLOC);

	/* Processing g_eventq */
    port_process_kevents ();
    
    if (pn->ref == 0) {
        pn->port_source_id = 0;
        ret = FALSE;
    }
    G_UNLOCK (fen_lock);
    /* FK_W ("OUT ========> %s\n", __func__); */
	return ret;
}