Example #1
0
static int
devpoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct devpollop *devpollop = base->evbase;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	int i, res, timeout = -1;

	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	/* Convert the timeout to milliseconds, rounding any fractional
	 * millisecond up so we never wait less than requested. */
	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;

		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN)
			which |= EV_READ;
		if (what & POLLOUT)
			which |= EV_WRITE;

		if (!which)
			continue;

		/* XXX(niels): not sure if this works for devpoll */
		evmap_io_active_(base, events[i].fd, which);
	}

	return (0);
}
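
For context, the raw /dev/poll pattern that each of these dispatch routines wraps is small: open the device, write() an array of struct pollfd records to register interest, and issue the DP_POLL ioctl to wait for readiness. The sketch below is illustrative only and is not taken from any of the examples; it assumes a Solaris-style <sys/devpoll.h>, and fd_to_watch is a placeholder descriptor.

#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int
devpoll_wait_once(int fd_to_watch, int timeout_ms)
{
	struct pollfd pfd;
	struct pollfd results[8];
	struct dvpoll dvp;
	int dpfd, n;

	/* One handle to /dev/poll holds the whole interest set. */
	if ((dpfd = open("/dev/poll", O_RDWR)) == -1)
		return (-1);

	/* Registration is a plain write() of struct pollfd records. */
	pfd.fd = fd_to_watch;
	pfd.events = POLLIN;
	pfd.revents = 0;
	if (write(dpfd, &pfd, sizeof(pfd)) != (ssize_t)sizeof(pfd)) {
		close(dpfd);
		return (-1);
	}

	/* DP_POLL blocks until a descriptor is ready or the timeout expires. */
	dvp.dp_fds = results;
	dvp.dp_nfds = 8;
	dvp.dp_timeout = timeout_ms;
	n = ioctl(dpfd, DP_POLL, &dvp);

	close(dpfd);
	return (n);	/* number of ready descriptors, or -1 on error */
}

The dispatch functions in these examples amortize this cost by keeping the device open in devpollop->dpfd and batching registrations through devpoll_queue()/devpoll_commit().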
Example #2
0
static int
devpoll_queue(struct devpollop *devpollop, int fd, int events)
{
	struct pollfd *pfd;

	if (devpollop->nchanges >= devpollop->nevents) {
		/*
		 * Change buffer is full, must commit it to /dev/poll before
		 * adding more
		 */
		if (devpoll_commit(devpollop) != 0)
			return (-1);
	}

	pfd = &devpollop->changes[devpollop->nchanges++];
	pfd->fd = fd;
	pfd->events = events;
	pfd->revents = 0;

	return (0);
}
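
Every example calls devpoll_commit() to flush the pending change list, but that helper is not shown here. A minimal sketch, assuming the queued changes are flushed to the /dev/poll descriptor with a single pwrite() at offset 0, might look like this (the offset-0 pwrite is reportedly a workaround for a limitation of plain write() on Solaris):

static int
devpoll_commit(struct devpollop *devpollop)
{
	if (devpollop->nchanges == 0)
		return (0);

	/* Hand the queued pollfd records to the kernel in one shot.
	 * pwrite() at offset 0 is assumed here; a failed flush leaves
	 * the change buffer intact so the caller can retry. */
	if (pwrite(devpollop->dpfd, devpollop->changes,
	    sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
		return (-1);

	devpollop->nchanges = 0;
	return (0);
}

Deregistration fits the same path: /dev/poll removes a descriptor from the interest set when it is queued with POLLREMOVE set in the events field.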
Example #3
0
static int
devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct devpollop *devpollop = arg;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	struct evdevpoll *evdp;
	int i, res, timeout = -1;

	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	/*
	 * We should release the lock if we're going to enter the kernel
	 * in a multi-threaded application.  However, if we're single
	 * threaded, there's really no advantage to releasing the lock and
	 * it just takes up time we could spend doing something else.
	 */
	OPAL_THREAD_UNLOCK(&opal_event_lock);
	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
	OPAL_THREAD_LOCK(&opal_event_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}

		evsignal_process(base);
		return (0);
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;
		struct event *evread = NULL, *evwrite = NULL;

		assert(events[i].fd < devpollop->nfds);
		evdp = &devpollop->fds[events[i].fd];

		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN) {
			evread = evdp->evread;
			which |= OPAL_EV_READ;
		}

		if (what & POLLOUT) {
			evwrite = evdp->evwrite;
			which |= OPAL_EV_WRITE;
		}

		if (!which)
			continue;

		if (evread != NULL && !(evread->ev_events & OPAL_EV_PERSIST))
			event_del(evread);
		if (evwrite != NULL && evwrite != evread &&
		    !(evwrite->ev_events & OPAL_EV_PERSIST))
			event_del(evwrite);

		if (evread != NULL)
			event_active(evread, OPAL_EV_READ, 1);
		if (evwrite != NULL)
			event_active(evwrite, OPAL_EV_WRITE, 1);
	}

	return (0);
}
Example #4
0
int
devpoll_dispatch (struct event_base *base, void *arg, struct timeval *tv)
{
  struct devpollop *devpollop = arg;
  struct pollfd *events = devpollop->events;
  struct dvpoll dvp;
  struct evdevpoll *evdp;
  int i, res, timeout = -1;

  if (devpollop->nchanges)
    devpoll_commit (devpollop);

  if (tv != NULL)
    timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;

  dvp.dp_fds = devpollop->events;
  dvp.dp_nfds = devpollop->nevents;
  dvp.dp_timeout = timeout;

  res = ioctl (devpollop->dpfd, DP_POLL, &dvp);

  if (res == -1)
    {
      if (errno != EINTR)
        {
          event_warn ("ioctl: DP_POLL");
          return (-1);
        }

      evsignal_process (base);

      return (0);
    }

  else if (base->sig.evsignal_caught)
    {
      evsignal_process (base);
    }

  event_debug ( ("%s: devpoll_wait reports %d", __func__, res) );

  for (i = 0; i < res; i++)
    {
      int which = 0;
      int what = events[i].revents;

      struct event *evread = NULL, *evwrite = NULL;

      assert (events[i].fd < devpollop->nfds);
      evdp = &devpollop->fds[events[i].fd];

      if (what & POLLHUP)
        what |= POLLIN | POLLOUT;
      else if (what & POLLERR)
        what |= POLLIN | POLLOUT;

      if (what & POLLIN)
        {
          evread = evdp->evread;
          which |= EV_READ;
        }

      if (what & POLLOUT)
        {
          evwrite = evdp->evwrite;
          which |= EV_WRITE;
        }

      if (!which)
        continue;

      if (evread != NULL && ! (evread->ev_events & EV_PERSIST) )
        event_del (evread);

      if (evwrite != NULL && evwrite != evread &&
              ! (evwrite->ev_events & EV_PERSIST) )
        event_del (evwrite);

      if (evread != NULL)
        event_active (evread, EV_READ, 1);

      if (evwrite != NULL)
        event_active (evwrite, EV_WRITE, 1);
    }

  return (0);
}
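
Comparing the variants: Examples 1 and 3 drop the event-base lock around the blocking DP_POLL ioctl so other threads can modify the base while one thread waits in the kernel, whereas Example 4 does no locking at all. Examples 3 and 4 also predate the evmap layer used in Example 1: they look up per-fd evread/evwrite events, delete non-persistent ones before activating them, and check for caught signals, while Example 1 simply maps the ready descriptors through evmap_io_active_() and leaves signal handling to other machinery.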