/*
 * Thread entry point that prepares an FSWatcher for kqueue-based file
 * watching.  'arg' is really an FSWatcher* cast to VALUE so this function
 * can be driven through Ruby's callback machinery.
 *
 * Opens every path in watcher->filenames, registers an EVFILT_VNODE event
 * for each fd on a fresh kqueue, plus EVFILT_READ events for the optional
 * termination pipe and the interruption pipe.  Returns the watcher wrapped
 * in a Ruby object; on stat()/open() failure it sets
 * watcher->preparation_error and closes whatever was opened so far.
 */
static VALUE
fs_watcher_init(VALUE arg) {
	FSWatcher *watcher = (FSWatcher *) arg;
	struct kevent *events;
	VALUE filename;
	unsigned int i;
	uint32_t fflags;
	VALUE filenum;
	struct stat buf;
	int fd;
	
	/* Open each file in the filenames list and add each one to the events array. */
	
	/* +2 for the termination pipe and the interruption pipe. */
	events = alloca((RARRAY_LEN(watcher->filenames) + 2) * sizeof(struct kevent));
	watcher->fds = malloc(RARRAY_LEN(watcher->filenames) * sizeof(int));
	if (watcher->fds == NULL) {
		/* rb_raise() longjmps out; the return below is defensive only. */
		rb_raise(rb_eNoMemError, "Cannot allocate memory.");
		return Qnil;
	}
	for (i = 0; i < RARRAY_LEN(watcher->filenames); i++) {
		filename = rb_ary_entry(watcher->filenames, i);
		if (TYPE(filename) != T_STRING) {
			/* Coerce non-String entries (e.g. Pathname) to String. */
			filename = rb_obj_as_string(filename);
		}
		
		/* Probe the path first; a failure marks the watcher as broken
		 * and jumps to the cleanup block below. */
		if (stat(RSTRING_PTR(filename), &buf) == -1) {
			watcher->preparation_error = 1;
			goto end;
		}
		
		#ifdef O_EVTONLY
			/* macOS: O_EVTONLY opens for event notification only. */
			fd = open(RSTRING_PTR(filename), O_EVTONLY);
		#else
			fd = open(RSTRING_PTR(filename), O_RDONLY);
		#endif
		if (fd == -1) {
			watcher->preparation_error = 1;
			goto end;
		}
		
		watcher->fds[i] = fd;
		watcher->fds_len++;
		/* Fire on any write, growth, rename, delete or revocation. */
		fflags = NOTE_WRITE | NOTE_EXTEND | NOTE_RENAME | NOTE_DELETE | NOTE_REVOKE;
		EV_SET(&events[i], fd, EVFILT_VNODE, EV_ADD | EV_ENABLE | EV_CLEAR,
			fflags, 0, 0);
	}
	
	watcher->events_len = watcher->fds_len;
	
	/* Create pipes for inter-thread communication. */
	
	if (pipe(watcher->notification_fd) == -1) {
		/* rb_sys_fail() raises (longjmps); return is defensive. */
		rb_sys_fail("pipe()");
		return Qnil;
	}
	if (pipe(watcher->interruption_fd) == -1) {
		rb_sys_fail("pipe()");
		return Qnil;
	}
	
	/* Create a kqueue and register all events. */
	
	watcher->kq = kqueue();
	if (watcher->kq == -1) {
		rb_sys_fail("kqueue()");
		return Qnil;
	}
	
	if (watcher->termination_pipe != Qnil) {
		/* Also wake when the Ruby-side termination pipe becomes readable. */
		filenum = rb_funcall(watcher->termination_pipe,
			rb_intern("fileno"), 0);
		EV_SET(&events[watcher->events_len], NUM2INT(filenum),
			EVFILT_READ, EV_ADD | EV_ENABLE | EV_CLEAR, 0, 0, 0);
		watcher->termination_fd = NUM2INT(filenum);
		watcher->events_len++;
	}
	/* The read end of the interruption pipe is always watched. */
	EV_SET(&events[watcher->events_len], watcher->interruption_fd[0],
		EVFILT_READ, EV_ADD | EV_ENABLE | EV_CLEAR, 0, 0, 0);
	watcher->events_len++;
	
	if (kevent(watcher->kq, events, watcher->events_len, NULL, 0, NULL) == -1) {
		rb_sys_fail("kevent()");
		return Qnil;
	}
	
end:
	/* On preparation failure, release every fd opened so far; the Ruby
	 * caller is expected to inspect watcher->preparation_error. */
	if (watcher->preparation_error) {
		for (i = 0; i < watcher->fds_len; i++) {
			close(watcher->fds[i]);
		}
		free(watcher->fds);
		watcher->fds = NULL;
		watcher->fds_len = 0;
	}
	return Data_Wrap_Struct(watcher->klass, NULL, fs_watcher_free, watcher);
}
Exemple #2
0
// Accept one incoming connection on 'listener', cooperatively suspending the
// current dispatcher context until the socket is readable.
//
// A one-shot EVFILT_READ kevent is registered whose udata points at the
// stack-allocated OperationContext; dispatcher->dispatch() then yields.  An
// interruptProcedure installed on the current context deletes the kevent and
// reschedules the suspended context, making the wait cancellable.  Throws
// InterruptedException when interrupted and std::runtime_error on failure.
TcpConnection TcpListener::accept() {
  assert(dispatcher != nullptr);
  assert(context == nullptr);
  if (dispatcher->interrupted()) {
    throw InterruptedException();
  }

  std::string message;
  OperationContext listenerContext;
  listenerContext.context = dispatcher->getCurrentContext();
  listenerContext.interrupted = false;
  struct kevent event;
  // EV_CLEAR + SOMAXCONN mirrors the listen backlog in the event's data field.
  EV_SET(&event, listener, EVFILT_READ, EV_ADD | EV_ENABLE | EV_CLEAR , 0, SOMAXCONN, &listenerContext);
  if (kevent(dispatcher->getKqueue(), &event, 1, NULL, 0, NULL) == -1) {
    message = "kevent failed, " + lastErrorMessage();
  } else {
    context = &listenerContext;
    // Cancellation hook: deregister the filter and resume the waiting context.
    // NOTE(review): the lambda-local 'listenerContext' shadows the outer one;
    // it aliases the same object via the member 'context' pointer.
    dispatcher->getCurrentContext()->interruptProcedure = [&] {
      assert(dispatcher != nullptr);
      assert(context != nullptr);
      OperationContext* listenerContext = static_cast<OperationContext*>(context);
      if (!listenerContext->interrupted) {
        
        struct kevent event;
        EV_SET(&event, listener, EVFILT_READ, EV_DELETE | EV_DISABLE, 0, 0, NULL);
        
        if (kevent(dispatcher->getKqueue(), &event, 1, NULL, 0, NULL) == -1) {
          throw std::runtime_error("TcpListener::stop, kevent failed, " + lastErrorMessage());
        }
        
        listenerContext->interrupted = true;
        dispatcher->pushContext(listenerContext->context);
      }
    };
    
    // Suspend until the kqueue event (or an interruption) resumes us.
    dispatcher->dispatch();
    dispatcher->getCurrentContext()->interruptProcedure = nullptr;
    assert(dispatcher != nullptr);
    assert(listenerContext.context == dispatcher->getCurrentContext());
    assert(context == &listenerContext);
    context = nullptr;
    listenerContext.context = nullptr;
    if (listenerContext.interrupted) {
      throw InterruptedException();
    }

    sockaddr inAddr;
    socklen_t inLen = sizeof(inAddr);
    int connection = ::accept(listener, &inAddr, &inLen);
    if (connection == -1) {
      message = "accept failed, " + lastErrorMessage();
    } else {
      // The accepted socket must be non-blocking for the dispatcher model.
      int flags = fcntl(connection, F_GETFL, 0);
      if (flags == -1 || fcntl(connection, F_SETFL, flags | O_NONBLOCK) == -1) {
        message = "fcntl failed, " + lastErrorMessage();
      } else {
        return TcpConnection(*dispatcher, connection);
      }
    }
  }

  throw std::runtime_error("TcpListener::accept, " + message);
}
Exemple #3
0
/*
 * kqueue dispatch backend: submit the pending change list, wait up to 'tv'
 * for activity, then activate each ready struct event.  Returns 0 on
 * success (EINTR included) and -1 on unrecoverable kevent() errors.
 */
int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct kqop *kqop = arg;
	struct kevent *changes = kqop->changes;
	struct kevent *events = kqop->events;
	struct event *ev;
	struct timespec ts;
	int i, res;

	TIMEVAL_TO_TIMESPEC(tv, &ts);

	res = kevent(kqop->kq, changes, kqop->nchanges,
	    events, kqop->nevents, &ts);
	/* The change list has been consumed by the kernel (even on error). */
	kqop->nchanges = 0;
	if (res == -1) {
		if (errno != EINTR) {
                        event_warn("kevent");
			return (-1);
		}

		/* EINTR is not an error; just report no events. */
		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/* 
			 * Error messages that can happen, when a delete fails.
			 *   EBADF happens when the file descriptor has been
			 *   closed,
			 *   ENOENT when the file descriptor was closed and
			 *   then reopened.
			 *   EINVAL for some reasons not understood; EINVAL
			 *   should not be returned ever; but FreeBSD does :-\
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == EINVAL ||
			    events[i].data == ENOENT)
				continue;
			errno = events[i].data;
			return (-1);
		}

		/* udata carries the struct event registered for this ident. */
		ev = (struct event *)events[i].udata;

		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		if (!which)
			continue;

		/* Non-persistent events are removed once they fire. */
		if (!(ev->ev_events & EV_PERSIST))
			event_del(ev);

		/* For signals, kevent's data field is the delivery count. */
		event_active(ev, which,
		    ev->ev_events & EV_SIGNAL ? events[i].data : 1);
	}

	return (0);
}
Exemple #4
0
/*
 * Queue a kqueue filter change for F's fd when its handler presence flips:
 * NULL -> non-NULL registers the filter (EV_ADD|EV_ONESHOT), non-NULL ->
 * NULL removes it (EV_DELETE).  Changes accumulate in kqlst; once the
 * array is full they are flushed one at a time, because a batched kevent()
 * call stops silently at the first already-closed fd.
 */
static void
kq_update_events(rb_fde_t *F, short filter, PF * handler)
{
	PF *old_handler;

	if(filter == EVFILT_READ)
		old_handler = F->read_handler;
	else if(filter == EVFILT_WRITE)
		old_handler = F->write_handler;
	else
		return;		/* XXX bad! -- adrian */

	/* Nothing to do unless the registration state actually changes. */
	if((old_handler == NULL) == (handler == NULL))
		return;

	EV_SET(kqlst + kqoff, F->fd, filter,
	       handler != NULL ? (EV_ADD | EV_ONESHOT) : EV_DELETE, 0, 0, F);

	if(++kqoff == kqmax)
	{
		int i;

		/* Add them one at a time, because there may be
		 * already closed fds in it. The kernel will try
		 * to report invalid fds in the output; if there
		 * is no space, it silently stops processing the
		 * array at that point. We cannot give output space
		 * because that would also return events we cannot
		 * process at this point.
		 */
		for(i = 0; i < kqoff; i++)
		{
			int ret = kevent(kq, kqlst + i, 1, NULL, 0, &zero_timespec);
			/* jdc -- someone needs to do error checking... */
			/* EBADF is normal here -- jilles */
			if(ret == -1 && errno != EBADF)
				rb_lib_log("kq_update_events(): kevent(): %s",
					   strerror(errno));
		}
		kqoff = 0;
	}
}
Exemple #5
0
void FrameGdk::handleGdkEvent(GdkEvent* event)
{
    switch (event->type) {
        case GDK_EXPOSE: {
            GdkRectangle clip;
            gdk_region_get_clipbox(event->expose.region, &clip);
            gdk_window_begin_paint_region (event->any.window, event->expose.region);
            cairo_t* cr = gdk_cairo_create (event->any.window);
            GraphicsContext* ctx = new GraphicsContext(cr);
            paint(ctx, IntRect(clip.x, clip.y, clip.width, clip.height));
            delete ctx;
            gdk_window_end_paint (event->any.window);
            break;
        }
        case GDK_SCROLL: {
            PlatformWheelEvent wheelEvent(event);
            view()->handleWheelEvent(wheelEvent);
            if (wheelEvent.isAccepted()) {
                return;
            }
            RenderObject::NodeInfo nodeInfo(true, true);
            renderer()->layer()->hitTest(nodeInfo, wheelEvent.pos());
            Node* node = nodeInfo.innerNode();
            if (!node)
                return;
            //Default to scrolling the page
            //not sure why its null
            //broke anyway when its not null
            doScroll(renderer(), wheelEvent.isHorizontal(), wheelEvent.delta());
            break;
        }
        case GDK_DRAG_ENTER:
        case GDK_DRAG_LEAVE:
        case GDK_DRAG_MOTION:
        case GDK_DRAG_STATUS:
        case GDK_DROP_START:
        case GDK_DROP_FINISHED: {
            //bool updateDragAndDrop(const PlatformMouseEvent&, Clipboard*);
            //void cancelDragAndDrop(const PlatformMouseEvent&, Clipboard*);
            //bool performDragAndDrop(const PlatformMouseEvent&, Clipboard*);
            break;
        }
        case GDK_MOTION_NOTIFY:
            view()->handleMouseMoveEvent(event);
            break;
        case GDK_BUTTON_PRESS:
        case GDK_2BUTTON_PRESS:
        case GDK_3BUTTON_PRESS:
            view()->handleMousePressEvent(event);
            break;
        case GDK_BUTTON_RELEASE:
            view()->handleMouseReleaseEvent(event);
            break;
        case GDK_KEY_PRESS:
        case GDK_KEY_RELEASE: {
            PlatformKeyboardEvent kevent(event);
            bool handled = false;
            if (!kevent.isKeyUp()) {
                Node* start = selection().start().node();
                if (start && start->isContentEditable()) {
                    switch(kevent.WindowsKeyCode()) {
                        case VK_BACK:
                            TypingCommand::deleteKeyPressed(document());
                            break;
                        case VK_DELETE:
                            TypingCommand::forwardDeleteKeyPressed(document());
                            break;
                        case VK_LEFT:
                            selection().modify(SelectionController::MOVE, SelectionController::LEFT, CharacterGranularity);
                            break;
                        case VK_RIGHT:
                            selection().modify(SelectionController::MOVE, SelectionController::RIGHT, CharacterGranularity);
                            break;
                        case VK_UP:
                            selection().modify(SelectionController::MOVE, SelectionController::BACKWARD, ParagraphGranularity);
                            break;
                        case VK_DOWN:
                            selection().modify(SelectionController::MOVE, SelectionController::FORWARD, ParagraphGranularity);
                            break;
                        default:
                            TypingCommand::insertText(document(), kevent.text(), false);

                    }
                    handled = true;
                }
                if (!handled) {
                    switch (kevent.WindowsKeyCode()) {
                        case VK_LEFT:
                            doScroll(renderer(), true, -120);
                            break;
                        case VK_RIGHT:
                            doScroll(renderer(), true, 120);
                            break;
                        case VK_UP:
                            doScroll(renderer(), false, -120);
                            break;
                        case VK_PRIOR:
                            //return SB_PAGEUP;
                            break;
                        case VK_NEXT:
                            //return SB_PAGEDOWN;
                            break;
                        case VK_DOWN:
                            doScroll(renderer(), false, 120);
                            break;
                        case VK_HOME:
                            renderer()->layer()->scrollToOffset(0, 0, true, true);
                            doScroll(renderer(), false, 120);
                            break;
                        case VK_END:
                            renderer()->layer()->scrollToOffset(0,
                                                                renderer()->height(), true, true);
                            break;
                        case VK_SPACE:
                            if (kevent.shiftKey())
                                doScroll(renderer(), false, -120);
                            else
                                doScroll(renderer(), false, 120);
                            break;
                    }

                }
            }
        }
        default:
            break;
    }
}
Exemple #6
0
/*
 * Initialize (or re-initialize) the kqueue event module for this cycle:
 * create the kqueue on first use, (re)size the change and event lists from
 * the module configuration, optionally arm a kernel EVFILT_TIMER, and set
 * the ngx_event_flags this backend supports.
 */
static ngx_int_t
ngx_kqueue_init(ngx_cycle_t *cycle, ngx_msec_t timer)
{
    ngx_kqueue_conf_t  *kcf;
    struct timespec     ts;
#if (NGX_HAVE_TIMER_EVENT)
    struct kevent       kev;
#endif

    kcf = ngx_event_get_conf(cycle->conf_ctx, ngx_kqueue_module);

    if (ngx_kqueue == -1) {
        ngx_kqueue = kqueue();

        if (ngx_kqueue == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_errno,
                          "kqueue() failed");
            return NGX_ERROR;
        }

#ifdef EVFILT_USER
        if (ngx_kqueue_notify_init(cycle->log) != NGX_OK) {
            return NGX_ERROR;
        }
#endif
    }

    if (max_changes < kcf->changes) {

        /* Flush any queued changes before discarding the old change list. */
        if (nchanges) {
            ts.tv_sec = 0;
            ts.tv_nsec = 0;

            if (kevent(ngx_kqueue, change_list, (int) nchanges, NULL, 0, &ts)
                == -1)
            {
                ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                              "kevent() failed");
                return NGX_ERROR;
            }
            nchanges = 0;
        }

        if (change_list) {
            ngx_free(change_list);
        }

        change_list = ngx_alloc(kcf->changes * sizeof(struct kevent),
                                cycle->log);
        if (change_list == NULL) {
            return NGX_ERROR;
        }
    }

    max_changes = kcf->changes;

    if (nevents < kcf->events) {
        if (event_list) {
            ngx_free(event_list);
        }

        event_list = ngx_alloc(kcf->events * sizeof(struct kevent), cycle->log);
        if (event_list == NULL) {
            return NGX_ERROR;
        }
    }

    ngx_event_flags = NGX_USE_ONESHOT_EVENT
                      |NGX_USE_KQUEUE_EVENT
                      |NGX_USE_VNODE_EVENT;

#if (NGX_HAVE_TIMER_EVENT)

    if (timer) {
        /* Periodic kernel timer; data is the period (EVFILT_TIMER's
         * default unit is milliseconds). */
        kev.ident = 0;
        kev.filter = EVFILT_TIMER;
        kev.flags = EV_ADD|EV_ENABLE;
        kev.fflags = 0;
        kev.data = timer;
        kev.udata = 0;

        ts.tv_sec = 0;
        ts.tv_nsec = 0;

        if (kevent(ngx_kqueue, &kev, 1, NULL, 0, &ts) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "kevent(EVFILT_TIMER) failed");
            return NGX_ERROR;
        }

        ngx_event_flags |= NGX_USE_TIMER_EVENT;
    }

#endif

#if (NGX_HAVE_CLEAR_EVENT)
    ngx_event_flags |= NGX_USE_CLEAR_EVENT;
#else
    ngx_event_flags |= NGX_USE_LEVEL_EVENT;
#endif

#if (NGX_HAVE_LOWAT_EVENT)
    ngx_event_flags |= NGX_USE_LOWAT_EVENT;
#endif

    nevents = kcf->events;

    ngx_io = ngx_os_io;

    ngx_event_actions = ngx_kqueue_module_ctx.actions;

    return NGX_OK;
}
Exemple #7
0
/*
 * Poll the kqueue for up to 'timer' milliseconds (or forever when
 * NGX_TIMER_INFINITE), submitting any queued changes in the same call,
 * then mark each ready ngx_event_t and either post it (NGX_POST_EVENTS)
 * or run its handler immediately.
 */
static ngx_int_t
ngx_kqueue_process_events(ngx_cycle_t *cycle, ngx_msec_t timer,
    ngx_uint_t flags)
{
    int               events, n;
    ngx_int_t         i, instance;
    ngx_uint_t        level;
    ngx_err_t         err;
    ngx_event_t      *ev;
    ngx_queue_t      *queue;
    struct timespec   ts, *tp;

    /* Submit and reset the accumulated change list in this kevent() call. */
    n = (int) nchanges;
    nchanges = 0;

    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;

    } else {

        ts.tv_sec = timer / 1000;
        ts.tv_nsec = (timer % 1000) * 1000000;

        /*
         * 64-bit Darwin kernel has the bug: kernel level ts.tv_nsec is
         * the int32_t while user level ts.tv_nsec is the long (64-bit),
         * so on the big endian PowerPC all nanoseconds are lost.
         */

#if (NGX_DARWIN_KEVENT_BUG)
        ts.tv_nsec <<= 32;
#endif

        tp = &ts;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "kevent timer: %M, changes: %d", timer, n);

    events = kevent(ngx_kqueue, change_list, n, event_list, (int) nevents, tp);

    /* Save errno before any logging/time calls can clobber it. */
    err = (events == -1) ? ngx_errno : 0;

    if (flags & NGX_UPDATE_TIME || ngx_event_timer_alarm) {
        ngx_time_update();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "kevent events: %d", events);

    if (err) {
        if (err == NGX_EINTR) {

            if (ngx_event_timer_alarm) {
                ngx_event_timer_alarm = 0;
                return NGX_OK;
            }

            level = NGX_LOG_INFO;

        } else {
            level = NGX_LOG_ALERT;
        }

        ngx_log_error(level, cycle->log, err, "kevent() failed");
        return NGX_ERROR;
    }

    if (events == 0) {
        if (timer != NGX_TIMER_INFINITE) {
            return NGX_OK;
        }

        /* An infinite wait must not return empty-handed. */
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                      "kevent() returned no events without timeout");
        return NGX_ERROR;
    }

    for (i = 0; i < events; i++) {

        ngx_kqueue_dump_event(cycle->log, &event_list[i]);

        if (event_list[i].flags & EV_ERROR) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, event_list[i].data,
                          "kevent() error on %d filter:%d flags:%04Xd",
                          (int) event_list[i].ident, event_list[i].filter,
                          event_list[i].flags);
            continue;
        }

#if (NGX_HAVE_TIMER_EVENT)

        if (event_list[i].filter == EVFILT_TIMER) {
            ngx_time_update();
            continue;
        }

#endif

        ev = (ngx_event_t *) event_list[i].udata;

        switch (event_list[i].filter) {

        case EVFILT_READ:
        case EVFILT_WRITE:

            /* The low bit of udata encodes the event's instance number,
             * used below to detect stale events for reused connections. */
            instance = (uintptr_t) ev & 1;
            ev = (ngx_event_t *) ((uintptr_t) ev & (uintptr_t) ~1);

            if (ev->closed || ev->instance != instance) {

                /*
                 * the stale event from a file descriptor
                 * that was just closed in this iteration
                 */

                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "kevent: stale event %p", ev);
                continue;
            }

            if (ev->log && (ev->log->log_level & NGX_LOG_DEBUG_CONNECTION)) {
                ngx_kqueue_dump_event(ev->log, &event_list[i]);
            }

            if (ev->oneshot) {
                ev->active = 0;
            }

            /* kevent's data field is the number of bytes available. */
            ev->available = event_list[i].data;

            if (event_list[i].flags & EV_EOF) {
                ev->pending_eof = 1;
                ev->kq_errno = event_list[i].fflags;
            }

            ev->ready = 1;

            break;

        case EVFILT_VNODE:
            ev->kq_vnode = 1;

            break;

        case EVFILT_AIO:
            ev->complete = 1;
            ev->ready = 1;

            break;

#ifdef EVFILT_USER
        case EVFILT_USER:
            break;
#endif

        default:
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "unexpected kevent() filter %d",
                          event_list[i].filter);
            continue;
        }

        if (flags & NGX_POST_EVENTS) {
            /* Defer execution: queue accept events separately so they can
             * be processed under the accept mutex. */
            queue = ev->accept ? &ngx_posted_accept_events
                               : &ngx_posted_events;

            ngx_post_event(ev, queue);

            continue;
        }

        ev->handler(ev);
    }

    return NGX_OK;
}
/*
 * Worker thread for one fsevent watch: pump kqueue events until told to
 * stop via the control pipe, forward vnode events through gen_event(),
 * then emit FSEVENT_DESTROYED and release every resource the watch owns.
 */
static void *watch_thread (void *userdata) {
  fsevent_watch_t *w = userdata;
  fsevent_t event;
  bool done = false;

  while (!done) {
    struct kevent kqe;

    /* Re-submit the whole change list (control pipe + entries) and wait
     * for a single event. */
    if (kevent(w->kq, &w->pipe_event, w->num_entries + 1, &kqe, 1, NULL) == -1) {
      debug_error(errors, "kevent failed");
      event.data.destroyed.reason = FSEVENT_BUG;
      break;
    }

    if ((int)kqe.ident == w->pipe_fds[0]) {
      /* One-byte control message from the owner thread. */
      char code;
      read(w->pipe_fds[0], &code, 1);

      if (code == EVENT_DESTROY) {
        event.data.destroyed.reason = FSEVENT_EXPLICIT;
        done = true;
      } else if (code == EVENT_ADD_TO_WATCH) {
        // FIXME
        //add_entries(&w);
      } else if (code == EVENT_REMOVE_FROM_WATCH) {
        // FIXME
        //remove_entries(&w);
      } else {
        debug_error(NULL, "unknown event %i", code);
        event.data.destroyed.reason = FSEVENT_BUG;
        done = true;
      }
    } else if (kqe.filter == EVFILT_VNODE) {
      gen_event(w, &kqe);
    } else {
      debug_error(NULL, "unknown filter event %u", kqe.filter);
      event.data.destroyed.reason = FSEVENT_FAILED;
      done = true;
    }
  }

  // inform about deletion
  event.type = FSEVENT_DESTROYED;
  w->callback(w, &event, w->userdata);

  if (w->pipe_fds[0] >= 0) {
    close(w->pipe_fds[0]);
    close(w->pipe_fds[1]);
  }

  for (int i = 0; i < w->num_entries; i++) {
    free(w->entries[i].udata);
  }

  close(w->kq);
  free(w);

  return NULL;
}
Exemple #9
0
/*
 * Dispatch pending kqueue activity for this event set.  'sets' is unused
 * here; 'arg' is the struct kqueuer backend state; 'tv' is a timeout in
 * milliseconds (negative means wait forever).  Returns the number of
 * events processed, 0 on EINTR, -1 on kevent() failure and -2 when the
 * kernel reports an unexpected per-event error.
 */
int32_t kqueue_dispatch( struct eventset * sets, void * arg, int32_t tv )
{
    struct timespec tsp, * ptsp = NULL ;

    int32_t res = -1, i = 0;
    struct kqueuer * poller = (struct kqueuer *)arg;

    if ( tv >= 0 )
    {
        // tv is in milliseconds;
        // it must be converted to seconds plus nanoseconds.
        tsp.tv_sec = tv/1000;
        tsp.tv_nsec = (tv%1000) * 1000000;
        ptsp = &tsp;
    }

    res = kevent( poller->kqueuefd, poller->changes,
            poller->nchanges, poller->events, poller->nevents, ptsp );
    /* The change list has been consumed by the kernel. */
    poller->nchanges = 0;

    if ( res == -1 )
    {
        if ( errno != EINTR )
        {
            return -1;
        }

        return 0;
    }

    for ( i = 0; i < res; ++i )
    {
        int32_t which = 0;
        struct event * ev = NULL;

        if ( poller->events[i].flags & EV_ERROR )
        {
            /* EBADF/EINVAL/ENOENT are benign delete-failures for fds that
             * were closed (and possibly reopened); skip them. */
            if ( poller->events[i].data == EBADF
                    || poller->events[i].data == EINVAL
                    || poller->events[i].data == ENOENT )
            {
                continue;
            }

            errno = (int32_t)poller->events[i].data;
            return -2;
        }

        if ( poller->events[i].filter == EVFILT_READ )
        {
            which |= EV_READ;
        }
        else if ( poller->events[i].filter == EVFILT_WRITE )
        {
            which |= EV_WRITE;
        }

        if ( !which )
        {
            continue;
        }

        /* udata carries the struct event registered for this fd. */
        ev = (struct event *)( poller->events[i].udata );
        if ( !(ev->events & EV_PERSIST) )
        {
            /* One-shot: the kernel registration is gone after firing. */
            ev->status &= ~EVSTATUS_X_KQINKERNEL;
        }

        event_active( ev, which );
    }

    /* The output array was full; grow it for the next round. */
    if ( res == poller->nevents )
    {
        kqueue_expand( poller );
    }

    return res;
}
Exemple #10
0
// Wait up to timeout_millisec for socket activity on the pool's kqueue and
// dispatch read/write/close callbacks to each ready nuiSocket.  A negative
// timeout blocks indefinitely.  Returns 0 on success, EWOULDBLOCK when the
// wait timed out, and -1 on kevent() failure (other than EINTR).
int nuiSocketPool::DispatchEvents(int timeout_millisec)
{
  // Room for up to two filters (read + write) per socket — TODO confirm.
  mEvents.resize(mNbSockets * 2);

  struct timespec to;

  // 'to' is only initialized — and only passed to kevent() — when a
  // non-negative timeout was requested.
  if (timeout_millisec >= 0)
  {
    to.tv_sec = timeout_millisec / 1000;
    to.tv_nsec = (timeout_millisec % 1000) * 1000000; // nanosec
  }

  int res = kevent(mQueue, NULL, 0, &mEvents[0], mEvents.size(), (timeout_millisec >= 0) ? &to : (struct timespec *) 0);

  if(res == -1)
  {
    nuiSocket::DumpError(res, "kqueue::waitForEvents");
    if (errno == EINTR)
    {
      //mQueue = kqueue();
      return 0;
    }
    return -1;
  }

  if (res == 0)
    return EWOULDBLOCK;

  std::set<nuiSocket*>::iterator it;
  std::set<nuiSocket*>::iterator end;
  {
    // NOTE(review): 'end' is captured under the lock but compared later;
    // this assumes mDeletedFromPool is not mutated concurrently during
    // dispatch — confirm against the pool's threading model.
    nglCriticalSectionGuard g(mCS);
    end = mDeletedFromPool.end();
  }

  for (int i = 0; i < res; i++)
  {
    nuiSocket* pSocket = (nuiSocket*)mEvents[i].udata;

    // Skip sockets that were removed from the pool since the last pass.
    {
      nglCriticalSectionGuard g(mCS);
      it = mDeletedFromPool.find(pSocket);
    }

    if (it == end)
    {
      // dispatch events:
      switch (mEvents[i].filter)
      {
        case EVFILT_READ:
          if (mEvents[i].flags & EV_EOF)
            pSocket->OnReadClosed();
          else
            pSocket->OnCanRead();
          break;

        case EVFILT_WRITE:
          if (mEvents[i].flags & EV_EOF)
            pSocket->OnWriteClosed();
          else
            pSocket->OnCanWrite();
          break;
      }
    }
  }

  {
    nglCriticalSectionGuard g(mCS);
    mDeletedFromPool.clear();
  }

  HandleIdleSockets();

  return 0;
}
/*
 * Poll the global kqueue for up to 'delay' milliseconds, flushing the
 * pending change list (kqlst) in the same kevent() call, then invoke and
 * clear the one-shot read/write handlers of each ready fd.  Returns
 * COMM_OK on success or timeout, COMM_ERROR on a non-ignorable error.
 */
int
comm_select(unsigned long delay)
{
	int num, i;
	static struct kevent ke[KE_LENGTH];
	struct timespec poll_time;

	/*
	 * remember we are doing NANOseconds here, not micro/milli. God knows
	 * why jlemon used a timespec, but hey, he wrote the interface, not I
	 *   -- Adrian
	 */

	poll_time.tv_sec = delay / 1000;

	poll_time.tv_nsec = (delay % 1000) * 1000000;

	for (;;)
	{
		num = kevent(kq, kqlst, kqoff, ke, KE_LENGTH, &poll_time);
		/* The queued changes are consumed even if kevent() fails. */
		kqoff = 0;

		if(num >= 0)
			break;

		/* e.g. EINTR: treat as "no events" and fall through. */
		if(ignoreErrno(errno))
			break;

		set_time();

		return COMM_ERROR;

		/* NOTREACHED */
	}

	set_time();

	if(num == 0)
		return COMM_OK;	/* No error.. */

	for (i = 0; i < num; i++)
	{
		int fd = (int) ke[i].ident;
		PF *hdl = NULL;
		fde_t *F = &fd_table[fd];

		if(ke[i].flags & EV_ERROR)
		{
			errno = ke[i].data;
			/* XXX error == bad! -- adrian */
			continue;	/* XXX! */
		}

		switch (ke[i].filter)
		{

		case EVFILT_READ:

			/* Handlers are one-shot: clear before invoking so the
			 * callback may safely re-register itself. */
			if((hdl = F->read_handler) != NULL)
			{
				F->read_handler = NULL;
				hdl(fd, F->read_data);
			}

			break;

		case EVFILT_WRITE:

			if((hdl = F->write_handler) != NULL)
			{
				F->write_handler = NULL;
				hdl(fd, F->write_data);
			}
			break;

		default:
			/* Bad! -- adrian */
			break;
		}
	}
	return COMM_OK;
}
Exemple #12
0
/*
 * Register one inotify config node: remember the node in this->cfg and start
 * watching its "folder" with kqueue (BSD) or inotify (elsewhere).  Paths
 * that fail lstat() or are not directories are silently ignored.  Throws a
 * (const char*) message on malformed configuration.
 */
void InotifyEvent::AddCfg(cfgNode conf) {
	int inotify=rfds[0];
	cfgNode config(conf);
	if(!conf)
		throw "Want to add a null config";
	if(strcmp(config.getName(), NAME)!=0)
		throw "Got a non inotify-node in inotify config";

	int i;
	//Is there place before the end?
	for(i=0;cfg && cfg[i];++i);
	if(i>=(n_cfg)) {
		//No ? Ok then make some
		cfg=(cfgNode**)realloc(cfg, (n_cfg+2)*sizeof(cfgNode*));
		cfg[n_cfg+1]=NULL;
		cfg[n_cfg]=new cfgNode(conf);
		++n_cfg;
	} else {
		//Yes ? then use it.
		cfg[i]=new cfgNode(conf);
	}

	if(!config["folder"])
		throw "Inotify need to know in which folder to wait";
	char *file=strdup(config["folder"]);
	/* Strip a trailing slash so the stored path is canonical. */
	if( file[strlen(file)-1]=='/')
		file[strlen(file)-1]=0;
	int ret;
	struct stat buf;
	if(lstat(file, &buf)) {
		free(file);
		return;
	}
	if(!S_ISDIR(buf.st_mode)) {
		//Maybe say to the user he is a f***ing noob ?
		free(file);
		return;
	}

#ifdef BSD
	int f;
	f=open(file, O_RDONLY);
	if(f==-1) {
		/* BUGFIX: a failed open() used to be handed to EV_SET. */
		perror("open");
		free(file);
		return;
	}
	struct kevent change;
	EV_SET(&change, f, EVFILT_VNODE,
			//ONESHOT or not ?
		EV_ADD | EV_ENABLE | EV_ONESHOT,
		NOTE_DELETE | NOTE_EXTEND | NOTE_WRITE | NOTE_ATTRIB,
		0, (void*)n);
	if(kevent(rfds[0], &change, 1, NULL, 0, 0)<0) {
		perror("kevent");
		/* BUGFIX: don't leak the directory fd or the path copy. */
		close(f);
		free(file);
		return;
	}
#else
	ret=inotify_add_watch(inotify, file, IN_CREATE|IN_CLOSE_WRITE|IN_MOVED_TO|IN_DELETE|IN_MOVED_FROM);
	if(ret==-1) {
		printf("%s\n", file);
		free(file);
		perror("Mise en place de la surveillance du fichier");
		exit(0);
	}
#endif
	++n;
	/* Grow the table in steps of ALLOC_STEP entries.
	 * BUGFIX: the size was previously computed from 'ret' (which is
	 * UNINITIALIZED on the BSD path) and sizeof(char*) instead of the
	 * element type, which could overflow the allocation.  Size it from
	 * 'n' and sizeof(inotify_file) instead. */
	if((n%ALLOC_STEP)==0)
		inotify_files=(inotify_file*)realloc(inotify_files, (n+ALLOC_STEP)*sizeof(inotify_file));
#ifdef BSD
	inotify_files[n-1].fd=f;
	inotify_files[n-1].wd=f;
	inotify_files[n-1].filename=file;
	time(&(inotify_files[n-1].last));//Start watching from now.
#else
	inotify_files[n-1].wd=ret;
	inotify_files[n-1].filename=file;
#endif
}
Exemple #13
0
/*the event object should be already allocated*/
/*
 * Receive one pending event from the blackadder socket into 'ev'.
 *
 * When 'data' is NULL, a buffer of exactly the pending message size is
 * malloc()ed and ownership passes to the caller via ev.buffer; otherwise
 * the payload is received into the caller-supplied 'data' (at most
 * 'data_len' bytes).  On any failure ev.type is set to UNDEF_EVENT.
 */
void
blackadder::get_event_into_buf (event &ev, void *data, unsigned int data_len)
{
  int total_buf_size = 0;
  int bytes_read;
  unsigned char id_len;
  struct msghdr msg;
  struct iovec iov;
  unsigned char *ptr = NULL;
  memset (&msg, 0, sizeof(msg));
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  iov.iov_base = fake_buf;
  iov.iov_len = 1;
#ifdef __linux__
  /* MSG_PEEK|MSG_TRUNC yields the full datagram size without consuming it. */
  total_buf_size = recvmsg (sock_fd, &msg, MSG_PEEK | MSG_TRUNC);
#else
#ifdef __APPLE__
  socklen_t _option_len = sizeof (total_buf_size);
  if (recvmsg(sock_fd, &msg, MSG_PEEK) < 0 || getsockopt(sock_fd, SOL_SOCKET, SO_NREAD, &total_buf_size, &_option_len) < 0)
#elif defined(__FreeBSD__)
  struct kevent kev;
  /* XXX: kev.data is the size of the whole unread buffer. */
  if (kevent(kq, NULL, 0, &kev, 1, NULL) < 0 || (total_buf_size = kev.data) < 0)
#else
  /* XXX: The FIONREAD ioctl gets the size of the whole unread buffer. */
  if (recvmsg(sock_fd, &msg, MSG_PEEK) < 0 || ioctl(sock_fd, FIONREAD, &total_buf_size) < 0)
#endif
  {
    cout << "recvmsg/ioctl: " << errno << endl;
    total_buf_size = -1;
  }
#endif
  if (total_buf_size > 0) {
    if (data == NULL) {
      iov.iov_base = malloc (total_buf_size);
      if (!iov.iov_base) {
	ev.type = UNDEF_EVENT;
	return;
      }
      iov.iov_len = total_buf_size;
    } else {
      iov.iov_base = data;
      iov.iov_len = data_len;
    }
    bytes_read = recvmsg (sock_fd, &msg, 0);
    if (bytes_read < 0) {
      /* BUGFIX: only free the buffer we allocated ourselves; the old code
       * also free()d a caller-supplied 'data' buffer on error. */
      if (data == NULL)
	free (iov.iov_base);
      ev.type = UNDEF_EVENT;
      return;
    }
    if (bytes_read < sizeof(struct nlmsghdr)) {
      cout << "read " << bytes_read << " bytes, not enough" << endl;
      /* BUGFIX: same ownership rule as above. */
      if (data == NULL)
	free (iov.iov_base);
      ev.type = UNDEF_EVENT;
      return;
    }
    /* Parse: [nlmsghdr][type:1][id_len:1][id: id_len*PURSUIT_ID_LEN][payload] */
    ev.buffer = iov.iov_base;
    ptr = (unsigned char *) ev.buffer + sizeof(struct nlmsghdr);
    ev.type = *ptr;
    ptr += sizeof(ev.type);
    id_len = *ptr;
    ptr += sizeof(id_len);
    ev.id = string ((char *) ptr, ((int) id_len) * PURSUIT_ID_LEN);
    ptr += ((int) id_len) * PURSUIT_ID_LEN;
    if (ev.type == PUBLISHED_DATA) {
      ev.data = (void *) ptr;
      ev.data_len = bytes_read - (ptr - (unsigned char *) ev.buffer);
    } else {
      ev.data = NULL;
      ev.data_len = 0;
    }
  } else if (errno == EINTR) {
    /* Interrupted system call. */
    ev.type = UNDEF_EVENT;
    return;
  } else {
    ev.type = UNDEF_EVENT;
    return;
  }
}
Exemple #14
0
/*
 * Construct a blackadder client endpoint.  'user_space' selects whether the
 * peer is the user-space daemon or the kernel module.  Depending on build
 * configuration the transport is a netlink socket (HAVE_USE_NETLINK) or a
 * local datagram socket (HAVE_USE_UNIX); macOS tweaks buffer sizes and
 * FreeBSD sets up a kqueue used later to size incoming messages.
 * exit()s if bind() fails.
 */
blackadder::blackadder (bool user_space)
{
  int ret;
  protocol = 0;
  if (user_space) {
#if HAVE_USE_NETLINK
    sock_fd = socket (AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
#elif HAVE_USE_UNIX
    sock_fd = socket(PF_LOCAL, SOCK_DGRAM, 0);
#else
    sock_fd = -1;
    errno = EPFNOSUPPORT; /* XXX */
#endif
  } else {
#if HAVE_USE_NETLINK
    sock_fd = socket (AF_NETLINK, SOCK_RAW, NETLINK_BADDER);
#else
    sock_fd = -1;
    errno = EPFNOSUPPORT; /* XXX */
#endif
  }
  if (sock_fd < 0) {
    perror ("socket");
  }
#ifdef __APPLE__
  int bufsize = 229376; /* XXX */
  setsockopt(sock_fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof (bufsize));
  setsockopt(sock_fd, SOL_SOCKET, SO_RCVBUF, &bufsize, sizeof (bufsize));
#endif
#ifdef __FreeBSD__
  /* kq is consulted by get_event_into_buf() to learn pending buffer sizes. */
  kq = kqueue();
  if (kq < 0) {
    perror("kqueue");
  }
  struct kevent kev;
  EV_SET(&kev, sock_fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0x0, 0, NULL);
  if (kevent(kq, &kev, 1, NULL, 0, NULL)) {
    perror("kevent");
  }
#endif
  /* source netlink address */
  memset (&s_nladdr, 0, sizeof(s_nladdr));
#if HAVE_USE_NETLINK
  s_nladdr.nl_family = AF_NETLINK;
  s_nladdr.nl_pad = 0;
  s_nladdr.nl_pid = getpid ();
  ret = bind (sock_fd, (struct sockaddr *) &s_nladdr, sizeof(s_nladdr));
#elif HAVE_USE_UNIX
  if (user_space) {
#ifndef __linux__
    s_nladdr.sun_len = sizeof (s_nladdr);
#endif
    s_nladdr.sun_family = PF_LOCAL;
    /* XXX: Probably shouldn't use getpid() here. */
    ba_id2path(s_nladdr.sun_path, getpid());
    /* Remove any stale socket file left by a previous run. */
    if (unlink(s_nladdr.sun_path) != 0 && errno != ENOENT)
    perror("unlink");
#ifdef __linux__
    ret = bind(sock_fd, (struct sockaddr *) &s_nladdr, sizeof (s_nladdr));
#else
    ret = bind(sock_fd, (struct sockaddr *) &s_nladdr, SUN_LEN(&s_nladdr));
#endif
  } else {
    if (sock_fd > 0)
    ret = 0;
    else {
      ret = -1;
      errno = EBADF;
    }
  }
#endif
  if (ret < 0) {
    perror ("bind");
    exit (0);
  }
  /* destination netlink address */
  memset (&d_nladdr, 0, sizeof(d_nladdr));
#if HAVE_USE_NETLINK
  d_nladdr.nl_family = AF_NETLINK;
  d_nladdr.nl_pad = 0;
  if (user_space) {
    d_nladdr.nl_pid = 9999; /* destined to user space blackadder */
  } else {
    d_nladdr.nl_pid = 0; /* destined to kernel */
  }
#elif HAVE_USE_UNIX
  if (user_space) {
#ifndef __linux__
    d_nladdr.sun_len = sizeof (d_nladdr);
#endif
    d_nladdr.sun_family = PF_LOCAL;
    ba_id2path(d_nladdr.sun_path, (user_space) ? 9999 : 0); /* XXX */
  }
#endif
}
Exemple #15
0
/*
 * Create a new non-blocking TCP test connection.
 *
 * Picks a local source address by rotating through the Mflag addresses
 * starting at localipbase, creates a non-blocking socket, optionally binds
 * it (lflag), starts a non-blocking connect() to remoteip, and registers
 * the socket for EVFILT_WRITE on the global kqueue so the worker is driven
 * once the connection becomes writable.
 *
 * Fatal setup errors abort the process via err(); only allocation failure
 * is reported to the caller by returning NULL.
 */
static struct connection *
tcpp_client_newconn(void)
{
	struct sockaddr_in sin;
	struct connection *conn;
	struct kevent kev;
	int fd, i;

	/*
	 * Spread load over available IPs, rotating through them as we go.  No
	 * attempt to localize IPs to particular workers.
	 */
	sin = localipbase;
	sin.sin_addr.s_addr = htonl(ntohl(localipbase.sin_addr.s_addr) +
	    (counter++ % Mflag));

	fd = socket(PF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		err(-1, "socket");

	if (fd < 0)
		err(-1, "socket");

	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0)
		err(-1, "fcntl");

	/* Suppress SIGPIPE on writes to a dead peer; disable Nagle. */
	i = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &i, sizeof(i)) < 0)
		err(-1, "setsockopt");
	i = 1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &i, sizeof(i)) < 0)
		err(-1, "setsockopt");
#if 0
	i = 1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &i, sizeof(i)) < 0)
		err(-1, "setsockopt");
#endif

	if (lflag) {
		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
			err(-1, "bind");
	}

	/* Non-blocking connect: EINPROGRESS is the expected in-flight case. */
	if (connect(fd, (struct sockaddr *)&remoteip, sizeof(remoteip)) < 0 &&
	    errno != EINPROGRESS)
		err(-1, "connect");

	conn = malloc(sizeof(*conn));
	if (conn == NULL) {
		/* Don't leak the socket when we can't track the connection. */
		close(fd);
		return (NULL);
	}
	bzero(conn, sizeof(*conn));
	conn->conn_magic = CONNECTION_MAGIC;
	conn->conn_fd = fd;
	conn->conn_header.th_magic = TCPP_MAGIC;
	conn->conn_header.th_len = payload_len;
	tcpp_header_encode(&conn->conn_header);

	/* Wake up on writability; udata carries the connection state. */
	EV_SET(&kev, fd, EVFILT_WRITE, EV_ADD, 0, 0, conn);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
		err(-1, "newconn kevent");

	started++;
	return (conn);
}
/**
 * Wait for at least cMinReqs async I/O requests of the context to complete,
 * for up to cMillies milliseconds, harvesting completed requests into
 * pahReqs.
 *
 * @returns VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED on wakeup call,
 *          VERR_FILE_AIO_NO_REQUEST when nothing is pending, or an error
 *          converted from errno.
 * @param   hAioCtx   The AIO context (kqueue backed).
 * @param   cMinReqs  Minimum number of completions to wait for (0 => 1).
 * @param   cMillies  Timeout, or RT_INDEFINITE_WAIT.
 * @param   pahReqs   Where to return completed request handles.
 * @param   cReqs     Size of pahReqs; upper bound on harvested requests.
 * @param   pcReqs    Where to return the number of requests harvested.
 */
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (   RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec    *pTimeout = NULL;
    struct timespec     Timeout = {0,0};
    uint64_t            StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        /*
         * Cap the number of events requested by the size of the on-stack
         * buffer.  The old code capped by cMinReqs only, so a caller with
         * cReqs > AIO_MAXIMUM_REQUESTS_PER_CONTEXT could make kevent()
         * write past the end of aKEvents.
         */
        int cRequestsToWait = cReqs < (size_t)AIO_MAXIMUM_REQUESTS_PER_CONTEXT
                            ? (int)cReqs
                            : AIO_MAXIMUM_REQUESTS_PER_CONTEXT;
        int rcBSD;
        uint64_t StartTime;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we will leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            int cbTransfered = aio_return(&pReqInt->AioCB);

            if (cbTransfered < 0)
            {
                /* NOTE(review): aio_return() reports the error code of a
                 * failed operation (or -1 with errno set); confirm the
                 * value passed to RTErrConvertFromErrno is the intended
                 * one rather than errno itself. */
                pReqInt->Rc = RTErrConvertFromErrno(cbTransfered);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done Yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (    pCtxInt->fWokenUp
        &&  RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
Exemple #17
0
/*
 * Wait on `cond` for up to `seconds`, using the platform backend:
 * a Windows condition variable, a kqueue EVFILT_USER wait, or
 * pthread_cond_timedwait.  If `locked_mutex` is NULL a backend-internal
 * lock is used for the duration of the wait; otherwise the caller's
 * already-locked mutex is released while waiting and re-acquired after.
 */
void soundio_os_cond_timed_wait(struct SoundIoOsCond *cond,
        struct SoundIoOsMutex *locked_mutex, double seconds)
{
#if defined(SOUNDIO_OS_WINDOWS)
    CRITICAL_SECTION *target_cs;
    if (locked_mutex) {
        target_cs = &locked_mutex->id;
    } else {
        target_cs = &cond->default_cs_id;
        EnterCriticalSection(&cond->default_cs_id);
    }
    DWORD ms = seconds * 1000.0;
    SleepConditionVariableCS(&cond->id, target_cs, ms);
    if (!locked_mutex)
        LeaveCriticalSection(&cond->default_cs_id);
#elif defined(SOUNDIO_OS_KQUEUE)
    struct kevent kev;
    struct kevent out_kev;

    if (locked_mutex)
        assert_no_err(pthread_mutex_unlock(&locked_mutex->id));

    memset(&kev, 0, sizeof(kev));
    kev.ident = notify_ident;
    kev.filter = EVFILT_USER;
    kev.flags = EV_ADD | EV_CLEAR;

    // this time is relative
    struct timespec timeout;
    timeout.tv_nsec = (seconds * 1000000000L);
    timeout.tv_sec  = timeout.tv_nsec / 1000000000L;
    timeout.tv_nsec = timeout.tv_nsec % 1000000000L;

    if (kevent(cond->kq_id, &kev, 1, &out_kev, 1, &timeout) == -1) {
        // NOTE(review): on EINTR this path returns without re-locking
        // locked_mutex, unlike the normal exit below -- confirm callers
        // do not rely on the mutex being held on return.
        if (errno == EINTR)
            return;
        assert(0); // kevent wait error
    }
    if (locked_mutex)
        assert_no_err(pthread_mutex_lock(&locked_mutex->id));
#else
    pthread_mutex_t *target_mutex;
    if (locked_mutex) {
        target_mutex = &locked_mutex->id;
    } else {
        target_mutex = &cond->default_mutex_id;
        assert_no_err(pthread_mutex_lock(target_mutex));
    }
    // this time is absolute
    struct timespec tms;
    clock_gettime(CLOCK_MONOTONIC, &tms);
    tms.tv_nsec += (seconds * 1000000000L);
    tms.tv_sec += tms.tv_nsec / 1000000000L;
    tms.tv_nsec = tms.tv_nsec % 1000000000L;
    int err;
    if ((err = pthread_cond_timedwait(&cond->id, target_mutex, &tms))) {
        // Timeout (ETIMEDOUT) is an expected outcome; only argument and
        // ownership errors are asserted against.
        assert(err != EPERM);
        assert(err != EINVAL);
    }
    if (!locked_mutex)
        assert_no_err(pthread_mutex_unlock(target_mutex));
#endif
}
/*
 * Worker-thread main loop: polls the shared kqueue (5 s timeout per
 * iteration) and dispatches read/write readiness to the Socket objects in
 * SocketMgr::fds, accepting on listen sockets and cleaning up stale fds.
 * Runs until m_threadRunning is cleared; always returns true.
 */
bool SocketWorkerThread::run()
{
    //printf("Worker thread started.\n");
    int fd_count;
    Socket * ptr;
    int i;
    struct kevent ev;
    struct timespec ts;
    ts.tv_nsec = 0;
    ts.tv_sec = 5;
	struct kevent ev2;

    SocketMgr * mgr = SocketMgr::getSingletonPtr();
    int kq = mgr->GetKq();

    while(m_threadRunning)
    {
        /* Block for up to 5 seconds waiting for readiness events. */
        fd_count = kevent(kq, NULL, 0, &events[0], THREAD_EVENT_SIZE, &ts);
        for(i = 0; i < fd_count; ++i)
        {
            if(events[i].ident >= SOCKET_HOLDER_SIZE)
            {
                Log.Warning("kqueue", "Requested FD that is too high (%u)", events[i].ident);
                continue;
            }

            ptr = mgr->fds[events[i].ident];

            if(ptr == NULL)
            {
				/* Not a data socket: either a listener (accept) or a stale fd. */
				if( (ptr = ((Socket*)mgr->listenfds[events[i].ident])) != NULL )
				{
					((ListenSocketBase*)ptr)->OnAccept();
				}
				else
				{
					Log.Warning("kqueue", "Returned invalid fd (no pointer) of FD %u", events[i].ident);

					/* make sure it removes so we don't go chasing it again */
					EV_SET(&ev, events[i].ident, EVFILT_WRITE, EV_DELETE, 0, 0, NULL);
					EV_SET(&ev2, events[i].ident, EVFILT_READ, EV_DELETE, 0, 0, NULL);
					kevent(kq, &ev, 1, 0, 0, NULL);
					kevent(kq, &ev2, 1, 0, 0, NULL);
				}
                continue;
            }

            if(events[i].flags & EV_EOF || events[i].flags & EV_ERROR)
            {
                ptr->Disconnect();
                continue;
            }
			else if(events[i].filter == EVFILT_WRITE)
            {
                ptr->BurstBegin();          // Lock receive mutex
                ptr->WriteCallback();       // Perform actual send()
                if(ptr->GetWriteBuffer().GetSize() > 0)
                    ptr->PostEvent(EVFILT_WRITE, true);   // Still remaining data.
                else
				{
                    ptr->DecSendLock();
                    ptr->PostEvent(EVFILT_READ, false);
                }
                ptr->BurstEnd();            // Unlock
            }
			else if(events[i].filter == EVFILT_READ)
            {
                ptr->ReadCallback(0);               // Len is unknown at this point.
				/* If the read handler queued outbound data, switch the fd to
				 * write polling and take the send lock so no other thread
				 * re-arms it concurrently. */
				if(ptr->GetWriteBuffer().GetSize() > 0 && ptr->IsConnected() && !ptr->HasSendLock())
				{
					ptr->PostEvent(EVFILT_WRITE, true);
					ptr->IncSendLock();
				}
            }
        }       
    }
    return true;
}
Exemple #19
0
/*
 * Queue one kevent change for the connection behind `ev` into the global
 * change_list, flushing the list to the kernel with a zero timeout when it
 * is full or when NGX_FLUSH_EVENT is requested.
 *
 * Returns NGX_OK, or NGX_ERROR if kevent() fails during a flush.
 */
static ngx_int_t
ngx_kqueue_set_event(ngx_event_t *ev, ngx_int_t filter, ngx_uint_t flags)
{
    struct kevent     *kev;
    struct timespec    ts;
    ngx_connection_t  *c;

    c = ev->data;

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "kevent set event: %d: ft:%i fl:%04Xi",
                   c->fd, filter, flags);

    /* No room left: push the pending changes to the kernel first. */
    if (nchanges >= max_changes) {
        ngx_log_error(NGX_LOG_WARN, ev->log, 0,
                      "kqueue change list is filled up");

        ts.tv_sec = 0;
        ts.tv_nsec = 0;

        if (kevent(ngx_kqueue, change_list, (int) nchanges, NULL, 0, &ts)
            == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno, "kevent() failed");
            return NGX_ERROR;
        }

        nchanges = 0;
    }

    kev = &change_list[nchanges];

    kev->ident = c->fd;
    kev->filter = (short) filter;
    kev->flags = (u_short) flags;
    /* The low bit of udata carries ev->instance to detect stale events. */
    kev->udata = NGX_KQUEUE_UDATA_T ((uintptr_t) ev | ev->instance);

    if (filter == EVFILT_VNODE) {
        kev->fflags = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND
                                 |NOTE_ATTRIB|NOTE_RENAME
#if (__FreeBSD__ == 4 && __FreeBSD_version >= 430000) \
    || __FreeBSD_version >= 500018
                                 |NOTE_REVOKE
#endif
                      ;
        kev->data = 0;

    } else {
#if (NGX_HAVE_LOWAT_EVENT)
        if (flags & NGX_LOWAT_EVENT) {
            kev->fflags = NOTE_LOWAT;
            kev->data = ev->available;

        } else {
            kev->fflags = 0;
            kev->data = 0;
        }
#else
        kev->fflags = 0;
        kev->data = 0;
#endif
    }

    /* Remember where this change sits so it can be patched/removed later. */
    ev->index = nchanges;
    nchanges++;

    if (flags & NGX_FLUSH_EVENT) {
        ts.tv_sec = 0;
        ts.tv_nsec = 0;

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0, "kevent flush");

        if (kevent(ngx_kqueue, change_list, (int) nchanges, NULL, 0, &ts)
            == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_errno, "kevent() failed");
            return NGX_ERROR;
        }

        nchanges = 0;
    }

    return NGX_OK;
}
Exemple #20
0
/*
 * kqueue backend dispatch: submit the accumulated change list and collect
 * ready events in one kevent() call (bounded by `tv` if non-NULL), then
 * activate the corresponding I/O events on `base`.
 *
 * Returns 0 on success (including EINTR), -1 on unrecoverable error.
 * The base lock is dropped around the kevent() syscall.
 */
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* Build "changes" from "base->changes" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* Drop the lock while blocked in the kernel. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		evmap_io_active_(base, events[i].ident, which | EV_ET);
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
Exemple #21
0
/*
 * Poll the global kqueue for up to `delay` milliseconds (forever if
 * negative), submitting any pending changes in kqlst, and dispatch the
 * resulting read/write/timer events to their registered handlers.
 *
 * Returns RB_OK on success or no events, RB_ERROR on a kevent() failure
 * whose errno is not ignorable.
 */
int
rb_select_kqueue(long delay)
{
	int num, i;
	struct timespec poll_time;
	struct timespec *pt;
	rb_fde_t *F;


	if(delay < 0)
	{
		pt = NULL;
	}
	else
	{
		pt = &poll_time;
		poll_time.tv_sec = delay / 1000;
		poll_time.tv_nsec = (delay % 1000) * 1000000;
	}

	/* The loop body either breaks or returns; it never iterates twice. */
	for(;;)
	{
		num = kevent(kq, kqlst, kqoff, kqout, kqmax, pt);
		/* Changes were submitted (even on failure); reset the queue. */
		kqoff = 0;

		if(num >= 0)
			break;

		if(rb_ignore_errno(errno))
			break;

		rb_set_time();

		return RB_ERROR;

		/* NOTREACHED */
	}

	rb_set_time();

	if(num == 0)
		return RB_OK;	/* No error.. */

	for(i = 0; i < num; i++)
	{
		PF *hdl = NULL;

		if(kqout[i].flags & EV_ERROR)
		{
			errno = kqout[i].data;
			/* XXX error == bad! -- adrian */
			continue;	/* XXX! */
		}

		switch (kqout[i].filter)
		{

		case EVFILT_READ:
			F = kqout[i].udata;
			/* Handlers are one-shot: clear before invoking. */
			if((hdl = F->read_handler) != NULL)
			{
				F->read_handler = NULL;
				hdl(F, F->read_data);
			}

			break;

		case EVFILT_WRITE:
			F = kqout[i].udata;
			if((hdl = F->write_handler) != NULL)
			{
				F->write_handler = NULL;
				hdl(F, F->write_data);
			}
			break;
#if defined(EVFILT_TIMER)
		case EVFILT_TIMER:
			rb_run_one_event(kqout[i].udata);
			break;
#endif
		default:
			/* Bad! -- adrian */
			break;
		}
	}
	return RB_OK;
}
Exemple #22
0
/*
 * Minimal kqueue TCP server example: listens on 127.0.0.1:3000, accepts a
 * client via EVFILT_READ on the listening socket, and prints whatever the
 * client sends.  Uses the file-scope globals declared above (changes,
 * events, sockfd, kq, nev, r, i, buffer).
 */
int main(int argc, const char ** argv) {
    /* No need to heap-allocate these: hints is input-only and addr is
     * filled in by getaddrinfo() (the old mallocs were leaked). */
    struct addrinfo *addr = NULL;
    struct addrinfo hints;

    memset(changes, 0, sizeof(changes));
    memset(events, 0, sizeof(events));
    memset(&hints, 0, sizeof(hints));

    hints.ai_family = AF_INET;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_protocol = IPPROTO_TCP;

    /* getaddrinfo() returns a non-zero error code, not -1, and does not
     * set errno -- so test != 0 and report via gai_strerror(). */
    r = getaddrinfo("127.0.0.1", "3000", &hints, &addr);

    if (r != 0) {
        fprintf(stderr, "Error on resolving address: %s\n", gai_strerror(r));
        exit(EXIT_FAILURE);
    }

    sockfd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);

    if (sockfd == -1) {
        perror("Error on creating socket descriptor.\n");
        exit(EXIT_FAILURE);
    }
 
    r = bind(sockfd, addr->ai_addr, addr->ai_addrlen);

    if (r == -1) {
        perror("Error on binding to address.\n");
        exit(EXIT_FAILURE);
    }

    r = listen(sockfd, 1);

    if (r == -1) {
        perror("Error on listening on socket.\n");
        exit(EXIT_FAILURE);
    }

    kq = kqueue();

    if (kq == -1) {
        perror("Error on creating kqueue.\n");
        exit(EXIT_FAILURE);
    }
    
    /* Watch the listening socket; udata stays 0 for the listener. */
    EV_SET(&changes[0], sockfd, EVFILT_READ, 
            EV_ADD | EV_ENABLE, 0, 0, 0);

    for (;;) {
        /* Submit the client registration too, once one exists. */
        int c_n = 1;
        if (changes[1].ident) {
            c_n = 2;
        }
        
        nev = kevent(kq, changes, c_n, events, 2, NULL);

        if (nev == -1) {
            perror("Error on resolving kevents.\n");
            exit(EXIT_FAILURE);
        }
        
        printf("Got event\n");
        
        for (i = 0; i < nev; i++) {
            if (events[i].ident == sockfd) {
                /* Allocate per-connection state only when actually
                 * accepting (the old code malloc'd and leaked one per
                 * event iteration). */
                struct client_s * client = malloc(sizeof(struct client_s));
                if (client == NULL) {
                    perror("Error on allocating client state.\n");
                    exit(EXIT_FAILURE);
                }

                printf("Accepting\n");
                /* accept() requires addrlen to hold the buffer size. */
                client->addrlen = sizeof(client->addr);
                client->fd = accept(sockfd, &client->addr, &client->addrlen);

                if (client->fd == -1) {
                    perror("Error on accepting client.\n");
                    exit(EXIT_FAILURE);
                }

                client->type = 2;

                EV_SET(&changes[1], client->fd, EVFILT_READ, 
                        EV_ADD | EV_ENABLE, 0, 0, client);
            } else if (events[i].udata) {
                struct client_s * client = events[i].udata;

                if (client->type == 2) {
                    /* NUL-terminate before printing with %s; recv() does
                     * not terminate the buffer. */
                    ssize_t got = recv(client->fd, buffer, 1023, 0);
                    if (got > 0) {
                        buffer[got] = '\0';
                        printf("client says: %s\n", buffer);
                    }
                }
            }
        }
    }

    close(sockfd);

    freeaddrinfo(addr);

    return EXIT_SUCCESS;
}
Exemple #23
0
/*
 * Media-server daemon entry point: parses command-line options, loads the
 * config file, initializes logging/ffmpeg/libgcrypt, daemonizes, starts
 * the subsystem threads (mDNS, database, file scanner, player, HTTPd,
 * Remote pairing), wires signal handling into libevent (signalfd on Linux,
 * an EVFILT_SIGNAL kqueue on FreeBSD) and runs the event loop.  Teardown
 * uses a fall-through goto ladder so each label undoes everything started
 * before its corresponding failure point.
 */
int
main(int argc, char **argv)
{
  int option;
  char *configfile;
  int background;
  int mdns_no_rsp;
  int mdns_no_daap;
  int loglevel;
  char *logdomains;
  char *logfile;
  char *ffid;
  char *pidfile;
  const char *gcry_version;
  sigset_t sigs;
  int sigfd;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  struct kevent ke_sigs[4];
#endif
  int ret;

  struct option option_map[] =
    {
      { "ffid",         1, NULL, 'b' },
      { "debug",        1, NULL, 'd' },
      { "logdomains",   1, NULL, 'D' },
      { "foreground",   0, NULL, 'f' },
      { "config",       1, NULL, 'c' },
      { "pidfile",      1, NULL, 'P' },
      { "version",      0, NULL, 'v' },

      { "mdns-no-rsp",  0, NULL, 512 },
      { "mdns-no-daap", 0, NULL, 513 },

      { NULL,           0, NULL, 0 }
    };

  configfile = CONFFILE;
  pidfile = PIDFILE;
  loglevel = -1;
  logdomains = NULL;
  logfile = NULL;
  background = 1;
  ffid = NULL;
  mdns_no_rsp = 0;
  mdns_no_daap = 0;

  while ((option = getopt_long(argc, argv, "D:d:c:P:fb:v", option_map, NULL)) != -1)
    {
      switch (option)
	{
	  case 512:
	    mdns_no_rsp = 1;
	    break;

	  case 513:
	    mdns_no_daap = 1;
	    break;

	  case 'b':
            ffid = optarg;
            break;

	  case 'd':
	    ret = safe_atoi32(optarg, &option);
	    if (ret < 0)
	      fprintf(stderr, "Error: loglevel must be an integer in '-d %s'\n", optarg);
	    else
	      loglevel = option;
            break;

	  case 'D':
	    logdomains = optarg;
            break;

          case 'f':
            background = 0;
            break;

          case 'c':
            configfile = optarg;
            break;

          case 'P':
	    pidfile = optarg;
            break;

          case 'v':
	    version();
            return EXIT_SUCCESS;
            break;

          default:
            usage(argv[0]);
            return EXIT_FAILURE;
            break;
        }
    }

  /* Bootstrap logging with defaults so config-load errors are reported. */
  ret = logger_init(NULL, NULL, (loglevel < 0) ? E_LOG : loglevel);
  if (ret != 0)
    {
      fprintf(stderr, "Could not initialize log facility\n");

      return EXIT_FAILURE;
    }

  ret = conffile_load(configfile);
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Config file errors; please fix your config\n");

      logger_deinit();
      return EXIT_FAILURE;
    }

  logger_deinit();

  /* Reinit log facility with configfile values */
  if (loglevel < 0)
    loglevel = cfg_getint(cfg_getsec(cfg, "general"), "loglevel");

  logfile = cfg_getstr(cfg_getsec(cfg, "general"), "logfile");

  ret = logger_init(logfile, logdomains, loglevel);
  if (ret != 0)
    {
      fprintf(stderr, "Could not reinitialize log facility with config file settings\n");

      conffile_unload();
      return EXIT_FAILURE;
    }

  /* Set up libevent logging callback */
  event_set_log_callback(logger_libevent);

  DPRINTF(E_LOG, L_MAIN, "Forked Media Server Version %s taking off\n", VERSION);

  /* Initialize ffmpeg */
  avcodec_init();

  ret = av_lockmgr_register(ffmpeg_lockmgr);
  if (ret < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Could not register ffmpeg lock manager callback\n");

      ret = EXIT_FAILURE;
      goto ffmpeg_init_fail;
    }

  av_register_all();
  av_log_set_callback(logger_ffmpeg);
#if LIBAVFORMAT_VERSION_MAJOR < 53
  register_ffmpeg_evbuffer_url_protocol();
#endif

  /* Initialize libgcrypt */
  gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);

  gcry_version = gcry_check_version(GCRYPT_VERSION);
  if (!gcry_version)
    {
      DPRINTF(E_FATAL, L_MAIN, "libgcrypt version mismatch\n");

      ret = EXIT_FAILURE;
      goto gcrypt_init_fail;
    }

  /* We aren't handling anything sensitive, so give up on secure
   * memory, which is a scarce system resource.
   */
  gcry_control(GCRYCTL_DISABLE_SECMEM, 0);

  gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);

  DPRINTF(E_DBG, L_MAIN, "Initialized with gcrypt %s\n", gcry_version);

  /* Block signals for all threads except the main one */
  sigemptyset(&sigs);
  sigaddset(&sigs, SIGINT);
  sigaddset(&sigs, SIGHUP);
  sigaddset(&sigs, SIGCHLD);
  sigaddset(&sigs, SIGTERM);
  sigaddset(&sigs, SIGPIPE);
  ret = pthread_sigmask(SIG_BLOCK, &sigs, NULL);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_MAIN, "Error setting signal set\n");

      ret = EXIT_FAILURE;
      goto signal_block_fail;
    }

  /* Daemonize and drop privileges */
  ret = daemonize(background, pidfile);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_MAIN, "Could not initialize server\n");

      ret = EXIT_FAILURE;
      goto daemon_fail;
    }

  /* Initialize libevent (after forking) */
  evbase_main = event_init();

  DPRINTF(E_LOG, L_MAIN, "mDNS init\n");
  ret = mdns_init();
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "mDNS init failed\n");

      ret = EXIT_FAILURE;
      goto mdns_fail;
    }

  /* Initialize the database before starting */
  DPRINTF(E_INFO, L_MAIN, "Initializing database\n");
  ret = db_init();
  if (ret < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Database init failed\n");

      ret = EXIT_FAILURE;
      goto db_fail;
    }

  /* Open a DB connection for the main thread */
  ret = db_perthread_init();
  if (ret < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Could not perform perthread DB init for main\n");

      ret = EXIT_FAILURE;
      goto db_fail;
    }

  /* Spawn file scanner thread */
  ret = filescanner_init();
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "File scanner thread failed to start\n");

      ret = EXIT_FAILURE;
      goto filescanner_fail;
    }

  /* Spawn player thread */
  ret = player_init();
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Player thread failed to start\n");

      ret = EXIT_FAILURE;
      goto player_fail;
    }

  /* Spawn HTTPd thread */
  ret = httpd_init();
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "HTTPd thread failed to start\n");

      ret = EXIT_FAILURE;
      goto httpd_fail;
    }

  /* Start Remote pairing service */
  ret = remote_pairing_init();
  if (ret != 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Remote pairing service failed to start\n");

      ret = EXIT_FAILURE;
      goto remote_fail;
    }

  /* Register mDNS services */
  ret = register_services(ffid, mdns_no_rsp, mdns_no_daap);
  if (ret < 0)
    {
      ret = EXIT_FAILURE;
      goto mdns_reg_fail;
    }

#if defined(__linux__)
  /* Set up signal fd */
  sigfd = signalfd(-1, &sigs, SFD_NONBLOCK | SFD_CLOEXEC);
  if (sigfd < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Could not setup signalfd: %s\n", strerror(errno));

      ret = EXIT_FAILURE;
      goto signalfd_fail;
    }

  event_set(&sig_event, sigfd, EV_READ, signal_signalfd_cb, NULL);

#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
  /* No signalfd on FreeBSD: deliver the blocked signals via kqueue. */
  sigfd = kqueue();
  if (sigfd < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Could not setup kqueue: %s\n", strerror(errno));

      ret = EXIT_FAILURE;
      goto signalfd_fail;
    }

  EV_SET(&ke_sigs[0], SIGINT, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
  EV_SET(&ke_sigs[1], SIGTERM, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
  EV_SET(&ke_sigs[2], SIGHUP, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);
  EV_SET(&ke_sigs[3], SIGCHLD, EVFILT_SIGNAL, EV_ADD, 0, 0, NULL);

  ret = kevent(sigfd, ke_sigs, 4, NULL, 0, NULL);
  if (ret < 0)
    {
      DPRINTF(E_FATAL, L_MAIN, "Could not register signal events: %s\n", strerror(errno));

      ret = EXIT_FAILURE;
      goto signalfd_fail;
    }

  event_set(&sig_event, sigfd, EV_READ, signal_kqueue_cb, NULL);
#endif

  event_base_set(evbase_main, &sig_event);
  event_add(&sig_event, NULL);

  /* Run the loop */
  event_base_dispatch(evbase_main);

  DPRINTF(E_LOG, L_MAIN, "Stopping gracefully\n");
  ret = EXIT_SUCCESS;

  /*
   * On a clean shutdown, bring mDNS down first to give a chance
   * to the clients to perform a clean shutdown on their end
   */
  DPRINTF(E_LOG, L_MAIN, "mDNS deinit\n");
  mdns_deinit();

  /* Teardown ladder: labels fall through so everything below the failure
   * point is undone; also reached on clean shutdown from above. */
 signalfd_fail:
 mdns_reg_fail:
  DPRINTF(E_LOG, L_MAIN, "Remote pairing deinit\n");
  remote_pairing_deinit();

 remote_fail:
  DPRINTF(E_LOG, L_MAIN, "HTTPd deinit\n");
  httpd_deinit();

 httpd_fail:
  DPRINTF(E_LOG, L_MAIN, "Player deinit\n");
  player_deinit();

 player_fail:
  DPRINTF(E_LOG, L_MAIN, "File scanner deinit\n");
  filescanner_deinit();

 filescanner_fail:
  DPRINTF(E_LOG, L_MAIN, "Database deinit\n");
  db_perthread_deinit();
  db_deinit();
 db_fail:
  if (ret == EXIT_FAILURE)
    {
      DPRINTF(E_LOG, L_MAIN, "mDNS deinit\n");
      mdns_deinit();
    }

 mdns_fail:
 daemon_fail:
  if (background)
    {
      ret = seteuid(0);
      if (ret < 0)
	DPRINTF(E_LOG, L_MAIN, "seteuid() failed: %s\n", strerror(errno));
      else
	{
	  ret = unlink(pidfile);
	  if (ret < 0)
	    DPRINTF(E_LOG, L_MAIN, "Could not unlink PID file %s: %s\n", pidfile, strerror(errno));
	}
    }

 signal_block_fail:
 gcrypt_init_fail:
  av_lockmgr_register(NULL);

 ffmpeg_init_fail:
  DPRINTF(E_LOG, L_MAIN, "Exiting.\n");
  conffile_unload();
  logger_deinit();

  return ret;
}
/*
 * Fire the spec's EVFILT_USER wakeup event: the NOTE_TRIGGER flag makes
 * any thread blocked in kevent() on spec->kqueue_fd return immediately.
 */
static void eventer_kqueue_impl_wakeup_spec(struct kqueue_spec *spec) {
  struct kevent trigger;
  EV_SET(&trigger, 0, EVFILT_USER, 0, NOTE_FFCOPY | NOTE_TRIGGER | 0x1, 0, NULL);
  (void)kevent(spec->kqueue_fd, &trigger, 1, NULL, 0, NULL);
}
Exemple #25
0
Fichier : bsd.c Projet : acml/kore
/*
 * One iteration of the worker event loop: wait up to 100 ms for kqueue
 * events, accept new connections on the server socket (up to the worker's
 * accept threshold) and drive readable/writable client connections.
 *
 * NOTE(review): events[i].udata is interpreted both as an int * (to read
 * the fd) and as a struct connection * -- this relies on the fd being the
 * first member of struct connection; confirm against the struct layout.
 */
void
kore_platform_event_wait(void)
{
	struct connection	*c;
	struct timespec		timeo;
	int			n, i, *fd;

	timeo.tv_sec = 0;
	timeo.tv_nsec = 100000000;
	n = kevent(kfd, changelist, nchanges, events, event_count, &timeo);
	if (n == -1) {
		if (errno == EINTR)
			return;
		fatal("kevent(): %s", errno_s);
	}

	/* All queued changes were submitted by the call above. */
	nchanges = 0;
	if (n > 0)
		kore_debug("main(): %d sockets available", n);

	for (i = 0; i < n; i++) {
		fd = (int *)events[i].udata;

		if (events[i].flags & EV_EOF ||
		    events[i].flags & EV_ERROR) {
			if (*fd == server.fd)
				fatal("error on server socket");

			c = (struct connection *)events[i].udata;
			kore_connection_disconnect(c);
			continue;
		}

		if (*fd == server.fd) {
			/* Batch-accept until the worker's threshold is hit. */
			while (worker->accepted < worker->accept_treshold) {
				kore_connection_accept(&server, &c);
				if (c == NULL)
					continue;

				worker->accepted++;
				kore_platform_event_schedule(c->fd,
				    EVFILT_READ, EV_ADD, c);
				kore_platform_event_schedule(c->fd,
				    EVFILT_WRITE, EV_ADD | EV_ONESHOT, c);
			}
		} else {
			c = (struct connection *)events[i].udata;
			if (events[i].filter == EVFILT_READ)
				c->flags |= CONN_READ_POSSIBLE;
			if (events[i].filter == EVFILT_WRITE)
				c->flags |= CONN_WRITE_POSSIBLE;

			if (!kore_connection_handle(c)) {
				kore_connection_disconnect(c);
			} else {
				/* Re-arm one-shot write polling while data
				 * remains queued for sending. */
				if (!TAILQ_EMPTY(&(c->send_queue))) {
					kore_platform_event_schedule(c->fd,
					    EVFILT_WRITE, EV_ADD | EV_ONESHOT,
					    c);
				}
			}
		}
	}
}
/* (Re)arm the one-shot EVFILT_USER wakeup filter on this spec's kqueue
 * so a later NOTE_TRIGGER can interrupt a blocked kevent() call.
 * Returns the kevent() result: 0 on success, -1 on error. */
static int eventer_kqueue_impl_register_wakeup(struct kqueue_spec *spec) {
  struct kevent wakeup_ev;

  EV_SET(&wakeup_ev, 0, EVFILT_USER, EV_ADD | EV_ONESHOT, NOTE_FFNOP, 0, NULL);
  mtevL(eventer_deb, "wakeup... reregister\n");
  return kevent(spec->kqueue_fd, &wakeup_ev, 1, NULL, 0, NULL);
}
Exemple #27
0
/*
 * Register write-interest for fd `d' with the kernel.
 *
 * Precondition: the kernel must not already be watching this fd for
 * writes (asserted below).  io_wanted_fds is incremented only when the
 * fd had no kernel-side read interest either, i.e. when it becomes
 * newly watched.  The registration path depends on the compile-time /
 * runtime backend: epoll, kqueue, SIGIO-with-poll, select, or the
 * Windows-native queue.  On exit the entry is marked as wanting and
 * kernel-registered for writes (except the SIGIO poll()==-1 early
 * return, which leaves the flags untouched — preserved behavior).
 */
void
io_wantwrite_really(fd_t d, io_entry* e) {
  int64 newfd;
  assert(!e->kernelwantwrite); /* we should not be here if we already told the kernel we want to write */
  newfd = (!e->kernelwantread);
  io_wanted_fds += newfd;
#ifdef HAVE_EPOLL
  if(io_waitmode == EPOLL) {
    struct epoll_event x;
    byte_zero(&x, sizeof(x)); /* to shut up valgrind */
    x.events = EPOLLOUT;
    if(e->kernelwantread)
      x.events |= EPOLLIN;
    x.data.fd = d;
    /* MOD when the fd is already registered (for reads), ADD otherwise */
    epoll_ctl(io_master, e->kernelwantread ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, d, &x);
  }
#endif

#ifdef HAVE_KQUEUE
  if(io_waitmode == KQUEUE) {
    struct kevent kev;
    struct timespec ts;
    EV_SET(&kev, d, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, 0);
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    /* zero timeout: submit the change without blocking for events */
    kevent(io_master, &kev, 1, 0, 0, &ts);
  }
#endif

#ifdef HAVE_SIGIO
  if(io_waitmode == _SIGIO) {
    struct pollfd p;
    p.fd = d;
    p.events = POLLOUT;
    switch(poll(&p, 1, 0)) {
      case 1: e->canwrite = 1; break;
      case 0: e->canwrite = 0; break;
      case -1: return;
    }
    if(e->canwrite) {
      /* BUGFIX: log the write-queue head (was `first_readable', a
       * copy-paste from the io_wantread path — the message already
       * said "write queue"). */
      debug_printf(("io_wantwrite: enqueueing %lld in normal write queue before %ld\n", d, first_writeable));
      e->next_write = first_writeable;
      first_writeable = d;
    }
  }
#endif

#ifdef USE_SELECT

#elif WINDOWS_NATIVE
  printf("e->wantwrite == %d\n", e->wantwrite);
  if(!e->wantwrite) {
    e->next_write = first_writeable;
    e->canwrite = 1;
    first_writeable = d;
    debug_printf(("queueing write, setting first_writeable to %ld\n", (long)d));
  }
#endif
  e->wantwrite = 1;
  e->kernelwantwrite = 1;
}
/*
 * Main kqueue-backed eventer loop; never returns.  Each iteration
 * dispatches timed events (which also computes a sleep budget),
 * processes cross-thread and recurrent work, then blocks in kevent()
 * and fans the returned fd events out to their registered callbacks.
 */
static int eventer_kqueue_impl_loop() {
  /* Adaptive idle backoff: grows on consecutive timeouts, reset to zero
   * whenever real fd activity is seen. */
  struct timeval __dyna_sleep = { 0, 0 };
  KQUEUE_DECL;
  KQUEUE_SETUP(NULL);

  /* Arm the EVFILT_USER wakeup so other threads can interrupt kevent(). */
  if(eventer_kqueue_impl_register_wakeup(kqs) == -1) {
    mtevFatal(mtev_error, "error in eventer_kqueue_impl_loop: could not eventer_kqueue_impl_register_wakeup\n");
  }

  while(1) {
    struct timeval __now, __sleeptime;
    struct timespec __kqueue_sleeptime;
    int fd_cnt = 0;

    /* Cap the backoff at the configured maximum. */
    if(compare_timeval(eventer_max_sleeptime, __dyna_sleep) < 0)
      __dyna_sleep = eventer_max_sleeptime;

    __sleeptime = __dyna_sleep;

    eventer_dispatch_timed(&__now, &__sleeptime);

    /* Never sleep longer than the current backoff allows. */
    if(compare_timeval(__sleeptime, __dyna_sleep) > 0)
      __sleeptime = __dyna_sleep;

    /* Handle cross_thread dispatches */
    eventer_cross_thread_process();

    /* Handle recurrent events */
    eventer_dispatch_recurrent(&__now);

    /* Now we move on to our fd-based events */
    __kqueue_sleeptime.tv_sec = __sleeptime.tv_sec;
    __kqueue_sleeptime.tv_nsec = __sleeptime.tv_usec * 1000;
    /* ke_vec serves as both the changelist in and the event list out. */
    fd_cnt = kevent(kqs->kqueue_fd, ke_vec, ke_vec_used,
                    ke_vec, ke_vec_a,
                    &__kqueue_sleeptime);
    kqs->wakeup_notify = 0;
    if(fd_cnt > 0 || ke_vec_used)
      mtevLT(eventer_deb, &__now, "[t@%llx] kevent(%d, [...], %d) => %d\n", (vpsized_int)pthread_self(), kqs->kqueue_fd, ke_vec_used, fd_cnt);
    ke_vec_used = 0;
    if(fd_cnt < 0) {
      mtevLT(eventer_err, &__now, "kevent(s/%d): %s\n", kqs->kqueue_fd, strerror(errno));
    }
    else if(fd_cnt == 0 ||
            (fd_cnt == 1 && ke_vec[0].filter == EVFILT_USER)) {
      /* timeout */
      /* A lone EVFILT_USER event means the one-shot wakeup fired with
       * no fd activity; re-arm it and back off a little more. */
      if(fd_cnt) eventer_kqueue_impl_register_wakeup(kqs);
      add_timeval(__dyna_sleep, __dyna_increment, &__dyna_sleep);
    }
    else {
      int idx;
      __dyna_sleep.tv_sec = __dyna_sleep.tv_usec = 0; /* reset */
      /* loop once to clear */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) {
          /* One-shot wakeup fired; re-arm it for next time. */
          eventer_kqueue_impl_register_wakeup(kqs);
          continue;
        }
        masks[ke->ident] = 0;
      }
      /* Loop again to aggregate */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        ke = &ke_vec[idx];
        if(ke->flags & EV_ERROR) continue;
        if(ke->filter == EVFILT_USER) continue;
        if(ke->filter == EVFILT_READ) masks[ke->ident] |= EVENTER_READ;
        if(ke->filter == EVFILT_WRITE) masks[ke->ident] |= EVENTER_WRITE;
      }
      /* Loop a last time to process */
      for(idx = 0; idx < fd_cnt; idx++) {
        struct kevent *ke;
        eventer_t e;
        int fd;

        ke = &ke_vec[idx];
        if(ke->filter == EVFILT_USER) continue;
        if(ke->flags & EV_ERROR) {
          /* EBADF/ENOENT just mean the fd was closed or deregistered
           * between batching the change and this kevent() call. */
          if(ke->data != EBADF && ke->data != ENOENT)
            mtevLT(eventer_err, &__now, "error [%d]: %s\n",
                   (int)ke->ident, strerror(ke->data));
          continue;
        }
        mtevAssert((vpsized_int)ke->udata == (vpsized_int)ke->ident);
        fd = ke->ident;
        e = master_fds[fd].e;
        /* If we've seen this fd, don't callback twice */
        if(!masks[fd]) continue;
        /* It's possible that someone removed the event and freed it
         * before we got here.
         */
        if(e) eventer_kqueue_impl_trigger(e, masks[fd]);
        masks[fd] = 0; /* indicates we've processed this fd */
      }
    }
  }
  /* NOTREACHED */
  return 0;
}
Exemple #29
0
/* Block (NULL timeout = indefinitely) until kernel events arrive on the
 * poller's kqueue.  Fills events_queue with up to events_queue_size
 * entries and returns the kevent() result: the number of events
 * received, or -1 on error. */
int gas_wait_event (GAS_POLLER_INFO *pi, gas_poll_event *events_queue, int events_queue_size) {
	int nevents;

	nevents = kevent(pi->poll_handler, NULL, 0, events_queue, events_queue_size, NULL);
	return nevents;
}
Exemple #30
0
/*
 * Probe whether `fd' can be watched with kqueue; some files under /dev
 * on OS X cannot.  When kqueue rejects the fd (EV_ERROR with EINVAL),
 * install a fallback that runs select(2) for this fd in a dedicated
 * thread, bridged back to the loop through an async handle and a fake
 * socketpair-style fd.
 *
 * Returns 0 when the select fallback was installed on `stream', or -1
 * when kqueue handles the fd fine or the fallback could not be set up.
 * Aborts on kqueue()/malloc() failure (matching surrounding policy).
 */
int uv__stream_try_select(uv_stream_t* stream, int fd) {
  int kq;
  int ret;
  struct kevent filter[1];
  struct kevent events[1];
  struct timespec timeout;
  uv__stream_select_t* s;

  kq = kqueue();
  if (kq < 0) {
    fprintf(stderr, "(libuv) Failed to create kqueue (%d)\n", errno);
    abort();
  }

  EV_SET(&filter[0], fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);

  /* Use small timeout, because we only want to capture EINVALs */
  timeout.tv_sec = 0;
  timeout.tv_nsec = 1;

  ret = kevent(kq, filter, 1, events, 1, &timeout);
  close(kq); /* the probe kqueue is no longer needed either way */
  if (ret < 1) return -1;

  /* BUGFIX: was `EV_ERROR_ORIG', which is not a kevent flag.  Proceed
   * with the fallback only when kqueue reported EINVAL for this fd. */
  if ((events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL) {
    return -1;
  }

  /* At this point we definitely know that this fd won't work with kqueue */
  s = malloc(sizeof(*s));
  if (s == NULL) {
    /* TODO: Return error */
    abort();
  }

  if (uv_async_init(stream->loop,
                    &s->async,
                    uv__stream_osx_select_cb)) {
    free(s); /* BUGFIX: don't leak `s' when async init fails */
    return -1;
  }
  s->async.flags |= UV__HANDLE_INTERNAL;
  uv__handle_unref((uv_handle_t*) &s->async);

  if (uv_sem_init(&s->sem, 0)) goto fatal1;
  if (uv_mutex_init(&s->mutex)) goto fatal2;

  /* Create fake fd for io watcher */
  s->fake_fd = socket(AF_UNIX, SOCK_STREAM, 0);
  if (s->fake_fd == -1) goto fatal3;

  if (uv_thread_create(&s->thread, uv__stream_osx_select, stream)) {
    goto fatal4;
  }

  s->stream = stream;
  stream->select = s;

  return 0;

  /* Unwind in reverse order of acquisition. */
fatal4:
  close(s->fake_fd);
fatal3:
  uv_mutex_destroy(&s->mutex);
fatal2:
  uv_sem_destroy(&s->sem);
fatal1:
  uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);

  free(s);
  return -1;
}