Example 1
void epoll_pwait_test()
{
    auto start = std::chrono::steady_clock::now();
    {
        struct epoll_event events[MAX_EVENTS];
        struct epoll_event ev;
        int timeout;
        int retval;

        int epollfd = epoll_create1(0);
        if (epollfd == -1)
        {
            perror("epoll_create1");
            return;
        }

        /* Watch stdin (fd 0) to see when it has input. */
        ev.events = EPOLLIN;
        ev.data.fd = 0;

        if (epoll_ctl(epollfd, EPOLL_CTL_ADD, 0, &ev) == -1)
        {
            perror("epoll_ctl");
            close(epollfd);
            return;
        }

        /* Wait up to five seconds; a NULL sigmask leaves the signal mask untouched. */
        timeout = 5000;

        retval = epoll_pwait(epollfd, events, MAX_EVENTS, timeout, NULL);
        if (retval == -1)
            perror("epoll_pwait");

        close(epollfd);
    }
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<double> duration = end - start;

    {
        std::lock_guard<std::mutex> lock(s_mutex);
        std::cout << __func__ << ": " << duration.count() << std::endl;
    }
}
Example 2
int Epoll_pwait(int epfd, struct epoll_event *events,
                      int maxevents, int timeout,
                      const sigset_t *sigmask)
{
  int rc;
  if((rc = epoll_pwait(epfd,events,maxevents,timeout,sigmask)) == -1)
    unix_error("Epoll_wait error");
  return rc;
}
Example 3
static int epoll_pwait_loop(void)
{
	int i;

	/* Should fail NR_ITERS times */
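	/* Each iteration passes an invalid (negative) epoll fd, so the call fails immediately with EBADF. */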
	for (i = 0; i < NR_ITERS; i++)
		epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
	return 0;
}
Example 4
const EPollIterable& EPoll::wait(int timeout) {
    sigset_t sigset;
    sigemptyset(&sigset);
    event_count = epoll_pwait(efd, &events[0], events.size(), timeout, &sigset);
    if(event_count < 0) {
        if(errno == EINTR)
            event_count = 0;
        else
            errno_to_exception();
    }
    return iterable;
}
Example 5
int main (void)
{
#if defined(HAVE_SIGNALFD) && defined(HAVE_EVENTFD) \
    && defined(HAVE_EVENTFD_READ) && defined(HAVE_PPOLL)
  {
    sigset_t mask;
    int fd, fd2;
    eventfd_t ev;
    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
    struct pollfd pfd[2];

    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);
    fd = signalfd (-1, &mask, 0);
    sigaddset (&mask, SIGUSR2);
    fd = signalfd (fd, &mask, 0);
    fd2 = eventfd (5, 0);
    eventfd_read (fd2, &ev);
    pfd[0].fd = fd;
    pfd[0].events = POLLIN|POLLOUT;
    pfd[1].fd = fd2;
    pfd[1].events = POLLIN|POLLOUT;
    ppoll (pfd, 2, &ts, &mask);
  }
#endif

#if defined(HAVE_UTIMENSAT)
  unlink("/tmp/valgrind-utimensat-test");
  close (creat ("/tmp/valgrind-utimensat-test", S_IRUSR | S_IWUSR));
  {
    struct timespec ts2[2] = { [0].tv_sec = 10000000, [1].tv_sec = 20000000 };
    utimensat (AT_FDCWD, "/tmp/valgrind-utimensat-test", ts2, 0);
  }
  unlink("/tmp/valgrind-utimensat-test");
#endif

#if defined(HAVE_EPOLL_CREATE) && defined(HAVE_EPOLL_PWAIT)
  {
    int fd3;
    struct epoll_event evs[10];
    sigset_t mask;

    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);
    sigaddset (&mask, SIGUSR2);
    fd3 = epoll_create (10);
    epoll_pwait (fd3, evs, 10, 0, &mask);
  }
#endif

  return 0;
}
Example 6
int pty_exec(int use_shell, char * const* argv){
	int timeout = 50;
	char buf[BUFSIZ];
	pid_t pid;
	int master_fd, slave_fd, epfd, nfds;
	int ret,flags, status;
	struct epoll_event ev, events[1];
	ssize_t nread;

	sigset_t mask;
	sigemptyset(&mask);
	sigaddset(&mask, SIGCHLD);

	ret = openpty(&master_fd, &slave_fd, NULL, NULL, NULL);	
	if ( ret == -1){
		perror("openpty");
		return 1;
	}

	//fork to exec cmd and monitor output
	pid = fork();
	if ( pid > 0 ){
		//***parent: read and print child output***
		close(slave_fd);
		//handle child exit
		signal(SIGCHLD, child_handler);

		//set pty fd to non blocking mode
		flags = fcntl(master_fd, F_GETFL, 0);
		fcntl(master_fd, F_SETFL, flags | O_NONBLOCK);

		//read cmd output with epoll
		epfd = epoll_create(1);
		if(epfd == -1 ){
			perror("epoll_create");
			return(1);
		}
		ev.events = EPOLLIN | EPOLLHUP;
		ev.data.fd = master_fd;
		epoll_ctl(epfd, EPOLL_CTL_ADD, master_fd, &ev);

		while(1){
			if(exit_status != -1){
				break; //child exited
			}
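			// mask contains SIGCHLD, so it is blocked while epoll_pwait() sleeps; a child exit is picked up by the exit_status check above after at most the 50 ms timeout.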
			nfds = epoll_pwait(epfd, events, 1, timeout, &mask);
			if(nfds < 0){
				if (errno == EINTR){
					continue; //interrupted by signal etc...
				}
				perror("epoll_wait");
				exit(1);
			}
			if (nfds == 0 ){
				continue; //time out , nothing to read
			}
			if(events[0].events & EPOLLIN){
				nread = read(events[0].data.fd, buf, BUFSIZ);
				if (nread == -1){
					perror("read");
					break;
				}
				write(1, buf, nread);
			}
			if(events[0].events & EPOLLHUP){
				//pty slave closed 
				break;
			}
		}
		if(exit_status == -1){
			//SIGCHLD maybe lost, collect status.
			if (wait(&status) != -1 )
				exit_status = WEXITSTATUS(status);
		}
		return exit_status;
	}else{
		/*child: redirect stdin, stdout, and stderr to slave_fd
		 * I am child of old session, impossible to be session leader.
		 * so when pty is closed, no HUP will be sent to me
		*/
		close(master_fd);
		dup2(slave_fd, 0);
		dup2(slave_fd, 1);
		dup2(slave_fd, 2);
		if(use_shell){
			ret = execlp("sh", "sh", "-c", argv[0], (char *)NULL);

		}else{
			ret = execvp(argv[0], argv);
		}
		if (ret == -1 ){
			perror("exec");
			exit(1);
		}
	}
	return 0;
}
Example 7
void uv__io_poll(uv_loop_t* loop, int timeout) {
  /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
   * effectively infinite on 32-bit architectures.  To avoid blocking
   * indefinitely, we cap the timeout and poll again if necessary.
   *
   * Note that "30 minutes" is a simplification because it depends on
   * the value of CONFIG_HZ.  The magic constant assumes CONFIG_HZ=1200,
   * that being the largest value I have seen in the wild (and only once.)
   */
  static const int max_safe_timeout = 1789569;
  struct epoll_event events[1024];
  struct epoll_event* pe;
  struct epoll_event e;
  int real_timeout;
  QUEUE* q;
  uv__io_t* w;
  sigset_t sigset;
  sigset_t* psigset;
  uint64_t base;
  int have_signals;
  int nevents;
  int count;
  int nfds;
  int fd;
  int op;
  int i;

  if (loop->nfds == 0) {
    assert(QUEUE_EMPTY(&loop->watcher_queue));
    return;
  }

  memset(&e, 0, sizeof(e));

  while (!QUEUE_EMPTY(&loop->watcher_queue)) {
    q = QUEUE_HEAD(&loop->watcher_queue);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);

    w = QUEUE_DATA(q, uv__io_t, watcher_queue);
    assert(w->pevents != 0);
    assert(w->fd >= 0);
    assert(w->fd < (int) loop->nwatchers);

    e.events = w->pevents;
    e.data.fd = w->fd;

    if (w->events == 0)
      op = EPOLL_CTL_ADD;
    else
      op = EPOLL_CTL_MOD;

    /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
     * events, skip the syscall and squelch the events after epoll_wait().
     */
    if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
      if (errno != EEXIST)
        abort();

      assert(op == EPOLL_CTL_ADD);

      /* We've reactivated a file descriptor that's been watched before. */
      if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
        abort();
    }

    w->events = w->pevents;
  }

  psigset = NULL;
  if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
    sigemptyset(&sigset);
    sigaddset(&sigset, SIGPROF);
    psigset = &sigset;
  }

  assert(timeout >= -1);
  base = loop->time;
  count = 48; /* Benchmarks suggest this gives the best throughput. */
  real_timeout = timeout;

  for (;;) {
    /* See the comment for max_safe_timeout for an explanation of why
     * this is necessary.  Executive summary: kernel bug workaround.
     */
    if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
      timeout = max_safe_timeout;

    nfds = epoll_pwait(loop->backend_fd,
                       events,
                       ARRAY_SIZE(events),
                       timeout,
                       psigset);

    /* Update loop->time unconditionally. It's tempting to skip the update when
     * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
     * operating system didn't reschedule our process while in the syscall.
     */
    SAVE_ERRNO(uv__update_time(loop));

    if (nfds == 0) {
      assert(timeout != -1);

      if (timeout == 0)
        return;

      /* We may have been inside the system call for longer than |timeout|
       * milliseconds so we need to update the timestamp to avoid drift.
       */
      goto update_timeout;
    }

    if (nfds == -1) {
      if (errno != EINTR)
        abort();

      if (timeout == -1)
        continue;

      if (timeout == 0)
        return;

      /* Interrupted by a signal. Update timeout and poll again. */
      goto update_timeout;
    }

    have_signals = 0;
    nevents = 0;

    assert(loop->watchers != NULL);
    loop->watchers[loop->nwatchers] = (void*) events;
    loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
    for (i = 0; i < nfds; i++) {
      pe = events + i;
      fd = pe->data.fd;

      /* Skip invalidated events, see uv__platform_invalidate_fd */
      if (fd == -1)
        continue;

      assert(fd >= 0);
      assert((unsigned) fd < loop->nwatchers);

      w = loop->watchers[fd];

      if (w == NULL) {
        /* File descriptor that we've stopped watching, disarm it.
         *
         * Ignore all errors because we may be racing with another thread
         * when the file descriptor is closed.
         */
        epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
        continue;
      }

      /* Give users only events they're interested in. Prevents spurious
       * callbacks when a previous callback invocation in this loop has stopped
       * the current watcher. Also filters out events that the user has not
       * requested us to watch.
       */
      pe->events &= w->pevents | POLLERR | POLLHUP;

      /* Work around an epoll quirk where it sometimes reports just the
       * EPOLLERR or EPOLLHUP event.  In order to force the event loop to
       * move forward, we merge in the read/write events that the watcher
       * is interested in; uv__read() and uv__write() will then deal with
       * the error or hangup in the usual fashion.
       *
       * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
       * reads the available data, calls uv_read_stop(), then sometime later
       * calls uv_read_start() again.  By then, libuv has forgotten about the
       * hangup and the kernel won't report EPOLLIN again because there's
       * nothing left to read.  If anything, libuv is to blame here.  The
       * current hack is just a quick bandaid; to properly fix it, libuv
       * needs to remember the error/hangup event.  We should get that for
       * free when we switch over to edge-triggered I/O.
       */
      if (pe->events == POLLERR || pe->events == POLLHUP)
        pe->events |=
          w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);

      if (pe->events != 0) {
        /* Run signal watchers last.  This also affects child process watchers
         * because those are implemented in terms of signal watchers.
         */
        if (w == &loop->signal_io_watcher)
          have_signals = 1;
        else
          w->cb(loop, w, pe->events);

        nevents++;
      }
    }

    if (have_signals != 0)
      loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);

    loop->watchers[loop->nwatchers] = NULL;
    loop->watchers[loop->nwatchers + 1] = NULL;

    if (have_signals != 0)
      return;  /* Event loop should cycle now so don't poll again. */

    if (nevents != 0) {
      if (nfds == ARRAY_SIZE(events) && --count != 0) {
        /* Poll for more events but don't block this time. */
        timeout = 0;
        continue;
      }
      return;
    }

    if (timeout == 0)
      return;

    if (timeout == -1)
      continue;

update_timeout:
    assert(timeout > 0);

    real_timeout -= (loop->time - base);
    if (real_timeout <= 0)
      return;

    timeout = real_timeout;
  }
}
Example 8
    int listen()
    {
        if(m_flag_init == false)
        {
            return LISTEN_ERR_INIT;
        }

        int result = -1;
        int event_count;
        char err_string_buff[256];
        int err_string_buff_size = 256;
        sigset_t sigs;

        sigfillset(&sigs);
        m_flag_exit = false;
        while(m_flag_exit == false)
        {
            event_count = epoll_pwait(m_epoll_fd, m_events, m_el_count, 1000, &sigs);
            if(event_count == 0)
            {
                usleep(1000);
                continue;
            }
            else if(event_count < 0)
            {
                strerror_r(errno, err_string_buff, err_string_buff_size);
                std::fprintf(m_logfd, "%s\n", err_string_buff);
                m_flag_exit = true;
                continue;
            }

            for(int event_offset = 0; event_offset < event_count; event_offset++)
            {
                wsock *element = (wsock *) m_events[event_offset].data.ptr;
                if(element->get_type() == wsock::SOCK_SERV)
                {

                    if(element->is_v4())
                    {
                        struct sockaddr_in sock_info;
                        // get/assign a client socket
                        int sock_len = sizeof(sock_info);
                        int tmp_sock = accept(element->get_sock(),
                                         (struct sockaddr *) &sock_info, (socklen_t *) &sock_len);
                        if(tmp_sock < 0)
                        {
                            strerror_r(errno, err_string_buff, err_string_buff_size);
                            std::fprintf(m_logfd, "%s\n", err_string_buff);
                            continue;
                        }

                        // Get a socket class from the pool and assign this socket descriptor & sockaddr
                        // (check for an empty pool before calling front()/pop())
                        if(m_sock_pool.empty() == true)
                        {
                            close(tmp_sock);
                            std::fprintf(m_logfd, "empty slot is zero.\n");
                            continue;
                        }
                        wsock *sock = m_sock_pool.front();
                        m_sock_pool.pop();

                        sock->set_sock(tmp_sock, wsock::SOCK_CLNT);
                        sock->set_info_v4(sock_info);

                        // Add an epoll event
                        struct epoll_event ep_event;
                        std::memset(&ep_event, 0, sizeof(ep_event));
                        ep_event.events = EPOLLIN | EPOLLET;
                        ep_event.data.ptr = sock;
                        if(epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, tmp_sock, &ep_event) != 0)
                        {
                            close(tmp_sock);
                            m_sock_pool.push(sock);
                            strerror_r(errno, err_string_buff, err_string_buff_size);
                            std::fprintf(m_logfd, "%s\n", err_string_buff);
                            m_flag_exit = true;
                            continue;
                        }

                        // Push a socket class into current list
                        m_sock_list.push_back(sock);
                        // Notify to observer(s)
                        for(unsigned int i=0; i<m_observer.size(); i++)
                        {
                            m_observer[i]->connected(sock);
                        }
                    }
                } // if(element->get_type() == wsock::SOCK_SERV)
                else if(element->get_type() == wsock::SOCK_CLNT)
                {
                    element->timestamp_update();
                    if(element->is_v4())
                    {
                        for(unsigned int i=0; i<m_observer.size(); i++)
                        {
                            m_observer[i]->ready_to_read(element);
                        }
                    }
                    if(element->is_ended() == true)
                    {
                        element->timestamp_end();
                        for(unsigned int i=0; i<m_observer.size(); i++)
                        {
                            m_observer[i]->disconnected(element);
                        }
                        element->close_sock();
                    }
                }
            }
        }
        return result;
    }
Example 9
static int epoll_process_events(void)
{
    int i, nevents, timeout, tmp_err;
    lts_socket_t *cs;
    uintptr_t instance;
    uint32_t revents;
#ifndef HAVE_FUNCTION_EPOLL_PWAIT
    sigset_t orig_mask;
#endif
    sigset_t sig_mask;

    (void)sigfillset(&sig_mask);
    (void)sigdelset(&sig_mask, SIGALRM); // 允许时钟信号
    cs = lts_timer_heap_min(&lts_timer_heap);
    if (! dlist_empty(&lts_post_list)) {
        timeout = 0;
    } else if (cs) {
        timeout = (int)((cs->timeout - lts_current_time) * 100); // ms
    } else {
        timeout = -1;
    }
#ifndef HAVE_FUNCTION_EPOLL_PWAIT
    sigprocmask(SIG_SETMASK, &sig_mask, &orig_mask);
    nevents = epoll_wait(epfd, buf_epevs, nbuf_epevs, timeout);
    sigprocmask(SIG_SETMASK, &orig_mask, NULL);
#else
    nevents = epoll_pwait(epfd, buf_epevs, nbuf_epevs, timeout, &sig_mask);
#endif
    tmp_err = (-1 == nevents) ? errno : 0;

    // update the cached time
    if (lts_signals_mask & LTS_MASK_SIGALRM) {
        lts_signals_mask &= ~LTS_MASK_SIGALRM;
        lts_update_time();
    }

    // error handling
    if (tmp_err) {
        if (EINTR == tmp_err) { // interrupted by a signal
            return 0;
        } else {
            return -1;
        }
    }

    // dispatch the ready events
    for (i = 0; i < nevents; ++i) {
        cs = (lts_socket_t *)buf_epevs[i].data.ptr;
        instance = ((uintptr_t)cs & (uintptr_t)1);
        cs = (lts_socket_t *)((uintptr_t)cs & (uintptr_t)~1);

        if ((-1 == cs->fd) || (instance != cs->instance)) {
            // stale event
            continue;
        }

        revents = buf_epevs[i].events;
        if ((revents & (EPOLLERR | EPOLLHUP)
            && (revents & (EPOLLIN | EPOLLOUT)) == 0))
        {
            revents |= (EPOLLIN | EPOLLOUT);
        }

        if ((revents & EPOLLIN) && (cs->on_readable)) {
            (*cs->on_readable)(cs);
        }

        if ((revents & EPOLLOUT) && (cs->on_writable)) {
            (*cs->on_writable)(cs);
        }
    }

    // check the timer heap for expired timers
    while ((cs = lts_timer_heap_min(&lts_timer_heap))) {
        if (cs->timeout > lts_current_time) {
            break;
        }
        lts_timer_heap_del(&lts_timer_heap, cs);
        if (cs->on_timeoutable) {
            (*cs->on_timeoutable)(cs);
        }
    }

    return 0;
}
Example 10
int
epoll_wait (int epfd, struct epoll_event *events, int maxevents, int timeout)
{
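  /* A NULL sigmask makes epoll_pwait equivalent to a plain epoll_wait.  */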
  return epoll_pwait (epfd, events, maxevents, timeout, NULL);
}
Example 11
int epoll_wait(int fd, struct epoll_event *ev, int cnt, int to)
{
	return epoll_pwait(fd, ev, cnt, to, NULL);
}
Example 12
int CXSPAsyncSocket::SocketIOEpollWait(int timeout)
{
	while(true)
	{
		pthread_t thrid = pthread_self();
		EpollWaitObject *pEwobj=NULL;
		if(this->m_ThreadId2Ewobj.Find(thrid, pEwobj) != 0)
		{
			pthread_exit((void *)"the map without the thread data");
			break;
		}
		int &epollId = pEwobj->epollId;
		vector<EPOLLEVENT> &vEvents = pEwobj->vEvents;
		volatile int &maxEvent = pEwobj->maxEvent;
		SIGSETT &sigset = pEwobj->sigset;
		int ret = epoll_pwait(epollId, &vEvents[0], maxEvent, timeout, &sigset);
		if(0 == ret) continue;//timeout
		else if(0 > ret)
		{
			err_ret(GetLastError(ERROR_EPOLL_WAIT));
			if(EINTR == errno || EAGAIN == errno)
				continue;
			pthread_exit((void *)"epoll pwait error");
			return -1;
		}
		int &fds = ret;//监听套接子的个数
		for(int i=0; i<fds && i<maxEvent; ++i)
		{
			struct sockaddr addr;
			EPOLLEVENT &event = vEvents[i];
			if(m_socket.sock_fd == event.data.fd)
			{
				int clifd = 0;
				while( 0 < (clifd = AcceptSocket(&addr)) )
				{
					SetAsyncSocket(clifd);
					SocketEpollAdd(clifd, EPOLLET | EPOLLIN);
#ifndef Debug
					printf("Add epoll event %d", clifd);
#endif
				}
				if(-1 == clifd && errno != EAGAIN &&
						ECONNABORTED !=errno && EPROTO!=errno && EINTR != errno)
				{
					err_ret(GetLastError(ERROR_ACCEPT_SOCK));
				}
				continue;
			}
			else if(EPOLLIN & event.events)//read data
			{
				int &fd = event.data.fd;
				string szRecvData;
				szRecvData.resize(4);
				int reLen = 0, offset=0;
				while((reLen=read(fd, &szRecvData[offset], 4)) > 0)
				{
					offset += reLen;
					szRecvData.resize(offset+1024);
				}
				if((reLen < 0 && errno==ECONNRESET)
						|| (0 == reLen) )
				{
					close(fd);
					SocketEpollDel(fd, event.events);
					continue;
				}
				else if(reLen < 0) continue;
				szRecvData.resize(offset);
				this->m_deque.PushBack(szRecvData);
#ifndef Debug
				printf("recv epoll data from %d", fd);
#endif
			}
			else if(EPOLLOUT & event.events )//write data
			{
				int &fd = event.data.fd;
				string szSendData = (char *)event.data.ptr;
				int datalen = szSendData.size(), sendlen = 0, sent = 0;
				while(0 < datalen)
				{
					// resume from the unsent part of the buffer after a partial write
					sendlen = write(fd, szSendData.c_str() + sent, datalen);
					if(sendlen < 0)
					{
						if(-1 == sendlen && errno != EAGAIN)
						{
							err_ret(GetLastError(ERROR_SEND_SOCK));
						}
						break;
					}
					datalen -= sendlen;
					sent += sendlen;
				}
			}
		}
	}
	return 0;
}
Example 13
static void* pfpf_worker(void* _data)
{
	int epoll_count = -1;
	socklen_t restrict_length = 0;
	int client_socket = -1;
	struct sockaddr_in client;
	struct epoll_event client_epoll_event;
	struct epoll_event epoll_events[EPOLL_MAXEVENTS];
	sigset_t epoll_sigmask_new;
	sigset_t epoll_sigmask_old;
	pfpf_worker_context_t* data = _data;

	pfcq_zero(&client, sizeof(struct sockaddr_in));
	pfcq_zero(&client_epoll_event, sizeof(struct epoll_event));
	pfcq_zero(epoll_events, EPOLL_MAXEVENTS * sizeof(struct epoll_event));
	pfcq_zero(&epoll_sigmask_new, sizeof(sigset_t));
	pfcq_zero(&epoll_sigmask_old, sizeof(sigset_t));

	pthread_setname_np(pthread_self(), "worker");

	if (unlikely(!data))
		goto out;

	if (unlikely(sigemptyset(&epoll_sigmask_new) != 0))
		panic("sigemptyset");
	if (unlikely(sigaddset(&epoll_sigmask_new, SIGTERM) != 0))
		panic("sigaddset");
	if (unlikely(sigaddset(&epoll_sigmask_new, SIGINT) != 0))
		panic("sigaddset");
	if (unlikely(pthread_sigmask(SIG_BLOCK, &epoll_sigmask_new, &epoll_sigmask_old) != 0))
		panic("pthread_sigmask");

	for (;;)
	{
		epoll_count = epoll_pwait(data->epoll_fd, epoll_events, EPOLL_MAXEVENTS, -1, &epoll_sigmask_old);

		if (unlikely(epoll_count == -1))
		{
			if (likely(errno == EINTR))
			{
				if (likely(data->parent_pool->should_exit))
				{
					debug("Worker #%d got interrupt signal, attempting to exit gracefully...\n", data->server_socket);
					goto lfree;
				} else
					continue;
			}
			else
			{
				warning("epoll_pwait");
				continue;
			}
		} else
		{
			for (int i = 0; i < epoll_count; i++)
			{
				int cfd = epoll_events[i].data.fd;
				if (likely(epoll_events[i].events & EPOLLIN && !(epoll_events[i].events & EPOLLERR)))
				{
					if (unlikely(cfd == data->server_socket))
					{
						restrict_length = (socklen_t)sizeof(struct sockaddr_in);
						pfcq_zero(&client, sizeof(struct sockaddr_in));
						pfcq_zero(&client_epoll_event, sizeof(struct epoll_event));

						client_socket = accept4(data->server_socket, (struct sockaddr*)&client, &restrict_length, SOCK_NONBLOCK);
						if (unlikely(client_socket < 0))
						{
							if (likely(errno == EAGAIN || errno == EWOULDBLOCK))
								continue;
							else
							{
								warning("accept4");
								continue;
							}
						}
						pfpf_client_context_t* new_pfpf_client_context = pfcq_alloc(sizeof(pfpf_client_context_t));
						new_pfpf_client_context->socket = client_socket;
						data->initializer(&new_pfpf_client_context->data);
						data->clients[client_socket] = new_pfpf_client_context;
						data->clients_count++;

						debug("Accepted client to #%d socket by #%d server\n", client_socket, data->server_socket);
						debug("Total clients of server #%d: %d\n", data->server_socket, data->clients_count);

						client_epoll_event.data.fd = client_socket;
						client_epoll_event.events = EPOLLIN;
						if (unlikely(epoll_ctl(data->epoll_fd, EPOLL_CTL_ADD, client_socket, &client_epoll_event) == -1))
							panic("epoll_ctl");
					} else
					{
						pfpf_client_context_t* current_pfpf_client_context = data->clients[cfd];

						current_pfpf_client_context->bytes_read = read(current_pfpf_client_context->socket, current_pfpf_client_context->buffer, NET_CHUNK_SIZE);
						if (likely(current_pfpf_client_context->bytes_read > 0))
						{
							data->handler(current_pfpf_client_context);
							if (current_pfpf_client_context->should_close)
								goto should_close;
						} else
						{
should_close:
							debug("Saying good bye to socket #%d from server #%d\n", current_pfpf_client_context->socket, data->server_socket);
							if (unlikely(epoll_ctl(data->epoll_fd, EPOLL_CTL_DEL, current_pfpf_client_context->socket, NULL) == -1))
								panic("epoll_ctl");
							if (unlikely(close(current_pfpf_client_context->socket) == -1))
								warning("close");
							if (likely(current_pfpf_client_context->data))
							{
								data->finalizer(current_pfpf_client_context->data);
								pfcq_free(current_pfpf_client_context->data);
							}
							pfcq_free(current_pfpf_client_context);
							data->clients[cfd] = NULL;
							data->clients_count--;
						}
					}
				} else
				{
					warning("epoll_wait");
					continue;
				}
			}
		}
	}

lfree:
	debug("Cleaning up #%d server...\n", data->server_socket);

	for (int i = 0; i < data->clients_pool_size; i++)
	{
		if (unlikely(data->clients[i]))
		{
			debug("Detaching client #%d of server #%d...\n", data->clients[i]->socket, data->server_socket);
			close(data->clients[i]->socket);
			debug("Freeing client #%d data of server #%d...\n", data->clients[i]->socket, data->server_socket);
			if (likely(data->clients[i]->data))
			{
				data->finalizer(data->clients[i]->data);
				pfcq_free(data->clients[i]->data);
			}
			pfcq_free(data->clients[i]);
			data->clients_count--;
		}
	}
	debug("Destroying clients list of server #%d...\n", data->server_socket);
	pfcq_free(data->clients);

	close(data->server_socket);
	close(data->epoll_fd);

	debug("Server #%d cleaned up\n", data->server_socket);

out:

	return NULL;
}
Example 14
int server::listenAndserve() {
    char * sfdstr     = getenv("sfd");
    char * epollfdstr = getenv("epollfd");

    //return 0;
    if(sfdstr){
        sfd = atoi(sfdstr);
    }
    if(epollfdstr) {
        //epollfd = atoi(epollfdstr);
    }
    if(sfd == 0) {
        sfd = socket(AF_INET,SOCK_STREAM,0);
        if(sfd < 0) {
            printf("create socket error\n");
            exit(-1);
            //closeProc();
        }
        // switch the listening socket to non-blocking mode
        int flag = fcntl(sfd,F_GETFL,0);
        int result = fcntl(sfd,F_SETFL,flag | O_NONBLOCK);
        struct sockaddr_in addr;
        bzero(&addr,sizeof(addr));
        addr.sin_family      = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port        = htons(10001);
        printf("port is %d\n",addr.sin_port);


        int on;
        on = 1;
        setsockopt(sfd,SOL_SOCKET,SO_REUSEADDR,&on, sizeof(on));

        int bid = bind(sfd,(struct sockaddr*)&addr,sizeof(struct sockaddr));
        if(bid < 0) {

            printf("bind err\n");
            printError(errno,__LINE__);
            exit(-1);
            //closeProc();
        }

        int lister = listen(sfd,10);
        if(lister < 0){
            printError(errno,__LINE__);
            exit(-1);
            //closeProc();
        }else {
            printf("listen sucess\n");
        }

    }

    if(epollfd == 0) {
        epollfd = epoll_create1(0);
        printf("epofd = %d\n",epollfd);

    }
    {
        Connection * sfdConnect = new Connection(sfd,NULL);
        int add = EventProcess(epollfd,sfd,EPOLL_CTL_ADD,EPOLLET|EPOLLIN|EPOLLOUT,sfdConnect);
        if(add < 0) {
            printError(errno,__LINE__);
            exit(-1);
            //closeProc();
        }
    }
    printf("------------------\n");
    logger->Loginfo("sfd = %d epollfd = %d",sfd,epollfd);


    {
        int x = pipe(loopSwitch);
        if(x < 0) {
            printf("create pipe err\n");
        }
        //loopSwitch = open("./loopSwitch",O_CREAT|O_RDWR,0644);
        Connection * sfdConnect = new Connection(loopSwitch[0],NULL);
        int add = EventProcess(epollfd,loopSwitch[0],EPOLL_CTL_ADD,EPOLLET|EPOLLIN|EPOLLOUT,sfdConnect);
//        if(add < 0) {
//            printf("add looperr errno  %d\n",errno);
//        }
        if(add < 0) {
            printError(errno,__LINE__);
            exit(-1);
            //closeProc();
        }
    }

    int subfd = 0;
    struct sockaddr_in remoteaddr;
    socklen_t len = sizeof(remoteaddr);
    struct epoll_event *eventArry = (struct epoll_event*)malloc(sizeof(struct epoll_event) * MAXEVENTS);
    int num = 0,index = 0;
    while(true) {
        bzero(eventArry,sizeof(struct epoll_event) * MAXEVENTS);
        // block SIGUSR1/SIGUSR2 while waiting so they cannot interrupt epoll_pwait
        sigset_t waitmask;
        sigemptyset(&waitmask);
        sigaddset(&waitmask,SIGUSR1);
        sigaddset(&waitmask,SIGUSR2);
        num = epoll_pwait(epollfd,eventArry,MAXEVENTS,-1,&waitmask);
        //printf("wait success num = %d  err  %d pid = %d\n",num,errno,getpid());
        int xx = errno;
        if(num == -1) {
            switch(xx) {
                case EBADF:{
                    break;
                }
                case EFAULT:{
                    break;
                }
                case EINTR:{
                    break;
                }
                case EINVAL:{
                    break;
                }
            }
            continue;
        }

//        EBADF  epfd is not a valid file descriptor.
//
//        EFAULT The memory area pointed to by events is not accessible with write permissions.
//
//        EINTR  The call was interrupted by a signal handler before either (1) any of the
//               requested events occurred or (2) the timeout expired; see signal(7).
//
//        EINVAL epfd is not an epoll file descriptor, or maxevents is less than or equal to zero.
        for(index = 0;index < num; index++) {
            Connection * context = (Connection*)eventArry[index].data.ptr;
            int aFd   = context->fd;
            int subfd = -1;
            if(aFd == sfd) {
                if(accepton) {
                    while((subfd = accept(sfd,(struct sockaddr*)&remoteaddr,&len)) > 0){

                        Connection * connection = new Connection(subfd,&remoteaddr);
                        //printf("172 addr %x\n",connection);
                        EventProcess(epollfd,subfd,EPOLL_CTL_ADD,EPOLLET|EPOLLIN ,connection);
                        //handleConnect(connection);
                        connection->readHandler = &readHandler;
                    }
                }
            }else if( aFd == loopSwitch[0] ||aFd == loopSwitch[1]){
                printf("pipe closed\n");
                return 0;
            } else {
                Connection * connection = (Connection*)eventArry[index].data.ptr;
                //printf("179 addr %x\n",connection);
                if(eventArry[index].events & EPOLLIN) {
                    if(connection->readHandler != NULL){
                        int x = connection->readHandler(connection);
                        if(x == 0) {
                            EventProcess(epollfd,aFd,EPOLL_CTL_DEL,0,NULL);
                            connection->Close();
                            delete(connection);
                        }
                    }

                }

            }
        }

       // subfd = accept(sfd,(struct sockaddr*)&remoteaddr,&len);
        //printf("remoate port %d\n",remoteaddr.sin_port);
        //close(subfd);
    }

}