Example #1
/*
 * If "expired" is true, a sleeper has timed out
 */
void _st_del_sleep_q(st_thread_t *thread, int expired)
{
	st_clist_t *q;
	st_thread_t *t;

	/* Remove from sleep queue */
	ST_ASSERT(thread->flags & _ST_FL_ON_SLEEPQ);
	q = thread->links.next;
	if (q != &_ST_SLEEPQ) {
		if (expired) {
			_ST_SLEEPQMAX -= thread->sleep;
		}
		else {
			t = _ST_THREAD_PTR(q);
			t->sleep += thread->sleep;
		}
	}
	else {
		/*
		 * Check if prev is the beginning of the list; if so,
		 * we are the only element on the list.
		 */
		if (thread->links.prev != &_ST_SLEEPQ)
			_ST_SLEEPQMAX -= thread->sleep;
		else
			_ST_SLEEPQMAX = 0;
	}
	thread->flags &= ~_ST_FL_ON_SLEEPQ;
	ST_REMOVE_LINK(&thread->links);
}
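For reference, the sleep-queue bookkeeping above is driven by the public sleep calls. A minimal usage sketch, assuming the standard st.h API (st_init, st_thread_create, st_usleep, st_thread_join); the sleeper() function is illustrative:

/* Minimal sketch: a thread that parks itself on the sleep queue.
 * st_usleep() puts the caller on the sleep queue; when the timeout
 * expires, the scheduler removes it again (the "expired" case above). */
#include <stdio.h>
#include "st.h"

static void *sleeper(void *arg)
{
    st_usleep(500000);          /* sleep 500 ms */
    printf("woke up\n");
    return NULL;
}

int main(void)
{
    st_thread_t t;

    if (st_init() < 0)
        return 1;
    t = st_thread_create(sleeper, NULL, 1 /* joinable */, 0);
    st_thread_join(t, NULL);
    return 0;
}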
Example #2
_st_stack_t *_st_stack_new(int stack_size)
{
  _st_clist_t *qp;
  _st_stack_t *ts;
  int extra;

  for (qp = _st_free_stacks.next; qp != &_st_free_stacks; qp = qp->next) {
    ts = _ST_THREAD_STACK_PTR(qp);
    // Note: the following block is not executed
    if (ts->stk_size >= stack_size) {
      /* Found a stack that is big enough */
      ST_REMOVE_LINK(&ts->links);
      _st_num_free_stacks--;
      ts->links.next = NULL;
      ts->links.prev = NULL;
      return ts;
    }
  }

  /* Make a new thread stack object */
  if ((ts = (_st_stack_t *)calloc(1, sizeof(_st_stack_t))) == NULL) {
    // Memory allocation failed, bail out
    LOGE("calloc error");
    return NULL;
  }
  extra = _st_randomize_stacks ? _ST_PAGE_SIZE : 0;
  ts->vaddr_size = stack_size + 2*REDZONE + extra;
  ts->vaddr = _st_new_stk_segment(ts->vaddr_size);
  if (!ts->vaddr) {
    free(ts);
    return NULL;
  }
  ts->stk_size = stack_size;
  ts->stk_bottom = ts->vaddr + REDZONE;
  ts->stk_top = ts->stk_bottom + stack_size;

#ifdef DEBUG
  mprotect(ts->vaddr, REDZONE, PROT_NONE);
  mprotect(ts->stk_top + extra, REDZONE, PROT_NONE);
#endif

  if (extra) {
    long offset = (random() % extra) & ~0xf;

    ts->stk_bottom += offset;
    ts->stk_top += offset;
  }

  return ts;
}
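The stack cache above is exercised indirectly through thread creation. A minimal sketch, assuming the public st_thread_create() API; worker() and the 64 KB stack size are illustrative choices:

/* Minimal sketch: request a custom stack size at thread creation.
 * The allocator above first tries to reuse a cached stack whose
 * stk_size is at least the requested size. */
#include "st.h"

static void *worker(void *arg)
{
    /* ... thread body ... */
    return NULL;
}

int main(void)
{
    st_thread_t t;

    if (st_init() < 0)
        return 1;
    /* 0 would mean "use the default stack size"; a non-zero value is
     * passed down to the stack allocator shown above. */
    t = st_thread_create(worker, NULL, 1, 64 * 1024);
    st_thread_join(t, NULL);
    return 0;
}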
Example #3
int st_mutex_lock(_st_mutex_t *lock)
{
  _st_thread_t *me = _ST_CURRENT_THREAD();

  if (me->flags & _ST_FL_INTERRUPT) {
    me->flags &= ~_ST_FL_INTERRUPT;
    errno = EINTR;
    return -1;
  }

  if (lock->owner == NULL) {
    /* Got the mutex */
    lock->owner = me;
    return 0;
  }

  if (lock->owner == me) {
    errno = EDEADLK;
    return -1;
  }

  /* Put caller thread on the mutex's wait queue */
  me->state = _ST_ST_LOCK_WAIT;
  ST_APPEND_LINK(&me->wait_links, &lock->wait_q);

  _ST_SWITCH_CONTEXT(me);

  ST_REMOVE_LINK(&me->wait_links);

  if ((me->flags & _ST_FL_INTERRUPT) && lock->owner != me) {
    me->flags &= ~_ST_FL_INTERRUPT;
    errno = EINTR;
    return -1;
  }

  return 0;
}
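A minimal usage sketch for this mutex primitive, assuming the public st_mutex_* API from st.h; the shared counter is illustrative:

/* Minimal sketch: two threads serializing access to a counter.
 * st_mutex_lock() either takes a free mutex immediately or parks the
 * caller on the mutex's wait queue, as in the implementation above. */
#include <stdio.h>
#include "st.h"

static st_mutex_t lock;
static int counter;

static void *bump(void *arg)
{
    if (st_mutex_lock(lock) < 0)    /* can fail with EINTR or EDEADLK */
        return NULL;
    counter++;
    st_mutex_unlock(lock);
    return NULL;
}

int main(void)
{
    st_thread_t t1, t2;

    st_init();
    lock = st_mutex_new();
    t1 = st_thread_create(bump, NULL, 1, 0);
    t2 = st_thread_create(bump, NULL, 1, 0);
    st_thread_join(t1, NULL);
    st_thread_join(t2, NULL);
    printf("counter = %d\n", counter);
    st_mutex_destroy(lock);
    return 0;
}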
Example #4
int st_cond_timedwait(_st_cond_t *cvar, st_utime_t timeout)
{
  _st_thread_t *me = _ST_CURRENT_THREAD();
  int rv;

  if (me->flags & _ST_FL_INTERRUPT) {
    me->flags &= ~_ST_FL_INTERRUPT;
    errno = EINTR;
    return -1;
  }

  /* Put caller thread on the condition variable's wait queue */
  me->state = _ST_ST_COND_WAIT;
  ST_APPEND_LINK(&me->wait_links, &cvar->wait_q);

  if (timeout != ST_UTIME_NO_TIMEOUT)
    _ST_ADD_SLEEPQ(me, timeout);

  _ST_SWITCH_CONTEXT(me);

  ST_REMOVE_LINK(&me->wait_links);
  rv = 0;

  if (me->flags & _ST_FL_TIMEDOUT) {
    me->flags &= ~_ST_FL_TIMEDOUT;
    errno = ETIME;
    rv = -1;
  }
  if (me->flags & _ST_FL_INTERRUPT) {
    me->flags &= ~_ST_FL_INTERRUPT;
    errno = EINTR;
    rv = -1;
  }

  return rv;
}
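A minimal usage sketch for the timed wait, assuming the public st_cond_* API; the producer thread and the 2-second timeout are illustrative:

/* Minimal sketch: wait for a signal with a 2 s timeout.
 * On timeout, st_cond_timedwait() returns -1 with errno set to ETIME,
 * matching the _ST_FL_TIMEDOUT path above. */
#include <errno.h>
#include <stdio.h>
#include "st.h"

static st_cond_t ready;

static void *producer(void *arg)
{
    st_usleep(100000);          /* simulate some work */
    st_cond_signal(ready);      /* wake one waiter */
    return NULL;
}

int main(void)
{
    st_thread_t t;

    st_init();
    ready = st_cond_new();
    t = st_thread_create(producer, NULL, 1, 0);

    if (st_cond_timedwait(ready, 2000000 /* usec */) < 0 && errno == ETIME)
        printf("timed out\n");
    else
        printf("signaled\n");

    st_thread_join(t, NULL);
    st_cond_destroy(ready);
    return 0;
}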
Example #5
ST_HIDDEN void _st_select_find_bad_fd(void)
{
    _st_clist_t *q;
    _st_pollq_t *pq;
    int notify;
    struct pollfd *pds, *epds;
    int pq_max_osfd, osfd;
    short events;
    unsigned long noBlock = 0;

    _ST_SELECT_MAX_OSFD = -1;

    for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
        pq = _ST_POLLQUEUE_PTR(q);
        notify = 0;
        epds = pq->pds + pq->npds;
        pq_max_osfd = -1;
      
        for (pds = pq->pds; pds < epds; pds++) {
            osfd = pds->fd;
            pds->revents = 0;
            if (pds->events == 0)
                continue;
            if (ioctlsocket(fds[osfd], FIONBIO, &noBlock) < 0) {
                pds->revents = POLLNVAL;
                notify = 1;
            }
            if (osfd > pq_max_osfd) {
                pq_max_osfd = osfd;
            }
        }

        if (notify) {
            ST_REMOVE_LINK(&pq->links);
            pq->on_ioq = 0;
            /*
             * Decrement the count of descriptors for each descriptor/event
             * because this I/O request is being removed from the ioq
             */
            for (pds = pq->pds; pds < epds; pds++) {
                osfd = pds->fd;
                events = pds->events;
                if (events & POLLIN) {
                    if (--_ST_SELECT_READ_CNT(osfd) == 0) {
                        FD_CLR(fds[osfd], &_ST_SELECT_READ_SET);
                    }
                }
                if (events & POLLOUT) {
                    if (--_ST_SELECT_WRITE_CNT(osfd) == 0) {
                        FD_CLR(fds[osfd], &_ST_SELECT_WRITE_SET);
                    }
                }
                if (events & POLLPRI) {
                    if (--_ST_SELECT_EXCEP_CNT(osfd) == 0) {
                        FD_CLR(fds[osfd], &_ST_SELECT_EXCEP_SET);
                    }
                }
            }

            if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                _ST_DEL_SLEEPQ(pq->thread);
            pq->thread->state = _ST_ST_RUNNABLE;
            _ST_ADD_RUNQ(pq->thread);
        } else {
            if (_ST_SELECT_MAX_OSFD < pq_max_osfd)
                _ST_SELECT_MAX_OSFD = pq_max_osfd;
        }
    }
}
Example #6
ST_HIDDEN void _st_select_dispatch(void)
{
    struct timeval timeout, *tvp;
    fd_set r, w, e;
    fd_set *rp, *wp, *ep;
    int nfd, pq_max_osfd, osfd;
    _st_clist_t *q;
    st_utime_t min_timeout;
    _st_pollq_t *pq;
    int notify;
    struct pollfd *pds, *epds;
    short events, revents;

    /*
     * Assignment of fd_sets
     */
    r = _ST_SELECT_READ_SET;
    w = _ST_SELECT_WRITE_SET;
    e = _ST_SELECT_EXCEP_SET;

    rp = &r;
    wp = &w;
    ep = &e;

    if (_ST_SLEEPQ == NULL) {
        tvp = NULL;
    } else {
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout.tv_sec  = (int) (min_timeout / 1000000);
        timeout.tv_usec = (int) (min_timeout % 1000000);
        tvp = &timeout;
    }

    /* Check for I/O operations */
    nfd = select(_ST_SELECT_MAX_OSFD + 1, rp, wp, ep, tvp);

    /* Notify threads that are associated with the selected descriptors */
    if (nfd > 0) {
        _ST_SELECT_MAX_OSFD = -1;
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            notify = 0;
            epds = pq->pds + pq->npds;
            pq_max_osfd = -1;
      
            for (pds = pq->pds; pds < epds; pds++) {
                osfd = pds->fd;
                events = pds->events;
                revents = 0;
                if ((events & POLLIN) && FD_ISSET(fds[osfd], rp)) {
                    revents |= POLLIN;
                }
                if ((events & POLLOUT) && FD_ISSET(fds[osfd], wp)) {
                    revents |= POLLOUT;
                }
                if ((events & POLLPRI) && FD_ISSET(fds[osfd], ep)) {
                    revents |= POLLPRI;
                }
                pds->revents = revents;
                if (revents) {
                    notify = 1;
                }
                if (osfd > pq_max_osfd) {
                    pq_max_osfd = osfd;
                }
            }
            if (notify) {
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;
                /*
                 * Decrement the count of descriptors for each descriptor/event
                 * because this I/O request is being removed from the ioq
                 */
                for (pds = pq->pds; pds < epds; pds++) {
                    osfd = pds->fd;
                    events = pds->events;
                    if (events & POLLIN) {
                        if (--_ST_SELECT_READ_CNT(osfd) == 0) {
                            FD_CLR(fds[osfd], &_ST_SELECT_READ_SET);
                        }
                    }
                    if (events & POLLOUT) {
                        if (--_ST_SELECT_WRITE_CNT(osfd) == 0) {
                            FD_CLR(fds[osfd], &_ST_SELECT_WRITE_SET);
                        }
                    }
                    if (events & POLLPRI) {
                        if (--_ST_SELECT_EXCEP_CNT(osfd) == 0) {
                            FD_CLR(fds[osfd], &_ST_SELECT_EXCEP_SET);
                        }
                    }
                }

                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);
            } else {
                if (_ST_SELECT_MAX_OSFD < pq_max_osfd)
                    _ST_SELECT_MAX_OSFD = pq_max_osfd;
            }
        }
    } else if (nfd < 0) {
        /*
         * It can happen when a thread closes file descriptor
         * that is being used by some other thread -- BAD!
         */
        if (errno == EBADF)
            _st_select_find_bad_fd();
    }
}
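The EBADF branch above exists because one thread may close a descriptor that another thread is still waiting on. A minimal sketch of that hazard, assuming the public st_netfd API; shared_nfd is a hypothetical descriptor opened elsewhere:

/* Minimal sketch of the hazard handled above: one thread waits on a
 * descriptor while another closes it. The next select() then fails with
 * EBADF, and _st_select_find_bad_fd() wakes the waiter with POLLNVAL. */
#include "st.h"

static st_netfd_t shared_nfd;   /* hypothetical, opened elsewhere */

static void *reader(void *arg)
{
    char buf[128];

    /* Blocks on the ioq until data arrives -- or the fd goes bad */
    st_read(shared_nfd, buf, sizeof(buf), ST_UTIME_NO_TIMEOUT);
    return NULL;
}

static void *closer(void *arg)
{
    st_usleep(100000);
    st_netfd_close(shared_nfd);  /* pulls the fd out from under reader() */
    return NULL;
}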
Example #7
ST_HIDDEN void _st_kq_dispatch(void)
{
    struct timespec timeout, *tsp;
    struct kevent kev;
    st_utime_t min_timeout;
    _st_clist_t *q;
    _st_pollq_t *pq;
    struct pollfd *pds, *epds;
    int nfd, i, osfd, notify, filter;
    short events, revents;

    if (_ST_SLEEPQ == NULL) {
        tsp = NULL;
    } else {
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout.tv_sec  = (time_t) (min_timeout / 1000000);
        timeout.tv_nsec = (long) ((min_timeout % 1000000) * 1000);
        tsp = &timeout;
    }

 retry_kevent:
    /* Check for I/O operations */
    nfd = kevent(_st_kq_data->kq,
                 _st_kq_data->addlist, _st_kq_data->addlist_cnt,
                 _st_kq_data->evtlist, _st_kq_data->evtlist_size, tsp);

    _st_kq_data->addlist_cnt = 0;

    if (nfd > 0) {
        for (i = 0; i < nfd; i++) {
            osfd = _st_kq_data->evtlist[i].ident;
            filter = _st_kq_data->evtlist[i].filter;

            if (filter == EVFILT_READ) {
                _ST_KQ_REVENTS(osfd) |= POLLIN;
            } else if (filter == EVFILT_WRITE) {
                _ST_KQ_REVENTS(osfd) |= POLLOUT;
            }
            if (_st_kq_data->evtlist[i].flags & EV_ERROR) {
                if (_st_kq_data->evtlist[i].data == EBADF) {
                    _ST_KQ_REVENTS(osfd) |= POLLNVAL;
                } else {
                    _ST_KQ_REVENTS(osfd) |= POLLERR;
                }
            }
        }

        _st_kq_data->dellist_cnt = 0;

        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            notify = 0;
            epds = pq->pds + pq->npds;
                     
            for (pds = pq->pds; pds < epds; pds++) {
                osfd = pds->fd;
                events = pds->events;
                revents = (short)(_ST_KQ_REVENTS(osfd) & ~(POLLIN | POLLOUT));
                if ((events & POLLIN) && (_ST_KQ_REVENTS(osfd) & POLLIN)) {
                    revents |= POLLIN;
                }
                if ((events & POLLOUT) && (_ST_KQ_REVENTS(osfd) & POLLOUT)) {
                    revents |= POLLOUT;
                }
                pds->revents = revents;
                if (revents) {
                    notify = 1;
                }
            }
            if (notify) {
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;
                for (pds = pq->pds; pds < epds; pds++) {
                    osfd = pds->fd;
                    events = pds->events;
                    /*
                     * We set EV_ONESHOT flag so we only need to delete
                     * descriptor if it didn't fire.
                     */
                    if ((events & POLLIN) && (--_ST_KQ_READ_CNT(osfd) == 0) &&
                        ((_ST_KQ_REVENTS(osfd) & POLLIN) == 0)) {
                        memset(&kev, 0, sizeof(kev));
                        kev.ident = osfd;
                        kev.filter = EVFILT_READ;
                        kev.flags = EV_DELETE;
                        _st_kq_dellist_add(&kev);
                    }
                    if ((events & POLLOUT) && (--_ST_KQ_WRITE_CNT(osfd) == 0)
                        && ((_ST_KQ_REVENTS(osfd) & POLLOUT) == 0)) {
                        memset(&kev, 0, sizeof(kev));
                        kev.ident = osfd;
                        kev.filter = EVFILT_WRITE;
                        kev.flags = EV_DELETE;
                        _st_kq_dellist_add(&kev);
                    }
                }

                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);
            }
        }

        if (_st_kq_data->dellist_cnt > 0) {
            int rv;
            do {
                /* This kevent() won't block since result list size is 0 */
                rv = kevent(_st_kq_data->kq, _st_kq_data->dellist,
                            _st_kq_data->dellist_cnt, NULL, 0, NULL);
            } while (rv < 0 && errno == EINTR);
        }

        for (i = 0; i < nfd; i++) {
            osfd = _st_kq_data->evtlist[i].ident;
            _ST_KQ_REVENTS(osfd) = 0;
        }

    } else if (nfd < 0) {
        if (errno == EBADF && _st_kq_data->pid != getpid()) {
            /* We probably forked, reinitialize kqueue */
            if ((_st_kq_data->kq = kqueue()) < 0) {
                /* There is nothing we can do here, will retry later */
                return;
            }
            fcntl(_st_kq_data->kq, F_SETFD, FD_CLOEXEC);
            _st_kq_data->pid = getpid();
            /* Re-register all descriptors on ioq with new kqueue */
            memset(_st_kq_data->fd_data, 0,
                   _st_kq_data->fd_data_size * sizeof(_kq_fd_data_t));
            for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
                pq = _ST_POLLQUEUE_PTR(q);
                _st_kq_pollset_add(pq->pds, pq->npds);
            }
            goto retry_kevent;
        }
    }
}
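The kqueue (and epoll) dispatcher is only used when the alternative event system is selected before initialization. A minimal sketch, assuming the public st_set_eventsys()/st_get_eventsys_name() calls and the ST_EVENTSYS_ALT constant from st.h:

/* Minimal sketch: select the kqueue/epoll backend before st_init().
 * ST_EVENTSYS_ALT typically maps to kqueue on BSD/macOS and epoll on
 * Linux; the call fails if no such backend was compiled in. */
#include <stdio.h>
#include "st.h"

int main(void)
{
    if (st_set_eventsys(ST_EVENTSYS_ALT) < 0)
        perror("st_set_eventsys");

    if (st_init() < 0)
        return 1;

    printf("event system: %s\n", st_get_eventsys_name());
    return 0;
}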
Example #8
ST_HIDDEN void _st_poll_dispatch(void)
{
    int timeout, nfd;
    _st_clist_t *q;
    st_utime_t min_timeout;
    _st_pollq_t *pq;
    struct pollfd *pds, *epds, *pollfds;

    /*
     * Build up the array of struct pollfd to wait on.
     * If existing array is not big enough, release it and allocate a new one.
     */
    ST_ASSERT(_ST_POLL_OSFD_CNT >= 0);
    if (_ST_POLL_OSFD_CNT > _ST_POLLFDS_SIZE) {
        free(_ST_POLLFDS);
        _ST_POLLFDS = (struct pollfd *) malloc((_ST_POLL_OSFD_CNT + 10) *
                                               sizeof(struct pollfd));
        ST_ASSERT(_ST_POLLFDS != NULL);
        _ST_POLLFDS_SIZE = _ST_POLL_OSFD_CNT + 10;
    }
    pollfds = _ST_POLLFDS;

    /* Gather all descriptors into one array */
    for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
        pq = _ST_POLLQUEUE_PTR(q);
        memcpy(pollfds, pq->pds, sizeof(struct pollfd) * pq->npds);
        pollfds += pq->npds;
    }
    ST_ASSERT(pollfds <= _ST_POLLFDS + _ST_POLLFDS_SIZE);

    if (_ST_SLEEPQ == NULL) {
        timeout = -1;
    } else {
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout = (int) (min_timeout / 1000);
    }

    /* Check for I/O operations */
    nfd = poll(_ST_POLLFDS, _ST_POLL_OSFD_CNT, timeout);

    /* Notify threads that are associated with the selected descriptors */
    if (nfd > 0) {
        pollfds = _ST_POLLFDS;
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            epds = pollfds + pq->npds;
            for (pds = pollfds; pds < epds; pds++) {
                if (pds->revents)
                    break;
            }
            if (pds < epds) {
                memcpy(pq->pds, pollfds, sizeof(struct pollfd) * pq->npds);
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;

                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);

                _ST_POLL_OSFD_CNT -= pq->npds;
                ST_ASSERT(_ST_POLL_OSFD_CNT >= 0);
            }
            pollfds = epds;
        }
    }
}
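User code reaches this dispatcher through st_poll(), which appends the caller's pollfd array to the ioq and switches away until the dispatcher reports events or the timeout fires. A minimal sketch, assuming an OS socket sock obtained elsewhere:

/* Minimal sketch: wait up to 1 second for readability of one descriptor.
 * st_poll() places this request on the ioq that the dispatcher scans. */
#include <poll.h>
#include "st.h"

int wait_readable(int sock)
{
    struct pollfd pd;

    pd.fd = sock;
    pd.events = POLLIN;
    pd.revents = 0;

    /* Returns the number of descriptors with events, 0 on timeout,
     * or -1 on error; the timeout is in microseconds. */
    return st_poll(&pd, 1, 1000000);
}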
Example #9
ST_HIDDEN void _st_epoll_dispatch(void)
{
    st_utime_t min_timeout;
    _st_clist_t *q;
    _st_pollq_t *pq;
    struct pollfd *pds, *epds;
    struct epoll_event ev;
    int timeout, nfd, i, osfd, notify;
    int events, op;
    short revents;

    if (_ST_SLEEPQ == NULL) {
        timeout = -1;
    } else {
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout = (int) (min_timeout / 1000);
    }

    if (_st_epoll_data->pid != getpid()) {
        /* We probably forked, reinitialize epoll set */
        close(_st_epoll_data->epfd);
        _st_epoll_data->epfd = epoll_create(_st_epoll_data->fd_hint);
        if (_st_epoll_data->epfd < 0) {
            /* There is nothing we can do here, will retry later */
            return;
        }
        fcntl(_st_epoll_data->epfd, F_SETFD, FD_CLOEXEC);
        _st_epoll_data->pid = getpid();

        /* Put all descriptors on ioq into new epoll set */
        memset(_st_epoll_data->fd_data, 0,
               _st_epoll_data->fd_data_size * sizeof(_epoll_fd_data_t));
        _st_epoll_data->evtlist_cnt = 0;
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            _st_epoll_pollset_add(pq->pds, pq->npds);
        }
    }

    /* Check for I/O operations */
    nfd = epoll_wait(_st_epoll_data->epfd, _st_epoll_data->evtlist,
                     _st_epoll_data->evtlist_size, timeout);

    if (nfd > 0) {
        for (i = 0; i < nfd; i++) {
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = _st_epoll_data->evtlist[i].events;
            if (_ST_EPOLL_REVENTS(osfd) & (EPOLLERR | EPOLLHUP)) {
                /* Also set I/O bits on error */
                _ST_EPOLL_REVENTS(osfd) |= _ST_EPOLL_EVENTS(osfd);
            }
        }

        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            notify = 0;
            epds = pq->pds + pq->npds;

            for (pds = pq->pds; pds < epds; pds++) {
                if (_ST_EPOLL_REVENTS(pds->fd) == 0) {
                    pds->revents = 0;
                    continue;
                }
                osfd = pds->fd;
                events = pds->events;
                revents = 0;
                if ((events & POLLIN) && (_ST_EPOLL_REVENTS(osfd) & EPOLLIN))
                    revents |= POLLIN;
                if ((events & POLLOUT) && (_ST_EPOLL_REVENTS(osfd) & EPOLLOUT))
                    revents |= POLLOUT;
                if ((events & POLLPRI) && (_ST_EPOLL_REVENTS(osfd) & EPOLLPRI))
                    revents |= POLLPRI;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLERR)
                    revents |= POLLERR;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLHUP)
                    revents |= POLLHUP;

                pds->revents = revents;
                if (revents) {
                    notify = 1;
                }
            }
            if (notify) {
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;
                /*
                 * Here we will only delete/modify descriptors that
                 * didn't fire (see comments in _st_epoll_pollset_del()).
                 */
                _st_epoll_pollset_del(pq->pds, pq->npds);

                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);
            }
        }

        for (i = 0; i < nfd; i++) {
            /* Delete/modify descriptors that fired */
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = 0;
            events = _ST_EPOLL_EVENTS(osfd);
            op = events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
            ev.events = events;
            ev.data.fd = osfd;
            if (epoll_ctl(_st_epoll_data->epfd, op, osfd, &ev) == 0 &&
                op == EPOLL_CTL_DEL) {
                _st_epoll_data->evtlist_cnt--;
            }
        }
    }
}
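From the application's point of view none of this is visible: wrapping an OS socket in an st_netfd_t and calling the blocking-style I/O functions is what parks the thread on the ioq serviced here. A minimal sketch, assuming a connected socket fd and the public st_netfd_open_socket()/st_read() calls:

/* Minimal sketch: a blocking-style read that is actually multiplexed
 * through the epoll dispatcher above; gives up after 5 seconds. */
#include <sys/types.h>
#include "st.h"

ssize_t read_some(int fd, char *buf, size_t len)
{
    ssize_t n;
    st_netfd_t nfd;

    nfd = st_netfd_open_socket(fd);   /* puts the fd in non-blocking mode */
    if (nfd == NULL)
        return -1;

    /* If no data is ready, the calling thread is put on the ioq and other
     * threads run until the dispatcher reports POLLIN for this fd. */
    n = st_read(nfd, buf, len, 5000000 /* usec */);

    st_netfd_free(nfd);   /* free the wrapper but keep the OS fd open */
    return n;
}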
Example #10
// epoll event dispatching
ST_HIDDEN void _st_epoll_dispatch(void)
{
    st_utime_t min_timeout;
    _st_clist_t *q;
    _st_pollq_t *pq;
    struct pollfd *pds, *epds;
    struct epoll_event ev;
    int timeout, nfd, i, osfd, notify;
    int events, op;
    short revents;

    if (_ST_SLEEPQ == NULL) {
        timeout = -1;
    } else {
        // How much longer the earliest sleeper still has to sleep (used as the epoll timeout)
        min_timeout = (_ST_SLEEPQ->due <= _ST_LAST_CLOCK) ? 0 :
            (_ST_SLEEPQ->due - _ST_LAST_CLOCK);
        timeout = (int) (min_timeout / 1000);
    }

    // If this is a forked child process, re-create the epoll fd
    if (_st_epoll_data->pid != getpid()) {
        /* We probably forked, reinitialize epoll set */
        close(_st_epoll_data->epfd);
        _st_epoll_data->epfd = epoll_create(_st_epoll_data->fd_hint);
        if (_st_epoll_data->epfd < 0) {
            /* There is nothing we can do here, will retry later */
            return;
        }
        // Close the fd on exec
        fcntl(_st_epoll_data->epfd, F_SETFD, FD_CLOEXEC);
        _st_epoll_data->pid = getpid();

        /* Put all descriptors on ioq into new epoll set */
        memset(_st_epoll_data->fd_data, 0,
               _st_epoll_data->fd_data_size * sizeof(_epoll_fd_data_t));
        _st_epoll_data->evtlist_cnt = 0;
        // Re-add every net fd on the ioq to the event system
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            _st_epoll_pollset_add(pq->pds, pq->npds);
        }
    }

    /* Check for I/O operations */
    nfd = epoll_wait(_st_epoll_data->epfd, _st_epoll_data->evtlist,
                     _st_epoll_data->evtlist_size, timeout);

    if (nfd > 0) {
        for (i = 0; i < nfd; i++) {
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = _st_epoll_data->evtlist[i].events;
            if (_ST_EPOLL_REVENTS(osfd) & (EPOLLERR | EPOLLHUP)) {
                /* Also set I/O bits on error */
                _ST_EPOLL_REVENTS(osfd) |= _ST_EPOLL_EVENTS(osfd);
            }
        }

        // Walk the ioq and take each thread's pending pollfds in turn;
        // pq holds all the descriptors registered by a single thread
        for (q = _ST_IOQ.next; q != &_ST_IOQ; q = q->next) {
            pq = _ST_POLLQUEUE_PTR(q);
            notify = 0;
            epds = pq->pds + pq->npds;

            // Walk this thread's pollfds and collect the events that fired;
            // a request whose pollfds have events is removed from the ioq,
            // while requests with no events keep waiting on the ioq
            for (pds = pq->pds; pds < epds; pds++) {
                if (_ST_EPOLL_REVENTS(pds->fd) == 0) {
                    pds->revents = 0;
                    continue;
                }
                osfd = pds->fd;
                events = pds->events;
                revents = 0;
                if ((events & POLLIN) && (_ST_EPOLL_REVENTS(osfd) & EPOLLIN))
                    revents |= POLLIN;
                if ((events & POLLOUT) && (_ST_EPOLL_REVENTS(osfd) & EPOLLOUT))
                    revents |= POLLOUT;
                if ((events & POLLPRI) && (_ST_EPOLL_REVENTS(osfd) & EPOLLPRI))
                    revents |= POLLPRI;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLERR)
                    revents |= POLLERR;
                if (_ST_EPOLL_REVENTS(osfd) & EPOLLHUP)
                    revents |= POLLHUP;

                pds->revents = revents;
                // Did any event fire?
                if (revents) {
                    notify = 1;
                }
            }
            if (notify) {
                // Remove this request (its descriptors had events) from the ioq
                ST_REMOVE_LINK(&pq->links);
                pq->on_ioq = 0;
                /*
                 * Here we will only delete/modify descriptors that
                 * didn't fire (see comments in _st_epoll_pollset_del()).
                 */
                // Remove this thread's now-handled descriptors from the event system
                _st_epoll_pollset_del(pq->pds, pq->npds);

                // If the thread is on the sleep queue, remove it; then add it to the run queue
                if (pq->thread->flags & _ST_FL_ON_SLEEPQ)
                    _ST_DEL_SLEEPQ(pq->thread);
                pq->thread->state = _ST_ST_RUNNABLE;
                _ST_ADD_RUNQ(pq->thread);
            }
        }

        // Modify the descriptors that still have pending events
        for (i = 0; i < nfd; i++) {
            /* Delete/modify descriptors that fired */
            osfd = _st_epoll_data->evtlist[i].data.fd;
            _ST_EPOLL_REVENTS(osfd) = 0;
            events = _ST_EPOLL_EVENTS(osfd);
            op = events ? EPOLL_CTL_MOD : EPOLL_CTL_DEL;
            ev.events = events;
            ev.data.fd = osfd;
            if (epoll_ctl(_st_epoll_data->epfd, op, osfd, &ev) == 0 &&
                op == EPOLL_CTL_DEL) {
                _st_epoll_data->evtlist_cnt--;
            }
        }
    }
}