/*
 * After the accept mutex is acquired, ngx_accept_mutex_held is set, and the
 * NGX_POST_EVENTS flag is passed to the event backend (e.g. the epoll
 * process-events handler): events received while the lock is held are not
 * handled immediately but queued, and are processed only after the accept
 * mutex has been released, which keeps the lock hold time short.
 */
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* the timer_resolution directive is in use: epoll_wait() may block
         * indefinitely, time is advanced by the interval timer instead */
        timer = NGX_TIMER_INFINITE;
        flags = 0;
    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* the accept mutex avoids the thundering-herd problem: workers take
     * turns accept()ing on the listening fds */
    if (ngx_use_accept_mutex) {
        /*
         * ngx_accept_disabled > 0 means this worker is nearly at capacity:
         * it turns positive once 7/8 of the worker_connections configured in
         * nginx.conf are in use.  Such a busy worker skips one round of
         * taking new connections — a simple form of load balancing.
         */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;
        } else {
            /*
             * When the accept lock is obtained, NGX_POST_EVENTS marks that
             * epoll must defer handling: accept events are stored on the
             * ngx_posted_accept_events list, EPOLLIN/EPOLLOUT events on the
             * ngx_posted_events list, and both are run only after the lock
             * has been released.
             */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* we hold the lock: defer all events */
                flags |= NGX_POST_EVENTS;
            } else {
                /*
                 * We failed to get the lock, so we will not watch the
                 * listening fds.  Cap the epoll_wait() timeout at
                 * ngx_accept_mutex_delay so this worker wakes up soon to
                 * compete for the lock again and new connections are not
                 * left unserved for long.
                 */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* dispatches to the configured event module, e.g.
     * ngx_epoll_process_events(cycle, timer, flags) */
    (void) ngx_process_events(cycle, timer, flags);

    /* milliseconds spent inside ngx_process_events() */
    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* if the ngx_posted_accept_events list is non-empty, accept the queued
     * new connections first */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /*
     * Release the accept mutex only now, not before handling the posted
     * accept events: if it were released earlier, another process could
     * grab the lock, start watching the listening fds, and steal the
     * deferred accept events from us.
     */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* find expired timers, remove them and invoke their handlers */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* finally run the deferred ordinary read/write events */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * One iteration of a worker's event loop: wait for I/O events with the
 * configured backend, run posted accept events, release the accept mutex,
 * fire expired timers, then run posted read/write events.
 */
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  ev_flags;
    ngx_msec_t  wait_timer, spent;

    if (ngx_timer_resolution) {
        /* time is driven by the interval timer; wait without a timeout */
        wait_timer = NGX_TIMER_INFINITE;
        ev_flags = 0;
    } else {
        wait_timer = ngx_event_find_timer();
        ev_flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (wait_timer == NGX_TIMER_INFINITE || wait_timer > 500) {
            wait_timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled > 0) {
            /* overloaded worker: sit out one round of accepting */
            ngx_accept_disabled--;

        } else if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
            return;

        } else if (ngx_accept_mutex_held) {
            /* got the lock: defer all events until it is released */
            ev_flags |= NGX_POST_EVENTS;

        } else if (wait_timer == NGX_TIMER_INFINITE
                   || wait_timer > ngx_accept_mutex_delay)
        {
            /* no lock: wake up soon to compete for it again */
            wait_timer = ngx_accept_mutex_delay;
        }
    }

    spent = ngx_current_msec;

    (void) ngx_process_events(cycle, wait_timer, ev_flags);

    spent = ngx_current_msec - spent;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", spent);

    /* new connections first, while we may still hold the accept mutex */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* at least one millisecond elapsed: expire due timers */
    if (spent) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * epoll event-loop body of an old (0.x era) nginx: expires due timers,
 * optionally takes the accept mutex, calls epoll_wait(), then dispatches or
 * posts the collected read/write events.  Returns NGX_OK or NGX_ERROR.
 */
int ngx_epoll_process_events(ngx_cycle_t *cycle)
{
    int                 events;
    size_t              n;       /* NOTE(review): unused here — likely a leftover */
    ngx_int_t           instance, i;
    ngx_uint_t          lock, accept_lock, expire;
    ngx_err_t           err;
    ngx_log_t          *log;
    ngx_msec_t          timer;
    ngx_event_t        *rev, *wev;
    struct timeval      tv;
    ngx_connection_t   *c;
    ngx_epoch_msec_t    delta;

    /* expire timers until the nearest remaining deadline is in the future */
    for ( ;; ) {
        timer = ngx_event_find_timer();

#if (NGX_THREADS)
        if (timer == NGX_TIMER_ERROR) {
            return NGX_ERROR;
        }

        /* with threads, never sleep longer than 500 ms */
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
            break;
        }
#endif

        if (timer != 0) {
            break;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "epoll expired timer");

        ngx_event_expire_timers((ngx_msec_t)
                                    (ngx_elapsed_msec - ngx_old_elapsed_msec));

        if (ngx_posted_events && ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        }
    }

    /* NGX_TIMER_INFINITE == INFTIM */

    /* expire only when we actually waited with a finite timeout */
    if (timer == NGX_TIMER_INFINITE) {
        expire = 0;
    } else {
        expire = 1;
    }

    ngx_old_elapsed_msec = ngx_elapsed_msec;
    accept_lock = 0;

    if (ngx_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* busy worker: skip one round of accepting (load balancing) */
            ngx_accept_disabled--;
        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            if (ngx_accept_mutex_held) {
                accept_lock = 1;
            } else if (timer == NGX_TIMER_INFINITE
                       || timer > ngx_accept_mutex_delay)
            {
                /* no lock: shorten the wait so we can compete again soon;
                 * the shortened timeout must not expire timers */
                timer = ngx_accept_mutex_delay;
                expire = 0;
            }
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "epoll timer: %d", timer);

    events = epoll_wait(ep, event_list, nevents, timer);

    /* capture errno immediately, before any call can overwrite it */
    if (events == -1) {
        err = ngx_errno;
    } else {
        err = 0;
    }

    ngx_gettimeofday(&tv);
    ngx_time_update(tv.tv_sec);

    /* recompute the elapsed-milliseconds clock and the time we spent waiting */
    delta = ngx_elapsed_msec;
    ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                       + tv.tv_usec / 1000 - ngx_start_msec;

    if (timer != NGX_TIMER_INFINITE) {
        delta = ngx_elapsed_msec - delta;

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "epoll timer: %d, delta: %d", timer, (int) delta);
    } else {
        /* an infinite wait returning zero events should be impossible */
        if (events == 0) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "epoll_wait() returned no events without timeout");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

    if (err) {
        /* EINTR is routine (signals); anything else is alarming */
        ngx_log_error((err == NGX_EINTR) ? NGX_LOG_INFO : NGX_LOG_ALERT,
                      cycle->log, err, "epoll_wait() failed");
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    /* the posted-events queue is shared with worker threads: lock it while
     * we iterate and post */
    if (events > 0) {
        if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
        lock = 1;
    } else {
        lock = 0;
    }

    log = cycle->log;

    for (i = 0; i < events; i++) {
        c = event_list[i].data.ptr;

        /* the low pointer bit carries the connection "instance" flag used
         * to detect stale events */
        instance = (uintptr_t) c & 1;
        c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);

        rev = c->read;

        if (c->fd == -1 || rev->instance != instance) {

            /*
             * the stale event from a file descriptor
             * that was just closed in this iteration
             */

            ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "epoll: stale event " PTR_FMT, c);
            continue;
        }

#if (NGX_DEBUG0)
        log = c->log ? c->log : cycle->log;
#endif

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                       "epoll: fd:%d ev:%04X d:" PTR_FMT,
                       c->fd, event_list[i].events, event_list[i].data);

        if (event_list[i].events & (EPOLLERR|EPOLLHUP)) {
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0,
                           "epoll_wait() error on fd:%d ev:%04X",
                           c->fd, event_list[i].events);
        }

        if (event_list[i].events & ~(EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
            ngx_log_error(NGX_LOG_ALERT, log, 0,
                          "strange epoll_wait() events fd:%d ev:%04X",
                          c->fd, event_list[i].events);
        }

        wev = c->write;

        /* ERR/HUP are folded into both read and write readiness so the
         * handlers discover the error via read()/write() */
        if ((event_list[i].events & (EPOLLOUT|EPOLLERR|EPOLLHUP))
            && wev->active)
        {
            if (ngx_threaded) {
                wev->posted_ready = 1;
                ngx_post_event(wev);
            } else {
                wev->ready = 1;

                if (!ngx_accept_mutex_held) {
                    wev->event_handler(wev);
                } else {
                    /* holding the accept mutex: defer to the posted queue */
                    ngx_post_event(wev);
                }
            }
        }

        /*
         * EPOLLIN must be handled after EPOLLOUT because we use
         * the optimization to avoid the unnecessary mutex locking/unlocking
         * if the accept event is the last one.
         */

        if ((event_list[i].events & (EPOLLIN|EPOLLERR|EPOLLHUP))
            && rev->active)
        {
            if (ngx_threaded && !rev->accept) {
                rev->posted_ready = 1;
                ngx_post_event(rev);
                continue;
            }

            rev->ready = 1;

            if (!ngx_threaded && !ngx_accept_mutex_held) {
                rev->event_handler(rev);

            } else if (!rev->accept) {
                ngx_post_event(rev);

            } else if (ngx_accept_disabled <= 0) {

                /* accept events run outside the posted-events mutex */
                ngx_mutex_unlock(ngx_posted_events_mutex);

                rev->event_handler(rev);

                /* the accept may have pushed us over the 7/8 load mark:
                 * give up the accept mutex right away */
                if (ngx_accept_disabled > 0) {
                    ngx_accept_mutex_unlock();
                    accept_lock = 0;
                }

                /* last event: skip the relock entirely (see comment above) */
                if (i + 1 == events) {
                    lock = 0;
                    break;
                }

                if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
                    if (accept_lock) {
                        ngx_accept_mutex_unlock();
                    }
                    return NGX_ERROR;
                }
            }
        }
    }

    if (accept_lock) {
        ngx_accept_mutex_unlock();
    }

    if (lock) {
        ngx_mutex_unlock(ngx_posted_events_mutex);
    }

    if (expire && delta) {
        ngx_event_expire_timers((ngx_msec_t) delta);
    }

    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle);
        }
    }

    return NGX_OK;
}
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        timer = NGX_TIMER_INFINITE;
        flags = 0;
    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* is the accept mutex in use?  It avoids the thundering herd and at the
     * same time implements load balancing between worker processes */
    if (ngx_use_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* > 0 means this worker has accepted too many connections:
             * give up one chance to compete for the accept mutex */
            ngx_accept_disabled--;
        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* this flag queues all produced events; they are processed
                 * only after the lock has been released */
                flags |= NGX_POST_EVENTS;
            } else {
                /* bound how long we wait before competing for the lock again */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* epoll starts waiting for events */
    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* ngx_posted_accept_events buffers the accept events that epoll
     * collected from the listening sockets */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /* all accept events are handled; if we hold the lock, release it
     * as soon as possible */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* delta is the time the event wait above took; if a whole millisecond
     * passed, check every timer for expiration */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* handle ordinary events (read/write events on connections) */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * Worker event-loop iteration: compete for the accept mutex, wait for
 * events, then drain the posted accept queue, drop the mutex, expire
 * timers, and drain the posted read/write queue.
 */
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_msec_t  poll_timer, elapsed;
    ngx_uint_t  post_flags;

    /* defaults correspond to the timer_resolution case */
    poll_timer = NGX_TIMER_INFINITE;
    post_flags = 0;

    if (!ngx_timer_resolution) {
        poll_timer = ngx_event_find_timer();
        post_flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (poll_timer == NGX_TIMER_INFINITE || poll_timer > 500) {
            poll_timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled <= 0) {

            /* try (without blocking) to become the accepting worker */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* lock acquired: post events instead of running them */
                post_flags |= NGX_POST_EVENTS;

            } else if (poll_timer > ngx_accept_mutex_delay
                       || poll_timer == NGX_TIMER_INFINITE)
            {
                /* lock missed: retry after a short delay at most */
                poll_timer = ngx_accept_mutex_delay;
            }

        } else {
            /* too loaded: skip one chance to accept */
            ngx_accept_disabled--;
        }
    }

    elapsed = ngx_current_msec;

    /* ngx_event_actions.process_events — the backend's wait/dispatch call */
    (void) ngx_process_events(cycle, poll_timer, post_flags);

    elapsed = ngx_current_msec - elapsed;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", elapsed);

    /* posted accept events must run before the accept mutex is released */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (elapsed) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* ordinary connection events, not the accept handlers */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        timer = NGX_TIMER_INFINITE;
        flags = 0;
    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;
        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                flags |= NGX_POST_EVENTS;
            } else {
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /*
     * ngx_process_events expands via
     *     #define ngx_process_events ngx_event_actions.process_events
     * More concretely: running ./configure records the selected event
     * modules; when the epoll module is configured, its initialization
     * (ngx_epoll_init) executes
     *     ngx_event_actions = ngx_epoll_module_ctx.actions;
     * which points ngx_process_events at the epoll module's process_events
     * method.  So with epoll, this call runs ngx_epoll_process_events();
     * see the epoll module for details.
     */
    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
//事件循环核心 void ngx_process_events_and_timers(ngx_cycle_t *cycle) { ngx_uint_t flags; ngx_msec_t timer, delta; if (ngx_timer_resolution) { //当ngx是经过配置的信息,比如100ms timer = NGX_TIMER_INFINITE; //timer无限大 flags = 0; } else { timer = ngx_event_find_timer(); //找到最小的定时时间 flags = NGX_UPDATE_TIME; #if (NGX_THREADS) if (timer == NGX_TIMER_INFINITE || timer > 500) { timer = 500; } #endif } if (ngx_use_accept_mutex) { //如果设置了负载均衡策略 if (ngx_accept_disabled > 0) { //大于0时,已经过载了 ngx_accept_disabled--; } else { if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) { //没有过载,那就去争用锁 return; } if (ngx_accept_mutex_held) { //如果当前拥有锁 flags |= NGX_POST_EVENTS; //加上一个标签,后续处理,先将accept事件放入到一个执行队列,释放锁后才继续执行 } else { if (timer == NGX_TIMER_INFINITE //如果当前没有锁,那就把事件监控机制阻塞点的超时时间限制在一个比较短的范围内,然后继续去争用锁 || timer > ngx_accept_mutex_delay) { timer = ngx_accept_mutex_delay; } } } } delta = ngx_current_msec; (void) ngx_process_events(cycle, timer, flags); //epoll模块中的ngx_epoll_process_events delta = ngx_current_msec - delta; //统计耗时的时间 ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "timer delta: %M", delta); if (ngx_posted_accept_events) { //对队列中accpet的事件的处理 ngx_event_process_posted(cycle, &ngx_posted_accept_events); //暂存accpet事件 } if (ngx_accept_mutex_held) { //释放锁 ngx_shmtx_unlock(&ngx_accept_mutex); } if (delta) { //处理定时器超时的事件 ngx_event_expire_timers(); } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "posted events %p", ngx_posted_events); if (ngx_posted_events) { //处理普通读写事件 if (ngx_threaded) { ngx_wakeup_worker_thread(cycle); } else { ngx_event_process_posted(cycle, &ngx_posted_events); //对于普通事件的处理 } } }
void ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        timer = NGX_TIMER_INFINITE;
        flags = 0;
    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;
#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /*
     * Compete for the accept lock — the part most articles dwell on.
     * It prevents the thundering-herd effect: if several processes
     * accept() on the same sockets, every one of them is woken whenever a
     * client connects, yet only one can actually get the connection, which
     * spikes CPU usage.  References:
     *   http://blog.csdn.net/russell_tao/article/details/7204260
     *   http://tengine.taobao.org/book/chapter_06.html#accept-40
     * The flag below is 1 when there is more than one worker process and
     * accept_mutex is enabled in the configuration file.
     */
    if (ngx_use_accept_mutex) {
        /*
         * ngx_accept_disabled > 0 means the worker is at full load and
         * need not handle new connections.  worker_connections in
         * nginx.conf sets the per-worker connection limit; once 7/8 of it
         * is used, ngx_accept_disabled turns positive: this worker is very
         * busy and stops taking new connections for a while.
         */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;
        } else {
            /*
             * Compete for the accept lock; only one of the workers can win
             * it.  The attempt never blocks — it returns immediately, and
             * on success ngx_accept_mutex_held is set to 1.  Winning the
             * lock means the listening fds are added to this process's
             * epoll set; losing it means they are removed from it.
             */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            /*
             * With the lock held, NGX_POST_EVENTS makes ngx_process_events
             * defer everything: accept events go onto the
             * ngx_posted_accept_events list, EPOLLIN/EPOLLOUT events onto
             * the ngx_posted_events list.
             */
            if (ngx_accept_mutex_held) {
                flags |= NGX_POST_EVENTS;
            } else {
                /*
                 * Without the lock we will not watch the listening fds.
                 * This timer is the epoll_wait timeout; capping it at
                 * ngx_accept_mutex_delay makes epoll_wait return sooner,
                 * so new connections are not left unhandled for long.
                 */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* if the ngx_posted_accept_events list is non-empty, start accepting
     * the new connections */
    if (ngx_posted_accept_events) {
        /* note: the ngx_posted_accept_events pointer itself is updated */
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /* release the lock before handling the EPOLLIN/EPOLLOUT work below */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* a non-zero delta means the clock advanced: check for expired timers */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /*
     * Now handle the normal read/write requests.  They take longer, which
     * is why NGX_POST_EVENTS made ngx_process_events put them on the
     * ngx_posted_events list — delayed until after the lock was released.
     */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
// 等待事件并调用处理事件的函数。 void ngx_process_events_and_timers(ngx_cycle_t *cycle) { ngx_uint_t flags; ngx_msec_t timer, delta; if (ngx_timer_resolution) { timer = NGX_TIMER_INFINITE; flags = 0; } else { timer = ngx_event_find_timer(); flags = NGX_UPDATE_TIME; #if (NGX_THREADS) if (timer == NGX_TIMER_INFINITE || timer > 500) { timer = 500; } #endif } if (ngx_use_accept_mutex) { // 表明需要通过加锁解决惊群问题 if (ngx_accept_disabled > 0) { // 空闲连接只剩下不到总连接数的1/8 ngx_accept_disabled--; } else { if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) { return; } if (ngx_accept_mutex_held) { flags |= NGX_POST_EVENTS; } else { if (timer == NGX_TIMER_INFINITE || timer > ngx_accept_mutex_delay) { timer = ngx_accept_mutex_delay; } } } } delta = ngx_current_msec; // 如果使用epoll作为事件模型,这个函数调用ngx_epoll_process_events() (void) ngx_process_events(cycle, timer, flags); delta = ngx_current_msec - delta; ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "timer delta: %M", delta); // 处理ngx_posted_accept_events事件链表 if (ngx_posted_accept_events) { ngx_event_process_posted(cycle, &ngx_posted_accept_events); } // 释放这个进程的ngx_accept_mutex锁 if (ngx_accept_mutex_held) { ngx_shmtx_unlock(&ngx_accept_mutex); } // delta是处理这次事件的时间,如果达到一毫秒就执行所有过期的定时器事件。 if (delta) { ngx_event_expire_timers(); } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "posted events %p", ngx_posted_events); // 处理ngx_posted_events事件链表 if (ngx_posted_events) { if (ngx_threaded) { ngx_wakeup_worker_thread(cycle); } else { ngx_event_process_posted(cycle, &ngx_posted_events); } } }