/*
 * One iteration of the worker-process event loop (old-threads era variant):
 * waits for I/O events, then dispatches posted accept events, expired
 * timers, and posted read/write events, in that order.
 *
 * cycle - the current nginx cycle (used for logging and event dispatch).
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* cached time is refreshed by the SIGALRM handler, so the poller
         * may block indefinitely */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        /* block no longer than the nearest timer; ask the poller to
         * refresh the cached time itself */
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_THREADS)
        /* cap the wait so worker threads are serviced periodically */
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled > 0) {
            /* this worker is near its connection limit: skip one round of
             * competing for the accept mutex (simple load balancing) */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* holding the mutex: defer all handlers to the posted
                 * queues so the mutex can be released quickly */
                flags |= NGX_POST_EVENTS;

            } else {
                /* lost the race: wake up soon enough to retry the mutex */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* ngx_event_actions.process_events, e.g. ngx_epoll_process_events() */
    (void) ngx_process_events(cycle, timer, flags);

    /* milliseconds spent inside the poller (cached time was updated there) */
    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* handle new connections first ... */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /* ... so the accept mutex can be released as early as possible */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* finally run the ordinary read/write handlers */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * IMPORTANT: core of the worker event loop.
 * Called from ngx_single_process_cycle()/ngx_worker_process_cycle() in
 * ngx_process_cycle.c.  Handles socket read/write events and timer events:
 * takes the accept (load-balancing) mutex to listen on the ports, calls the
 * event module's process_events (e.g. ngx_epoll_process_events) to collect
 * events, then handles expired timers and everything in the posted queues.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    /* ngx_timer_resolution = ccf->timer_resolution (default 0): the
     * granularity at which nginx refreshes its cached time via SIGALRM */
    if (ngx_timer_resolution) {
        /* let the poller wait indefinitely; SIGALRM interrupts it and the
         * signal handler refreshes the cached time */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        /* no timer_resolution configured: use the nearest timer from the
         * red-black tree as the poller timeout.
         *   timer >  0 : milliseconds until the nearest timer fires
         *   timer == NGX_TIMER_INFINITE : tree is empty, no timers
         *   timer == 0 : a timer has already expired; the poller must
         *                return immediately after collecting events */
        timer = ngx_event_find_timer();

        /* NGX_UPDATE_TIME: the poller refreshes the cached time itself */
        flags = NGX_UPDATE_TIME;

#if (NGX_WIN32)

        /* handle signals from master in case of network inactivity */

        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* accept mutex (accept_mutex on): with reuseport (1.9.x) this is 0;
     * since 1.11.3 it is off by default and this whole branch is skipped */
    if (ngx_use_accept_mutex) {

        /* ngx_accept_disabled = connection_n / 8 - free_connection_n:
         * positive when fewer than 1/8 of the connections are free, i.e.
         * this worker is overloaded and should stop accepting for a while */
        if (ngx_accept_disabled > 0) {
            /* decay towards accepting again so the worker is not starved */
            ngx_accept_disabled--;

        } else {
            /* try to take the accept mutex and start listening; internally
             * calls ngx_enable_accept_events/ngx_disable_accept_events */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                /* locking failed outright: skip this iteration entirely */
                return;
            }

            /* ngx_accept_mutex_held tells whether we actually got the lock */
            if (ngx_accept_mutex_held) {
                /* got the lock: post all events to the deferred queues so
                 * the lock can be released quickly for other workers */
                flags |= NGX_POST_EVENTS;

            } else {
                /* did not get the lock: cap the poller timeout at
                 * ngx_accept_mutex_delay (default 500 ms) so we retry the
                 * lock soon; if events arrive earlier the poller returns
                 * immediately anyway */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    /* ngx_accept_mutex_delay = ecf->accept_mutex_delay */
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    /* whether or not we hold the accept mutex, events and timers are
     * processed; holding it merely adds accept events to the mix */

    /* current cached time in milliseconds */
    delta = ngx_current_msec;

    /* #define ngx_process_events ngx_event_actions.process_events —
     * in the epoll module this is ngx_epoll_process_events(): it calls
     * epoll_wait() with `timer` as the timeout, then either runs handlers
     * directly (accept_mutex off) or posts them to the deferred queues */
    (void) ngx_process_events(cycle, timer, flags);

    /* cached time was updated inside ngx_process_events; delta is the
     * number of milliseconds one poller call consumed */
    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* handle posted accept events first (ngx_event_accept; for HTTP this
     * leads to ngx_http_init_connection).  With accept_mutex off or
     * reuseport the queue is empty and this is a no-op. */
    ngx_event_process_posted(cycle, &ngx_posted_accept_events);

    /* accept work is small, so release the mutex as early as possible;
     * other workers wait at most ngx_accept_mutex_delay ms before calling
     * ngx_trylock_accept_mutex again */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* if any time passed, expire timers: timed-out socket handlers may
     * finish requests and close connections here */
    if (delta) {
        ngx_event_expire_timers();
    }

    /* then run the posted ordinary read/write handlers — the bulk of the
     * work, executed after the mutex is released.  With accept_mutex off
     * or reuseport the queue is empty and this is a no-op. */
    ngx_event_process_posted(cycle, &ngx_posted_events);
}
/*
 * Worker event-loop iteration (old-threads variant).
 * Once the accept mutex is obtained, ngx_accept_mutex_held is set and the
 * NGX_POST_EVENTS flag is passed to the poller: events are not handled
 * immediately but queued, and only processed after the accept lock has
 * been released — keeping the lock hold time short.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* timer_resolution directive in use: epoll_wait blocks until a
         * SIGALRM refreshes the cached time */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* the accept mutex prevents the thundering herd: workers take turns
     * accept()ing on the listening fds */
    if (ngx_use_accept_mutex) {

        /* ngx_accept_disabled > 0 means this worker is saturated: once
         * 7/8 of its configured connections are in use it stops taking
         * new connections — a simple load-balancing scheme */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;

        } else {
            /* With the lock held, flags gains NGX_POST_EVENTS: accept
             * events are queued on ngx_posted_accept_events and ordinary
             * EPOLLIN/EPOLLOUT events on ngx_posted_events, all processed
             * after the lock is released. */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* lock held: defer event handling */
                flags |= NGX_POST_EVENTS;

            } else {
                /* Lock not held, so the listening fds are not watched.
                 * Cap the epoll_wait timeout at ngx_accept_mutex_delay so
                 * we retry soon and new connections are not left waiting. */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* dispatches to e.g. ngx_epoll_process_events(cycle, timer, flags) */
    (void) ngx_process_events(cycle, timer, flags);

    /* milliseconds spent inside the poller call */
    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* accept queued connections first */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /* Release the accept lock only AFTER the posted accept events ran:
     * releasing it earlier would let another worker start watching the
     * listening fds and steal the deferred accepts. */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* remove expired timers and invoke their registered handlers */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* then handle all queued ordinary read/write events */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * Old (nginx 0.x) epoll event processor: expires already-due timers, takes
 * the accept mutex if available, calls epoll_wait(), updates the cached
 * time, then dispatches or posts the collected read/write events.
 *
 * Returns NGX_OK on success, NGX_ERROR on a fatal poller/mutex failure.
 * NOTE: the interleaved accept-mutex / posted-events-mutex handling inside
 * the dispatch loop is strictly order-dependent — do not reorder.
 */
int ngx_epoll_process_events(ngx_cycle_t *cycle)
{
    int                events;
    size_t             n;          /* unused in this variant */
    ngx_int_t          instance, i;
    ngx_uint_t         lock, accept_lock, expire;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_msec_t         timer;
    ngx_event_t       *rev, *wev;
    struct timeval     tv;
    ngx_connection_t  *c;
    ngx_epoch_msec_t   delta;

    /* expire all timers that are already due before blocking in epoll */
    for ( ;; ) {
        timer = ngx_event_find_timer();

#if (NGX_THREADS)

        if (timer == NGX_TIMER_ERROR) {
            return NGX_ERROR;
        }

        /* cap the wait so worker threads are woken periodically */
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
            break;
        }
#endif

        if (timer != 0) {
            break;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "epoll expired timer");

        ngx_event_expire_timers((ngx_msec_t)
                                    (ngx_elapsed_msec - ngx_old_elapsed_msec));

        if (ngx_posted_events && ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        }
    }

    /* NGX_TIMER_INFINITE == INFTIM */

    if (timer == NGX_TIMER_INFINITE) {
        expire = 0;
    } else {
        expire = 1;
    }

    ngx_old_elapsed_msec = ngx_elapsed_msec;
    accept_lock = 0;

    if (ngx_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* worker overloaded: skip one round of accepting */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            if (ngx_accept_mutex_held) {
                accept_lock = 1;

            } else if (timer == NGX_TIMER_INFINITE
                       || timer > ngx_accept_mutex_delay)
            {
                /* retry the mutex after at most accept_mutex_delay ms;
                 * do not expire timers on this shortened wait */
                timer = ngx_accept_mutex_delay;
                expire = 0;
            }
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "epoll timer: %d", timer);

    events = epoll_wait(ep, event_list, nevents, timer);

    if (events == -1) {
        err = ngx_errno;
    } else {
        err = 0;
    }

    /* refresh the cached time and the elapsed-milliseconds counter */
    ngx_gettimeofday(&tv);
    ngx_time_update(tv.tv_sec);

    delta = ngx_elapsed_msec;
    ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                       + tv.tv_usec / 1000 - ngx_start_msec;

    if (timer != NGX_TIMER_INFINITE) {
        delta = ngx_elapsed_msec - delta;

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "epoll timer: %d, delta: %d", timer, (int) delta);
    } else {
        /* an infinite wait must only return with events */
        if (events == 0) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "epoll_wait() returned no events without timeout");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

    if (err) {
        /* EINTR (e.g. SIGALRM) is routine; anything else is an alert */
        ngx_log_error((err == NGX_EINTR) ? NGX_LOG_INFO : NGX_LOG_ALERT,
                      cycle->log, err, "epoll_wait() failed");
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    /* the posted-events queue is shared with worker threads: lock it while
     * dispatching */
    if (events > 0) {
        if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }

        lock = 1;

    } else {
        lock = 0;
    }

    log = cycle->log;

    for (i = 0; i < events; i++) {

        /* the low pointer bit carries the connection "instance" used to
         * detect stale events for reused connections */
        c = event_list[i].data.ptr;
        instance = (uintptr_t) c & 1;
        c = (ngx_connection_t *) ((uintptr_t) c & (uintptr_t) ~1);

        rev = c->read;

        if (c->fd == -1 || rev->instance != instance) {

            /*
             * the stale event from a file descriptor
             * that was just closed in this iteration
             */

            ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "epoll: stale event " PTR_FMT, c);
            continue;
        }

#if (NGX_DEBUG0)
        log = c->log ? c->log : cycle->log;
#endif

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                       "epoll: fd:%d ev:%04X d:" PTR_FMT,
                       c->fd, event_list[i].events, event_list[i].data);

        if (event_list[i].events & (EPOLLERR|EPOLLHUP)) {
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, log, 0,
                           "epoll_wait() error on fd:%d ev:%04X",
                           c->fd, event_list[i].events);
        }

        if (event_list[i].events & ~(EPOLLIN|EPOLLOUT|EPOLLERR|EPOLLHUP)) {
            ngx_log_error(NGX_LOG_ALERT, log, 0,
                          "strange epoll_wait() events fd:%d ev:%04X",
                          c->fd, event_list[i].events);
        }

        wev = c->write;

        /* EPOLLERR/EPOLLHUP are delivered to active writers too, so their
         * handlers observe the error */
        if ((event_list[i].events & (EPOLLOUT|EPOLLERR|EPOLLHUP))
            && wev->active)
        {
            if (ngx_threaded) {
                wev->posted_ready = 1;
                ngx_post_event(wev);
            } else {
                wev->ready = 1;

                if (!ngx_accept_mutex_held) {
                    wev->event_handler(wev);
                } else {
                    ngx_post_event(wev);
                }
            }
        }

        /*
         * EPOLLIN must be handled after EPOLLOUT because we use
         * the optimization to avoid the unnecessary mutex locking/unlocking
         * if the accept event is the last one.
         */

        if ((event_list[i].events & (EPOLLIN|EPOLLERR|EPOLLHUP))
            && rev->active)
        {
            if (ngx_threaded && !rev->accept) {
                rev->posted_ready = 1;
                ngx_post_event(rev);
                continue;
            }

            rev->ready = 1;

            if (!ngx_threaded && !ngx_accept_mutex_held) {
                rev->event_handler(rev);

            } else if (!rev->accept) {
                ngx_post_event(rev);

            } else if (ngx_accept_disabled <= 0) {

                /* accept events run outside the posted-events mutex */
                ngx_mutex_unlock(ngx_posted_events_mutex);

                rev->event_handler(rev);

                /* if accept pushed us over the load threshold, give the
                 * accept mutex back immediately */
                if (ngx_accept_disabled > 0) {
                    ngx_accept_mutex_unlock();
                    accept_lock = 0;
                }

                /* last event: skip re-locking altogether */
                if (i + 1 == events) {
                    lock = 0;
                    break;
                }

                if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
                    if (accept_lock) {
                        ngx_accept_mutex_unlock();
                    }
                    return NGX_ERROR;
                }
            }
        }
    }

    if (accept_lock) {
        ngx_accept_mutex_unlock();
    }

    if (lock) {
        ngx_mutex_unlock(ngx_posted_events_mutex);
    }

    if (expire && delta) {
        ngx_event_expire_timers((ngx_msec_t) delta);
    }

    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle);
        }
    }

    return NGX_OK;
}
/*
 * Worker event-loop iteration (old-threads variant): poll for events,
 * run posted accepts, release the accept mutex, expire timers, then run
 * posted read/write handlers.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* accept mutex in use?  It both avoids the thundering herd and acts
     * as a load balancer across workers */
    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled > 0) {
            /* positive means this worker already holds too many
             * connections: give up one chance to compete for the mutex */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* queue all produced events; they are handled only after
                 * the lock has been released */
                flags |= NGX_POST_EVENTS;

            } else {
                /* bound how long we sleep before competing for the lock
                 * again */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* epoll starts waiting for events here */
    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* ngx_posted_accept_events holds accept events collected from the
     * listening sockets during the wait */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        /* all accept events are done — release the lock promptly */
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* delta is the measured poller wait above; any millisecond-level cost
     * means timers should be checked for expiry */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* ordinary events (reads/writes on established connections) */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * Old (nginx 0.x) Solaris /dev/poll event processor: flushes pending
 * pollfd changes to the /dev/poll device, waits via ioctl(DP_POLL),
 * updates the cached time, then dispatches or posts events.
 *
 * Returns NGX_OK on success, NGX_ERROR on a fatal failure.
 */
int ngx_devpoll_process_events(ngx_cycle_t *cycle)
{
    int                events;
    ngx_int_t          i;
    ngx_uint_t         j, lock, accept_lock, expire;
    size_t             n;
    ngx_msec_t         timer;
    ngx_err_t          err;
    ngx_cycle_t      **old_cycle;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;
    ngx_epoch_msec_t   delta;
    struct dvpoll      dvp;
    struct timeval     tv;

    /* expire all timers that are already due before blocking */
    for ( ;; ) {
        timer = ngx_event_find_timer();

        if (timer != 0) {
            break;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "devpoll expired timer");

        ngx_event_expire_timers((ngx_msec_t)
                                    (ngx_elapsed_msec - ngx_old_elapsed_msec));
    }

    /* NGX_TIMER_INFINITE == INFTIM */

    if (timer == NGX_TIMER_INFINITE) {
        expire = 0;
    } else {
        expire = 1;
    }

    ngx_old_elapsed_msec = ngx_elapsed_msec;
    accept_lock = 0;

    if (ngx_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* worker overloaded: skip one round of accepting */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            if (ngx_accept_mutex_held) {
                accept_lock = 1;

            } else if (timer == NGX_TIMER_INFINITE
                       || timer > ngx_accept_mutex_delay)
            {
                /* retry the mutex soon; skip timer expiry on this wait */
                timer = ngx_accept_mutex_delay;
                expire = 0;
            }
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "devpoll timer: %d", timer);

    /* flush queued pollfd registrations to the /dev/poll device */
    if (nchanges) {
        n = nchanges * sizeof(struct pollfd);
        if (write(dp, change_list, n) != (ssize_t) n) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "write(/dev/poll) failed");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

    dvp.dp_fds = event_list;
    dvp.dp_nfds = nevents;
    dvp.dp_timeout = timer;
    events = ioctl(dp, DP_POLL, &dvp);

    if (events == -1) {
        err = ngx_errno;
    } else {
        err = 0;
    }

    nchanges = 0;

    /* refresh cached time and elapsed counter */
    ngx_gettimeofday(&tv);
    ngx_time_update(tv.tv_sec);

    delta = ngx_elapsed_msec;
    ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                       + tv.tv_usec / 1000 - ngx_start_msec;

    if (err) {
        /* EINTR is routine; anything else is an alert */
        ngx_log_error((err == NGX_EINTR) ? NGX_LOG_INFO : NGX_LOG_ALERT,
                      cycle->log, err, "ioctl(DP_POLL) failed");
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    if (timer != NGX_TIMER_INFINITE) {
        delta = ngx_elapsed_msec - delta;

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "devpoll timer: %d, delta: %d", timer, (int) delta);
    } else {
        /* an infinite wait must only return with events */
        if (events == 0) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "ioctl(DP_POLL) returned no events without timeout");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

    if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    lock = 1;

    for (i = 0; i < events; i++) {
        c = &ngx_cycle->connections[event_list[i].fd];

        /* fd not in the current cycle: search the old cycles kept alive
         * across reconfiguration */
        if (c->fd == -1) {
            old_cycle = ngx_old_cycles.elts;
            for (j = 0; j < ngx_old_cycles.nelts; j++) {
                if (old_cycle[j] == NULL) {
                    continue;
                }
                c = &old_cycle[j]->connections[event_list[i].fd];
                if (c->fd != -1) {
                    break;
                }
            }
        }

        if (c->fd == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, 0, "unknown cycle");
            exit(1);
        }

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "devpoll: fd:%d, ev:%04X, rev:%04X",
                       event_list[i].fd,
                       event_list[i].events, event_list[i].revents);

        if (event_list[i].revents & (POLLERR|POLLHUP|POLLNVAL)) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "ioctl(DP_POLL) error fd:%d ev:%04X rev:%04X",
                          event_list[i].fd,
                          event_list[i].events, event_list[i].revents);
        }

        if (event_list[i].revents & ~(POLLIN|POLLOUT|POLLERR|POLLHUP|POLLNVAL))
        {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "strange ioctl(DP_POLL) events "
                          "fd:%d ev:%04X rev:%04X",
                          event_list[i].fd,
                          event_list[i].events, event_list[i].revents);
        }

        wev = c->write;

        /* NOTE(review): the dispatch tests below read .events (the
         * requested mask) rather than .revents (the returned mask), while
         * the log checks above use .revents; later nginx versions switched
         * dispatch to revents — verify against upstream before relying on
         * this variant. */
        if ((event_list[i].events & (POLLOUT|POLLERR|POLLHUP))
            && wev->active)
        {
            wev->ready = 1;

            if (!ngx_threaded && !ngx_accept_mutex_held) {
                wev->event_handler(wev);
            } else {
                ngx_post_event(wev);
            }
        }

        /*
         * POLLIN must be handled after POLLOUT because we use
         * the optimization to avoid the unnecessary mutex locking/unlocking
         * if the accept event is the last one.
         */

        rev = c->read;

        if ((event_list[i].events & (POLLIN|POLLERR|POLLHUP))
            && rev->active)
        {
            rev->ready = 1;

            if (!ngx_threaded && !ngx_accept_mutex_held) {
                rev->event_handler(rev);

            } else if (!rev->accept) {
                ngx_post_event(rev);

            } else if (ngx_accept_disabled <= 0) {

                /* accept events run outside the posted-events mutex */
                ngx_mutex_unlock(ngx_posted_events_mutex);

                c->read->event_handler(rev);

                if (ngx_accept_disabled > 0) {
                    ngx_accept_mutex_unlock();
                    accept_lock = 0;
                }

                if (i + 1 == events) {
                    lock = 0;
                    break;
                }

                if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
                    if (accept_lock) {
                        ngx_accept_mutex_unlock();
                    }
                    return NGX_ERROR;
                }
            }
        }
    }

    if (accept_lock) {
        ngx_accept_mutex_unlock();
    }

    if (lock) {
        ngx_mutex_unlock(ngx_posted_events_mutex);
    }

    if (expire && delta) {
        ngx_event_expire_timers((ngx_msec_t) delta);
    }

    if (!ngx_threaded) {
        ngx_event_process_posted(cycle);
    }

    return NGX_OK;
}
/*
 * Process I/O events and timer events for one worker-loop iteration.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* cached time refreshed by SIGALRM; wait indefinitely */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_OLD_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* ngx_use_accept_mutex: whether accept locking is used to avoid the
     * thundering herd; set when worker_processes > 1 and accept_mutex is
     * enabled in the configuration */
    if (ngx_use_accept_mutex) {

        /* ngx_accept_disabled > 0 means this worker has used 7/8 of its
         * connections and is too busy to accept more — a simple form of
         * load balancing */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;

        } else {
            /* Non-blocking attempt to take the accept lock; only one
             * worker wins.  Winning means the listening fds are added to
             * this worker's poller; losing means they are removed. */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            /* With the lock, NGX_POST_EVENTS defers everything: accepts
             * go to ngx_posted_accept_events, reads/writes to
             * ngx_posted_events. */
            if (ngx_accept_mutex_held) {
                flags |= NGX_POST_EVENTS;

            } else {
                /* Without the lock, cap the poller timeout at
                 * ngx_accept_mutex_delay so new connections do not wait
                 * long before we retry the lock. */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    (void) ngx_process_events(cycle, timer, flags);

    /* milliseconds consumed by the poller call */
    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* handle deferred new-connection (accept) events */
    ngx_event_process_posted(cycle, &ngx_posted_accept_events);

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        ngx_event_expire_timers();
    }

    /* handle ordinary read/write events, deliberately deferred until after
     * the lock was released because they can take a long time */
    ngx_event_process_posted(cycle, &ngx_posted_events);
}
/*
 * One iteration of the worker event loop: wait for I/O events, then run
 * posted accept handlers, release the accept mutex, expire timers, and
 * finally run the posted read/write handlers.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_msec_t  wait, elapsed;
    ngx_uint_t  ev_flags;

    if (ngx_timer_resolution) {
        /* cached time is advanced by SIGALRM: block indefinitely */
        wait = NGX_TIMER_INFINITE;
        ev_flags = 0;

    } else {
        /* wake no later than the nearest timer; poller refreshes time */
        wait = ngx_event_find_timer();
        ev_flags = NGX_UPDATE_TIME;

#if (NGX_THREADS)
        if (wait > 500 || wait == NGX_TIMER_INFINITE) {
            wait = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled <= 0) {

            /* compete for the accept mutex */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* won the lock: defer all handlers to the posted queues
                 * so the lock can be dropped quickly */
                ev_flags |= NGX_POST_EVENTS;

            } else if (wait == NGX_TIMER_INFINITE
                       || wait > ngx_accept_mutex_delay)
            {
                /* lost the lock: retry after at most accept_mutex_delay */
                wait = ngx_accept_mutex_delay;
            }

        } else {
            /* overloaded worker: give up one round of accepting */
            ngx_accept_disabled--;
        }
    }

    elapsed = ngx_current_msec;

    /* ngx_event_actions.process_events, e.g. ngx_epoll_process_events */
    (void) ngx_process_events(cycle, wait, ev_flags);

    elapsed = ngx_current_msec - elapsed;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", elapsed);

    /* accept events must run before the mutex is released */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (elapsed) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* ordinary connection read/write handlers, not accepts */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * Old (nginx 0.x) select() event processor: expires due timers, takes the
 * accept mutex when available, recomputes max_fd if needed, calls select(),
 * updates the cached time, posts ready events, then runs the collected
 * accept events with careful lock juggling.
 *
 * Returns NGX_OK on success, NGX_ERROR on a fatal failure.
 */
static ngx_int_t ngx_select_process_events(ngx_cycle_t *cycle)
{
    int                       ready, nready;
    ngx_uint_t                i, found, lock, expire;
    ngx_err_t                 err;
    ngx_msec_t                timer;
    ngx_event_t              *ev;
    ngx_connection_t         *c;
    ngx_epoch_msec_t          delta;
    struct timeval            tv, *tp;
#if (HAVE_SELECT_CHANGE_TIMEOUT)
    /* accumulated msec since the last real gettimeofday() call */
    static ngx_epoch_msec_t   deltas = 0;
#endif

    /* expire all timers that are already due before blocking */
    for ( ;; ) {
        timer = ngx_event_find_timer();

        if (timer != 0) {
            break;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "select expired timer");

        ngx_event_expire_timers((ngx_msec_t)
                                    (ngx_elapsed_msec - ngx_old_elapsed_msec));
    }

    ngx_old_elapsed_msec = ngx_elapsed_msec;
    expire = 1;

#if !(WIN32)

    if (ngx_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* worker overloaded: skip one round of accepting */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return NGX_ERROR;
            }

            /* without the lock, retry it after at most accept_mutex_delay
             * and do not expire timers on that shortened wait */
            if (ngx_accept_mutex_held == 0
                && (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay))
            {
                timer = ngx_accept_mutex_delay;
                expire = 0;
            }
        }
    }

    /* max_fd was invalidated by an event deletion: recompute it */
    if (max_fd == -1) {
        for (i = 0; i < nevents; i++) {
            c = event_index[i]->data;
            if (max_fd < c->fd) {
                max_fd = c->fd;
            }
        }

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "change max_fd: %d", max_fd);
    }

#endif

#if (NGX_DEBUG)
    if (cycle->log->log_level & NGX_LOG_DEBUG_ALL) {
        for (i = 0; i < nevents; i++) {
            ev = event_index[i];
            c = ev->data;
            ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                           "select event: fd:%d wr:%d", c->fd, ev->write);
        }

#if !(WIN32)
        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "max_fd: %d", max_fd);
#endif
    }
#endif

    if (timer == NGX_TIMER_INFINITE) {
        tp = NULL;
        expire = 0;

    } else {
        tv.tv_sec = timer / 1000;
        tv.tv_usec = (timer % 1000) * 1000;
        tp = &tv;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select timer: %d", timer);

    /* select() destroys its fd_sets: work on copies of the masters */
    work_read_fd_set = master_read_fd_set;
    work_write_fd_set = master_write_fd_set;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select read fd_set: %08X", *(int *) &work_read_fd_set);

#if (WIN32)
    /* Winsock ignores the nfds argument */
    ready = select(0, &work_read_fd_set, &work_write_fd_set, NULL, tp);
#else
    ready = select(max_fd + 1, &work_read_fd_set, &work_write_fd_set,
                   NULL, tp);
#endif

    if (ready == -1) {
        err = ngx_socket_errno;
    } else {
        err = 0;
    }

#if (HAVE_SELECT_CHANGE_TIMEOUT)

    /* on systems where select() decrements the timeout in place, derive
     * the elapsed time from the remaining timeout */
    if (timer != NGX_TIMER_INFINITE) {
        delta = timer - (tv.tv_sec * 1000 + tv.tv_usec / 1000);

        /*
         * learn the real time and update the cached time
         * if the sum of the last deltas overcomes 1 second
         */

        deltas += delta;

        if (deltas > 1000) {
            ngx_gettimeofday(&tv);
            ngx_time_update(tv.tv_sec);
            deltas = tv.tv_usec / 1000;
            ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                               + tv.tv_usec / 1000 - ngx_start_msec;
        } else {
            ngx_elapsed_msec += delta;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "select timer: %d, delta: %d", timer, (int) delta);

    } else {
        delta = 0;
        ngx_gettimeofday(&tv);
        ngx_time_update(tv.tv_sec);

        ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                           + tv.tv_usec / 1000 - ngx_start_msec;

        /* an infinite wait must only return with ready descriptors */
        if (ready == 0) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "select() returned no events without timeout");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

#else /* !(HAVE_SELECT_CHANGE_TIMEOUT) */

    ngx_gettimeofday(&tv);
    ngx_time_update(tv.tv_sec);

    delta = ngx_elapsed_msec;
    ngx_elapsed_msec = (ngx_epoch_msec_t) tv.tv_sec * 1000
                       + tv.tv_usec / 1000 - ngx_start_msec;

    if (timer != NGX_TIMER_INFINITE) {
        delta = ngx_elapsed_msec - delta;

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "select timer: %d, delta: %d", timer, (int) delta);

    } else {
        if (ready == 0) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, 0,
                          "select() returned no events without timeout");
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

#endif /* HAVE_SELECT_CHANGE_TIMEOUT */

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "select ready %d", ready);

    if (err) {
#if (WIN32)
        ngx_log_error(NGX_LOG_ALERT, cycle->log, err, "select() failed");
#else
        /* EINTR is routine; anything else is an alert */
        ngx_log_error((err == NGX_EINTR) ? NGX_LOG_INFO : NGX_LOG_ALERT,
                      cycle->log, err, "select() failed");
#endif
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
        ngx_accept_mutex_unlock();
        return NGX_ERROR;
    }

    lock = 1;
    nready = 0;

    for (i = 0; i < nevents; i++) {
        ev = event_index[i];
        c = ev->data;
        found = 0;

        if (ev->write) {
            if (FD_ISSET(c->fd, &work_write_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select write %d", c->fd);
            }

        } else {
            if (FD_ISSET(c->fd, &work_read_fd_set)) {
                found = 1;
                ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                               "select read %d", c->fd);
            }
        }

        if (found) {
            ev->ready = 1;

            /* one-shot events are removed once they fire */
            if (ev->oneshot) {
                if (ev->timer_set) {
                    ngx_del_timer(ev);
                }

                if (ev->write) {
                    ngx_select_del_event(ev, NGX_WRITE_EVENT, 0);
                } else {
                    ngx_select_del_event(ev, NGX_READ_EVENT, 0);
                }
            }

            /* accept events are collected on a private list so they can
             * be run outside the posted-events mutex below */
            if (ev->accept) {
                ev->next = accept_events;
                accept_events = ev;
            } else {
                ngx_post_event(ev);
            }

            nready++;
        }
    }

    /* run the collected accept events, dropping the posted-events mutex
     * around each handler; stop early if the worker becomes overloaded */
    ev = accept_events;

    for ( ;; ) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept event " PTR_FMT, ev);

        if (ev == NULL) {
            break;
        }

        ngx_mutex_unlock(ngx_posted_events_mutex);

        ev->event_handler(ev);

        if (ngx_accept_disabled > 0) {
            lock = 0;
            break;
        }

        ev = ev->next;

        if (ev == NULL) {
            lock = 0;
            break;
        }

        if (ngx_mutex_lock(ngx_posted_events_mutex) == NGX_ERROR) {
            ngx_accept_mutex_unlock();
            return NGX_ERROR;
        }
    }

    ngx_accept_mutex_unlock();
    accept_events = NULL;

    if (lock) {
        ngx_mutex_unlock(ngx_posted_events_mutex);
    }

    /* select() reported more ready fds than we matched: fd bookkeeping
     * is inconsistent */
    if (ready != nready) {
        ngx_log_error(NGX_LOG_ALERT, cycle->log, 0, "select ready != events");
    }

    if (expire && delta) {
        ngx_event_expire_timers((ngx_msec_t) delta);
    }

    if (!ngx_threaded) {
        ngx_event_process_posted(cycle);
    }

    return NGX_OK;
}
/*
 * Drive one pass of the worker event loop.  ngx_process_events is the
 * macro ngx_event_actions.process_events, bound at init time to the
 * configured event module's handler (ngx_epoll_process_events when the
 * epoll module was selected by ./configure and ngx_epoll_init ran
 * ngx_event_actions = ngx_epoll_module_ctx.actions).
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_msec_t  poll_timeout, spent;
    ngx_uint_t  post_flags;

    if (ngx_timer_resolution) {
        /* SIGALRM keeps the cached time fresh: no poller timeout needed */
        post_flags = 0;
        poll_timeout = NGX_TIMER_INFINITE;

    } else {
        /* bound the wait by the nearest timer and let the poller update
         * the cached time */
        post_flags = NGX_UPDATE_TIME;
        poll_timeout = ngx_event_find_timer();

#if (NGX_THREADS)
        if (poll_timeout > 500 || poll_timeout == NGX_TIMER_INFINITE) {
            poll_timeout = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled <= 0) {

            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* lock held: queue every event and run it after unlock */
                post_flags |= NGX_POST_EVENTS;

            } else if (poll_timeout == NGX_TIMER_INFINITE
                       || poll_timeout > ngx_accept_mutex_delay)
            {
                /* lock missed: sleep at most accept_mutex_delay before
                 * trying again */
                poll_timeout = ngx_accept_mutex_delay;
            }

        } else {
            /* too many active connections: sit out one accept round */
            ngx_accept_disabled--;
        }
    }

    spent = ngx_current_msec;

    (void) ngx_process_events(cycle, poll_timeout, post_flags);

    spent = ngx_current_msec - spent;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", spent);

    /* deferred accepts run while the lock is still held */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (spent) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* deferred read/write handlers run after the lock is gone */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);
        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
/*
 * Worker event-loop iteration (modern variant with the Win32 wait cap):
 * poll for events, process posted accepts, release the accept mutex,
 * expire timers, then process posted read/write events.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* cached time refreshed by SIGALRM; wait indefinitely */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_WIN32)

        /* handle signals from master in case of network inactivity */

        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {
        if (ngx_accept_disabled > 0) {
            /* overloaded: skip one round of accepting */
            ngx_accept_disabled--;

        } else {
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* defer all handlers until the mutex is released */
                flags |= NGX_POST_EVENTS;

            } else {
                /* retry the mutex after at most accept_mutex_delay */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /* blocking wait for events (was a GBK-mojibake comment) */
    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* deferred accepts first, while the mutex is still held */
    ngx_event_process_posted(cycle, &ngx_posted_accept_events);

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        ngx_event_expire_timers();
    }

    /* deferred ordinary read/write handlers */
    ngx_event_process_posted(cycle, &ngx_posted_events);
}
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    /*
     * timer_resolution controls how the cached time is refreshed:
     *
     * 1. timer_resolution set: flags = 0 and timer = -1, so epoll_wait()
     *    blocks until an event or a signal; the periodic SIGALRM interrupt
     *    sets ngx_event_timer_alarm and only then is ngx_time_update()
     *    called (which clears the flag again).
     *
     * 2. timer_resolution unset: flags = NGX_UPDATE_TIME and the smallest
     *    deadline in the timer red-black tree becomes the epoll_wait()
     *    timeout, after which the time is updated unconditionally.
     */
    if (ngx_timer_resolution) {
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        /* nearest deadline from the timer tree (e.g. a failed accept adds
         * accept_mutex_delay to the tree in ngx_event_accept) */
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_WIN32)

        /* handle signals from master in case of network inactivity */

        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    /* ngx_use_accept_mutex: accept locking against the thundering herd;
     * set when worker_processes > 1 and accept_mutex is on */
    if (ngx_use_accept_mutex) {

        /*
         * ngx_accept_disabled is positive once this worker has used 7/8
         * of its configured connections: it then stops accepting new
         * connections, decrementing the counter each iteration until it
         * drops below the threshold — a simple load-balancing scheme.
         */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;

        } else {
            /*
             * Only the worker holding the accept mutex keeps the listening
             * sockets in its epoll set (ngx_trylock_accept_mutex adds or
             * removes them), so only one worker wakes up on a new
             * connection — no thundering herd.  The call never blocks;
             * it returns NGX_OK whether or not the lock was obtained.
             */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            /*
             * Lock held: NGX_POST_EVENTS defers everything — accept
             * events to ngx_posted_accept_events, read/write events to
             * ngx_posted_events.
             */
            if (ngx_accept_mutex_held) {
                flags |= NGX_POST_EVENTS;

            } else {
                /*
                 * Lock missed: the listening sockets are not watched.
                 * Cap the epoll_wait timeout at accept_mutex_delay so the
                 * lock is retried soon (and the cached time refreshed);
                 * the worker sleeps at most ~0.5 s here.
                 */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    /*
     * With the lock, the poller posts handlers to the deferred queues;
     * they run after the lock is released below.  Without the lock, the
     * 0.5 s timeout above bounds the wait before retrying.  On Linux,
     * plain sockets are handled by ngx_epoll_process_events; async file
     * I/O completions arrive via ngx_epoll_eventfd_handler.
     */
    (void) ngx_process_events(cycle, timer, flags);

    /* time spent waiting inside the poller */
    delta = ngx_current_msec - delta;

    /* (debug logging of the timer delta is disabled in this variant) */

    /* run deferred accepts immediately, then release the mutex right away
     * so other workers can start accepting */
    ngx_event_process_posted(cycle, &ngx_posted_accept_events);

    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        /* run handlers of expired timers from the red-black tree */
        ngx_event_expire_timers();
    }

    /*
     * Finally the ordinary read/write handlers, which may take long — they
     * were deferred via NGX_POST_EVENTS precisely so they run after the
     * accept mutex was released, keeping accept latency low.
     */
    ngx_event_process_posted(cycle, &ngx_posted_events);
}
//事件循环核心 void ngx_process_events_and_timers(ngx_cycle_t *cycle) { ngx_uint_t flags; ngx_msec_t timer, delta; if (ngx_timer_resolution) { //当ngx是经过配置的信息,比如100ms timer = NGX_TIMER_INFINITE; //timer无限大 flags = 0; } else { timer = ngx_event_find_timer(); //找到最小的定时时间 flags = NGX_UPDATE_TIME; #if (NGX_THREADS) if (timer == NGX_TIMER_INFINITE || timer > 500) { timer = 500; } #endif } if (ngx_use_accept_mutex) { //如果设置了负载均衡策略 if (ngx_accept_disabled > 0) { //大于0时,已经过载了 ngx_accept_disabled--; } else { if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) { //没有过载,那就去争用锁 return; } if (ngx_accept_mutex_held) { //如果当前拥有锁 flags |= NGX_POST_EVENTS; //加上一个标签,后续处理,先将accept事件放入到一个执行队列,释放锁后才继续执行 } else { if (timer == NGX_TIMER_INFINITE //如果当前没有锁,那就把事件监控机制阻塞点的超时时间限制在一个比较短的范围内,然后继续去争用锁 || timer > ngx_accept_mutex_delay) { timer = ngx_accept_mutex_delay; } } } } delta = ngx_current_msec; (void) ngx_process_events(cycle, timer, flags); //epoll模块中的ngx_epoll_process_events delta = ngx_current_msec - delta; //统计耗时的时间 ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "timer delta: %M", delta); if (ngx_posted_accept_events) { //对队列中accpet的事件的处理 ngx_event_process_posted(cycle, &ngx_posted_accept_events); //暂存accpet事件 } if (ngx_accept_mutex_held) { //释放锁 ngx_shmtx_unlock(&ngx_accept_mutex); } if (delta) { //处理定时器超时的事件 ngx_event_expire_timers(); } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "posted events %p", ngx_posted_events); if (ngx_posted_events) { //处理普通读写事件 if (ngx_threaded) { ngx_wakeup_worker_thread(cycle); } else { ngx_event_process_posted(cycle, &ngx_posted_events); //对于普通事件的处理 } } }
/*
 * Worker event-loop body. Waits for events, then runs the three deferred
 * stages in order: posted accepts, expired timers, posted I/O handlers.
 *
 * The accept mutex (when enabled, i.e. several workers and accept_mutex on)
 * prevents the thundering-herd problem: only the worker that wins
 * ngx_trylock_accept_mutex() has the listening sockets registered in its
 * event mechanism, so only that one is woken by new connections.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* SIGALRM keeps the cached time fresh; wait with no timeout */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_THREADS)
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        /* ngx_accept_disabled goes positive once 7/8 of worker_connections
         * are busy; while positive this worker declines to accept, which
         * acts as a simple load-balancing mechanism between workers */
        if (ngx_accept_disabled > 0) {
            ngx_accept_disabled--;

        } else if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
            return;

        } else if (ngx_accept_mutex_held) {
            /* lock acquired (non-blocking): listening sockets are now in
             * this worker's epoll; defer every event to the posted queues
             * (accepts -> ngx_posted_accept_events, I/O -> ngx_posted_events)
             * so the lock is held only briefly */
            flags |= NGX_POST_EVENTS;

        } else if (timer == NGX_TIMER_INFINITE
                   || timer > ngx_accept_mutex_delay)
        {
            /* lock not acquired: listening sockets were removed from this
             * worker's epoll; shorten the wait so new connections are not
             * left unserved for long */
            timer = ngx_accept_mutex_delay;
        }
    }

    delta = ngx_current_msec;

    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* drain deferred accepts (the queue head pointer is updated inside) */
    if (ngx_posted_accept_events) {
        ngx_event_process_posted(cycle, &ngx_posted_accept_events);
    }

    /* release the mutex before handling EPOLLIN/EPOLLOUT work */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    /* a non-zero delta means the clock moved: check for expired timers */
    if (delta) {
        ngx_event_expire_timers();
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "posted events %p", ngx_posted_events);

    /* finally the ordinary (potentially slow) read/write handlers that
     * were deferred while the mutex was held */
    if (ngx_posted_events) {
        if (ngx_threaded) {
            ngx_wakeup_worker_thread(cycle);

        } else {
            ngx_event_process_posted(cycle, &ngx_posted_events);
        }
    }
}
// 等待事件并调用处理事件的函数。 void ngx_process_events_and_timers(ngx_cycle_t *cycle) { ngx_uint_t flags; ngx_msec_t timer, delta; if (ngx_timer_resolution) { timer = NGX_TIMER_INFINITE; flags = 0; } else { timer = ngx_event_find_timer(); flags = NGX_UPDATE_TIME; #if (NGX_THREADS) if (timer == NGX_TIMER_INFINITE || timer > 500) { timer = 500; } #endif } if (ngx_use_accept_mutex) { // 表明需要通过加锁解决惊群问题 if (ngx_accept_disabled > 0) { // 空闲连接只剩下不到总连接数的1/8 ngx_accept_disabled--; } else { if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) { return; } if (ngx_accept_mutex_held) { flags |= NGX_POST_EVENTS; } else { if (timer == NGX_TIMER_INFINITE || timer > ngx_accept_mutex_delay) { timer = ngx_accept_mutex_delay; } } } } delta = ngx_current_msec; // 如果使用epoll作为事件模型,这个函数调用ngx_epoll_process_events() (void) ngx_process_events(cycle, timer, flags); delta = ngx_current_msec - delta; ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "timer delta: %M", delta); // 处理ngx_posted_accept_events事件链表 if (ngx_posted_accept_events) { ngx_event_process_posted(cycle, &ngx_posted_accept_events); } // 释放这个进程的ngx_accept_mutex锁 if (ngx_accept_mutex_held) { ngx_shmtx_unlock(&ngx_accept_mutex); } // delta是处理这次事件的时间,如果达到一毫秒就执行所有过期的定时器事件。 if (delta) { ngx_event_expire_timers(); } ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0, "posted events %p", ngx_posted_events); // 处理ngx_posted_events事件链表 if (ngx_posted_events) { if (ngx_threaded) { ngx_wakeup_worker_thread(cycle); } else { ngx_event_process_posted(cycle, &ngx_posted_events); } } }
/*
 * Event-loop iteration (queue-based posted-event variant, no thread path).
 *
 * Selects the event-wait timeout, optionally takes the accept mutex, waits
 * for events via the active event module (ngx_process_events is a macro for
 * ngx_event_actions.process_events), then drains the two posted queues
 * around the timer expiry step. The accept queue is drained and the mutex
 * released before ordinary handlers run, so other workers can accept.
 */
void
ngx_process_events_and_timers(ngx_cycle_t *cycle)
{
    ngx_uint_t  flags;
    ngx_msec_t  timer, delta;

    if (ngx_timer_resolution) {
        /* SIGALRM-driven clock: no wait timeout, no time update on wakeup */
        timer = NGX_TIMER_INFINITE;
        flags = 0;

    } else {
        /* wake at the earliest timer; update the cached time afterwards */
        timer = ngx_event_find_timer();
        flags = NGX_UPDATE_TIME;

#if (NGX_WIN32)
        /* handle signals from master in case of network inactivity */
        if (timer == NGX_TIMER_INFINITE || timer > 500) {
            timer = 500;
        }
#endif
    }

    if (ngx_use_accept_mutex) {

        if (ngx_accept_disabled > 0) {
            /* too few free connections: step aside from accepting */
            ngx_accept_disabled--;

        } else {

            /* non-blocking attempt on the shared accept lock
             * (see event/ngx_event_accept.c) */
            if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
                return;
            }

            if (ngx_accept_mutex_held) {
                /* lock held: queue everything for after the unlock */
                flags |= NGX_POST_EVENTS;

            } else {
                /* lock missed: retry within ngx_accept_mutex_delay ms */
                if (timer == NGX_TIMER_INFINITE
                    || timer > ngx_accept_mutex_delay)
                {
                    timer = ngx_accept_mutex_delay;
                }
            }
        }
    }

    delta = ngx_current_msec;

    (void) ngx_process_events(cycle, timer, flags);

    delta = ngx_current_msec - delta;

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "timer delta: %M", delta);

    /* deferred accepts run first (see event/ngx_event_posted.c) */
    ngx_event_process_posted(cycle, &ngx_posted_accept_events);

    /* release the accept lock before any further work */
    if (ngx_accept_mutex_held) {
        ngx_shmtx_unlock(&ngx_accept_mutex);
    }

    if (delta) {
        ngx_event_expire_timers();
    }

    /* deferred read/write handlers run last */
    ngx_event_process_posted(cycle, &ngx_posted_events);
}