/*
 * Acquire the accept mutex; among all workers only one can hold it at a
 * time.  Acquiring it never blocks -- ngx_shmtx_trylock() returns
 * immediately, and on success ngx_accept_mutex_held is set to 1.  While
 * the lock is held the listening sockets are registered in this process's
 * epoll; otherwise they are removed from it.
 */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {  /* file lock or spinlock */

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /* note the negation of NGX_USE_RTSIG_EVENT below */
        if (ngx_accept_mutex_held
            && ngx_accept_events == 0
            && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
        {
            return NGX_OK;
        }

        /*
         * Add the read events of the listening sockets to epoll: we hold
         * the lock, so we are allowed to accept, and the accept events get
         * registered here.
         */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;  /* we hold the lock; safe to return */

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    if (ngx_accept_mutex_held) {
        /*
         * ngx_accept_mutex_held is not changed anywhere else between calls,
         * so reaching this branch means we held the lock on a previous call
         * but failed to get it this time; the epoll registration must be
         * removed.  This also avoids redundant work: the listening sockets
         * start out unregistered, so if the very first attempt fails there
         * is nothing to delete, and after several consecutive failures the
         * deletion is not repeated.
         */
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
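The non-blocking behavior described above comes from ngx_shmtx_trylock, which on platforms with atomic operations boils down to a single compare-and-swap. A minimal sketch of that pattern using C11 atomics (shmtx_t, shmtx_trylock and shmtx_unlock are illustrative names, not nginx's):

#include <stdatomic.h>

typedef struct {
    atomic_ulong lock;   /* 0 = free, otherwise the holder's pid */
} shmtx_t;

/* Returns 1 on success, 0 if another process holds the lock.
 * Never blocks: one compare-and-swap, win or lose. */
static int
shmtx_trylock(shmtx_t *mtx, unsigned long pid)
{
    unsigned long expected = 0;
    return atomic_compare_exchange_strong(&mtx->lock, &expected, pid);
}

static void
shmtx_unlock(shmtx_t *mtx)
{
    atomic_store(&mtx->lock, 0);
}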
// Try to acquire the load-balancing lock and, with it, the right to listen
// on the ports; if it is not acquired, do not listen.
// The lock state is tracked by the flag ngx_accept_mutex_held.
// Internally calls ngx_enable_accept_events/ngx_disable_accept_events.
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    // Try to lock the shared-memory mutex.
    // Non-blocking: returns immediately.
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        // Lock acquired.
        // If we already held the lock before, return directly and keep
        // listening on the ports.
        // ngx_accept_events is not used with epoll.
        // rtsig was removed in nginx 1.9.x.
        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        // We did not hold the lock before, so register the epoll events and
        // start listening: walk the listening port list and add the
        // connection (read) events to epoll to begin accepting requests.
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            // If enabling fails we must unlock immediately and bail out.
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        // The listening events are now in epoll;
        // set the lock-held flag.
        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    // trylock failed, the lock was not acquired; this costs very little.
    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    // The lock was not acquired, but we held it before,
    // i.e. we were listening on the ports.
    if (ngx_accept_mutex_held) {
        // Walk the listening port list and delete the epoll connection
        // events; stop accepting requests.
        if (ngx_disable_accept_events(cycle, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

        // Clear the lock-held flag.
        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
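Both annotations above stress the same invariant: the epoll registration is toggled only on a state transition of the held flag, never unconditionally. A minimal sketch of that pattern against the epoll API (held, listen_fd and ep are illustrative):

#include <sys/epoll.h>

static int held = 0;  /* did we hold the mutex on the previous iteration? */

/* Call once per event-loop iteration with the trylock result. */
static int
sync_accept_registration(int ep, int listen_fd, int got_lock)
{
    struct epoll_event ee = { .events = EPOLLIN, .data.fd = listen_fd };

    if (got_lock && !held) {            /* transition: not held -> held */
        if (epoll_ctl(ep, EPOLL_CTL_ADD, listen_fd, &ee) == -1) {
            return -1;
        }
        held = 1;

    } else if (!got_lock && held) {     /* transition: held -> not held */
        if (epoll_ctl(ep, EPOLL_CTL_DEL, listen_fd, &ee) == -1) {
            return -1;
        }
        held = 0;
    }

    /* no transition: nothing to do, avoiding redundant epoll_ctl calls */
    return 0;
}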
/* Try to take charge of handling new-connection events on the listening ports */
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    /* try to take the ngx_accept_mutex lock; returns 1 on success, 0 on failure */
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /*
         * The flag ngx_accept_mutex_held == 1 means this process already
         * holds the ngx_accept_mutex lock; when the condition below is
         * satisfied, the process obtained the lock on an earlier call,
         * so it returns directly.
         */
        if (ngx_accept_mutex_held
            && ngx_accept_events == 0
            && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
        {
            return NGX_OK;
        }

        /* add the read events of all listening connections to the current
           epoll event-driver module */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            /* on failure, release the lock */
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        /* record that this process now holds the lock */
        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;  /* this process now holds ngx_accept_mutex */

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    /*
     * This process failed to take ngx_accept_mutex, yet
     * ngx_accept_mutex_held is still 1: the flag is stale, left over from a
     * previous call that held the lock, and must be cleaned up here.
     */
    if (ngx_accept_mutex_held) {
        /* remove the read events of all listening connections from the
           event-driver module */
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
// With accept_mutex enabled, a worker process will only attempt to listen
// on the web ports after calling ngx_trylock_accept_mutex.
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    /*
     * Try to grab accept_mutex using the inter-process synchronization lock.
     * ngx_shmtx_trylock returns 1 when the lock is taken and 0 on failure.
     * Taking the lock is non-blocking: if the lock is currently occupied by
     * another worker process, ngx_shmtx_trylock returns at once.
     */
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        /*
         * If we obtained accept_mutex but ngx_accept_mutex_held is already 1,
         * return immediately.  ngx_accept_mutex_held is a flag that, when
         * set, indicates this process already holds the lock.
         */
        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        // Add the read events of all listening connections to the current
        // event-driver module (epoll etc.).
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            // If adding the listening handles to the event-driver module
            // fails, ngx_accept_mutex must be released.
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        /*
         * After ngx_enable_accept_events, this process's event-driver module
         * is listening on all the ports, so ngx_accept_mutex_held is set to 1
         * to let the process's other modules know it currently holds the lock.
         */
        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    /*
     * ngx_shmtx_trylock returned 0, so taking ngx_accept_mutex failed.  If
     * ngx_accept_mutex_held is still 1, the process still believes it holds
     * the lock, which is no longer true and must be corrected.
     */
    if (ngx_accept_mutex_held) {
        // ngx_disable_accept_events removes the read events of all listening
        // connections from the event-driver module.
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        // Without holding ngx_accept_mutex the flag must be cleared.
        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
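The "file lock or spinlock" note in the first version refers to ngx_shmtx's fallback: when atomic operations are not available, the mutex is implemented with a file lock instead. A sketch of such a non-blocking file lock using fcntl (file_trylock/file_unlock are illustrative names):

#include <fcntl.h>
#include <unistd.h>

/* Non-blocking file lock: F_SETLK (unlike F_SETLKW) fails with
 * EACCES/EAGAIN instead of blocking when the lock is already taken. */
static int
file_trylock(int fd)
{
    struct flock fl = {
        .l_type   = F_WRLCK,
        .l_whence = SEEK_SET,   /* l_start = l_len = 0: lock the whole file */
    };

    return fcntl(fd, F_SETLK, &fl) != -1;
}

static void
file_unlock(int fd)
{
    struct flock fl = {
        .l_type   = F_UNLCK,
        .l_whence = SEEK_SET,
    };

    (void) fcntl(fd, F_SETLK, &fl);
}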
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        if (ngx_accept_mutex_held
            && ngx_accept_events == 0
            && !(ngx_event_flags & NGX_USE_RTSIG_EVENT))
        {
            return NGX_OK;
        }

        // add the listening fds to the epoll event set
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    // if we did not get the lock this time, remove the listening fds
    // from the epoll event set
    if (ngx_accept_mutex_held) {
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        // If we already held the lock, just return NGX_OK.
        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        // Reaching here means we acquired the lock anew, so the previously
        // disabled listening handles must be re-opened:
        // ngx_enable_accept_events registers the listening ports in the
        // current worker process's epoll.
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;  // mark that we currently hold the lock

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    // We held the lock before but failed to get it this time, so the
    // listening ports must be removed from the current worker process's
    // epoll, via ngx_disable_accept_events.
    if (ngx_accept_mutex_held) {
        if (ngx_disable_accept_events(cycle, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    if (ngx_accept_mutex_held) {
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}
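For context, this function is called once per event-loop iteration. A toy rendition of the caller's logic in ngx_process_events_and_timers (all names here are illustrative, the control flow is a simplified sketch): skip the lock while the process is "disabled", otherwise try it and either post events for deferred handling (lock held) or cap the poll timeout so the worker competes again soon.

typedef long msec_t;

#define TIMER_INFINITE  ((msec_t) -1)

extern int  trylock_accept_mutex(void);     /* 1 = got it, 0 = lost        */
extern int  accept_disabled;                /* > 0 while overloaded        */
static const msec_t accept_mutex_delay = 500;

static msec_t
choose_poll_timeout(msec_t timer, int *post_events)
{
    if (accept_disabled > 0) {
        accept_disabled--;               /* sit this round out             */
        return timer;
    }

    if (trylock_accept_mutex()) {
        *post_events = 1;                /* queue events; handle them only
                                            after the mutex is released    */
    } else if (timer == TIMER_INFINITE || timer > accept_mutex_delay) {
        timer = accept_mutex_delay;      /* wake soon to compete again     */
    }

    return timer;
}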
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

        c->type = SOCK_STREAM;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }
        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);
}
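The ev->deferred_accept branch above marks the read event ready immediately because, with deferred accept, the kernel only reports the connection once data has actually arrived. On Linux this is enabled per listening socket roughly as follows (a sketch; enable_deferred_accept is an illustrative helper):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Ask the kernel to complete the handshake but only wake us up once the
 * client has sent data (or the timeout expires), so the first read on the
 * accepted socket already has bytes waiting -- which is why the code above
 * can set rev->ready = 1 right away. */
static int
enable_deferred_accept(int listen_fd, int timeout_secs)
{
    return setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
                      &timeout_secs, sizeof(timeout_secs));
}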
// The ev here is the ->read event of the ngx_connection_t that was obtained
// from the connection pool in ngx_event_process_init.
// The accept event is added to epoll either in ngx_event_process_init
// (single process, or load balancing not configured) or through the accept
// mutex (multi-process with load balancing configured).
void
// The ngx_connection_t (ngx_event_t) behind this parameter is the slot
// reserved for the accept event; once accept() succeeds, a fresh
// ngx_connection_t (ngx_event_t) is fetched for reading and writing on the
// new connection.
ngx_event_accept(ngx_event_t *ev)  // executed from ngx_process_events_and_timers
{
    // One accept notification arrives on one ev; when several clients (say 4)
    // connect at once, they are all drained by the do {} while loop below.
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;

    // For file async I/O the event would come from ngx_event_aio_t (read
    // only); for network events it is the event inside ngx_connection_s
    // (both read and write).
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

#if (NGX_HAVE_ACCEPT4)  // ngx_close_socket is what closes a socket again
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        /*
         * System calls on non-blocking I/O always return immediately,
         * whether or not the event has occurred.  If it has not, they
         * return -1 just as in the error case, and errno must be used to
         * tell the two apart: for accept, send and recv, errno is typically
         * EAGAIN ("try again") or EWOULDBLOCK ("operation would block");
         * for connect it is EINPROGRESS ("in progress").
         */
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                // With multi_accept enabled in the events{} block, the loop
                // returns here once every pending connection on this
                // listening ip:port has been accepted.
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    // accept failed in this process, so set
                    // ngx_accept_disabled to 1 for now: on the next pass of
                    // ngx_process_events_and_timers the other processes
                    // compete for the accept lock while this one merely
                    // decrements the counter (1 - 1 = 0), and on the pass
                    // after that it competes again.
                    ngx_accept_disabled = 1;

                } else {
                    // Without load balancing, delay a little and continue
                    // accepting from ngx_process_events_and_timers.
                    ngx_add_timer(ev, ecf->accept_mutex_delay, NGX_FUNC_LINE);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        // Set the load-balancing threshold: one eighth of the total
        // connections minus the free ones; positive once fewer than one
        // eighth of the connections remain free.
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        // After the server accepts a client connection here,
        // ngx_get_connection fetches an ngx_connection_t from the connection
        // pool (one ngx_connection_t per client connection), and later an
        // ngx_http_connection_t is allocated for it, with
        // ngx_connection_t->data = ngx_http_connection_t; see
        // ngx_http_init_connection.
        // The ngx_event_t passed to this function belongs to the listening
        // connection; the c fetched here is an idle connection used for
        // reading and writing once the client connection is established.
        c = ngx_get_connection(s, ev->log);  // ngx_get_connection sets c->fd = s

        // Note: this ngx_connection_t is freshly taken from the pool and is
        // a different one from the ngx_connection_t seen in
        // ngx_epoll_process_events.
        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        // Note again: this ngx_connection_t came fresh from the pool and is
        // not the one handled in ngx_epoll_process_events.
        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t             addr;
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
        u_char                text[NGX_SOCKADDR_STRLEN];
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                         & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }
        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            // not taken when epoll is in use
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);  // ngx_http_init_connection

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);  // drain all pending connections in one go,
                              // until accept() returns NGX_EAGAIN
}
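The annotations around ngx_get_connection describe a free-list connection pool: a fixed array of connection objects with an O(1) free list threaded through it. A toy sketch of that data structure (conn_t and conn_pool_t are illustrative types, not nginx's):

#include <stddef.h>

typedef struct conn_s conn_t;

struct conn_s {
    int      fd;
    conn_t  *next;      /* links free connections, like c->data in nginx */
};

typedef struct {
    conn_t    *slots;       /* fixed array, sized like worker_connections */
    conn_t    *free_head;   /* head of the free list                      */
    unsigned   free_n;      /* like ngx_cycle->free_connection_n          */
} conn_pool_t;

/* O(1) pop from the free list; returns NULL when the pool is exhausted,
 * mirroring ngx_get_connection's NULL return that forces a close(). */
static conn_t *
pool_get(conn_pool_t *p, int fd)
{
    conn_t  *c = p->free_head;

    if (c == NULL) {
        return NULL;
    }

    p->free_head = c->next;
    p->free_n--;
    c->fd = fd;
    return c;
}

static void
pool_free(conn_pool_t *p, conn_t *c)
{
    c->next = p->free_head;
    p->free_head = c;
    p->free_n++;
}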
// callback that handles a new connection
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

        /* first call accept() to try to establish a new connection */
#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        /*
         * ngx_accept_disabled is the key threshold of the load-balancing
         * mechanism, tied directly to how many connections of the pool are
         * in use.  Initially the value is negative; while it stays negative
         * no load-balancing action is triggered, but once it turns positive
         * the load balancing kicks in: a process with positive
         * ngx_accept_disabled stops handling new-connection events and
         * merely decrements ngx_accept_disabled by 1.
         *
         * In other words, once the active connections exceed 7/8 of the
         * maximum the process can take (ngx_accept_disabled becomes
         * positive), the process is considered overloaded.
         */
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        // obtain an ngx_connection_t object from the connection pool
        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t             addr;
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
        u_char                text[NGX_SOCKADDR_STRLEN];
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                         & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }
        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

        // Loop again while the listening event's available flag is set;
        // this corresponds to the multi_accept directive, which tells nginx
        // to establish as many new connections as possible in one pass.
    } while (ev->available);
}
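The threshold comment is easy to verify with concrete numbers. Assuming worker_connections 1024 (so connection_n = 1024), a worker goes "overloaded" once fewer than 128 connections are free; a toy calculation with illustrative values:

#include <stdio.h>

int
main(void)
{
    long connection_n = 1024;        /* worker_connections          */
    long free_connection_n = 100;    /* currently free connections  */

    /* same formula as ngx_event_accept:
     * positive once fewer than 1/8 of the connections are free */
    long accept_disabled = connection_n / 8 - free_connection_n;

    printf("accept_disabled = %ld\n", accept_disabled);  /* 128 - 100 = 28 */

    /* the worker then skips the accept mutex and only counts down:
     * 28 event-loop iterations pass before it competes again */
    return 0;
}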
// Accepts tcp connections only.
// ngx_event_process_init sets the accept callback to ngx_event_accept, so
// connections can be accepted.
// This is the callback (the event handler) invoked when a connection
// request arrives on a listening port.
// It takes a connection from the cycle's connection pool.
// The key operation is ls->handler(c), which calls into the business
// handlers of the other modules.
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    // the event has timed out
    if (ev->timedout) {
        // walk the listening port list and re-add the connection events
        // to epoll
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        // keep the listening from timing out
        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    // rtsig was removed in nginx 1.9.x
    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        // whether epoll should accept as many requests as possible
        ev->available = ecf->multi_accept;
    }

    // the connection object of the event
    lc = ev->data;
    // the listening port object of the event
    ls = lc->listening;
    // no data readable yet
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

        // call accept to take the connection, returning a socket descriptor
#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        // accepting the connection failed
        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            // EAGAIN: no more new connections; this is the multi_accept exit
            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            // the process/system ran out of file descriptors
            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                // walk the listening port list and delete the epoll events;
                // stop accepting requests
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                // release the load-balancing lock so other processes
                // can accept
                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    // no longer holding the lock; temporarily stop accepting
                    ngx_accept_disabled = 1;

                } else {
                    // load balancing not in use:
                    // wait a little, then try accepting again
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }   // end of accept-error handling

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        // At this point accept returned the socket descriptor s.
        // ngx_accept_disabled = total connections / 8 - free connections,
        // i.e. once the free connections drop below 1/8 of the total, the
        // process temporarily stops accepting new connections.
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        // take a free connection from the global ngx_cycle, i.e. from the
        // free_connections list
        c = ngx_get_connection(s, ev->log);

        // no free connection: close the socket, the request cannot
        // be handled
        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

        // field new in 1.10 on the connection object: the type is tcp
        c->type = SOCK_STREAM;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        // Create the memory pool used by the connection.
        // The stream module fixes the pool size at 256 bytes, not
        // configurable; the http module makes it configurable in
        // ngx_http_core_srv_conf_t.
        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        // copy the client sockaddr
        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        // make the socket non-blocking
        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        // The connection's I/O functions:
        // #define ngx_recv ngx_io.recv
        // #define ngx_recv_chain ngx_io.recv_chain
        // initialized in ngx_posix_init.c to the low-level interfaces.
        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        // set the remaining members
        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        // the connection's read and write events
        rev = c->read;
        wev = c->write;

        // a freshly established connection is writable
        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        // If listen used deferred, data is already readable when the
        // connection is established; otherwise a read event must be added
        // and reading waits until data arrives.
        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        // increment the connection counter
        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }
        }
#endif

        // Register the connection with the event model; with epoll this
        // branch is skipped (events are added later on demand).
        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        // The callback invoked once the connection is accepted:
        // in the http module it is http.c:ngx_http_init_connection,
        // in the stream module it is ngx_stream_init_connection.
        ls->handler(c);

        // not for epoll
        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

        // With ev->available = ecf->multi_accept, epoll accepts as many
        // requests as possible until accept fails with EAGAIN, i.e. no new
        // connections are pending; otherwise the loop exits after a single
        // accepted request.
    } while (ev->available);
}
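The multi_accept loop plus the ENOSYS fallback above reduce to a compact standalone pattern. A sketch (drain_accept is an illustrative helper; accept4 with SOCK_NONBLOCK is Linux-specific):

#define _GNU_SOURCE
#include <errno.h>
#include <sys/socket.h>

/* Drain every pending connection on listen_fd, preferring accept4() and
 * permanently falling back to accept() if the kernel lacks it (ENOSYS),
 * the same strategy as the static use_accept4 flag above.  Returns the
 * number accepted, or -1 on a real error. */
static int
drain_accept(int listen_fd, void (*on_conn)(int fd))
{
    static int use_accept4 = 1;
    int n = 0;

    for ( ;; ) {
        int s = use_accept4
                ? accept4(listen_fd, NULL, NULL, SOCK_NONBLOCK)
                : accept(listen_fd, NULL, NULL);

        if (s == -1) {
            if (errno == ENOSYS && use_accept4) {
                use_accept4 = 0;          /* old kernel: retry with accept() */
                continue;
            }
            if (errno == EAGAIN || errno == EWOULDBLOCK) {
                return n;                 /* queue drained: the normal exit  */
            }
            if (errno == ECONNABORTED) {
                continue;                 /* client gave up; just move on    */
            }
            return -1;
        }

        on_conn(s);
        n++;
    }
}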
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
        ev->available = 1;

    } else if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        /*
         * After accepting a new connection, ngx_accept_disabled is
         * recomputed; it drives the load balancing mentioned earlier.
         * Here we can see how it is computed: "one eighth of the total
         * connections minus the free connections", where the total is the
         * per-process maximum connection count, which can be set in the
         * configuration file.  So once a process has used more than 7/8 of
         * its connections, ngx_accept_disabled goes above zero: the process
         * is overloaded.
         */
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        // grab a connection
        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        // Create a memory pool for the new connection;
        // the pool is freed only when the connection is closed.
        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for aio and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_AIO_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            // with the epoll model the connection is set
            // to non-blocking here
            if (!(ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT))) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        // initialize the new connection
        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT)) {
            /* rtsig, aio, iocp */
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

#if (NGX_THREADS)
        rev->lock = &c->lock;
        wev->lock = &c->lock;
        rev->own_lock = &c->lock;
        wev->own_lock = &c->lock;
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                         & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }
        }
#endif

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                       "*%d accept: %V fd:%d", c->number, &c->addr_text, s);

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        /*
         * The listen handler is important here: it finishes the last
         * initialization steps for the new connection and puts the accepted
         * connection into epoll.  The function hung on this handler is
         * ngx_http_init_connection, covered in detail later with the
         * http module.
         */
        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);
}
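ngx_nonblocking, used above to make the accepted socket non-blocking, prefers ioctl(FIONBIO) where available; a portable fcntl equivalent looks like this (set_nonblocking is an illustrative helper):

#include <fcntl.h>

/* Portable fallback: merge O_NONBLOCK into the fd's flags.
 * nginx's ngx_nonblocking prefers ioctl(FIONBIO) where available,
 * which does the same thing in a single syscall. */
static int
set_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL, 0);

    if (flags == -1) {
        return -1;
    }

    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}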
/* handle a new-connection event */
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    /* get the configuration structure of the ngx_event_core_module module */
    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
        ev->available = 1;

    } else if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;       /* the connection object the event belongs to */
    ls = lc->listening;  /* the listening object of that connection */
    ev->ready = 0;       /* mark the event as not ready */

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

        /* accept a new connection */
#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        /* handle errors in establishing the connection */
        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        /*
         * ngx_accept_disabled is the load-balancing threshold, indicating
         * whether the process is overloaded; it is set to one eighth of the
         * per-process maximum connection count minus the free connections.
         * So once the process has accepted active connections beyond 7/8 of
         * its maximum, ngx_accept_disabled is greater than 0, meaning the
         * process is carrying too much load.
         */
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        /* take a connection from the connections array to manage
           the new connection */
        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        /* create a pool for the new connection; it is released only when
           the connection is closed */
        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for aio and non-blocking mode for others */

        /* set the socket attributes */
        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_AIO_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            /* with the epoll model the socket is set to non-blocking mode */
            if (!(ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT))) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        /* initialize the new connection */
        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        /* get the new connection's read and write events */
        rev = c->read;
        wev = c->write;

        /* the write event is ready */
        wev->ready = 1;

        if (ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT)) {
            /* rtsig, aio, iocp */
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

#if (NGX_THREADS)
        rev->lock = &c->lock;
        wev->lock = &c->lock;
        rev->own_lock = &c->lock;
        wev->own_lock = &c->lock;
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                         & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }
        }
#endif

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                       "*%uA accept: %V fd:%d", c->number, &c->addr_text, s);

        /* register the new connection with the event model (skipped for
           epoll, which adds the events later on demand) */
        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        /*
         * Invoke the callback that finishes initializing the new connection;
         * for HTTP this is ngx_http_init_connection.
         */
        ls->handler(c);

        /* adjust the event's available flag; when set, nginx establishes as
           many new connections as possible in one pass */
        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);
}
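The NGX_DEBUG block raises the log level only for clients matching a debug_connection CIDR, and the AF_INET branch is a plain mask-and-compare. A self-contained sketch of the same IPv4 test (cidr_match is an illustrative helper):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* 1 if addr falls inside net/prefix, 0 otherwise -- the same
 * (addr & mask) == net test as the AF_INET branch above. */
static int
cidr_match(const char *addr_s, const char *net_s, int prefix)
{
    struct in_addr  addr, net;
    uint32_t        mask;

    if (inet_pton(AF_INET, addr_s, &addr) != 1
        || inet_pton(AF_INET, net_s, &net) != 1)
    {
        return 0;
    }

    mask = prefix ? htonl(~0u << (32 - prefix)) : 0;

    return (addr.s_addr & mask) == (net.s_addr & mask);
}

int
main(void)
{
    printf("%d\n", cidr_match("192.168.1.77", "192.168.1.0", 24));  /* 1 */
    printf("%d\n", cidr_match("10.0.0.5", "192.168.1.0", 24));      /* 0 */
    return 0;
}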