ngx_int_t
ngx_handle_read_event(ngx_event_t *rev, ngx_uint_t flags)
{
    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue, epoll */

        if (!rev->active && !rev->ready) {
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }

        return NGX_OK;

    } else if (ngx_event_flags & NGX_USE_LEVEL_EVENT) {

        /* select, poll, /dev/poll */

        if (!rev->active && !rev->ready) {
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (rev->active && (rev->ready || (flags & NGX_CLOSE_EVENT))) {
            if (ngx_del_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT | flags)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

    } else if (ngx_event_flags & NGX_USE_EVENTPORT_EVENT) {

        /* event ports */

        if (!rev->active && !rev->ready) {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (rev->oneshot && !rev->ready) {
            if (ngx_del_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }
    }

    /* aio, iocp, rtsig */

    return NGX_OK;
}
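/*
 * A minimal usage sketch (not part of the original sources): a typical
 * read handler re-arms itself with ngx_handle_read_event() after a
 * partial read, letting the function above register or keep the event
 * as the active event method requires.  The handler name and the
 * processing step are hypothetical.
 */

static void
example_read_handler(ngx_event_t *rev)
{
    ngx_connection_t  *c = rev->data;

    /* ... recv() from c->fd and process whatever arrived ... */

    if (ngx_handle_read_event(rev, 0) != NGX_OK) {
        ngx_close_connection(c);
    }
}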
void
ngx_postgres_upstream_free_connection(ngx_log_t *log, ngx_connection_t *c,
    PGconn *pgconn, ngx_postgres_upstream_srv_conf_t *pgscf)
{
    ngx_event_t  *rev, *wev;

    dd("entering");

    PQfinish(pgconn);

    if (c) {
        rev = c->read;
        wev = c->write;

        if (rev->timer_set) {
            ngx_del_timer(rev);
        }

        if (wev->timer_set) {
            ngx_del_timer(wev);
        }

        if (ngx_del_conn) {
            ngx_del_conn(c, NGX_CLOSE_EVENT);

        } else {
            if (rev->active || rev->disabled) {
                ngx_del_event(rev, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }

            if (wev->active || wev->disabled) {
                ngx_del_event(wev, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
            }
        }

        if (rev->prev) {
            ngx_delete_posted_event(rev);
        }

        if (wev->prev) {
            ngx_delete_posted_event(wev);
        }

        rev->closed = 1;
        wev->closed = 1;

#if defined(nginx_version) && (nginx_version >= 1001004)
        if (c->pool) {
            ngx_destroy_pool(c->pool);
        }
#endif

        ngx_free_connection(c);
    }

    /* free spot in keepalive connection pool */
    pgscf->active_conns--;

    dd("returning");
}
void
ngx_zeromq_close(ngx_connection_t *c)
{
    void  *zmq;

    if (c->fd == -1) {
        return;
    }

    zmq = c->data;

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
                   "zmq_close: zmq:%p fd:%d #%d", zmq, c->fd, c->number);

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);
    ngx_free_connection(c);

    c->fd = (ngx_socket_t) -1;

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(ngx_cycle->log, "zmq_close()");
    }
}
void
ngx_close_listening_sockets(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        return;
    }

    ngx_accept_mutex_held = 0;
    ngx_use_accept_mutex = 0;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c->read->active) {
            if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
                ngx_del_conn(c, NGX_CLOSE_EVENT);

            } else if (ngx_event_flags & NGX_USE_EPOLL_EVENT) {

                /*
                 * it seems that Linux-2.6.x OpenVZ sends events
                 * for closed shared listening sockets unless
                 * the events were explicitly deleted
                 */

                ngx_del_event(c->read, NGX_READ_EVENT, 0);

            } else {
                ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }
        }

        ngx_free_connection(c);

        c->fd = (ngx_socket_t) -1;

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                       "close listening %V #%d ", &ls[i].addr_text, ls[i].fd);

        if (ngx_close_socket(ls[i].fd) == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                          ngx_close_socket_n " %V failed", &ls[i].addr_text);
        }
    }
}
/*
 * Delete the read events on the listening sockets.  A worker that does
 * not hold the accept mutex must remove these events first, otherwise it
 * would keep accepting connections it is not entitled to.
 */
static ngx_int_t
ngx_disable_accept_events(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (!c->read->active) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_del_conn(c, NGX_DISABLE_EVENT) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /*
             * delete the read event; some poll-style methods still leave
             * a write event registered, which feels a bit awkward here
             */
            if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }
    }

    return NGX_OK;
}
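/*
 * A minimal sketch (not part of the original sources) of the accept-mutex
 * pattern the comment above refers to, modeled on nginx's
 * ngx_trylock_accept_mutex(): a worker that wins the lock arms its accept
 * events, a worker that loses it disables them.  Simplified; the real
 * function also tracks an ngx_accept_events flag, and
 * ngx_enable_accept_events() is a static helper in ngx_event_accept.c.
 */

ngx_int_t
example_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        /* got the lock: make sure our listening sockets are armed */
        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 1;
        return NGX_OK;
    }

    /* lost the race: another worker accepts, so stop listening ourselves */
    if (ngx_accept_mutex_held) {
        if (ngx_disable_accept_events(cycle) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}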
static ngx_int_t
ngx_disable_accept_events(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (!c->read->active) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_del_conn(c, NGX_DISABLE_EVENT) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }
    }

    return NGX_OK;
}
static ngx_int_t
ngx_rtmp_exec_kill(ngx_rtmp_session_t *s, ngx_rtmp_exec_t *e, ngx_int_t term)
{
    ngx_log_debug1(NGX_LOG_DEBUG_RTMP, s->connection->log, 0,
                   "exec: terminating child %i", (ngx_int_t) e->pid);

    if (e->respawn_evt.timer_set) {
        ngx_del_timer(&e->respawn_evt);
    }

    ngx_del_event(&e->read_evt, NGX_READ_EVENT, 0);

    e->active = 0;
    close(e->pipefd);

    if (!term) {
        return NGX_OK;
    }

    if (kill(e->pid, SIGKILL) == -1) {
        ngx_log_error(NGX_LOG_INFO, s->connection->log, ngx_errno,
                      "exec: kill failed pid=%i", (ngx_int_t) e->pid);
    } else {
        ngx_log_debug1(NGX_LOG_DEBUG_RTMP, s->connection->log, 0,
                       "exec: killed pid=%i", (ngx_int_t) e->pid);
    }

    return NGX_OK;
}
ngx_socket_t
ngx_tcp_reuse_get_active_conn(ngx_log_t *log)
{
    ngx_socket_t           fd = -1;
    ngx_err_t              err;
    u_char                 test[2];
    ngx_queue_t           *head_conn;
    ngx_tcp_reuse_conn_t  *active_conn;

    while (!ngx_queue_empty(&active_conns)) {
        head_conn = ngx_queue_head(&active_conns);
        active_conn = ngx_queue_data(head_conn, ngx_tcp_reuse_conn_t, q_elt);
        fd = active_conn->fd;

        if (active_conn->read.timer_set) {
            ngx_del_timer(&active_conn->read);
        }

        if (active_conn->write.timer_set) {
            ngx_del_timer(&active_conn->write);
        }

        if (active_conn->read.active) {
            ngx_del_event(&active_conn->read, NGX_READ_EVENT,
                          NGX_CLOSE_EVENT);
        }

        if (active_conn->write.active) {
            ngx_del_event(&active_conn->write, NGX_WRITE_EVENT,
                          NGX_CLOSE_EVENT);
        }

        ngx_queue_remove(&active_conn->q_elt);
        ngx_memzero(active_conn, sizeof(ngx_tcp_reuse_conn_t));
        ngx_queue_insert_tail(&empty_conns, &active_conn->q_elt);

        /* a zero-length recv() probes whether the peer has closed */
        if (recv(fd, test, 0, 0) == 0) {
            ngx_log_debug(NGX_LOG_DEBUG_HTTP, log, 0, "0 : errno:%d, %s",
                          ngx_socket_errno, strerror(errno));
            close(fd);
            fd = -1;

        } else {
            ngx_log_debug(NGX_LOG_DEBUG_HTTP, log, 0, "!0 : errno:%d, %s",
                          ngx_socket_errno, strerror(errno));
            err = ngx_socket_errno;

            if (err == 11) {    /* EAGAIN on Linux: connection still alive */
                break;

            } else {
                close(fd);
                fd = -1;
            }
        }

        ngx_log_debug(NGX_LOG_DEBUG_HTTP, log, 0, "fd:%d", fd);
    }

    return fd;
}
void
redis_nginx_del_write(void *privdata)
{
    ngx_connection_t  *connection = (ngx_connection_t *) privdata;

    if (connection->write->active
        && redis_nginx_fd_is_valid(connection->fd))
    {
        if (ngx_del_event(connection->write, NGX_WRITE_EVENT, 0)
            == NGX_ERROR)
        {
            ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0,
                          "redis_nginx_adapter: could not delete write "
                          "event to redis");
        }
    }
}
void
ngx_close_listening_sockets(ngx_cycle_t *cycle)
{
    ngx_uint_t        i;
    ngx_socket_t      fd;
    ngx_listening_t  *ls;

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        return;
    }

    ngx_accept_mutex_held = 0;
    ngx_accept_mutex = NULL;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {
        fd = ls[i].fd;

#if (WIN32)
        /*
         * Winsock assigns socket numbers divisible by 4,
         * so to find a connection we divide a socket number by 4.
         */

        fd /= 4;
#endif

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (cycle->connections[fd].read->active) {
                ngx_del_conn(&cycle->connections[fd], NGX_CLOSE_EVENT);
            }

        } else {
            if (cycle->read_events[fd].active) {
                ngx_del_event(&cycle->read_events[fd],
                              NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }
        }

        if (ngx_close_socket(ls[i].fd) == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                          ngx_close_socket_n " %s failed",
                          ls[i].addr_text.data);
        }

        cycle->connections[fd].fd = (ngx_socket_t) -1;
    }
}
static void
destroy_dummy_conn(ngx_connection_t *c)
{
    if (c == NULL) {
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, 0);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, 0);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, 0);
        }
    }

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    close(c->fd);
    c->fd = -1;

    c->read->closed = 1;
    c->write->closed = 1;

    free(c->read);
    free(c->write);
    free(c);
}
static void
ngx_rtmp_netcall_send(ngx_event_t *wev)
{
    ngx_rtmp_netcall_session_t  *cs;
    ngx_connection_t            *cc;
    ngx_chain_t                 *cl;

    cc = wev->data;
    cs = cc->data;

    if (cc->destroyed) {
        return;
    }

    if (wev->timedout) {
        ngx_log_error(NGX_LOG_INFO, cc->log, NGX_ETIMEDOUT,
                      "netcall: client send timed out");
        cc->timedout = 1;
        ngx_rtmp_netcall_close(cc);
        return;
    }

    if (wev->timer_set) {
        ngx_del_timer(wev);
    }

    cl = cc->send_chain(cc, cs->out, 0);

    if (cl == NGX_CHAIN_ERROR) {
        ngx_rtmp_netcall_close(cc);
        return;
    }

    cs->out = cl;

    /* more data to send? */

    if (cl) {
        ngx_add_timer(wev, cs->timeout);
        if (ngx_handle_write_event(wev, 0) != NGX_OK) {
            ngx_rtmp_netcall_close(cc);
        }
        return;
    }

    /*
     * we've sent everything we had;
     * now receive the reply
     */

    ngx_del_event(wev, NGX_WRITE_EVENT, 0);
    ngx_rtmp_netcall_recv(cc->read);
}
static void
ngx_open_file_del_event(ngx_cached_open_file_t *file)
{
    if (file->event == NULL) {
        return;
    }

    (void) ngx_del_event(file->event, NGX_VNODE_EVENT,
                         file->count ? NGX_FLUSH_EVENT : NGX_CLOSE_EVENT);

    ngx_free(file->event->data);
    ngx_free(file->event);
    file->event = NULL;
    file->use_event = 0;
}
static void
ngx_rpc_notify_write_handler(ngx_event_t *ev)
{
    ngx_connection_t  *notify_con = ev->data;

    /* this module stores its notify object in the connection's sockaddr field */
    ngx_rpc_notify_t  *notify = (ngx_rpc_notify_t *) notify_con->sockaddr;

    ngx_log_debug(NGX_LOG_DEBUG_ALL, ev->log, 0,
                  "ngx_rpc_notify_write_handler notify:%p eventfd:%d",
                  notify, notify->event_fd);

    ngx_del_event(notify_con->write, NGX_WRITE_EVENT, 0);

    notify->write_hanlder(notify->ctx);  /* sic: field name in ngx_rpc_notify_t */
}
void
ngx_tcp2http_block_reading(ngx_http_request_t *r)
{
    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                   "http reading blocked");

    /* aio does not call this handler */

    if ((ngx_event_flags & NGX_USE_LEVEL_EVENT)
        && r->connection->read->active)
    {
        if (ngx_del_event(r->connection->read, NGX_READ_EVENT, 0) != NGX_OK) {
            ngx_tcp2http_close_request(r, 0);
        }
    }
}
/*
 * Walk the listening-socket list and delete the read (accept) events
 * from epoll, so this worker stops accepting new connections.
 */
static ngx_int_t
ngx_disable_accept_events(ngx_cycle_t *cycle, ngx_uint_t all)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    /* walk the listening sockets */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        /*
         * skip sockets that are not being listened on (read event
         * inactive); rtsig support was removed in nginx 1.9.x
         */
        if (c == NULL || !c->read->active) {
            continue;
        }

#if (NGX_HAVE_REUSEPORT)

        /*
         * do not disable accept on worker's own sockets
         * when disabling accept events due to accept mutex
         */

        if (ls[i].reuseport && !all) {
            continue;
        }

#endif

        /*
         * delete the read event, i.e. stop accepting connections;
         * the NGX_DISABLE_EVENT flag is currently unused
         */
        if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
            == NGX_ERROR)
        {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
static ngx_int_t
ngx_rtmp_exec_kill(ngx_rtmp_exec_t *e, ngx_int_t kill_signal)
{
    if (e->respawn_evt.timer_set) {
        ngx_del_timer(&e->respawn_evt);
    }

    if (e->read_evt.active) {
        ngx_del_event(&e->read_evt, NGX_READ_EVENT, 0);
    }

    if (e->active == 0) {
        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_RTMP, e->log, 0,
                   "exec: terminating child %i", (ngx_int_t) e->pid);

    e->active = 0;
    close(e->pipefd);

    if (e->save_pid) {
        *e->save_pid = NGX_INVALID_PID;
    }

    if (kill_signal == 0) {
        return NGX_OK;
    }

    if (kill(e->pid, kill_signal) == -1) {
        ngx_log_error(NGX_LOG_INFO, e->log, ngx_errno,
                      "exec: kill failed pid=%i", (ngx_int_t) e->pid);
    } else {
        ngx_log_debug1(NGX_LOG_DEBUG_RTMP, e->log, 0,
                       "exec: killed pid=%i", (ngx_int_t) e->pid);
    }

    return NGX_OK;
}
static ngx_int_t
ngx_disable_accept_events(ngx_cycle_t *cycle, ngx_uint_t all)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c == NULL || !c->read->active) {
            continue;
        }

#if (NGX_HAVE_REUSEPORT)

        /*
         * do not disable accept on worker's own sockets
         * when disabling accept events due to accept mutex
         */

        if (ls[i].reuseport && !all) {
            continue;
        }

#endif

        if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
            == NGX_ERROR)
        {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (!c->shared) {
        if (ngx_del_conn) {
            ngx_del_conn(c, NGX_CLOSE_EVENT);

        } else {
            if (c->read->active || c->read->disabled) {
                ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }

            if (c->write->active || c->write->disabled) {
                ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
            }
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (c->shared) {
        return;
    }

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        ngx_log_error(level, c->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
static void
ngx_http_zm_sso_check_broken_connection(ngx_http_request_t *r,
    ngx_event_t *ev)
{
    ngx_connection_t  *c;
    int                n;
    char               buf[1];
    ngx_err_t          err;
    ngx_int_t          event;

    c = ev->data;
    r = c->data;

    if (c->error) {
        if ((ngx_event_flags & NGX_USE_LEVEL_EVENT) && ev->active) {

            event = ev->write ? NGX_WRITE_EVENT : NGX_READ_EVENT;

            if (ngx_del_event(ev, event, 0) != NGX_OK) {
                ngx_http_finalize_request(r,
                                          NGX_HTTP_INTERNAL_SERVER_ERROR);
                return;
            }
        }

        ngx_http_finalize_request(r, NGX_HTTP_CLIENT_CLOSED_REQUEST);
        return;
    }

    n = recv(c->fd, buf, 1, MSG_PEEK);
    err = ngx_socket_errno;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ev->log, err,
                   "zm sso recv(): %d", n);

    if (ev->write && (n >= 0 || err == NGX_EAGAIN)) {
        return;
    }

    if ((ngx_event_flags & NGX_USE_LEVEL_EVENT) && ev->active) {

        event = ev->write ? NGX_WRITE_EVENT : NGX_READ_EVENT;

        if (ngx_del_event(ev, event, 0) != NGX_OK) {
            ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
            return;
        }
    }

    if (n > 0) {
        return;
    }

    if (n == -1) {
        if (err == NGX_EAGAIN) {
            return;
        }

        ev->error = 1;

    } else { /* n == 0 */
        err = 0;
    }

    ev->eof = 1;
    c->error = 1;

    ngx_log_error(NGX_LOG_INFO, ev->log, err,
                  "client prematurely closed connection "
                  "during sso authentication");

    ngx_http_finalize_request(r, NGX_HTTP_CLIENT_CLOSED_REQUEST);
}
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;
        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        rev->handler = ngx_event_accept;

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
ngx_int_t
ngx_handle_write_event(ngx_event_t *wev, size_t lowat)
{
    ngx_connection_t  *c;

    if (lowat) {
        c = wev->data;

        if (ngx_send_lowat(c, lowat) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue, epoll */

        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT,
                              NGX_CLEAR_EVENT | (lowat ? NGX_LOWAT_EVENT : 0))
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }

        return NGX_OK;

    } else if (ngx_event_flags & NGX_USE_LEVEL_EVENT) {

        /* select, poll, /dev/poll */

        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (wev->active && wev->ready) {
            if (ngx_del_event(wev, NGX_WRITE_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

    } else if (ngx_event_flags & NGX_USE_EVENTPORT_EVENT) {

        /* event ports */

        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (wev->oneshot && wev->ready) {
            if (ngx_del_event(wev, NGX_WRITE_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }
    }

    /* aio, iocp, rtsig */

    return NGX_OK;
}
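/*
 * A minimal usage sketch (not part of the original sources): a write
 * handler keeps the write event armed while output is still buffered,
 * so ngx_handle_write_event() can add or drop the event as the active
 * event method requires.  The handler name and the sending step are
 * hypothetical; compare ngx_rtmp_netcall_send() above for a real caller.
 */

static void
example_write_handler(ngx_event_t *wev)
{
    ngx_connection_t  *c = wev->data;

    /* ... call c->send_chain(...) until the chain is empty or blocks ... */

    if (ngx_handle_write_event(wev, 0) != NGX_OK) {
        ngx_close_connection(c);
    }
}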
/*
 * [analy] Add a write event to the event mechanism.
 * Parameter lowat: if greater than 0, the send buffer low-water mark is set.
 */
ngx_int_t
ngx_handle_write_event(ngx_event_t *wev, size_t lowat)
{
    ngx_connection_t  *c;

    if (lowat) {
        /* set the send buffer low-water mark */
        c = wev->data;

        if (ngx_send_lowat(c, lowat) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {    /* edge-triggered mode */

        /* kqueue, epoll */

        /* if the event is not yet in the epoll set, add the write event */
        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT,
                              NGX_CLEAR_EVENT | (lowat ? NGX_LOWAT_EVENT : 0))
                /* NGX_CLEAR_EVENT = EPOLLET */
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }

        return NGX_OK;

    } else if (ngx_event_flags & NGX_USE_LEVEL_EVENT) {  /* level-triggered mode */

        /* select, poll, /dev/poll */

        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (wev->active && wev->ready) {
            if (ngx_del_event(wev, NGX_WRITE_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

    } else if (ngx_event_flags & NGX_USE_EVENTPORT_EVENT) {

        /* event ports */

        if (!wev->active && !wev->ready) {
            if (ngx_add_event(wev, NGX_WRITE_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (wev->oneshot && wev->ready) {
            if (ngx_del_event(wev, NGX_WRITE_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }
    }

    /* aio, iocp, rtsig */

    return NGX_OK;
}
/*
 * [analy] Add a read event to the epoll set; the handling differs
 * depending on the trigger mode.
 */
ngx_int_t
ngx_handle_read_event(ngx_event_t *rev, ngx_uint_t flags)
{
    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {    /* [analy] edge-triggered */

        /* kqueue, epoll */

        /*
         * the event is inactive (not yet added to the epoll set) and not
         * ready (no pending read has arrived on this connection)
         */
        if (!rev->active && !rev->ready) {

            /*
             * [analy] calls ngx_epoll_add_event(); with epoll the
             * NGX_CLEAR_EVENT macro is defined as EPOLLET
             */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }
        }

        return NGX_OK;

    } else if (ngx_event_flags & NGX_USE_LEVEL_EVENT) {  /* [analy] level-triggered */

        /* select, poll, /dev/poll */

        if (!rev->active && !rev->ready) {
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (rev->active && (rev->ready || (flags & NGX_CLOSE_EVENT))) {
            if (ngx_del_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT | flags)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

    } else if (ngx_event_flags & NGX_USE_EVENTPORT_EVENT) {

        /* event ports */

        if (!rev->active && !rev->ready) {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }

        if (rev->oneshot && !rev->ready) {
            if (ngx_del_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }

            return NGX_OK;
        }
    }

    /* aio, iocp, rtsig */

    return NGX_OK;
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (NGX_THREADS)

    /*
     * we have to clean the connection information before the closing
     * because another thread may reopen the same file descriptor
     * before we clean the connection
     */

    ngx_mutex_lock(ngx_posted_events_mutex);

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    if (c->single_connection) {
        ngx_unlock(&c->lock);
        c->read->locked = 0;
        c->write->locked = 0;
    }

    ngx_mutex_unlock(ngx_posted_events_mutex);

#else

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

#endif

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
/* close the descriptors already opened in the cycle's listening array */
void
ngx_close_listening_sockets(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        return;
    }

    ngx_accept_mutex_held = 0;
    ngx_use_accept_mutex = 0;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c) {
            if (c->read->active) {
                if (ngx_event_flags & NGX_USE_EPOLL_EVENT) {

                    /*
                     * it seems that Linux-2.6.x OpenVZ sends events
                     * for closed shared listening sockets unless
                     * the events were explicitly deleted
                     */

                    ngx_del_event(c->read, NGX_READ_EVENT, 0);

                } else {
                    ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
                }
            }

            ngx_free_connection(c);

            c->fd = (ngx_socket_t) -1;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                       "close listening %V #%d ", &ls[i].addr_text, ls[i].fd);

        if (ngx_close_socket(ls[i].fd) == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                          ngx_close_socket_n " %V failed", &ls[i].addr_text);
        }

#if (NGX_HAVE_UNIX_DOMAIN)

        if (ls[i].sockaddr->sa_family == AF_UNIX
            && ngx_process <= NGX_PROCESS_MASTER
            && ngx_new_binary == 0)
        {
            u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1;

            if (ngx_delete_file(name) == NGX_FILE_ERROR) {
                ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                              ngx_delete_file_n " %s failed", name);
            }
        }

#endif

        ls[i].fd = (ngx_socket_t) -1;
    }

    cycle->listening.nelts = 0;
}
/*
 * Called during worker initialization after fork(), i.e. in every worker:
 * initializes the two posted-event queues and the timer red-black tree,
 * arms the periodic timer signal used to update the cached time,
 * initializes the cycle's connection and event arrays, and sets the
 * accept callback to ngx_event_accept so connections can be accepted.
 */
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    /* configuration of the core module */
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    /* configuration of the event_core module */
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /* master/worker mode with several workers and accept_mutex enabled */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {

        /*
         * set the globals: load balancing is on, the lock is not held
         * yet, and the retry delay for taking the lock is configured
         */
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        /*
         * single process, or load balancing not explicitly requested:
         * do not use the accept mutex
         */
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

    /* initialize the two queues of posted (deferred) events */
    ngx_queue_init(&ngx_posted_accept_events);
    ngx_queue_init(&ngx_posted_events);

    /* initialize the timer red-black tree */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    /* iterate over the event modules, initializing only the one in use */
    for (m = 0; cycle->modules[m]; m++) {
        if (cycle->modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        /* find the event model selected by "use", or the default one */
        if (cycle->modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = cycle->modules[m]->ctx;

        /*
         * call the event module's init function; for epoll this calls
         * epoll_create (its size argument, cycle->connection_n / 2, has
         * no real meaning today), sets the globals for the low-level I/O
         * interface, and points the event API at the epoll functions;
         * edge-triggered mode is used by default
         */
        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

    /* unix-only code: a periodic signal used to update the cached time */
#if !(NGX_WIN32)

    /*
     * NGX_USE_TIMER_EVENT is set only for eventport/kqueue, not epoll;
     * ngx_timer_resolution (ccf->timer_resolution) defaults to 0, so the
     * signal is armed only when the timer_resolution directive is used
     */
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        /* install the SIGALRM handler */
        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        /*
         * the signal interval is nginx's time resolution; the handler
         * sets ngx_event_timer_alarm, which ngx_epoll_process_events
         * checks as the flag to update the cached time
         */
        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#else

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                      "the \"timer_resolution\" directive is not supported "
                      "with the configured event method, ignored");
        ngx_timer_resolution = 0;
    }

#endif

    /*
     * create the connection pool, cycle->connection_n elements,
     * allocated with plain malloc rather than a memory pool
     */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* create the read event array, cycle->connection_n elements */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    /* initialize the read event objects */
    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
    }

    /* create the write event array, cycle->connection_n elements */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    /* initialize the write event objects */
    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
    }

    /* i starts at the end of the array */
    i = cycle->connection_n;
    next = NULL;

    /*
     * attach the read/write events to the connection objects;
     * note that the loop walks the array from the end
     */
    do {
        i--;

        /* the data member chains the connections into a list */
        c[i].data = next;

        /* read and write events */
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];

        /* a descriptor of -1 marks the connection as unused */
        c[i].fd = (ngx_socket_t) -1;

        /* next points at the previous array element */
        next = &c[i];
    } while (i);

    /*
     * the connections are now chained into a list; set the free-list
     * head, which at this point is the first element of the array
     */
    cycle->free_connections = next;

    /* no connection is in use yet, so all of them are free */
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    /* assign a connection object to every listening socket */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

#if (NGX_HAVE_REUSEPORT)
        if (ls[i].reuseport && ls[i].worker != ngx_worker) {
            continue;
        }
#endif

        /* take a free connection */
        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->type = ls[i].type;
        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;

        /* set the accept flag: this connection accepts new connections */
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        /*
         * important: set the accept callback, invoked when a connection
         * request arrives on the listening socket; it takes a connection
         * from the cycle's pool and eventually calls the owning module's
         * business handler via ls->handler(c); since 1.10,
         * ngx_event_recvmsg handles udp
         */
        rev->handler = (c->type == SOCK_STREAM) ? ngx_event_accept
                                                : ngx_event_recvmsg;

        /*
         * with the accept mutex the event is not added to epoll here;
         * it is added only after the lock has been won
         */
        if (ngx_use_accept_mutex
#if (NGX_HAVE_REUSEPORT)
            && !ls[i].reuseport
#endif
            )
        {
            /* done with this listening socket; only the handler is set */
            continue;
        }

        /*
         * rtsig is gone since nginx 1.9.x; in single-process mode, or
         * when load balancing was not explicitly requested, the event is
         * added to epoll right away and accepting can start
         */
        if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

#endif

    }

    /* end of the per-listening-socket loop */

    return NGX_OK;
}
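/*
 * A minimal sketch (not part of the original sources) of how the free
 * list built above is consumed: nginx's ngx_get_connection() pops the
 * head of cycle->free_connections through the data link.  Simplified
 * and with a hypothetical name and signature; the real function takes a
 * socket and a log, updates cycle->files, and flips the event instance
 * bits.
 */

ngx_connection_t *
example_get_connection(ngx_cycle_t *cycle)
{
    ngx_connection_t  *c;

    c = cycle->free_connections;

    if (c == NULL) {
        return NULL;    /* pool exhausted: worker_connections too small */
    }

    /* pop the head: the data member links free connections together */
    cycle->free_connections = c->data;
    cycle->free_connection_n--;

    return c;
}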
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    /*
     * [analy] ngx_use_accept_mutex says whether accept() must be
     * serialized with a lock to avoid the thundering-herd problem; it is
     * set to 1 when there is more than one worker process and
     * accept_mutex is enabled in the configuration
     */
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

    /* [analy] initialize the event timer (red-black tree) */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    /* [analy] call the init function of the event module in use (epoll) */
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        /*
         * [analy] nginx implements many event modules (epoll, poll,
         * select, kqueue, aio, ... under src/event/modules), so it puts
         * an abstraction layer over them, which makes it easy to use a
         * different event model on each system and to add new ones.
         * The init callback here actually calls ngx_epoll_init();
         * module->actions wraps all the epoll interface functions, and
         * registering actions (of type ngx_event_actions_t) is how epoll
         * plugs into the event abstraction layer.
         */
        module = ngx_modules[m]->ctx;
        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    /*
     * when the timer_resolution directive specifies a time and the
     * NGX_USE_TIMER_EVENT flag is not set, arm an interval timer with
     * the time given by timer_resolution
     */
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {   /* [analy] not used by the epoll module */
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

    /*
     * [analy] allocate the connection pool; this runs during worker
     * initialization, so every worker gets its own connections pool,
     * sized by the worker_connections directive
     */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    /* [analy] allocate the read event array */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    /* [analy] allocate the write event array */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    /*
     * [analy] initialize the connections array: the data field points
     * at the next element, the read/write event pointers point at the
     * matching slots of read_events/write_events, and fd is set to -1
     */
    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];    /* [analy] link the read event to the same-index slot of read_events */
        c[i].write = &cycle->write_events[i];  /* [analy] link the write event to the same-index slot of write_events */
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    /*
     * initialize the free_connections list and the free connection
     * count; the list head points at the start of the connections pool
     */
    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* [analy] assign a free connection to every listening socket */

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];     /* [analy] the connection's listening pointer refers to cycle->listening[i] */
        ls[i].connection = c;      /* [analy] cycle->listening[i].connection refers to the acquired free connection */

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;           /* mark this as a listening-socket event */

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        rev->handler = ngx_event_accept;    /* [analy] set the accept callback */

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            /* [analy] add the event to the epoll set */
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    /* first remove the connection's read/write events from the timer tree */
    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        /* remove the read/write events from epoll */
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    /*
     * ngx_free_connection() returns the ngx_connection_t structure that
     * represents the connection to the free connection pool of the
     * ngx_cycle_t core structure
     */
    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {   /* the close() system call */

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
/*
 * ngx_http_close_request() is the higher-level method for closing a
 * request; HTTP modules normally do not call it directly either.  It is
 * the function that checks the reference count repeatedly mentioned in
 * the preceding sections, and only when the count drops to zero does it
 * call ngx_http_free_request() and ngx_http_close_connection()
 * (ngx_close_connection) to release the request and close the
 * connection; see ngx_http_close_request.
 */
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    /*
     * First remove the connection's read/write events from the timer
     * tree.  In practice this just checks each event's timer_set flag:
     * if it is 1 the event is in the timer tree and must be removed
     * with ngx_del_timer.
     */
    if (c->read->timer_set) {
        ngx_del_timer(c->read, NGX_FUNC_LINE);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write, NGX_FUNC_LINE);
    }

    /*
     * Remove the read/write events from epoll with the ngx_del_conn
     * macro (or ngx_del_event), i.e. the del_conn method of the
     * ngx_event_actions_t interface; with the epoll module this removes
     * the connection's read/write events from epoll.  If an event sits
     * in the ngx_posted_accept_events or ngx_posted_events queue, it
     * must also be removed with ngx_delete_posted_event.
     */
    if (ngx_del_conn) {    /* ngx_epoll_del_connection */
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);    /* ngx_epoll_del_event */
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    /*
     * ngx_free_connection() returns the ngx_connection_t structure to
     * the free_connections pool of the ngx_cycle_t core structure.
     */
    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    ngx_log_debugall(ngx_cycle->log, 0, "close socket:%d", fd);

    /* close this TCP socket with the close() call provided by the system */
    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /*
         * we use ngx_cycle->log because c->log was in c->pool;
         * c was already released above, so c->log must not be used
         */
        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
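/*
 * A minimal sketch (not part of the original sources) of the
 * reference-counting pattern described in the comment above, modeled on
 * nginx's ngx_http_close_request(): the count is decremented, and only
 * when it reaches zero are the request freed and the connection closed.
 * Simplified; the real function also switches to r->main and checks
 * r->blocked.
 */

static void
example_close_request(ngx_http_request_t *r, ngx_int_t rc)
{
    ngx_connection_t  *c = r->connection;

    r->count--;

    if (r->count) {
        /* other actors (subrequests, aio, ...) still reference the request */
        return;
    }

    ngx_http_free_request(r, rc);
    ngx_close_connection(c);
}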