ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event,
    ngx_event_handler_pt handler)
{
    syslog(LOG_INFO, "[%s:%s:%d]\n", __FILE__, __func__, __LINE__);

    ngx_event_t       *ev, *rev, *wev;
    ngx_connection_t  *c;

    c = ngx_get_connection(fd, cycle->log);

    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    rev = c->read;
    wev = c->write;

    rev->log = cycle->log;
    wev->log = cycle->log;

#if (NGX_THREADS)
    rev->lock = &c->lock;
    wev->lock = &c->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;
#endif

    rev->channel = 1;
    wev->channel = 1;

    ev = (event == NGX_READ_EVENT) ? rev : wev;

    ev->handler = handler;

    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event,
    ngx_event_handler_pt handler)
{
    ngx_event_t       *ev, *rev, *wev;
    ngx_connection_t  *c;

    c = ngx_get_connection(fd, cycle->log);

    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    rev = c->read;
    wev = c->write;

    rev->log = cycle->log;
    wev->log = cycle->log;

    rev->channel = 1;
    wev->channel = 1;

    ev = (event == NGX_READ_EVENT) ? rev : wev;

    ev->handler = handler;

    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
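/*
 * Usage sketch: how a worker process hooks its inherited channel fd into
 * the event loop. This mirrors the call made by stock nginx in
 * ngx_worker_process_init() (ngx_channel and ngx_channel_handler are the
 * stock globals/callback); treat it as illustrative, not a drop-in.
 */
static void
register_channel_sketch(ngx_cycle_t *cycle)
{
    if (ngx_add_channel_event(cycle, ngx_channel, NGX_READ_EVENT,
                              ngx_channel_handler)
        == NGX_ERROR)
    {
        /* fatal for this worker */
        exit(2);
    }
}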
void
redis_nginx_cleanup(void *privdata)
{
    if (privdata) {
        ngx_connection_t *connection = (ngx_connection_t *) privdata;
        redisAsyncContext *ac = (redisAsyncContext *) connection->data;

        if (ac->err) {
            nchan_store_redis_connection_close_handler(ac);
            //ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0, "redis_nginx_adapter: connection to redis failed - %s", ac->errstr);

            /*
             * If the context had an error but the fd is still valid, another
             * context has been given the same fd by the OS. Clear the
             * references to this fd on both the redisAsyncContext and the
             * ngx_connection, so that we do not close a socket that is in use.
             */
            if (redis_nginx_fd_is_valid(ac->c.fd)) {
                ac->c.fd = -1;
                connection->fd = NGX_INVALID_FILE;
            }
        }

        if (connection->fd != NGX_INVALID_FILE) {
            redis_nginx_del_read(privdata);
            redis_nginx_del_write(privdata);
            ngx_close_connection(connection);

        } else {
            ngx_free_connection(connection);
        }

        ac->ev.data = NULL;
    }
}
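/*
 * For context, redis_nginx_cleanup() is installed through hiredis's
 * event-adapter hooks. A hedged sketch of that wiring follows;
 * redis_nginx_add_read/redis_nginx_add_write are assumed counterparts of
 * the del callbacks referenced above (the names are assumptions).
 */
static int
redis_nginx_attach_sketch(redisAsyncContext *ac, ngx_connection_t *connection)
{
    if (ac->ev.data != NULL) {
        return REDIS_ERR;                 /* already attached */
    }

    connection->data = ac;                /* back-pointer used by cleanup */

    ac->ev.data = connection;
    ac->ev.addRead = redis_nginx_add_read;
    ac->ev.delRead = redis_nginx_del_read;
    ac->ev.addWrite = redis_nginx_add_write;
    ac->ev.delWrite = redis_nginx_del_write;
    ac->ev.cleanup = redis_nginx_cleanup;

    return REDIS_OK;
}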
// an error occurred; close the connection
static void
ngx_close_accepted_connection(ngx_connection_t *c)
{
    ngx_socket_t  fd;

    // release the connection back to the free list
    // in core/ngx_connection.c
    ngx_free_connection(c);

    // invalidate the connection's descriptor
    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    // close the socket
    if (!c->shared && ngx_close_socket(fd) == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    // free all memory associated with the connection
    if (c->pool) {
        ngx_destroy_pool(c->pool);
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif
}
void
ngx_postgres_upstream_free_connection(ngx_log_t *log, ngx_connection_t *c,
    PGconn *pgconn, ngx_postgres_upstream_srv_conf_t *pgscf)
{
    ngx_event_t  *rev, *wev;

    dd("entering");

    PQfinish(pgconn);

    if (c) {
        rev = c->read;
        wev = c->write;

        if (rev->timer_set) {
            ngx_del_timer(rev);
        }

        if (wev->timer_set) {
            ngx_del_timer(wev);
        }

        if (ngx_del_conn) {
            ngx_del_conn(c, NGX_CLOSE_EVENT);

        } else {
            if (rev->active || rev->disabled) {
                ngx_del_event(rev, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }

            if (wev->active || wev->disabled) {
                ngx_del_event(wev, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
            }
        }

        if (rev->prev) {
            ngx_delete_posted_event(rev);
        }

        if (wev->prev) {
            ngx_delete_posted_event(wev);
        }

        rev->closed = 1;
        wev->closed = 1;

#if defined(nginx_version) && (nginx_version >= 1001004)
        if (c->pool) {
            ngx_destroy_pool(c->pool);
        }
#endif

        ngx_free_connection(c);
    }

    /* free spot in keepalive connection pool */
    pgscf->active_conns--;

    dd("returning");
}
void
ngx_zeromq_close(ngx_connection_t *c)
{
    void  *zmq;

    if (c->fd == -1) {
        return;
    }

    zmq = c->data;

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, c->log, 0,
                   "zmq_close: zmq:%p fd:%d #%d", zmq, c->fd, c->number);

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    ngx_free_connection(c);
    c->fd = (ngx_socket_t) -1;

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(ngx_cycle->log, "zmq_close()");
    }
}
static void
ngx_close_udp_connection(ngx_connection_t *c)
{
    ngx_free_connection(c);

    if (c->pool) {
        ngx_destroy_pool(c->pool);
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif
}
void
ngx_close_listening_sockets(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        return;
    }

    ngx_accept_mutex_held = 0;
    ngx_use_accept_mutex = 0;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c->read->active) {
            if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
                ngx_del_conn(c, NGX_CLOSE_EVENT);

            } else if (ngx_event_flags & NGX_USE_EPOLL_EVENT) {

                /*
                 * it seems that Linux-2.6.x OpenVZ sends events
                 * for closed shared listening sockets unless
                 * the events were explicitly deleted
                 */

                ngx_del_event(c->read, NGX_READ_EVENT, 0);

            } else {
                ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }
        }

        ngx_free_connection(c);

        c->fd = (ngx_socket_t) -1;

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                       "close listening %V #%d ", &ls[i].addr_text, ls[i].fd);

        if (ngx_close_socket(ls[i].fd) == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                          ngx_close_socket_n " %V failed", &ls[i].addr_text);
        }
    }
}
int
ngx_rpc_notify_unregister(ngx_rpc_notify_t *notify)
{
    ngx_log_debug(NGX_LOG_DEBUG_ALL, ngx_cycle->log, 0,
                  "ngx_rpc_notify_unregister notify:%p eventfd:%d",
                  notify, notify->event_fd);

    ngx_del_conn(notify->notify_conn, 0);
    close(notify->event_fd);

    notify->notify_conn->pool = NULL;
    ngx_free_connection(notify->notify_conn);

    return NGX_OK;
}
static ngx_int_t
ngx_http_statsd_udp_send(ngx_udp_endpoint_t *l, u_char *buf, size_t len)
{
    ssize_t                n;
    ngx_udp_connection_t  *uc;

    uc = l->udp_connection;
    if (!uc) {
        return NGX_ERROR;
    }

    if (uc->connection == NULL) {

        uc->log = *l->log;
        uc->log.handler = NULL;
        uc->log.data = NULL;
        uc->log.action = "logging";

        if (ngx_udp_connect(uc) != NGX_OK) {
            if (uc->connection != NULL) {
                ngx_free_connection(uc->connection);
                uc->connection = NULL;
            }

            return NGX_ERROR;
        }

        uc->connection->data = l;
        uc->connection->read->handler = ngx_http_statsd_udp_dummy_handler;
        uc->connection->read->resolver = 0;
    }

    n = ngx_send(uc->connection, buf, len);

    if (n == -1) {
        return NGX_ERROR;
    }

    if ((size_t) n != (size_t) len) {
#if defined nginx_version && nginx_version >= 8032
        ngx_log_error(NGX_LOG_CRIT, &uc->log, 0, "send() incomplete");
#else
        ngx_log_error(NGX_LOG_CRIT, uc->log, 0, "send() incomplete");
#endif
        return NGX_ERROR;
    }

    return NGX_OK;
}
static void
ngx_close_posted_connection(ngx_connection_t *c)
{
    ngx_socket_t  fd;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    if (c->pool) {
        ngx_destroy_pool(c->pool);
    }
}
void
nchan_close_fake_connection(ngx_connection_t *c)
{
    ngx_pool_t        *pool;
    ngx_connection_t  *saved_c = NULL;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http close fake http connection %p", c);

    c->destroyed = 1;

    pool = c->pool;

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    /* we temporarily use a valid fd (0) to make ngx_free_connection happy */

    c->fd = 0;

    if (ngx_cycle->files) {
        saved_c = ngx_cycle->files[0];
    }

    ngx_free_connection(c);

    c->fd = (ngx_socket_t) -1;

    if (ngx_cycle->files) {
        ngx_cycle->files[0] = saved_c;
    }

    if (pool) {
        ngx_destroy_pool(pool);
    }
}
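/*
 * Why the fd-0 dance above is needed (the two lua-nginx-module variants
 * later in this section do the same): ngx_free_connection() in
 * core/ngx_connection.c ends, roughly, with
 *
 *     if (ngx_cycle->files && ngx_cycle->files[c->fd] == c) {
 *         ngx_cycle->files[c->fd] = NULL;
 *     }
 *
 * A fake connection carries fd -1, which would index out of bounds, so the
 * callers temporarily set fd to 0 and save/restore ngx_cycle->files[0]
 * around the call. (Paraphrased from memory of the stock source; verify
 * against your nginx version.)
 */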
static void
ngx_close_accepted_connection(ngx_connection_t *c)
{
    ngx_socket_t  fd;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    if (c->pool) {
        ngx_destroy_pool(c->pool);
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    // first remove the connection's read/write events from the timer tree
    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        // remove the read/write events from epoll
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    // return the ngx_connection_t structure to the free connection pool
    // kept in the ngx_cycle_t core structure
    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {   // close(2) system call

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
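/*
 * A rule of thumb the snippets in this section all follow: if the
 * connection owns its socket, tear everything down with
 * ngx_close_connection(); if the fd belongs to someone else (a foreign
 * library such as ZeroMQ or hiredis, or a shared listening socket),
 * detach the events, call ngx_free_connection() yourself, and invalidate
 * c->fd so nothing closes it later. A condensed sketch of that second
 * path, using only calls that appear in this section (the function name
 * is hypothetical):
 */
static void
release_foreign_fd_connection(ngx_connection_t *c)
{
    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (c->read->active || c->read->disabled) {
        ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
    }

    if (c->write->active || c->write->disabled) {
        ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_free_connection(c);
    c->fd = (ngx_socket_t) -1;   /* the real owner closes the fd */
}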
// close the already-opened sockets in the cycle's listening array
void
ngx_close_listening_sockets(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        return;
    }

    ngx_accept_mutex_held = 0;
    ngx_use_accept_mutex = 0;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c) {
            if (c->read->active) {
                if (ngx_event_flags & NGX_USE_EPOLL_EVENT) {

                    /*
                     * it seems that Linux-2.6.x OpenVZ sends events
                     * for closed shared listening sockets unless
                     * the events were explicitly deleted
                     */

                    ngx_del_event(c->read, NGX_READ_EVENT, 0);

                } else {
                    ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
                }
            }

            ngx_free_connection(c);

            c->fd = (ngx_socket_t) -1;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_CORE, cycle->log, 0,
                       "close listening %V #%d ", &ls[i].addr_text, ls[i].fd);

        if (ngx_close_socket(ls[i].fd) == -1) {
            ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                          ngx_close_socket_n " %V failed", &ls[i].addr_text);
        }

#if (NGX_HAVE_UNIX_DOMAIN)

        if (ls[i].sockaddr->sa_family == AF_UNIX
            && ngx_process <= NGX_PROCESS_MASTER
            && ngx_new_binary == 0)
        {
            u_char *name = ls[i].addr_text.data + sizeof("unix:") - 1;

            if (ngx_delete_file(name) == NGX_FILE_ERROR) {
                ngx_log_error(NGX_LOG_EMERG, cycle->log, ngx_socket_errno,
                              ngx_delete_file_n " %s failed", name);
            }
        }

#endif

        ls[i].fd = (ngx_socket_t) -1;
    }

    cycle->listening.nelts = 0;
}
ngx_int_t
ngx_http_drizzle_handler(ngx_http_request_t *r)
{
    ngx_http_upstream_t          *u;
    ngx_http_drizzle_loc_conf_t  *dlcf;
#if defined(nginx_version) && nginx_version < 8017
    ngx_http_drizzle_ctx_t       *dctx;
#endif
    ngx_http_core_loc_conf_t     *clcf;
    ngx_str_t                     target;
    ngx_url_t                     url;
    ngx_connection_t             *c;

    dd("request: %p", r);
    dd("subrequest in memory: %d", (int) r->subrequest_in_memory);
    dd("connection: %p", r->connection);
    dd("connection log: %p", r->connection->log);

    if (r->subrequest_in_memory) {
        /* TODO: add support for subrequest in memory by
         * emitting output into u->buffer instead */

        ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0,
                      "ngx_http_drizzle_module does not support "
                      "subrequest in memory");

        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    dlcf = ngx_http_get_module_loc_conf(r, ngx_http_drizzle_module);

    if ((dlcf->default_query == NULL) && !(dlcf->methods_set & r->method)) {
        if (dlcf->methods_set != 0) {
            return NGX_HTTP_NOT_ALLOWED;
        }

        clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

        ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                      "drizzle: missing \"drizzle_query\" in location \"%V\"",
                      &clcf->name);

        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    dd("XXX upstream already exists? %p", r->upstream);

#if defined(nginx_version) && \
    ((nginx_version >= 7063 && nginx_version < 8000) \
     || nginx_version >= 8007)

    dd("creating upstream.......");

    if (ngx_http_upstream_create(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    u = r->upstream;

#else /* 0.7.x < 0.7.63, 0.8.x < 0.8.7 */

    dd("XXX create upstream");

    u = ngx_pcalloc(r->pool, sizeof(ngx_http_upstream_t));
    if (u == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    u->peer.log = r->connection->log;
    u->peer.log_error = NGX_ERROR_ERR;
# if (NGX_THREADS)
    u->peer.lock = &r->connection->lock;
# endif

    r->upstream = u;

#endif

    if (dlcf->complex_target) {
        /* variables used in the drizzle_pass directive */

        if (ngx_http_complex_value(r, dlcf->complex_target, &target)
            != NGX_OK)
        {
            dd("failed to compile");
            return NGX_ERROR;
        }

        if (target.len == 0) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "drizzle: handler: empty \"drizzle_pass\" target");
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }

        url.host = target;
        url.port = 0;
        url.no_resolve = 1;

        dlcf->upstream.upstream = ngx_http_upstream_drizzle_add(r, &url);

        if (dlcf->upstream.upstream == NULL) {
            ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
                          "drizzle: upstream \"%V\" not found", &target);
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

#if defined(nginx_version) && nginx_version < 8017
    dctx = ngx_pcalloc(r->pool, sizeof(ngx_http_drizzle_ctx_t));
    if (dctx == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ngx_http_set_ctx(r, dctx, ngx_http_drizzle_module);
#endif

    u->schema.len = sizeof("drizzle://") - 1;
    u->schema.data = (u_char *) "drizzle://";

    u->output.tag = (ngx_buf_tag_t) &ngx_http_drizzle_module;

    dd("drizzle tag: %p", (void *) u->output.tag);

    u->conf = &dlcf->upstream;

    u->create_request = ngx_http_drizzle_create_request;
    u->reinit_request = ngx_http_drizzle_reinit_request;
    u->process_header = ngx_http_drizzle_process_header;
    u->abort_request = ngx_http_drizzle_abort_request;
    u->finalize_request = ngx_http_drizzle_finalize_request;

    /* we bypass the upstream input filter mechanism in
     * ngx_http_upstream_process_headers */

    u->input_filter_init = ngx_http_drizzle_input_filter_init;
    u->input_filter = ngx_http_drizzle_input_filter;
    u->input_filter_ctx = NULL;

#if defined(nginx_version) && nginx_version >= 8011
    r->main->count++;
#endif

    dd("XXX connect timeout: %d", (int) dlcf->upstream.connect_timeout);

    ngx_http_upstream_dbd_init(r);

    /* override the read/write event handler to our own */
    u->write_event_handler = ngx_http_drizzle_wev_handler;
    u->read_event_handler = ngx_http_drizzle_rev_handler;

    /* a bit hack-ish way to return error response (clean-up part) */
    if ((u->peer.connection) && (u->peer.connection->fd == 0)) {
        c = u->peer.connection;
        u->peer.connection = NULL;

        if (c->write->timer_set) {
            ngx_del_timer(c->write);
        }

        ngx_free_connection(c);

        ngx_http_upstream_drizzle_finalize_request(r, u,
#if defined(nginx_version) && (nginx_version >= 8017)
                                       NGX_HTTP_SERVICE_UNAVAILABLE);
#else
                                       dctx->status ? dctx->status
                                           : NGX_HTTP_INTERNAL_SERVER_ERROR);
#endif
    }
ngx_int_t
ngx_zeromq_connect(ngx_peer_connection_t *pc)
{
    ngx_connection_t  *c;
    ngx_event_t       *rev, *wev;
    void              *zmq;
    int                fd, zero;
    size_t             fdsize;

    zmq = zmq_socket(zmq_context, ZMQ_REQ);
    if (zmq == NULL) {
        ngx_zeromq_log_error(pc->log, "zmq_socket(ZMQ_REQ)");
        return NGX_ERROR;
    }

    fdsize = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_FD, &fd, &fdsize) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_getsockopt(ZMQ_FD)");
        goto failed_zmq;
    }

    zero = 0;

    if (zmq_setsockopt(zmq, ZMQ_LINGER, &zero, sizeof(int)) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_setsockopt(ZMQ_LINGER)");
        goto failed_zmq;
    }

    c = ngx_get_connection(fd, pc->log);
    if (c == NULL) {
        goto failed_zmq;
    }

    c->data = zmq;

    c->recv = ngx_zeromq_recv;
    c->send = ngx_zeromq_send;
    c->recv_chain = ngx_zeromq_recv_chain;
    c->send_chain = ngx_zeromq_send_chain;

    /* This won't fly with ZeroMQ */
    c->sendfile = 0;
    c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
    c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    if (pc->local) {
        ngx_log_error(NGX_LOG_WARN, pc->log, 0,
                      "zmq_connect: binding to local address is not "
                      "supported");
    }

    if (zmq_connect(zmq, (const char *) pc->data) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_connect()");
        goto failed;
    }

    ngx_log_debug4(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "zmq_connect: lazily connected to tcp://%V,"
                   " zmq:%p fd:%d #%d", pc->name, zmq, fd, c->number);

    if (ngx_add_conn) {
        /* rtsig */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }

    } else {
        if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
            /* kqueue, epoll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT)
                != NGX_OK)
            {
                goto failed;
            }

        } else {
            /* select, poll, /dev/poll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT)
                != NGX_OK)
            {
                goto failed;
            }
        }
    }

    /*
     * ZeroMQ assumes that new socket is read-ready (but it really isn't)
     * and it won't notify us about any new events if we don't fail to read
     * from it first. Sigh.
     */

    rev->ready = 1;
    wev->ready = 1;

    return NGX_OK;

failed:

    ngx_free_connection(c);
    c->fd = (ngx_socket_t) -1;

failed_zmq:

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_close()");
    }

    return NGX_ERROR;
}
ngx_int_t
ngx_zeromq_connect(ngx_peer_connection_t *pc)
{
    ngx_zeromq_connection_t  *zc = pc->data;
    ngx_zeromq_endpoint_t    *zep;
    ngx_connection_t         *c;
    ngx_event_t              *rev, *wev;
    void                     *zmq;
    int                       fd, zero;
    size_t                    fdsize;
    ngx_uint_t                i;

    zep = zc->endpoint;

    zmq = zmq_socket(ngx_zeromq_ctx, zep->type->value);
    if (zmq == NULL) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, 0,
                      "zmq_socket(%V) failed (%d: %s)",
                      &zep->type->name, ngx_errno, zmq_strerror(ngx_errno));
        return NGX_ERROR;
    }

    fdsize = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_FD, &fd, &fdsize) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_getsockopt(ZMQ_FD)");
        goto failed_zmq;
    }

    zero = 0;

    if (zmq_setsockopt(zmq, ZMQ_LINGER, &zero, sizeof(int)) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_setsockopt(ZMQ_LINGER)");
        goto failed_zmq;
    }

    c = ngx_get_connection(fd, pc->log);
    if (c == NULL) {
        goto failed_zmq;
    }

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    c->recv = ngx_zeromq_recv;
    c->send = NULL;
    c->recv_chain = ngx_zeromq_recv_chain;
    c->send_chain = ngx_zeromq_send_chain;

    /* This won't fly with ZeroMQ */
    c->sendfile = 0;
    c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
    c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->data = zc;
    wev->data = zc;

    rev->handler = ngx_zeromq_event_handler;
    wev->handler = ngx_zeromq_event_handler;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = &zc->connection;

    zc->connection_ptr = c;
    memcpy(&zc->connection, c, sizeof(ngx_connection_t));

    zc->socket = zmq;

    if (zep->type->can_send) {
        zc->send = zc;
    }

    if (zep->type->can_recv) {
        zc->recv = zc;
    }

    if (pc->local) {
        ngx_log_error(NGX_LOG_WARN, pc->log, 0,
                      "zmq_connect: binding to local address is not "
                      "supported");
    }

    if (zep->bind) {
        if (zep->rand) {
            for (i = 0; /* void */; i++) {
                ngx_zeromq_randomized_endpoint_regen(&zep->addr);

                if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {
                    if (ngx_errno == NGX_EADDRINUSE && i < 65535) {
                        continue;
                    }

                    ngx_zeromq_log_error(pc->log, "zmq_bind()");
                    goto failed;
                }

                break;
            }

        } else {
            if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {
                ngx_zeromq_log_error(pc->log, "zmq_bind()");
                goto failed;
            }
        }

    } else {
        if (zmq_connect(zmq, (const char *) zep->addr.data) == -1) {
            ngx_zeromq_log_error(pc->log, "zmq_connect()");
            goto failed;
        }
    }

    ngx_log_debug7(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "zmq_connect: %s to %V (%V), fd:%d #%d zc:%p zmq:%p",
                   zep->bind ? "bound" : "lazily connected",
                   &zep->addr, &zep->type->name, fd, c->number, zc, zmq);

    if (ngx_add_conn) {
        /* rtsig */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }

    } else {
        if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
            /* kqueue, epoll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT)
                != NGX_OK)
            {
                goto failed;
            }

        } else {
            /* select, poll, /dev/poll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT)
                != NGX_OK)
            {
                goto failed;
            }
        }
    }

    /*
     * ZeroMQ assumes that new socket is read-ready (but it really isn't)
     * and it won't notify us about any new events if we don't fail to read
     * from it first. Sigh.
     */

    rev->ready = 1;
    wev->ready = zep->type->can_send;

    return NGX_OK;

failed:

    ngx_free_connection(c);
    c->fd = (ngx_socket_t) -1;

    pc->connection = NULL;
    zc->socket = NULL;

failed_zmq:

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_close()");
    }

    return NGX_ERROR;
}
static void
ngx_http_lua_abort_pending_timers(ngx_event_t *ev)
{
    ngx_int_t                  i, n;
    ngx_event_t              **events;
    ngx_connection_t          *c, *saved_c = NULL;
    ngx_rbtree_node_t         *cur, *prev, *next, *sentinel;
    ngx_http_lua_timer_ctx_t  *tctx;
    ngx_http_lua_main_conf_t  *lmcf;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
                   "lua abort pending timers HERE");

    c = ev->data;
    lmcf = c->data;

    dd("lua connection fd: %d", (int) c->fd);

    if (!c->close) {
        return;
    }

    c->read->closed = 1;
    c->write->closed = 1;

    /* we temporarily use a valid fd (0) to make ngx_free_connection happy */

    c->fd = 0;

    if (ngx_cycle->files) {
        saved_c = ngx_cycle->files[0];
    }

    ngx_free_connection(c);

    c->fd = -1;

    if (ngx_cycle->files) {
        ngx_cycle->files[0] = saved_c;
    }

    if (lmcf->pending_timers == 0) {
        return;
    }

    /* expire pending timers immediately */

    sentinel = ngx_event_timer_rbtree.sentinel;

    prev = NULL;
    cur = ngx_event_timer_rbtree.root;

    events = ngx_pcalloc(ngx_cycle->pool,
                         lmcf->pending_timers * sizeof(ngx_event_t));
    if (events == NULL) {
        return;
    }

    n = 0;

    dd("root: %p, root parent: %p, sentinel: %p",
       cur, cur->parent, sentinel);

    while (lmcf->pending_timers > n) {
        if (cur == sentinel || cur == NULL) {
            ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                          "lua pending timer counter got out of sync: %i",
                          lmcf->pending_timers);
            break;
        }

        if (prev == cur->parent) {
            next = cur->left;

            if (next == sentinel) {
                ev = (ngx_event_t *)
                        ((char *) cur - offsetof(ngx_event_t, timer));

                if (ev->handler == ngx_http_lua_timer_handler) {
                    dd("found node: %p", cur);
                    events[n++] = ev;
                }

                next = (cur->right != sentinel) ? cur->right : cur->parent;
            }

        } else if (prev == cur->left) {
            ev = (ngx_event_t *)
                    ((char *) cur - offsetof(ngx_event_t, timer));

            if (ev->handler == ngx_http_lua_timer_handler) {
                dd("found node 2: %p", cur);
                events[n++] = ev;
            }

            next = (cur->right != sentinel) ? cur->right : cur->parent;

        } else if (prev == cur->right) {
            next = cur->parent;

        } else {
            next = NULL;
        }

        prev = cur;
        cur = next;
    }

    for (i = 0; i < n; i++) {
        ev = events[i];

        ngx_rbtree_delete(&ngx_event_timer_rbtree, &ev->timer);

#if (NGX_DEBUG)
        ev->timer.left = NULL;
        ev->timer.right = NULL;
        ev->timer.parent = NULL;
#endif

        ev->timer_set = 0;

        ev->timedout = 1;

        tctx = ev->data;
        tctx->premature = 1;

        ev->handler(ev);
    }

    if (lmcf->pending_timers) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                      "lua pending timer counter got out of sync: %i",
                      lmcf->pending_timers);
    }
}
static ngx_int_t
ngx_http_lua_udp_connect(ngx_udp_connection_t *uc)
{
    int                rc;
    ngx_int_t          event;
    ngx_event_t       *rev, *wev;
    ngx_socket_t       s;
    ngx_connection_t  *c;

    s = ngx_socket(uc->sockaddr->sa_family, SOCK_DGRAM, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, &uc->log, 0, "UDP socket %d", s);

    if (s == -1) {
        ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                      ngx_socket_n " failed");
        return NGX_ERROR;
    }

    c = ngx_get_connection(s, &uc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                          ngx_close_socket_n " failed");
        }

        return NGX_ERROR;
    }

    if (ngx_nonblocking(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        ngx_free_connection(c);

        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                          ngx_close_socket_n " failed");
        }

        return NGX_ERROR;
    }

    rev = c->read;
    wev = c->write;

    rev->log = &uc->log;
    wev->log = &uc->log;

    uc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_THREADS)

    /* TODO: lock event when call completion handler */

    rev->lock = &c->lock;
    wev->lock = &c->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;

#endif

#if (NGX_HTTP_LUA_HAVE_SO_PASSCRED)
    if (uc->sockaddr->sa_family == AF_UNIX) {
        struct sockaddr addr;

        addr.sa_family = AF_UNIX;

        /* just to make valgrind happy */
        ngx_memzero(addr.sa_data, sizeof(addr.sa_data));

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, &uc->log, 0, "datagram unix "
                       "domain socket autobind");

        if (bind(uc->connection->fd, &addr, sizeof(sa_family_t)) != 0) {
            ngx_log_error(NGX_LOG_CRIT, &uc->log, ngx_socket_errno,
                          "bind() failed");

            return NGX_ERROR;
        }
    }
#endif

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, &uc->log, 0,
                   "connect to %V, fd:%d #%d", &uc->server, s, c->number);

    rc = connect(s, uc->sockaddr, uc->socklen);

    /* TODO: aio, iocp */

    if (rc == -1) {
        ngx_log_error(NGX_LOG_CRIT, &uc->log, ngx_socket_errno,
                      "connect() failed");

        return NGX_ERROR;
    }

    /* UDP sockets are always ready to write */
    wev->ready = 1;

    if (ngx_add_event) {

        event = (ngx_event_flags & NGX_USE_CLEAR_EVENT) ?
                    /* kqueue, epoll */                 NGX_CLEAR_EVENT:
                    /* select, poll, /dev/poll */       NGX_LEVEL_EVENT;
                    /* eventport event type has no meaning: oneshot only */

        if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
            return NGX_ERROR;
        }

    } else {
        /* rtsig */

        if (ngx_add_conn(c) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
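/*
 * Hypothetical caller sketch for ngx_http_lua_udp_connect(): the fields
 * set below are exactly the ones the function reads (sockaddr, socklen,
 * server, log). resolved_sockaddr, resolved_socklen, host and the read
 * handler are assumptions introduced for illustration.
 */
static ngx_int_t
udp_connect_sketch(ngx_http_request_t *r, struct sockaddr *resolved_sockaddr,
    socklen_t resolved_socklen, ngx_str_t host)
{
    ngx_udp_connection_t  *uc;

    uc = ngx_pcalloc(r->pool, sizeof(ngx_udp_connection_t));
    if (uc == NULL) {
        return NGX_ERROR;
    }

    uc->sockaddr = resolved_sockaddr;
    uc->socklen = resolved_socklen;
    uc->server = host;                  /* used by the debug log line */
    uc->log = *r->connection->log;

    if (ngx_http_lua_udp_connect(uc) != NGX_OK) {
        return NGX_ERROR;
    }

    uc->connection->data = r;
    uc->connection->read->handler = some_udp_read_handler;  /* assumption */

    return NGX_OK;
}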
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (!c->shared) {
        if (ngx_del_conn) {
            ngx_del_conn(c, NGX_CLOSE_EVENT);

        } else {
            if (c->read->active || c->read->disabled) {
                ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
            }

            if (c->write->active || c->write->disabled) {
                ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
            }
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (c->shared) {
        return;
    }

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        ngx_log_error(level, c->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
ngx_int_t
ngx_event_connect_peer(ngx_peer_connection_t *pc)
{
    // here we initialize a connection
    int                rc;
    ngx_int_t          event;
    ngx_err_t          err;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;

    // Very important: this get handler selects the actual peer.
    /* For the ip_hash module, the handlers are set like this:
     *     r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer;
     *     r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer;
     */
    // the address of the chosen backend is stored in pc
    rc = pc->get(pc, pc->data);
    if (rc != NGX_OK) {
        return rc;
    }

    // allocate a socket fd
    s = ngx_socket(pc->sockaddr->sa_family, SOCK_STREAM, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, "socket %d", s);

    if (s == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_socket_n " failed");
        return NGX_ERROR;
    }

    c = ngx_get_connection(s, pc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_close_socket_n " failed");
        }

        return NGX_ERROR;
    }

    if (pc->rcvbuf) {
        if (setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                       (const void *) &pc->rcvbuf, sizeof(int)) == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          "setsockopt(SO_RCVBUF) failed");
            goto failed;
        }
    }

    if (ngx_nonblocking(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        goto failed;
    }

    if (pc->local) {
        if (bind(s, pc->local->sockaddr, pc->local->socklen) == -1) {
            ngx_log_error(NGX_LOG_CRIT, pc->log, ngx_socket_errno,
                          "bind(%V) failed", &pc->local->name);

            goto failed;
        }
    }

    // set the connection's read/write I/O handlers
    c->recv = ngx_recv;
    c->send = ngx_send;
    c->recv_chain = ngx_recv_chain;
    c->send_chain = ngx_send_chain;

    c->sendfile = 1;

    c->log_error = pc->log_error;

    if (pc->sockaddr->sa_family != AF_INET) {
        c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
        c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

#if (NGX_SOLARIS)
        /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
        c->sendfile = 0;
#endif
    }

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_THREADS)

    /* TODO: lock event when call completion handler */

    rev->lock = pc->lock;
    wev->lock = pc->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;

#endif

    if (ngx_add_conn) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }
    }

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "connect to %V, fd:%d #%d", pc->name, s, c->number);

    // perform the actual connect operation
    rc = connect(s, pc->sockaddr, pc->socklen);

    if (rc == -1) {
        err = ngx_socket_errno;

        if (err != NGX_EINPROGRESS
#if (NGX_WIN32)
            /* Winsock returns WSAEWOULDBLOCK (NGX_EAGAIN) */
            && err != NGX_EAGAIN
#endif
            )
        {
            if (err == NGX_ECONNREFUSED
#if (NGX_LINUX)
                /*
                 * Linux returns EAGAIN instead of ECONNREFUSED
                 * for unix sockets if listen queue is full
                 */
                || err == NGX_EAGAIN
#endif
                || err == NGX_ECONNRESET
                || err == NGX_ENETDOWN
                || err == NGX_ENETUNREACH
                || err == NGX_EHOSTDOWN
                || err == NGX_EHOSTUNREACH)
            {
                level = NGX_LOG_ERR;

            } else {
                level = NGX_LOG_CRIT;
            }

            ngx_log_error(level, c->log, err, "connect() to %V failed",
                          pc->name);

            return NGX_DECLINED;
        }
    }

    if (ngx_add_conn) {
        if (rc == -1) {

            /* NGX_EINPROGRESS */

            return NGX_AGAIN;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_AIO_EVENT) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, ngx_socket_errno,
                       "connect(): %d", rc);

        /* aio, iocp */

        if (ngx_blocking(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_blocking_n " failed");
            goto failed;
        }

        /*
         * FreeBSD's aio allows to post an operation on non-connected socket.
         * NT does not support it.
         *
         * TODO: check in Win32, etc. As workaround we can use NGX_ONESHOT_EVENT
         */

        rev->ready = 1;
        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue */

        event = NGX_CLEAR_EVENT;

    } else {

        /* select, poll, /dev/poll */

        event = NGX_LEVEL_EVENT;
    }

    // very important: register the connection's read event
    if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
        goto failed;
    }

    if (rc == -1) {

        /* NGX_EINPROGRESS */

        if (ngx_add_event(wev, NGX_WRITE_EVENT, event) != NGX_OK) {
            goto failed;
        }

        return NGX_AGAIN;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

    wev->ready = 1;     // the connection is ready for writing

    return NGX_OK;

failed:

    ngx_free_connection(c);

    if (ngx_close_socket(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    return NGX_ERROR;
}
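/*
 * Sketch: typical handling of ngx_event_connect_peer() return values,
 * modeled on how ngx_http_upstream_connect() dispatches on them.
 * next_peer_handler and the 60000 ms timeout are placeholders, not part
 * of the code above.
 */
static void
connect_peer_sketch(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
    ngx_int_t          rc;
    ngx_connection_t  *c;

    rc = ngx_event_connect_peer(&u->peer);

    if (rc == NGX_ERROR) {
        /* local failure (socket(), bind(), ...): abort the request */
        ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
        return;
    }

    if (rc == NGX_BUSY || rc == NGX_DECLINED) {
        /* this peer failed: mark it down and retry with the next one */
        next_peer_handler(r, u);        /* placeholder */
        return;
    }

    c = u->peer.connection;
    c->data = r;

    if (rc == NGX_AGAIN) {
        /* non-blocking connect in progress: wait for the write event */
        ngx_add_timer(c->write, 60000 /* placeholder connect timeout */);
        return;
    }

    /* rc == NGX_OK: connected, start sending the request */
}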
static ngx_int_t
ngx_send_radius_request( ngx_http_request_t *r,
                         radius_req_queue_node_t* prev_req )
{
    ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                   "ngx_send_radius_request 0x%xl", r );

    ngx_http_auth_radius_main_conf_t* conf =
        ngx_http_get_module_main_conf( r, ngx_http_auth_radius_module );

    ngx_http_core_loc_conf_t *clcf;
    clcf = ngx_http_get_module_loc_conf( r, ngx_http_core_module );

    ngx_http_auth_radius_ctx_t* ctx =
        ngx_http_get_module_ctx( r, ngx_http_auth_radius_module );
    if ( ctx == NULL )
        abort(); // TODO

    radius_str_t user = {
        RADIUS_STR_FROM_NGX_STR_INITIALIZER( r->headers_in.user )
    };
    radius_str_t passwd = {
        RADIUS_STR_FROM_NGX_STR_INITIALIZER( r->headers_in.passwd )
    };

    radius_req_queue_node_t* n;
    n = radius_send_request( prev_req, &user, &passwd, clcf->error_log );
    if ( n == NULL ) {
        abort(); // TODO
    }

    ngx_http_auth_radius_main_conf_t* lconf =
        ngx_http_get_module_loc_conf( r, ngx_http_auth_radius_module );

    ngx_add_timer( r->connection->read, lconf->radius_timeout );

    radius_server_t* rs;
    rs = get_server_by_req( n );

    ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                   "ngx_send_radius_request rs: %d, assign 0x%xl to 0x%xl, id: %d",
                   rs->id, r, n, n->ident );

    n->data = r;
    ctx->n = n;

    ngx_connection_t* c = rs->data;
    ngx_event_t* rev;

    if ( c == NULL ) {
        c = ngx_get_connection( rs->s, conf->log );
        if ( c == NULL ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                           "ngx_send_radius_request: ngx_get_connection" );

            if ( ngx_close_socket( rs->s ) == -1 )
                ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                               "ngx_send_radius_request: ngx_close_socket" );

            return NGX_ERROR;
        }

        if ( ngx_nonblocking( rs->s ) == -1 ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                           "ngx_send_radius_request: ngx_nonblocking" );

            ngx_free_connection( c );

            if ( ngx_close_socket( rs->s ) == -1 )
                ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                               "ngx_send_radius_request: ngx_close_socket" );

            return NGX_ERROR;
        }

        rs->data = c;
        c->data = rs;

        rev = c->read;
        rev->handler = radius_read_handler;
        rev->log = clcf->error_log;
        rs->log = clcf->error_log;

        if ( ngx_add_event( rev, NGX_READ_EVENT, NGX_LEVEL_EVENT ) != NGX_OK ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0,
                           "ngx_send_radius_request: ngx_add_event" );
            return NGX_ERROR;
        }

        c->number = ngx_atomic_fetch_add( ngx_connection_counter, 1 );
    }

    return NGX_OK;
}
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (NGX_THREADS)

    /*
     * we have to clean the connection information before the closing
     * because another thread may reopen the same file descriptor
     * before we clean the connection
     */

    ngx_mutex_lock(ngx_posted_events_mutex);

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    if (c->single_connection) {
        ngx_unlock(&c->lock);
        c->read->locked = 0;
        c->write->locked = 0;
    }

    ngx_mutex_unlock(ngx_posted_events_mutex);

#else

    if (c->read->prev) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->prev) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

#endif

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
/*
 * ngx_http_close_request is the higher-level method for closing a request;
 * HTTP modules generally do not call it directly. The reference count
 * mentioned repeatedly in the preceding sections is checked by
 * ngx_http_close_request, and once the count drops to zero it formally
 * calls ngx_http_free_request and ngx_http_close_connection
 * (ngx_close_connection) to free the request and close the connection;
 * see ngx_http_close_request.
 */
void
ngx_close_connection(ngx_connection_t *c)
{
    ngx_err_t     err;
    ngx_uint_t    log_error, level;
    ngx_socket_t  fd;

    if (c->fd == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
        return;
    }

    /*
     * First take the connection's read/write events out of the timer tree.
     * In practice this checks each event's timer_set flag: if it is 1, the
     * event is in the timer tree and must be removed with ngx_del_timer.
     */
    if (c->read->timer_set) {
        ngx_del_timer(c->read, NGX_FUNC_LINE);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write, NGX_FUNC_LINE);
    }

    /*
     * Use the ngx_del_conn macro (or the ngx_del_event macro) to remove the
     * read/write events from epoll. This invokes the del_conn method of the
     * ngx_event_actions_t interface; with the epoll event module, that
     * removes the connection's read/write events from epoll. In addition,
     * if an event sits in the ngx_posted_accept_events or ngx_posted_events
     * queue, it must be removed from the posted-event queue with the
     * ngx_delete_posted_event macro.
     */
    if (ngx_del_conn) { // ngx_epoll_del_connection
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT); // ngx_epoll_del_event
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

    if (c->read->posted) {
        ngx_delete_posted_event(c->read);
    }

    if (c->write->posted) {
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(c, 0);

    log_error = c->log_error;

    /*
     * Call ngx_free_connection to return the ngx_connection_t structure to
     * the free_connections pool kept in the ngx_cycle_t core structure.
     */
    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    ngx_log_debugall(ngx_cycle->log, 0, "close socket:%d", fd);

    // close this TCP socket with the close() system call provided by the OS
    if (ngx_close_socket(fd) == -1) {

        err = ngx_socket_errno;

        if (err == NGX_ECONNRESET || err == NGX_ENOTCONN) {

            switch (log_error) {

            case NGX_ERROR_INFO:
                level = NGX_LOG_INFO;
                break;

            case NGX_ERROR_ERR:
                level = NGX_LOG_ERR;
                break;

            default:
                level = NGX_LOG_CRIT;
            }

        } else {
            level = NGX_LOG_CRIT;
        }

        /* we use ngx_cycle->log because c->log was in c->pool */
        // c was already released above, so c->log must not be used anymore

        ngx_log_error(level, ngx_cycle->log, err,
                      ngx_close_socket_n " %d failed", fd);
    }
}
void
ngx_zeromq_close(ngx_zeromq_connection_t *zc)
{
    ngx_connection_t  *c;

    c = &zc->connection;

    if (c->fd == -1) {
        return;
    }

    ngx_log_debug4(NGX_LOG_DEBUG_EVENT, c->log, 0,
                   "zmq_close: fd:%d #%d zc:%p zmq:%p",
                   c->fd, c->number, zc, zc->socket);

    if (c->read->timer_set) {
        ngx_del_timer(c->read);
    }

    if (c->write->timer_set) {
        ngx_del_timer(c->write);
    }

    if (ngx_del_conn) {
        ngx_del_conn(c, NGX_CLOSE_EVENT);

    } else {
        if (c->read->active || c->read->disabled) {
            ngx_del_event(c->read, NGX_READ_EVENT, NGX_CLOSE_EVENT);
        }

        if (c->write->active || c->write->disabled) {
            ngx_del_event(c->write, NGX_WRITE_EVENT, NGX_CLOSE_EVENT);
        }
    }

#if (nginx_version >= 1007005)
    if (c->read->posted) {
#else
    if (c->read->prev) {
#endif
        ngx_delete_posted_event(c->read);
    }

#if (nginx_version >= 1007005)
    if (c->write->posted) {
#else
    if (c->write->prev) {
#endif
        ngx_delete_posted_event(c->write);
    }

    c->read->closed = 1;
    c->write->closed = 1;

    ngx_reusable_connection(zc->connection_ptr, 0);

    ngx_free_connection(zc->connection_ptr);

    c->fd = (ngx_socket_t) -1;
    zc->connection_ptr->fd = (ngx_socket_t) -1;

    if (zmq_close(zc->socket) == -1) {
        ngx_zeromq_log_error(ngx_cycle->log, "zmq_close()");
    }

    zc->socket = NULL;
}


static void
ngx_zeromq_event_handler(ngx_event_t *ev)
{
    ngx_zeromq_connection_t  *zc;
    ngx_connection_t         *c;
    void                     *zmq;
    int                       events;
    size_t                    esize;

    /*
     * ZeroMQ notifies us about new events in edge-triggered fashion
     * by changing state of the notification socket to read-ready.
     *
     * Write-readiness doesn't indicate anything and can be ignored.
     */

    if (ev->write) {
        return;
    }

    zc = ev->data;
    zc = zc->send;

    esize = sizeof(int);

#if (NGX_DEBUG)
    if (zc->recv != zc->send) {
        zmq = zc->request_sent ? zc->socket : zc->recv->socket;

        if (zmq_getsockopt(zmq, ZMQ_EVENTS, &events, &esize) == -1) {
            ngx_zeromq_log_error(ev->log, "zmq_getsockopt(ZMQ_EVENTS)");
            ev->error = 1;
            return;
        }

        ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                       "zmq_event: %s:%d (ignored)",
                       zc->request_sent ? "send" : "recv", events);
    }
#endif

    zmq = zc->request_sent ? zc->recv->socket : zc->socket;

    if (zmq_getsockopt(zmq, ZMQ_EVENTS, &events, &esize) == -1) {
        ngx_zeromq_log_error(ev->log, "zmq_getsockopt(ZMQ_EVENTS)");
        ev->error = 1;
        return;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0, "zmq_event: %s:%d",
                   zc->request_sent ? "recv" : "send", events);

    c = &zc->connection;

    if (zc->request_sent) {
        c->read->ready = events & ZMQ_POLLIN ? 1 : 0;

        if (c->read->ready) {
            zc->handler(c->read);
        }

    } else {
        c->write->ready = events & ZMQ_POLLOUT ? 1 : 0;

        if (c->write->ready) {
            zc->handler(c->write);
        }
    }
}


static ssize_t
ngx_zeromq_sendmsg(void *zmq, ngx_event_t *ev, zmq_msg_t *msg, int flags)
{
    size_t  size;

    size = zmq_msg_size(msg);

    for (;;) {
        if (zmq_msg_send(msg, zmq, ZMQ_DONTWAIT|flags) == -1) {

            if (ngx_errno == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                               "zmq_send: not ready");
                ev->ready = 0;
                return NGX_AGAIN;
            }

            if (ngx_errno == NGX_EINTR) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                               "zmq_send: interrupted");
                ev->ready = 0;
                continue;
            }

            ngx_zeromq_log_error(ev->log, "zmq_msg_send()");

            ev->error = 1;
            return NGX_ERROR;
        }

        break;
    }

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "zmq_send: %uz eom:%d", size, flags != ZMQ_SNDMORE);

    return size;
}
static void
ngx_http_lua_abort_pending_timers(ngx_event_t *ev)
{
    ngx_int_t                  i, n;
    ngx_event_t              **events;
    ngx_connection_t          *c, *saved_c = NULL;
    ngx_rbtree_node_t         *cur, *prev, *next, *sentinel, *temp;
    ngx_http_lua_timer_ctx_t  *tctx;
    ngx_http_lua_main_conf_t  *lmcf;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
                   "lua abort pending timers");

    c = ev->data;
    lmcf = c->data;

    dd("lua connection fd: %d", (int) c->fd);

    if (!c->close) {
        return;
    }

    c->read->closed = 1;
    c->write->closed = 1;

    /* we temporarily use a valid fd (0) to make ngx_free_connection happy */

    c->fd = 0;

    if (ngx_cycle->files) {
        saved_c = ngx_cycle->files[0];
    }

    ngx_free_connection(c);

    c->fd = (ngx_socket_t) -1;

    if (ngx_cycle->files) {
        ngx_cycle->files[0] = saved_c;
    }

    if (lmcf->pending_timers == 0) {
        return;
    }

    /* expire pending timers immediately */

    sentinel = ngx_event_timer_rbtree.sentinel;

    cur = ngx_event_timer_rbtree.root;

    /* XXX nginx does not guarantee the parent of root is meaningful,
     * so we temporarily override it to simplify tree traversal. */
    temp = cur->parent;
    cur->parent = NULL;

    prev = NULL;

    events = ngx_pcalloc(ngx_cycle->pool,
                         lmcf->pending_timers * sizeof(ngx_event_t));
    if (events == NULL) {
        return;
    }

    n = 0;

    dd("root: %p, root parent: %p, sentinel: %p",
       cur, cur->parent, sentinel);

    while (n < lmcf->pending_timers) {
        if (cur == sentinel || cur == NULL) {
            ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                          "lua pending timer counter got out of sync: %i",
                          lmcf->pending_timers);
            break;
        }

        dd("prev: %p, cur: %p, cur parent: %p, cur left: %p, cur right: %p",
           prev, cur, cur->parent, cur->left, cur->right);

        if (prev == cur->parent) {
            /* neither of the children has been accessed yet */

            next = cur->left;

            if (next == sentinel) {
                ev = (ngx_event_t *)
                        ((char *) cur - offsetof(ngx_event_t, timer));

                if (ev->handler == ngx_http_lua_timer_handler) {
                    dd("found node: %p", cur);
                    events[n++] = ev;
                }

                next = (cur->right != sentinel) ? cur->right : cur->parent;
            }

        } else if (prev == cur->left) {
            /* just accessed the left child */

            ev = (ngx_event_t *)
                    ((char *) cur - offsetof(ngx_event_t, timer));

            if (ev->handler == ngx_http_lua_timer_handler) {
                dd("found node 2: %p", cur);
                events[n++] = ev;
            }

            next = (cur->right != sentinel) ? cur->right : cur->parent;

        } else if (prev == cur->right) {
            /* already accessed both children */
            next = cur->parent;

        } else {
            /* not reachable */
            next = NULL;
        }

        prev = cur;
        cur = next;
    }

    /* restore the old tree root's parent */
    ngx_event_timer_rbtree.root->parent = temp;

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
                   "lua found %i pending timers to be aborted prematurely",
                   n);

    for (i = 0; i < n; i++) {
        ev = events[i];

        ngx_rbtree_delete(&ngx_event_timer_rbtree, &ev->timer);

#if (NGX_DEBUG)
        ev->timer.left = NULL;
        ev->timer.right = NULL;
        ev->timer.parent = NULL;
#endif

        ev->timer_set = 0;

        ev->timedout = 1;

        tctx = ev->data;
        tctx->premature = 1;

        dd("calling timer handler prematurely");
        ev->handler(ev);
    }

#if 0
    if (pending_timers) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                      "lua pending timer counter got out of sync: %i",
                      pending_timers);
    }
#endif
}