Example #1
int
redis_nginx_event_attach(redisAsyncContext *ac)
{
    ngx_connection_t *connection;
    redisContext *c = &(ac->c);

    /* Nothing should be attached when something is already attached */
    if (ac->ev.data != NULL) {
        ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0, "redis_nginx_adapter: context already attached");
        return REDIS_ERR;
    }

    connection = ngx_get_connection(c->fd, ngx_cycle->log);
    if (connection == NULL) {
        ngx_log_error(NGX_LOG_ERR, ngx_cycle->log, 0, "redis_nginx_adapter: could not get a connection for fd #%d", c->fd);
        return REDIS_ERR;
    }


    /* Register functions to start/stop listening for events */
    ac->ev.addRead = redis_nginx_add_read;
    ac->ev.delRead = redis_nginx_del_read;
    ac->ev.addWrite = redis_nginx_add_write;
    ac->ev.delWrite = redis_nginx_del_write;
    ac->ev.cleanup = redis_nginx_cleanup;
    ac->ev.data = connection;
    connection->data = ac;

    return REDIS_OK;
}
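
A minimal usage sketch, assuming the standard hiredis async API (redisAsyncConnect/redisAsyncFree) and that the adapter above is compiled in; my_redis_connect is a hypothetical helper name:

static redisAsyncContext *
my_redis_connect(void)
{
    /* hypothetical helper: open an async redis context and hook it into
       the nginx event loop via the adapter above */
    redisAsyncContext *ac = redisAsyncConnect("127.0.0.1", 6379);

    if (ac == NULL || ac->err) {
        return NULL;
    }

    if (redis_nginx_event_attach(ac) != REDIS_OK) {
        redisAsyncFree(ac);
        return NULL;
    }

    return ac;
}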
Example #2
ngx_connection_t *nchan_create_fake_connection(ngx_pool_t *pool) {
    ngx_log_t               *log;
    ngx_connection_t        *c;
    ngx_connection_t        *saved_c = NULL;

    /* (we temporarily use a valid fd (0) to make ngx_get_connection happy) */
    if (ngx_cycle->files) {
        saved_c = ngx_cycle->files[0];
    }

    c = ngx_get_connection(0, ngx_cycle->log);

    if (ngx_cycle->files) {
        ngx_cycle->files[0] = saved_c;
    }

    if (c == NULL) {
        return NULL;
    }

    c->fd = (ngx_socket_t) -1;
    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    if (pool) {
        c->pool = pool;
    }
    else {
        c->pool = ngx_create_pool(128, c->log);
        if (c->pool == NULL) {
            goto failed;
        }
    }

    log = ngx_pcalloc(c->pool, sizeof(ngx_log_t));
    if (log == NULL) {
        goto failed;
    }

    c->log = log;
    c->log->connection = c->number;
    c->log->action = NULL;
    c->log->data = NULL;

    c->log_error = NGX_ERROR_INFO;

    c->error = 1;

    return c;

failed:

    nchan_close_fake_connection(c);
    return NULL;
}
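
A sketch of the intended pairing; nchan_close_fake_connection is the module's own cleanup routine referenced in the failure path above, and the surrounding code is illustrative only:

    ngx_connection_t  *fc;

    /* NULL pool: let the function create its own small pool */
    fc = nchan_create_fake_connection(NULL);
    if (fc == NULL) {
        return NGX_ERROR;
    }

    /* ... use fc->pool and fc->log for connectionless work ... */

    nchan_close_fake_connection(fc);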
Example #3
ngx_int_t ipc_register_worker(ipc_t *ipc, ngx_cycle_t *cycle) {
  int                    i;    
  ngx_connection_t      *c;
  ipc_process_t         *proc;
  
  for(i=0; i< NGX_MAX_PROCESSES; i++) {
    
    proc = &ipc->process[i];
    
    if(!proc->active) continue;
    
    assert(proc->pipe[0] != NGX_INVALID_FILE);
    assert(proc->pipe[1] != NGX_INVALID_FILE);
    
    if(i==ngx_process_slot) {
      // set up read connection
      c = ngx_get_connection(proc->pipe[0], cycle->log);
      if(c == NULL) {
        return NGX_ERROR;
      }
      c->data = ipc;
      
      c->read->handler = ipc_read_handler;
      c->read->log = cycle->log;
      c->write->handler = NULL;
      
      ngx_add_event(c->read, NGX_READ_EVENT, 0);
      proc->c=c;
    }
    else {
      // set up write connection
      c = ngx_get_connection(proc->pipe[1], cycle->log);
      if(c == NULL) {
        return NGX_ERROR;
      }
      
      c->data = proc;
      
      c->read->handler = NULL;
      c->write->log = cycle->log;
      c->write->handler = ipc_write_handler;
      
      proc->c=c;
    }
  }
  return NGX_OK;
}
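
This would typically run from a module's init_process hook, once per worker, after the pipes have been created in the master; a hedged sketch in which my_module_ipc and my_init_process are hypothetical names:

static ipc_t  *my_module_ipc;  /* hypothetical: populated before fork */

static ngx_int_t
my_init_process(ngx_cycle_t *cycle)
{
    /* wire up this worker's end of every active pipe */
    return ipc_register_worker(my_module_ipc, cycle);
}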
Example #4
ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event,
    ngx_event_handler_pt handler)
{
    ngx_event_t       *ev, *rev, *wev;
    ngx_connection_t  *c;

    /* debug trace: log entry into the channel-event setup */
    syslog(LOG_INFO, "[%s:%s:%d]\n", __FILE__, __func__, __LINE__);

    c = ngx_get_connection(fd, cycle->log);

    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    rev = c->read;
    wev = c->write;

    rev->log = cycle->log;
    wev->log = cycle->log;

#if (NGX_THREADS)
    rev->lock = &c->lock;
    wev->lock = &c->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;
#endif

    rev->channel = 1;
    wev->channel = 1;

    ev = (event == NGX_READ_EVENT) ? rev : wev;

    ev->handler = handler;

    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
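
For reference, the stock caller of this function is ngx_worker_process_init(), which registers the worker's read end of the master-worker channel roughly like this:

    if (ngx_add_channel_event(cycle, ngx_channel, NGX_READ_EVENT,
                              ngx_channel_handler)
        == NGX_ERROR)
    {
        /* fatal */
        exit(2);
    }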
Example #5
ngx_rpc_notify_t* ngx_rpc_notify_register(ngx_rpc_notify_t **notify_slot, void *ctx)
{

    if( ngx_process_slot < 0 || ngx_process_slot >= ngx_last_process)
    {
        ngx_log_error(NGX_LOG_WARN, ngx_cycle->log, 0,
                      "ngx_rpc_notify_register ngx_process_slot:%d not in range [0, %d)",
                      ngx_process_slot, ngx_last_process);
        return NULL;
    }


    ngx_rpc_notify_t* notify = notify_slot[ngx_process_slot];

    notify->ctx = ctx;
    notify->log = ngx_cycle->log;

    notify->notify_conn = ngx_get_connection(notify->event_fd, ngx_cycle->log);
    if (notify->notify_conn == NULL) {
        ngx_log_error(NGX_LOG_WARN, ngx_cycle->log, 0,
                      "ngx_rpc_notify_register: no free connection for eventfd:%d",
                      notify->event_fd);
        return NULL;
    }

    notify->notify_conn->pool = NULL;
    // the sockaddr field is unused here, so it is borrowed to stash the
    // notify object itself
    notify->notify_conn->sockaddr = (struct sockaddr *)notify;


    // register read/write event handlers with the nginx event loop (epoll)
    notify->notify_conn->read->handler = ngx_rpc_notify_read_handler;
    notify->notify_conn->read->log     = ngx_cycle->log;
    notify->notify_conn->read->data    = notify->notify_conn;

    notify->notify_conn->write->handler = ngx_rpc_notify_write_handler;
    notify->notify_conn->write->log     = ngx_cycle->log;
    notify->notify_conn->write->data    = notify->notify_conn;

    notify->read_hanlder = ngx_rpc_notify_default_hanlder;
    notify->write_hanlder = ngx_rpc_notify_default_hanlder;


    if( ngx_add_conn(notify->notify_conn) != NGX_OK)
    {
        ngx_log_error(NGX_LOG_WARN, ngx_cycle->log, 0,
                      "ngx_rpc_notify_register failed, notify:%p eventfd:%d slot:%d",
                      notify, notify->event_fd, ngx_process_slot);
        return NULL;
    }

    ngx_log_error(NGX_LOG_INFO, ngx_cycle->log, 0,
                  "ngx_rpc_notify_register notify:%p eventfd:%d slot:%d",
                  notify, notify->event_fd, ngx_process_slot);

    return notify;
}
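
The log messages above suggest notify->event_fd is an eventfd descriptor; if so, another thread could wake the connection registered here with a plain eventfd write. A sketch under that assumption (my_notify_wake is a hypothetical name):

#include <sys/eventfd.h>

static void
my_notify_wake(ngx_rpc_notify_t *notify)
{
    /* assumed: event_fd was created with eventfd(2); writing bumps the
       counter and makes the fd readable, firing the read handler */
    if (eventfd_write(notify->event_fd, 1) == -1) {
        /* EAGAIN: counter saturated; the pending event will fire anyway */
    }
}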
Example #6
ngx_int_t
ngx_add_channel_event(ngx_cycle_t *cycle, ngx_fd_t fd, ngx_int_t event, ngx_event_handler_pt handler)
{
    ngx_event_t       *ev, *rev, *wev;
    ngx_connection_t  *c;

    c = ngx_get_connection(fd, cycle->log);

    if (c == NULL) {
        return NGX_ERROR;
    }

    c->pool = cycle->pool;

    rev = c->read;
    wev = c->write;

    rev->log = cycle->log;
    wev->log = cycle->log;

    rev->channel = 1;
    wev->channel = 1;

    ev = (event == NGX_READ_EVENT) ? rev : wev;

    ev->handler = handler;

    if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(ev, event, 0) == NGX_ERROR) {
            ngx_free_connection(c);
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
Example #7
static  ngx_int_t ngx_http_clojure_jvm_worker_pipe_init(ngx_log_t *log) {
	ngx_connection_t *c;
	ngx_int_t rc;

	rc = ngx_http_clojure_pipe(nc_jvm_worker_pipe_fds);
	if (rc != 0) {
		ngx_log_error(NGX_LOG_ERR, log, 0, "ngx clojure: create worker_pipe failed");
		return NGX_ERROR;
	}
	if (ngx_nonblocking(nc_jvm_worker_pipe_fds[0]) == -1) {
		ngx_log_error(NGX_LOG_ERR, log, 0, "ngx clojure: create worker_pipe at ngx_nonblocking(fds[0]) failed");
		return NGX_ERROR;
	}
	if (ngx_nonblocking(nc_jvm_worker_pipe_fds[1]) == -1) {
		ngx_log_error(NGX_LOG_ERR, log, 0, "ngx clojure: create worker_pipe at ngx_nonblocking(fds[1]) failed");
		return NGX_ERROR;
	}

	ngx_log_error(NGX_LOG_NOTICE, log, 0, "ngx clojure: init pipe for jvm worker, fds: %d, %d", nc_jvm_worker_pipe_fds[0], nc_jvm_worker_pipe_fds[1]);

	c = ngx_get_connection(nc_jvm_worker_pipe_fds[0], log);
	if (c == NULL) {
		ngx_http_clojure_pipe_close(nc_jvm_worker_pipe_fds[1]);
		ngx_http_clojure_pipe_close(nc_jvm_worker_pipe_fds[0]);
		ngx_log_error(NGX_LOG_ERR, log, 0, "ngx clojure: create worker_pipe at ngx_get_connection failed");
		return NGX_ERROR;
	}

	c->recv = ngx_recv;
	c->send = ngx_send;
	c->recv_chain = ngx_recv_chain;
	c->send_chain = ngx_send_chain;
	c->log = log;
	c->read->log = log;
	c->write->log = log;
	c->data = NULL;
	c->read->handler = ngx_http_clojure_jvm_worker_result_handler;

	rc = ngx_handle_read_event(c->read, 0);
	if (rc != NGX_OK) {
		ngx_log_error(NGX_LOG_ERR, log, 0, "ngx clojure: create worker_pipe at ngx_handle_read_event failed");
	}
	return rc;
}
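
The other end of the pipe (fds[1]) is presumably written by the JVM worker thread to hand results back; assuming ngx_http_clojure_pipe() wraps a plain POSIX pipe, a sketch of that side might look like this (my_post_result is a hypothetical name):

#include <unistd.h>

static void
my_post_result(void *result)
{
	/* wake ngx_http_clojure_jvm_worker_result_handler in the event loop
	   by writing the result pointer to the pipe's write end */
	if (write(nc_jvm_worker_pipe_fds[1], &result, sizeof(result)) < 0) {
		/* handle EINTR/EAGAIN as appropriate */
	}
}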
Example #8
ngx_int_t
ngx_event_connect_peer(ngx_peer_connection_t *pc)
{
    int                rc, type;
#if (NGX_HAVE_IP_BIND_ADDRESS_NO_PORT || NGX_LINUX)
    in_port_t          port;
#endif
    ngx_int_t          event;
    ngx_err_t          err;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;

    rc = pc->get(pc, pc->data);
    if (rc != NGX_OK) {
        return rc;
    }

    type = (pc->type ? pc->type : SOCK_STREAM);

    s = ngx_socket(pc->sockaddr->sa_family, type, 0);

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, pc->log, 0, "%s socket %d",
                   (type == SOCK_STREAM) ? "stream" : "dgram", s);

    if (s == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_socket_n " failed");
        return NGX_ERROR;
    }


    c = ngx_get_connection(s, pc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_close_socket_n " failed");
        }

        return NGX_ERROR;
    }

    c->type = type;

    if (pc->rcvbuf) {
        if (setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                       (const void *) &pc->rcvbuf, sizeof(int)) == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          "setsockopt(SO_RCVBUF) failed");
            goto failed;
        }
    }

    if (ngx_nonblocking(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        goto failed;
    }

    if (pc->local) {

#if (NGX_HAVE_TRANSPARENT_PROXY)
        if (pc->transparent) {
            if (ngx_event_connect_set_transparent(pc, s) != NGX_OK) {
                goto failed;
            }
        }
#endif

#if (NGX_HAVE_IP_BIND_ADDRESS_NO_PORT || NGX_LINUX)
        port = ngx_inet_get_port(pc->local->sockaddr);
#endif

#if (NGX_HAVE_IP_BIND_ADDRESS_NO_PORT)

        if (pc->sockaddr->sa_family != AF_UNIX && port == 0) {
            static int  bind_address_no_port = 1;

            if (bind_address_no_port) {
                if (setsockopt(s, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT,
                               (const void *) &bind_address_no_port,
                               sizeof(int)) == -1)
                {
                    err = ngx_socket_errno;

                    if (err != NGX_EOPNOTSUPP && err != NGX_ENOPROTOOPT) {
                        ngx_log_error(NGX_LOG_ALERT, pc->log, err,
                                      "setsockopt(IP_BIND_ADDRESS_NO_PORT) "
                                      "failed, ignored");

                    } else {
                        bind_address_no_port = 0;
                    }
                }
            }
        }

#endif

#if (NGX_LINUX)

        if (pc->type == SOCK_DGRAM && port != 0) {
            int  reuse_addr = 1;

            if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR,
                           (const void *) &reuse_addr, sizeof(int))
                 == -1)
            {
                ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                              "setsockopt(SO_REUSEADDR) failed");
                goto failed;
            }
        }

#endif

        if (bind(s, pc->local->sockaddr, pc->local->socklen) == -1) {
            ngx_log_error(NGX_LOG_CRIT, pc->log, ngx_socket_errno,
                          "bind(%V) failed", &pc->local->name);

            goto failed;
        }
    }

    if (type == SOCK_STREAM) {
        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->sendfile = 1;

        if (pc->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }

    } else { /* type == SOCK_DGRAM */
        c->recv = ngx_udp_recv;
        c->send = ngx_send;
        c->send_chain = ngx_udp_send_chain;
    }

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    if (ngx_add_conn) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }
    }

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "connect to %V, fd:%d #%uA", pc->name, s, c->number);

    rc = connect(s, pc->sockaddr, pc->socklen);

    if (rc == -1) {
        err = ngx_socket_errno;


        if (err != NGX_EINPROGRESS
#if (NGX_WIN32)
            /* Winsock returns WSAEWOULDBLOCK (NGX_EAGAIN) */
            && err != NGX_EAGAIN
#endif
            )
        {
            if (err == NGX_ECONNREFUSED
#if (NGX_LINUX)
                /*
                 * Linux returns EAGAIN instead of ECONNREFUSED
                 * for unix sockets if listen queue is full
                 */
                || err == NGX_EAGAIN
#endif
                || err == NGX_ECONNRESET
                || err == NGX_ENETDOWN
                || err == NGX_ENETUNREACH
                || err == NGX_EHOSTDOWN
                || err == NGX_EHOSTUNREACH)
            {
                level = NGX_LOG_ERR;

            } else {
                level = NGX_LOG_CRIT;
            }

            ngx_log_error(level, c->log, err, "connect() to %V failed",
                          pc->name);

            ngx_close_connection(c);
            pc->connection = NULL;

            return NGX_DECLINED;
        }
    }

    if (ngx_add_conn) {
        if (rc == -1) {

            /* NGX_EINPROGRESS */

            return NGX_AGAIN;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, ngx_socket_errno,
                       "connect(): %d", rc);

        if (ngx_blocking(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_blocking_n " failed");
            goto failed;
        }

        /*
         * FreeBSD's aio allows posting an operation on a non-connected
         * socket, but NT does not support it.
         *
         * TODO: check in Win32, etc.  As a workaround we can use
         * NGX_ONESHOT_EVENT.
         */

        rev->ready = 1;
        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue */

        event = NGX_CLEAR_EVENT;

    } else {

        /* select, poll, /dev/poll */

        event = NGX_LEVEL_EVENT;
    }

    if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
        goto failed;
    }

    if (rc == -1) {

        /* NGX_EINPROGRESS */

        if (ngx_add_event(wev, NGX_WRITE_EVENT, event) != NGX_OK) {
            goto failed;
        }

        return NGX_AGAIN;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

    wev->ready = 1;

    return NGX_OK;

failed:

    ngx_close_connection(c);
    pc->connection = NULL;

    return NGX_ERROR;
}
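
A condensed caller sketch using nginx's stock dummy callback ngx_event_get_peer; addr is assumed to be an already-resolved ngx_addr_t *, and error handling is trimmed:

    ngx_int_t               rc;
    ngx_peer_connection_t   pc;

    ngx_memzero(&pc, sizeof(ngx_peer_connection_t));

    pc.sockaddr = addr->sockaddr;
    pc.socklen = addr->socklen;
    pc.name = &addr->name;
    pc.get = ngx_event_get_peer;   /* no-op getter: always returns NGX_OK */
    pc.log = cycle->log;
    pc.log_error = NGX_ERROR_ERR;

    rc = ngx_event_connect_peer(&pc);

    if (rc == NGX_ERROR || rc == NGX_DECLINED) {
        return NGX_ERROR;
    }

    /* rc is NGX_OK (connected) or NGX_AGAIN (connect in progress);
       set pc.connection->read/write handlers before returning */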
Example #9
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
        ev->available = 1;

    } else if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);

        if (s == -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            ngx_log_error((ngx_uint_t) ((err == NGX_ECONNABORTED) ?
                                             NGX_LOG_ERR : NGX_LOG_ALERT),
                          ev->log, err, "accept() failed");

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

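        /*
         * load-balancing hint: ngx_accept_disabled goes positive once more
         * than 7/8 of this worker's connections are in use, after which the
         * worker temporarily backs off from accepting new connections
         */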
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for aio and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_AIO_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT))) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT)) {
            /* rtsig, aio, iocp */
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

#if (NGX_THREADS)
        rev->lock = &c->lock;
        wev->lock = &c->lock;
        rev->own_lock = &c->lock;
        wev->own_lock = &c->lock;
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {

        in_addr_t            i;
        ngx_event_debug_t   *dc;
        struct sockaddr_in  *sin;

        sin = (struct sockaddr_in *) sa;
        dc = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if ((sin->sin_addr.s_addr & dc[i].mask) == dc[i].addr) {
                log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
                break;
            }
        }

        }
#endif

        ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                       "*%d accept: %V fd:%d", c->number, &c->addr_text, s);

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);
}
Example #10
static void
ngx_http_lua_timer_handler(ngx_event_t *ev)
{
    int                      n;
    lua_State               *L;
    ngx_int_t                rc;
    ngx_log_t               *log;
    ngx_connection_t        *c = NULL, *saved_c = NULL;
    ngx_http_request_t      *r = NULL;
    ngx_http_lua_ctx_t      *ctx;
    ngx_http_cleanup_t      *cln;
    ngx_http_log_ctx_t      *logctx;

    ngx_http_lua_timer_ctx_t         tctx;
    ngx_http_lua_main_conf_t        *lmcf;
    ngx_http_core_loc_conf_t        *clcf;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
                   "lua ngx.timer expired");

    ngx_memcpy(&tctx, ev->data, sizeof(ngx_http_lua_timer_ctx_t));
    ngx_free(ev);
    ev = NULL;

    lmcf = tctx.lmcf;

    lmcf->pending_timers--;

    if (lmcf->running_timers >= lmcf->max_running_timers) {
        ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
                      "%i lua_max_running_timers are not enough",
                      lmcf->max_running_timers);
        goto abort;
    }

    /* create the fake connection (we temporarily use a valid fd (0) to make
       ngx_get_connection happy) */

    if (ngx_cycle->files) {
        saved_c = ngx_cycle->files[0];
    }

    c = ngx_get_connection(0, ngx_cycle->log);

    if (ngx_cycle->files) {
        ngx_cycle->files[0] = saved_c;
    }

    if (c == NULL) {
        goto abort;
    }

    c->fd = -1;

    c->pool = ngx_create_pool(NGX_CYCLE_POOL_SIZE, c->log);
    if (c->pool == NULL) {
        goto abort;
    }

    log = ngx_pcalloc(c->pool, sizeof(ngx_log_t));
    if (log == NULL) {
        goto abort;
    }

    logctx = ngx_palloc(c->pool, sizeof(ngx_http_log_ctx_t));
    if (logctx == NULL) {
        goto abort;
    }

    dd("c pool allocated: %d", (int) (sizeof(ngx_log_t)
                                      + sizeof(ngx_http_log_ctx_t) + sizeof(ngx_http_request_t)));

    logctx->connection = c;
    logctx->request = NULL;
    logctx->current_request = NULL;

    c->log = log;
    c->log->connection = c->number;
    c->log->handler = ngx_http_lua_log_timer_error;
    c->log->data = logctx;
    c->log->action = NULL;

    c->log_error = NGX_ERROR_INFO;

#if 0
    c->buffer = ngx_create_temp_buf(c->pool, 2);
    if (c->buffer == NULL) {
        goto abort;
    }

    c->buffer->start[0] = CR;
    c->buffer->start[1] = LF;
#endif

    /* create the fake request */

    r = ngx_pcalloc(c->pool, sizeof(ngx_http_request_t));
    if (r == NULL) {
        goto abort;
    }

    c->requests++;
    logctx->request = r;
    logctx->current_request = r;

    r->pool = ngx_create_pool(NGX_CYCLE_POOL_SIZE, c->log);
    if (r->pool == NULL) {
        goto abort;
    }

    dd("r pool allocated: %d", (int) (sizeof(ngx_http_lua_ctx_t)
                                      + sizeof(void *) * ngx_http_max_module + sizeof(ngx_http_cleanup_t)));

#if 0
    hc = ngx_pcalloc(c->pool, sizeof(ngx_http_connection_t));
    if (hc == NULL) {
        goto abort;
    }

    r->header_in = c->buffer;
    r->header_end = c->buffer->start;

    if (ngx_list_init(&r->headers_out.headers, r->pool, 0,
                      sizeof(ngx_table_elt_t))
            != NGX_OK)
    {
        goto abort;
    }

    if (ngx_list_init(&r->headers_in.headers, r->pool, 0,
                      sizeof(ngx_table_elt_t))
            != NGX_OK)
    {
        goto abort;
    }
#endif

    r->ctx = ngx_pcalloc(r->pool, sizeof(void *) * ngx_http_max_module);
    if (r->ctx == NULL) {
        goto abort;
    }

#if 0
    cmcf = ngx_http_get_module_main_conf(r, ngx_http_core_module);

    r->variables = ngx_pcalloc(r->pool, cmcf->variables.nelts
                               * sizeof(ngx_http_variable_value_t));
    if (r->variables == NULL) {
        goto abort;
    }
#endif

    ctx = ngx_http_lua_create_ctx(r);
    if (ctx == NULL) {
        goto abort;
    }

    r->headers_in.content_length_n = 0;
    c->data = r;
#if 0
    hc->request = r;
    r->http_connection = hc;
#endif
    r->signature = NGX_HTTP_MODULE;
    r->connection = c;
    r->main = r;
    r->count = 1;

    r->method = NGX_HTTP_UNKNOWN;

    r->headers_in.keep_alive_n = -1;
    r->uri_changes = NGX_HTTP_MAX_URI_CHANGES + 1;
    r->subrequests = NGX_HTTP_MAX_SUBREQUESTS + 1;

    r->http_state = NGX_HTTP_PROCESS_REQUEST_STATE;
    r->discard_body = 1;

    r->main_conf = tctx.main_conf;
    r->srv_conf = tctx.srv_conf;
    r->loc_conf = tctx.loc_conf;

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
    c->log->file = clcf->error_log->file;
    if (!(c->log->log_level & NGX_LOG_DEBUG_CONNECTION)) {
        c->log->log_level = clcf->error_log->log_level;
    }

    c->error = 1;

    ctx->cur_co_ctx = &ctx->entry_co_ctx;

    L = lmcf->lua;

    cln = ngx_http_cleanup_add(r, 0);
    if (cln == NULL) {
        goto abort;
    }

    cln->handler = ngx_http_lua_request_cleanup;
    cln->data = r;
    ctx->cleanup = &cln->handler;

    ctx->entered_content_phase = 1;
    ctx->context = NGX_HTTP_LUA_CONTEXT_TIMER;

    r->read_event_handler = ngx_http_block_reading;

    ctx->cur_co_ctx->co_ref = tctx.co_ref;
    ctx->cur_co_ctx->co = tctx.co;
    ctx->cur_co_ctx->co_status = NGX_HTTP_LUA_CO_RUNNING;

    dd("r connection: %p, log %p", r->connection, r->connection->log);

    /*  save the request in coroutine globals table */
    ngx_http_lua_set_req(tctx.co, r);

    lmcf->running_timers++;

    lua_pushboolean(tctx.co, tctx.premature);

    n = lua_gettop(tctx.co);
    if (n > 2) {
        lua_insert(tctx.co, 2);
    }

    rc = ngx_http_lua_run_thread(L, r, ctx, n - 1);

    dd("timer lua run thread: %d", (int) rc);

    if (rc == NGX_ERROR || rc >= NGX_OK) {
        /* do nothing */

    } else if (rc == NGX_AGAIN) {
        rc = ngx_http_lua_content_run_posted_threads(L, r, ctx, 0);

    } else if (rc == NGX_DONE) {
        rc = ngx_http_lua_content_run_posted_threads(L, r, ctx, 1);

    } else {
        rc = NGX_OK;
    }

    ngx_http_lua_finalize_request(r, rc);
    return;

abort:
    if (tctx.co_ref && tctx.co) {
        lua_pushlightuserdata(tctx.co, &ngx_http_lua_coroutines_key);
        lua_rawget(tctx.co, LUA_REGISTRYINDEX);
        luaL_unref(tctx.co, -1, tctx.co_ref);
        lua_settop(tctx.co, 0);
    }

    if (r && r->pool) {
        ngx_destroy_pool(r->pool);
    }

    if (c) {
        ngx_http_lua_close_fake_connection(c);
    }
}
Example #11
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000 ) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        rev->handler = ngx_event_accept;

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
Example #12
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

	/* [analy]  ngx_use_accept_mutex says whether accept() must be
			serialized with a lock to solve the thundering-herd problem;
			it is set to 1 when the nginx worker count is > 1 and
			accept_mutex is enabled in the configuration
	*/
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

	/* [analy]	initialize the event timer red-black tree */
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

	/* [analy]	call the init function of the event module in use (epoll) */
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

		/* [analy]
			Nginx implements many event modules (epoll, poll, select,
			kqueue, aio, etc., found under src/event/modules), so it puts
			an abstraction layer over them; this makes it easy to use a
			different event model on each system and to add new ones.
			The init callback here actually calls ngx_epoll_init. The
			module->actions structure wraps all of epoll's interface
			functions, and it is through actions (of type
			ngx_event_actions_t) that epoll registers itself with the
			event abstraction layer.
		*/
        module = ngx_modules[m]->ctx;

        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

	/*
	 *	when the timer_resolution directive specifies a time and the
	 *	NGX_USE_TIMER_EVENT flag is not set, arm an interval timer with
	 *	the period given by timer_resolution
	 */
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000 ) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {					/* [analy]	not used by the epoll module */
        struct rlimit  rlmt;	

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

	/* [analy]	allocate the connection pool; since this happens during
				worker process initialization, every worker gets its own
				connections pool, sized per the worker_connections directive */
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;
	
	/* [analy]	allocate the read-event array */
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

	/* [analy]	allocate the write-event array */
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

	/* [analy]	initialize the connections array:
				the data field points to the next element,
				the read event pointer refers to the read_events entry with
				the same index, the write event pointer to the matching
				write_events entry, and fd is initialized to -1
	*/

    i = cycle->connection_n;
    next = NULL;

    do {
        i--;

        c[i].data = next;
        c[i].read = &cycle->read_events[i];			/* [analy]	tie the connection's read event to the read_events entry with the same index */
        c[i].write = &cycle->write_events[i];		/* [analy]	tie the connection's write event to the matching write_events entry */
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

	/* initialize the free_connections list and the free-connection count;
	   the list head points at the start of the connections pool */
    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

	/* [analy]	assign a free connection to each listening socket */
    /* for each listening socket */
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];					/* [analy]	the connection's listening pointer refers to cycle->listening[i] */
        ls[i].connection = c;					/* [analy]	cycle->listening[i].connection points at the acquired free connection */
			
        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;						//	mark this as a listening (accept) event

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        rev->handler = ngx_event_accept;			/* [analy]	set the accept callback handler */

        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {			/* [analy]	push the event into the epoll set */
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
Example #13
// Called when a worker process initializes after fork(), i.e. it runs in
// every worker.
// Initializes the two deferred-event queues and the timer red-black tree,
// arms the periodic signal used to keep the cached time updated,
// initializes the connection and event arrays in the cycle,
// and sets ngx_event_accept as the accept callback so connections can be
// accepted.
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;

    // configuration struct of the core module
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);

    // configuration struct of the event_core module
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    // master/worker multi-process mode with load balancing in use
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {

        // set the globals:
        // load balancing is on, the lock is not held initially, and the
        // wait time for retrying the lock comes from the configuration
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay;

    } else {
        // single process, or load balancing not explicitly requested:
        // don't use load balancing
        ngx_use_accept_mutex = 0;
    }

#if (NGX_WIN32)

    /*
     * disable accept mutex on win32 as it may cause deadlock if
     * grabbed by a process which can't accept connections
     */

    ngx_use_accept_mutex = 0;

#endif

    // initialize the two deferred-event queues
    ngx_queue_init(&ngx_posted_accept_events);
    ngx_queue_init(&ngx_posted_events);

    // initialize the timer red-black tree
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    // walk the event modules, but run the init function only of the one
    // actually in use
    for (m = 0; cycle->modules[m]; m++) {
        if (cycle->modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        // find the event model selected by the "use" directive, or the
        // default one
        if (cycle->modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = cycle->modules[m]->ctx;

        // call the event module's init function
        //
        // for epoll this calls epoll_create to set up the epoll machinery
        // (the size argument, cycle->connection_n / 2, has no real effect),
        // sets the globals for the OS-level send/receive primitives,
        // points the global event-module interface at the epoll functions,
        // and defaults to edge-triggered (ET) mode for speed
        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

// unix-only code: arm the periodic signal used to update the cached time
#if !(NGX_WIN32)

    // only eventport/kqueue set the NGX_USE_TIMER_EVENT flag; epoll does not
    // ngx_timer_resolution = ccf->timer_resolution; defaults to 0,
    // so the signal is armed only if the timer_resolution directive was used
    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        // install the SIGALRM signal handler
        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        // set the interval between signals, i.e. nginx's time resolution;
        // receiving the signal sets the ngx_event_timer_alarm flag, which
        // ngx_epoll_process_events checks to decide when to update the time
        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000 ) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#else

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        ngx_log_error(NGX_LOG_WARN, cycle->log, 0,
                      "the \"timer_resolution\" directive is not supported "
                      "with the configured event method, ignored");
        ngx_timer_resolution = 0;
    }

#endif

    // create the connection pool array, cycle->connection_n entries,
    // allocated with plain malloc rather than a memory pool
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;

    // create the read-event array, cycle->connection_n entries
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    // initialize the read event objects
    rev = cycle->read_events;
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        rev[i].instance = 1;
    }

    // create the write-event array, cycle->connection_n entries
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    // initialize the write event objects
    wev = cycle->write_events;
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
    }

    // i starts at the end of the array
    i = cycle->connection_n;
    next = NULL;

    // tie each connection object to its read/write events;
    // note that i starts at the end, so the array is walked backwards
    do {
        i--;

        // chain the connection objects into a list through the data member
        c[i].data = next;

        // read/write events
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];

        // a descriptor of -1 marks the connection as unused
        c[i].fd = (ngx_socket_t) -1;

        // next moves to point at this (earlier) element of the array
        next = &c[i];
    } while (i);

    // the connection objects are now chained into a list; set the free-list
    // pointer, and at this point next refers to the array's first element
    cycle->free_connections = next;

    // no connection is in use yet, so all of them are free
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */

    // assign a connection object to each listening port
    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

#if (NGX_HAVE_REUSEPORT)
        if (ls[i].reuseport && ls[i].worker != ngx_worker) {
            continue;
        }
#endif

        // grab a free connection
        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->type = ls[i].type;
        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;

        // set the accept flag: this event accepts connections
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else

        // important!
        // set ngx_event_accept as the accept callback: it is the event
        // handler invoked when a connection request arrives on the
        // listening port; it takes a connection from the cycle's pool and,
        // as the key step, calls ls->handler(c) to invoke the owning
        // module's business handler;
        // since 1.10 UDP is received with ngx_event_recvmsg
        rev->handler = (c->type == SOCK_STREAM) ? ngx_event_accept
                                                : ngx_event_recvmsg;

        // with load balancing, don't add the event to epoll here; it is
        // added only after the accept mutex has been won
        if (ngx_use_accept_mutex
#if (NGX_HAVE_REUSEPORT)
            && !ls[i].reuseport
#endif
           )
        {
            // done handling this listening port; only the callback was set
            continue;
        }

        // nginx 1.9.x no longer uses rtsig

        // single process or load balancing not requested:
        // add the event to epoll right away, start listening, accept requests
        if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

#endif

    } // end of the loop assigning a connection object to each listening port

    return NGX_OK;
}
//处理新连接的回调函数
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];
#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;
        // first try to establish a new connection by calling accept()
#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif
        /*
         * ngx_accept_disabled is the key threshold of the load-balancing
         * mechanism, and it tracks how heavily the connection pool is used.
         * It starts out negative; nothing happens while it stays negative,
         * but once it turns positive the balancing kicks in: the worker
         * stops handling new connection events and merely decrements
         * ngx_accept_disabled by one each round.
         *
         * It turns positive once the number of active connections exceeds
         * 7/8 of the maximum the worker can hold, i.e. under overload.
         */
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;
        // Take a ngx_connection_t object from the connection pool
        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {

        ngx_str_t             addr;
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
        u_char                text[NGX_SOCKADDR_STRLEN];
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                        & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }

        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }
        // If the event's available flag is set (the multi_accept directive), loop again so that nginx accepts as many pending connections as possible in one pass
    } while (ev->available);
}
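The ngx_accept_disabled threshold computed above is consumed once per event-loop iteration, before the worker tries to grab the accept mutex. A condensed sketch of that consumer, modeled on nginx's ngx_process_events_and_timers() with everything unrelated trimmed:

/* Condensed sketch of the consumer of ngx_accept_disabled, modeled on
 * ngx_process_events_and_timers(); surrounding code omitted. */
if (ngx_use_accept_mutex) {
    if (ngx_accept_disabled > 0) {
        /* overloaded: sit out this round and just cool down */
        ngx_accept_disabled--;

    } else {
        /* not overloaded: compete for the right to accept connections */
        if (ngx_trylock_accept_mutex(cycle) == NGX_ERROR) {
            return;
        }
        /* on success, accept events are (re)registered and this worker
         * will receive ngx_event_accept() callbacks */
    }
}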
예제 #15
0
ngx_int_t
ngx_zeromq_connect(ngx_peer_connection_t *pc)
{
    ngx_zeromq_connection_t  *zc = pc->data;
    ngx_zeromq_endpoint_t    *zep;
    ngx_connection_t         *c;
    ngx_event_t              *rev, *wev;
    void                     *zmq;
    int                       fd, zero;
    size_t                    fdsize;
    ngx_uint_t                i;

    zep = zc->endpoint;

    zmq = zmq_socket(ngx_zeromq_ctx, zep->type->value);
    if (zmq == NULL) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, 0,
                      "zmq_socket(%V) failed (%d: %s)",
                      &zep->type->name, ngx_errno, zmq_strerror(ngx_errno));
        return NGX_ERROR;
    }

    fdsize = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_FD, &fd, &fdsize) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_getsockopt(ZMQ_FD)");
        goto failed_zmq;
    }

    zero = 0;

    if (zmq_setsockopt(zmq, ZMQ_LINGER, &zero, sizeof(int)) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_setsockopt(ZMQ_LINGER)");
        goto failed_zmq;
    }

    c = ngx_get_connection(fd, pc->log);
    if (c == NULL) {
        goto failed_zmq;
    }

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    c->recv = ngx_zeromq_recv;
    c->send = NULL;
    c->recv_chain = ngx_zeromq_recv_chain;
    c->send_chain = ngx_zeromq_send_chain;

    /* This won't fly with ZeroMQ */
    c->sendfile = 0;
    c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
    c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->data = zc;
    wev->data = zc;

    rev->handler = ngx_zeromq_event_handler;
    wev->handler = ngx_zeromq_event_handler;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = &zc->connection;
    zc->connection_ptr = c;

    memcpy(&zc->connection, c, sizeof(ngx_connection_t));

    zc->socket = zmq;

    if (zep->type->can_send) {
        zc->send = zc;
    }

    if (zep->type->can_recv) {
        zc->recv = zc;
    }

    if (pc->local) {
        ngx_log_error(NGX_LOG_WARN, pc->log, 0,
                      "zmq_connect: binding to local address is not supported");
    }

    if (zep->bind) {
        if (zep->rand) {
            for (i = 0; ; i++) {
                ngx_zeromq_randomized_endpoint_regen(&zep->addr);

                if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {

                    if (ngx_errno == NGX_EADDRINUSE && i < 65535) {
                        continue;
                    }

                    ngx_zeromq_log_error(pc->log, "zmq_bind()");
                    goto failed;
                }

                break;
            }

        } else {
            if (zmq_bind(zmq, (const char *) zep->addr.data) == -1) {
                ngx_zeromq_log_error(pc->log, "zmq_bind()");
                goto failed;
            }
        }

    } else {
        if (zmq_connect(zmq, (const char *) zep->addr.data) == -1) {
            ngx_zeromq_log_error(pc->log, "zmq_connect()");
            goto failed;
        }
    }

    ngx_log_debug7(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "zmq_connect: %s to %V (%V), fd:%d #%d zc:%p zmq:%p",
                   zep->bind ? "bound" : "lazily connected",
                   &zep->addr, &zep->type->name, fd, c->number, zc, zmq);

    if (ngx_add_conn) {
        /* rtsig */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }

    } else {
        if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
            /* kqueue, epoll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT) != NGX_OK) {
                goto failed;
            }

        } else {
            /* select, poll, /dev/poll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT) != NGX_OK) {
                goto failed;
            }
        }
    }

    /*
     * ZeroMQ assumes that new socket is read-ready (but it really isn't)
     * and it won't notify us about any new events if we don't fail to read
     * from it first. Sigh.
     */

    rev->ready = 1;
    wev->ready = zep->type->can_send;

    return NGX_OK;

failed:

    ngx_free_connection(c);

    c->fd = (ngx_socket_t) -1;

    pc->connection = NULL;
    zc->socket = NULL;

failed_zmq:

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_close()");
    }

    return NGX_ERROR;
}
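A caveat worth spelling out: the descriptor obtained via ZMQ_FD is edge-triggered, so a handler woken on it must confirm actual readiness through the ZMQ_EVENTS socket option before acting. A minimal sketch of that check (the function name here is hypothetical; the option calls are the standard zmq C API):

/* Minimal sketch: confirm readiness of a zmq socket after its ZMQ_FD
 * descriptor signals; the function name is invented for illustration. */
static void
ngx_zeromq_check_events(void *zmq, ngx_log_t *log)
{
    int     events;
    size_t  len = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_EVENTS, &events, &len) == -1) {
        ngx_zeromq_log_error(log, "zmq_getsockopt(ZMQ_EVENTS)");
        return;
    }

    if (events & ZMQ_POLLIN) {
        /* messages can be read with zmq_msg_recv() right now */
    }

    if (events & ZMQ_POLLOUT) {
        /* messages can be written without blocking */
    }
}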
void
ngx_event_recvmsg(ngx_event_t *ev)
{
    ssize_t            n;
    ngx_log_t         *log;
    ngx_err_t          err;
    ngx_event_t       *rev, *wev;
    struct iovec       iov[1];
    struct msghdr      msg;
    ngx_listening_t   *ls;
    ngx_event_conf_t  *ecf;
    ngx_connection_t  *c, *lc;
    u_char             sa[NGX_SOCKADDRLEN];
    static u_char      buffer[65535];

#if (NGX_HAVE_MSGHDR_MSG_CONTROL)

#if (NGX_HAVE_IP_RECVDSTADDR)
    u_char             msg_control[CMSG_SPACE(sizeof(struct in_addr))];
#elif (NGX_HAVE_IP_PKTINFO)
    u_char             msg_control[CMSG_SPACE(sizeof(struct in_pktinfo))];
#endif

#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO)
    u_char             msg_control6[CMSG_SPACE(sizeof(struct in6_pktinfo))];
#endif

#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "recvmsg on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        ngx_memzero(&msg, sizeof(struct msghdr));

        iov[0].iov_base = (void *) buffer;
        iov[0].iov_len = sizeof(buffer);

        msg.msg_name = &sa;
        msg.msg_namelen = sizeof(sa);
        msg.msg_iov = iov;
        msg.msg_iovlen = 1;

#if (NGX_HAVE_MSGHDR_MSG_CONTROL)

        if (ls->wildcard) {

#if (NGX_HAVE_IP_RECVDSTADDR || NGX_HAVE_IP_PKTINFO)
            if (ls->sockaddr->sa_family == AF_INET) {
                msg.msg_control = &msg_control;
                msg.msg_controllen = sizeof(msg_control);
            }
#endif

#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO)
            if (ls->sockaddr->sa_family == AF_INET6) {
                msg.msg_control = &msg_control6;
                msg.msg_controllen = sizeof(msg_control6);
            }
#endif
        }

#endif

        n = recvmsg(lc->fd, &msg, 0);

        if (n == -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "recvmsg() not ready");
                return;
            }

            ngx_log_error(NGX_LOG_ALERT, ev->log, err, "recvmsg() failed");

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

#if (NGX_HAVE_MSGHDR_MSG_CONTROL)
        if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
            ngx_log_error(NGX_LOG_ALERT, ev->log, 0,
                          "recvmsg() truncated data");
            continue;
        }
#endif

        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(lc->fd, ev->log);
        if (c == NULL) {
            return;
        }

        c->shared = 1;
        c->type = SOCK_DGRAM;
        c->socklen = msg.msg_namelen;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, c->socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, msg.msg_name, c->socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        *log = ls->log;

        c->send = ngx_udp_send;

        c->log = log;
        c->pool->log = log;

        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

#if (NGX_HAVE_MSGHDR_MSG_CONTROL)

        if (ls->wildcard) {
            struct cmsghdr   *cmsg;
            struct sockaddr  *sockaddr;

            sockaddr = ngx_palloc(c->pool, c->local_socklen);
            if (sockaddr == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            ngx_memcpy(sockaddr, c->local_sockaddr, c->local_socklen);
            c->local_sockaddr = sockaddr;

            for (cmsg = CMSG_FIRSTHDR(&msg);
                 cmsg != NULL;
                 cmsg = CMSG_NXTHDR(&msg, cmsg))
            {

#if (NGX_HAVE_IP_RECVDSTADDR)

                if (cmsg->cmsg_level == IPPROTO_IP
                    && cmsg->cmsg_type == IP_RECVDSTADDR
                    && sockaddr->sa_family == AF_INET)
                {
                    struct in_addr      *addr;
                    struct sockaddr_in  *sin;

                    addr = (struct in_addr *) CMSG_DATA(cmsg);
                    sin = (struct sockaddr_in *) sockaddr;
                    sin->sin_addr = *addr;

                    break;
                }

#elif (NGX_HAVE_IP_PKTINFO)

                if (cmsg->cmsg_level == IPPROTO_IP
                    && cmsg->cmsg_type == IP_PKTINFO
                    && sockaddr->sa_family == AF_INET)
                {
                    struct in_pktinfo   *pkt;
                    struct sockaddr_in  *sin;

                    pkt = (struct in_pktinfo *) CMSG_DATA(cmsg);
                    sin = (struct sockaddr_in *) sockaddr;
                    sin->sin_addr = pkt->ipi_addr;

                    break;
                }

#endif

#if (NGX_HAVE_INET6 && NGX_HAVE_IPV6_RECVPKTINFO)

                if (cmsg->cmsg_level == IPPROTO_IPV6
                    && cmsg->cmsg_type == IPV6_PKTINFO
                    && sockaddr->sa_family == AF_INET6)
                {
                    struct in6_pktinfo   *pkt6;
                    struct sockaddr_in6  *sin6;

                    pkt6 = (struct in6_pktinfo *) CMSG_DATA(cmsg);
                    sin6 = (struct sockaddr_in6 *) sockaddr;
                    sin6->sin6_addr = pkt6->ipi6_addr;

                    break;
                }

#endif

            }
        }

#endif

        c->buffer = ngx_create_temp_buf(c->pool, n);
        if (c->buffer == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->buffer->last = ngx_cpymem(c->buffer->last, buffer, n);

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA recvmsg: %V fd:%d n:%z",
                           c->number, &addr, c->fd, n);
        }

        }
#endif

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available -= n;
        }

    } while (ev->available);
}
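The cmsg loop above only yields anything if the listening socket was told to attach packet info in the first place. A minimal sketch of that setup on Linux (nginx performs the equivalent setsockopt when it configures wildcard UDP listeners):

#include <sys/socket.h>
#include <netinet/in.h>

/* Minimal sketch: ask the kernel to report each datagram's destination
 * address via an IP_PKTINFO control message (Linux-style; BSDs use
 * IP_RECVDSTADDR, and IPv6 uses IPV6_RECVPKTINFO). */
static int
enable_pktinfo(int fd)
{
    int  on = 1;

    return setsockopt(fd, IPPROTO_IP, IP_PKTINFO, &on, sizeof(on));
}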
void
ngx_event_recvmsg(ngx_event_t *ev)
{
    ssize_t            n;
    ngx_log_t         *log;
    ngx_err_t          err;
    ngx_event_t       *rev, *wev;    
    WSABUF             wsabuf[1];	
    u_long             bytes, flags;	
    ngx_listening_t   *ls;
    ngx_event_conf_t  *ecf;
    ngx_connection_t  *c, *lc;
    u_char             sa[NGX_SOCKADDRLEN];	
    socklen_t          socklen = NGX_SOCKADDRLEN;
    static u_char      buffer[65535];

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    ev->available = ecf->multi_accept;

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;	

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "WSARecvFrom on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        ngx_memzero(&wsabuf, sizeof(WSABUF));

        wsabuf[0].buf = (void *) buffer;
        wsabuf[0].len  = sizeof(buffer);

        flags = 0;
        bytes = 0;
        socklen = NGX_SOCKADDRLEN;  /* reset the in/out length before each call */
        n = WSARecvFrom(lc->fd, wsabuf, 1, &bytes, &flags,
                     (struct sockaddr *) sa,
                     (LPINT) &socklen, NULL, NULL);

        if (n == SOCKET_ERROR) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "WSARecvFrom() not ready");
                return;
            }

            ngx_log_error(NGX_LOG_ALERT, ev->log, err, "WSARecvFrom() failed");

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(lc->fd, ev->log);
        if (c == NULL) {
            return;
        }

        c->shared = 1;
        c->type = SOCK_DGRAM;
        c->socklen = socklen;  /* peer address length filled in by WSARecvFrom() */

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, c->socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, &sa, c->socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        *log = ls->log;

        c->send = ngx_udp_send;

        c->log = log;
        c->pool->log = log;

        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        n = bytes;
        c->buffer = ngx_create_temp_buf(c->pool, n);
        if (c->buffer == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->buffer->last = ngx_cpymem(c->buffer->last, buffer, n);

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug4(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA WSARecvFrom: %V fd:%d n:%z",
                           c->number, &addr, c->fd, n);
        }

        }
#endif

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

    } while (ev->available);
}
ngx_int_t
ngx_stream_upm_init_worker(ngx_cycle_t * cycle)
{
    u_char                          *p;
    //ngx_int_t                        rc;
    ngx_connection_t                *c;
    //ngx_connection_t                *pc;
    ngx_stream_upm_ctx_t            *ctx;
    ngx_stream_session_t            *fake_session;
    ngx_stream_upstream_t           *u;
    //ngx_peer_connection_t           *peer;
    ngx_stream_upm_main_conf_t      *ummcf;
    ngx_stream_upstream_srv_conf_t  *uscf; 

    //Fake a client session
    ummcf = ngx_stream_cycle_get_module_main_conf(cycle, ngx_stream_upm_module);
    ummcf->fake_session = ngx_pcalloc(cycle->pool, sizeof(ngx_stream_session_t));
    if (ummcf->fake_session == NULL) {
        return NGX_ERROR;    
    }

    fake_session = ummcf->fake_session;
    // Use 254 as a fake client fd
    c = ngx_get_connection(254, cycle->log);
    if (c == NULL) {
        return NGX_ERROR;
    }
    // Set the fake connection's basic fields
    c->pool = cycle->pool;
    c->log = cycle->log;

    fake_session->signature = NGX_STREAM_MODULE;
    fake_session->main_conf = ((ngx_stream_conf_ctx_t *)cycle->conf_ctx)->main_conf;
    fake_session->srv_conf = ((ngx_stream_conf_ctx_t *)cycle->conf_ctx)->srv_conf;

    fake_session->connection = c;
    c->data = fake_session;

    u = ngx_pcalloc(c->pool, sizeof(ngx_stream_upstream_t));
    if (u == NULL) {
        return NGX_ERROR;
    }
    fake_session->upstream = u;
    
    //Init the upm ctx;
    fake_session->ctx = ngx_pcalloc(cycle->pool, sizeof(void *) * ngx_stream_max_module);
    if (fake_session->ctx == NULL) {
        return NGX_ERROR;
    }
    ctx = ngx_pcalloc(cycle->pool, sizeof(ngx_stream_upm_ctx_t));
    if (ctx == NULL) {
        return NGX_ERROR;
    }
    ngx_stream_set_ctx(fake_session, ctx, ngx_stream_upm_module);
    
    ctx->state = CONNECT;
    ctx->connect = ngx_stream_upm_connect;
    ctx->create_request = ngx_stream_upm_create_request;
    ctx->send_request = ngx_stream_upm_send_request;
    ctx->read_and_parse_response = ngx_stream_upm_read_and_parse_response;
    //ctx->process_response = ngx_stream_upm_process_response;

    u->peer.log = cycle->log;
    u->peer.log_error = NGX_ERROR_ERR;
    // no need to set a local address
    u->peer.local = NULL;

    uscf = ummcf->upstream;
    if (uscf->peer.init(fake_session, uscf) != NGX_OK) {
        return NGX_ERROR;
    }

    u->peer.start_time = ngx_current_msec;

    if (ummcf->next_upstream_tries
        && u->peer.tries > ummcf->next_upstream_tries)
    {
        u->peer.tries = ummcf->next_upstream_tries;
    }
    u->start_sec = ngx_time();

    // allocating downstream_buf here may be unnecessary
    p = ngx_pnalloc(c->pool, ummcf->buffer_size);
    if (p == NULL) {
        return NGX_ERROR;
    }
    
    // we use downstream_buf to buffer the request
    u->downstream_buf.start = p;
    u->downstream_buf.end = p + ummcf->buffer_size;
    u->downstream_buf.pos = p;
    u->downstream_buf.last = p;

    c->write->handler = ngx_stream_upm_empty_handler;
    c->read->handler = ngx_stream_upm_empty_handler;

    return ngx_stream_upm_connect(fake_session);
}
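ngx_stream_upm_empty_handler is not part of this excerpt; following nginx's usual convention (compare ngx_http_empty_handler), it is presumably a do-nothing callback along these lines:

/* Presumed shape of the no-op handler, not shown in the excerpt. */
static void
ngx_stream_upm_empty_handler(ngx_event_t *ev)
{
    ngx_log_debug0(NGX_LOG_DEBUG_STREAM, ev->log, 0,
                   "stream upm: empty handler");
}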
static ngx_int_t
ngx_send_radius_request( ngx_http_request_t *r, radius_req_queue_node_t* prev_req ) {

    ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request 0x%xl", r );
    ngx_http_auth_radius_main_conf_t* conf = ngx_http_get_module_main_conf( r, ngx_http_auth_radius_module );

    ngx_http_core_loc_conf_t  *clcf;
    clcf = ngx_http_get_module_loc_conf( r, ngx_http_core_module );

    ngx_http_auth_radius_ctx_t* ctx = ngx_http_get_module_ctx( r, ngx_http_auth_radius_module );
    if ( ctx == NULL )
        abort(); // TODO

    radius_str_t user = { RADIUS_STR_FROM_NGX_STR_INITIALIZER( r->headers_in.user ) };
    radius_str_t passwd = { RADIUS_STR_FROM_NGX_STR_INITIALIZER( r->headers_in.passwd ) };

    radius_req_queue_node_t* n;
    n = radius_send_request( prev_req, &user, &passwd, clcf->error_log );
    if ( n == NULL ) {
        abort(); // TODO
    }

    ngx_http_auth_radius_main_conf_t* lconf = ngx_http_get_module_loc_conf( r, ngx_http_auth_radius_module );
    ngx_add_timer( r->connection->read, lconf->radius_timeout ); 

    radius_server_t* rs;
    rs = get_server_by_req( n );
    ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request rs: %d, assign 0x%xl to 0x%xl, id: %d", rs->id, r, n, n->ident );

    n->data = r;
    ctx->n = n;

    ngx_connection_t* c = rs->data;
    ngx_event_t* rev;

    if ( c == NULL ) {

        c = ngx_get_connection( rs->s, conf->log );
        if ( c == NULL ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request: ngx_get_connection" );
            if (ngx_close_socket( rs->s ) == -1)
                ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request: ngx_close_socket" );
            return NGX_ERROR;
        }

        if ( ngx_nonblocking( rs->s ) == -1 ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request: ngx_nonblocking" );
            ngx_free_connection( c );
            if (ngx_close_socket( rs->s ) == -1)
                ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request: ngx_close_socket" );
            return NGX_ERROR;
        }

        rs->data = c;
        c->data = rs;

        rev = c->read;
        rev->handler = radius_read_handler;
        rev->log = clcf->error_log;
        rs->log = clcf->error_log;

        if ( ngx_add_event( rev, NGX_READ_EVENT, NGX_LEVEL_EVENT ) != NGX_OK ) {
            ngx_log_error( NGX_LOG_ERR, r->connection->log, 0, "ngx_send_radius_request: ngx_add_event" );
            return NGX_ERROR;
        }

        c->number = ngx_atomic_fetch_add( ngx_connection_counter, 1 );
    }
    return NGX_OK;
}
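radius_read_handler itself is also outside this excerpt. Given how the connection is wired up above (rev->data is the connection, c->data is the radius server), a plausible skeleton looks like this; radius_recv_reply and ngx_http_auth_radius_done are invented names used only for illustration:

/* Hypothetical skeleton of radius_read_handler; the helper names are
 * invented and do not come from the module. */
static void
radius_read_handler(ngx_event_t *rev)
{
    ngx_connection_t         *c = rev->data;   /* set by ngx_get_connection */
    radius_server_t          *rs = c->data;    /* wired up above */
    radius_req_queue_node_t  *n;
    ngx_http_request_t       *r;

    /* read one datagram from rs->s and match it to the outstanding
     * request node by RADIUS ident (hypothetical helper) */
    n = radius_recv_reply(rs);
    if (n == NULL) {
        return;
    }

    r = n->data;

    if (r->connection->read->timer_set) {
        ngx_del_timer(r->connection->read);    /* cancel radius_timeout */
    }

    /* hand the result back to the access phase (hypothetical helper) */
    ngx_http_auth_radius_done(r, n);
}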
// The event here is the read event of the ngx_connection_t that ngx_event_process_init took from the connection pool
// for the listening socket. The accept event is added to epoll either in ngx_event_process_init (single process, or no
// load balancing configured) or, in the multi-process load-balanced case, when the accept mutex is acquired.
void // The ngx_connection_t/ngx_event_t passed in belong to the listening socket; once accept() succeeds, a fresh
ngx_event_accept(ngx_event_t *ev) // ngx_connection_t (with its own events) is fetched to read/write the new connection.
{ // Runs from ngx_process_events_and_timers. One accept event corresponds to one ev; if e.g. 4 clients connect at
  // once, the do {} while loop below handles them all in a single invocation.
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;

// For file async I/O this event comes from ngx_event_aio_t (read only); for network events it is the read/write
// event embedded in ngx_connection_s.
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];
#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;   
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

#if (NGX_HAVE_ACCEPT4) // the socket can later be closed with ngx_close_socket
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
    /*
     * System calls on a non-blocking fd always return immediately, whether or not the
     * event has occurred. If it has not, they return -1 just like the error case, so
     * errno must be used to tell the two apart: for accept, send and recv, errno is
     * typically EAGAIN ("try again") or EWOULDBLOCK; for connect it is EINPROGRESS
     * ("in progress").
     */
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;
            
            if (err == NGX_EAGAIN) { // with multi_accept in the event{} block, we return here once every pending connection on this listen ip:port has been accepted
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }
// This worker failed to accept, so set the value to 1 for now: on the next round other workers compete for the
// accept mutex, and the round after that this worker joins in again, because ngx_process_events_and_timers
// first decrements ngx_accept_disabled (1 - 1 = 0), after which it may compete.
                    ngx_accept_disabled = 1; 
                } else { // no load balancing needed: add a small delay, then keep accepting in ngx_process_events_and_timers
                    ngx_add_timer(ev, ecf->accept_mutex_delay, NGX_FUNC_LINE);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif
        // Set the load-balancing threshold:
        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n; // compares the free connections with 1/8 of the total; positive when fewer than 1/8 are free

        // After the server accepts a client connection (here in ngx_event_accept), ngx_get_connection takes a
        // ngx_connection_t from the pool -- one per client connection -- and ngx_http_init_connection later
        // attaches a ngx_http_connection_t to it via ngx_connection_t->data.

        // Take a free ngx_connection_t from the pool; it is used to read/write the new connection once it is
        // established. The ngx_event_t in the formal parameter list belongs to the accept event's connection.
        c = ngx_get_connection(s, ev->log);  // ngx_get_connection sets c->fd = s
        // Note: this ngx_connection_t is freshly taken from the pool and is distinct from the one seen in
        // ngx_epoll_process_events.

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif
        // Again: this ngx_connection_t came fresh from the pool and is not the one used in ngx_epoll_process_events.
        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {

        ngx_str_t             addr;
        struct sockaddr_in   *sin;
        ngx_cidr_t           *cidr;
        ngx_uint_t            i;
        u_char                text[NGX_SOCKADDR_STRLEN];
#if (NGX_HAVE_INET6)
        struct sockaddr_in6  *sin6;
        ngx_uint_t            n;
#endif

        cidr = ecf->debug_connection.elts;
        for (i = 0; i < ecf->debug_connection.nelts; i++) {
            if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
                goto next;
            }

            switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
            case AF_INET6:
                sin6 = (struct sockaddr_in6 *) c->sockaddr;
                for (n = 0; n < 16; n++) {
                    if ((sin6->sin6_addr.s6_addr[n]
                        & cidr[i].u.in6.mask.s6_addr[n])
                        != cidr[i].u.in6.addr.s6_addr[n])
                    {
                        goto next;
                    }
                }
                break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
            case AF_UNIX:
                break;
#endif

            default: /* AF_INET */
                sin = (struct sockaddr_in *) c->sockaddr;
                if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                    != cidr[i].u.in.addr)
                {
                    goto next;
                }
                break;
            }

            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;

        next:
            continue;
        }

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }

        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) { // not taken when epoll is in use
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c); // e.g. ngx_http_init_connection

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available); // accept everything currently pending in one pass, until accept() returns NGX_EAGAIN
}
예제 #21
0
ngx_int_t
ngx_event_connect_peer(ngx_peer_connection_t *pc)
{   // Here we initialize a connection to an upstream peer
    int                rc;
    ngx_int_t          event;
    ngx_err_t          err;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;
    // pc->get is the load-balancing hook that selects a backend peer.
    /*
     * The round-robin module (also used by ip_hash) sets these handlers:
     * r->upstream->peer.get = ngx_http_upstream_get_round_robin_peer;
     * r->upstream->peer.free = ngx_http_upstream_free_round_robin_peer;
     */
    // The selected backend address is stored in pc.
    rc = pc->get(pc, pc->data);

    if (rc != NGX_OK) {
        return rc;
    }
    // Allocate the socket fd
    s = ngx_socket(pc->sockaddr->sa_family, SOCK_STREAM, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, "socket %d", s);

    if (s == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_socket_n " failed");
        return NGX_ERROR;
    }


    c = ngx_get_connection(s, pc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_close_socket_n "failed");
        }

        return NGX_ERROR;
    }

    if (pc->rcvbuf) {
        if (setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                       (const void *) &pc->rcvbuf, sizeof(int)) == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          "setsockopt(SO_RCVBUF) failed");
            goto failed;
        }
    }

    if (ngx_nonblocking(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        goto failed;
    }

    if (pc->local) {
        if (bind(s, pc->local->sockaddr, pc->local->socklen) == -1) {
            ngx_log_error(NGX_LOG_CRIT, pc->log, ngx_socket_errno,
                          "bind(%V) failed", &pc->local->name);

            goto failed;
        }
    }

    // Set the connection's read/write I/O handlers
    c->recv = ngx_recv;
    c->send = ngx_send;
    c->recv_chain = ngx_recv_chain;
    c->send_chain = ngx_send_chain;

    c->sendfile = 1;

    c->log_error = pc->log_error;

    if (pc->sockaddr->sa_family != AF_INET) {
        c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
        c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

#if (NGX_SOLARIS)
        /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
        c->sendfile = 0;
#endif
    }

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_THREADS)

    /* TODO: lock event when call completion handler */

    rev->lock = pc->lock;
    wev->lock = pc->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;

#endif

    if (ngx_add_conn) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }
    }

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "connect to %V, fd:%d #%d", pc->name, s, c->number);

    // Perform the actual (non-blocking) connect
    rc = connect(s, pc->sockaddr, pc->socklen);

    if (rc == -1) {
        err = ngx_socket_errno;

        if (err != NGX_EINPROGRESS
#if (NGX_WIN32)
                /* Winsock returns WSAEWOULDBLOCK (NGX_EAGAIN) */
                && err != NGX_EAGAIN
#endif
           )
        {
            if (err == NGX_ECONNREFUSED
#if (NGX_LINUX)
                    /*
                     * Linux returns EAGAIN instead of ECONNREFUSED
                     * for unix sockets if listen queue is full
                     */
                    || err == NGX_EAGAIN
#endif
                    || err == NGX_ECONNRESET
                    || err == NGX_ENETDOWN
                    || err == NGX_ENETUNREACH
                    || err == NGX_EHOSTDOWN
                    || err == NGX_EHOSTUNREACH)
            {
                level = NGX_LOG_ERR;

            } else {
                level = NGX_LOG_CRIT;
            }

            ngx_log_error(level, c->log, err, "connect() to %V failed",
                          pc->name);

            return NGX_DECLINED;
        }
    }

    if (ngx_add_conn) {
        if (rc == -1) {

            /* NGX_EINPROGRESS */

            return NGX_AGAIN;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_AIO_EVENT) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, ngx_socket_errno,
                       "connect(): %d", rc);

        /* aio, iocp */

        if (ngx_blocking(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_blocking_n " failed");
            goto failed;
        }

        /*
         * FreeBSD's aio allows to post an operation on non-connected socket.
         * NT does not support it.
         *
         * TODO: check in Win32, etc. As workaround we can use NGX_ONESHOT_EVENT
         */

        rev->ready = 1;
        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue */

        event = NGX_CLEAR_EVENT;

    } else {

        /* select, poll, /dev/poll */

        event = NGX_LEVEL_EVENT;
    }

    // Register the connection's read event with the event mechanism
    if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
        goto failed;
    }

    if (rc == -1) {

        /* NGX_EINPROGRESS */

        if (ngx_add_event(wev, NGX_WRITE_EVENT, event) != NGX_OK) {
            goto failed;
        }

        return NGX_AGAIN;
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

    wev->ready = 1;  // the connection is ready for writing

    return NGX_OK;

failed:

    ngx_free_connection(c);

    if (ngx_close_socket(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    return NGX_ERROR;
}
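When this function returns NGX_AGAIN, the caller waits for the write event and must then check whether the connect actually succeeded. A sketch of that check, modeled on nginx's upstream test-connect logic (the kqueue-specific branch is omitted):

/* Sketch: verify a pending non-blocking connect() once the write event
 * fires, via SO_ERROR (modeled on ngx_http_upstream_test_connect). */
static ngx_int_t
test_connect(ngx_connection_t *c)
{
    int        err;
    socklen_t  len = sizeof(int);

    if (getsockopt(c->fd, SOL_SOCKET, SO_ERROR, (void *) &err, &len) == -1) {
        err = ngx_socket_errno;
    }

    if (err) {
        ngx_log_error(NGX_LOG_ERR, c->log, err, "connect() failed");
        return NGX_ERROR;
    }

    return NGX_OK;
}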
예제 #22
0
// This article explains the process clearly: http://www.tbdata.org/archives/1245
static ngx_int_t
ngx_event_process_init(ngx_cycle_t *cycle)
{
    ngx_uint_t           m, i;
    ngx_event_t         *rev, *wev;
    ngx_listening_t     *ls;
    ngx_connection_t    *c, *next, *old;
    ngx_core_conf_t     *ccf;
    ngx_event_conf_t    *ecf;
    ngx_event_module_t  *module;
    // Fetch the relevant modules' configuration structures
    ccf = (ngx_core_conf_t *) ngx_get_conf(cycle->conf_ctx, ngx_core_module);
    ecf = ngx_event_get_conf(cycle->conf_ctx, ngx_event_core_module);

    // Decide whether to use the accept mutex, which mainly controls load balancing. ccf->master checks that we run
    // in master-worker mode; a single-process setup skips all of this.
    if (ccf->master && ccf->worker_processes > 1 && ecf->accept_mutex) {
        // use the mutex to balance accepts across worker processes
        ngx_use_accept_mutex = 1;
        ngx_accept_mutex_held = 0;
        ngx_accept_mutex_delay = ecf->accept_mutex_delay; // how long to wait before competing again after losing the mutex

    } else {
        ngx_use_accept_mutex = 0;
    }

#if (NGX_THREADS)
    ngx_posted_events_mutex = ngx_mutex_init(cycle->log, 0);
    if (ngx_posted_events_mutex == NULL) {
        return NGX_ERROR;
    }
#endif

    // initialize the timers
    if (ngx_event_timer_init(cycle->log) == NGX_ERROR) {
        return NGX_ERROR;
    }

    // initialize the selected event module
    for (m = 0; ngx_modules[m]; m++) {
        if (ngx_modules[m]->type != NGX_EVENT_MODULE) {
            continue;
        }

        if (ngx_modules[m]->ctx_index != ecf->use) {
            continue;
        }

        module = ngx_modules[m]->ctx;
        
        // initialize the module
        if (module->actions.init(cycle, ngx_timer_resolution) != NGX_OK) {
            /* fatal */
            exit(2);
        }

        break;
    }

#if !(NGX_WIN32)

    if (ngx_timer_resolution && !(ngx_event_flags & NGX_USE_TIMER_EVENT)) {
        struct sigaction  sa;
        struct itimerval  itv;

        ngx_memzero(&sa, sizeof(struct sigaction));
        sa.sa_handler = ngx_timer_signal_handler;
        sigemptyset(&sa.sa_mask);

        if (sigaction(SIGALRM, &sa, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "sigaction(SIGALRM) failed");
            return NGX_ERROR;
        }

        itv.it_interval.tv_sec = ngx_timer_resolution / 1000;
        itv.it_interval.tv_usec = (ngx_timer_resolution % 1000) * 1000;
        itv.it_value.tv_sec = ngx_timer_resolution / 1000;
        itv.it_value.tv_usec = (ngx_timer_resolution % 1000 ) * 1000;

        if (setitimer(ITIMER_REAL, &itv, NULL) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "setitimer() failed");
        }
    }

    if (ngx_event_flags & NGX_USE_FD_EVENT) {
        struct rlimit  rlmt;

        if (getrlimit(RLIMIT_NOFILE, &rlmt) == -1) {
            ngx_log_error(NGX_LOG_ALERT, cycle->log, ngx_errno,
                          "getrlimit(RLIMIT_NOFILE) failed");
            return NGX_ERROR;
        }

        cycle->files_n = (ngx_uint_t) rlmt.rlim_cur;

        cycle->files = ngx_calloc(sizeof(ngx_connection_t *) * cycle->files_n,
                                  cycle->log);
        if (cycle->files == NULL) {
            return NGX_ERROR;
        }
    }

#endif

    // Create the connection pool. We are already inside the worker here, so each worker has its own connections array.
    cycle->connections =
        ngx_alloc(sizeof(ngx_connection_t) * cycle->connection_n, cycle->log);
    if (cycle->connections == NULL) {
        return NGX_ERROR;
    }

    c = cycle->connections;
    
    // allocate all the read events
    cycle->read_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                   cycle->log);
    if (cycle->read_events == NULL) {
        return NGX_ERROR;
    }

    rev = cycle->read_events;
    // initialize the read events
    for (i = 0; i < cycle->connection_n; i++) {
        rev[i].closed = 1;
        // guard against stale events
        rev[i].instance = 1;
#if (NGX_THREADS)
        rev[i].lock = &c[i].lock;
        rev[i].own_lock = &c[i].lock;
#endif
    }

    // allocate the write events
    cycle->write_events = ngx_alloc(sizeof(ngx_event_t) * cycle->connection_n,
                                    cycle->log);
    if (cycle->write_events == NULL) {
        return NGX_ERROR;
    }

    wev = cycle->write_events;
    // initialize the write events
    for (i = 0; i < cycle->connection_n; i++) {
        wev[i].closed = 1;
#if (NGX_THREADS)
        wev[i].lock = &c[i].lock;
        wev[i].own_lock = &c[i].lock;
#endif
    }

    i = cycle->connection_n;
    next = NULL;

    // initialize the connection pool as a free list
    do {
        i--;
        
        // chain the free list through c[i].data
        c[i].data = next;
        // each connection's read/write events point at the cycle-wide event arrays
        c[i].read = &cycle->read_events[i];
        c[i].write = &cycle->write_events[i];
        c[i].fd = (ngx_socket_t) -1;

        next = &c[i];

#if (NGX_THREADS)
        c[i].lock = 0;
#endif
    } while (i);

    // publish the free-connection list
    cycle->free_connections = next;
    cycle->free_connection_n = cycle->connection_n;

    /* for each listening socket */
    // The block below initializes the listen events: it binds the event callback to each listening
    // socket and registers it with the event mechanism.

    ls = cycle->listening.elts;  // each listening socket is assigned one connection (one slot) from the connections array
    // iterate over the listening sockets
    for (i = 0; i < cycle->listening.nelts; i++) {
        
        // take a connection from the pool
        c = ngx_get_connection(ls[i].fd, cycle->log);

        if (c == NULL) {
            return NGX_ERROR;
        }

        c->log = &ls[i].log;

        c->listening = &ls[i];
        ls[i].connection = c;

        rev = c->read;

        rev->log = c->log;
        rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
        rev->deferred_accept = ls[i].deferred_accept;
#endif

        if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
            if (ls[i].previous) {

                /*
                 * delete the old accept events that were bound to
                 * the old cycle read events array
                 */

                old = ls[i].previous->connection;

                if (ngx_del_event(old->read, NGX_READ_EVENT, NGX_CLOSE_EVENT)
                    == NGX_ERROR)
                {
                    return NGX_ERROR;
                }

                old->fd = (ngx_socket_t) -1;
            }
        }

#if (NGX_WIN32)

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            ngx_iocp_conf_t  *iocpcf;

            rev->handler = ngx_event_acceptex;

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, 0, NGX_IOCP_ACCEPT) == NGX_ERROR) {
                return NGX_ERROR;
            }

            ls[i].log.handler = ngx_acceptex_log_error;

            iocpcf = ngx_event_get_conf(cycle->conf_ctx, ngx_iocp_module);
            if (ngx_event_post_acceptex(&ls[i], iocpcf->post_acceptex)
                == NGX_ERROR)
            {
                return NGX_ERROR;
            }

        } else {
            rev->handler = ngx_event_accept;  // register ngx_event_accept as the read-event callback of the listening socket

            if (ngx_use_accept_mutex) {
                continue;
            }

            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#else
        
        // Set the listen fd's event callback; it performs accept() and the follow-up processing,
        // and is the entry point of nginx's event-driven machinery.
        rev->handler = ngx_event_accept;

        // when the accept mutex is in use, skip registration here; it happens after the mutex is acquired
        if (ngx_use_accept_mutex) {
            continue;
        }

        if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                return NGX_ERROR;
            }

        } else {
            // no accept mutex: add the listening socket's read event to the event mechanism right here
            if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
                return NGX_ERROR;
            }
        }

#endif

    }

    return NGX_OK;
}
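For reference, the free list built above is popped by ngx_get_connection roughly like this (condensed; the real function also handles the cycle->files slot, resets the events, and logs pool exhaustion):

/* Condensed sketch of the free-list pop inside ngx_get_connection();
 * cycle->files handling, event reset and logging are trimmed. */
c = ngx_cycle->free_connections;
if (c == NULL) {
    return NULL;                           /* connection pool exhausted */
}

ngx_cycle->free_connections = c->data;     /* c->data chains the free list */
ngx_cycle->free_connection_n--;

c->fd = s;                                 /* attach the fd to the connection */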
static ngx_int_t
ngx_http_lua_udp_connect(ngx_udp_connection_t *uc)
{
    int                rc;
    ngx_int_t          event;
    ngx_event_t       *rev, *wev;
    ngx_socket_t       s;
    ngx_connection_t  *c;

    s = ngx_socket(uc->sockaddr->sa_family, SOCK_DGRAM, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, &uc->log, 0, "UDP socket %d", s);

    if (s == -1) {
        ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                      ngx_socket_n " failed");

        return NGX_ERROR;
    }

    c = ngx_get_connection(s, &uc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                          ngx_close_socket_n "failed");
        }

        return NGX_ERROR;
    }

    if (ngx_nonblocking(s) == -1) {
        ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        ngx_free_connection(c);

        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, &uc->log, ngx_socket_errno,
                          ngx_close_socket_n " failed");
        }

        return NGX_ERROR;
    }

    rev = c->read;
    wev = c->write;

    rev->log = &uc->log;
    wev->log = &uc->log;

    uc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_THREADS)

    /* TODO: lock event when call completion handler */

    rev->lock = &c->lock;
    wev->lock = &c->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;

#endif

#if (NGX_HTTP_LUA_HAVE_SO_PASSCRED)
    if (uc->sockaddr->sa_family == AF_UNIX) {
        struct sockaddr         addr;

        addr.sa_family = AF_UNIX;

        /* just to make valgrind happy */
        ngx_memzero(addr.sa_data, sizeof(addr.sa_data));

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, &uc->log, 0, "datagram unix "
                       "domain socket autobind");

        if (bind(uc->connection->fd, &addr, sizeof(sa_family_t)) != 0) {
            ngx_log_error(NGX_LOG_CRIT, &uc->log, ngx_socket_errno,
                          "bind() failed");

            return NGX_ERROR;
        }
    }
#endif

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, &uc->log, 0,
                   "connect to %V, fd:%d #%d", &uc->server, s, c->number);

    rc = connect(s, uc->sockaddr, uc->socklen);

    /* TODO: aio, iocp */

    if (rc == -1) {
        ngx_log_error(NGX_LOG_CRIT, &uc->log, ngx_socket_errno,
                      "connect() failed");

        return NGX_ERROR;
    }

    /* UDP sockets are always ready to write */
    wev->ready = 1;

    if (ngx_add_event) {

        event = (ngx_event_flags & NGX_USE_CLEAR_EVENT) ?
                    /* kqueue, epoll */                 NGX_CLEAR_EVENT:
                    /* select, poll, /dev/poll */       NGX_LEVEL_EVENT;
                    /* eventport event type has no meaning: oneshot only */

        if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
            return NGX_ERROR;
        }

    } else {
        /* rtsig */

        if (ngx_add_conn(c) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
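Calling the function above only requires filling in the ngx_udp_connection_t fields it reads: sockaddr, socklen, server and log. A hedged usage sketch inside the same module (the function is static); my_udp_setup, the resolved sin argument and my_udp_read_handler are illustrative assumptions:

static ngx_int_t
my_udp_setup(ngx_pool_t *pool, struct sockaddr_in *sin, ngx_str_t name)
{
    ngx_udp_connection_t  *uc;

    uc = ngx_pcalloc(pool, sizeof(ngx_udp_connection_t));
    if (uc == NULL) {
        return NGX_ERROR;
    }

    uc->sockaddr = (struct sockaddr *) sin;  /* already-resolved peer address */
    uc->socklen = sizeof(struct sockaddr_in);
    uc->server = name;                       /* only used for debug logging */
    uc->log = *ngx_cycle->log;               /* the function logs via &uc->log */

    if (ngx_http_lua_udp_connect(uc) != NGX_OK) {
        return NGX_ERROR;
    }

    uc->connection->data = uc;
    uc->connection->read->handler = my_udp_read_handler;  /* hypothetical */

    return NGX_OK;
}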
Example #24
ngx_int_t
ngx_postgres_upstream_get_peer(ngx_peer_connection_t *pc, void *data)
{
    ngx_postgres_upstream_peer_data_t  *pgdt = data;
    ngx_postgres_upstream_srv_conf_t   *pgscf;
#if defined(nginx_version) && (nginx_version < 8017)
    ngx_postgres_ctx_t                 *pgctx;
#endif
    ngx_postgres_upstream_peers_t      *peers;
    ngx_postgres_upstream_peer_t       *peer;
    ngx_connection_t                   *pgxc = NULL;
    int                                 fd;
    ngx_event_t                        *rev, *wev;
    ngx_int_t                           rc;
    u_char                             *connstring, *last;
    size_t                              len;

    dd("entering");

#if defined(nginx_version) && (nginx_version < 8017)
    if (data == NULL) {
        goto failed;
    }

    pgctx = ngx_http_get_module_ctx(pgdt->request, ngx_postgres_module);
#endif

    pgscf = pgdt->srv_conf;

    pgdt->failed = 0;

    if (pgscf->max_cached && pgscf->single) {
        rc = ngx_postgres_keepalive_get_peer_single(pc, pgdt, pgscf);
        if (rc != NGX_DECLINED) {
            /* re-use keepalive peer */
            dd("re-using keepalive peer (single)");

            pgdt->state = state_db_send_query;

            ngx_postgres_process_events(pgdt->request);

            dd("returning NGX_AGAIN");
            return NGX_AGAIN;
        }
    }

    peers = pgscf->peers;

    if (pgscf->current > peers->number - 1) {
        pgscf->current = 0;
    }

    peer = &peers->peer[pgscf->current++];

    pgdt->name.len = peer->name.len;
    pgdt->name.data = peer->name.data;

    pgdt->sockaddr = *peer->sockaddr;

    pc->name = &pgdt->name;
    pc->sockaddr = &pgdt->sockaddr;
    pc->socklen = peer->socklen;
    pc->cached = 0;

    if ((pgscf->max_cached) && (!pgscf->single)) {
        rc = ngx_postgres_keepalive_get_peer_multi(pc, pgdt, pgscf);
        if (rc != NGX_DECLINED) {
            /* re-use keepalive peer */
            dd("re-using keepalive peer (multi)");

            pgdt->state = state_db_send_query;

            ngx_postgres_process_events(pgdt->request);

            dd("returning NGX_AGAIN");
            return NGX_AGAIN;
        }
    }

    if ((pgscf->reject) && (pgscf->active_conns >= pgscf->max_cached)) {
        ngx_log_error(NGX_LOG_INFO, pc->log, 0,
                      "postgres: keepalive connection pool is full,"
                      " rejecting request to upstream \"%V\"", &peer->name);

        /* a bit hack-ish way to return error response (setup part) */
        pc->connection = ngx_get_connection(0, pc->log);

#if defined(nginx_version) && (nginx_version < 8017)
        pgctx->status = NGX_HTTP_SERVICE_UNAVAILABLE;
#endif

        dd("returning NGX_AGAIN (NGX_HTTP_SERVICE_UNAVAILABLE)");
        return NGX_AGAIN;
    }

    /* sizeof("...") - 1 + 1 (for spaces and '\0' omitted */
    len = sizeof("hostaddr=") + peer->host.len
        + sizeof("port=") + sizeof("65535") - 1
        + sizeof("dbname=") + peer->dbname.len
        + sizeof("user="******"password="******"sslmode=disable");

    connstring = ngx_pnalloc(pgdt->request->pool, len);
    if (connstring == NULL) {
#if defined(nginx_version) && (nginx_version >= 8017)
        dd("returning NGX_ERROR");
        return NGX_ERROR;
#else
        goto failed;
#endif
    }

    /* TODO add unix sockets */
    last = ngx_snprintf(connstring, len - 1,
                        "hostaddr=%V port=%d dbname=%V user=%V password=%V"
                        " sslmode=disable",
                        &peer->host, peer->port, &peer->dbname, &peer->user,
                        &peer->password);
    *last = '\0';

    dd("PostgreSQL connection string: %s", connstring);

    /*
     * internal checks in PQsetnonblocking are taking care of any
     * PQconnectStart failures, so we don't need to check them here.
     */

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0,
                   "postgres: connecting");

    pgdt->pgconn = PQconnectStart((const char *)connstring);
    if (PQsetnonblocking(pgdt->pgconn, 1) == -1) {
        ngx_log_error(NGX_LOG_ERR, pc->log, 0,
                      "postgres: connection failed: %s in upstream \"%V\"",
                      PQerrorMessage(pgdt->pgconn), &peer->name);

        PQfinish(pgdt->pgconn);
        pgdt->pgconn = NULL;

#if defined(nginx_version) && (nginx_version >= 8017)
        dd("returning NGX_DECLINED");
        return NGX_DECLINED;
#else
        pgctx->status = NGX_HTTP_BAD_GATEWAY;
        goto failed;
#endif
    }

#if defined(DDEBUG) && (DDEBUG > 1)
    PQtrace(pgdt->pgconn, stderr);
#endif

    dd("connection status:%d", (int) PQstatus(pgdt->pgconn));

    /* take spot in keepalive connection pool */
    pgscf->active_conns++;

    /* add the file descriptor (fd) into an nginx connection structure */

    fd = PQsocket(pgdt->pgconn);
    if (fd == -1) {
        ngx_log_error(NGX_LOG_ERR, pc->log, 0,
                      "postgres: failed to get connection fd");

        goto invalid;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
                   "postgres: connection fd:%d", fd);

    pgxc = pc->connection = ngx_get_connection(fd, pc->log);

    if (pgxc == NULL) {
        ngx_log_error(NGX_LOG_ERR, pc->log, 0,
                      "postgres: failed to get a free nginx connection");

        goto invalid;
    }

    pgxc->log = pc->log;
    pgxc->log_error = pc->log_error;
    pgxc->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    rev = pgxc->read;
    wev = pgxc->write;

    rev->log = pc->log;
    wev->log = pc->log;

    /* register the connection with postgres connection fd into the
     * nginx event model */

    if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
        dd("NGX_USE_RTSIG_EVENT");
        if (ngx_add_conn(pgxc) != NGX_OK) {
            goto bad_add;
        }

    } else if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
        dd("NGX_USE_CLEAR_EVENT");
        if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT) != NGX_OK) {
            goto bad_add;
        }

        if (ngx_add_event(wev, NGX_WRITE_EVENT, NGX_CLEAR_EVENT) != NGX_OK) {
            goto bad_add;
        }

    } else {
        dd("NGX_USE_LEVEL_EVENT");
        if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT) != NGX_OK) {
            goto bad_add;
        }

        if (ngx_add_event(wev, NGX_WRITE_EVENT, NGX_LEVEL_EVENT) != NGX_OK) {
            goto bad_add;
        }
    }

    pgxc->log->action = "connecting to PostgreSQL database";
    pgdt->state = state_db_connect;

    dd("returning NGX_AGAIN");
    return NGX_AGAIN;

bad_add:

    ngx_log_error(NGX_LOG_ERR, pc->log, 0,
                  "postgres: failed to add nginx connection");

invalid:

    ngx_postgres_upstream_free_connection(pc->log, pc->connection,
                                          pgdt->pgconn, pgscf);

#if defined(nginx_version) && (nginx_version >= 8017)
    dd("returning NGX_ERROR");
    return NGX_ERROR;
#else

failed:

    /* a bit hack-ish way to return error response (setup part) */
    pc->connection = ngx_get_connection(0, pc->log);

    dd("returning NGX_AGAIN (NGX_ERROR)");
    return NGX_AGAIN;
#endif
}
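PQconnectStart only begins the handshake, so the state_db_connect state set above must be driven to completion with PQconnectPoll whenever the fd becomes readable or writable. A minimal sketch of that polling step using libpq's documented non-blocking API; my_poll_connect is an illustrative name and the mapping onto this module's state machine is assumed, not quoted:

#include <libpq-fe.h>

static ngx_int_t
my_poll_connect(PGconn *pgconn)
{
    switch (PQconnectPoll(pgconn)) {

    case PGRES_POLLING_READING:   /* wait for the socket to become readable */
    case PGRES_POLLING_WRITING:   /* wait for the socket to become writable */
        return NGX_AGAIN;

    case PGRES_POLLING_OK:        /* handshake complete, ready for queries */
        return NGX_OK;

    default:                      /* PGRES_POLLING_FAILED */
        return NGX_ERROR;
    }
}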
Example #25
static int
ngx_http_lua_ngx_timer_at(lua_State *L)
{
    int                      nargs, co_ref;
    u_char                  *p;
    lua_State               *mt;  /* the main thread */
    lua_State               *co;
    ngx_msec_t               delay;
    ngx_event_t             *ev;
    ngx_http_request_t      *r;
    ngx_connection_t        *saved_c = NULL;
#if 0
    ngx_http_connection_t   *hc;
#endif

    ngx_http_lua_timer_ctx_t      *tctx;
    ngx_http_lua_main_conf_t      *lmcf;
#if 0
    ngx_http_core_main_conf_t     *cmcf;
#endif

    nargs = lua_gettop(L);
    if (nargs < 2) {
        return luaL_error(L, "expecting at least 2 arguments but got %d",
                          nargs);
    }

    delay = (ngx_msec_t) (luaL_checknumber(L, 1) * 1000);

    luaL_argcheck(L, lua_isfunction(L, 2) && !lua_iscfunction(L, 2), 2,
                  "Lua function expected");

    r = ngx_http_lua_get_req(L);
    if (r == NULL) {
        return luaL_error(L, "no request");
    }

    if (ngx_exiting) {
        lua_pushnil(L);
        lua_pushliteral(L, "process exiting");
        return 2;
    }

    lmcf = ngx_http_get_module_main_conf(r, ngx_http_lua_module);

    if (lmcf->pending_timers >= lmcf->max_pending_timers) {
        lua_pushnil(L);
        lua_pushliteral(L, "too many pending timers");
        return 2;
    }

    if (lmcf->watcher == NULL) {
        /* create the watcher fake connection */

        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ngx_cycle->log, 0,
                       "lua creating fake watcher connection");

        if (ngx_cycle->files) {
            saved_c = ngx_cycle->files[0];
        }

        lmcf->watcher = ngx_get_connection(0, ngx_cycle->log);

        if (ngx_cycle->files) {
            ngx_cycle->files[0] = saved_c;
        }

        if (lmcf->watcher == NULL) {
            return luaL_error(L, "no memory");
        }

        lmcf->watcher->fd = -2;  /* to work around the -1 check in
                                    ngx_worker_process_cycle */
        lmcf->watcher->idle = 1;
        lmcf->watcher->read->handler = ngx_http_lua_abort_pending_timers;
        lmcf->watcher->data = lmcf;
    }

    mt = lmcf->lua;

    co = lua_newthread(mt);

    /* L stack: time func [args] thread */

    ngx_http_lua_probe_user_coroutine_create(r, L, co);

    lua_createtable(co, 0, 0);  /* the new global table */

    /* co stack: global_tb */

    lua_createtable(co, 0, 1);  /* the metatable */
    lua_pushvalue(co, LUA_GLOBALSINDEX);
    lua_setfield(co, -2, "__index");
    lua_setmetatable(co, -2);

    /* co stack: global_tb */

    lua_replace(co, LUA_GLOBALSINDEX);

    /* co stack: <empty> */

    dd("stack top: %d", lua_gettop(L));

    lua_xmove(mt, L, 1);    /* move coroutine from main thread to L */

    /* L stack: time func [args] thread */
    /* mt stack: empty */

    lua_pushvalue(L, 2);    /* copy entry function to top of L */

    /* L stack: time func [args] thread func */

    lua_xmove(L, co, 1);    /* move entry function from L to co */

    /* L stack: time func [args] thread */
    /* co stack: func */

    lua_pushvalue(co, LUA_GLOBALSINDEX);
    lua_setfenv(co, -2);

    /* co stack: func */

    lua_pushlightuserdata(L, &ngx_http_lua_coroutines_key);
    lua_rawget(L, LUA_REGISTRYINDEX);

    /* L stack: time func [args] thread coroutines */

    lua_pushvalue(L, -2);

    /* L stack: time func [args] thread coroutines thread */

    co_ref = luaL_ref(L, -2);
    lua_pop(L, 1);

    /* L stack: time func [args] thread */

    if (nargs > 2) {
        lua_pop(L, 1);  /* L stack: time func [args] */
        lua_xmove(L, co, nargs - 2);  /* L stack: time func */

        /* co stack: func [args] */
    }

    p = ngx_alloc(sizeof(ngx_event_t) + sizeof(ngx_http_lua_timer_ctx_t),
                  r->connection->log);
    if (p == NULL) {
        lua_pushlightuserdata(L, &ngx_http_lua_coroutines_key);
        lua_rawget(L, LUA_REGISTRYINDEX);
        luaL_unref(L, -1, co_ref);
        return luaL_error(L, "no memory");
    }

    ev = (ngx_event_t *) p;

    ngx_memzero(ev, sizeof(ngx_event_t));

    p += sizeof(ngx_event_t);

    tctx = (ngx_http_lua_timer_ctx_t *) p;

    tctx->premature = 0;
    tctx->co_ref = co_ref;
    tctx->co = co;
    tctx->main_conf = r->main_conf;
    tctx->srv_conf = r->srv_conf;
    tctx->loc_conf = r->loc_conf;
    tctx->lmcf = lmcf;

    ev->handler = ngx_http_lua_timer_handler;
    ev->data = tctx;
    ev->log = ngx_cycle->log;

    lmcf->pending_timers++;

    ngx_add_timer(ev, delay);

    lua_pushinteger(L, 1);
    return 1;
}
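The core trick in the example above is a heap-allocated, zeroed ngx_event_t that outlives the request and carries its context in ev->data. Stripped of the Lua plumbing, the reusable pattern looks roughly like the sketch below; my_timer_ctx_t, my_timer_handler and my_schedule_timer are illustrative names:

typedef struct {
    ngx_uint_t  counter;              /* whatever state the handler needs */
} my_timer_ctx_t;

static void
my_timer_handler(ngx_event_t *ev)
{
    my_timer_ctx_t  *ctx = ev->data;

    ctx->counter++;
    /* do the periodic work; re-arm with ngx_add_timer(ev, msec) if desired */
}

static ngx_int_t
my_schedule_timer(ngx_msec_t delay, ngx_log_t *log)
{
    u_char       *p;
    ngx_event_t  *ev;

    /* one allocation carries both the event and its context, as above */
    p = ngx_alloc(sizeof(ngx_event_t) + sizeof(my_timer_ctx_t), log);
    if (p == NULL) {
        return NGX_ERROR;
    }

    ev = (ngx_event_t *) p;
    ngx_memzero(ev, sizeof(ngx_event_t));

    ev->handler = my_timer_handler;
    ev->data = p + sizeof(ngx_event_t);
    ev->log = log;

    ngx_add_timer(ev, delay);         /* inserted into the timer rbtree */

    return NGX_OK;
}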
Example #26
ngx_int_t
ngx_event_connect_peer(ngx_peer_connection_t *pc)
{
    int                rc;
    ngx_int_t          event;
    ngx_err_t          err;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_connection_t  *c;

    rc = pc->get(pc, pc->data);  // e.g. ngx_http_upstream_get_round_robin_peer: picks a peer
    if (rc != NGX_OK) {
        return rc;
    }

    s = ngx_socket(pc->sockaddr->sa_family, SOCK_STREAM, 0);

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, 0, "socket %d", s);

    if (s == (ngx_socket_t) -1) {
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_socket_n " failed");
        return NGX_ERROR;
    }

    /*
     * nginx's event framework requires every connection to be carried by an
     * ngx_connection_t, so this step calls ngx_get_connection to take one
     * from the free connection pool (free_connections in the ngx_cycle_t
     * core structure) to carry the TCP connection between nginx and the
     * upstream server.
     */
    c = ngx_get_connection(s, pc->log);

    if (c == NULL) {
        if (ngx_close_socket(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_close_socket_n "failed");
        }

        return NGX_ERROR;
    }

    if (pc->rcvbuf) {
        if (setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                       (const void *) &pc->rcvbuf, sizeof(int)) == -1)
        {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          "setsockopt(SO_RCVBUF) failed");
            goto failed;
        }
    }

    if (ngx_nonblocking(s) == -1) { // the new TCP socket must be switched to non-blocking mode
        ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                      ngx_nonblocking_n " failed");

        goto failed;
    }

    if (pc->local) {
        if (bind(s, pc->local->sockaddr, pc->local->socklen) == -1) {
            ngx_log_error(NGX_LOG_CRIT, pc->log, ngx_socket_errno,
                          "bind(%V) failed", &pc->local->name);

            goto failed;
        }
    }

    c->recv = ngx_recv;
    c->send = ngx_send;
    c->recv_chain = ngx_recv_chain;
    c->send_chain = ngx_send_chain;

    /*
     * sendfile is set to 1 here on the upstream ngx_connection_t, but
     * ngx_http_upstream_connect later applies c->sendfile &= r->connection->sendfile,
     * and the client connection's sendfile flag is decided in
     * ngx_http_update_location_config; so whether it finally ends up 1 or 0
     * depends on whether sendfile support was enabled at configure time.
     */
    c->sendfile = 1;

    c->log_error = pc->log_error;

    if (pc->sockaddr->sa_family == AF_UNIX) {
        c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
        c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

#if (NGX_SOLARIS)
        /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
        c->sendfile = 0;
#endif
    }

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    /*
     * The event module's ngx_event_actions interface provides add_conn, which
     * registers the TCP socket with the event collector for both read and
     * write readiness. For the epoll module, add_conn adds the socket to
     * epoll expecting EPOLLIN and EPOLLOUT, so that when the anticipated
     * network events occur on this socket, epoll invokes its handler.
     */
    if (ngx_add_conn) {
        /*
         * The read/write event handlers of this ngx_connection_t are both set
         * to ngx_http_upstream_handler by the caller; see
         * ngx_http_upstream_connect.
         */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }
    }

    ngx_log_debug3(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "connect to %V, fd:%d #%uA", pc->name, s, c->number);

    /*
     * Call connect() to initiate the TCP connection to the upstream server.
     * On a non-blocking socket, connect() may report success immediately, or
     * ask us to keep waiting for the upstream's response; whether the
     * connection really succeeded is checked outside this function via
     * u->read_event_handler = ngx_http_upstream_process_header (see the
     * caller, ngx_http_upstream_connect).
     *
     * System calls on non-blocking I/O always return immediately, whether or
     * not the event has occurred. If it has not, they return -1, exactly as
     * on error, and errno must be used to tell the two cases apart: for
     * accept, send and recv, errno is typically EAGAIN ("try again") or
     * EWOULDBLOCK ("would block"); for connect it is EINPROGRESS ("in
     * progress").
     *
     * Unlike accept() on the server side, which returns a new socket, a
     * successful connect() keeps using the socket created by socket().
     * Note that ngx_add_conn above has already registered both the read and
     * write events with epoll.
     */
    rc = connect(s, pc->sockaddr, pc->socklen);  /* for connect() return values, see "Linux High-Performance Server Programming", section 9.5 */

    if (rc == -1) {
        err = ngx_socket_errno;


        if (err != NGX_EINPROGRESS
#if (NGX_WIN32)
            /* Winsock returns WSAEWOULDBLOCK (NGX_EAGAIN) */
            && err != NGX_EAGAIN
#endif
            )
        {
            if (err == NGX_ECONNREFUSED
#if (NGX_LINUX)
                /*
                 * Linux returns EAGAIN instead of ECONNREFUSED
                 * for unix sockets if listen queue is full
                 */
                || err == NGX_EAGAIN
#endif
                || err == NGX_ECONNRESET
                || err == NGX_ENETDOWN
                || err == NGX_ENETUNREACH
                || err == NGX_EHOSTDOWN
                || err == NGX_EHOSTUNREACH)
            {
                level = NGX_LOG_ERR;

            } else {
                level = NGX_LOG_CRIT;
            }

            ngx_log_error(level, c->log, err, "connect() to %V failed",
                          pc->name);

            ngx_close_connection(c);
            pc->connection = NULL;

            return NGX_DECLINED;
        }
    }

    if (ngx_add_conn) {
        if (rc == -1) {
            /*
             * The SYN of the three-way handshake has been sent, but the peer
             * has not yet fully replied; success is reported later through
             * the caller's c->write->handler = ngx_http_upstream_handler and
             * u->write_event_handler = ngx_http_upstream_send_request_handler.
             */
            /* NGX_EINPROGRESS */

            return NGX_AGAIN;
        }

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

        wev->ready = 1;

        return NGX_OK;  
    }

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {

        ngx_log_debug1(NGX_LOG_DEBUG_EVENT, pc->log, ngx_socket_errno,
                       "connect(): %d", rc);

        if (ngx_blocking(s) == -1) {
            ngx_log_error(NGX_LOG_ALERT, pc->log, ngx_socket_errno,
                          ngx_blocking_n " failed");
            goto failed;
        }

        /*
         * FreeBSD's aio allows to post an operation on non-connected socket.
         * NT does not support it.
         *
         * TODO: check in Win32, etc. As workaround we can use NGX_ONESHOT_EVENT
         */

        rev->ready = 1;
        wev->ready = 1;

        return NGX_OK;
    }

    if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {

        /* kqueue */

        event = NGX_CLEAR_EVENT;

    } else {

        /* select, poll, /dev/poll */

        event = NGX_LEVEL_EVENT;
    }

    char tmpbuf[256];

    snprintf(tmpbuf, sizeof(tmpbuf),
             "<%25s, %5d> epoll NGX_READ_EVENT(et) read add", NGX_FUNC_LINE);
    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, tmpbuf);
    if (ngx_add_event(rev, NGX_READ_EVENT, event) != NGX_OK) {
        goto failed;
    }

    if (rc == -1) {

        /* NGX_EINPROGRESS */
        char tmpbuf[256];

        snprintf(tmpbuf, sizeof(tmpbuf),
                 "<%25s, %5d> epoll NGX_WRITE_EVENT(et) write add", NGX_FUNC_LINE);
        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, tmpbuf);
        if (ngx_add_event(wev, NGX_WRITE_EVENT, event) != NGX_OK) {
            goto failed;
        }

        return NGX_AGAIN;  /* the SYN has been sent but the peer has not fully
                              replied yet; success is signalled later via the
                              caller's c->write->handler = ngx_http_upstream_handler
                              and u->write_event_handler =
                              ngx_http_upstream_send_request_handler */
    }

    ngx_log_debug0(NGX_LOG_DEBUG_EVENT, pc->log, 0, "connected");

    wev->ready = 1;

    return NGX_OK;

failed:

    ngx_close_connection(c);
    pc->connection = NULL;

    return NGX_ERROR;
}
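From the caller's side, the three outcomes of ngx_event_connect_peer map onto distinct follow-ups. A hedged sketch of that dispatch; my_upstream_handler is a placeholder (the HTTP upstream code installs ngx_http_upstream_handler here), and the surrounding r/u variables are assumed from an upstream-module context:

    ngx_int_t          rc;
    ngx_connection_t  *c;

    rc = ngx_event_connect_peer(&u->peer);

    if (rc == NGX_ERROR || rc == NGX_BUSY || rc == NGX_DECLINED) {
        /* this peer is unusable; the upstream machinery tries the next one */
        return;
    }

    c = u->peer.connection;
    c->data = r;
    c->read->handler = my_upstream_handler;    /* placeholder */
    c->write->handler = my_upstream_handler;   /* placeholder */

    if (rc == NGX_AGAIN) {
        /* SYN sent, handshake still pending: wait for write readiness */
        ngx_add_timer(c->write, u->conf->connect_timeout);
        return;
    }

    /* rc == NGX_OK: connected at once, start sending the request */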
Example #27
void
ngx_event_udp_aio_recv(ngx_event_t *ev)
{
    ngx_udp_recv_event_t *event = (ngx_udp_recv_event_t *) ev;

    ngx_log_t         *log;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;

    ev->ready = 0;

    lc = ev->data;
    ls = lc->listening;

    if (ev->error) {
        ngx_destroy_pool(event->pool);
        goto post_udp_recv;
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

    ev->log->data = &ls->addr_text;

    c = ngx_get_connection(lc->fd, ev->log);

    if (c == NULL) {
        ngx_destroy_pool(event->pool);
        goto post_udp_recv;
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

    c->pool = event->pool;
    c->buffer = event->buffer;

#if (NGX_WIN32) && (NGX_HAVE_IOCP)
    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        c->buffer->last += ev->available;
        ev->available = 0;
    }
#endif

    c->sockaddr = ngx_palloc(c->pool, event->socklen);
    if (c->sockaddr == NULL) {
        ngx_close_udp_connection(c);
        goto post_udp_recv;
    }

    ngx_memcpy(c->sockaddr, event->sockaddr, event->socklen);

    log = ngx_palloc(c->pool, sizeof(ngx_log_t));
    if (log == NULL) {
        ngx_close_udp_connection(c);
        goto post_udp_recv;
    }

    *log = ls->log;

    c->recv = ngx_recv;
    c->send = ngx_send;
    c->recv_chain = ngx_recv_chain;
    c->send_chain = ngx_send_chain;

    c->log = log;
    c->pool->log = log;

    c->socklen = event->socklen;
    c->listening = ls;
    c->local_sockaddr = ls->sockaddr;

    c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
    if (c->sockaddr->sa_family == AF_UNIX) {
        c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
        c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
    }
#endif

    rev = c->read;
    wev = c->write;

    wev->ready = 1;

    if (ngx_event_flags & (NGX_USE_AIO_EVENT|NGX_USE_RTSIG_EVENT)) {
        /* rtsig, aio, iocp */
        rev->ready = 1;
    }

    if (ev->deferred_accept) {
        rev->ready = 1;
    }

    rev->log = log;
    wev->log = log;

    /*
     * TODO: MT: - ngx_atomic_fetch_add()
     *             or protection by critical section or light mutex
     *
     * TODO: MP: - allocated in a shared memory
     *           - ngx_atomic_fetch_add()
     *             or protection by critical section or light mutex
     */

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

#if (NGX_THREADS)
    rev->lock = &c->lock;
    wev->lock = &c->lock;
    rev->own_lock = &c->lock;
    wev->own_lock = &c->lock;
#endif

    if (ls->addr_ntop) {
        c->addr_text.data = ngx_pnalloc(c->pool,
                                  ls->addr_text_max_len + sizeof(":65535") - 1);
        if (c->addr_text.data == NULL) {
            ngx_close_udp_connection(c);
            goto post_udp_recv;
        }

        c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->addr_text.data,
                               ls->addr_text_max_len + sizeof(":65535") - 1, 1);
        if (c->addr_text.len == 0) {
            ngx_close_udp_connection(c);
            goto post_udp_recv;
        }
    }

#if (NGX_DEBUG)
    {
    in_addr_t            i;
    ngx_event_conf_t    *ecf;
    ngx_event_debug_t   *dc;
    struct sockaddr_in  *sin;

    sin = (struct sockaddr_in *) c->sockaddr;

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    dc = ecf->debug_connection.elts;
    for (i = 0; i < ecf->debug_connection.nelts; i++) {
        if ((sin->sin_addr.s_addr & dc[i].mask) == dc[i].addr) {
            log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
            break;
        }
    }
    }
#endif

    log->data = NULL;
    log->handler = NULL;

    ls->handler(c);

post_udp_recv:

#if (NGX_WIN32)

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        ngx_event_post_one_udp_recv(ls, event);
    }

#endif

    return;
}
Example #28
static ngx_int_t
ngx_tcp_process_init(ngx_cycle_t *cycle)
{
    ngx_int_t                   rc;
    ngx_tcp_core_main_conf_t   *cmcf;
    ngx_tcp_conf_ctx_t         *ctx;
    ngx_listening_t            *ls;
    ngx_connection_t           *c;
    ngx_event_t                *rev;
    ngx_tcp_port_t             *mport;
    ngx_tcp_in_addr_t          *addrs;

    rc = ngx_tcp_instruct_unix_listen(cycle);
    if (NGX_OK != rc) {
        return rc;
    }
    ctx = (ngx_tcp_conf_ctx_t *)ngx_get_conf(cycle->conf_ctx, ngx_tcp_module);
    cmcf = ngx_tcp_get_module_main_conf(ctx, ngx_tcp_core_module);
    ls = cmcf->ls;
    rc = ngx_tcp_open_listening_socket(ls);
    if (NGX_OK != rc) {
        return rc;
    }
    mport = ngx_palloc(cycle->pool, sizeof(ngx_tcp_port_t));
    if (mport == NULL) {
        return NGX_ERROR;
    }
    mport->naddrs = 1;
    mport->addrs = ngx_pcalloc(cycle->pool,
                               mport->naddrs * sizeof(ngx_tcp_in_addr_t));
    if (mport->addrs == NULL) {
        return NGX_ERROR;
    }
    addrs = mport->addrs;
    addrs->conf.ctx = ctx;
    addrs->conf.addr_text= cmcf->unix_url;
    ls->servers = mport;

    if (cmcf->error_log == NULL) {
        ls->logp = cycle->log;
    } else {
        ls->logp = cmcf->error_log;
    }
    ls->log = *(ls->logp);
    ls->log.data = &ls->addr_text;
    ls->log.handler = ngx_accept_log_error;

    c = ngx_get_connection(ls->fd, cycle->log);
    if (c == NULL) {
        return NGX_ERROR;
    }
    // c->log = &ls->log;
    c->log = ls->logp;
    c->listening = ls;
    ls->connection = c;

    rev = c->read;
    rev->log = c->log;
    rev->accept = 1;

#if (NGX_HAVE_DEFERRED_ACCEPT)
    rev->deferred_accept = ls->deferred_accept;
#endif

    rev->handler = ngx_event_accept;

    if (ngx_event_flags & NGX_USE_RTSIG_EVENT) {
        if (ngx_add_conn(c) == NGX_ERROR) {
            return NGX_ERROR;
        }

    } else {
        if (ngx_add_event(rev, NGX_READ_EVENT, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}
Example #29
ngx_int_t
ngx_zeromq_connect(ngx_peer_connection_t *pc)
{
    ngx_connection_t  *c;
    ngx_event_t       *rev, *wev;
    void              *zmq;
    int                fd, zero;
    size_t             fdsize;

    zmq = zmq_socket(zmq_context, ZMQ_REQ);
    if (zmq == NULL) {
        ngx_zeromq_log_error(pc->log, "zmq_socket(ZMQ_REQ)");
        return NGX_ERROR;
    }

    fdsize = sizeof(int);

    if (zmq_getsockopt(zmq, ZMQ_FD, &fd, &fdsize) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_getsockopt(ZMQ_FD)");
        goto failed_zmq;
    }

    zero = 0;

    if (zmq_setsockopt(zmq, ZMQ_LINGER, &zero, sizeof(int)) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_setsockopt(ZMQ_LINGER)");
        goto failed_zmq;
    }

    c = ngx_get_connection(fd, pc->log);
    if (c == NULL) {
        goto failed_zmq;
    }

    c->data = zmq;

    c->recv = ngx_zeromq_recv;
    c->send = ngx_zeromq_send;
    c->recv_chain = ngx_zeromq_recv_chain;
    c->send_chain = ngx_zeromq_send_chain;

    /* This won't fly with ZeroMQ */
    c->sendfile = 0;
    c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
    c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

    c->log_error = pc->log_error;

    rev = c->read;
    wev = c->write;

    rev->log = pc->log;
    wev->log = pc->log;

    pc->connection = c;

    c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

    if (pc->local) {
        ngx_log_error(NGX_LOG_WARN, pc->log, 0,
                      "zmq_connect: binding to local address is not supported");
    }

    if (zmq_connect(zmq, (const char *) pc->data) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_connect()");
        goto failed;
    }

    ngx_log_debug4(NGX_LOG_DEBUG_EVENT, pc->log, 0,
                   "zmq_connect: lazily connected to tcp://%V,"
                   " zmq:%p fd:%d #%d", pc->name, zmq, fd, c->number);

    if (ngx_add_conn) {
        /* rtsig */
        if (ngx_add_conn(c) == NGX_ERROR) {
            goto failed;
        }

    } else {
        if (ngx_event_flags & NGX_USE_CLEAR_EVENT) {
            /* kqueue, epoll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_CLEAR_EVENT) != NGX_OK) {
                goto failed;
            }

        } else {
            /* select, poll, /dev/poll */
            if (ngx_add_event(rev, NGX_READ_EVENT, NGX_LEVEL_EVENT) != NGX_OK) {
                goto failed;
            }
        }
    }

    /*
     * ZeroMQ assumes that new socket is read-ready (but it really isn't)
     * and it won't notify us about any new events if we don't fail to read
     * from it first. Sigh.
     */

    rev->ready = 1;
    wev->ready = 1;

    return NGX_OK;

failed:

    ngx_free_connection(c);

    c->fd = (ngx_socket_t) -1;

failed_zmq:

    if (zmq_close(zmq) == -1) {
        ngx_zeromq_log_error(pc->log, "zmq_close()");
    }

    return NGX_ERROR;
}
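The caveat in the last comment is a general ZeroMQ property: readiness on the ZMQ_FD descriptor only means "check me", and the actual socket state must be re-read through ZMQ_EVENTS. A sketch of the check a read handler would perform, using the standard libzmq API; how this particular module consumes the result is an assumption:

    int     events;
    size_t  len = sizeof(int);

    /* ZMQ_FD being readable does not imply a whole message is available */
    if (zmq_getsockopt(zmq, ZMQ_EVENTS, &events, &len) == -1) {
        /* handle the error */
    }

    if (events & ZMQ_POLLIN) {
        /* at least one message can be received without blocking */
    }

    if (events & ZMQ_POLLOUT) {
        /* a message can be queued for sending without blocking */
    }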
Example #30
void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
    u_char             sa[NGX_SOCKADDRLEN];
#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = NGX_SOCKADDRLEN;

#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, (struct sockaddr *) sa, &socklen,
                        SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
        }
#else
        s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

        c->type = SOCK_STREAM;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

        c->unexpected_eof = 1;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }

        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);
}
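Everything ngx_event_accept prepares is finally handed to ls->handler(c), which for HTTP is ngx_http_init_connection. A custom protocol module plugs in at the same point; a minimal hedged sketch of such a listening handler, with my_protocol_* names as placeholders:

static void
my_protocol_init_connection(ngx_connection_t *c)
{
    /* c arrives fully initialized: pool, log, sockaddr, non-blocking fd */

    c->read->handler = my_protocol_read_handler;    /* placeholder */
    c->write->handler = my_protocol_empty_handler;  /* placeholder */

    /* arm the read event in whatever mechanism is active (epoll, kqueue...) */
    if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
        ngx_close_connection(c);
        return;
    }

    /* optionally guard against idle clients */
    ngx_add_timer(c->read, 60000);  /* 60s read timeout, illustrative */
}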