Example #1
static apr_status_t send_socket(apr_pool_t *p,
                                apr_socket_t *s,
                                apr_socket_t *outbound)
{
    apr_status_t rv;
    apr_os_sock_t rawsock;
    apr_os_sock_t srawsock;
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct iovec iov;
    char b = '\0';

    rv = apr_os_sock_get(&rawsock, outbound);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    rv = apr_os_sock_get(&srawsock, s);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    memset(&msg, 0, sizeof(msg));

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    iov.iov_base = &b;
    iov.iov_len = 1;

    cmsg = apr_palloc(p, sizeof(*cmsg) + sizeof(rawsock));
    cmsg->cmsg_len = sizeof(*cmsg) + sizeof(rawsock);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;

    memcpy(CMSG_DATA(cmsg), &rawsock, sizeof(rawsock));

    msg.msg_control = cmsg;
    msg.msg_controllen = cmsg->cmsg_len;

    rv = sendmsg(srawsock, &msg, 0);

    if (rv == -1) {
        return errno;
    }

    return APR_SUCCESS;
}
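For orientation, a minimal receive-side counterpart is sketched below. It is not part of the project the example above comes from; it assumes the peer sends exactly one descriptor with a one-byte payload, as send_socket() does, and wraps the received descriptor back into an apr_socket_t with apr_os_sock_put().

/* Sketch only: receive-side counterpart to send_socket(); not taken from
 * the original project. */
static apr_status_t recv_socket(apr_pool_t *p,
                                apr_socket_t *s,
                                apr_socket_t **inbound)
{
    apr_status_t rv;
    apr_os_sock_t srawsock;
    apr_os_sock_t rawsock;
    struct msghdr msg;
    struct cmsghdr *cmsg;
    struct iovec iov;
    char b;

    rv = apr_os_sock_get(&srawsock, s);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    memset(&msg, 0, sizeof(msg));

    iov.iov_base = &b;
    iov.iov_len = 1;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    /* Room for exactly one descriptor in the ancillary data. */
    msg.msg_control = apr_palloc(p, CMSG_SPACE(sizeof(rawsock)));
    msg.msg_controllen = CMSG_SPACE(sizeof(rawsock));

    if (recvmsg(srawsock, &msg, 0) == -1) {
        return APR_FROM_OS_ERROR(errno);
    }

    cmsg = CMSG_FIRSTHDR(&msg);
    if (cmsg == NULL
        || cmsg->cmsg_level != SOL_SOCKET
        || cmsg->cmsg_type != SCM_RIGHTS) {
        return APR_EGENERAL;
    }
    memcpy(&rawsock, CMSG_DATA(cmsg), sizeof(rawsock));

    /* Wrap the received raw descriptor back into an APR socket. */
    return apr_os_sock_put(inbound, &rawsock, p);
}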
Example #2
File: beos.c Project: bobsense/packages
/* This accepts a connection and allows us to handle the error codes better than
 * the previous code, while also making it more obvious.
 */
static apr_status_t beos_accept(void **accepted, ap_listen_rec *lr, apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
    int sockdes;

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
        apr_os_sock_get(&sockdes, csd);
        return status;
    }

    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* This switch statement provides us with better error details. */
    switch (status) {
#ifdef ECONNABORTED
        case ECONNABORTED:
#endif
#ifdef ETIMEDOUT
        case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
        case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
        case ENETUNREACH:
#endif
            break;
#ifdef ENETDOWN
        case ENETDOWN:
            /*
             * When the network layer has been shut down, there
             * is not much use in simply exiting: the parent
             * would simply re-create us (and we'd fail again).
             * Use the CHILDFATAL code to tear the server down.
             * @@@ Martin's idea for possible improvement:
             * A different approach would be to define
             * a new APEXIT_NETDOWN exit code, the reception
             * of which would make the parent shutdown all
             * children, then idle-loop until it detected that
             * the network is up again, and restart the children.
             * Ben Hyde noted that temporary ENETDOWN situations
             * occur in mobile IP.
             */
            ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                         "apr_socket_accept: giving up.");
            return APR_EGENERAL;
#endif /*ENETDOWN*/

        default:
            ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf,
                         "apr_socket_accept: (client socket)");
            return APR_EGENERAL;
    }
    return status;
}
Example #3
File: beos.c Project: kheradmand/Break
static void process_socket(apr_pool_t *p, apr_socket_t *sock,
                           int my_child_num, apr_bucket_alloc_t *bucket_alloc)
{
    conn_rec *current_conn;
    long conn_id = my_child_num;
    int csd;
    ap_sb_handle_t *sbh;

    (void)apr_os_sock_get(&csd, sock);
    
    if (csd >= FD_SETSIZE) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, NULL,
                     "filedescriptor (%u) larger than FD_SETSIZE (%u) "
                     "found, you probably need to rebuild Apache with a "
                     "larger FD_SETSIZE", csd, FD_SETSIZE);
        apr_socket_close(sock);
        return;
    }

    ap_create_sb_handle(&sbh, p, 0, my_child_num);
    current_conn = ap_run_create_connection(p, ap_server_conf,
                                            sock, conn_id, sbh,
                                            bucket_alloc);

    if (current_conn) {
        ap_process_connection(current_conn, sock);
        ap_lingering_close(current_conn);
    }
}
Example #4
/** Construct a socket comm object from an APR socket. */
kdsock_comm *kdsock_comm_new_apr(apr_pool_t *pool, apr_socket_t *apr_sock, int auto_shutdown) {
    kdsock_comm *self;

    self = apr_pcalloc(pool, sizeof(kdsock_comm));
    self->pool = pool;

    self->c = apr_pcalloc(pool, sizeof(kdcomm));
    self->c->funcs = &sock_comm_funcs;
    self->c->state = COMM_UNKNOWN;
    self->c->obj = self;

    self->apr_sock = apr_sock;
    self->auto_shutdown = auto_shutdown;

    /* Extract the file descriptor. */
    apr_os_sock_get(&self->c->fd, self->apr_sock);

    /* We call the shots for the timeout. */
    apr_socket_timeout_set(self->apr_sock, 0);

    /* Register a cleanup to close the socket when the pool is
       destroyed. */
    apr_pool_cleanup_register(pool, self, kdsock_comm_delete, kdsock_comm_child_delete);
    
    return self;
}
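The cleanup pair registered above is project-specific; the sketch below shows one plausible shape for the plain cleanup, assuming auto_shutdown decides whether the peer is shut down before the close. The actual kdsock_comm_delete may differ.

/* Hypothetical sketch of the registered cleanup; the project's real
 * kdsock_comm_delete may do more (or less) than this. */
static apr_status_t kdsock_comm_delete_sketch(void *data)
{
    kdsock_comm *self = data;

    if (self->auto_shutdown) {
        apr_socket_shutdown(self->apr_sock, APR_SHUTDOWN_READWRITE);
    }
    return apr_socket_close(self->apr_sock);
}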
Example #5
int sock_native_fd(net_sock_t *nsock)
{
    network_sock_t *sock = (network_sock_t *)nsock;
    int fd;

    if (apr_os_sock_get(&fd, sock->fd) != APR_SUCCESS) fd = -1;

    return(fd);
}
Example #6
void socket_thread_main(apr_thread_t *self, void *data)
{
	int ret = 0;
	socket_thread_data_t *thread_data = (socket_thread_data_t*)data;
	gnutls_session_t session;
	apr_os_sock_t sock;

	if(tls_cert != NULL)
	{
		if(apr_os_sock_get(&sock, thread_data->socket) != APR_SUCCESS)
		{
			syslog(LOG_ERR, "Can't get raw socket for TLS use");
			ret = -1;
		}
		else if((thread_data->tls_session=apr_pcalloc(
				thread_data->pool, sizeof(gnutls_session_t)))!=NULL)
		{
			session = initialize_tls_session ();
			gnutls_transport_set_ptr (session,
					(gnutls_transport_ptr_t) (sock));
			do
			{
				ret = gnutls_handshake (session);
			}
			while (ret < 0 && gnutls_error_is_fatal (ret) == 0);

			if (ret < 0)
			{
				syslog(LOG_ERR, "handshake has failed (%s)\n\n", gnutls_strerror (ret));
			}
			*(thread_data->tls_session) = session;
		}
		else
		{
			syslog(LOG_ERR, "memory allocation failure");
			ret = -1;
		}
	}

	if(ret >= 0)
	{
		socket_thread_handle(thread_data);
		if(thread_data->tls_session != NULL)
			gnutls_bye(session, GNUTLS_SHUT_WR);
	}

	apr_socket_close(thread_data->socket);
	if(thread_data->tls_session != NULL)
		gnutls_deinit (session);
	apr_pool_destroy(thread_data->pool);
	syslog(LOG_INFO, "client connection closed");
}
Example #7
File: io_net.c Project: LuaDist/lua-apr
int socket_fd_get(lua_State *L)
{
  apr_status_t status;
  lua_apr_socket *object;
  apr_os_sock_t fd;

  object = socket_check(L, 1, 1);
  status = apr_os_sock_get(&fd, object->handle);
  if (status != APR_SUCCESS)
    return push_error_status(L, status);

  lua_pushinteger(L, fd);
  return 1;
}
Example #8
File: llpumpio.cpp Project: Boy/rainbow
//
// local functions
//
void ll_debug_poll_fd(const char* msg, const apr_pollfd_t* poll)
{
#if LL_DEBUG_POLL_FILE_DESCRIPTORS
	if(!poll)
	{
		lldebugs << "Poll -- " << (msg?msg:"") << ": no pollfd." << llendl;
		return;
	}
	if(poll->desc.s)
	{
		apr_os_sock_t os_sock;
		if(APR_SUCCESS == apr_os_sock_get(&os_sock, poll->desc.s))
		{
			lldebugs << "Poll -- " << (msg?msg:"") << " on fd " << os_sock
				 << " at " << poll->desc.s << llendl;
		}
		else
		{
			lldebugs << "Poll -- " << (msg?msg:"") << " no fd "
				 << " at " << poll->desc.s << llendl;
		}
	}
	else if(poll->desc.f)
	{
		apr_os_file_t os_file;
		if(APR_SUCCESS == apr_os_file_get(&os_file, poll->desc.f))
		{
			lldebugs << "Poll -- " << (msg?msg:"") << " on fd " << os_file
				 << " at " << poll->desc.f << llendl;
		}
		else
		{
			lldebugs << "Poll -- " << (msg?msg:"") << " no fd "
				 << " at " << poll->desc.f << llendl;
		}
	}
	else
	{
		lldebugs << "Poll -- " << (msg?msg:"") << ": no descriptor." << llendl;
	}
#endif	
}
Example #9
static int setup_listeners(server_rec *s)
{
    ap_listen_rec *lr;
    int sockdes;

    if (ap_setup_listeners(s) < 1 ) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, 0, s, APLOGNO(00222)
            "no listening sockets available, shutting down");
        return -1;
    }

    listenmaxfd = -1;
    FD_ZERO(&listenfds);
    for (lr = ap_listeners; lr; lr = lr->next) {
        apr_os_sock_get(&sockdes, lr->sd);
        FD_SET(sockdes, &listenfds);
        if (sockdes > listenmaxfd) {
            listenmaxfd = sockdes;
        }
    }
    return 0;
}
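A minimal sketch of how the listenfds set and listenmaxfd built here might be consumed follows; the helper is illustrative and not part of the original MPM (select() needs the highest descriptor plus one on non-Windows platforms, which is why listenmaxfd is tracked).

/* Illustrative only: wait up to one second for any listener to become
 * readable; the caller would then locate the ready ap_listen_rec with
 * apr_os_sock_get() and FD_ISSET(), as find_ready_listener() does below. */
static apr_status_t wait_for_listener(void)
{
    fd_set readable;
    struct timeval tv;
    int rc;

    /* select() modifies the set, so work on a copy of listenfds. */
    memcpy(&readable, &listenfds, sizeof(fd_set));
    tv.tv_sec = 1;
    tv.tv_usec = 0;

    rc = select(listenmaxfd + 1, &readable, NULL, NULL, &tv);
    if (rc < 0) {
        return APR_FROM_OS_ERROR(errno);
    }
    return (rc == 0) ? APR_TIMEUP : APR_SUCCESS;
}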
Example #10
File: child.c Project: Distrotech/httpd
static APR_INLINE ap_listen_rec *find_ready_listener(fd_set * main_fds)
{
    ap_listen_rec *lr;
    SOCKET nsd;

    lr = head_listener;
    do {
        apr_os_sock_get(&nsd, lr->sd);
        if (FD_ISSET(nsd, main_fds)) {
            head_listener = lr->next;
            if (!head_listener) {
                head_listener = ap_listeners;
            }
            return lr;
        }
        lr = lr->next;
        if (!lr) {
            lr = ap_listeners;
        }
    } while (lr != head_listener);
    return NULL;
}
Example #11
// Quick function
void ll_debug_socket(const char* msg, apr_socket_t* apr_sock)
{
#if LL_DEBUG_SOCKET_FILE_DESCRIPTORS
	if(!apr_sock)
	{
		LL_DEBUGS() << "Socket -- " << (msg?msg:"") << ": no socket." << LL_ENDL;
		return;
	}
	// *TODO: Why doesn't this work?
	//apr_os_sock_t os_sock;
	int os_sock;
	if(APR_SUCCESS == apr_os_sock_get(&os_sock, apr_sock))
	{
		LL_DEBUGS() << "Socket -- " << (msg?msg:"") << " on fd " << os_sock
			<< " at " << apr_sock << LL_ENDL;
	}
	else
	{
		LL_DEBUGS() << "Socket -- " << (msg?msg:"") << " no fd "
			<< " at " << apr_sock << LL_ENDL;
	}
#endif
}
Example #12
apr_status_t
join_mcast( apr_pool_t *context, apr_socket_t *sock, char *mcast_channel, apr_port_t port, char *ifname )
{
  apr_pool_t *pool = NULL;
  apr_status_t status;
  int rval;
  apr_sockaddr_t *sa;
  apr_os_sock_t s;

  if((status = apr_pool_create(&pool, context)) != APR_SUCCESS)
    {
      return status;
    }

  status = apr_sockaddr_info_get(&sa, mcast_channel , APR_UNSPEC, port, 0, pool);
  if(status != APR_SUCCESS)
    {
      apr_pool_destroy(pool);
      return status;
    }

  apr_os_sock_get(&s, sock);

  switch( sa->family ) /* (*sa)->sa.sin.sin_family */
    {
    case APR_INET:
      {
        struct ip_mreq mreq[1];
        struct ifreq ifreq[1];
        
        /* &((*sa)->sa.sin.sin_addr */
        memcpy(&mreq->imr_multiaddr, &(sa->sa.sin.sin_addr), 
               sizeof mreq->imr_multiaddr);
        
        memset(&ifreq,0, sizeof(ifreq));
        if(ifname)
          {
            memset(ifreq, 0, sizeof(struct ifreq));
            strncpy(ifreq->ifr_name, ifname, IFNAMSIZ);
            if(ioctl(s, SIOCGIFADDR, ifreq) == -1)
              {
                apr_pool_destroy(pool);
                return APR_EGENERAL;
              }
          }
        else
          {
            /* wildcard address (let the kernel decide) */
            mreq->imr_interface.s_addr = htonl(INADDR_ANY);
          }
        
        memcpy(&mreq->imr_interface, 
               &((struct sockaddr_in *)&ifreq->ifr_addr)->sin_addr, 
               sizeof mreq->imr_interface);
        
        rval = setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, 
                mreq, sizeof mreq);
        if(rval<0)
          {
            apr_pool_destroy(pool);
            return APR_EGENERAL;
          }
        break;
      }
#if APR_HAVE_IPV6
    case APR_INET6:
      {
        struct ipv6_mreq mreq[1];
        struct ifreq ifreq[1];
        
        /* &((*sa)->sa.sin6.sin6_addr)*/
        memcpy(&mreq->ipv6mr_multiaddr, &(sa->sa.sin6.sin6_addr),
               sizeof mreq->ipv6mr_multiaddr);
        
        memset(&ifreq,0, sizeof(ifreq));
        if(ifname)
          {
            strncpy(ifreq->ifr_name, ifname, IFNAMSIZ);
          }
        
        if (ioctl(s, SIOCGIFADDR, ifreq) == -1)
          {
            apr_pool_destroy(pool);
            return -1;
          }
        
        rval = setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, mreq, sizeof mreq);
        break;
      }
#endif
    default:
        apr_pool_destroy(pool);
        /* Set errno to EPROTONOSUPPORT */
        return -1;
    }

  apr_pool_destroy(pool);
  return APR_SUCCESS;
}
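A hedged usage sketch follows: it creates and binds a UDP socket with APR, then joins a group through join_mcast(). The group address 239.2.11.71 and port 8649 are illustrative placeholders, not values taken from the example above.

/* Illustrative caller; the address and port literals are placeholders. */
static apr_status_t join_example(apr_pool_t *pool, apr_socket_t **out)
{
    char channel[] = "239.2.11.71";   /* placeholder group address */
    apr_sockaddr_t *local;
    apr_status_t rv;

    rv = apr_sockaddr_info_get(&local, NULL, APR_INET, 8649, 0, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_socket_create(out, local->family, SOCK_DGRAM, APR_PROTO_UDP, pool);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_socket_bind(*out, local);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    /* A NULL interface name lets the kernel pick the interface. */
    return join_mcast(pool, *out, channel, 8649, NULL);
}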
Example #13
int main(int argc, const char * const argv[])
{
    apr_file_t *infd, *skwrapper;
    apr_sockaddr_t *skaddr;
    apr_getopt_t *gopt;
    apr_socket_t *skt;
    apr_pool_t *pool;
    apr_status_t rv;
    apr_proc_t proc;


    /* Command line arguments */
    int num_to_start = 1, port = 0;
    const char *interface = NULL;
    const char *command = NULL;

    apr_app_initialize(&argc, &argv, NULL);

    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);

    rv = apr_getopt_init(&gopt, pool, argc, argv);
    if (rv) {
        return EXIT_FAILURE;
    }

    for (;;) {
        const char *arg;
        char opt;

        rv = apr_getopt(gopt, "c:p:i:N:", &opt, &arg);
        if (APR_STATUS_IS_EOF(rv)) {
            break;
        } else if (rv) {
            usage();
        } else {
            switch (opt) {
            case 'c':
                command = arg;
                break;

            case 'p':
                port = atoi(arg);
                if (! port) {
                    usage();
                }
                break;

            case 'i':
                interface = arg;
                break;

            case 'N':
                num_to_start = atoi(arg);
                if (! num_to_start) {
                    usage();
                }
                break;

            default:
                break;
            }
        }
    }

    if (! command || ! port) {
        usage();
    }

    rv = apr_sockaddr_info_get(&skaddr, interface, APR_UNSPEC, port, 0, pool);
    if (rv) {
        exit_error(rv, "apr_sockaddr_info_get");
    }

    rv = apr_socket_create(&skt, skaddr->family, SOCK_STREAM, APR_PROTO_TCP, pool);
    if (rv) {
        exit_error(rv, "apr_socket_create");
    }

    rv = apr_socket_bind(skt, skaddr);
    if (rv) {
        exit_error(rv, "apr_socket_bind");
    }

    rv = apr_socket_listen(skt, 1024);
    if (rv) {
        exit_error(rv, "apr_socket_listen");
    }

    rv = apr_proc_detach(APR_PROC_DETACH_DAEMONIZE);
    if (rv) {
        exit_error(rv, "apr_proc_detach");
    }

#if defined(WIN32) || defined(OS2) || defined(NETWARE)

#error "Please implement me."

#else

    while (--num_to_start >= 0) {
        rv = apr_proc_fork(&proc, pool);
        if (rv == APR_INCHILD) {
            apr_os_file_t oft = 0;
            apr_os_sock_t oskt;

            /* Ok, so we need a file that has file descriptor 0 (which
             * FastCGI wants), but points to our socket.  This isn't really
             * possible in APR, so we cheat a bit.  I have no idea how to
             * do this on a non-unix platform, so for now this is platform
             * specific.  Ick.
             *
             * Note that this has to happen post-detach, otherwise fd 0
             * gets closed during apr_proc_detach and it's all for nothing.
             *
             * Unfortunately, doing this post detach means we have no way
             * to let anyone know if there's a problem at this point :( */

            rv = apr_os_file_put(&infd, &oft, APR_READ | APR_WRITE, pool);
            if (rv) {
                exit(EXIT_FAILURE);
            }

            rv = apr_os_sock_get(&oskt, skt);
            if (rv) {
                exit(EXIT_FAILURE);
            }

            rv = apr_os_file_put(&skwrapper, &oskt, APR_READ | APR_WRITE,
                                 pool);
            if (rv) {
                exit(EXIT_FAILURE);
            }

            rv = apr_file_dup2(infd, skwrapper, pool);
            if (rv) {
                exit(EXIT_FAILURE);
            }

            /* XXX Can't use apr_proc_create because there's no way to get
             *     infd into the procattr without going through another dup2,
             *     which means by the time it gets to the fastcgi process it
             *     is no longer fd 0, so it doesn't work.  Sigh. */

            execl(command, command, NULL);

        } else if (rv == APR_INPARENT) {
            if (num_to_start == 0) {
                apr_socket_close(skt);
            }
        } else {
            exit_error(rv, "apr_proc_fork");
        }
    }

#endif

    return EXIT_SUCCESS;
}
Example #14
File: unixd.c Project: pexip/os-apache2
AP_DECLARE(apr_status_t) ap_unixd_accept(void **accepted, ap_listen_rec *lr,
                                         apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
#ifdef _OSD_POSIX
    int sockdes;
#endif

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
#ifdef _OSD_POSIX
        apr_os_sock_get(&sockdes, csd);
        if (sockdes >= FD_SETSIZE) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(02176)
                         "new file descriptor %d is too large; you probably need "
                         "to rebuild Apache with a larger FD_SETSIZE "
                         "(currently %d)",
                         sockdes, FD_SETSIZE);
            apr_socket_close(csd);
            return APR_EINTR;
        }
#endif
        return APR_SUCCESS;
    }

    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* Our old behaviour here was to continue after accept()
     * errors.  But this leads us into lots of troubles
     * because most of the errors are quite fatal.  For
     * example, EMFILE can be caused by slow descriptor
     * leaks (say in a 3rd party module, or libc).  It's
     * foolish for us to continue after an EMFILE.  We also
     * seem to tickle kernel bugs on some platforms which
     * lead to never-ending loops here.  So it seems best
     * to just exit in most cases.
     */
    switch (status) {
#if defined(HPUX11) && defined(ENOBUFS)
        /* On HPUX 11.x, the 'ENOBUFS, No buffer space available'
         * error occurs because the accept() cannot complete.
         * You will not see ENOBUFS with 10.20 because the kernel
         * hides any occurrence from being returned to user space.
         * ENOBUFS with 11.x's TCP/IP stack is possible, and could
         * occur intermittently. As a work-around, we are going to
         * ignore ENOBUFS.
         */
        case ENOBUFS:
#endif

#ifdef EPROTO
        /* EPROTO on certain older kernels really means
         * ECONNABORTED, so we need to ignore it for them.
         * See discussion in new-httpd archives nh.9701
         * search for EPROTO.
         *
         * Also see nh.9603, search for EPROTO:
         * There is potentially a bug in Solaris 2.x x<6,
         * and other boxes that implement tcp sockets in
         * userland (i.e. on top of STREAMS).  On these
         * systems, EPROTO can actually result in a fatal
         * loop.  See PR#981 for example.  It's hard to
         * handle both uses of EPROTO.
         */
        case EPROTO:
#endif
#ifdef ECONNABORTED
        case ECONNABORTED:
#endif
        /* Linux generates the rest of these, other tcp
         * stacks (i.e. bsd) tend to hide them behind
         * getsockopt() interfaces.  They occur when
         * the net goes sour or the client disconnects
         * after the three-way handshake has been done
         * in the kernel but before userland has picked
         * up the socket.
         */
#ifdef ECONNRESET
        case ECONNRESET:
#endif
#ifdef ETIMEDOUT
        case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
        case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
        case ENETUNREACH:
#endif
        /* EAGAIN/EWOULDBLOCK can be returned on BSD-derived
         * TCP stacks when the connection is aborted before
         * we call connect, but only because our listener
         * sockets are non-blocking (AP_NONBLOCK_WHEN_MULTI_LISTEN)
         */
#ifdef EAGAIN
        case EAGAIN:
#endif
#ifdef EWOULDBLOCK
#if !defined(EAGAIN) || EAGAIN != EWOULDBLOCK
        case EWOULDBLOCK:
#endif
#endif
            break;
#ifdef ENETDOWN
        case ENETDOWN:
            /*
             * When the network layer has been shut down, there
             * is not much use in simply exiting: the parent
             * would simply re-create us (and we'd fail again).
             * Use the CHILDFATAL code to tear the server down.
             * @@@ Martin's idea for possible improvement:
             * A different approach would be to define
             * a new APEXIT_NETDOWN exit code, the reception
             * of which would make the parent shutdown all
             * children, then idle-loop until it detected that
             * the network is up again, and restart the children.
             * Ben Hyde noted that temporary ENETDOWN situations
             * occur in mobile IP.
             */
            ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, APLOGNO(02177)
                         "apr_socket_accept: giving up.");
            return APR_EGENERAL;
#endif /*ENETDOWN*/

        default:
            /* If the socket has been closed in ap_close_listeners()
             * by the restart/stop action, we may get EBADF.
             * Do not print an error in this case.
             */
            if (!lr->active) {
                ap_log_error(APLOG_MARK, APLOG_DEBUG, status, ap_server_conf, APLOGNO(02178)
                             "apr_socket_accept failed for inactive listener");
                return status;
            }
            ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf, APLOGNO(02179)
                         "apr_socket_accept: (client socket)");
            return APR_EGENERAL;
    }
    return status;
}
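A minimal hedged sketch of a caller is shown below: it retries on interrupted or transient accepts and stops only when ap_unixd_accept() signals a fatal condition with APR_EGENERAL. The handle_connection() helper is hypothetical.

/* Sketch only: drive ap_unixd_accept() from an accept loop.
 * handle_connection() stands in for whatever the MPM does with the socket. */
static void accept_loop_sketch(ap_listen_rec *lr, apr_pool_t *ptrans)
{
    void *csd;
    apr_status_t rv;

    for (;;) {
        apr_pool_clear(ptrans);
        rv = ap_unixd_accept(&csd, lr, ptrans);
        if (rv == APR_SUCCESS) {
            handle_connection((apr_socket_t *)csd, ptrans);  /* hypothetical */
        }
        else if (rv == APR_EGENERAL) {
            break;      /* ap_unixd_accept() flagged a fatal condition */
        }
        /* otherwise: EINTR or a transient errno was returned; retry */
    }
}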
Example #15
File: child.c Project: Distrotech/httpd
static unsigned int __stdcall winnt_accept(void *lr_)
{
    ap_listen_rec *lr = (ap_listen_rec *)lr_;
    apr_os_sock_info_t sockinfo;
    PCOMP_CONTEXT context = NULL;
    DWORD BytesRead;
    SOCKET nlsd;
    int rv, err_count = 0;
#if APR_HAVE_IPV6
    SOCKADDR_STORAGE ss_listen;
    int namelen = sizeof(ss_listen);
#endif

    apr_os_sock_get(&nlsd, lr->sd);

#if APR_HAVE_IPV6
    if (getsockname(nlsd, (struct sockaddr *)&ss_listen, &namelen) == SOCKET_ERROR) {
        ap_log_error(APLOG_MARK,APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                    "winnt_accept: getsockname error on listening socket, is IPv6 available?");
        return 1;
    }
#endif

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                 "Child %lu: Starting thread to listen on port %d.", my_pid, lr->bind_addr->port);
    while (!shutdown_in_progress) {
        if (!context) {
            context = mpm_get_completion_context();
            if (!context) {
                /* Temporary resource constraint? */
                Sleep(0);
                continue;
            }
        }

        /* Create and initialize the accept socket */
#if APR_HAVE_IPV6
        if (context->accept_socket == INVALID_SOCKET) {
            context->accept_socket = socket(ss_listen.ss_family, SOCK_STREAM, IPPROTO_TCP);
            context->socket_family = ss_listen.ss_family;
        }
        else if (context->socket_family != ss_listen.ss_family) {
            closesocket(context->accept_socket);
            context->accept_socket = socket(ss_listen.ss_family, SOCK_STREAM, IPPROTO_TCP);
            context->socket_family = ss_listen.ss_family;
        }

        if (context->accept_socket == INVALID_SOCKET) {
            ap_log_error(APLOG_MARK,APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "winnt_accept: Failed to allocate an accept socket. "
                         "Temporary resource constraint? Try again.");
            Sleep(100);
            continue;
        }
#else
        if (context->accept_socket == INVALID_SOCKET) {
            context->accept_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
            if (context->accept_socket == INVALID_SOCKET) {
                /* Another temporary condition? */
                ap_log_error(APLOG_MARK,APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                             "winnt_accept: Failed to allocate an accept socket. "
                             "Temporary resource constraint? Try again.");
                Sleep(100);
                continue;
            }
        }
#endif
        /* AcceptEx on the completion context. The completion context will be
         * signaled when a connection is accepted.
         */
        if (!AcceptEx(nlsd, context->accept_socket,
                      context->buff,
                      0,
                      PADDED_ADDR_SIZE,
                      PADDED_ADDR_SIZE,
                      &BytesRead,
                      &context->Overlapped)) {
            rv = apr_get_netos_error();
            if ((rv == APR_FROM_OS_ERROR(WSAEINVAL)) ||
                (rv == APR_FROM_OS_ERROR(WSAENOTSOCK))) {
                /* We can get here when:
                 * 1) the client disconnects early
                 * 2) TransmitFile does not properly recycle the accept socket (typically
                 *    because the client disconnected)
                 * 3) there is VPN or Firewall software installed with buggy AcceptEx implementation
                 * 4) the webserver is using a dynamic address that has changed
                 */
                ++err_count;
                closesocket(context->accept_socket);
                context->accept_socket = INVALID_SOCKET;
                if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                    ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                                 "Child %lu: Encountered too many errors accepting client connections. "
                                 "Possible causes: dynamic address renewal, or incompatible VPN or firewall software. "
                                 "Try using the Win32DisableAcceptEx directive.", my_pid);
                    err_count = 0;
                }
                continue;
            }
            else if ((rv != APR_FROM_OS_ERROR(ERROR_IO_PENDING)) &&
                     (rv != APR_FROM_OS_ERROR(WSA_IO_PENDING))) {
                ++err_count;
                if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                    ap_log_error(APLOG_MARK,APLOG_ERR, rv, ap_server_conf,
                                 "Child %lu: Encountered too many errors accepting client connections. "
                                 "Possible causes: Unknown. "
                                 "Try using the Win32DisableAcceptEx directive.", my_pid);
                    err_count = 0;
                }
                closesocket(context->accept_socket);
                context->accept_socket = INVALID_SOCKET;
                continue;
            }
            err_count = 0;

            /* Wait for pending i/o.
             * Wake up once per second to check for shutdown.
             * XXX: We should be waiting on exit_event instead of polling
             */
            while (1) {
                rv = WaitForSingleObject(context->Overlapped.hEvent, 1000);
                if (rv == WAIT_OBJECT_0) {
                    if (context->accept_socket == INVALID_SOCKET) {
                        /* socket already closed */
                        break;
                    }
                    if (!GetOverlappedResult((HANDLE)context->accept_socket,
                                             &context->Overlapped,
                                             &BytesRead, FALSE)) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING,
                                     apr_get_os_error(), ap_server_conf,
                             "winnt_accept: Asynchronous AcceptEx failed.");
                        closesocket(context->accept_socket);
                        context->accept_socket = INVALID_SOCKET;
                    }
                    break;
                }
                /* WAIT_TIMEOUT */
                if (shutdown_in_progress) {
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    break;
                }
            }
            if (context->accept_socket == INVALID_SOCKET) {
                continue;
            }
        }
        err_count = 0;
        /* Inherit the listen socket settings. Required for
         * shutdown() to work
         */
        if (setsockopt(context->accept_socket, SOL_SOCKET,
                       SO_UPDATE_ACCEPT_CONTEXT, (char *)&nlsd,
                       sizeof(nlsd))) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "setsockopt(SO_UPDATE_ACCEPT_CONTEXT) failed.");
            /* Not a failure condition. Keep running. */
        }

        /* Get the local & remote address */
        GetAcceptExSockaddrs(context->buff,
                             0,
                             PADDED_ADDR_SIZE,
                             PADDED_ADDR_SIZE,
                             &context->sa_server,
                             &context->sa_server_len,
                             &context->sa_client,
                             &context->sa_client_len);

        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local   = context->sa_server;
        sockinfo.remote  = context->sa_client;
        sockinfo.family  = context->sa_server->sa_family;
        sockinfo.type    = SOCK_STREAM;
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        /* When a connection is received, send an io completion notification to
         * the ThreadDispatchIOCP. This function could be replaced by
         * mpm_post_completion_context(), but why do an extra function call...
         */
        PostQueuedCompletionStatus(ThreadDispatchIOCP, 0, IOCP_CONNECTION_ACCEPTED,
                                   &context->Overlapped);
        context = NULL;
    }
    if (!shutdown_in_progress) {
        /* Yow, hit an irrecoverable error! Tell the child to die. */
        SetEvent(exit_event);
    }
    ap_log_error(APLOG_MARK, APLOG_INFO, APR_SUCCESS, ap_server_conf,
                 "Child %lu: Accept thread exiting.", my_pid);
    return 0;
}
Example #16
File: child.c Project: Distrotech/httpd
static unsigned int __stdcall win9x_accept(void * dummy)
{
    struct timeval tv;
    fd_set main_fds;
    int wait_time = 1;
    SOCKET csd;
    SOCKET nsd = INVALID_SOCKET;
    int count_select_errors = 0;
    int rc;
    int clen;
    ap_listen_rec *lr;
    struct fd_set listenfds;
#if APR_HAVE_IPV6
    struct sockaddr_in6 sa_client;
#else
    struct sockaddr_in sa_client;
#endif

    /* Set up the listeners
     * ToDo: Use apr_poll()
     */
    FD_ZERO(&listenfds);
    for (lr = ap_listeners; lr; lr = lr->next) {
        if (lr->sd != NULL) {
            apr_os_sock_get(&nsd, lr->sd);
            FD_SET(nsd, &listenfds);
            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                         "Child %lu: Listening on port %d.", my_pid, lr->bind_addr->port);
        }
    }

    head_listener = ap_listeners;

    while (!shutdown_in_progress) {
        tv.tv_sec = wait_time;
        tv.tv_usec = 0;
        memcpy(&main_fds, &listenfds, sizeof(fd_set));

        /* First parameter of select() is ignored on Windows */
        rc = select(0, &main_fds, NULL, NULL, &tv);

        if (rc == 0 || (rc == SOCKET_ERROR && APR_STATUS_IS_EINTR(apr_get_netos_error()))) {
            count_select_errors = 0;    /* reset count of errors */
            continue;
        }
        else if (rc == SOCKET_ERROR) {
            /* A "real" error occurred, log it and increment the count of
             * select errors. This count is used to ensure we don't go into
             * a busy loop of continuous errors.
             */
            ap_log_error(APLOG_MARK, APLOG_INFO, apr_get_netos_error(), ap_server_conf,
                         "select failed with error %d", apr_get_netos_error());
            count_select_errors++;
            if (count_select_errors > MAX_SELECT_ERRORS) {
                shutdown_in_progress = 1;
                ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                             "Too many errors in select loop. Child process exiting.");
                break;
            }
        } else {
            ap_listen_rec *lr;

            lr = find_ready_listener(&main_fds);
            if (lr != NULL) {
                /* fetch the native socket descriptor */
                apr_os_sock_get(&nsd, lr->sd);
            }
        }

        do {
            clen = sizeof(sa_client);
            csd = accept(nsd, (struct sockaddr *) &sa_client, &clen);
        } while (csd == INVALID_SOCKET && APR_STATUS_IS_EINTR(apr_get_netos_error()));

        if (csd == INVALID_SOCKET) {
            if (APR_STATUS_IS_ECONNABORTED(apr_get_netos_error())) {
                ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                            "accept: (client socket)");
            }
        }
        else {
            add_job(csd);
        }
    }
    SetEvent(exit_event);
    return 0;
}
Example #17
/*static */
void worker_main(void *arg)
{
    ap_listen_rec *lr, *first_lr, *last_lr = NULL;
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    conn_rec *current_conn;
    apr_status_t stat = APR_EINIT;
    ap_sb_handle_t *sbh;
    apr_thread_t *thd = NULL;
    apr_os_thread_t osthd;

    int my_worker_num = (int)arg;
    apr_socket_t *csd = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = NULL;
    fd_set main_fds;

    int sockdes;
    int srv;
    struct timeval tv;
    int wouldblock_retry;

    osthd = apr_os_thread_current();
    apr_os_thread_put(&thd, &osthd, pmain);

    tv.tv_sec = 1;
    tv.tv_usec = 0;

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);

    apr_pool_create_ex(&ptrans, pmain, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    atomic_inc (&worker_thread_count);

    while (!die_now) {
        /*
        * (Re)initialize this child to a pre-connection state.
        */
        current_conn = NULL;
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
            && requests_this_child++ >= ap_max_requests_per_child)) {
            DBPRINT1 ("\n**Thread slot %d is shutting down", my_worker_num);
            clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
        }

        ap_update_child_status_from_indexes(0, my_worker_num, WORKER_READY,
                                            (request_rec *) NULL);

        /*
        * Wait for an acceptable connection to arrive.
        */

        for (;;) {
            if (shutdown_pending || restart_pending || (ap_scoreboard_image->servers[0][my_worker_num].status == WORKER_IDLE_KILL)) {
                DBPRINT1 ("\nThread slot %d is shutting down\n", my_worker_num);
                clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
            }

            /* Check the listen queue on all sockets for requests */
            memcpy(&main_fds, &listenfds, sizeof(fd_set));
            srv = select(listenmaxfd + 1, &main_fds, NULL, NULL, &tv);

            if (srv <= 0) {
                if (srv < 0) {
                    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00217)
                        "select() failed on listen socket");
                    apr_thread_yield();
                }
                continue;
            }

            /* remember the last_lr we searched last time around so that
            we don't end up starving any particular listening socket */
            if (last_lr == NULL) {
                lr = ap_listeners;
            }
            else {
                lr = last_lr->next;
                if (!lr)
                    lr = ap_listeners;
            }
            first_lr = lr;
            do {
                apr_os_sock_get(&sockdes, lr->sd);
                if (FD_ISSET(sockdes, &main_fds))
                    goto got_listener;
                lr = lr->next;
                if (!lr)
                    lr = ap_listeners;
            } while (lr != first_lr);
            /* if we get here, something unexpected happened. Go back
            into the select state and try again.
            */
            continue;
        got_listener:
            last_lr = lr;
            sd = lr->sd;

            wouldblock_retry = MAX_WB_RETRIES;

            while (wouldblock_retry) {
                if ((stat = apr_socket_accept(&csd, sd, ptrans)) == APR_SUCCESS) {
                    break;
                }
                else {
                    /* If the error is a wouldblock then maybe we were too
                        quick trying to pull the next request from the listen
                        queue.  Try a few more times then return to our idle
                        listen state. */
                    if (!APR_STATUS_IS_EAGAIN(stat)) {
                        break;
                    }

                    if (wouldblock_retry--) {
                        apr_thread_yield();
                    }
                }
            }

            /* If we got a new socket, set it to non-blocking mode and process
                it.  Otherwise handle the error. */
            if (stat == APR_SUCCESS) {
                apr_socket_opt_set(csd, APR_SO_NONBLOCK, 0);
#ifdef DBINFO_ON
                if (wouldblock_retry < MAX_WB_RETRIES) {
                    retry_success++;
                    avg_retries += (MAX_WB_RETRIES-wouldblock_retry);
                }
#endif
                break;       /* We have a socket ready for reading */
            }
            else {
#ifdef DBINFO_ON
                if (APR_STATUS_IS_EAGAIN(stat)) {
                        would_block++;
                        retry_fail++;
                }
                else if (
#else
                if (APR_STATUS_IS_EAGAIN(stat) ||
#endif
                    APR_STATUS_IS_ECONNRESET(stat) ||
                    APR_STATUS_IS_ETIMEDOUT(stat) ||
                    APR_STATUS_IS_EHOSTUNREACH(stat) ||
                    APR_STATUS_IS_ENETUNREACH(stat)) {
                        ;
                }
#ifdef USE_WINSOCK
                else if (APR_STATUS_IS_ENETDOWN(stat)) {
                       /*
                        * When the network layer has been shut down, there
                        * is not much use in simply exiting: the parent
                        * would simply re-create us (and we'd fail again).
                        * Use the CHILDFATAL code to tear the server down.
                        * @@@ Martin's idea for possible improvement:
                        * A different approach would be to define
                        * a new APEXIT_NETDOWN exit code, the reception
                        * of which would make the parent shutdown all
                        * children, then idle-loop until it detected that
                        * the network is up again, and restart the children.
                        * Ben Hyde noted that temporary ENETDOWN situations
                        * occur in mobile IP.
                        */
                        ap_log_error(APLOG_MARK, APLOG_EMERG, stat, ap_server_conf, APLOGNO(00218)
                            "apr_socket_accept: giving up.");
                        clean_child_exit(APEXIT_CHILDFATAL, my_worker_num, ptrans,
                                         bucket_alloc);
                }
#endif
                else {
                        ap_log_error(APLOG_MARK, APLOG_ERR, stat, ap_server_conf, APLOGNO(00219)
                            "apr_socket_accept: (client socket)");
                        clean_child_exit(1, my_worker_num, ptrans, bucket_alloc);
                }
            }
        }

        ap_create_sb_handle(&sbh, ptrans, 0, my_worker_num);
        /*
        * We now have a connection, so set it up with the appropriate
        * socket options, file descriptors, and read/write buffers.
        */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_worker_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            current_conn->current_thread = thd;
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        request_count++;
    }
    clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
}
Example #18
/* Main processing of the parent process
 * returns TRUE if restarting
 */
static char master_main()
{
    server_rec *s = ap_server_conf;
    ap_listen_rec *lr;
    parent_info_t *parent_info;
    char *listener_shm_name;
    int listener_num, num_listeners, slot;
    ULONG rc;

    printf("%s \n", ap_get_server_version());
    set_signals();

    if (ap_setup_listeners(ap_server_conf) < 1) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, 0, s,
                     "no listening sockets available, shutting down");
        return FALSE;
    }

    /* Allocate a shared memory block for the array of listeners */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    listener_shm_name = apr_psprintf(pconf, "/sharemem/httpd/parent_info.%d", getpid());
    rc = DosAllocSharedMem((PPVOID)&parent_info, listener_shm_name,
                           sizeof(parent_info_t) + num_listeners * sizeof(listen_socket_t),
                           PAG_READ|PAG_WRITE|PAG_COMMIT);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, APR_FROM_OS_ERROR(rc), s,
                     "failure allocating shared memory, shutting down");
        return FALSE;
    }

    /* Store the listener sockets in the shared memory area for our children to see */
    for (listener_num = 0, lr = ap_listeners; lr; lr = lr->next, listener_num++) {
        apr_os_sock_get(&parent_info->listeners[listener_num].listen_fd, lr->sd);
    }

    /* Create mutex to prevent multiple child processes from detecting
     * a connection with apr_poll()
     */

    rc = DosCreateMutexSem(NULL, &ap_mpm_accept_mutex, DC_SEM_SHARED, FALSE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, APR_FROM_OS_ERROR(rc), s,
                     "failure creating accept mutex, shutting down");
        return FALSE;
    }

    parent_info->accept_mutex = ap_mpm_accept_mutex;

    /* Allocate shared memory for scoreboard */
    if (ap_scoreboard_image == NULL) {
        void *sb_mem;
        rc = DosAllocSharedMem(&sb_mem, ap_scoreboard_fname,
                               ap_calc_scoreboard_size(),
                               PAG_COMMIT|PAG_READ|PAG_WRITE);

        if (rc) {
            ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf,
                         "unable to allocate shared memory for scoreboard , exiting");
            return FALSE;
        }

        ap_init_scoreboard(sb_mem);
    }

    ap_scoreboard_image->global->restart_time = apr_time_now();
    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
		"%s configured -- resuming normal operations",
		ap_get_server_version());
    ap_log_error(APLOG_MARK, APLOG_INFO, 0, ap_server_conf,
		"Server built: %s", ap_get_server_built());
#ifdef AP_MPM_WANT_SET_ACCEPT_LOCK_MECH
    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf,
		"AcceptMutex: %s (default: %s)",
		apr_proc_mutex_name(accept_mutex),
		apr_proc_mutex_defname());
#endif
    if (one_process) {
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_mpm_child_main(pconf);
        return FALSE;
    }

    while (!restart_pending && !shutdown_pending) {
        RESULTCODES proc_rc;
        PID child_pid;
        int active_children = 0;

        /* Count number of active children */
        for (slot=0; slot < HARD_SERVER_LIMIT; slot++) {
            active_children += ap_scoreboard_image->parent[slot].pid != 0 &&
                !ap_scoreboard_image->parent[slot].quiescing;
        }

        /* Spawn children if needed */
        for (slot=0; slot < HARD_SERVER_LIMIT && active_children < ap_daemons_to_start; slot++) {
            if (ap_scoreboard_image->parent[slot].pid == 0) {
                spawn_child(slot);
                active_children++;
            }
        }

        rc = DosWaitChild(DCWA_PROCESSTREE, DCWW_NOWAIT, &proc_rc, &child_pid, 0);

        if (rc == 0) {
            /* A child has terminated, remove its scoreboard entry & terminate if necessary */
            for (slot=0; ap_scoreboard_image->parent[slot].pid != child_pid && slot < HARD_SERVER_LIMIT; slot++);

            if (slot < HARD_SERVER_LIMIT) {
                ap_scoreboard_image->parent[slot].pid = 0;
                ap_scoreboard_image->parent[slot].quiescing = 0;

                if (proc_rc.codeTerminate == TC_EXIT) {
                    /* Child terminated normally, check its exit code and
                     * terminate server if child indicates a fatal error
                     */
                    if (proc_rc.codeResult == APEXIT_CHILDFATAL)
                        break;
                }
            }
        } else if (rc == ERROR_CHILD_NOT_COMPLETE) {
            /* No child exited, let's sleep for a while.... */
            apr_sleep(SCOREBOARD_MAINTENANCE_INTERVAL);
        }
    }

    /* Signal children to shut down, either gracefully or immediately */
    for (slot=0; slot<HARD_SERVER_LIMIT; slot++) {
      kill(ap_scoreboard_image->parent[slot].pid, is_graceful ? SIGHUP : SIGTERM);
    }

    DosFreeMem(parent_info);
    return restart_pending;
}
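As a hedged counterpart to the parent code above, the sketch below shows how a child process could rebuild apr_socket_t listeners from the raw descriptors published in parent_info, using apr_os_sock_put(); the helper is hypothetical and the real OS/2 child code may do this differently.

/* Hypothetical child-side helper: rebuild APR sockets from the descriptors
 * the parent stored in shared memory above. */
static apr_status_t reopen_listeners_sketch(parent_info_t *parent_info,
                                            apr_pool_t *pool)
{
    ap_listen_rec *lr;
    int listener_num = 0;

    for (lr = ap_listeners; lr; lr = lr->next, listener_num++) {
        apr_os_sock_t fd = parent_info->listeners[listener_num].listen_fd;
        apr_status_t rv = apr_os_sock_put(&lr->sd, &fd, pool);

        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
    return APR_SUCCESS;
}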
Example #19
SWITCH_DECLARE(switch_status_t) switch_os_sock_get(switch_os_socket_t *thesock, switch_socket_t *sock)
{
	return apr_os_sock_get(thesock, sock);
}
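A short usage sketch, assuming an already-created switch_socket_t: the native descriptor is extracted so it can be handed to code outside the APR/FreeSWITCH wrappers. The helper name is illustrative.

/* Sketch: fall back to -1 if the descriptor cannot be extracted. */
static int native_fd_sketch(switch_socket_t *sock)
{
    switch_os_socket_t fd;

    if (switch_os_sock_get(&fd, sock) != SWITCH_STATUS_SUCCESS) {
        return -1;
    }
    return (int)fd;   /* on Windows this narrows a SOCKET handle */
}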
Example #20
File: svnserve.c Project: 2asoft/freebsd
/*
 * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
 * either return an error to be displayed, or set *EXIT_CODE to non-zero and
 * return SVN_NO_ERROR.
 */
static svn_error_t *
sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
{
  enum run_mode run_mode = run_mode_unspecified;
  svn_boolean_t foreground = FALSE;
  apr_socket_t *sock;
  apr_sockaddr_t *sa;
  svn_error_t *err;
  apr_getopt_t *os;
  int opt;
  serve_params_t params;
  const char *arg;
  apr_status_t status;
#ifndef WIN32
  apr_proc_t proc;
#endif
  svn_boolean_t is_multi_threaded;
  enum connection_handling_mode handling_mode = CONNECTION_DEFAULT;
  svn_boolean_t cache_fulltexts = TRUE;
  svn_boolean_t cache_txdeltas = TRUE;
  svn_boolean_t cache_revprops = FALSE;
  svn_boolean_t use_block_read = FALSE;
  apr_uint16_t port = SVN_RA_SVN_PORT;
  const char *host = NULL;
  int family = APR_INET;
  apr_int32_t sockaddr_info_flags = 0;
#if APR_HAVE_IPV6
  svn_boolean_t prefer_v6 = FALSE;
#endif
  svn_boolean_t quiet = FALSE;
  svn_boolean_t is_version = FALSE;
  int mode_opt_count = 0;
  int handling_opt_count = 0;
  const char *config_filename = NULL;
  const char *pid_filename = NULL;
  const char *log_filename = NULL;
  svn_node_kind_t kind;
  apr_size_t min_thread_count = THREADPOOL_MIN_SIZE;
  apr_size_t max_thread_count = THREADPOOL_MAX_SIZE;
#ifdef SVN_HAVE_SASL
  SVN_ERR(cyrus_init(pool));
#endif

  /* Check library versions */
  SVN_ERR(check_lib_versions());

  /* Initialize the FS library. */
  SVN_ERR(svn_fs_initialize(pool));

  SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));

  params.root = "/";
  params.tunnel = FALSE;
  params.tunnel_user = NULL;
  params.read_only = FALSE;
  params.base = NULL;
  params.cfg = NULL;
  params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_DEFAULT;
  params.logger = NULL;
  params.config_pool = NULL;
  params.authz_pool = NULL;
  params.fs_config = NULL;
  params.vhost = FALSE;
  params.username_case = CASE_ASIS;
  params.memory_cache_size = (apr_uint64_t)-1;
  params.zero_copy_limit = 0;
  params.error_check_interval = 4096;

  while (1)
    {
      status = apr_getopt_long(os, svnserve__options, &opt, &arg);
      if (APR_STATUS_IS_EOF(status))
        break;
      if (status != APR_SUCCESS)
        {
          usage(argv[0], pool);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }
      switch (opt)
        {
        case '6':
#if APR_HAVE_IPV6
          prefer_v6 = TRUE;
#endif
          /* ### Maybe error here if we don't have IPV6 support? */
          break;

        case 'h':
          help(pool);
          return SVN_NO_ERROR;

        case 'q':
          quiet = TRUE;
          break;

        case SVNSERVE_OPT_VERSION:
          is_version = TRUE;
          break;

        case 'd':
          if (run_mode != run_mode_daemon)
            {
              run_mode = run_mode_daemon;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_FOREGROUND:
          foreground = TRUE;
          break;

        case SVNSERVE_OPT_SINGLE_CONN:
          handling_mode = connection_mode_single;
          handling_opt_count++;
          break;

        case 'i':
          if (run_mode != run_mode_inetd)
            {
              run_mode = run_mode_inetd;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_LISTEN_PORT:
          {
            apr_uint64_t val;

            err = svn_cstring_strtoui64(&val, arg, 0, APR_UINT16_MAX, 10);
            if (err)
              return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, err,
                                       _("Invalid port '%s'"), arg);
            port = (apr_uint16_t)val;
          }
          break;

        case SVNSERVE_OPT_LISTEN_HOST:
          host = arg;
          break;

        case 't':
          if (run_mode != run_mode_tunnel)
            {
              run_mode = run_mode_tunnel;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_TUNNEL_USER:
          params.tunnel_user = arg;
          break;

        case 'X':
          if (run_mode != run_mode_listen_once)
            {
              run_mode = run_mode_listen_once;
              mode_opt_count++;
            }
          break;

        case 'r':
          SVN_ERR(svn_utf_cstring_to_utf8(&params.root, arg, pool));

          SVN_ERR(svn_io_check_resolved_path(params.root, &kind, pool));
          if (kind != svn_node_dir)
            {
              return svn_error_createf(SVN_ERR_ILLEGAL_TARGET, NULL,
                       _("Root path '%s' does not exist "
                         "or is not a directory"), params.root);
            }

          params.root = svn_dirent_internal_style(params.root, pool);
          SVN_ERR(svn_dirent_get_absolute(&params.root, params.root, pool));
          break;

        case 'R':
          params.read_only = TRUE;
          break;

        case 'T':
          handling_mode = connection_mode_thread;
          handling_opt_count++;
          break;

        case 'c':
          params.compression_level = atoi(arg);
          if (params.compression_level < SVN_DELTA_COMPRESSION_LEVEL_NONE)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_NONE;
          if (params.compression_level > SVN_DELTA_COMPRESSION_LEVEL_MAX)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_MAX;
          break;

        case 'M':
          params.memory_cache_size = 0x100000 * apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_CACHE_TXDELTAS:
          cache_txdeltas = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_FULLTEXTS:
          cache_fulltexts = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_REVPROPS:
          cache_revprops = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_BLOCK_READ:
          use_block_read = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CLIENT_SPEED:
          {
            apr_size_t bandwidth = (apr_size_t)apr_strtoi64(arg, NULL, 0);

            /* for slower clients, don't try anything fancy */
            if (bandwidth >= 1000)
              {
                /* block other clients for at most 1 ms (at full bandwidth).
                   Note that the send buffer is 16kB anyways. */
                params.zero_copy_limit = bandwidth * 120;

                /* check for aborted connections at the same rate */
                params.error_check_interval = bandwidth * 120;
              }
          }
          break;

        case SVNSERVE_OPT_MIN_THREADS:
          min_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_MAX_THREADS:
          max_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

#ifdef WIN32
        case SVNSERVE_OPT_SERVICE:
          if (run_mode != run_mode_service)
            {
              run_mode = run_mode_service;
              mode_opt_count++;
            }
          break;
#endif

        case SVNSERVE_OPT_CONFIG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&config_filename, arg, pool));
          config_filename = svn_dirent_internal_style(config_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&config_filename, config_filename,
                                          pool));
          break;

        case SVNSERVE_OPT_PID_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&pid_filename, arg, pool));
          pid_filename = svn_dirent_internal_style(pid_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&pid_filename, pid_filename, pool));
          break;

         case SVNSERVE_OPT_VIRTUAL_HOST:
           params.vhost = TRUE;
           break;

         case SVNSERVE_OPT_LOG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&log_filename, arg, pool));
          log_filename = svn_dirent_internal_style(log_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&log_filename, log_filename, pool));
          break;

        }
    }

  if (is_version)
    {
      SVN_ERR(version(quiet, pool));
      return SVN_NO_ERROR;
    }

  if (os->ind != argc)
    {
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (mode_opt_count != 1)
    {
      svn_error_clear(svn_cmdline_fputs(
#ifdef WIN32
                      _("You must specify exactly one of -d, -i, -t, "
                        "--service or -X.\n"),
#else
                      _("You must specify exactly one of -d, -i, -t or -X.\n"),
#endif
                       stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (handling_opt_count > 1)
    {
      svn_error_clear(svn_cmdline_fputs(
                      _("You may only specify one of -T or --single-thread\n"),
                      stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  /* construct object pools */
  is_multi_threaded = handling_mode == connection_mode_thread;
  params.fs_config = apr_hash_make(pool);
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_DELTAS,
                cache_txdeltas ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_FULLTEXTS,
                cache_fulltexts ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_REVPROPS,
                cache_revprops ? "2" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_BLOCK_READ,
                use_block_read ? "1" :"0");

  SVN_ERR(svn_repos__config_pool_create(&params.config_pool,
                                        is_multi_threaded,
                                        pool));
  SVN_ERR(svn_repos__authz_pool_create(&params.authz_pool,
                                       params.config_pool,
                                       is_multi_threaded,
                                       pool));

  /* If a configuration file is specified, load it and any referenced
   * password and authorization files. */
  if (config_filename)
    {
      params.base = svn_dirent_dirname(config_filename, pool);

      SVN_ERR(svn_repos__config_pool_get(&params.cfg, NULL,
                                         params.config_pool,
                                         config_filename,
                                         TRUE, /* must_exist */
                                         FALSE, /* names_case_sensitive */
                                         NULL,
                                         pool));
    }

  if (log_filename)
    SVN_ERR(logger__create(&params.logger, log_filename, pool));
  else if (run_mode == run_mode_listen_once)
    SVN_ERR(logger__create_for_stderr(&params.logger, pool));

  if (params.tunnel_user && run_mode != run_mode_tunnel)
    {
      return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
               _("Option --tunnel-user is only valid in tunnel mode"));
    }

  if (run_mode == run_mode_inetd || run_mode == run_mode_tunnel)
    {
      apr_pool_t *connection_pool;
      svn_ra_svn_conn_t *conn;
      svn_stream_t *stdin_stream;
      svn_stream_t *stdout_stream;

      params.tunnel = (run_mode == run_mode_tunnel);
      apr_pool_cleanup_register(pool, pool, apr_pool_cleanup_null,
                                redirect_stdout);

      SVN_ERR(svn_stream_for_stdin(&stdin_stream, pool));
      SVN_ERR(svn_stream_for_stdout(&stdout_stream, pool));

      /* Use a subpool for the connection to ensure that if SASL is used
       * the pool cleanup handlers that call sasl_dispose() (connection_pool)
       * and sasl_done() (pool) are run in the right order. See issue #3664. */
      connection_pool = svn_pool_create(pool);
      conn = svn_ra_svn_create_conn4(NULL, stdin_stream, stdout_stream,
                                     params.compression_level,
                                     params.zero_copy_limit,
                                     params.error_check_interval,
                                     connection_pool);
      err = serve(conn, &params, connection_pool);
      svn_pool_destroy(connection_pool);

      return err;
    }

#ifdef WIN32
  /* If svnserve needs to run as a Win32 service, then we need to
     coordinate with the Service Control Manager (SCM) before
     continuing.  This function call registers the svnserve.exe
     process with the SCM, waits for the "start" command from the SCM
     (which will come very quickly), and confirms that those steps
     succeeded.

     After this call succeeds, the service is free to run.  At some
     point in the future, the SCM will send a message to the service,
     requesting that it stop.  This is translated into a call to
     winservice_notify_stop().  The service is then responsible for
     cleanly terminating.

     We need to do this before actually starting the service logic
     (opening files, sockets, etc.) because the SCM wants you to
     connect *first*, then do your service-specific logic.  If the
     service process takes too long to connect to the SCM, then the
     SCM will decide that the service is busted, and will give up on
     it.
     */
  if (run_mode == run_mode_service)
    {
      err = winservice_start();
      if (err)
        {
          svn_handle_error2(err, stderr, FALSE, "svnserve: ");

          /* This is the most common error.  It means the user started
             svnserve from a shell, and specified the --service
             argument.  svnserve cannot be started, as a service, in
             this way.  The --service argument is only valid if
             svnserve is started by the SCM. */
          if (err->apr_err ==
              APR_FROM_OS_ERROR(ERROR_FAILED_SERVICE_CONTROLLER_CONNECT))
            {
              svn_error_clear(svn_cmdline_fprintf(stderr, pool,
                  _("svnserve: The --service flag is only valid if the"
                    " process is started by the Service Control Manager.\n")));
            }

          svn_error_clear(err);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }

      /* The service is now in the "starting" state.  Before the SCM will
         consider the service "started", this thread must call the
         winservice_running() function. */
    }
#endif /* WIN32 */

  /* Make sure we have IPV6 support first before giving apr_sockaddr_info_get
     APR_UNSPEC, because it may give us back an IPV6 address even if we can't
     create IPV6 sockets. */

#if APR_HAVE_IPV6
#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status == 0)
    {
      apr_socket_close(sock);
      family = APR_UNSPEC;

      if (prefer_v6)
        {
          if (host == NULL)
            host = "::";
          sockaddr_info_flags = APR_IPV6_ADDR_OK;
        }
      else
        {
          if (host == NULL)
            host = "0.0.0.0";
          sockaddr_info_flags = APR_IPV4_ADDR_OK;
        }
    }
#endif

  status = apr_sockaddr_info_get(&sa, host, family, port,
                                 sockaddr_info_flags, pool);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't get address info"));
    }


#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't create server socket"));
    }

  /* Prevents "socket in use" errors when server is killed and quickly
   * restarted. */
  status = apr_socket_opt_set(sock, APR_SO_REUSEADDR, 1);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't set options on server socket"));
    }

  status = apr_socket_bind(sock, sa);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't bind server socket"));
    }

  status = apr_socket_listen(sock, ACCEPT_BACKLOG);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't listen on server socket"));
    }

#if APR_HAS_FORK
  if (run_mode != run_mode_listen_once && !foreground)
    /* ### ignoring errors... */
    apr_proc_detach(APR_PROC_DETACH_DAEMONIZE);

  apr_signal(SIGCHLD, sigchld_handler);
#endif

#ifdef SIGPIPE
  /* Disable SIGPIPE generation for the platforms that have it. */
  apr_signal(SIGPIPE, SIG_IGN);
#endif

#ifdef SIGXFSZ
  /* Disable SIGXFSZ generation for the platforms that have it, otherwise
   * working with large files when compiled against an APR that doesn't have
   * large file support will crash the program, which is uncool. */
  apr_signal(SIGXFSZ, SIG_IGN);
#endif

  if (pid_filename)
    SVN_ERR(write_pid_file(pid_filename, pool));

#ifdef WIN32
  status = apr_os_sock_get(&winservice_svnserve_accept_socket, sock);
  if (status)
    winservice_svnserve_accept_socket = INVALID_SOCKET;

  /* At this point, the service is "running".  Notify the SCM. */
  if (run_mode == run_mode_service)
    winservice_running();
#endif

  /* Configure FS caches for maximum efficiency with svnserve.
   * For pre-forked (i.e. multi-processed) mode of operation,
   * keep the per-process caches smaller than the default.
   * Also, apply the respective command line parameters, if given. */
  {
    svn_cache_config_t settings = *svn_cache_config_get();

    if (params.memory_cache_size != -1)
      settings.cache_size = params.memory_cache_size;

    settings.single_threaded = TRUE;
    if (handling_mode == connection_mode_thread)
      {
#if APR_HAS_THREADS
        settings.single_threaded = FALSE;
#else
        /* No requests will be processed at all
         * (see "switch (handling_mode)" code further down).
         * But if they were, some other synchronization code
         * would need to take care of securing integrity of
         * APR-based structures. That would include our caches.
         */
#endif
      }

    svn_cache_config_set(&settings);
  }

#if APR_HAS_THREADS
  SVN_ERR(svn_root_pools__create(&connection_pools));

  if (handling_mode == connection_mode_thread)
    {
      /* create the thread pool with a valid range of threads */
      if (max_thread_count < 1)
        max_thread_count = 1;
      if (min_thread_count > max_thread_count)
        min_thread_count = max_thread_count;

      status = apr_thread_pool_create(&threads,
                                      min_thread_count,
                                      max_thread_count,
                                      pool);
      if (status)
        {
          return svn_error_wrap_apr(status, _("Can't create thread pool"));
        }

      /* let idle threads linger for a while in case more requests are
         coming in */
      apr_thread_pool_idle_wait_set(threads, THREADPOOL_THREAD_IDLE_LIMIT);

      /* don't queue requests unless we reached the worker thread limit */
      apr_thread_pool_threshold_set(threads, 0);
    }
  else
    {
      threads = NULL;
    }
#endif

  while (1)
    {
      connection_t *connection = NULL;
      SVN_ERR(accept_connection(&connection, sock, &params, handling_mode,
                                pool));
      if (run_mode == run_mode_listen_once)
        {
          err = serve_socket(connection, connection->pool);
          close_connection(connection);
          return err;
        }

      switch (handling_mode)
        {
        case connection_mode_fork:
#if APR_HAS_FORK
          status = apr_proc_fork(&proc, connection->pool);
          if (status == APR_INCHILD)
            {
              /* the child wouldn't listen to the main server's socket */
              apr_socket_close(sock);

              /* serve_socket() logs any error it returns, so ignore it. */
              svn_error_clear(serve_socket(connection, connection->pool));
              close_connection(connection);
              return SVN_NO_ERROR;
            }
          else if (status != APR_INPARENT)
            {
              err = svn_error_wrap_apr(status, "apr_proc_fork");
              logger__log_error(params.logger, err, NULL, NULL);
              svn_error_clear(err);
            }
#endif
          break;

        case connection_mode_thread:
          /* Create a detached thread for each connection.  That's not a
             particularly sophisticated strategy for a threaded server; it's
             little different from forking one process per connection. */
#if APR_HAS_THREADS
          attach_connection(connection);

          status = apr_thread_pool_push(threads, serve_thread, connection,
                                        0, NULL);
          if (status)
            {
              return svn_error_wrap_apr(status, _("Can't push task"));
            }
#endif
          break;

        case connection_mode_single:
          /* Serve one connection at a time. */
          /* serve_socket() logs any error it returns, so ignore it. */
          svn_error_clear(serve_socket(connection, connection->pool));
        }

      close_connection(connection);
    }

  /* NOTREACHED */
}
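
The socket setup above probes for IPv6 by creating and immediately closing an APR_INET6 socket, falls back to IPv4 otherwise, sets APR_SO_REUSEADDR, then binds, listens, and loops on accept. The stand-alone sketch below reproduces just that pattern with APR; it is not part of svnserve, error handling is reduced to early returns, and names such as demo_listen are illustrative only.

#include <apr_general.h>
#include <apr_network_io.h>

static apr_status_t demo_listen(apr_pool_t *pool, apr_port_t port)
{
  apr_socket_t *sock;
  apr_sockaddr_t *sa;
  apr_status_t status;
  int family = APR_INET;

#if APR_HAVE_IPV6
  /* If an IPv6 socket can be created at all, let APR pick the family. */
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
  if (status == APR_SUCCESS)
    {
      apr_socket_close(sock);
      family = APR_UNSPEC;
    }
#endif

  /* NULL hostname = wildcard address for the chosen family. */
  status = apr_sockaddr_info_get(&sa, NULL, family, port, 0, pool);
  if (status)
    return status;

  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
  if (status)
    return status;

  /* Avoid "address in use" errors after a quick restart. */
  apr_socket_opt_set(sock, APR_SO_REUSEADDR, 1);

  status = apr_socket_bind(sock, sa);
  if (status)
    return status;

  status = apr_socket_listen(sock, 7);
  if (status)
    return status;

  while (1)
    {
      apr_socket_t *client;
      apr_pool_t *conn_pool;

      /* One subpool per connection, destroyed when the connection is done,
         mirroring the per-connection pools used above. */
      apr_pool_create(&conn_pool, pool);
      status = apr_socket_accept(&client, sock, conn_pool);
      if (status)
        {
          apr_pool_destroy(conn_pool);
          continue;
        }

      /* ... hand 'client' off to a worker here ... */
      apr_socket_close(client);
      apr_pool_destroy(conn_pool);
    }
  /* NOTREACHED */
}
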
Example #21
0
File: child.c Project: Ga-vin/apache
static unsigned int __stdcall winnt_accept(void *lr_)
{
    ap_listen_rec *lr = (ap_listen_rec *)lr_;
    apr_os_sock_info_t sockinfo;
    winnt_conn_ctx_t *context = NULL;
    DWORD BytesRead;
    SOCKET nlsd;
    core_server_config *core_sconf;
    const char *accf_name;
    int rv;
    int accf;
    int err_count = 0;
    HANDLE events[3];
#if APR_HAVE_IPV6
    SOCKADDR_STORAGE ss_listen;
    int namelen = sizeof(ss_listen);
#endif
    u_long zero = 0;

    core_sconf = ap_get_core_module_config(ap_server_conf->module_config);
    accf_name = apr_table_get(core_sconf->accf_map, lr->protocol);

    if (strcmp(accf_name, "data") == 0)
        accf = 2;
    else if (strcmp(accf_name, "connect") == 0)
        accf = 1;
    else if (strcmp(accf_name, "none") == 0)
        accf = 0;
    else {
        accf = 0;
        accf_name = "none";
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(00331)
                     "winnt_accept: unrecognized AcceptFilter '%s', "
                     "only 'data', 'connect' or 'none' are valid. "
                     "Using 'none' instead", accf_name);
    }

    apr_os_sock_get(&nlsd, lr->sd);

#if APR_HAVE_IPV6
    if (getsockname(nlsd, (struct sockaddr *)&ss_listen, &namelen) == SOCKET_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(),
                     ap_server_conf, APLOGNO(00332)
                     "winnt_accept: getsockname error on listening socket, "
                     "is IPv6 available?");
        return 1;
    }
#endif

    if (accf > 0) /* 'data' or 'connect' */
    {
        /* first, high priority event is an already accepted connection */
        events[1] = exit_event;
        events[2] = max_requests_per_child_event;
    }
    else /* accf == 0, 'none' */
    {
reinit: /* target of data or connect upon too many AcceptEx failures */

        /* last, low priority event is a not yet accepted connection */
        events[0] = exit_event;
        events[1] = max_requests_per_child_event;
        events[2] = CreateEvent(NULL, FALSE, FALSE, NULL);

        /* The event needs to be removed from the accepted socket if it
         * was not removed from the listen socket prior to accept().
         */
        rv = WSAEventSelect(nlsd, events[2], FD_ACCEPT);
        if (rv) {
            ap_log_error(APLOG_MARK, APLOG_ERR,
                         apr_get_netos_error(), ap_server_conf, APLOGNO(00333)
                         "WSAEventSelect() failed.");
            CloseHandle(events[2]);
            return 1;
        }
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00334)
                 "Child: Accept thread listening on %pI using AcceptFilter %s",
                 lr->bind_addr, accf_name);

    while (!shutdown_in_progress) {
        if (!context) {
            int timeout;

            context = mpm_get_completion_context(&timeout);
            if (!context) {
                if (!timeout) {
                    /* Hopefully a temporary condition in the provider? */
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_CRIT, 0, ap_server_conf, APLOGNO(00335)
                                     "winnt_accept: Too many failures grabbing a "
                                     "connection ctx.  Aborting.");
                        break;
                    }
                }
                Sleep(100);
                continue;
            }
        }

        if (accf > 0) /* Either 'connect' or 'data' */
        {
            DWORD len;
            char *buf;

            /* Create and initialize the accept socket */
#if APR_HAVE_IPV6
            if (context->accept_socket == INVALID_SOCKET) {
                context->accept_socket = socket(ss_listen.ss_family, SOCK_STREAM,
                                                IPPROTO_TCP);
                context->socket_family = ss_listen.ss_family;
            }
            else if (context->socket_family != ss_listen.ss_family) {
                closesocket(context->accept_socket);
                context->accept_socket = socket(ss_listen.ss_family, SOCK_STREAM,
                                                IPPROTO_TCP);
                context->socket_family = ss_listen.ss_family;
            }
#else
            if (context->accept_socket == INVALID_SOCKET)
                context->accept_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
#endif

            if (context->accept_socket == INVALID_SOCKET) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00336)
                             "winnt_accept: Failed to allocate an accept socket. "
                             "Temporary resource constraint? Try again.");
                Sleep(100);
                continue;
            }

            if (accf == 2) { /* 'data' */
                len = APR_BUCKET_BUFF_SIZE;
                buf = apr_bucket_alloc(len, context->ba);
                len -= PADDED_ADDR_SIZE * 2;
            }
            else /* (accf == 1) 'connect' */ {
                len = 0;
                buf = context->buff;
            }

            /* AcceptEx on the completion context. The completion context will be
             * signaled when a connection is accepted.
             */
            if (!AcceptEx(nlsd, context->accept_socket, buf, len,
                          PADDED_ADDR_SIZE, PADDED_ADDR_SIZE, &BytesRead,
                          &context->overlapped)) {
                rv = apr_get_netos_error();
                if ((rv == APR_FROM_OS_ERROR(WSAECONNRESET)) ||
                    (rv == APR_FROM_OS_ERROR(WSAEACCES))) {
                    /* We can get here when:
                     * 1) the client disconnects early
                     * 2) handshake was incomplete
                     */
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    continue;
                }
                else if ((rv == APR_FROM_OS_ERROR(WSAEINVAL)) ||
                         (rv == APR_FROM_OS_ERROR(WSAENOTSOCK))) {
                    /* We can get here when:
                     * 1) TransmitFile does not properly recycle the accept socket (typically
                     *    because the client disconnected)
                     * 2) there is VPN or Firewall software installed with
                     *    buggy WSAAccept or WSADuplicateSocket implementation
                     * 3) the dynamic address / adapter has changed
                     * Give five chances, then fall back on AcceptFilter 'none'
                     */
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00337)
                                     "Child: Encountered too many AcceptEx "
                                     "faults accepting client connections. "
                                     "Possible causes: dynamic address renewal, "
                                     "or incompatible VPN or firewall software. ");
                        ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, ap_server_conf, APLOGNO(00338)
                                     "winnt_mpm: falling back to "
                                     "'AcceptFilter none'.");
                        err_count = 0;
                        accf = 0;
                    }
                    continue;
                }
                else if ((rv != APR_FROM_OS_ERROR(ERROR_IO_PENDING)) &&
                         (rv != APR_FROM_OS_ERROR(WSA_IO_PENDING))) {
                    if (accf == 2)
                        apr_bucket_free(buf);
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00339)
                                     "Child: Encountered too many AcceptEx "
                                     "faults accepting client connections.");
                        ap_log_error(APLOG_MARK, APLOG_NOTICE, rv, ap_server_conf, APLOGNO(00340)
                                     "winnt_mpm: falling back to "
                                     "'AcceptFilter none'.");
                        err_count = 0;
                        accf = 0;
                        goto reinit;
                    }
                    continue;
                }

                err_count = 0;
                events[0] = context->overlapped.hEvent;

                do {
                    rv = WaitForMultipleObjectsEx(3, events, FALSE, INFINITE, TRUE);
                } while (rv == WAIT_IO_COMPLETION);

                if (rv == WAIT_OBJECT_0) {
                    if ((context->accept_socket != INVALID_SOCKET) &&
                        !GetOverlappedResult((HANDLE)context->accept_socket,
                                             &context->overlapped,
                                             &BytesRead, FALSE)) {
                        ap_log_error(APLOG_MARK, APLOG_WARNING,
                                     apr_get_os_error(), ap_server_conf, APLOGNO(00341)
                             "winnt_accept: Asynchronous AcceptEx failed.");
                        closesocket(context->accept_socket);
                        context->accept_socket = INVALID_SOCKET;
                    }
                }
                else {
                    /* exit_event triggered or event handle was closed */
                    closesocket(context->accept_socket);
                    context->accept_socket = INVALID_SOCKET;
                    if (accf == 2)
                        apr_bucket_free(buf);
                    break;
                }

                if (context->accept_socket == INVALID_SOCKET) {
                    if (accf == 2)
                        apr_bucket_free(buf);
                    continue;
                }
            }
            err_count = 0;

            /* Potential optimization; consider handing off to the worker */

            /* Inherit the listen socket settings. Required for
             * shutdown() to work
             */
            if (setsockopt(context->accept_socket, SOL_SOCKET,
                           SO_UPDATE_ACCEPT_CONTEXT, (char *)&nlsd,
                           sizeof(nlsd))) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(),
                             ap_server_conf, APLOGNO(00342)
                             "setsockopt(SO_UPDATE_ACCEPT_CONTEXT) failed.");
                /* Not a failure condition. Keep running. */
            }

            /* Get the local & remote address
             * TODO; error check
             */
            GetAcceptExSockaddrs(buf, len, PADDED_ADDR_SIZE, PADDED_ADDR_SIZE,
                                 &context->sa_server, &context->sa_server_len,
                                 &context->sa_client, &context->sa_client_len);

            /* For 'data', craft a bucket for our data result
             * and pass to worker_main as context->overlapped.Pointer
             */
            if (accf == 2 && BytesRead)
            {
                apr_bucket *b;
                b = apr_bucket_heap_create(buf, APR_BUCKET_BUFF_SIZE,
                                           apr_bucket_free, context->ba);
                /* Adjust the bucket to refer to the actual bytes read */
                b->length = BytesRead;
                context->overlapped.Pointer = b;
            }
            else
                context->overlapped.Pointer = NULL;
        }
        else /* (accf = 0)  e.g. 'none' */
        {
            /* There is no socket reuse without AcceptEx() */
            if (context->accept_socket != INVALID_SOCKET)
                closesocket(context->accept_socket);

            /* This could be a persistent event per-listener rather than
             * per-accept.  However, the event needs to be removed from
             * the target socket if not removed from the listen socket
             * prior to accept(), or the event select is inherited
             * and must be removed from the accepted socket.
             */

            do {
                rv = WaitForMultipleObjectsEx(3, events, FALSE, INFINITE, TRUE);
            } while (rv == WAIT_IO_COMPLETION);


            if (rv != WAIT_OBJECT_0 + 2) {
                /* not FD_ACCEPT;
                 * exit_event triggered or event handle was closed
                 */
                break;
            }

            context->sa_server = (void *) context->buff;
            context->sa_server_len = sizeof(context->buff) / 2;
            context->sa_client_len = context->sa_server_len;
            context->sa_client = (void *) (context->buff
                                         + context->sa_server_len);

            context->accept_socket = accept(nlsd, context->sa_server,
                                            &context->sa_server_len);

            if (context->accept_socket == INVALID_SOCKET) {

                rv = apr_get_netos_error();
                if (   rv == APR_FROM_OS_ERROR(WSAECONNRESET)
                    || rv == APR_FROM_OS_ERROR(WSAEINPROGRESS)
                    || rv == APR_FROM_OS_ERROR(WSAEWOULDBLOCK) ) {
                    ap_log_error(APLOG_MARK, APLOG_DEBUG,
                                 rv, ap_server_conf, APLOGNO(00343)
                                 "accept() failed, retrying.");
                    continue;
                }

                /* A more serious error than 'retry', log it */
                ap_log_error(APLOG_MARK, APLOG_WARNING,
                             rv, ap_server_conf, APLOGNO(00344)
                             "accept() failed.");

                if (   rv == APR_FROM_OS_ERROR(WSAEMFILE)
                    || rv == APR_FROM_OS_ERROR(WSAENOBUFS) ) {
                    /* Hopefully a temporary condition in the provider? */
                    Sleep(100);
                    ++err_count;
                    if (err_count > MAX_ACCEPTEX_ERR_COUNT) {
                        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf, APLOGNO(00345)
                                     "Child: Encountered too many accept() "
                                     "resource faults, aborting.");
                        break;
                    }
                    continue;
                }
                break;
            }
            /* Per MSDN, cancel the inherited association of this socket
             * to the WSAEventSelect API, and restore the state corresponding
             * to apr_os_sock_make's default assumptions (really, a flaw within
             * os_sock_make and os_sock_put that it does not query).
             */
            WSAEventSelect(context->accept_socket, 0, 0);
            context->overlapped.Pointer = NULL;
            err_count = 0;

            context->sa_server_len = sizeof(context->buff) / 2;
            if (getsockname(context->accept_socket, context->sa_server,
                            &context->sa_server_len) == SOCKET_ERROR) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf, APLOGNO(00346)
                             "getsockname failed");
                continue;
            }
            if ((getpeername(context->accept_socket, context->sa_client,
                             &context->sa_client_len)) == SOCKET_ERROR) {
                ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf, APLOGNO(00347)
                             "getpeername failed");
                memset(&context->sa_client, '\0', sizeof(context->sa_client));
            }
        }

        sockinfo.os_sock  = &context->accept_socket;
        sockinfo.local    = context->sa_server;
        sockinfo.remote   = context->sa_client;
        sockinfo.family   = context->sa_server->sa_family;
        sockinfo.type     = SOCK_STREAM;
        sockinfo.protocol = IPPROTO_TCP;
        /* Restore the state corresponding to apr_os_sock_make's default
         * assumption of timeout -1 (really, a flaw of os_sock_make and
         * os_sock_put that it does not query to determine ->timeout).
         * XXX: Upon a fix to APR, these three statements should disappear.
         */
        ioctlsocket(context->accept_socket, FIONBIO, &zero);
        setsockopt(context->accept_socket, SOL_SOCKET, SO_RCVTIMEO,
                   (char *) &zero, sizeof(zero));
        setsockopt(context->accept_socket, SOL_SOCKET, SO_SNDTIMEO,
                   (char *) &zero, sizeof(zero));
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        /* When a connection is received, send an io completion notification
         * to the ThreadDispatchIOCP.
         */
        PostQueuedCompletionStatus(ThreadDispatchIOCP, BytesRead,
                                   IOCP_CONNECTION_ACCEPTED,
                                   &context->overlapped);
        context = NULL;
    }
    if (!accf)
        CloseHandle(events[2]);

    if (!shutdown_in_progress) {
        /* Yow, hit an irrecoverable error! Tell the child to die. */
        SetEvent(exit_event);
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, APR_SUCCESS, ap_server_conf, APLOGNO(00348)
                 "Child: Accept thread exiting.");
    return 0;
}
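
The hand-off at the bottom of the accept loop, PostQueuedCompletionStatus() with the IOCP_CONNECTION_ACCEPTED key, is the standard I/O completion port pattern: the accept thread posts a packet whose OVERLAPPED pointer leads back to the per-connection context, and a worker retrieves it with GetQueuedCompletionStatus(). The short sketch below is not Apache code; it assumes a context that embeds the OVERLAPPED as its first member, and names such as demo_ctx_t and DEMO_KEY_ACCEPTED are illustrative only.

#include <winsock2.h>
#include <windows.h>

#define DEMO_KEY_ACCEPTED 1

typedef struct demo_ctx_t {
    OVERLAPPED overlapped;     /* first member, so a cast recovers the ctx */
    SOCKET     accept_socket;
} demo_ctx_t;

/* Accept side: hand a freshly accepted socket to the worker pool. */
static void demo_post(HANDLE iocp, demo_ctx_t *ctx, DWORD bytes_read)
{
    PostQueuedCompletionStatus(iocp, bytes_read, DEMO_KEY_ACCEPTED,
                               &ctx->overlapped);
}

/* Worker side: block until a packet arrives, then recover the context.
 * A distinct completion key lets workers tell connection packets apart
 * from any control packets posted on the same port.
 */
static demo_ctx_t *demo_wait(HANDLE iocp, DWORD *bytes_read)
{
    ULONG_PTR   key = 0;
    OVERLAPPED *pol = NULL;

    if (!GetQueuedCompletionStatus(iocp, bytes_read, &key, &pol, INFINITE))
        return NULL;
    if (key != DEMO_KEY_ACCEPTED || pol == NULL)
        return NULL;

    return (demo_ctx_t *)pol;
}
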