Example #1
/* This accepts a connection and allows us to handle the error codes better than
 * the previous code, while also making it more obvious.
 */
static apr_status_t beos_accept(void **accepted, ap_listen_rec *lr, apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
    int sockdes;

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
        apr_os_sock_get(&sockdes, csd);
        return status;
    }

    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* This switch statement provides us with better error details. */
    switch (status) {
#ifdef ECONNABORTED
        case ECONNABORTED:
#endif
#ifdef ETIMEDOUT
        case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
        case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
        case ENETUNREACH:
#endif
            break;
#ifdef ENETDOWN
        case ENETDOWN:
            /*
             * When the network layer has been shut down, there
             * is not much use in simply exiting: the parent
             * would simply re-create us (and we'd fail again).
             * Use the CHILDFATAL code to tear the server down.
             * @@@ Martin's idea for possible improvement:
             * A different approach would be to define
             * a new APEXIT_NETDOWN exit code, the reception
             * of which would make the parent shutdown all
             * children, then idle-loop until it detected that
             * the network is up again, and restart the children.
             * Ben Hyde noted that temporary ENETDOWN situations
             * occur in mobile IP.
             */
            ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                         "apr_socket_accept: giving up.");
            return APR_EGENERAL;
#endif /*ENETDOWN*/

        default:
            ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf,
                         "apr_socket_accept: (client socket)");
            return APR_EGENERAL;
    }
    return status;
}
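
The core pattern in the examples on this page is "retry while APR_STATUS_IS_EINTR() is true". Below is a minimal sketch of that pattern applied to apr_socket_accept(), assuming a bound, listening apr_socket_t and a live pool; the names listen_sock and pool are placeholders, not taken from the example above.

#include "apr_network_io.h"
#include "apr_errno.h"

/* Sketch: accept one connection, retrying if a signal interrupts the call.
 * listen_sock and pool are hypothetical names for an already-listening
 * socket and the pool the connection should be allocated from. */
static apr_status_t accept_retry_on_eintr(apr_socket_t **csd,
                                          apr_socket_t *listen_sock,
                                          apr_pool_t *pool)
{
    apr_status_t rv;

    do {
        rv = apr_socket_accept(csd, listen_sock, pool);
    } while (APR_STATUS_IS_EINTR(rv));

    return rv;  /* APR_SUCCESS, or the first non-EINTR error */
}
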
Example #2
/*
 *
 * Function: checkExitedSystem
 * Description:  Check for child subsystems that have exited
 * Input:
 * Output:
 *      
 * Return:
 * Other:
 *
 */
void CommandServer::checkExitedSystem( void )
{
   int iExitCode, iIdx;
   apr_exit_why_e tWhy;
   apr_status_t tStatus;
   apr_proc_t   stProcess;

   do
   {
      tStatus = apr_proc_wait_all_procs( &stProcess, &iExitCode, &tWhy,
                                         APR_NOWAIT, m_pstLocal );
      if( stProcess.pid > 0 && !APR_STATUS_IS_EINTR(tStatus) )
      {
         /* Mark this subsystem slot as available again */
         for( iIdx = 0; iIdx < m_pstConfig->m_iMaxSys; iIdx++ )
         {
            if( m_pstSystem[iIdx].m_tManagerPid == stProcess.pid )
            {
               m_pstSystem[iIdx].m_tManagerPid = 0;
               m_pstSystem[iIdx].m_bActive = false;
               break;
            }
         }
         if( iIdx == m_pstConfig->m_iMaxSys )
         {
            LOG4C(( LOG_WARN, "Please check the system soon: process %d appeared from nowhere\n",
                    stProcess.pid ));
         }
      }
      else
      {
         return;
      }
   } while( true );
}
Example #3
void ap_wait_or_timeout(apr_exit_why_e *status, int *exitcode, apr_proc_t *ret,
                        apr_pool_t *p)
{
    apr_status_t rv;

    ++wait_or_timeout_counter;
    if (wait_or_timeout_counter == INTERVAL_OF_WRITABLE_PROBES) {
        wait_or_timeout_counter = 0;
        ap_run_monitor(p);
    }

    rv = apr_proc_wait_all_procs(ret, exitcode, status, APR_NOWAIT, p);
    if (APR_STATUS_IS_EINTR(rv)) {
        ret->pid = -1;
        return;
    }

    if (APR_STATUS_IS_CHILD_DONE(rv)) {
        return;
    }

#ifdef NEED_WAITPID
    if ((ret = reap_children(exitcode, status)) > 0) {
        return;
    }
#endif

    apr_sleep(SCOREBOARD_MAINTENANCE_INTERVAL);
    ret->pid = -1;
    return;
}
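
Examples 2 through 4 all call apr_proc_wait_all_procs() with APR_NOWAIT to reap exited children without blocking. Here is a hedged sketch of that reaping loop on its own, using the documented APR_STATUS_IS_CHILD_DONE() check to distinguish "a child was reaped" from "nothing has exited yet"; `pool` is assumed to be a live apr_pool_t and logging is omitted.

#include "apr_thread_proc.h"
#include "apr_errno.h"

/* Sketch: reap every child that has already exited, without blocking. */
static void reap_exited_children(apr_pool_t *pool)
{
    apr_proc_t proc;
    int exitcode;
    apr_exit_why_e why;
    apr_status_t rv;

    for (;;) {
        rv = apr_proc_wait_all_procs(&proc, &exitcode, &why,
                                     APR_NOWAIT, pool);
        if (APR_STATUS_IS_EINTR(rv)) {
            continue;             /* interrupted by a signal, try again */
        }
        if (!APR_STATUS_IS_CHILD_DONE(rv)) {
            break;                /* nothing (more) has exited yet */
        }
        /* proc.pid, exitcode and why describe the reaped child here */
    }
}
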
Example #4
AP_DECLARE(void) ap_wait_or_timeout(apr_exit_why_e *status, int *exitcode,
                                    apr_proc_t *ret, apr_pool_t *p,
                                    server_rec *s)
{
    apr_status_t rv;

    ++wait_or_timeout_counter;
    if (wait_or_timeout_counter == INTERVAL_OF_WRITABLE_PROBES) {
        wait_or_timeout_counter = 0;
        ap_run_monitor(p, s);
    }

    rv = apr_proc_wait_all_procs(ret, exitcode, status, APR_NOWAIT, p);
    ap_update_global_status();

    if (APR_STATUS_IS_EINTR(rv)) {
        ret->pid = -1;
        return;
    }

    if (APR_STATUS_IS_CHILD_DONE(rv)) {
        return;
    }

    apr_sleep(apr_time_from_sec(1));
    ret->pid = -1;
}
static apr_status_t pass_data_to_filter(ap_filter_t *f, const char *data,
                                        apr_size_t len, apr_bucket_brigade *bb)
{
    ef_ctx_t *ctx = f->ctx;
    ef_dir_t *dc = ctx->dc;
    apr_status_t rv;
    apr_size_t bytes_written = 0;
    apr_size_t tmplen;

    do {
        tmplen = len - bytes_written;
        rv = apr_file_write(ctx->proc->in,
                       (const char *)data + bytes_written,
                       &tmplen);
        bytes_written += tmplen;
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EAGAIN(rv)) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, f->r,
                          "apr_file_write(child input), len %" APR_SIZE_T_FMT,
                          tmplen);
            return rv;
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            /* XXX handle blocking conditions here...  if we block, we need
             * to read data from the child process and pass it down to the
             * next filter!
             */
            rv = drain_available_output(f, bb);
            if (APR_STATUS_IS_EAGAIN(rv)) {
#if APR_FILES_AS_SOCKETS
                int num_events;
                const apr_pollfd_t *pdesc;

                rv = apr_pollset_poll(ctx->pollset, f->r->server->timeout,
                                      &num_events, &pdesc);
                if (rv || dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
                                  rv, f->r, "apr_pollset_poll()");
                }
                if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
                    /* some error such as APR_TIMEUP */
                    return rv;
                }
#else /* APR_FILES_AS_SOCKETS */
                /* Yuck... I'd really like to wait until I can read
                 * or write, but instead I have to sleep and try again
                 */
                apr_sleep(100000); /* 100 milliseconds */
                if (dc->debug >= DBGLVL_GORY) {
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG,
                                  0, f->r, "apr_sleep()");
                }
#endif /* APR_FILES_AS_SOCKETS */
            }
            else if (rv != APR_SUCCESS) {
                return rv;
            }
        }
    } while (bytes_written < len);
    return rv;
}
/* Read method of CGI bucket: polls on stderr and stdout of the child,
 * sending any stderr output immediately away to the error log. */
static apr_status_t cgi_bucket_read(apr_bucket *b, const char **str,
                                    apr_size_t *len, apr_read_type_e block)
{
    struct cgi_bucket_data *data = b->data;
    apr_interval_time_t timeout;
    apr_status_t rv;
    int gotdata = 0;

    timeout = block == APR_NONBLOCK_READ ? 0 : data->r->server->timeout;

    do {
        const apr_pollfd_t *results;
        apr_int32_t num;

        rv = apr_pollset_poll(data->pollset, timeout, &num, &results);
        if (APR_STATUS_IS_TIMEUP(rv)) {
            if (timeout) {
                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, data->r,
                              "Timeout waiting for output from CGI script %s",
                              data->r->filename);
                return rv;
            }
            else {
                return APR_EAGAIN;
            }
        }
        else if (APR_STATUS_IS_EINTR(rv)) {
            continue;
        }
        else if (rv != APR_SUCCESS) {
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, data->r,
                          "poll failed waiting for CGI child");
            return rv;
        }

        for (; num; num--, results++) {
            if (results[0].client_data == (void *)1) {
                /* stdout */
                rv = cgi_read_stdout(b, results[0].desc.f, str, len);
                if (APR_STATUS_IS_EOF(rv)) {
                    rv = APR_SUCCESS;
                }
                gotdata = 1;
            } else {
                /* stderr */
                apr_status_t rv2 = log_script_err(data->r, results[0].desc.f);
                if (APR_STATUS_IS_EOF(rv2)) {
                    apr_pollset_remove(data->pollset, &results[0]);
                }
            }
        }

    } while (!gotdata);

    return rv;
}
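
The read routine above presupposes a pollset in which the child's stdout and stderr pipes were registered with distinguishing client_data values. A hedged sketch of that setup follows, assuming apr_file_t handles for the two pipes already exist; child_out, child_err and pool are hypothetical names (in mod_cgi the handles come from the spawned CGI process).

#include "apr_poll.h"
#include "apr_file_io.h"

/* Sketch: build a pollset over a child's stdout/stderr pipes, tagging each
 * descriptor via client_data so poll results can be told apart later. */
static apr_status_t make_child_pollset(apr_pollset_t **pollset,
                                       apr_file_t *child_out,
                                       apr_file_t *child_err,
                                       apr_pool_t *pool)
{
    apr_pollfd_t fd = { 0 };
    apr_status_t rv;

    rv = apr_pollset_create(pollset, 2, pool, 0);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    fd.p = pool;
    fd.desc_type = APR_POLL_FILE;
    fd.reqevents = APR_POLLIN;

    fd.desc.f = child_out;
    fd.client_data = (void *)1;   /* marks "stdout", as in the reader above */
    rv = apr_pollset_add(*pollset, &fd);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    fd.desc.f = child_err;
    fd.client_data = (void *)2;   /* anything other than (void *)1 = stderr */
    return apr_pollset_add(*pollset, &fd);
}
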
Example #7
apr_status_t apr_wait_for_io_or_timeout(apr_file_t *f, apr_socket_t *s,
                                        int for_read)
{
    apr_interval_time_t timeout;
    apr_pollfd_t pfd;
    int type = for_read ? APR_POLLIN : APR_POLLOUT;
    apr_pollset_t *pollset;
    apr_status_t status;

    /* TODO - timeout should be less each time through this loop */
    if (f) {
        pfd.desc_type = APR_POLL_FILE;
        pfd.desc.f = f;

        pollset = f->pollset;
        if (pollset == NULL) {
            status = apr_pollset_create(&(f->pollset), 1, f->pool, 0);
            if (status != APR_SUCCESS) {
                return status;
            }
            pollset = f->pollset;
        }
        timeout = f->timeout;
    }
    else {
        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = s;

        pollset = s->pollset;
        timeout = s->timeout;
    }
    pfd.reqevents = type;

    /* Remove the object if it was in the pollset, then add in the new
     * object with the correct reqevents value. Ignore the status result
     * on the remove, because it might not be in there (yet).
     */
    (void) apr_pollset_remove(pollset, &pfd);

    /* ### check status code */
    (void) apr_pollset_add(pollset, &pfd);

    do {
        int numdesc;
        const apr_pollfd_t *pdesc;

        status = apr_pollset_poll(pollset, timeout, &numdesc, &pdesc);

        if (numdesc == 1 && (pdesc[0].rtnevents & type) != 0) {
            return APR_SUCCESS;
        }
    } while (APR_STATUS_IS_EINTR(status));

    return status;
}
Example #8
APR_DECLARE(apr_status_t) apr_socket_atreadeof(apr_socket_t *sock, int *atreadeof)
{
    apr_pollfd_t pfds[1];
    apr_status_t rv;
    apr_int32_t  nfds;

    /* The purpose here is to return APR_SUCCESS only in cases in
     * which it can be unambiguously determined whether or not the
     * socket will return EOF on next read.  In case of an unexpected
     * error, return that. */

    pfds[0].reqevents = APR_POLLIN;
    pfds[0].desc_type = APR_POLL_SOCKET;
    pfds[0].desc.s = sock;

    do {
        rv = apr_poll(&pfds[0], 1, &nfds, 0);
    } while (APR_STATUS_IS_EINTR(rv));

    if (APR_STATUS_IS_TIMEUP(rv)) {
        /* Read buffer empty -> subsequent reads would block, so,
         * definitely not at EOF. */
        *atreadeof = 0;
        return APR_SUCCESS;
    }
    else if (rv) {
        /* Some other error -> unexpected error. */
        return rv;
    }
    else if (nfds == 1 && pfds[0].rtnevents == APR_POLLIN) {
        apr_sockaddr_t unused;
        apr_size_t len = 1;
        char buf;

        /* The socket is readable - peek to see whether it returns EOF
         * without consuming bytes from the socket buffer. */
        rv = apr_socket_recvfrom(&unused, sock, MSG_PEEK, &buf, &len);
        if (rv == APR_EOF) {
            *atreadeof = 1;
            return APR_SUCCESS;
        }
        else if (rv) {
            /* Read error -> unexpected error. */
            return rv;
        }
        else {
            *atreadeof = 0;
            return APR_SUCCESS;
        }
    }

    /* Should not fall through here. */
    return APR_EGENERAL;
}
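
A hedged usage sketch for the function above: a caller that keeps idle connections around can ask whether the peer has already closed its end before reusing a socket. `sock` is assumed to be a connected apr_socket_t.

/* Sketch: returns 1 if the next read would hit EOF (peer closed), 0 if the
 * connection still looks usable, -1 if the state could not be determined. */
static int connection_is_closed(apr_socket_t *sock)
{
    int at_eof = 0;
    apr_status_t rv = apr_socket_atreadeof(sock, &at_eof);

    if (rv != APR_SUCCESS) {
        return -1;
    }
    return at_eof ? 1 : 0;
}
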
Example #9
apr_status_t serf_context_run(
    serf_context_t *ctx,
    apr_short_interval_time_t duration,
    apr_pool_t *pool)
{
    apr_status_t status;
    apr_int32_t num;
    const apr_pollfd_t *desc;
    serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;

    if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
        return status;
    }

    if ((status = apr_pollset_poll(ps->pollset, duration, &num,
                                   &desc)) != APR_SUCCESS) {
        /* EINTR indicates a handled signal happened during the poll call,
           ignore, the application can safely retry. */
        if (APR_STATUS_IS_EINTR(status))
            return APR_SUCCESS;

        /* ### do we still need to dispatch stuff here?
           ### look at the potential return codes. map to our defined
           ### return values? ...
        */

        /* Use the strict documented error for poll timeouts, to allow proper
           handling of the other timeout types when returned from
           serf_event_trigger */
        if (APR_STATUS_IS_TIMEUP(status))
            return APR_TIMEUP; /* Return the documented error */
        return status;
    }

    while (num--) {
        serf_io_baton_t *io  = desc->client_data;

        status = serf_event_trigger(ctx, io, desc);
        if (status) {
            /* Don't return APR_TIMEUP as a connection error, as our caller
               will use that as a trigger to call us again */
            if (APR_STATUS_IS_TIMEUP(status))
                status = SERF_ERROR_CONNECTION_TIMEDOUT;
            return status;
        }

        desc++;
    }

    return APR_SUCCESS;
}
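
Here is a hedged sketch of the event loop a serf-based client typically wraps around serf_context_run(): keep pumping the context until some application-level flag says the work is finished, and treat APR_TIMEUP as "nothing happened this round" rather than as an error. The names ctx, done and pool are assumptions, not part of the example above.

/* Sketch: drive a serf context until a caller-owned flag is set. `done`
 * is a hypothetical flag flipped from a response handler elsewhere. */
static apr_status_t run_until_done(serf_context_t *ctx, int *done,
                                   apr_pool_t *pool)
{
    apr_status_t status;

    while (!*done) {
        status = serf_context_run(ctx, apr_time_from_sec(1), pool);
        if (APR_STATUS_IS_TIMEUP(status)) {
            continue;            /* nothing ready yet, poll again */
        }
        if (status != APR_SUCCESS) {
            return status;       /* connection- or request-level error */
        }
    }
    return APR_SUCCESS;
}
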
Example #10
static apr_status_t pod_signal_internal(ap_pod_t *pod, int graceful)
{
    apr_status_t rv;
    char char_of_death = graceful ? GRACEFUL_CHAR : RESTART_CHAR;
    apr_size_t one = 1;

    do {
        rv = apr_file_write(pod->pod_out, &char_of_death, &one);
    } while (APR_STATUS_IS_EINTR(rv));
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf,
                     "write pipe_of_death");
    }
    return rv;
}
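
The one-byte pipe write above can ignore short writes, but for larger buffers the same EINTR retry is usually combined with tracking how much apr_file_write() actually accepted. A hedged sketch of that pattern follows (APR also provides apr_file_write_full() for this purpose); non-blocking files would additionally need APR_STATUS_IS_EAGAIN() handling, which is omitted here.

#include "apr_file_io.h"
#include "apr_errno.h"

/* Sketch: write an entire buffer, retrying on EINTR and resuming after
 * partial writes. */
static apr_status_t write_all(apr_file_t *out, const char *buf, apr_size_t len)
{
    apr_size_t written = 0;

    while (written < len) {
        apr_size_t chunk = len - written;
        apr_status_t rv = apr_file_write(out, buf + written, &chunk);

        written += chunk;         /* chunk now holds the bytes written */
        if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) {
            return rv;
        }
    }
    return APR_SUCCESS;
}
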
Example #11
/* Wait for the next client connection to come in from SOCK.  Allocate
 * the connection in a root pool from CONNECTION_POOLS and assign PARAMS.
 * Return the connection object in *CONNECTION.
 *
 * Use HANDLING_MODE for proper internal cleanup.
 */
static svn_error_t *
accept_connection(connection_t **connection,
                  apr_socket_t *sock,
                  serve_params_t *params,
                  enum connection_handling_mode handling_mode,
                  apr_pool_t *pool)
{
  apr_status_t status;

  /* Non-standard pool handling.  The main thread never blocks to join
   * the connection threads so it cannot clean up after each one.  So
   * separate pools that can be cleared at thread exit are used. */

  apr_pool_t *connection_pool = svn_pool_create(pool);
  *connection = apr_pcalloc(connection_pool, sizeof(**connection));
  (*connection)->pool = connection_pool;
  (*connection)->params = params;
  (*connection)->ref_count = 1;

  do
    {
      #ifdef WIN32
      if (winservice_is_stopping())
        exit(0);
      #endif

      status = apr_socket_accept(&(*connection)->usock, sock,
                                 connection_pool);
      if (handling_mode == connection_mode_fork)
        {
          apr_proc_t proc;

          /* Collect any zombie child processes. */
          while (apr_proc_wait_all_procs(&proc, NULL, NULL, APR_NOWAIT,
            connection_pool) == APR_CHILD_DONE)
            ;
        }
    }
  while (APR_STATUS_IS_EINTR(status)
    || APR_STATUS_IS_ECONNABORTED(status)
    || APR_STATUS_IS_ECONNRESET(status));

  return status
       ? svn_error_wrap_apr(status, _("Can't accept client connection"))
       : SVN_NO_ERROR;
}
Example #12
apr_status_t serf_context_run(
    serf_context_t *ctx,
    apr_short_interval_time_t duration,
    apr_pool_t *pool)
{
    apr_status_t status;
    apr_int32_t num;
    const apr_pollfd_t *desc;
    serf_pollset_t *ps = (serf_pollset_t*)ctx->pollset_baton;

    if ((status = serf_context_prerun(ctx)) != APR_SUCCESS) {
        return status;
    }

    if ((status = apr_pollset_poll(ps->pollset, duration, &num,
                                   &desc)) != APR_SUCCESS) {
        /* EINTR indicates a handled signal happened during the poll call,
           ignore, the application can safely retry. */
        if (APR_STATUS_IS_EINTR(status))
            return APR_SUCCESS;

        /* ### do we still need to dispatch stuff here?
           ### look at the potential return codes. map to our defined
           ### return values? ...
        */
        return status;
    }

    while (num--) {
        serf_connection_t *conn = desc->client_data;

        status = serf_event_trigger(ctx, conn, desc);
        if (status) {
            return status;
        }

        desc++;
    }

    return APR_SUCCESS;
}
Example #13
static apr_status_t send_brigade_blocking(apr_socket_t *s,
                                          apr_bucket_brigade *bb,
                                          apr_size_t *bytes_written,
                                          conn_rec *c)
{
    apr_status_t rv;

    rv = APR_SUCCESS;
    while (!APR_BRIGADE_EMPTY(bb)) {
        rv = send_brigade_nonblocking(s, bb, bytes_written, c);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EAGAIN(rv)) {
                /* Wait until we can send more data */
                apr_int32_t nsds;
                apr_interval_time_t timeout;
                apr_pollfd_t pollset;

                pollset.p = c->pool;
                pollset.desc_type = APR_POLL_SOCKET;
                pollset.reqevents = APR_POLLOUT;
                pollset.desc.s = s;
                apr_socket_timeout_get(s, &timeout);
                do {
                    rv = apr_poll(&pollset, 1, &nsds, timeout);
                } while (APR_STATUS_IS_EINTR(rv));
                if (rv != APR_SUCCESS) {
                    break;
                }
            }
            else {
                break;
            }
        }
    }
    return rv;
}
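
The poll-until-writable block inside the EAGAIN branch above is a common enough pattern to pull into a helper. A hedged sketch, assuming the socket's own timeout should bound the wait and that a pool is available for the pollfd:

#include "apr_poll.h"
#include "apr_network_io.h"

/* Sketch: block until the socket is writable (or its timeout expires),
 * retrying the poll if a signal interrupts it. */
static apr_status_t wait_for_writable(apr_socket_t *s, apr_pool_t *pool)
{
    apr_pollfd_t pfd;
    apr_interval_time_t timeout;
    apr_int32_t nsds;
    apr_status_t rv;

    pfd.p = pool;
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.reqevents = APR_POLLOUT;
    pfd.desc.s = s;

    apr_socket_timeout_get(s, &timeout);

    do {
        rv = apr_poll(&pfd, 1, &nsds, timeout);
    } while (APR_STATUS_IS_EINTR(rv));

    return rv;
}
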
//static void child_main(int child_num_arg)
void body()
{

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator); //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild); //// removed deref
    apr_pool_tag(ptrans, 65); // "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldnt initialize crossprocess lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet(); assume(num_listensocks>0);

    lr = ap_listeners;
    i = num_listensocks; 
    while (1) {
      if ( i<=0 ) break; 
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
	i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while(1>0) {
      if (die_now>0) break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
	do_ACCEPT=1; do_ACCEPT=0;

	dummy = nondet();
	if(dummy > 0) {
          /* goto loc_return; */
          while(1>0) { int ddd; ddd=ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
	  while(1) {
	      int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process>0 && shutdown_pending>0) {
			  /* goto loc_return; */
                          while(1>0) { int ddd; ddd=ddd; }
                        }
                        goto loc_continueA;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*              ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; //pdesc[last_poll_idx++].client_data;
		break;

	    loc_continueA: {int yyy2; yyy2=yyy2; }
            }
        }
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
	  goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
	dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
	  //ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    loc_continueB: { int uuu; uuu=uuu; }
    }
    clean_child_exit(0);
 /* loc_return: */
    while(1>0) { int ddd; ddd=ddd; }
}
Example #15
static apr_status_t gnutls_io_input_read(mgs_handle_t * ctxt,
        char *buf, apr_size_t * len) {
    apr_size_t wanted = *len;
    apr_size_t bytes = 0;
    int rc;

    *len = 0;

    /* If we have something leftover from last time, try that first. */
    if ((bytes = char_buffer_read(&ctxt->input_cbuf, buf, wanted))) {
        *len = bytes;
        if (ctxt->input_mode == AP_MODE_SPECULATIVE) {
            /* We want to rollback this read. */
            if (ctxt->input_cbuf.length > 0) {
                ctxt->input_cbuf.value -= bytes;
                ctxt->input_cbuf.length += bytes;
            } else {
                char_buffer_write(&ctxt->input_cbuf, buf,
                        (int) bytes);
            }
            return APR_SUCCESS;
        }
        /* This could probably be *len == wanted, but be safe from stray
         * photons.
         */
        if (*len >= wanted) {
            return APR_SUCCESS;
        }
        if (ctxt->input_mode == AP_MODE_GETLINE) {
            if (memchr(buf, APR_ASCII_LF, *len)) {
                return APR_SUCCESS;
            }
        } else {
            /* Down to a nonblock pattern as we have some data already
             */
            ctxt->input_block = APR_NONBLOCK_READ;
        }
    }

    if (ctxt->session == NULL) {
        return APR_EGENERAL;
    }

    while (1) {

        rc = gnutls_record_recv(ctxt->session, buf + bytes,
                wanted - bytes);

        if (rc > 0) {
            *len += rc;
            if (ctxt->input_mode == AP_MODE_SPECULATIVE) {
                /* We want to rollback this read. */
                char_buffer_write(&ctxt->input_cbuf, buf,
                        rc);
            }
            return ctxt->input_rc;
        } else if (rc == 0) {
            /* If EAGAIN, we will loop given a blocking read,
             * otherwise consider ourselves at EOF.
             */
            if (APR_STATUS_IS_EAGAIN(ctxt->input_rc)
                    || APR_STATUS_IS_EINTR(ctxt->input_rc)) {
                /* Already read something, return APR_SUCCESS instead.
                 * On win32 in particular, but perhaps on other kernels,
                 * a blocking call isn't 'always' blocking.
                 */
                if (*len > 0) {
                    ctxt->input_rc = APR_SUCCESS;
                    break;
                }
                if (ctxt->input_block == APR_NONBLOCK_READ) {
                    break;
                }
            } else {
                if (*len > 0) {
                    ctxt->input_rc = APR_SUCCESS;
                } else {
                    ctxt->input_rc = APR_EOF;
                }
                break;
            }
        } else { /* (rc < 0) */

            if (rc == GNUTLS_E_REHANDSHAKE) {
                /* A client has asked for a new handshake. Currently, we don't do it */
                ap_log_error(APLOG_MARK, APLOG_INFO,
                        ctxt->input_rc,
                        ctxt->c->base_server,
                        "GnuTLS: Error reading data. Client Requested a New Handshake."
                        " (%d) '%s'", rc,
                        gnutls_strerror(rc));
            } else if (rc == GNUTLS_E_WARNING_ALERT_RECEIVED) {
                rc = gnutls_alert_get(ctxt->session);
                ap_log_error(APLOG_MARK, APLOG_INFO,
                        ctxt->input_rc,
                        ctxt->c->base_server,
                        "GnuTLS: Warning Alert From Client: "
                        " (%d) '%s'", rc,
                        gnutls_alert_get_name(rc));
            } else if (rc == GNUTLS_E_FATAL_ALERT_RECEIVED) {
                rc = gnutls_alert_get(ctxt->session);
                ap_log_error(APLOG_MARK, APLOG_INFO,
                        ctxt->input_rc,
                        ctxt->c->base_server,
                        "GnuTLS: Fatal Alert From Client: "
                        "(%d) '%s'", rc,
                        gnutls_alert_get_name(rc));
                ctxt->input_rc = APR_EGENERAL;
                break;
            } else {
                /* Some Other Error. Report it. Die. */
                if (gnutls_error_is_fatal(rc)) {
                    ap_log_error(APLOG_MARK,
                            APLOG_INFO,
                            ctxt->input_rc,
                            ctxt->c->base_server,
                            "GnuTLS: Error reading data. (%d) '%s'",
                            rc,
                            gnutls_strerror(rc));
                } else if (*len > 0) {
                    ctxt->input_rc = APR_SUCCESS;
                    break;
                }
            }

            if (ctxt->input_rc == APR_SUCCESS) {
                ctxt->input_rc = APR_EGENERAL;
            }
            break;
        }
    }
    return ctxt->input_rc;
}
Example #16
ssize_t mgs_transport_read(gnutls_transport_ptr_t ptr,
        void *buffer, size_t len) {
    mgs_handle_t *ctxt = ptr;
    apr_status_t rc;
    apr_size_t in = len;
    apr_read_type_e block = ctxt->input_block;

    ctxt->input_rc = APR_SUCCESS;

    /* If Len = 0, we don't do anything. */
    if (!len || buffer == NULL) {
        return 0;
    }
    if (!ctxt->input_bb) {
        ctxt->input_rc = APR_EOF;
        return -1;
    }

    if (APR_BRIGADE_EMPTY(ctxt->input_bb)) {

        rc = ap_get_brigade(ctxt->input_filter->next,
                ctxt->input_bb, AP_MODE_READBYTES,
                ctxt->input_block, in);

        /* Not a problem, there was simply no data ready yet.
         */
        if (APR_STATUS_IS_EAGAIN(rc) || APR_STATUS_IS_EINTR(rc)
                || (rc == APR_SUCCESS
                && APR_BRIGADE_EMPTY(ctxt->input_bb))) {

            if (APR_STATUS_IS_EOF(ctxt->input_rc)) {
                return 0;
            } else {
                if (ctxt->session)
                    gnutls_transport_set_errno(ctxt->session, EINTR);
                return -1;
            }
        }


        if (rc != APR_SUCCESS) {
            /* Unexpected errors discard the brigade */
            apr_brigade_cleanup(ctxt->input_bb);
            ctxt->input_bb = NULL;
            return -1;
        }
    }

    ctxt->input_rc =
            brigade_consume(ctxt->input_bb, block, buffer, &len);

    if (ctxt->input_rc == APR_SUCCESS) {
        return (ssize_t) len;
    }

    if (APR_STATUS_IS_EAGAIN(ctxt->input_rc)
            || APR_STATUS_IS_EINTR(ctxt->input_rc)) {
        if (len == 0) {
            if (ctxt->session)
                gnutls_transport_set_errno(ctxt->session, EINTR);
            return -1;
        }

        return (ssize_t) len;
    }

    /* Unexpected errors and APR_EOF clean out the brigade.
     * Subsequent calls will return APR_EOF.
     */
    apr_brigade_cleanup(ctxt->input_bb);
    ctxt->input_bb = NULL;

    if (APR_STATUS_IS_EOF(ctxt->input_rc) && len) {
        /* Provide the results of this read pass,
         * without resetting the BIO retry_read flag
         */
        return (ssize_t) len;
    }

    return -1;
}
Example #17
static int32 worker_thread(void * dummy)
{
    proc_info * ti = dummy;
    int child_slot = ti->slot;
    apr_pool_t *tpool = ti->tpool;
    apr_allocator_t *allocator;
    apr_socket_t *csd = NULL;
    apr_pool_t *ptrans;		/* Pool for per-transaction stuff */
    apr_bucket_alloc_t *bucket_alloc;
    apr_socket_t *sd = NULL;
    apr_status_t rv = APR_EINIT;
    int srv , n;
    int curr_pollfd = 0, last_pollfd = 0;
    sigset_t sig_mask;
    int requests_this_child = ap_max_requests_per_thread;
    apr_pollfd_t *pollset;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0; 
    free(ti);

    mpm_state = AP_MPMQ_STARTING;

    on_exit_thread(check_restart, (void*)child_slot);
          
    /* block the signals for this thread */
    sigfillset(&sig_mask);
    sigprocmask(SIG_BLOCK, &sig_mask, NULL);

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, tpool, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);

    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count++;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_STARTING,
                                               (request_rec*)NULL);
                                  
    apr_poll_setup(&pollset, num_listening_sockets + 1, tpool);
    for(n=0 ; n <= num_listening_sockets ; n++)
        apr_poll_socket_add(pollset, listening_sockets[n], APR_POLLIN);

    mpm_state = AP_MPMQ_RUNNING;

    while (1) {
        /* If we're here, then chances are (unless we're the first thread created) 
         * we're going to be held up in the accept mutex, so doing this here
         * shouldn't hurt performance.
         */

        this_worker_should_exit |= (ap_max_requests_per_thread != 0) && (requests_this_child <= 0);
        
        if (this_worker_should_exit) break;

        (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_READY,
                                                   (request_rec*)NULL);

        apr_thread_mutex_lock(accept_mutex);

        while (!this_worker_should_exit) {
            apr_int16_t event;
            apr_status_t ret;

            ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);

            if (ret != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(ret)) {
                    continue;
                }
                /* poll() will only return errors in catastrophic
                 * circumstances. Let's try exiting gracefully, for now. */
                ap_log_error(APLOG_MARK, APLOG_ERR, ret, (const server_rec *)
                             ap_server_conf, "apr_poll: (listen)");
                this_worker_should_exit = 1;
            } else {
                /* if we've bailed in apr_poll what's the point of trying to use the data? */
                apr_poll_revents_get(&event, listening_sockets[0], pollset);

                if (event & APR_POLLIN){
                    apr_sockaddr_t *rec_sa;
                    apr_size_t len = 5;
                    char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
                    apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, ptrans);
                    
                    if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0, tmpbuf, &len))
                        != APR_SUCCESS){
                        ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL, 
                            "error getting data from UDP!!");
                    }else {
                        /* add checking??? */              
                    }
                    this_worker_should_exit = 1;
                }
            }
          
            if (this_worker_should_exit) break;

            if (num_listening_sockets == 1) {
                sd = ap_listeners->sd;
                goto got_fd;
            }
            else {
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;

                    if (curr_pollfd > num_listening_sockets)
                        curr_pollfd = 1;
                    
                    /* Get the revent... */
                    apr_poll_revents_get(&event, listening_sockets[curr_pollfd], pollset);
                    
                    if (event & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        sd = listening_sockets[curr_pollfd];
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);
            }
        }
    got_fd:

        if (!this_worker_should_exit) {
            rv = apr_accept(&csd, sd, ptrans);

            apr_thread_mutex_unlock(accept_mutex);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                  "apr_accept");
            } else {
                process_socket(ptrans, csd, child_slot, bucket_alloc);
                requests_this_child--;
            }
        }
        else {
            apr_thread_mutex_unlock(accept_mutex);
            break;
        }
        apr_pool_clear(ptrans);
    }

    ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD, (request_rec*)NULL);

    apr_bucket_alloc_destroy(bucket_alloc);

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
                 "worker_thread %ld exiting", find_thread(NULL));
    
    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count--;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    return (0);
}
Example #18
static unsigned int __stdcall win9x_accept(void * dummy)
{
    struct timeval tv;
    fd_set main_fds;
    int wait_time = 1;
    SOCKET csd;
    SOCKET nsd = INVALID_SOCKET;
    int count_select_errors = 0;
    int rc;
    int clen;
    ap_listen_rec *lr;
    struct fd_set listenfds;
#if APR_HAVE_IPV6
    struct sockaddr_in6 sa_client;
#else
    struct sockaddr_in sa_client;
#endif

    /* Setup the listeners
     * ToDo: Use apr_poll()
     */
    FD_ZERO(&listenfds);
    for (lr = ap_listeners; lr; lr = lr->next) {
        if (lr->sd != NULL) {
            apr_os_sock_get(&nsd, lr->sd);
            FD_SET(nsd, &listenfds);
            ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf,
                         "Child %lu: Listening on port %d.", my_pid, lr->bind_addr->port);
        }
    }

    head_listener = ap_listeners;

    while (!shutdown_in_progress) {
        tv.tv_sec = wait_time;
        tv.tv_usec = 0;
        memcpy(&main_fds, &listenfds, sizeof(fd_set));

        /* First parameter of select() is ignored on Windows */
        rc = select(0, &main_fds, NULL, NULL, &tv);

        if (rc == 0 || (rc == SOCKET_ERROR && APR_STATUS_IS_EINTR(apr_get_netos_error()))) {
            count_select_errors = 0;    /* reset count of errors */
            continue;
        }
        else if (rc == SOCKET_ERROR) {
            /* A "real" error occurred, log it and increment the count of
             * select errors. This count is used to ensure we don't go into
             * a busy loop of continuous errors.
             */
            ap_log_error(APLOG_MARK, APLOG_INFO, apr_get_netos_error(), ap_server_conf,
                         "select failed with error %d", apr_get_netos_error());
            count_select_errors++;
            if (count_select_errors > MAX_SELECT_ERRORS) {
                shutdown_in_progress = 1;
                ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                             "Too many errors in select loop. Child process exiting.");
                break;
            }
        } else {
            ap_listen_rec *lr;

            lr = find_ready_listener(&main_fds);
            if (lr != NULL) {
                /* fetch the native socket descriptor */
                apr_os_sock_get(&nsd, lr->sd);
            }
        }

        do {
            clen = sizeof(sa_client);
            csd = accept(nsd, (struct sockaddr *) &sa_client, &clen);
        } while (csd == INVALID_SOCKET && APR_STATUS_IS_EINTR(apr_get_netos_error()));

        if (csd == INVALID_SOCKET) {
            if (APR_STATUS_IS_ECONNABORTED(apr_get_netos_error())) {
                ap_log_error(APLOG_MARK, APLOG_ERR, apr_get_netos_error(), ap_server_conf,
                            "accept: (client socket)");
            }
        }
        else {
            add_job(csd);
        }
    }
    SetEvent(exit_event);
    return 0;
}
Example #19
/* This is the thread that actually does all the work. */
static int32 worker_thread(void *dummy)
{
    int worker_slot = (int)dummy;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    apr_status_t rv = APR_EINIT;
    int last_poll_idx = 0;
    sigset_t sig_mask;
    int requests_this_child = 0;
    apr_pollset_t *pollset = NULL;
    ap_listen_rec *lr = NULL;
    ap_sb_handle_t *sbh = NULL;
    int i;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0;
    /* We have 2 pools that we create/use throughout the lifetime of this
     * worker. The first and longest lived is the pworker pool. From
     * this we create the ptrans pool, the lifetime of which is the same
     * as each connection and is reset prior to each attempt to
     * process a connection.
     */
    apr_pool_t *ptrans = NULL;
    apr_pool_t *pworker = NULL;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */

    on_exit_thread(check_restart, (void*)worker_slot);

    /* block the signals for this thread only if we're not running as a
     * single process.
     */
    if (!one_process) {
        sigfillset(&sig_mask);
        sigprocmask(SIG_BLOCK, &sig_mask, NULL);
    }

    /* Each worker thread is fully in control of its destiny and so,
     * to allow each thread to handle the lifetime of its own resources,
     * we create and use a subcontext for every thread.
     * The subcontext is a child of the pconf pool.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pworker, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pworker);

    apr_pool_create(&ptrans, pworker);
    apr_pool_tag(ptrans, "transaction");

    ap_create_sb_handle(&sbh, pworker, 0, worker_slot);
    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* We add an extra socket here as we add the udp_sock we use for signalling
     * death. This gets added after the others.
     */
    apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0);

    for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        apr_pollset_add(pollset, &pfd);
    }
    {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = udp_sock;
        pfd.reqevents = APR_POLLIN;

        apr_pollset_add(pollset, &pfd);
    }

    bucket_alloc = apr_bucket_alloc_create(pworker);

    mpm_state = AP_MPMQ_RUNNING;

    while (!this_worker_should_exit) {
        conn_rec *current_conn;
        void *csd;

        /* (Re)initialize this child to a pre-connection state. */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_thread > 0
             && requests_this_child++ >= ap_max_requests_per_thread))
            clean_child_exit(0, worker_slot);

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        apr_thread_mutex_lock(accept_mutex);

        /* We always (presently) have at least 2 sockets we listen on, so
         * we don't have the ability for a fast path for a single socket
         * as some MPM's allow :(
         */
        for (;;) {
            apr_int32_t numdesc = 0;
            const apr_pollfd_t *pdesc = NULL;

            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
            if (rv != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(rv)) {
                    if (one_process && shutdown_pending)
                        return;
                    continue;
                }
                ap_log_error(APLOG_MARK, APLOG_ERR, rv,
                             ap_server_conf, "apr_pollset_poll: (listen)");
                clean_child_exit(1, worker_slot);
            }
            /* We can always use pdesc[0], but sockets at position N
             * could end up completely starved of attention in a very
             * busy server. Therefore, we round-robin across the
             * returned set of descriptors. While it is possible that
             * the returned set of descriptors might flip around and
             * continue to starve some sockets, we happen to know the
             * internal pollset implementation retains ordering
             * stability of the sockets. Thus, the round-robin should
             * ensure that a socket will eventually be serviced.
             */
            if (last_poll_idx >= numdesc)
                last_poll_idx = 0;

            /* Grab a listener record from the client_data of the poll
             * descriptor, and advance our saved index to round-robin
             * the next fetch.
             *
             * ### hmm... this descriptor might have POLLERR rather
             * ### than POLLIN
             */

            lr = pdesc[last_poll_idx++].client_data;

            /* The only socket we add without client_data is the first, the UDP socket
             * we listen on for restart signals. If we've therefore gotten a hit on that
             * listener lr will be NULL here and we know we've been told to die.
             * Before we jump to the end of the while loop with this_worker_should_exit
             * set to 1 (causing us to exit normally we hope) we release the accept_mutex
             * as we want every thread to go through this same routine :)
             * Bit of a hack, but compared to what I had before...
             */
            if (lr == NULL) {
                this_worker_should_exit = 1;
                apr_thread_mutex_unlock(accept_mutex);
                goto got_a_black_spot;
            }
            goto got_fd;
        }
got_fd:
        /* Run beos_accept to accept the connection and set things up to
         * allow us to process it. We always release the accept_lock here,
         * even if we fail to accept, as otherwise we'll starve other workers,
         * which would be bad.
         */
        rv = beos_accept(&csd, lr, ptrans);
        apr_thread_mutex_unlock(accept_mutex);

        if (rv == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1, worker_slot);
        } else if (rv != APR_SUCCESS)
            continue;

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, worker_slot, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            this_worker_should_exit = 1;
        }
got_a_black_spot:
    }

    apr_pool_destroy(ptrans);
    apr_pool_destroy(pworker);

    clean_child_exit(0, worker_slot);
}

static int make_worker(int slot)
{
    thread_id tid;

    if (slot + 1 > ap_max_child_assigned)
            ap_max_child_assigned = slot + 1;

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL);

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL);
        return 0;
    }

    tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
                       (void *)slot);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
            "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec*)NULL);

        sleep(10);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
Example #20
void status_to_name(lua_State *L, apr_status_t status)
{
  /* Use a switch statement for fast number to string mapping: */
  switch (status) {
#   ifdef APR_ANONYMOUS
    case APR_ANONYMOUS:
      lua_pushliteral(L, "ANONYMOUS");
      return;
#   endif
#   ifdef APR_BADARG
    case APR_BADARG:
      lua_pushliteral(L, "BADARG");
      return;
#   endif
#   ifdef APR_BADCH
    case APR_BADCH:
      lua_pushliteral(L, "BADCH");
      return;
#   endif
#   ifdef APR_DETACH
    case APR_DETACH:
      lua_pushliteral(L, "DETACH");
      return;
#   endif
#   ifdef APR_EABOVEROOT
    case APR_EABOVEROOT:
      lua_pushliteral(L, "EABOVEROOT");
      return;
#   endif
#   ifdef APR_EABSOLUTE
    case APR_EABSOLUTE:
      lua_pushliteral(L, "EABSOLUTE");
      return;
#   endif
#   ifdef APR_EACCES
    case APR_EACCES:
      lua_pushliteral(L, "EACCES");
      return;
#   endif
#   ifdef APR_EAFNOSUPPORT
    case APR_EAFNOSUPPORT:
      lua_pushliteral(L, "EAFNOSUPPORT");
      return;
#   endif
#   ifdef APR_EAGAIN
    case APR_EAGAIN:
      lua_pushliteral(L, "EAGAIN");
      return;
#   endif
#   ifdef APR_EBADDATE
    case APR_EBADDATE:
      lua_pushliteral(L, "EBADDATE");
      return;
#   endif
#   ifdef APR_EBADF
    case APR_EBADF:
      lua_pushliteral(L, "EBADF");
      return;
#   endif
#   ifdef APR_EBADIP
    case APR_EBADIP:
      lua_pushliteral(L, "EBADIP");
      return;
#   endif
#   ifdef APR_EBADMASK
    case APR_EBADMASK:
      lua_pushliteral(L, "EBADMASK");
      return;
#   endif
#   ifdef APR_EBADPATH
    case APR_EBADPATH:
      lua_pushliteral(L, "EBADPATH");
      return;
#   endif
#   ifdef APR_EBUSY
    case APR_EBUSY:
      lua_pushliteral(L, "EBUSY");
      return;
#   endif
#   ifdef APR_ECONNABORTED
    case APR_ECONNABORTED:
      lua_pushliteral(L, "ECONNABORTED");
      return;
#   endif
#   ifdef APR_ECONNREFUSED
    case APR_ECONNREFUSED:
      lua_pushliteral(L, "ECONNREFUSED");
      return;
#   endif
#   ifdef APR_ECONNRESET
    case APR_ECONNRESET:
      lua_pushliteral(L, "ECONNRESET");
      return;
#   endif
#   ifdef APR_EDSOOPEN
    case APR_EDSOOPEN:
      lua_pushliteral(L, "EDSOOPEN");
      return;
#   endif
#   ifdef APR_EEXIST
    case APR_EEXIST:
      lua_pushliteral(L, "EEXIST");
      return;
#   endif
#   ifdef APR_EFTYPE
    case APR_EFTYPE:
      lua_pushliteral(L, "EFTYPE");
      return;
#   endif
#   ifdef APR_EGENERAL
    case APR_EGENERAL:
      lua_pushliteral(L, "EGENERAL");
      return;
#   endif
#   ifdef APR_EHOSTUNREACH
    case APR_EHOSTUNREACH:
      lua_pushliteral(L, "EHOSTUNREACH");
      return;
#   endif
#   ifdef APR_EINCOMPLETE
    case APR_EINCOMPLETE:
      lua_pushliteral(L, "EINCOMPLETE");
      return;
#   endif
#   ifdef APR_EINIT
    case APR_EINIT:
      lua_pushliteral(L, "EINIT");
      return;
#   endif
#   ifdef APR_EINPROGRESS
    case APR_EINPROGRESS:
      lua_pushliteral(L, "EINPROGRESS");
      return;
#   endif
#   ifdef APR_EINTR
    case APR_EINTR:
      lua_pushliteral(L, "EINTR");
      return;
#   endif
#   ifdef APR_EINVAL
    case APR_EINVAL:
      lua_pushliteral(L, "EINVAL");
      return;
#   endif
#   ifdef APR_EINVALSOCK
    case APR_EINVALSOCK:
      lua_pushliteral(L, "EINVALSOCK");
      return;
#   endif
#   ifdef APR_EMFILE
    case APR_EMFILE:
      lua_pushliteral(L, "EMFILE");
      return;
#   endif
#   ifdef APR_EMISMATCH
    case APR_EMISMATCH:
      lua_pushliteral(L, "EMISMATCH");
      return;
#   endif
#   ifdef APR_ENAMETOOLONG
    case APR_ENAMETOOLONG:
      lua_pushliteral(L, "ENAMETOOLONG");
      return;
#   endif
#   ifdef APR_ENETUNREACH
    case APR_ENETUNREACH:
      lua_pushliteral(L, "ENETUNREACH");
      return;
#   endif
#   ifdef APR_ENFILE
    case APR_ENFILE:
      lua_pushliteral(L, "ENFILE");
      return;
#   endif
#   ifdef APR_ENODIR
    case APR_ENODIR:
      lua_pushliteral(L, "ENODIR");
      return;
#   endif
#   ifdef APR_ENOENT
    case APR_ENOENT:
      lua_pushliteral(L, "ENOENT");
      return;
#   endif
#   ifdef APR_ENOLOCK
    case APR_ENOLOCK:
      lua_pushliteral(L, "ENOLOCK");
      return;
#   endif
#   ifdef APR_ENOMEM
    case APR_ENOMEM:
      lua_pushliteral(L, "ENOMEM");
      return;
#   endif
#   ifdef APR_ENOPOLL
    case APR_ENOPOLL:
      lua_pushliteral(L, "ENOPOLL");
      return;
#   endif
#   ifdef APR_ENOPOOL
    case APR_ENOPOOL:
      lua_pushliteral(L, "ENOPOOL");
      return;
#   endif
#   ifdef APR_ENOPROC
    case APR_ENOPROC:
      lua_pushliteral(L, "ENOPROC");
      return;
#   endif
#   ifdef APR_ENOSHMAVAIL
    case APR_ENOSHMAVAIL:
      lua_pushliteral(L, "ENOSHMAVAIL");
      return;
#   endif
#   ifdef APR_ENOSOCKET
    case APR_ENOSOCKET:
      lua_pushliteral(L, "ENOSOCKET");
      return;
#   endif
#   ifdef APR_ENOSPC
    case APR_ENOSPC:
      lua_pushliteral(L, "ENOSPC");
      return;
#   endif
#   ifdef APR_ENOSTAT
    case APR_ENOSTAT:
      lua_pushliteral(L, "ENOSTAT");
      return;
#   endif
#   ifdef APR_ENOTDIR
    case APR_ENOTDIR:
      lua_pushliteral(L, "ENOTDIR");
      return;
#   endif
#   ifdef APR_ENOTEMPTY
    case APR_ENOTEMPTY:
      lua_pushliteral(L, "ENOTEMPTY");
      return;
#   endif
#   ifdef APR_ENOTENOUGHENTROPY
    case APR_ENOTENOUGHENTROPY:
      lua_pushliteral(L, "ENOTENOUGHENTROPY");
      return;
#   endif
#   ifdef APR_ENOTHDKEY
    case APR_ENOTHDKEY:
      lua_pushliteral(L, "ENOTHDKEY");
      return;
#   endif
#   ifdef APR_ENOTHREAD
    case APR_ENOTHREAD:
      lua_pushliteral(L, "ENOTHREAD");
      return;
#   endif
#   ifdef APR_ENOTIME
    case APR_ENOTIME:
      lua_pushliteral(L, "ENOTIME");
      return;
#   endif
#   ifdef APR_ENOTIMPL
    case APR_ENOTIMPL:
      lua_pushliteral(L, "ENOTIMPL");
      return;
#   endif
#   ifdef APR_ENOTSOCK
    case APR_ENOTSOCK:
      lua_pushliteral(L, "ENOTSOCK");
      return;
#   endif
#   ifdef APR_EOF
    case APR_EOF:
      lua_pushliteral(L, "EOF");
      return;
#   endif
#   ifdef APR_EPATHWILD
    case APR_EPATHWILD:
      lua_pushliteral(L, "EPATHWILD");
      return;
#   endif
#   ifdef APR_EPIPE
    case APR_EPIPE:
      lua_pushliteral(L, "EPIPE");
      return;
#   endif
#   ifdef APR_EPROC_UNKNOWN
    case APR_EPROC_UNKNOWN:
      lua_pushliteral(L, "EPROC_UNKNOWN");
      return;
#   endif
#   ifdef APR_ERELATIVE
    case APR_ERELATIVE:
      lua_pushliteral(L, "ERELATIVE");
      return;
#   endif
#   ifdef APR_ESPIPE
    case APR_ESPIPE:
      lua_pushliteral(L, "ESPIPE");
      return;
#   endif
#   ifdef APR_ESYMNOTFOUND
    case APR_ESYMNOTFOUND:
      lua_pushliteral(L, "ESYMNOTFOUND");
      return;
#   endif
#   ifdef APR_ETIMEDOUT
    case APR_ETIMEDOUT:
      lua_pushliteral(L, "ETIMEDOUT");
      return;
#   endif
#   ifdef APR_EXDEV
    case APR_EXDEV:
      lua_pushliteral(L, "EXDEV");
      return;
#   endif
#   ifdef APR_FILEBASED
    case APR_FILEBASED:
      lua_pushliteral(L, "FILEBASED");
      return;
#   endif
#   ifdef APR_INCHILD
    case APR_INCHILD:
      lua_pushliteral(L, "INCHILD");
      return;
#   endif
#   ifdef APR_INCOMPLETE
    case APR_INCOMPLETE:
      lua_pushliteral(L, "INCOMPLETE");
      return;
#   endif
#   ifdef APR_INPARENT
    case APR_INPARENT:
      lua_pushliteral(L, "INPARENT");
      return;
#   endif
#   ifdef APR_KEYBASED
    case APR_KEYBASED:
      lua_pushliteral(L, "KEYBASED");
      return;
#   endif
#   ifdef APR_NOTDETACH
    case APR_NOTDETACH:
      lua_pushliteral(L, "NOTDETACH");
      return;
#   endif
#   ifdef APR_NOTFOUND
    case APR_NOTFOUND:
      lua_pushliteral(L, "NOTFOUND");
      return;
#   endif
#   ifdef APR_SUCCESS
    case APR_SUCCESS:
      lua_pushliteral(L, "SUCCESS");
      return;
#   endif
#   ifdef APR_TIMEUP
    case APR_TIMEUP:
      lua_pushliteral(L, "TIMEUP");
      return;
#   endif
  }

  /* If none of the switch cases match we fall back to the following monstrosity :-) */
  if (0) ;
# ifdef APR_STATUS_IS_ANONYMOUS
  else if (APR_STATUS_IS_ANONYMOUS(status)) {
    lua_pushliteral(L, "ANONYMOUS");
    return;
  }
# endif
# ifdef APR_STATUS_IS_BADARG
  else if (APR_STATUS_IS_BADARG(status)) {
    lua_pushliteral(L, "BADARG");
    return;
  }
# endif
# ifdef APR_STATUS_IS_BADCH
  else if (APR_STATUS_IS_BADCH(status)) {
    lua_pushliteral(L, "BADCH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_DETACH
  else if (APR_STATUS_IS_DETACH(status)) {
    lua_pushliteral(L, "DETACH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EABOVEROOT
  else if (APR_STATUS_IS_EABOVEROOT(status)) {
    lua_pushliteral(L, "EABOVEROOT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EABSOLUTE
  else if (APR_STATUS_IS_EABSOLUTE(status)) {
    lua_pushliteral(L, "EABSOLUTE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EACCES
  else if (APR_STATUS_IS_EACCES(status)) {
    lua_pushliteral(L, "EACCES");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EAFNOSUPPORT
  else if (APR_STATUS_IS_EAFNOSUPPORT(status)) {
    lua_pushliteral(L, "EAFNOSUPPORT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EAGAIN
  else if (APR_STATUS_IS_EAGAIN(status)) {
    lua_pushliteral(L, "EAGAIN");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBADDATE
  else if (APR_STATUS_IS_EBADDATE(status)) {
    lua_pushliteral(L, "EBADDATE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBADF
  else if (APR_STATUS_IS_EBADF(status)) {
    lua_pushliteral(L, "EBADF");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBADIP
  else if (APR_STATUS_IS_EBADIP(status)) {
    lua_pushliteral(L, "EBADIP");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBADMASK
  else if (APR_STATUS_IS_EBADMASK(status)) {
    lua_pushliteral(L, "EBADMASK");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBADPATH
  else if (APR_STATUS_IS_EBADPATH(status)) {
    lua_pushliteral(L, "EBADPATH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EBUSY
  else if (APR_STATUS_IS_EBUSY(status)) {
    lua_pushliteral(L, "EBUSY");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ECONNABORTED
  else if (APR_STATUS_IS_ECONNABORTED(status)) {
    lua_pushliteral(L, "ECONNABORTED");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ECONNREFUSED
  else if (APR_STATUS_IS_ECONNREFUSED(status)) {
    lua_pushliteral(L, "ECONNREFUSED");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ECONNRESET
  else if (APR_STATUS_IS_ECONNRESET(status)) {
    lua_pushliteral(L, "ECONNRESET");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EDSOOPEN
  else if (APR_STATUS_IS_EDSOOPEN(status)) {
    lua_pushliteral(L, "EDSOOPEN");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EEXIST
  else if (APR_STATUS_IS_EEXIST(status)) {
    lua_pushliteral(L, "EEXIST");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EFTYPE
  else if (APR_STATUS_IS_EFTYPE(status)) {
    lua_pushliteral(L, "EFTYPE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EGENERAL
  else if (APR_STATUS_IS_EGENERAL(status)) {
    lua_pushliteral(L, "EGENERAL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EHOSTUNREACH
  else if (APR_STATUS_IS_EHOSTUNREACH(status)) {
    lua_pushliteral(L, "EHOSTUNREACH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINCOMPLETE
  else if (APR_STATUS_IS_EINCOMPLETE(status)) {
    lua_pushliteral(L, "EINCOMPLETE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINIT
  else if (APR_STATUS_IS_EINIT(status)) {
    lua_pushliteral(L, "EINIT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINPROGRESS
  else if (APR_STATUS_IS_EINPROGRESS(status)) {
    lua_pushliteral(L, "EINPROGRESS");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINTR
  else if (APR_STATUS_IS_EINTR(status)) {
    lua_pushliteral(L, "EINTR");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINVAL
  else if (APR_STATUS_IS_EINVAL(status)) {
    lua_pushliteral(L, "EINVAL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EINVALSOCK
  else if (APR_STATUS_IS_EINVALSOCK(status)) {
    lua_pushliteral(L, "EINVALSOCK");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EMFILE
  else if (APR_STATUS_IS_EMFILE(status)) {
    lua_pushliteral(L, "EMFILE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EMISMATCH
  else if (APR_STATUS_IS_EMISMATCH(status)) {
    lua_pushliteral(L, "EMISMATCH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENAMETOOLONG
  else if (APR_STATUS_IS_ENAMETOOLONG(status)) {
    lua_pushliteral(L, "ENAMETOOLONG");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENETUNREACH
  else if (APR_STATUS_IS_ENETUNREACH(status)) {
    lua_pushliteral(L, "ENETUNREACH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENFILE
  else if (APR_STATUS_IS_ENFILE(status)) {
    lua_pushliteral(L, "ENFILE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENODIR
  else if (APR_STATUS_IS_ENODIR(status)) {
    lua_pushliteral(L, "ENODIR");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOENT
  else if (APR_STATUS_IS_ENOENT(status)) {
    lua_pushliteral(L, "ENOENT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOLOCK
  else if (APR_STATUS_IS_ENOLOCK(status)) {
    lua_pushliteral(L, "ENOLOCK");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOMEM
  else if (APR_STATUS_IS_ENOMEM(status)) {
    lua_pushliteral(L, "ENOMEM");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOPOLL
  else if (APR_STATUS_IS_ENOPOLL(status)) {
    lua_pushliteral(L, "ENOPOLL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOPOOL
  else if (APR_STATUS_IS_ENOPOOL(status)) {
    lua_pushliteral(L, "ENOPOOL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOPROC
  else if (APR_STATUS_IS_ENOPROC(status)) {
    lua_pushliteral(L, "ENOPROC");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOSHMAVAIL
  else if (APR_STATUS_IS_ENOSHMAVAIL(status)) {
    lua_pushliteral(L, "ENOSHMAVAIL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOSOCKET
  else if (APR_STATUS_IS_ENOSOCKET(status)) {
    lua_pushliteral(L, "ENOSOCKET");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOSPC
  else if (APR_STATUS_IS_ENOSPC(status)) {
    lua_pushliteral(L, "ENOSPC");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOSTAT
  else if (APR_STATUS_IS_ENOSTAT(status)) {
    lua_pushliteral(L, "ENOSTAT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTDIR
  else if (APR_STATUS_IS_ENOTDIR(status)) {
    lua_pushliteral(L, "ENOTDIR");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTEMPTY
  else if (APR_STATUS_IS_ENOTEMPTY(status)) {
    lua_pushliteral(L, "ENOTEMPTY");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTENOUGHENTROPY
  else if (APR_STATUS_IS_ENOTENOUGHENTROPY(status)) {
    lua_pushliteral(L, "ENOTENOUGHENTROPY");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTHDKEY
  else if (APR_STATUS_IS_ENOTHDKEY(status)) {
    lua_pushliteral(L, "ENOTHDKEY");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTHREAD
  else if (APR_STATUS_IS_ENOTHREAD(status)) {
    lua_pushliteral(L, "ENOTHREAD");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTIME
  else if (APR_STATUS_IS_ENOTIME(status)) {
    lua_pushliteral(L, "ENOTIME");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTIMPL
  else if (APR_STATUS_IS_ENOTIMPL(status)) {
    lua_pushliteral(L, "ENOTIMPL");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ENOTSOCK
  else if (APR_STATUS_IS_ENOTSOCK(status)) {
    lua_pushliteral(L, "ENOTSOCK");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EOF
  else if (APR_STATUS_IS_EOF(status)) {
    lua_pushliteral(L, "EOF");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EPATHWILD
  else if (APR_STATUS_IS_EPATHWILD(status)) {
    lua_pushliteral(L, "EPATHWILD");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EPIPE
  else if (APR_STATUS_IS_EPIPE(status)) {
    lua_pushliteral(L, "EPIPE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EPROC_UNKNOWN
  else if (APR_STATUS_IS_EPROC_UNKNOWN(status)) {
    lua_pushliteral(L, "EPROC_UNKNOWN");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ERELATIVE
  else if (APR_STATUS_IS_ERELATIVE(status)) {
    lua_pushliteral(L, "ERELATIVE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ESPIPE
  else if (APR_STATUS_IS_ESPIPE(status)) {
    lua_pushliteral(L, "ESPIPE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ESYMNOTFOUND
  else if (APR_STATUS_IS_ESYMNOTFOUND(status)) {
    lua_pushliteral(L, "ESYMNOTFOUND");
    return;
  }
# endif
# ifdef APR_STATUS_IS_ETIMEDOUT
  else if (APR_STATUS_IS_ETIMEDOUT(status)) {
    lua_pushliteral(L, "ETIMEDOUT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_EXDEV
  else if (APR_STATUS_IS_EXDEV(status)) {
    lua_pushliteral(L, "EXDEV");
    return;
  }
# endif
# ifdef APR_STATUS_IS_FILEBASED
  else if (APR_STATUS_IS_FILEBASED(status)) {
    lua_pushliteral(L, "FILEBASED");
    return;
  }
# endif
# ifdef APR_STATUS_IS_INCHILD
  else if (APR_STATUS_IS_INCHILD(status)) {
    lua_pushliteral(L, "INCHILD");
    return;
  }
# endif
# ifdef APR_STATUS_IS_INCOMPLETE
  else if (APR_STATUS_IS_INCOMPLETE(status)) {
    lua_pushliteral(L, "INCOMPLETE");
    return;
  }
# endif
# ifdef APR_STATUS_IS_INPARENT
  else if (APR_STATUS_IS_INPARENT(status)) {
    lua_pushliteral(L, "INPARENT");
    return;
  }
# endif
# ifdef APR_STATUS_IS_KEYBASED
  else if (APR_STATUS_IS_KEYBASED(status)) {
    lua_pushliteral(L, "KEYBASED");
    return;
  }
# endif
# ifdef APR_STATUS_IS_NOTDETACH
  else if (APR_STATUS_IS_NOTDETACH(status)) {
    lua_pushliteral(L, "NOTDETACH");
    return;
  }
# endif
# ifdef APR_STATUS_IS_NOTFOUND
  else if (APR_STATUS_IS_NOTFOUND(status)) {
    lua_pushliteral(L, "NOTFOUND");
    return;
  }
# endif
# ifdef APR_STATUS_IS_TIMEUP
  else if (APR_STATUS_IS_TIMEUP(status)) {
    lua_pushliteral(L, "TIMEUP");
    return;
  }
# endif

  lua_pushinteger(L, status);
}
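
For reference, the same classification can be done without Lua: apr_strerror() yields a readable message and the APR_STATUS_IS_* macros perform the portable matching. A minimal standalone sketch (the helper name describe_status is made up for illustration, not part of the binding above):

#include <stdio.h>
#include <apr_general.h>
#include <apr_errno.h>

/* Print a short, portable description of an APR status code. */
static void describe_status(apr_status_t status)
{
    char buf[256];

    if (status == APR_SUCCESS) {
        puts("SUCCESS");
    }
    else if (APR_STATUS_IS_EAGAIN(status)) {
        puts("EAGAIN (would block, try again later)");
    }
    else if (APR_STATUS_IS_TIMEUP(status)) {
        puts("TIMEUP (operation timed out)");
    }
    else {
        /* Fall back to APR's own error text for everything else. */
        printf("%d: %s\n", (int)status, apr_strerror(status, buf, sizeof buf));
    }
}

int main(void)
{
    apr_initialize();
    describe_status(APR_SUCCESS);
    describe_status(APR_EAGAIN);
    describe_status(APR_TIMEUP);
    apr_terminate();
    return 0;
}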
Example No. 21
int main(int argc, const char *const *argv, const char *const *env)
{
	const char *db_filename = "last_message.db";
	int dying = 0;
	sqlite3 *sql = NULL;
	lmSQL lmdb = { 0 };

	apr_socket_t *acc = NULL;
	apr_pollset_t *pollset = NULL;

	APR_DO_OR_DIE(apr_app_initialize(&argc, &argv, &env));
	atexit(&apr_terminate);

	apr_pool_t *pool;
	APR_DO_OR_DIE(apr_pool_create(&pool, NULL));

	if (argc > 2) {
		return print_usage();
	}
	if (argc == 2) {
		if (strcmp(argv[1], "--help") == 0) {
			return print_usage();
		}
		db_filename = argv[1];
	}
	int err;
	if ((err = sqlite3_open(db_filename, &sql)) != SQLITE_OK) {
		fprintf(stderr, "Can't open DB (%s): %s.\n", db_filename,
		        sqlite3_errstr(err));
		return 1;
	}

	int rc;
	char *rc_msg;
	const char *CREATE_MESSAGE_TABLES =
	    "CREATE TABLE IF NOT EXISTS messages ("
	    "  msgid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,"
	    "  name BLOB NOT NULL,"
	    "  left INTEGER NOT NULL,"
	    "  message BLOB NOT NULL"
	    ");";
	rc = sqlite3_exec(sql, CREATE_MESSAGE_TABLES, NULL, NULL, &rc_msg);
	if (rc != SQLITE_OK) {
		FAIL("Can't create 'messages' table: %s.\n", rc_msg);
		sqlite3_close(sql);
		return 1;
	}
	const char *CREATE_MESSAGE_SEEN = "CREATE TABLE IF NOT EXISTS seen ("
	                                  "  name BLOB PRIMARY KEY NOT NULL,"
	                                  "  msgid INTEGER  NOT NULL"
	                                  ");";
	rc = sqlite3_exec(sql, CREATE_MESSAGE_SEEN, NULL, NULL, &rc_msg);
	if (rc != SQLITE_OK) {
		FAIL("Can't create 'seen' table: %s.\n", rc_msg);
		sqlite3_close(sql);
		return 1;
	}

	lmdb.sql = sql;

#define LM_SQLITE_PREP(thing, statement)                                       \
	do {                                                                   \
		int prep =                                                     \
		    sqlite3_prepare_v2(sql, (statement), -1, (thing), NULL);   \
		if (prep != SQLITE_OK) {                                       \
			FAIL("SQL compilation error: (%s) while compiling "    \
			     "(%s).\n",                                        \
			     sqlite3_errmsg(sql), statement);                  \
			goto die;                                              \
		}                                                              \
	} while (0)

	LM_SQLITE_PREP(&lmdb.put, "INSERT INTO messages (name, left, message) "
	                          "VALUES (?, ?, ?);");
	LM_SQLITE_PREP(&lmdb.get, "SELECT msgid, left, message "
	                          "FROM messages "
	                          "WHERE name = ? "
	                          "ORDER BY msgid ASC;");
	LM_SQLITE_PREP(&lmdb.seen_add, "INSERT INTO seen (name, msgid) "
	                               "VALUES (?, ?);");
	LM_SQLITE_PREP(&lmdb.seen_del, "DELETE FROM seen "
	                               "WHERE name = ?;");
	LM_SQLITE_PREP(&lmdb.seen_get, "SELECT msgid FROM seen "
	                               "WHERE name = ?;");
	LM_SQLITE_PREP(&lmdb.drop, "DELETE FROM messages "
	                           "WHERE msgid <= ? "
	                           "  AND name = ?;");

	APR_DO_OR_DIE(apr_socket_create(&acc, APR_INET, SOCK_STREAM, 0, pool));
	APR_DO_OR_DIE(apr_socket_opt_set(acc, APR_SO_REUSEADDR, 1));
	apr_sockaddr_t *l_addr;
	APR_DO_OR_DIE(
	    apr_sockaddr_info_get(&l_addr, NULL, APR_INET, 1066, 0, pool));
	APR_DO_OR_DIE(apr_socket_bind(acc, l_addr));
	APR_DO_OR_DIE(apr_socket_listen(acc, 8));

	apr_pollfd_t apr_accept_desc;
	memset(&apr_accept_desc, 0, sizeof apr_accept_desc);
	apr_accept_desc.p = pool;
	apr_accept_desc.desc_type = APR_POLL_SOCKET;
	apr_accept_desc.desc.s = acc;
	apr_accept_desc.reqevents = APR_POLLIN;
	apr_accept_desc.client_data = NULL;

	APR_DO_OR_DIE(apr_pollset_create(&pollset, 256, pool, 0));
	APR_DO_OR_DIE(apr_pollset_add(pollset, &apr_accept_desc));

	apr_signal(SIGTERM, shutdown_on_signal);
	apr_signal(SIGINT, shutdown_on_signal);

	apr_int32_t signalled_len = 0;
	const apr_pollfd_t *signalled = NULL;
	apr_status_t poll_err = 0;
	for (;;) {
		if (global_shutting_down) {
			goto goodnight;
		}
		if (!APR_STATUS_IS_EINTR(poll_err)) {
			APR_DO_OR_DIE(poll_err);
		}
		for (apr_int32_t i = 0; i < signalled_len; i++) {
			const apr_pollfd_t *s = signalled + i;
			if (s->desc.s == acc) {
				DEBUG("accept\n");
				do_client_accept(acc, pollset, pool, &lmdb);
			} else {
				do_client_state_machine(s, pollset);
			}
		}
		poll_err =
		    apr_pollset_poll(pollset, -1, &signalled_len, &signalled);
	}

	if (0) {
	goodnight:
		fprintf(stderr, "Goodnight!\n");
	}

	if (0) {
	die:
		dying = 1;
	}

	sqlite3_finalize(lmdb.put);
	lmdb.put = 0;
	sqlite3_finalize(lmdb.get);
	lmdb.get = 0;
	sqlite3_finalize(lmdb.seen_add);
	lmdb.seen_add = 0;
	sqlite3_finalize(lmdb.seen_del);
	lmdb.seen_del = 0;
	sqlite3_finalize(lmdb.seen_get);
	lmdb.seen_get = 0;
	sqlite3_finalize(lmdb.drop);
	lmdb.drop = 0;

	if (sqlite3_close(sql) != SQLITE_OK) {
		fprintf(stderr, "Error closing DB (%s): %s.\n", db_filename,
		        sqlite3_errmsg(sql));
		return 1;
	}

	apr_pollset_destroy(pollset);
	apr_socket_close(acc);

	return dying;
}
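
APR_DO_OR_DIE is defined elsewhere in that project and is not shown here; a plausible minimal stand-in (an assumption for illustration, not the project's actual macro) simply aborts with APR's error text on any non-success status:

#include <stdio.h>
#include <stdlib.h>
#include <apr_errno.h>

/* Hypothetical replacement for the project's APR_DO_OR_DIE macro: evaluate
 * an APR call once and exit with a diagnostic if it did not succeed. */
#define APR_DO_OR_DIE(call)                                                  \
    do {                                                                     \
        apr_status_t do_or_die_rv_ = (call);                                 \
        if (do_or_die_rv_ != APR_SUCCESS) {                                  \
            char do_or_die_buf_[256];                                        \
            fprintf(stderr, "%s failed: %s\n", #call,                        \
                    apr_strerror(do_or_die_rv_, do_or_die_buf_,              \
                                 sizeof do_or_die_buf_));                    \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)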
Example No. 22
static apr_status_t dispatch(proxy_conn_rec *conn, proxy_dir_conf *conf,
                             request_rec *r, apr_pool_t *setaside_pool,
                             apr_uint16_t request_id, const char **err,
                             int *bad_request, int *has_responded)
{
    apr_bucket_brigade *ib, *ob;
    int seen_end_of_headers = 0, done = 0, ignore_body = 0;
    apr_status_t rv = APR_SUCCESS;
    int script_error_status = HTTP_OK;
    conn_rec *c = r->connection;
    struct iovec vec[2];
    ap_fcgi_header header;
    unsigned char farray[AP_FCGI_HEADER_LEN];
    apr_pollfd_t pfd;
    int header_state = HDR_STATE_READING_HEADERS;
    char stack_iobuf[AP_IOBUFSIZE];
    apr_size_t iobuf_size = AP_IOBUFSIZE;
    char *iobuf = stack_iobuf;

    *err = NULL;
    if (conn->worker->s->io_buffer_size_set) {
        iobuf_size = conn->worker->s->io_buffer_size;
        iobuf = apr_palloc(r->pool, iobuf_size);
    }

    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = conn->sock;
    pfd.p = r->pool;
    pfd.reqevents = APR_POLLIN | APR_POLLOUT;

    ib = apr_brigade_create(r->pool, c->bucket_alloc);
    ob = apr_brigade_create(r->pool, c->bucket_alloc);

    while (! done) {
        apr_interval_time_t timeout;
        apr_size_t len;
        int n;

        /* We need SOME kind of timeout here, or virtually anything will
         * cause timeout errors. */
        apr_socket_timeout_get(conn->sock, &timeout);

        rv = apr_poll(&pfd, 1, &n, timeout);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            *err = "polling";
            break;
        }

        if (pfd.rtnevents & APR_POLLOUT) {
            apr_size_t to_send, writebuflen;
            int last_stdin = 0;
            char *iobuf_cursor;

            rv = ap_get_brigade(r->input_filters, ib,
                                AP_MODE_READBYTES, APR_BLOCK_READ,
                                iobuf_size);
            if (rv != APR_SUCCESS) {
                *err = "reading input brigade";
                *bad_request = 1;
                break;
            }

            if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(ib))) {
                last_stdin = 1;
            }

            writebuflen = iobuf_size;

            rv = apr_brigade_flatten(ib, iobuf, &writebuflen);

            apr_brigade_cleanup(ib);

            if (rv != APR_SUCCESS) {
                *err = "flattening brigade";
                break;
            }

            to_send = writebuflen;
            iobuf_cursor = iobuf;
            while (to_send > 0) {
                int nvec = 0;
                apr_size_t write_this_time;

                write_this_time =
                    to_send < AP_FCGI_MAX_CONTENT_LEN ? to_send : AP_FCGI_MAX_CONTENT_LEN;

                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       (apr_uint16_t)write_this_time, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[nvec].iov_base = (void *)farray;
                vec[nvec].iov_len = sizeof(farray);
                ++nvec;
                if (writebuflen) {
                    vec[nvec].iov_base = iobuf_cursor;
                    vec[nvec].iov_len = write_this_time;
                    ++nvec;
                }

                rv = send_data(conn, vec, nvec, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending stdin";
                    break;
                }

                to_send -= write_this_time;
                iobuf_cursor += write_this_time;
            }
            if (rv != APR_SUCCESS) {
                break;
            }

            if (last_stdin) {
                pfd.reqevents = APR_POLLIN; /* Done with input data */

                /* signal EOF (empty FCGI_STDIN) */
                ap_fcgi_fill_in_header(&header, AP_FCGI_STDIN, request_id,
                                       0, 0);
                ap_fcgi_header_to_array(&header, farray);

                vec[0].iov_base = (void *)farray;
                vec[0].iov_len = sizeof(farray);

                rv = send_data(conn, vec, 1, &len);
                if (rv != APR_SUCCESS) {
                    *err = "sending empty stdin";
                    break;
                }
            }
        }

        if (pfd.rtnevents & APR_POLLIN) {
            apr_size_t readbuflen;
            apr_uint16_t clen, rid;
            apr_bucket *b;
            unsigned char plen;
            unsigned char type, version;

            /* First, we grab the header... */
            rv = get_data_full(conn, (char *) farray, AP_FCGI_HEADER_LEN);
            if (rv != APR_SUCCESS) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01067)
                              "Failed to read FastCGI header");
                break;
            }

            ap_log_rdata(APLOG_MARK, APLOG_TRACE8, r, "FastCGI header",
                         farray, AP_FCGI_HEADER_LEN, 0);

            ap_fcgi_header_fields_from_array(&version, &type, &rid,
                                             &clen, &plen, farray);

            if (version != AP_FCGI_VERSION_1) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01068)
                              "Got bogus version %d", (int)version);
                rv = APR_EINVAL;
                break;
            }

            if (rid != request_id) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01069)
                              "Got bogus rid %d, expected %d",
                              rid, request_id);
                rv = APR_EINVAL;
                break;
            }

recv_again:
            if (clen > iobuf_size) {
                readbuflen = iobuf_size;
            } else {
                readbuflen = clen;
            }

            /* Now get the actual data.  Yes it sucks to do this in a second
             * recv call, this will eventually change when we move to real
             * nonblocking recv calls. */
            if (readbuflen != 0) {
                rv = get_data(conn, iobuf, &readbuflen);
                if (rv != APR_SUCCESS) {
                    *err = "reading response body";
                    break;
                }
            }

            switch (type) {
            case AP_FCGI_STDOUT:
                if (clen != 0) {
                    b = apr_bucket_transient_create(iobuf,
                                                    readbuflen,
                                                    c->bucket_alloc);

                    APR_BRIGADE_INSERT_TAIL(ob, b);

                    if (! seen_end_of_headers) {
                        int st = handle_headers(r, &header_state,
                                                iobuf, readbuflen);

                        if (st == 1) {
                            int status;
                            seen_end_of_headers = 1;

                            status = ap_scan_script_header_err_brigade_ex(r, ob,
                                NULL, APLOG_MODULE_INDEX);
                            /* suck in all the rest */
                            if (status != OK) {
                                apr_bucket *tmp_b;
                                apr_brigade_cleanup(ob);
                                tmp_b = apr_bucket_eos_create(c->bucket_alloc);
                                APR_BRIGADE_INSERT_TAIL(ob, tmp_b);

                                *has_responded = 1;
                                r->status = status;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing headers brigade to output filters";
                                }
                                else if (status == HTTP_NOT_MODIFIED) {
                                    /* The 304 response MUST NOT contain
                                     * a message-body, ignore it. */
                                    ignore_body = 1;
                                }
                                else {
                                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01070)
                                                    "Error parsing script headers");
                                    rv = APR_EINVAL;
                                }
                                break;
                            }

                            if (conf->error_override &&
                                ap_is_HTTP_ERROR(r->status)) {
                                /*
                                 * set script_error_status to discard
                                 * everything after the headers
                                 */
                                script_error_status = r->status;
                                /*
                                 * prevent ap_die() from treating this as a
                                 * recursive error, initially:
                                 */
                                r->status = HTTP_OK;
                            }

                            if (script_error_status == HTTP_OK
                                && !APR_BRIGADE_EMPTY(ob) && !ignore_body) {
                                /* Send the part of the body that we read while
                                 * reading the headers.
                                 */
                                *has_responded = 1;
                                rv = ap_pass_brigade(r->output_filters, ob);
                                if (rv != APR_SUCCESS) {
                                    *err = "passing brigade to output filters";
                                    break;
                                }
                            }
                            apr_brigade_cleanup(ob);

                            apr_pool_clear(setaside_pool);
                        }
                        else {
                            /* We're still looking for the end of the
                             * headers, so this part of the data will need
                             * to persist. */
                            apr_bucket_setaside(b, setaside_pool);
                        }
                    } else {
                        /* we've already passed along the headers, so now pass
                         * through the content.  we could simply continue to
                         * setaside the content and not pass until we see the
                         * 0 content-length (below, where we append the EOS),
                         * but that could be a huge amount of data; so we pass
                         * along smaller chunks
                         */
                        if (script_error_status == HTTP_OK && !ignore_body) {
                            *has_responded = 1;
                            rv = ap_pass_brigade(r->output_filters, ob);
                            if (rv != APR_SUCCESS) {
                                *err = "passing brigade to output filters";
                                break;
                            }
                        }
                        apr_brigade_cleanup(ob);
                    }

                    /* If we didn't read all the data, go back and get the
                     * rest of it. */
                    if (clen > readbuflen) {
                        clen -= readbuflen;
                        goto recv_again;
                    }
                } else {
                    /* XXX what if we haven't seen end of the headers yet? */

                    if (script_error_status == HTTP_OK) {
                        b = apr_bucket_eos_create(c->bucket_alloc);
                        APR_BRIGADE_INSERT_TAIL(ob, b);

                        *has_responded = 1;
                        rv = ap_pass_brigade(r->output_filters, ob);
                        if (rv != APR_SUCCESS) {
                            *err = "passing brigade to output filters";
                            break;
                        }
                    }

                    /* XXX Why don't we cleanup here?  (logic from AJP) */
                }
                break;

            case AP_FCGI_STDERR:
                /* TODO: Should probably clean up this logging a bit... */
                if (clen) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01071)
                                  "Got error '%.*s'", (int)readbuflen, iobuf);
                }

                if (clen > readbuflen) {
                    clen -= readbuflen;
                    goto recv_again;
                }
                break;

            case AP_FCGI_END_REQUEST:
                done = 1;
                break;

            default:
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(01072)
                              "Got bogus record %d", type);
                break;
            }
            /* Leave on above switch's inner error. */
            if (rv != APR_SUCCESS) {
                break;
            }

            if (plen) {
                rv = get_data_full(conn, iobuf, plen);
                if (rv != APR_SUCCESS) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02537)
                                  "Error occurred reading padding");
                    break;
                }
            }
        }
    }

    apr_brigade_destroy(ib);
    apr_brigade_destroy(ob);

    if (script_error_status != HTTP_OK) {
        ap_die(script_error_status, r); /* send ErrorDocument */
        *has_responded = 1;
    }

    return rv;
}
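
For context, the 8-byte header that dispatch() serializes into farray follows the FastCGI 1.0 record layout. A hand-packed sketch of that layout (illustrative only; the module itself keeps using ap_fcgi_fill_in_header() and ap_fcgi_header_to_array()):

/* Pack a FastCGI 1.0 record header by hand; offsets follow the spec. */
enum { FCGI_HEADER_LEN = 8, FCGI_VERSION_1 = 1, FCGI_STDIN = 5 };

static void fcgi_pack_header(unsigned char out[FCGI_HEADER_LEN],
                             unsigned char type, unsigned int request_id,
                             unsigned int content_len, unsigned char padding)
{
    out[0] = FCGI_VERSION_1;
    out[1] = type;                                        /* e.g. FCGI_STDIN */
    out[2] = (unsigned char)((request_id >> 8) & 0xff);   /* requestIdB1 */
    out[3] = (unsigned char)(request_id & 0xff);          /* requestIdB0 */
    out[4] = (unsigned char)((content_len >> 8) & 0xff);  /* contentLengthB1 */
    out[5] = (unsigned char)(content_len & 0xff);         /* contentLengthB0 */
    out[6] = padding;                                     /* paddingLength */
    out[7] = 0;                                           /* reserved */
}

An empty FCGI_STDIN record (content_len == 0) is the end-of-stream marker the loop above sends once the last chunk of the request body has gone out.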
Example No. 23
lt_http_status_t lt_http_server_run( lt_http_server_t * server )
{
    apr_pool_t * pool = NULL;
    apr_socket_t * client = NULL;
    char error[1025];
    memset( error, 0, 1025 );

    if( server == NULL ) return LT_HTTP_INVALID_ARG;

    /* prepare connection pool */
    apr_pool_create( &pool, server->pool );

    /* make the socket non-blocking */
    if( APR_SUCCESS != apr_socket_opt_set( server->socket,
                    APR_SO_NONBLOCK, 1 ) ) {
        my_perror( "ERROR: apr_socket_opt_set failed with: " );
        return LT_HTTP_INVALID_ARG;
    }

    while( 1 ) {
        apr_status_t rv;

        /* reading a bool should be an atomic operation, so no locking is needed */
        if( server->stoprequested ) {
            break;
        }

        /* clear pool memory */
        apr_pool_clear( pool );

        /* accept new connection */
        rv = apr_socket_accept( &client, server->socket, pool );

        if( APR_STATUS_IS_EAGAIN( rv ) || APR_STATUS_IS_EINTR( rv ) ) {
            /* sleep for 100ms before accepting new client */
            apr_sleep( 100 * 1000 );
            continue;
        }

        if( APR_SUCCESS != rv ) {
            my_perror( "ERROR: apr_socket_accept failed with: " );
            continue;
        }

        /* determine client address */
        {
            apr_sockaddr_t * sa = NULL;
            char * ip = NULL;
            if( APR_SUCCESS != apr_socket_addr_get( &sa,
                            APR_REMOTE, client ) ) {
                my_perror( "ERROR: apr_socket_addr_get failed with: " );
                apr_socket_close( client );
                continue;
            }
            if( APR_SUCCESS != apr_sockaddr_ip_get( &ip, sa ) ) {
                my_perror( "ERROR: apr_sockaddr_ip_get failed with: " );
                apr_socket_close( client );
                continue;
            }
        }


        /* immediately start sending HTTP response headers */
        {
            char * headers = apr_pstrcat( pool,
                    "HTTP/1.0 200 OK\r\n"
                    "Content-Length: ",
                    apr_ltoa( pool, server->finfo.size ),
                    "\r\n",
                    "Content-Type: application/octet-stream;"
                    " charset=utf-8\r\n",
                    "Connection: Close\r\n",
                    "\r\n",
                    NULL );
            apr_size_t headers_size = strlen( headers );
            if( APR_SUCCESS != apr_socket_send(
                            client, headers, &headers_size ) ) {
                my_perror( "ERROR: apr_socket_send failed with: " );
                apr_socket_close( client );
                continue;
            }
        }

        /* send file contents */
        {
            apr_off_t offset = 0;
            apr_size_t len = server->finfo.size;

            if( APR_SUCCESS != apr_socket_sendfile(
                            client, server->file, NULL,
                            &offset, &len, 0 ) ) {
                my_perror( "ERROR: apr_socket_sendfile failed with: " );
                apr_socket_close( client );
                continue;
            }
        }

        /* read and discard all headers */
        {
            apr_status_t rv;

            /* set a 2 second receive timeout on the client socket */
            if( APR_SUCCESS != apr_socket_timeout_set( client,
                            2 * 1000 * 1000 ) ) {
                my_perror( "ERROR: apr_socket_timeout_set failed with: " );
                apr_socket_close( client );
                continue;
            }

            /* read all data until 2 sec timeout or eof, then proceed to close */
            do {
                char buffer[1024];
                apr_size_t len = 1024;
                rv = apr_socket_recv( client, buffer, &len );

                if( APR_STATUS_IS_TIMEUP( rv ) || APR_STATUS_IS_EOF( rv ) ) {
                    break;
                }
            } while( 1 );
        }

        /* close our side of connection */
        if( APR_SUCCESS !=
                    apr_socket_shutdown( client, APR_SHUTDOWN_WRITE ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase
             * my_perror( "ERROR: apr_socket_shutdown(WRITE) failed with: " );
             */
            apr_socket_close( client );
            continue;
        }

        /* close other side of connection */
        if( APR_SUCCESS !=
                    apr_socket_shutdown( client, APR_SHUTDOWN_READ ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase
             * my_perror( "ERROR: apr_socket_shutdown(READ) failed with: " );
             */
            apr_socket_close( client );
            continue;
        }

        /* close socket */
        if( APR_SUCCESS != apr_socket_close( client ) ) {
            /* we actually don't care about errors arriving during shutdown
             * phase
             * my_perror( "ERROR: apr_socket_close failed with: " );
             */
            continue;
        }
    }

    return LT_HTTP_SUCCESS;
}
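
Two socket modes carry this server: a non-blocking listener that is polled by sleeping, and a client socket with a finite receive timeout. A small sketch of both knobs, assuming only the APR network API already used above:

#include <apr_network_io.h>
#include <apr_time.h>

/* Listener side: accept() returns immediately (possibly with EAGAIN). */
static apr_status_t make_listener_nonblocking(apr_socket_t *listener)
{
    return apr_socket_opt_set(listener, APR_SO_NONBLOCK, 1);
}

/* Client side: reads and writes give up after two seconds, which is what
 * the 2 * 1000 * 1000 microseconds above request. */
static apr_status_t set_two_second_timeout(apr_socket_t *client)
{
    return apr_socket_timeout_set(client, apr_time_from_sec(2));
}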
Example No. 24
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_status_t status;
    int i;
    ap_listen_rec *lr;
    apr_pollset_t *pollset;
    ap_sb_handle_t *sbh;
    apr_bucket_alloc_t *bucket_alloc;
    int last_poll_idx = 0;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't initialize cross-process lock in child "
                     "(%s) (%d)", ap_lock_fname, ap_accept_lock_mech);
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    status = apr_pollset_create(&pollset, num_listensocks, pchild, 0);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't create pollset in child; check system or user limits");
        clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */
    }

    for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, &pfd);
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    /* die_now is set when AP_SIG_GRACEFUL is received in the child;
     * shutdown_pending is set when SIGTERM is received when running
     * in single process mode.  */
    while (!die_now && !shutdown_pending) {
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                /* check for termination first so we don't sleep for a while in
                 * poll if already signalled
                 */
                if (one_process && shutdown_pending) {
                    SAFE_ACCEPT(accept_mutex_off());
                    return;
                }
                else if (die_now) {
                    /* In graceful stop/restart; drop the mutex
                     * and terminate the child. */
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(0);
                }
                /* timeout == 10 seconds to avoid a hang at graceful restart/stop
                 * caused by the closing of sockets by the signal handler
                 */
                status = apr_pollset_poll(pollset, apr_time_from_sec(10), 
                                          &numdesc, &pdesc);
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_TIMEUP(status) ||
                        APR_STATUS_IS_EINTR(status)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, status,
                                 ap_server_conf, "apr_pollset_poll: (listen)");
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                goto got_fd;
            }
        }
    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
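
The listener wait in this child reduces to a small apr_pollset pattern; a trimmed sketch with a single listener and a 10 second timeout (error handling shortened for brevity):

#include <apr_poll.h>
#include <apr_network_io.h>
#include <apr_time.h>

/* Wait up to 10 seconds for the listener to become readable. */
static apr_status_t wait_for_listener(apr_socket_t *listener, apr_pool_t *pool)
{
    apr_pollset_t *pollset;
    apr_pollfd_t pfd = { 0 };
    const apr_pollfd_t *ready;
    apr_int32_t nready;
    apr_status_t rv;

    rv = apr_pollset_create(&pollset, 1, pool, 0);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    pfd.p = pool;
    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = listener;
    pfd.reqevents = APR_POLLIN;
    apr_pollset_add(pollset, &pfd);

    do {
        rv = apr_pollset_poll(pollset, apr_time_from_sec(10), &nready, &ready);
    } while (APR_STATUS_IS_EINTR(rv));

    /* APR_SUCCESS: readable; APR_TIMEUP: nothing arrived within 10 seconds. */
    return rv;
}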
Example No. 25
void lfd_listen(apr_pool_t * mp)
{
	apr_pool_t		* thd_pool = NULL;
	apr_socket_t		* listen_sock;
	apr_socket_t		* client_sock;
	apr_thread_t *thd;
	apr_threadattr_t	* thattr;
	apr_pollfd_t		  pfd;
	apr_interval_time_t	  timeout = lfd_config_max_acceptloop_timeout;
	apr_int32_t		  nsds;
	apr_status_t		  rc;

	create_listen_socket(&listen_sock, mp);
	if(NULL == listen_sock)
	{
		lfd_log(LFD_ERROR, "lfd_listen: could not create listen socket");
		return;
	}

	rc = apr_threadattr_create(&thattr, mp);
	if(APR_SUCCESS != rc)
	{
		lfd_log_apr_err(rc, "apr_threadattr_create failed");
		return;
	}
	while(1)
	{
		//###: Should I allocate the pool as a subpool of the root pool?
		//What is the amount allocated per pool and is it freed when the child pool is destroyed?
		//rc = apr_pool_create(&thd_pool, mp);
		if(NULL == thd_pool)
		{
			rc = apr_pool_create(&thd_pool, NULL);
			if(APR_SUCCESS != rc)
			{
				lfd_log_apr_err(rc, "apr_pool_create of thd_pool failed");
				continue;
			}
		}

		create_pollfd_from_socket(&pfd, listen_sock, mp);

		rc = apr_poll(&pfd, 1, &nsds, timeout);
		if((APR_SUCCESS != rc) && (!APR_STATUS_IS_TIMEUP(rc)) && (!APR_STATUS_IS_EINTR(rc)))
		{
			//break - an unrecoverable error occurred
			lfd_log_apr_err(rc, "apr_poll failed");
			break;
		}

		if(apr_atomic_read32(&ftp_must_exit))
		{
			//if the flag says we must exit, we comply, so bye bye!
			return;
		}
		if(APR_STATUS_IS_TIMEUP(rc) || APR_STATUS_IS_EINTR(rc) || (APR_POLLIN != pfd.rtnevents))
		{
			continue;
		}


		rc = apr_socket_accept(&client_sock, listen_sock, thd_pool);
		if(APR_SUCCESS != rc)
		{
			//###: For which error code must we break out of the loop?
			lfd_log_apr_err(rc, "apr_socket_accept failed");
			if(APR_STATUS_IS_EAGAIN(rc))
			{
				lfd_log(LFD_ERROR, "lfd_listen: APR_STATUS_IS_EAGAIN");
			}
			continue;
		}
		rc = apr_thread_create(&thd, thattr, &lfd_worker_protocol_main, (void*)client_sock, thd_pool);
		if(APR_SUCCESS != rc)
		{
			lfd_log_apr_err(rc, "apr_thread_create failed");
			apr_socket_close(client_sock);
			continue;
		}
		thd_pool = NULL;
	}
}
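
apr_thread_create() above hands the accepted socket to lfd_worker_protocol_main() as the thread argument. A sketch of the shape such a worker has to take (the body is an illustration, not the project's real protocol handler):

#include <apr_thread_proc.h>
#include <apr_network_io.h>

static void * APR_THREAD_FUNC worker_main(apr_thread_t *thd, void *data)
{
    apr_socket_t *client_sock = data;   /* passed as (void*)client_sock */
    char buf[512];
    apr_size_t len = sizeof(buf);

    /* Read one chunk and drop the connection; a real worker would loop
     * over the protocol until the client is done. */
    if (apr_socket_recv(client_sock, buf, &len) == APR_SUCCESS) {
        /* ... handle `len` bytes of protocol data ... */
    }

    apr_socket_close(client_sock);
    apr_thread_exit(thd, APR_SUCCESS);
    return NULL;
}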
Example No. 26
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */
    
    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
	clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                            sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;
    
    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
	/*
	 * (Re)initialize this child to a pre-connection state.
	 */

	current_conn = NULL;

	apr_pool_clear(ptrans);

	if ((ap_max_requests_per_child > 0
	     && requests_this_child++ >= ap_max_requests_per_child)) {
	    clean_child_exit(0);
	}

	(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

	/*
	 * Wait for an acceptable connection to arrive.
	 */

	/* Lock around "accept", if necessary */
	SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
	    for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
    	            /* Single Unix documents select as returning errnos
    	             * EBADF, EINTR, and EINVAL... and in none of those
    	             * cases does it make sense to continue.  In fact
    	             * on Linux 2.0.x we seem to end up with EFAULT
    	             * occasionally, and we'd loop forever due to it.
    	             */
    	            ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                             "apr_poll: (listen)");
    	            clean_child_exit(1);
                }
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }
    got_fd:
	/* if we accept() something we don't want to die, so we have to
	 * defer the exit
	 */
        status = listensocks[offset].accept_func(&csd, 
                                                 &listensocks[offset], ptrans);
        SAFE_ACCEPT(accept_mutex_off());	/* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

	/*
	 * We now have a connection, so set it up with the appropriate
	 * socket options, file descriptors, and read/write buffers.
	 */

	current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        
        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
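
SAFE_ACCEPT(accept_mutex_on()) and accept_mutex_off() above serialize accept() across the children with a cross-process mutex. A minimal sketch of that pattern with illustrative names (creation of the mutex via apr_proc_mutex_create() is omitted):

#include <apr_proc_mutex.h>
#include <apr_network_io.h>

/* Accept one connection while holding the cross-process accept mutex. */
static apr_status_t accept_serialized(apr_proc_mutex_t *accept_mutex,
                                      apr_socket_t **csd,
                                      apr_socket_t *listener,
                                      apr_pool_t *ptrans)
{
    apr_status_t rv = apr_proc_mutex_lock(accept_mutex);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_socket_accept(csd, listener, ptrans);
    /* Release the mutex whether or not accept() succeeded. */
    apr_proc_mutex_unlock(accept_mutex);
    return rv;
}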
Example No. 27
AP_DECLARE(apr_status_t) ap_unixd_accept(void **accepted, ap_listen_rec *lr,
                                         apr_pool_t *ptrans)
{
    apr_socket_t *csd;
    apr_status_t status;
#ifdef _OSD_POSIX
    int sockdes;
#endif

    *accepted = NULL;
    status = apr_socket_accept(&csd, lr->sd, ptrans);
    if (status == APR_SUCCESS) {
        *accepted = csd;
#ifdef _OSD_POSIX
        apr_os_sock_get(&sockdes, csd);
        if (sockdes >= FD_SETSIZE) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf, APLOGNO(02176)
                         "new file descriptor %d is too large; you probably need "
                         "to rebuild Apache with a larger FD_SETSIZE "
                         "(currently %d)",
                         sockdes, FD_SETSIZE);
            apr_socket_close(csd);
            return APR_EINTR;
        }
#endif
        return APR_SUCCESS;
    }

    if (APR_STATUS_IS_EINTR(status)) {
        return status;
    }
    /* Our old behaviour here was to continue after accept()
     * errors.  But this leads us into lots of troubles
     * because most of the errors are quite fatal.  For
     * example, EMFILE can be caused by slow descriptor
     * leaks (say in a 3rd party module, or libc).  It's
     * foolish for us to continue after an EMFILE.  We also
     * seem to tickle kernel bugs on some platforms which
     * lead to never-ending loops here.  So it seems best
     * to just exit in most cases.
     */
    switch (status) {
#if defined(HPUX11) && defined(ENOBUFS)
        /* On HPUX 11.x, the 'ENOBUFS, No buffer space available'
         * error occurs because the accept() cannot complete.
         * You will not see ENOBUFS with 10.20 because the kernel
         * hides any occurrence from being returned to user space.
         * ENOBUFS with 11.x's TCP/IP stack is possible, and could
         * occur intermittently. As a work-around, we are going to
         * ignore ENOBUFS.
         */
        case ENOBUFS:
#endif

#ifdef EPROTO
        /* EPROTO on certain older kernels really means
         * ECONNABORTED, so we need to ignore it for them.
         * See discussion in new-httpd archives nh.9701
         * search for EPROTO.
         *
         * Also see nh.9603, search for EPROTO:
         * There is potentially a bug in Solaris 2.x x<6,
         * and other boxes that implement tcp sockets in
         * userland (i.e. on top of STREAMS).  On these
         * systems, EPROTO can actually result in a fatal
         * loop.  See PR#981 for example.  It's hard to
         * handle both uses of EPROTO.
         */
        case EPROTO:
#endif
#ifdef ECONNABORTED
        case ECONNABORTED:
#endif
        /* Linux generates the rest of these, other tcp
         * stacks (i.e. bsd) tend to hide them behind
         * getsockopt() interfaces.  They occur when
         * the net goes sour or the client disconnects
         * after the three-way handshake has been done
         * in the kernel but before userland has picked
         * up the socket.
         */
#ifdef ECONNRESET
        case ECONNRESET:
#endif
#ifdef ETIMEDOUT
        case ETIMEDOUT:
#endif
#ifdef EHOSTUNREACH
        case EHOSTUNREACH:
#endif
#ifdef ENETUNREACH
        case ENETUNREACH:
#endif
        /* EAGAIN/EWOULDBLOCK can be returned on BSD-derived
         * TCP stacks when the connection is aborted before
         * we call connect, but only because our listener
         * sockets are non-blocking (AP_NONBLOCK_WHEN_MULTI_LISTEN)
         */
#ifdef EAGAIN
        case EAGAIN:
#endif
#ifdef EWOULDBLOCK
#if !defined(EAGAIN) || EAGAIN != EWOULDBLOCK
        case EWOULDBLOCK:
#endif
#endif
            break;
#ifdef ENETDOWN
        case ENETDOWN:
            /*
             * When the network layer has been shut down, there
             * is not much use in simply exiting: the parent
             * would simply re-create us (and we'd fail again).
             * Use the CHILDFATAL code to tear the server down.
             * @@@ Martin's idea for possible improvement:
             * A different approach would be to define
             * a new APEXIT_NETDOWN exit code, the reception
             * of which would make the parent shutdown all
             * children, then idle-loop until it detected that
             * the network is up again, and restart the children.
             * Ben Hyde noted that temporary ENETDOWN situations
             * occur in mobile IP.
             */
            ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, APLOGNO(02177)
                         "apr_socket_accept: giving up.");
            return APR_EGENERAL;
#endif /*ENETDOWN*/

        default:
            /* If the socket has been closed in ap_close_listeners()
             * by the restart/stop action, we may get EBADF.
             * Do not print an error in this case.
             */
            if (!lr->active) {
                ap_log_error(APLOG_MARK, APLOG_DEBUG, status, ap_server_conf, APLOGNO(02178)
                             "apr_socket_accept failed for inactive listener");
                return status;
            }
            ap_log_error(APLOG_MARK, APLOG_ERR, status, ap_server_conf, APLOGNO(02179)
                         "apr_socket_accept: (client socket)");
            return APR_EGENERAL;
    }
    return status;
}
/*
 * process the request and write the response.
 */
static int proxy_wstunnel_request(apr_pool_t *p, request_rec *r,
                                proxy_conn_rec *conn,
                                proxy_worker *worker,
                                proxy_server_conf *conf,
                                apr_uri_t *uri,
                                char *url, char *server_portstr)
{
    apr_status_t rv;
    apr_pollset_t *pollset;
    apr_pollfd_t pollfd;
    const apr_pollfd_t *signalled;
    apr_int32_t pollcnt, pi;
    apr_int16_t pollevent;
    conn_rec *c = r->connection;
    apr_socket_t *sock = conn->sock;
    conn_rec *backconn = conn->connection;
    char *buf;
    apr_bucket_brigade *header_brigade;
    apr_bucket *e;
    char *old_cl_val = NULL;
    char *old_te_val = NULL;
    apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
    apr_socket_t *client_socket = ap_get_conn_socket(c);
    int done = 0, replied = 0;

    header_brigade = apr_brigade_create(p, backconn->bucket_alloc);

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "sending request");

    rv = ap_proxy_create_hdrbrgd(p, header_brigade, r, conn,
                                 worker, conf, uri, url, server_portstr,
                                 &old_cl_val, &old_te_val);
    if (rv != OK) {
        return rv;
    }

    buf = apr_pstrdup(p, "Upgrade: WebSocket" CRLF "Connection: Upgrade" CRLF CRLF);
    ap_xlate_proto_to_ascii(buf, strlen(buf));
    e = apr_bucket_pool_create(buf, strlen(buf), p, c->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(header_brigade, e);

    if ((rv = ap_proxy_pass_brigade(backconn->bucket_alloc, r, conn, backconn,
                                    header_brigade, 1)) != OK)
        return rv;

    apr_brigade_cleanup(header_brigade);

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");

    if ((rv = apr_pollset_create(&pollset, 2, p, 0)) != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02443)
                      "error apr_pollset_create()");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

#if 0
    apr_socket_opt_set(sock, APR_SO_NONBLOCK, 1);
    apr_socket_opt_set(sock, APR_SO_KEEPALIVE, 1);
    apr_socket_opt_set(client_socket, APR_SO_NONBLOCK, 1);
    apr_socket_opt_set(client_socket, APR_SO_KEEPALIVE, 1);
#endif

    pollfd.p = p;
    pollfd.desc_type = APR_POLL_SOCKET;
    pollfd.reqevents = APR_POLLIN | APR_POLLHUP;
    pollfd.desc.s = sock;
    pollfd.client_data = NULL;
    apr_pollset_add(pollset, &pollfd);

    pollfd.desc.s = client_socket;
    apr_pollset_add(pollset, &pollfd);

    ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");

    r->output_filters = c->output_filters;
    r->proto_output_filters = c->output_filters;
    r->input_filters = c->input_filters;
    r->proto_input_filters = c->input_filters;

    /* This handler should take care of the entire connection; make it so that
     * nothing else is attempted on the connection after returning. */
    c->keepalive = AP_CONN_CLOSE;

    do { /* Loop until done (one side closes the connection, or an error) */
        rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02444) "error apr_poll()");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(02445)
                      "woke from poll(), i=%d", pollcnt);

        for (pi = 0; pi < pollcnt; pi++) {
            const apr_pollfd_t *cur = &signalled[pi];

            if (cur->desc.s == sock) {
                pollevent = cur->rtnevents;
                if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(02446)
                                  "sock was readable");
                    done |= ap_proxy_transfer_between_connections(r, backconn,
                                                                  c,
                                                                  header_brigade,
                                                                  bb, "sock",
                                                                  NULL,
                                                                  AP_IOBUFSIZE,
                                                                  0)
                                                                 != APR_SUCCESS;
                }
                else if (pollevent & APR_POLLERR) {
                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02447)
                            "error on backconn");
                    backconn->aborted = 1;
                    done = 1;
                }
                else { 
                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02605)
                            "unknown event on backconn %d", pollevent);
                    done = 1;
                }
            }
            else if (cur->desc.s == client_socket) {
                pollevent = cur->rtnevents;
                if (pollevent & (APR_POLLIN | APR_POLLHUP)) {
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(02448)
                                  "client was readable");
                    done |= ap_proxy_transfer_between_connections(r, c,
                                                                  backconn, bb,
                                                                  header_brigade,
                                                                  "client",
                                                                  &replied,
                                                                  AP_IOBUFSIZE,
                                                                  0)
                                                                 != APR_SUCCESS;
                }
                else if (pollevent & APR_POLLERR) {
                    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, APLOGNO(02607)
                            "error on client conn");
                    c->aborted = 1;
                    done = 1;
                }
                else { 
                    ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(02606)
                            "unknown event on client conn %d", pollevent);
                    done = 1;
                }
            }
            else {
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02449)
                              "unknown socket in pollset");
                done = 1;
            }

        }
    } while (!done);

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                  "finished with poll() - cleaning up");

    if (!replied) {
        return HTTP_BAD_GATEWAY;
    }

    return OK;
}
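
proxy_wstunnel_request() above drives its whole tunnel through one poll idiom: block indefinitely, restart transparently when APR_STATUS_IS_EINTR() says the wait was interrupted, and dispatch each signalled descriptor. The sketch below reduces that idiom to a reusable shape; poll_until_done() and fd_handler_fn are illustrative names, not part of mod_proxy.

/*
 * Hedged sketch of the EINTR-tolerant poll loop used above.
 */
#include <apr_poll.h>
#include <apr_errno.h>

typedef int (*fd_handler_fn)(const apr_pollfd_t *fd);  /* returns non-zero when done */

static apr_status_t poll_until_done(apr_pollset_t *pollset, fd_handler_fn handler)
{
    const apr_pollfd_t *signalled;
    apr_int32_t num, i;
    apr_status_t rv;
    int done = 0;

    do {
        rv = apr_pollset_poll(pollset, -1, &num, &signalled);
        if (rv != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;               /* a signal woke us up: poll again */
            }
            return rv;                  /* real poll failure */
        }
        for (i = 0; i < num; i++) {
            done |= handler(&signalled[i]);
        }
    } while (!done);

    return APR_SUCCESS;
}
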
Ejemplo n.º 29
0
void ap_mpm_child_main(apr_pool_t *pconf)
{
    ap_listen_rec *lr = NULL;
    int requests_this_child = 0;
    int rv = 0;
    unsigned long ulTimes;
    int my_pid = getpid();
    ULONG rc, c;
    HQUEUE workq;
    apr_pollset_t *pollset;
    int num_listeners;
    TID server_maint_tid;
    void *sb_mem;

    /* Stop Ctrl-C/Ctrl-Break signals going to child processes */
    DosSetSignalExceptionFocus(0, &ulTimes);
    set_signals();

    /* Create pool for child */
    apr_pool_create(&pchild, pconf);

    ap_run_child_init(pchild, ap_server_conf);

    /* Create an event semaphore used to trigger other threads to shutdown */
    rc = DosCreateEventSem(NULL, &shutdown_event, 0, FALSE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf,
                     "unable to create shutdown semaphore, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Gain access to the scoreboard. */
    rc = DosGetNamedSharedMem(&sb_mem, ap_scoreboard_fname,
                              PAG_READ|PAG_WRITE);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf,
                     "scoreboard not readable in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_calc_scoreboard_size();
    ap_init_scoreboard(sb_mem);

    /* Gain access to the accept mutex */
    rc = DosOpenMutexSem(NULL, &ap_mpm_accept_mutex);

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf,
                     "accept mutex couldn't be accessed in child, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Find our pid in the scoreboard so we know what slot our parent allocated us */
    for (child_slot = 0; child_slot < HARD_SERVER_LIMIT && ap_scoreboard_image->parent[child_slot].pid != my_pid; child_slot++);

    if (child_slot == HARD_SERVER_LIMIT) {
        ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf,
                     "child pid not found in scoreboard, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_my_generation = ap_scoreboard_image->parent[child_slot].generation;
    memset(ap_scoreboard_image->servers[child_slot], 0, sizeof(worker_score) * HARD_THREAD_LIMIT);

    /* Set up an OS/2 queue for passing connections & termination requests
     * to worker threads
     */
    rc = DosCreateQueue(&workq, QUE_FIFO, apr_psprintf(pchild, "/queues/httpd/work.%d", my_pid));

    if (rc) {
        ap_log_error(APLOG_MARK, APLOG_ERR, APR_FROM_OS_ERROR(rc), ap_server_conf,
                     "unable to create work queue, exiting");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    /* Create initial pool of worker threads */
    for (c = 0; c < ap_min_spare_threads; c++) {
//        ap_scoreboard_image->servers[child_slot][c].tid = _beginthread(worker_main, NULL, 128*1024, (void *)c);
    }

    /* Start maintenance thread */
    server_maint_tid = _beginthread(server_maintenance, NULL, 32768, NULL);

    /* Set up poll */
    for (num_listeners = 0, lr = ap_listeners; lr; lr = lr->next) {
        num_listeners++;
    }

    apr_pollset_create(&pollset, num_listeners, pchild, 0);

    for (lr = ap_listeners; lr != NULL; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;
        apr_pollset_add(pollset, &pfd);
    }

    /* Main connection accept loop */
    do {
        apr_pool_t *pconn;
        worker_args_t *worker_args;
        int last_poll_idx = 0;

        apr_pool_create(&pconn, pchild);
        worker_args = apr_palloc(pconn, sizeof(worker_args_t));
        worker_args->pconn = pconn;

        if (num_listeners == 1) {
            rv = apr_socket_accept(&worker_args->conn_sd, ap_listeners->sd, pconn);
        } else {
            const apr_pollfd_t *poll_results;
            apr_int32_t num_poll_results;

            rc = DosRequestMutexSem(ap_mpm_accept_mutex, SEM_INDEFINITE_WAIT);

            if (shutdown_pending) {
                DosReleaseMutexSem(ap_mpm_accept_mutex);
                break;
            }

            rv = APR_FROM_OS_ERROR(rc);

            if (rv == APR_SUCCESS) {
                rv = apr_pollset_poll(pollset, -1, &num_poll_results, &poll_results);
                DosReleaseMutexSem(ap_mpm_accept_mutex);
            }

            if (rv == APR_SUCCESS) {
                if (last_poll_idx >= num_listeners) {
                    last_poll_idx = 0;
                }

                lr = poll_results[last_poll_idx++].client_data;
                rv = apr_socket_accept(&worker_args->conn_sd, lr->sd, pconn);
            }
        }

        if (rv != APR_SUCCESS) {
            if (!APR_STATUS_IS_EINTR(rv)) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                             "apr_socket_accept");
                clean_child_exit(APEXIT_CHILDFATAL);
            }
        } else {
            DosWriteQueue(workq, WORKTYPE_CONN, sizeof(worker_args_t), worker_args, 0);
            requests_this_child++;
        }

        if (ap_max_requests_per_child != 0 && requests_this_child >= ap_max_requests_per_child)
            break;
    } while (!shutdown_pending && ap_my_generation == ap_scoreboard_image->global->running_generation);

    ap_scoreboard_image->parent[child_slot].quiescing = 1;
    DosPostEventSem(shutdown_event);
    DosWaitThread(&server_maint_tid, DCWW_WAIT);

    if (is_graceful) {
        char someleft;

        /* tell our worker threads to exit */
        for (c=0; c<HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosWriteQueue(workq, WORKTYPE_EXIT, 0, NULL, 0);
            }
        }

        do {
            someleft = 0;

            for (c=0; c<HARD_THREAD_LIMIT; c++) {
                if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                    someleft = 1;
                    DosSleep(1000);
                    break;
                }
            }
        } while (someleft);
    } else {
        DosPurgeQueue(workq);

        for (c=0; c<HARD_THREAD_LIMIT; c++) {
            if (ap_scoreboard_image->servers[child_slot][c].status != SERVER_DEAD) {
                DosKillThread(ap_scoreboard_image->servers[child_slot][c].tid);
            }
        }
    }

    apr_pool_destroy(pchild);
}
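
When more than one listener is configured, ap_mpm_child_main() above takes the accept mutex, polls every listener, and then picks one of the signalled descriptors with a rotating index. A hedged sketch of that selection step follows; next_ready_listener() is an illustrative name, the rotation state is kept in a static purely for brevity, and the wrap-around is done against the signalled count so the index always stays in bounds. The caller casts the returned client_data back to its listener record and checks APR_STATUS_IS_EINTR() on *rv_out before treating a failure as fatal.

/*
 * Hedged sketch of round-robin listener selection from poll results.
 * Not the OS/2 MPM code itself.
 */
#include <apr_poll.h>

static void *next_ready_listener(apr_pollset_t *pollset, apr_status_t *rv_out)
{
    static apr_int32_t last_poll_idx = 0;
    const apr_pollfd_t *results;
    apr_int32_t num_results;

    *rv_out = apr_pollset_poll(pollset, -1, &num_results, &results);
    if (*rv_out != APR_SUCCESS) {
        return NULL;                    /* EINTR or a real poll failure */
    }

    if (last_poll_idx >= num_results) { /* stay inside the signalled set */
        last_poll_idx = 0;
    }
    return results[last_poll_idx++].client_data;  /* the listener record */
}
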
Ejemplo n.º 30
0
/* CONNECT handler */
static int proxy_connect_handler(request_rec *r, proxy_worker *worker,
                                 proxy_server_conf *conf,
                                 char *url, const char *proxyname,
                                 apr_port_t proxyport)
{
    connect_conf *c_conf =
        ap_get_module_config(r->server->module_config, &proxy_connect_module);

    apr_pool_t *p = r->pool;
    apr_socket_t *sock;
    conn_rec *c = r->connection;
    conn_rec *backconn;

    apr_bucket_brigade *bb = apr_brigade_create(p, c->bucket_alloc);
    apr_status_t rv;
    apr_size_t nbytes;
    char buffer[HUGE_STRING_LEN];
    apr_socket_t *client_socket = ap_get_conn_socket(c);
    int failed, rc;
    int client_error = 0;
    apr_pollset_t *pollset;
    apr_pollfd_t pollfd;
    const apr_pollfd_t *signalled;
    apr_int32_t pollcnt, pi;
    apr_int16_t pollevent;
    apr_sockaddr_t *nexthop;

    apr_uri_t uri;
    const char *connectname;
    int connectport = 0;

    /* is this for us? */
    if (r->method_number != M_CONNECT) {
        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "declining URL %s", url);
        return DECLINED;
    }
    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "serving URL %s", url);


    /*
     * Step One: Determine Who To Connect To
     *
     * Break up the URL to determine the host to connect to
     */

    /* we break the URL into host, port, uri */
    if (APR_SUCCESS != apr_uri_parse_hostinfo(p, url, &uri)) {
        return ap_proxyerror(r, HTTP_BAD_REQUEST,
                             apr_pstrcat(p, "URI cannot be parsed: ", url,
                                         NULL));
    }

    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01019)
                  "connecting %s to %s:%d", url, uri.hostname, uri.port);

    /* Determine host/port of next hop; from request URI or of a proxy. */
    connectname = proxyname ? proxyname : uri.hostname;
    connectport = proxyname ? proxyport : uri.port;

    /* Do a DNS lookup for the next hop */
    rv = apr_sockaddr_info_get(&nexthop, connectname, APR_UNSPEC, 
                               connectport, 0, p);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(02327)
                      "failed to resolve hostname '%s'", connectname);
        return ap_proxyerror(r, HTTP_BAD_GATEWAY,
                             apr_pstrcat(p, "DNS lookup failure for: ",
                                         connectname, NULL));
    }

    /* Check ProxyBlock directive on the hostname/address.  */
    if (ap_proxy_checkproxyblock2(r, conf, uri.hostname, 
                                 proxyname ? NULL : nexthop) != OK) {
        return ap_proxyerror(r, HTTP_FORBIDDEN,
                             "Connect to remote machine blocked");
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
                  "connecting to remote proxy %s on port %d",
                  connectname, connectport);

    /* Check if it is an allowed port */
    if (!allowed_port(c_conf, uri.port)) {
        return ap_proxyerror(r, HTTP_FORBIDDEN,
                             "Connect to remote machine blocked");
    }

    /*
     * Step Two: Make the Connection
     *
     * We have determined who to connect to. Now make the connection.
     */

    /*
     * At this point we have a list of one or more IP addresses of
     * the machine to connect to. If configured, reorder this
     * list so that the "best candidate" is first try. "best
     * candidate" could mean the least loaded server, the fastest
     * responding server, whatever.
     *
     * For now we do nothing, ie we get DNS round robin.
     * XXX FIXME
     */
    failed = ap_proxy_connect_to_backend(&sock, "CONNECT", nexthop,
                                         connectname, conf, r);

    /* handle a permanent error from the above loop */
    if (failed) {
        if (proxyname) {
            return DECLINED;
        }
        else {
            return HTTP_SERVICE_UNAVAILABLE;
        }
    }

    /* setup polling for connection */
    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");

    if ((rv = apr_pollset_create(&pollset, 2, r->pool, 0)) != APR_SUCCESS) {
        apr_socket_close(sock);
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01020)
                      "error apr_pollset_create()");
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    /* Add client side to the poll */
    pollfd.p = r->pool;
    pollfd.desc_type = APR_POLL_SOCKET;
    pollfd.reqevents = APR_POLLIN;
    pollfd.desc.s = client_socket;
    pollfd.client_data = NULL;
    apr_pollset_add(pollset, &pollfd);

    /* Add the server side to the poll */
    pollfd.desc.s = sock;
    apr_pollset_add(pollset, &pollfd);

    /*
     * Step Three: Send the Request
     *
     * Send the HTTP/1.1 CONNECT request to the remote server
     */

    backconn = ap_run_create_connection(c->pool, r->server, sock,
                                        c->id, c->sbh, c->bucket_alloc);
    if (!backconn) {
        /* peer reset */
        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01021)
                      "an error occurred creating a new connection "
                      "to %pI (%s)", nexthop, connectname);
        apr_socket_close(sock);
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    ap_proxy_ssl_disable(backconn);
    rc = ap_run_pre_connection(backconn, sock);
    if (rc != OK && rc != DONE) {
        backconn->aborted = 1;
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01022)
                      "pre_connection setup failed (%d)", rc);
        return HTTP_INTERNAL_SERVER_ERROR;
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
                  "connection complete to %pI (%s)",
                  nexthop, connectname);
    apr_table_setn(r->notes, "proxy-source-port", apr_psprintf(r->pool, "%hu",
                   backconn->local_addr->port));

    /* If we are connecting through a remote proxy, we need to pass
     * the CONNECT request on to it.
     */
    if (proxyport) {
        /* FIXME: Error checking ignored. */
        ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                      "sending the CONNECT request to the remote proxy");
        ap_fprintf(backconn->output_filters, bb,
                   "CONNECT %s HTTP/1.0" CRLF, r->uri);
        ap_fprintf(backconn->output_filters, bb,
                   "Proxy-agent: %s" CRLF CRLF, ap_get_server_banner());
        ap_fflush(backconn->output_filters, bb);
    }
    else {
        ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Returning 200 OK");
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "HTTP/1.0 200 Connection Established" CRLF);
        ap_xlate_proto_to_ascii(buffer, nbytes);
        ap_fwrite(c->output_filters, bb, buffer, nbytes);
        nbytes = apr_snprintf(buffer, sizeof(buffer),
                              "Proxy-agent: %s" CRLF CRLF,
                              ap_get_server_banner());
        ap_xlate_proto_to_ascii(buffer, nbytes);
        ap_fwrite(c->output_filters, bb, buffer, nbytes);
        ap_fflush(c->output_filters, bb);
#if 0
        /* This is safer code, but it doesn't work yet.  I'm leaving it
         * here so that I can fix it later.
         */
        r->status = HTTP_OK;
        r->header_only = 1;
        apr_table_set(r->headers_out, "Proxy-agent: %s", ap_get_server_banner());
        ap_rflush(r);
#endif
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r, "setting up poll()");

    /*
     * Step Four: Handle Data Transfer
     *
     * Handle two way transfer of data over the socket (this is a tunnel).
     */

    /* we are now acting as a tunnel - the input/output filter stacks should
     * not contain any non-connection filters.
     */
    r->output_filters = c->output_filters;
    r->proto_output_filters = c->output_filters;
    r->input_filters = c->input_filters;
    r->proto_input_filters = c->input_filters;
/*    r->sent_bodyct = 1;*/

    while (1) { /* Infinite loop until error (one side closes the connection) */
        if ((rv = apr_pollset_poll(pollset, -1, &pollcnt, &signalled))
            != APR_SUCCESS) {
            if (APR_STATUS_IS_EINTR(rv)) {
                continue;
            }
            apr_socket_close(sock);
            ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, APLOGNO(01023) "error apr_poll()");
            return HTTP_INTERNAL_SERVER_ERROR;
        }
#ifdef DEBUGGING
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01024)
                      "woke from poll(), i=%d", pollcnt);
#endif

        for (pi = 0; pi < pollcnt; pi++) {
            const apr_pollfd_t *cur = &signalled[pi];

            if (cur->desc.s == sock) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01025)
                                  "sock was readable");
#endif
                    rv = proxy_connect_transfer(r, backconn, c, bb, "sock");
                }
                else if ((pollevent & APR_POLLERR)
                         || (pollevent & APR_POLLHUP)) {
                         rv = APR_EPIPE;
                         ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, APLOGNO(01026)
                                       "err/hup on backconn");
                }
                if (rv != APR_SUCCESS)
                    client_error = 1;
            }
            else if (cur->desc.s == client_socket) {
                pollevent = cur->rtnevents;
                if (pollevent & APR_POLLIN) {
#ifdef DEBUGGING
                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(01027)
                                  "client was readable");
#endif
                    rv = proxy_connect_transfer(r, c, backconn, bb, "client");
                }
            }
            else {
                rv = APR_EBADF;
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(01028)
                              "unknown socket in pollset");
            }

        }
        if (rv != APR_SUCCESS) {
            break;
        }
    }

    ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
                  "finished with poll() - cleaning up");

    /*
     * Step Five: Clean Up
     *
     * Close the socket and clean up
     */

    if (client_error)
        apr_socket_close(sock);
    else
        ap_lingering_close(backconn);

    c->aborted = 1;
    c->keepalive = AP_CONN_CLOSE;

    return OK;
}
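
proxy_connect_transfer() is referenced above but not shown. The sketch below is an assumption about what such a one-direction pump can look like when written directly against non-blocking APR sockets rather than the filter stacks mod_proxy really uses; relay_readable() and TUNNEL_BUFSIZE are illustrative names. It drains whatever is currently readable on `in` and relays it to `out`, retrying on EINTR and stopping on EAGAIN (nothing more to read right now) or on EOF/error.

/*
 * Hedged sketch of a one-direction tunnel pump over non-blocking sockets.
 */
#include <apr_network_io.h>
#include <apr_errno.h>

#define TUNNEL_BUFSIZE 8192

static apr_status_t relay_readable(apr_socket_t *in, apr_socket_t *out)
{
    char buf[TUNNEL_BUFSIZE];
    apr_status_t rv;

    for (;;) {
        apr_size_t len = sizeof(buf);

        rv = apr_socket_recv(in, buf, &len);
        if (APR_STATUS_IS_EINTR(rv)) {
            continue;                   /* interrupted: try the read again */
        }
        if (APR_STATUS_IS_EAGAIN(rv)) {
            return APR_SUCCESS;         /* drained for now, poll again later */
        }
        if (rv != APR_SUCCESS || len == 0) {
            return rv ? rv : APR_EOF;   /* peer closed or hard error */
        }

        /* apr_socket_send() may send less than requested on a non-blocking
         * socket; a production pump would loop or buffer the remainder.
         */
        rv = apr_socket_send(out, buf, &len);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }
}
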