Example #1
static apr_pool_t *
init(const char *application)
{
  apr_allocator_t *allocator;
  apr_pool_t *pool;
  svn_error_t *err;
  const svn_version_checklist_t checklist[] = {
    {"svn_client", svn_client_version},
    {"svn_subr", svn_subr_version},
    {"svn_ra", svn_ra_version},
    {NULL, NULL}
  };

  SVN_VERSION_DEFINE(my_version);

  if (svn_cmdline_init(application, stderr)
      || apr_allocator_create(&allocator))
    exit(EXIT_FAILURE);

  err = svn_ver_check_list(&my_version, checklist);
  if (err)
    handle_error(err, NULL);

  apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);
  pool = svn_pool_create_ex(NULL, allocator);
  apr_allocator_owner_set(allocator, pool);

  return pool;
}
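/* A minimal sketch of the lifecycle that recurs throughout these examples --
 * create an allocator, create a pool from it, then hand ownership to the
 * pool (helper name hypothetical, error handling abbreviated). Once
 * apr_allocator_owner_set() runs, a single apr_pool_destroy() tears down
 * the pool and the allocator together. */
#include <apr_pools.h>
#include <apr_allocator.h>

static apr_pool_t *make_root_pool(void)
{
    apr_allocator_t *allocator;
    apr_pool_t *pool;

    if (apr_allocator_create(&allocator) != APR_SUCCESS)
        return NULL;

    if (apr_pool_create_ex(&pool, NULL, NULL, allocator) != APR_SUCCESS) {
        apr_allocator_destroy(allocator);  /* the pool never took ownership */
        return NULL;
    }

    apr_allocator_owner_set(allocator, pool);
    return pool;
}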
Example #2
static PCOMP_CONTEXT win9x_get_connection(PCOMP_CONTEXT context)
{
    apr_os_sock_info_t sockinfo;
    int len, salen;
#if APR_HAVE_IPV6
    salen = sizeof(struct sockaddr_in6);
#else
    salen = sizeof(struct sockaddr_in);
#endif


    if (context == NULL) {
        /* allocate the completion context and the transaction pool */
        apr_allocator_t *allocator;
        apr_thread_mutex_lock(child_lock);
        context = apr_pcalloc(pchild, sizeof(COMP_CONTEXT));
        apr_allocator_create(&allocator);
        apr_allocator_max_free_set(allocator, ap_max_mem_free);
        apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
        apr_allocator_owner_set(allocator, context->ptrans);
        apr_pool_tag(context->ptrans, "transaction");
        apr_thread_mutex_unlock(child_lock);
    }

    while (1) {
        apr_pool_clear(context->ptrans);
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->accept_socket = remove_job();
        if (context->accept_socket == INVALID_SOCKET) {
            return NULL;
        }
        len = salen;
        context->sa_server = apr_palloc(context->ptrans, len);
        if (getsockname(context->accept_socket,
                        context->sa_server, &len) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getsockname failed");
            continue;
        }
        len = salen;
        context->sa_client = apr_palloc(context->ptrans, len);
        if ((getpeername(context->accept_socket,
                         context->sa_client, &len)) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getpeername failed");
            /* zero the client sockaddr buffer, not the pointer to it */
            memset(context->sa_client, '\0', salen);
        }
        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local   = context->sa_server;
        sockinfo.remote  = context->sa_client;
        sockinfo.family  = context->sa_server->sa_family;
        sockinfo.type    = SOCK_STREAM;
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        return context;
    }
}
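/* Hedged sketch of the apr_os_sock_make() step used above: it wraps an
 * already-connected native socket in an apr_socket_t allocated from the
 * given pool, so clearing that pool also discards the wrapper (helper name
 * hypothetical; assumes a POSIX-style build where apr_os_sock_t is int). */
#include <apr_portable.h>

static apr_socket_t *wrap_native_socket(apr_os_sock_t *fd,
                                        struct sockaddr *local,
                                        struct sockaddr *remote,
                                        apr_pool_t *pool)
{
    apr_os_sock_info_t sockinfo;
    apr_socket_t *sock = NULL;

    sockinfo.os_sock  = fd;
    sockinfo.local    = local;
    sockinfo.remote   = remote;
    sockinfo.family   = local->sa_family;
    sockinfo.type     = SOCK_STREAM;
    sockinfo.protocol = 0;

    if (apr_os_sock_make(&sock, &sockinfo, pool) != APR_SUCCESS)
        return NULL;
    return sock;
}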
Example #3
static int
init_function (svn_client_ctx_t **ctx, apr_pool_t **pool, lua_State *L) {
	apr_allocator_t *allocator;
	svn_auth_baton_t *ab;
	svn_config_t *cfg;
	svn_error_t *err;

	if (svn_cmdline_init("svn", stderr) != EXIT_SUCCESS) {
		return send_error (L, "Error initializing svn\n");
	}

	if (apr_allocator_create(&allocator)) {
		return send_error (L, "Error creating allocator\n");
	}

	apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);

	*pool = svn_pool_create_ex(NULL, allocator);
	apr_allocator_owner_set(allocator, *pool);

	err = svn_ra_initialize(*pool);
	IF_ERROR_RETURN (err, *pool, L);

	err = svn_client_create_context (ctx, *pool);
	IF_ERROR_RETURN (err, *pool, L);

	err = svn_config_get_config(&((*ctx)->config), NULL, *pool);
	IF_ERROR_RETURN (err, *pool, L);

	cfg = apr_hash_get((*ctx)->config, SVN_CONFIG_CATEGORY_CONFIG,
			APR_HASH_KEY_STRING);


	err = svn_cmdline_setup_auth_baton(&ab,
			FALSE,
			NULL,
			NULL,
			NULL,
			FALSE,
			cfg,
			(*ctx)->cancel_func,
			(*ctx)->cancel_baton,
			*pool);
	IF_ERROR_RETURN (err, *pool, L);

	(*ctx)->auth_baton = ab;

	return 0;
}
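/* Example #3 depends on an IF_ERROR_RETURN macro defined elsewhere in the
 * project. A plausible reconstruction, assuming it reports the svn error to
 * Lua, releases the pool, and aborts the binding call -- the real macro may
 * differ. */
#define IF_ERROR_RETURN(err, pool, L)                              \
	do {                                                           \
		if (err) {                                                 \
			lua_pushstring((L), (err)->message                     \
					? (err)->message : "unknown svn error");       \
			svn_error_clear(err);                                  \
			svn_pool_destroy(pool);                                \
			return lua_error(L);                                   \
		}                                                          \
	} while (0)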
Example #4
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf,
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS));
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
    }
    return m;
}
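/* Note on the error paths above: every `return NULL` taken after
 * apr_allocator_create() but before apr_allocator_owner_set() leaks the
 * allocator; the shipped code may deliberately tolerate that on these rare
 * paths. A hedged sketch of the destroy counterpart invoked above (the real
 * h2_mplx_destroy lives elsewhere): because the pool owns its allocator,
 * destroying the pool releases the allocator too, while `m` itself
 * survives, having been allocated from the parent pool. */
static void h2_mplx_destroy_sketch(h2_mplx *m)
{
    if (m && m->pool) {
        apr_pool_destroy(m->pool);  /* also destroys the owned allocator */
        m->pool = NULL;
    }
}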
Example #5
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *conf = h2_config_get(c);
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        apr_atomic_set32(&m->refs, 1);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        
        m->q = h2_tq_create(m->id, m->pool);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->closed = h2_stream_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->workers = workers;
        
        m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
    }
    return m;
}
Example #6
APT_DECLARE(apr_pool_t*) apt_pool_create()
{
	apr_pool_t *pool = NULL;

#ifdef OWN_ALLOCATOR_PER_POOL
	apr_allocator_t *allocator = NULL;
	apr_thread_mutex_t *mutex = NULL;

	if(apr_allocator_create(&allocator) == APR_SUCCESS) {
		if(apr_pool_create_ex(&pool,NULL,NULL,allocator) == APR_SUCCESS) {
			apr_allocator_owner_set(allocator,pool);
			apr_thread_mutex_create(&mutex,APR_THREAD_MUTEX_NESTED,pool);
			apr_allocator_mutex_set(allocator,mutex);
			apr_pool_mutex_set(pool,mutex); 
		}
	}
#else
	apr_pool_create(&pool,NULL);
#endif
	return pool;
}
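/* Why the mutex above matters: a parent pool and its subpools share one
 * allocator, so concurrent subpool create/destroy requires the allocator to
 * be guarded via apr_allocator_mutex_set(). A hedged usage sketch (worker
 * function hypothetical): with the mutex installed, each thread may safely
 * create and destroy its own subpool of the shared parent. */
#include <apr_thread_proc.h>

static void *APR_THREAD_FUNC worker(apr_thread_t *thd, void *data)
{
    apr_pool_t *parent = data;   /* pool returned by apt_pool_create() */
    apr_pool_t *sub = NULL;

    if (apr_pool_create(&sub, parent) == APR_SUCCESS) {
        char *scratch = apr_palloc(sub, 256);   /* thread-local scratch */
        (void)scratch;
        apr_pool_destroy(sub);   /* safe: allocator access is serialized */
    }
    return NULL;
}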
Example #7
apr_pool_t *nx_pool_create_core()
{
    apr_pool_t *pool;
    apr_allocator_t *allocator;
    apr_thread_mutex_t *mutex;

    CHECKERR(apr_allocator_create(&allocator));
    
#ifdef HAVE_APR_POOL_CREATE_UNMANAGED_EX
    // guard needed because older APR lacks apr_pool_create_unmanaged_ex()
    CHECKERR(apr_pool_create_unmanaged_ex(&pool, &abort_func, allocator)); 
#else
    CHECKERR(apr_pool_create_ex(&pool, NULL, &abort_func, allocator)); 
#endif
    allocator = apr_pool_allocator_get(pool);
    ASSERT(allocator != NULL);
    apr_allocator_owner_set(allocator, pool);
    apr_allocator_max_free_set(allocator, NX_MAX_ALLOCATOR_SIZE);
    CHECKERR(apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_UNNESTED, pool));
    apr_allocator_mutex_set(allocator, mutex);

    return ( pool );
}
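/* Hedged note on the #ifdef above: an unmanaged pool (APR >= 1.3) has no
 * parent, so no other pool's destruction can reach it; its creator must
 * destroy it explicitly. Minimal sketch (helper name hypothetical): */
static apr_pool_t *make_unmanaged_pool(void)
{
    apr_allocator_t *allocator = NULL;
    apr_pool_t *pool = NULL;

    if (apr_allocator_create(&allocator) != APR_SUCCESS)
        return NULL;
#ifdef HAVE_APR_POOL_CREATE_UNMANAGED_EX
    if (apr_pool_create_unmanaged_ex(&pool, NULL, allocator) != APR_SUCCESS) {
#else
    if (apr_pool_create_ex(&pool, NULL, NULL, allocator) != APR_SUCCESS) {
#endif
        apr_allocator_destroy(allocator);
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);
    return pool;   /* caller must apr_pool_destroy() this explicitly */
}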
Example #8
//static void child_main(int child_num_arg)
void body()
{

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator); //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild); //// removed deref
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldnt initialize crossprocess lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet(); assume(num_listensocks>0);

    lr = ap_listeners;
    i = num_listensocks; 
    while (1) {
      if ( i<=0 ) break; 
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
	i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while(1>0) {
      if (die_now>0) break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
	do_ACCEPT=1; do_ACCEPT=0;

	dummy = nondet();
	if(dummy > 0) {
          /* goto loc_return; */
          while(1>0) { int ddd; ddd=ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
	  while(1) {
	      int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process>0 && shutdown_pending>0) {
			  /* goto loc_return; */
                          while(1>0) { int ddd; ddd=ddd; }
                        }
                        goto loc_continueA;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*              ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; //pdesc[last_poll_idx++].client_data;
		break;

	    loc_continueA: {int yyy2; yyy2=yyy2; }
            }
        }
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur condition occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
	  goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
	dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
	  //ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    loc_continueB: { int uuu; uuu=uuu; }
    }
    clean_child_exit(0);
 /* loc_return: */
    while(1>0) { int ddd; ddd=ddd; }
}
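/* Example #8 reads like a model-checking harness: pointer dereferences were
 * stripped (the "//// removed deref" markers) and inputs are modeled with
 * nondet()/assume(). Hypothetical stubs a verifier would supply -- the names
 * below follow the SV-COMP convention; the harness's own tool may differ. */
extern int __VERIFIER_nondet_int(void);

static int nondet(void)
{
    return __VERIFIER_nondet_int();   /* an arbitrary, unconstrained int */
}

static void assume(int cond)
{
    if (!cond)
        for (;;) { }   /* diverge: paths violating `cond` are not explored */
}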
Example #9
static winnt_conn_ctx_t *mpm_get_completion_context(int *timeout)
{
    apr_status_t rv;
    winnt_conn_ctx_t *context = NULL;

    *timeout = 0;
    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00326)
                                 "Server ran out of threads to serve "
                                 "requests. Consider raising the "
                                 "ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be
                 * available, since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else {
                    if (rv == WAIT_TIMEOUT) {
                        /* somewhat-normal condition where threads are busy */
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00327)
                                     "mpm_get_completion_context: Failed to get a "
                                     "free context within 1 second");
                        *timeout = 1;
                    }
                    else {
                        /* should be the unexpected, generic WAIT_FAILED */
                        ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                     ap_server_conf, APLOGNO(00328)
                                     "mpm_get_completion_context: "
                                     "WaitForSingleObject failed to get free context");
                    }
                    return NULL;
                }
            } else {
                /* Allocate another context.
                 * Note: Multiple failures in the next two steps will cause
                 * the pchild pool to 'leak' storage. I don't think this
                 * is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (winnt_conn_ctx_t *)apr_pcalloc(pchild,
                                                     sizeof(winnt_conn_ctx_t));


                context->overlapped.hEvent = CreateEvent(NULL, TRUE,
                                                         FALSE, NULL);
                if (context->overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                 ap_server_conf, APLOGNO(00329)
                                 "mpm_get_completion_context: "
                                 "CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL,
                                        allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(00330)
                                 "mpm_get_completion_context: Failed "
                                 "to create the transaction pool.");
                    CloseHandle(context->overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
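/* Hypothetical push side of the context queue consumed above -- in httpd
 * this logic lives in mpm_post_completion_context(); shown here only as a
 * sketch. A worker returns its context and wakes any thread blocked in
 * WaitForSingleObject(qwait_event, ...). */
static void push_completion_context(winnt_conn_ctx_t *context)
{
    apr_thread_mutex_lock(qlock);
    context->next = NULL;
    if (qtail) {
        qtail->next = context;
    }
    else {
        qhead = context;
        SetEvent(qwait_event);   /* queue was empty: wake a waiter */
    }
    qtail = context;
    apr_thread_mutex_unlock(qlock);
}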
Example #10
switch_memory_pool_t *switch_core_memory_init(void)
{
#ifndef INSTANTLY_DESTROY_POOLS
	switch_threadattr_t *thd_attr;
#endif
#ifdef PER_POOL_LOCK
	apr_allocator_t *my_allocator = NULL;
	apr_thread_mutex_t *my_mutex;
#endif

	memset(&memory_manager, 0, sizeof(memory_manager));

#ifdef PER_POOL_LOCK
	if ((apr_allocator_create(&my_allocator)) != APR_SUCCESS) {
		abort();
	}

	if ((apr_pool_create_ex(&memory_manager.memory_pool, NULL, NULL, my_allocator)) != APR_SUCCESS) {
		apr_allocator_destroy(my_allocator);
		my_allocator = NULL;
		abort();
	}

	if ((apr_thread_mutex_create(&my_mutex, APR_THREAD_MUTEX_NESTED, memory_manager.memory_pool)) != APR_SUCCESS) {
		abort();
	}

	apr_allocator_mutex_set(my_allocator, my_mutex);
	apr_pool_mutex_set(memory_manager.memory_pool, my_mutex);
	apr_allocator_owner_set(my_allocator, memory_manager.memory_pool);
	apr_pool_tag(memory_manager.memory_pool, "core_pool");
#else
	apr_pool_create(&memory_manager.memory_pool, NULL);
	switch_assert(memory_manager.memory_pool != NULL);
#endif

#ifdef USE_MEM_LOCK
	switch_mutex_init(&memory_manager.mem_lock, SWITCH_MUTEX_NESTED, memory_manager.memory_pool);
#endif

#ifdef INSTANTLY_DESTROY_POOLS
	{
		void *foo;
		foo = (void *) (intptr_t) pool_thread;
	}
#else

	switch_queue_create(&memory_manager.pool_queue, 50000, memory_manager.memory_pool);
	switch_queue_create(&memory_manager.pool_recycle_queue, 50000, memory_manager.memory_pool);

	switch_threadattr_create(&thd_attr, memory_manager.memory_pool);

	switch_threadattr_stacksize_set(thd_attr, SWITCH_THREAD_STACKSIZE);
	switch_thread_create(&pool_thread_p, thd_attr, pool_thread, NULL, memory_manager.memory_pool);

	while (!memory_manager.pool_thread_running) {
		switch_cond_next();
	}
#endif

	return memory_manager.memory_pool;
}
Example #11
SWITCH_DECLARE(switch_status_t) switch_core_perform_new_memory_pool(switch_memory_pool_t **pool, const char *file, const char *func, int line)
{
	char *tmp;
#ifdef INSTANTLY_DESTROY_POOLS
	apr_pool_create(pool, NULL);
	switch_assert(*pool != NULL);
#else

#ifdef PER_POOL_LOCK
	apr_allocator_t *my_allocator = NULL;
	apr_thread_mutex_t *my_mutex;
#else
	void *pop = NULL;
#endif

#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
	switch_assert(pool != NULL);

#ifndef PER_POOL_LOCK
	if (switch_queue_trypop(memory_manager.pool_recycle_queue, &pop) == SWITCH_STATUS_SUCCESS && pop) {
		*pool = (switch_memory_pool_t *) pop;
	} else {
#endif

#ifdef PER_POOL_LOCK
		if ((apr_allocator_create(&my_allocator)) != APR_SUCCESS) {
			abort();
		}

		if ((apr_pool_create_ex(pool, NULL, NULL, my_allocator)) != APR_SUCCESS) {
			abort();
		}

		if ((apr_thread_mutex_create(&my_mutex, APR_THREAD_MUTEX_NESTED, *pool)) != APR_SUCCESS) {
			abort();
		}

		apr_allocator_mutex_set(my_allocator, my_mutex);
		apr_allocator_owner_set(my_allocator, *pool);

		apr_pool_mutex_set(*pool, my_mutex);

#else
		apr_pool_create(pool, NULL);
		switch_assert(*pool != NULL);
	}
#endif
#endif

	tmp = switch_core_sprintf(*pool, "%s:%d", file, line);
	apr_pool_tag(*pool, tmp);

#ifdef DEBUG_ALLOC2
	switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p New Pool %s\n", (void *) *pool, apr_pool_tag(*pool, NULL));
#endif


#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif

	return SWITCH_STATUS_SUCCESS;
}
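/* Hypothetical counterpart to the recycle-queue pop above: rather than
 * destroying a finished pool, clear it and park it for the next caller
 * (assumes the non-PER_POOL_LOCK build; helper name invented). */
static void recycle_pool(switch_memory_pool_t *pool)
{
	apr_pool_clear(pool);   /* drop allocations, keep the pool itself */
	if (switch_queue_trypush(memory_manager.pool_recycle_queue, pool)
		!= SWITCH_STATUS_SUCCESS) {
		apr_pool_destroy(pool);   /* recycle queue full: really free it */
	}
}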
Example #12
/**
 * An h2_mplx needs to be thread-safe, since it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf, 
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_pool_tag(m->pool, "h2_mplx");
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        status = apr_thread_cond_create(&m->task_thawed, m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
    
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->q = h2_iq_create(m->pool, m->max_streams);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        m->workers_max = workers->max_workers;
        m->workers_def_limit = 4;
        m->workers_limit = m->workers_def_limit;
        m->last_limit_change = m->last_idle_block = apr_time_now();
        m->limit_change_interval = apr_time_from_msec(200);
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
        
        m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
        
        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, 
                                         m->stream_max_mem);
        h2_ngn_shed_set_ctx(m->ngn_shed, m);
    }
    return m;
}
Example #13
conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
{
    apr_allocator_t *allocator;
    apr_status_t status;
    apr_pool_t *pool;
    conn_rec *c;
    void *cfg;
    module *mpm;
    
    ap_assert(master);
    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
                  "h2_stream(%ld-%d): create slave", master->id, slave_id);
    
    /* We create a pool with its own allocator to be used for
     * processing a request. This is the only way to have the processing
     * independent of its parent pool in the sense that it can work in
     * another thread. Also, the new allocator needs its own mutex to
     * synchronize sub-pools.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    status = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (status != APR_SUCCESS) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, 
                      APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
                      master->id, slave_id);
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);
    apr_pool_abort_set(abort_on_oom, pool);
    apr_pool_tag(pool, "h2_slave_conn");

    c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
    if (c == NULL) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, 
                      APLOGNO(02913) "h2_session(%ld-%d): create slave",
                      master->id, slave_id);
        apr_pool_destroy(pool);
        return NULL;
    }
    
    memcpy(c, master, sizeof(conn_rec));
        
    c->master                 = master;
    c->pool                   = pool;   
    c->conn_config            = ap_create_conn_config(pool);
    c->notes                  = apr_table_make(pool, 5);
    c->input_filters          = NULL;
    c->output_filters         = NULL;
    c->keepalives             = 0;
#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
    c->filter_conn_ctx        = NULL;
#endif
    c->bucket_alloc           = apr_bucket_alloc_create(pool);
#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
    c->data_in_input_filters  = 0;
    c->data_in_output_filters = 0;
#endif
    /* prevent mpm_event from making wrong assumptions about this connection,
     * like e.g. using its socket for an async read check. */
    c->clogging_input_filters = 1;
    c->log                    = NULL;
    c->log_id                 = apr_psprintf(pool, "%ld-%d", 
                                             master->id, slave_id);
    c->aborted                = 0;
    /* We cannot install the master connection socket on the slaves, as
     * modules mess with timeouts/blocking of the socket, with
     * unwanted side effects to the master connection processing.
     * Fortunately, since we never use the slave socket, we can just install
     * a single, process-wide dummy and everyone is happy.
     */
    ap_set_module_config(c->conn_config, &core_module, dummy_socket);
    /* TODO: these should be unique to this thread */
    c->sbh                    = master->sbh;
    /* TODO: not all mpm modules have learned about slave connections yet.
     * copy their config from master to slave.
     */
    if ((mpm = h2_conn_mpm_module()) != NULL) {
        cfg = ap_get_module_config(master->conn_config, mpm);
        ap_set_module_config(c->conn_config, mpm, cfg);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, 
                  "h2_slave(%s): created", c->log_id);
    return c;
}
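/* Hypothetical shape of the abort_on_oom handler installed above via
 * apr_pool_abort_set(): an apr_abortfunc_t receives the APR status code and
 * should not return. The real mod_http2 handler may differ. */
static int abort_on_oom(int retcode)
{
    ap_log_error(APLOG_MARK, APLOG_CRIT, APR_ENOMEM, NULL,
                 "out of memory, aborting (code %d)", retcode);
    abort();
    return retcode;   /* never reached */
}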
Example #14
File: main.c Project: ejrh/ejrh
int
main (int argc, const char * const *argv)
{
  svn_error_t *err;
  apr_status_t apr_err;
  apr_allocator_t *allocator;
  apr_pool_t *pool;

  const svn_opt_subcommand_desc_t *subcommand = NULL;
  struct svnindex_opt_state opt_state;
  apr_getopt_t *os;  
  int opt_id;
  apr_array_header_t *received_opts;
  int i;

  /* Initialize the app. */
  if (svn_cmdline_init ("svnindex", stderr) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  /* Create our top-level pool.  Use a separate mutexless allocator,
   * given this application is single threaded.
   */
  if (apr_allocator_create (&allocator))
    return EXIT_FAILURE;

  apr_allocator_max_free_set (allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);

  pool = svn_pool_create_ex (NULL, allocator);
  apr_allocator_owner_set (allocator, pool);

  received_opts = apr_array_make (pool, SVN_OPT_MAX_OPTIONS, sizeof (int));

  /* Check library versions */
  err = check_lib_versions ();
  if (err)
    return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");

  /* Initialize the FS library. */
  err = svn_fs_initialize (pool);
  if (err)
    return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");

  if (argc <= 1)
    {
      subcommand_help (NULL, NULL, pool);
      svn_pool_destroy (pool);
      return EXIT_FAILURE;
    }

  /* Initialize opt_state. */
  memset (&opt_state, 0, sizeof (opt_state));
  opt_state.start_revision.kind = svn_opt_revision_unspecified;
  opt_state.end_revision.kind = svn_opt_revision_unspecified;

  /* Parse options. */
  apr_getopt_init (&os, pool, argc, argv);
  os->interleave = 1;

  while (1)
    {
      const char *opt_arg;
      const char *utf8_opt_arg;

      /* Parse the next option. */
      apr_err = apr_getopt_long (os, options_table, &opt_id, &opt_arg);
      if (APR_STATUS_IS_EOF (apr_err))
        break;
      else if (apr_err)
        {
          subcommand_help (NULL, NULL, pool);
          svn_pool_destroy (pool);
          return EXIT_FAILURE;
        }

      /* Stash the option code in an array before parsing it. */
      APR_ARRAY_PUSH (received_opts, int) = opt_id;

      switch (opt_id) {
      case 'r':
        {
          if (opt_state.start_revision.kind != svn_opt_revision_unspecified)
            {
              err = svn_error_create
                (SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                 _("Multiple revision arguments encountered; "
                   "try '-r M:N' instead of '-r M -r N'"));
              return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");
            }
          if (svn_opt_parse_revision (&(opt_state.start_revision),
                                      &(opt_state.end_revision),
                                      opt_arg, pool) != 0)
            {
              err = svn_utf_cstring_to_utf8 (&utf8_opt_arg, opt_arg,
                                             pool);

              if (! err)
                err = svn_error_createf
                  (SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                   _("Syntax error in revision argument '%s'"),
                   utf8_opt_arg);
              return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");
            }
        }
        break;
      case 'q':
        opt_state.quiet = TRUE;
        break;
      case 'h':
      case '?':
        opt_state.help = TRUE;
        break;
      case svnindex__version:
        opt_state.version = TRUE;
        opt_state.help = TRUE;
        break;
      case svnindex__db:
        err = svn_utf_cstring_to_utf8 (&opt_state.db, opt_arg,
                                       pool);
        if (err)
          return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");
        break;
      case 'v':
        opt_state.verbose = TRUE;
        break;
      default:
        {
          subcommand_help (NULL, NULL, pool);
          svn_pool_destroy (pool);
          return EXIT_FAILURE;
        }
      }  /* close `switch' */
    }  /* close `while' */
  
  /* If the user asked for help, then the rest of the arguments are
     the names of subcommands to get help on (if any), or else they're
     just typos/mistakes.  Whatever the case, the subcommand to
     actually run is subcommand_help(). */
  if (opt_state.help)
    subcommand = svn_opt_get_canonical_subcommand (cmd_table, "help");

  /* If we're not running the `help' subcommand, then look for a
     subcommand in the first argument. */
  if (subcommand == NULL)
    {
      if (os->ind >= os->argc)
        {
          svn_error_clear
            (svn_cmdline_fprintf (stderr, pool,
                                  _("subcommand argument required\n")));
          subcommand_help (NULL, NULL, pool);
          svn_pool_destroy (pool);
          return EXIT_FAILURE;
        }
      else
        {
          const char *first_arg = os->argv[os->ind++];
          subcommand = svn_opt_get_canonical_subcommand (cmd_table, first_arg);
          if (subcommand == NULL)
            {
              const char* first_arg_utf8;
              err = svn_utf_cstring_to_utf8 (&first_arg_utf8, first_arg, pool);
              if (err)
                return svn_cmdline_handle_exit_error (err, pool, "svnindex: ");
              svn_error_clear
                (svn_cmdline_fprintf (stderr, pool,
                                      _("Unknown command: '%s'\n"),
                                      first_arg_utf8));
              subcommand_help (NULL, NULL, pool);
              svn_pool_destroy (pool);
              return EXIT_FAILURE;
            }
        }
    }

  /* If there's a second argument, it's probably the repository.
     Every subcommand except `help' requires one, so we parse it out
     here and store it in opt_state. */
  if (subcommand->cmd_func != subcommand_help)
    {
      err = parse_local_repos_path (os, 
                                    &(opt_state.repository_path), 
                                    pool);
      if(err)
        {
          svn_handle_error2 (err, stderr, FALSE, "svnindex: ");
          svn_opt_subcommand_help (subcommand->name, cmd_table,
                                   options_table, pool);
          svn_error_clear (err);
          svn_pool_destroy (pool);
          return EXIT_FAILURE;
        }

    }


  /* Check that the subcommand wasn't passed any inappropriate options. */
  for (i = 0; i < received_opts->nelts; i++)
    {
      opt_id = APR_ARRAY_IDX (received_opts, i, int);

      /* All commands implicitly accept --help, so just skip over this
         when we see it. Note that we don't want to include this option
         in their "accepted options" list because it would be awfully
         redundant to display it in every command's help text. */
      if (opt_id == 'h' || opt_id == '?')
        continue;

      if (! svn_opt_subcommand_takes_option (subcommand, opt_id))
        {
          const char *optstr;
          const apr_getopt_option_t *badopt = 
            svn_opt_get_option_from_code (opt_id, options_table);
          svn_opt_format_option (&optstr, badopt, FALSE, pool);
          svn_error_clear
            (svn_cmdline_fprintf
             (stderr, pool, _("subcommand '%s' doesn't accept option '%s'\n"
                              "Type 'svnindex help %s' for usage.\n"),
              subcommand->name, optstr, subcommand->name));
          svn_pool_destroy (pool);
          return EXIT_FAILURE;
        }
    }

  /* Set up our cancellation support. */
  setup_cancellation_signals (signal_handler);

#ifdef SIGPIPE
  /* Disable SIGPIPE generation for the platforms that have it. */
  apr_signal(SIGPIPE, SIG_IGN);
#endif

  /* Run the subcommand. */
  err = (*subcommand->cmd_func) (os, &opt_state, pool);
  if (err)
    {
      if (err->apr_err == SVN_ERR_CL_ARG_PARSING_ERROR)
        {
          svn_handle_error2 (err, stderr, FALSE, "svnindex: ");
          svn_opt_subcommand_help (subcommand->name, cmd_table,
                                   options_table, pool);
        }
      else
        svn_handle_error2 (err, stderr, FALSE, "svnindex: ");
      svn_error_clear (err);
      svn_pool_destroy (pool);
      return EXIT_FAILURE;
    }
  else
    {
      svn_pool_destroy (pool);
      /* Ensure that everything is written to stdout, so the user will
         see any print errors. */
      err = svn_cmdline_fflush (stdout);
      if (err) {
        svn_handle_error2 (err, stderr, FALSE, "svnindex: ");
        svn_error_clear (err);
        return EXIT_FAILURE;
      }
      return EXIT_SUCCESS;
    }
}
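/* Hypothetical body for the setup_cancellation_signals() helper used above,
 * modeled on the equivalent inline signal block in Example #16 below: route
 * the usual interrupt signals to one handler. */
static void setup_cancellation_signals(void (*handler)(int signum))
{
  apr_signal(SIGINT, handler);
#ifdef SIGBREAK
  apr_signal(SIGBREAK, handler);   /* Win32-specific, generated by Ctrl-Break */
#endif
#ifdef SIGHUP
  apr_signal(SIGHUP, handler);
#endif
#ifdef SIGTERM
  apr_signal(SIGTERM, handler);
#endif
}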
Example #15
PCOMP_CONTEXT mpm_get_completion_context(void)
{
    apr_status_t rv;
    PCOMP_CONTEXT context = NULL;

    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
                                 "Server ran out of threads to serve requests. Consider "
                                 "raising the ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be available,
                 * since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else /* Hopefully, WAIT_TIMEOUT */
                    return NULL;
            } else {
                /* Allocate another context.
                 * Note:
                 * Multiple failures in the next two steps will cause the pchild pool
                 * to 'leak' storage. I don't think this is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (PCOMP_CONTEXT) apr_pcalloc(pchild, sizeof(COMP_CONTEXT));

                context->Overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
                if (context->Overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK,APLOG_WARNING, apr_get_os_error(), ap_server_conf,
                                 "mpm_get_completion_context: CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK,APLOG_WARNING, rv, ap_server_conf,
                                 "mpm_get_completion_context: Failed to create the transaction pool.");
                    CloseHandle(context->Overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
Example #16
int
main(int argc, const char **argv)
{
    svn_error_t *err = SVN_NO_ERROR;
    const svn_opt_subcommand_desc2_t *subcommand = NULL;
    opt_baton_t *opt_baton;
    svn_revnum_t latest_revision = SVN_INVALID_REVNUM;
    apr_pool_t *pool = NULL;
    const char *config_dir = NULL;
    const char *username = NULL;
    const char *password = NULL;
    svn_boolean_t no_auth_cache = FALSE;
    svn_boolean_t trust_server_cert = FALSE;
    svn_boolean_t non_interactive = FALSE;
    apr_array_header_t *config_options = NULL;
    apr_getopt_t *os;
    const char *first_arg;
    apr_array_header_t *received_opts;
    apr_allocator_t *allocator;
    int i;

    if (svn_cmdline_init ("svnrdump", stderr) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    /* Create our top-level pool.  Use a separate mutexless allocator,
     * given this application is single threaded.
     */
    if (apr_allocator_create(&allocator))
        return EXIT_FAILURE;

    apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);

    pool = svn_pool_create_ex(NULL, allocator);
    apr_allocator_owner_set(allocator, pool);

    opt_baton = apr_pcalloc(pool, sizeof(*opt_baton));
    opt_baton->start_revision.kind = svn_opt_revision_unspecified;
    opt_baton->end_revision.kind = svn_opt_revision_unspecified;
    opt_baton->url = NULL;

    SVNRDUMP_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));

    os->interleave = TRUE; /* Options and arguments can be interleaved */

    /* Set up our cancellation support. */
    apr_signal(SIGINT, signal_handler);
#ifdef SIGBREAK
    /* SIGBREAK is a Win32 specific signal generated by ctrl-break. */
    apr_signal(SIGBREAK, signal_handler);
#endif
#ifdef SIGHUP
    apr_signal(SIGHUP, signal_handler);
#endif
#ifdef SIGTERM
    apr_signal(SIGTERM, signal_handler);
#endif
#ifdef SIGPIPE
    /* Disable SIGPIPE generation for the platforms that have it. */
    apr_signal(SIGPIPE, SIG_IGN);
#endif
#ifdef SIGXFSZ
    /* Disable SIGXFSZ generation for the platforms that have it, otherwise
     * working with large files when compiled against an APR that doesn't have
     * large file support will crash the program, which is uncool. */
    apr_signal(SIGXFSZ, SIG_IGN);
#endif

    received_opts = apr_array_make(pool, SVN_OPT_MAX_OPTIONS, sizeof(int));

    while (1)
    {
        int opt;
        const char *opt_arg;
        apr_status_t status = apr_getopt_long(os, svnrdump__options, &opt,
                                              &opt_arg);

        if (APR_STATUS_IS_EOF(status))
            break;
        if (status != APR_SUCCESS)
        {
            SVNRDUMP_ERR(usage(argv[0], pool));
            exit(EXIT_FAILURE);
        }

        /* Stash the option code in an array before parsing it. */
        APR_ARRAY_PUSH(received_opts, int) = opt;

        switch(opt)
        {
        case 'r':
        {
            /* Make sure we've not seen -r already. */
            if (opt_baton->start_revision.kind != svn_opt_revision_unspecified)
            {
                err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                                       _("Multiple revision arguments "
                                         "encountered; try '-r N:M' instead "
                                         "of '-r N -r M'"));
                return svn_cmdline_handle_exit_error(err, pool, "svnrdump: ");
            }
            /* Parse the -r argument. */
            if (svn_opt_parse_revision(&(opt_baton->start_revision),
                                       &(opt_baton->end_revision),
                                       opt_arg, pool) != 0)
            {
                const char *utf8_opt_arg;
                err = svn_utf_cstring_to_utf8(&utf8_opt_arg, opt_arg, pool);
                if (! err)
                    err = svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                                            _("Syntax error in revision "
                                              "argument '%s'"), utf8_opt_arg);
                return svn_cmdline_handle_exit_error(err, pool, "svnrdump: ");
            }
        }
        break;
        case 'q':
            opt_baton->quiet = TRUE;
            break;
        case opt_config_dir:
            config_dir = opt_arg;
            break;
        case opt_version:
            opt_baton->version = TRUE;
            break;
        case 'h':
            opt_baton->help = TRUE;
            break;
        case opt_auth_username:
            SVNRDUMP_ERR(svn_utf_cstring_to_utf8(&username, opt_arg, pool));
            break;
        case opt_auth_password:
            SVNRDUMP_ERR(svn_utf_cstring_to_utf8(&password, opt_arg, pool));
            break;
        case opt_auth_nocache:
            no_auth_cache = TRUE;
            break;
        case opt_non_interactive:
            non_interactive = TRUE;
            break;
        case opt_incremental:
            opt_baton->incremental = TRUE;
            break;
        case opt_trust_server_cert:
            trust_server_cert = TRUE;
            break;
        case opt_config_option:
            if (!config_options)
                config_options =
                    apr_array_make(pool, 1,
                                   sizeof(svn_cmdline__config_argument_t*));

            SVNRDUMP_ERR(svn_utf_cstring_to_utf8(&opt_arg, opt_arg, pool));
            SVNRDUMP_ERR(svn_cmdline__parse_config_option(config_options,
                         opt_arg, pool));
        }
    }

    if (opt_baton->help)
    {
        subcommand = svn_opt_get_canonical_subcommand2(svnrdump__cmd_table,
                     "help");
    }
    if (subcommand == NULL)
    {
        if (os->ind >= os->argc)
        {
            if (opt_baton->version)
            {
                /* Use the "help" subcommand to handle the "--version" option. */
                static const svn_opt_subcommand_desc2_t pseudo_cmd =
                {   "--version", help_cmd, {0}, "",
                    {   opt_version,  /* must accept its own option */
                        'q',  /* --quiet */
                    }
                };
                subcommand = &pseudo_cmd;
            }

            else
            {
                SVNRDUMP_ERR(help_cmd(NULL, NULL, pool));
                svn_pool_destroy(pool);
                exit(EXIT_FAILURE);
            }
        }
        else
        {
            first_arg = os->argv[os->ind++];
            subcommand = svn_opt_get_canonical_subcommand2(svnrdump__cmd_table,
                         first_arg);

            if (subcommand == NULL)
            {
                const char *first_arg_utf8;
                err = svn_utf_cstring_to_utf8(&first_arg_utf8, first_arg, pool);
                if (err)
                    return svn_cmdline_handle_exit_error(err, pool, "svnrdump: ");
                svn_error_clear(svn_cmdline_fprintf(stderr, pool,
                                                    _("Unknown command: '%s'\n"),
                                                    first_arg_utf8));
                SVNRDUMP_ERR(help_cmd(NULL, NULL, pool));
                svn_pool_destroy(pool);
                exit(EXIT_FAILURE);
            }
        }
    }

    /* Check that the subcommand wasn't passed any inappropriate options. */
    for (i = 0; i < received_opts->nelts; i++)
    {
        int opt_id = APR_ARRAY_IDX(received_opts, i, int);

        /* All commands implicitly accept --help, so just skip over this
           when we see it. Note that we don't want to include this option
           in their "accepted options" list because it would be awfully
           redundant to display it in every command's help text. */
        if (opt_id == 'h' || opt_id == '?')
            continue;

        if (! svn_opt_subcommand_takes_option3(subcommand, opt_id, NULL))
        {
            const char *optstr;
            const apr_getopt_option_t *badopt =
                svn_opt_get_option_from_code2(opt_id, svnrdump__options,
                                              subcommand, pool);
            svn_opt_format_option(&optstr, badopt, FALSE, pool);
            if (subcommand->name[0] == '-')
                SVN_INT_ERR(help_cmd(NULL, NULL, pool));
            else
                svn_error_clear(svn_cmdline_fprintf(
                                    stderr, pool,
                                    _("Subcommand '%s' doesn't accept option '%s'\n"
                                      "Type 'svnrdump help %s' for usage.\n"),
                                    subcommand->name, optstr, subcommand->name));
            svn_pool_destroy(pool);
            return EXIT_FAILURE;
        }
    }

    if (subcommand && strcmp(subcommand->name, "--version") == 0)
    {
        SVNRDUMP_ERR(version(argv[0], opt_baton->quiet, pool));
        svn_pool_destroy(pool);
        exit(EXIT_SUCCESS);
    }

    if (subcommand && strcmp(subcommand->name, "help") == 0)
    {
        SVNRDUMP_ERR(help_cmd(os, opt_baton, pool));
        svn_pool_destroy(pool);
        exit(EXIT_SUCCESS);
    }

    /* --trust-server-cert can only be used with --non-interactive */
    if (trust_server_cert && !non_interactive)
    {
        err = svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
                               _("--trust-server-cert requires "
                                 "--non-interactive"));
        return svn_cmdline_handle_exit_error(err, pool, "svnrdump: ");
    }

    /* Expect one more non-option argument:  the repository URL. */
    if (os->ind != os->argc - 1)
    {
        SVNRDUMP_ERR(usage(argv[0], pool));
        svn_pool_destroy(pool);
        exit(EXIT_FAILURE);
    }
    else
    {
        const char *repos_url;

        SVNRDUMP_ERR(svn_utf_cstring_to_utf8(&repos_url,
                                             os->argv[os->ind], pool));
        if (! svn_path_is_url(repos_url))
        {
            err = svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, 0,
                                    "Target '%s' is not a URL",
                                    repos_url);
            SVNRDUMP_ERR(err);
            svn_pool_destroy(pool);
            exit(EXIT_FAILURE);
        }
        opt_baton->url = svn_uri_canonicalize(repos_url, pool);
    }

    SVNRDUMP_ERR(init_client_context(&(opt_baton->ctx),
                                     non_interactive,
                                     username,
                                     password,
                                     config_dir,
                                     no_auth_cache,
                                     trust_server_cert,
                                     config_options,
                                     pool));

    SVNRDUMP_ERR(svn_client_open_ra_session(&(opt_baton->session),
                                            opt_baton->url,
                                            opt_baton->ctx, pool));

    /* Have sane opt_baton->start_revision and end_revision defaults if
       unspecified.  */
    SVNRDUMP_ERR(svn_ra_get_latest_revnum(opt_baton->session,
                                          &latest_revision, pool));

    /* Make sure any provided revisions make sense. */
    SVNRDUMP_ERR(validate_and_resolve_revisions(opt_baton,
                 latest_revision, pool));

    /* Dispatch the subcommand */
    SVNRDUMP_ERR((*subcommand->cmd_func)(os, opt_baton, pool));

    svn_pool_destroy(pool);

    return EXIT_SUCCESS;
}
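/* Hypothetical sketch of the SVNRDUMP_ERR() macro used throughout Example
 * #16 (the real definition lives in svnrdump's sources): report the error
 * and bail out of main() with a failure status. */
#define SVNRDUMP_ERR(expr)                                        \
    do {                                                          \
        svn_error_t *svnrdump_err__ = (expr);                     \
        if (svnrdump_err__) {                                     \
            svn_handle_error2(svnrdump_err__, stderr, FALSE,      \
                              "svnrdump: ");                      \
            svn_error_clear(svnrdump_err__);                      \
            return EXIT_FAILURE;                                  \
        }                                                         \
    } while (0)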
Example #17
/* Standard svn test program */
int
main(int argc, const char *argv[])
{
  const char *prog_name;
  int i;
  svn_boolean_t got_error = FALSE;
  apr_allocator_t *allocator;
  apr_pool_t *pool, *test_pool;
  svn_boolean_t ran_a_test = FALSE;
  svn_boolean_t list_mode = FALSE;
  int opt_id;
  apr_status_t apr_err;
  apr_getopt_t *os;
  svn_error_t *err;
  char errmsg[200];
  /* How many tests are there? */
  int array_size = get_array_size();

  svn_test_opts_t opts = { NULL };

  opts.fs_type = DEFAULT_FS_TYPE;

  /* Initialize APR (Apache pools) */
  if (apr_initialize() != APR_SUCCESS)
    {
      printf("apr_initialize() failed.\n");
      exit(1);
    }

  /* set up the global pool.  Use a separate mutexless allocator,
   * given this application is single threaded.
   */
  if (apr_allocator_create(&allocator))
    return EXIT_FAILURE;

  apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);

  pool = svn_pool_create_ex(NULL, allocator);
  apr_allocator_owner_set(allocator, pool);

  /* Remember the command line */
  test_argc = argc;
  test_argv = argv;

  err = svn_cmdline__getopt_init(&os, argc, argv, pool);

  os->interleave = TRUE; /* Let options and arguments be interleaved */

  /* Strip off any leading path components from the program name.  */
  prog_name = strrchr(argv[0], '/');
  if (prog_name)
    prog_name++;
  else
    {
      /* Just check if this is that weird platform that uses \ instead
         of / for the path separator. */
      prog_name = strrchr(argv[0], '\\');
      if (prog_name)
        prog_name++;
      else
        prog_name = argv[0];
    }

  if (err)
    return svn_cmdline_handle_exit_error(err, pool, prog_name);
  while (1)
    {
      const char *opt_arg;

      /* Parse the next option. */
      apr_err = apr_getopt_long(os, cl_options, &opt_id, &opt_arg);
      if (APR_STATUS_IS_EOF(apr_err))
        break;
      else if (apr_err && (apr_err != APR_BADCH))
        {
          /* Ignore invalid option error to allow passing arbitrary options */
          fprintf(stderr, "apr_getopt_long failed : [%d] %s\n",
                  apr_err, apr_strerror(apr_err, errmsg, sizeof(errmsg)));
          exit(1);
        }

      switch (opt_id) {
        case cleanup_opt:
          cleanup_mode = TRUE;
          break;
        case config_opt:
          opts.config_file = apr_pstrdup(pool, opt_arg);
          break;
        case fstype_opt:
          opts.fs_type = apr_pstrdup(pool, opt_arg);
          break;
        case list_opt:
          list_mode = TRUE;
          break;
        case mode_filter_opt:
          if (svn_cstring_casecmp(opt_arg, "PASS") == 0)
            mode_filter = svn_test_pass;
          else if (svn_cstring_casecmp(opt_arg, "XFAIL") == 0)
            mode_filter = svn_test_xfail;
          else if (svn_cstring_casecmp(opt_arg, "SKIP") == 0)
            mode_filter = svn_test_skip;
          else if (svn_cstring_casecmp(opt_arg, "ALL") == 0)
            mode_filter = svn_test_all;
          else
            {
              fprintf(stderr, "FAIL: Invalid --mode-filter option.  Try ");
              fprintf(stderr, " PASS, XFAIL, SKIP or ALL.\n");
              exit(1);
            }
          break;
        case verbose_opt:
          verbose_mode = TRUE;
          break;
        case trap_assert_opt:
          trap_assertion_failures = TRUE;
          break;
        case quiet_opt:
          quiet_mode = TRUE;
          break;
        case allow_segfault_opt:
          allow_segfaults = TRUE;
          break;
        case server_minor_version_opt:
          {
            char *end;
            opts.server_minor_version = strtol(opt_arg, &end, 10);
            if (end == opt_arg || *end != '\0')
              {
                fprintf(stderr, "FAIL: Non-numeric minor version given\n");
                exit(1);
              }
            if ((opts.server_minor_version < 3)
                || (opts.server_minor_version > 6))
              {
                fprintf(stderr, "FAIL: Invalid minor version given\n");
                exit(1);
              }
          }
      }
    }

  /* Disable sleeping for timestamps, to speed up the tests. */
  apr_env_set(
         "SVN_I_LOVE_CORRUPTED_WORKING_COPIES_SO_DISABLE_SLEEP_FOR_TIMESTAMPS",
         "yes", pool);

  /* You can't be both quiet and verbose. */
  if (quiet_mode && verbose_mode)
    {
      fprintf(stderr, "FAIL: --verbose and --quiet are mutually exclusive\n");
      exit(1);
    }

  /* Create an iteration pool for the tests */
  cleanup_pool = svn_pool_create(pool);
  test_pool = svn_pool_create(pool);

  if (trap_assertion_failures)
    svn_error_set_malfunction_handler(svn_error_raise_on_malfunction);

  if (argc >= 2)  /* notice command-line arguments */
    {
      if (! strcmp(argv[1], "list") || list_mode)
        {
          const char *header_msg;
          ran_a_test = TRUE;

          /* run all tests with MSG_ONLY set to TRUE */
          header_msg = "Test #  Mode   Test Description\n"
                       "------  -----  ----------------\n";
          for (i = 1; i <= array_size; i++)
            {
              if (do_test_num(prog_name, i, TRUE, &opts, &header_msg,
                              test_pool))
                got_error = TRUE;

              /* Clear the per-function pool */
              svn_pool_clear(test_pool);
              svn_pool_clear(cleanup_pool);
            }
        }
      else
        {
          for (i = 1; i < argc; i++)
            {
              if (svn_ctype_isdigit(argv[i][0]) || argv[i][0] == '-')
                {
                  int test_num = atoi(argv[i]);
                  if (test_num == 0)
                    /* A --option argument, most likely. */
                    continue;

                  ran_a_test = TRUE;
                  if (do_test_num(prog_name, test_num, FALSE, &opts, NULL,
                                  test_pool))
                    got_error = TRUE;

                  /* Clear the per-function pool */
                  svn_pool_clear(test_pool);
                  svn_pool_clear(cleanup_pool);
                }
            }
        }
    }

  if (! ran_a_test)
    {
      /* just run all tests */
      for (i = 1; i <= array_size; i++)
        {
          if (do_test_num(prog_name, i, FALSE, &opts, NULL, test_pool))
            got_error = TRUE;

          /* Clear the per-function pool */
          svn_pool_clear(test_pool);
          svn_pool_clear(cleanup_pool);
        }
    }

  /* Clean up APR */
  svn_pool_destroy(pool);      /* takes test_pool with it */
  apr_terminate();

  return got_error;
}
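
Every Subversion-based example here bootstraps its root pool the same way: create a mutexless allocator, cap its free list, create the pool from it, then make the pool own the allocator so one destroy tears both down. A minimal self-contained sketch of just that pattern in plain APR; the 2 MB cap is an arbitrary stand-in for SVN_ALLOCATOR_RECOMMENDED_MAX_FREE, whose exact value is not shown here:

#include <apr_allocator.h>
#include <apr_pools.h>

static apr_pool_t *create_root_pool(void)
{
    apr_allocator_t *allocator;
    apr_pool_t *pool;

    if (apr_allocator_create(&allocator))
        return NULL;

    /* Cap the allocator's free-block list so unused memory is
     * eventually returned to the system instead of hoarded. */
    apr_allocator_max_free_set(allocator, 2 * 1024 * 1024);

    if (apr_pool_create_ex(&pool, NULL, NULL, allocator) != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return NULL;
    }

    /* The pool now owns the allocator: destroying the pool destroys
     * the allocator too, and the single-threaded caller never has to
     * attach a mutex to it. */
    apr_allocator_owner_set(allocator, pool);
    return pool;
}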
Example #18
0
/* This is the thread that actually does all the work. */
static int32 worker_thread(void *dummy)
{
    int worker_slot = (int)dummy;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    apr_status_t rv = APR_EINIT;
    int last_poll_idx = 0;
    sigset_t sig_mask;
    int requests_this_child = 0;
    apr_pollset_t *pollset = NULL;
    ap_listen_rec *lr = NULL;
    ap_sb_handle_t *sbh = NULL;
    int i;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0;
    /* We have 2 pools that we create/use throughout the lifetime of this
     * worker. The first and longest lived is the pworker pool. From
     * this we create the ptrans pool, the lifetime of which is the same
     * as each connection and is reset prior to each attempt to
     * process a connection.
     */
    apr_pool_t *ptrans = NULL;
    apr_pool_t *pworker = NULL;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */

    on_exit_thread(check_restart, (void*)worker_slot);

    /* block the signals for this thread only if we're not running as a
     * single process.
     */
    if (!one_process) {
        sigfillset(&sig_mask);
        sigprocmask(SIG_BLOCK, &sig_mask, NULL);
    }

    /* Each worker thread is fully in control of its destiny and so
     * to allow each thread to handle the lifetime of its own resources
     * we create and use a subcontext for every thread.
     * The subcontext is a child of the pconf pool.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pworker, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pworker);

    apr_pool_create(&ptrans, pworker);
    apr_pool_tag(ptrans, "transaction");

    ap_create_sb_handle(&sbh, pworker, 0, worker_slot);
    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* We size the pollset for one extra socket: the udp_sock we use for
     * signalling death. That socket gets added after the others.
     */
    apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0);

    for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        apr_pollset_add(pollset, &pfd);
    }
    {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = udp_sock;
        pfd.reqevents = APR_POLLIN;

        apr_pollset_add(pollset, &pfd);
    }

    bucket_alloc = apr_bucket_alloc_create(pworker);

    mpm_state = AP_MPMQ_RUNNING;

    while (!this_worker_should_exit) {
        conn_rec *current_conn;
        void *csd;

        /* (Re)initialize this child to a pre-connection state. */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_thread > 0
             && requests_this_child++ >= ap_max_requests_per_thread))
            clean_child_exit(0, worker_slot);

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        apr_thread_mutex_lock(accept_mutex);

        /* We always (presently) have at least 2 sockets we listen on, so
         * we don't have the ability for a fast path for a single socket
         * as some MPMs allow :(
         */
        for (;;) {
            apr_int32_t numdesc = 0;
            const apr_pollfd_t *pdesc = NULL;

            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
            if (rv != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(rv)) {
                    if (one_process && shutdown_pending)
                        return 0;
                    continue;
                }
                ap_log_error(APLOG_MARK, APLOG_ERR, rv,
                             ap_server_conf, "apr_pollset_poll: (listen)");
                clean_child_exit(1, worker_slot);
            }
            /* We can always use pdesc[0], but sockets at position N
             * could end up completely starved of attention in a very
             * busy server. Therefore, we round-robin across the
             * returned set of descriptors. While it is possible that
             * the returned set of descriptors might flip around and
             * continue to starve some sockets, we happen to know the
             * internal pollset implementation retains ordering
             * stability of the sockets. Thus, the round-robin should
             * ensure that a socket will eventually be serviced.
             */
            if (last_poll_idx >= numdesc)
                last_poll_idx = 0;

            /* Grab a listener record from the client_data of the poll
             * descriptor, and advance our saved index to round-robin
             * the next fetch.
             *
             * ### hmm... this descriptor might have POLLERR rather
             * ### than POLLIN
             */

            lr = pdesc[last_poll_idx++].client_data;

            /* The only socket we add without client_data is the first: the
             * UDP socket we listen on for restart signals. If we've gotten a
             * hit on that listener, lr will be NULL here and we know we've
             * been told to die. Before we jump to the end of the while loop
             * with this_worker_should_exit set to 1 (causing us to exit
             * normally, we hope) we release the accept_mutex, as we want
             * every thread to go through this same routine :)
             * Bit of a hack, but compared to what I had before...
             */
            if (lr == NULL) {
                this_worker_should_exit = 1;
                apr_thread_mutex_unlock(accept_mutex);
                goto got_a_black_spot;
            }
            goto got_fd;
        }
got_fd:
        /* Run beos_accept to accept the connection and set things up to
         * allow us to process it. We always release the accept_mutex here,
         * even if we fail to accept, as otherwise we'll starve other
         * workers, which would be bad.
         */
        rv = beos_accept(&csd, lr, ptrans);
        apr_thread_mutex_unlock(accept_mutex);

        if (rv == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1, worker_slot);
        } else if (rv != APR_SUCCESS)
            continue;

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, worker_slot, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            this_worker_should_exit = 1;
        }
got_a_black_spot:
        ;   /* null statement: a label must be attached to a statement */
    }

    apr_pool_destroy(ptrans);
    apr_pool_destroy(pworker);

    clean_child_exit(0, worker_slot);
}

static int make_worker(int slot)
{
    thread_id tid;

    if (slot + 1 > ap_max_child_assigned)
            ap_max_child_assigned = slot + 1;

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL);

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL);
        return 0;
    }

    tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
                       (void *)slot);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
            "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec*)NULL);

        sleep(10);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
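
The round-robin scan over poll results above (and again in the next example) can be read as one tiny helper: reset the saved index when it falls off the end of the returned set, then take the next descriptor's client_data. A hedged sketch, not httpd API; next_ready_client_data is a hypothetical name, and apr_poll.h is assumed:

/* Rotate the starting index across calls so that no descriptor in the
 * returned set is starved of attention (hypothetical helper). */
static void *next_ready_client_data(const apr_pollfd_t *pdesc,
                                    apr_int32_t numdesc,
                                    int *last_poll_idx)
{
    if (*last_poll_idx >= numdesc)
        *last_poll_idx = 0;
    return pdesc[(*last_poll_idx)++].client_data;
}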
Example #19
0
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */
    
    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
	clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                            sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;
    
    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
	/*
	 * (Re)initialize this child to a pre-connection state.
	 */

	current_conn = NULL;

	apr_pool_clear(ptrans);

	if ((ap_max_requests_per_child > 0
	     && requests_this_child++ >= ap_max_requests_per_child)) {
	    clean_child_exit(0);
	}

	(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

	/*
	 * Wait for an acceptable connection to arrive.
	 */

	/* Lock around "accept", if necessary */
	SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
	    for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
    	            /* Single Unix documents select as returning errnos
    	             * EBADF, EINTR, and EINVAL... and in none of those
    	             * cases does it make sense to continue.  In fact
    	             * on Linux 2.0.x we seem to end up with EFAULT
    	             * occasionally, and we'd loop forever due to it.
    	             */
    	            ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                             "apr_poll: (listen)");
    	            clean_child_exit(1);
                }
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }
    got_fd:
	/* if we accept() something we don't want to die, so we have to
	 * defer the exit
	 */
        status = listensocks[offset].accept_func(&csd, 
                                                 &listensocks[offset], ptrans);
        SAFE_ACCEPT(accept_mutex_off());	/* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

	/*
	 * We now have a connection, so set it up with the appropriate
	 * socket options, file descriptors, and read/write buffers.
	 */

	current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        
        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
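
The transaction-pool lifecycle in this child_main (and in the variants around it) is worth seeing in isolation: ptrans is created once as a child of the long-lived child pool, cleared rather than destroyed at the top of every connection, and dies with its parent. A minimal hedged sketch reusing the example's names; accept_one and handle_one are hypothetical stand-ins for the accept/process calls above:

apr_pool_t *ptrans;
void *csd;

apr_pool_create(&ptrans, pchild);   /* child of the long-lived child pool */
apr_pool_tag(ptrans, "transaction");

while (!die_now) {
    /* Frees everything allocated for the previous connection but keeps
     * the pool (and its cached allocator blocks) for reuse. */
    apr_pool_clear(ptrans);

    if (accept_one(&csd, ptrans) == APR_SUCCESS)   /* hypothetical */
        handle_one(csd, ptrans);                   /* hypothetical */
}
/* No apr_pool_destroy(ptrans) needed: it goes away with pchild. */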
Example #20
0
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_status_t status;
    int i;
    ap_listen_rec *lr;
    apr_pollset_t *pollset;
    ap_sb_handle_t *sbh;
    apr_bucket_alloc_t *bucket_alloc;
    int last_poll_idx = 0;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't initialize cross-process lock in child "
                     "(%s) (%d)", ap_lock_fname, ap_accept_lock_mech);
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    status = apr_pollset_create(&pollset, num_listensocks, pchild, 0);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't create pollset in child; check system or user limits");
        clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */
    }

    for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, &pfd);
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    /* die_now is set when AP_SIG_GRACEFUL is received in the child;
     * shutdown_pending is set when SIGTERM is received when running
     * in single process mode.  */
    while (!die_now && !shutdown_pending) {
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                /* check for termination first so we don't sleep for a while in
                 * poll if already signalled
                 */
                if (one_process && shutdown_pending) {
                    SAFE_ACCEPT(accept_mutex_off());
                    return;
                }
                else if (die_now) {
                    /* In graceful stop/restart; drop the mutex
                     * and terminate the child. */
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(0);
                }
                /* timeout == 10 seconds to avoid a hang at graceful restart/stop
                 * caused by the closing of sockets by the signal handler
                 */
                status = apr_pollset_poll(pollset, apr_time_from_sec(10), 
                                          &numdesc, &pdesc);
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_TIMEUP(status) ||
                        APR_STATUS_IS_EINTR(status)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, status,
                                 ap_server_conf, "apr_pollset_poll: (listen)");
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                goto got_fd;
            }
        }
    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
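
The "### check the status" marker above leaves apr_pollset_add unchecked. A hedged sketch of what the checked version could look like, reusing only the calls already present in these examples; add_listener_checked is a hypothetical helper:

static apr_status_t add_listener_checked(apr_pollset_t *pollset,
                                         ap_listen_rec *lr)
{
    apr_pollfd_t pfd = { 0 };
    apr_status_t rv;

    pfd.desc_type = APR_POLL_SOCKET;
    pfd.desc.s = lr->sd;
    pfd.reqevents = APR_POLLIN;
    pfd.client_data = lr;

    rv = apr_pollset_add(pollset, &pfd);
    if (rv != APR_SUCCESS)
        ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                     "apr_pollset_add failed for a listener");
    return rv;
}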
Example #21
0
/*static */
void worker_main(void *arg)
{
    ap_listen_rec *lr, *first_lr, *last_lr = NULL;
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    conn_rec *current_conn;
    apr_status_t stat = APR_EINIT;
    ap_sb_handle_t *sbh;
    apr_thread_t *thd = NULL;
    apr_os_thread_t osthd;

    int my_worker_num = (int)arg;
    apr_socket_t *csd = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = NULL;
    fd_set main_fds;

    int sockdes;
    int srv;
    struct timeval tv;
    int wouldblock_retry;

    osthd = apr_os_thread_current();
    apr_os_thread_put(&thd, &osthd, pmain);

    tv.tv_sec = 1;
    tv.tv_usec = 0;

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);

    apr_pool_create_ex(&ptrans, pmain, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    atomic_inc (&worker_thread_count);

    while (!die_now) {
        /*
        * (Re)initialize this child to a pre-connection state.
        */
        current_conn = NULL;
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
            && requests_this_child++ >= ap_max_requests_per_child)) {
            DBPRINT1 ("\n**Thread slot %d is shutting down", my_worker_num);
            clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
        }

        ap_update_child_status_from_indexes(0, my_worker_num, WORKER_READY,
                                            (request_rec *) NULL);

        /*
        * Wait for an acceptable connection to arrive.
        */

        for (;;) {
            if (shutdown_pending || restart_pending || (ap_scoreboard_image->servers[0][my_worker_num].status == WORKER_IDLE_KILL)) {
                DBPRINT1 ("\nThread slot %d is shutting down\n", my_worker_num);
                clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
            }

            /* Check the listen queue on all sockets for requests */
            memcpy(&main_fds, &listenfds, sizeof(fd_set));
            srv = select(listenmaxfd + 1, &main_fds, NULL, NULL, &tv);

            if (srv <= 0) {
                if (srv < 0) {
                    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00217)
                        "select() failed on listen socket");
                    apr_thread_yield();
                }
                continue;
            }

            /* remember the last_lr we searched last time around so that
            we don't end up starving any particular listening socket */
            if (last_lr == NULL) {
                lr = ap_listeners;
            }
            else {
                lr = last_lr->next;
                if (!lr)
                    lr = ap_listeners;
            }
            first_lr = lr;
            do {
                apr_os_sock_get(&sockdes, lr->sd);
                if (FD_ISSET(sockdes, &main_fds))
                    goto got_listener;
                lr = lr->next;
                if (!lr)
                    lr = ap_listeners;
            } while (lr != first_lr);
            /* if we get here, something unexpected happened. Go back
            into the select state and try again.
            */
            continue;
        got_listener:
            last_lr = lr;
            sd = lr->sd;

            wouldblock_retry = MAX_WB_RETRIES;

            while (wouldblock_retry) {
                if ((stat = apr_socket_accept(&csd, sd, ptrans)) == APR_SUCCESS) {
                    break;
                }
                else {
                    /* If the error is a wouldblock, then maybe we were too
                        quick trying to pull the next request from the
                        listen queue.  Try a few more times, then return to
                        our idle listen state. */
                    if (!APR_STATUS_IS_EAGAIN(stat)) {
                        break;
                    }

                    if (wouldblock_retry--) {
                        apr_thread_yield();
                    }
                }
            }

            /* If we got a new socket, set it back to blocking mode and
                process it.  Otherwise handle the error. */
            if (stat == APR_SUCCESS) {
                apr_socket_opt_set(csd, APR_SO_NONBLOCK, 0);
#ifdef DBINFO_ON
                if (wouldblock_retry < MAX_WB_RETRIES) {
                    retry_success++;
                    avg_retries += (MAX_WB_RETRIES-wouldblock_retry);
                }
#endif
                break;       /* We have a socket ready for reading */
            }
            else {
#ifdef DBINFO_ON
                if (APR_STATUS_IS_EAGAIN(stat)) {
                        would_block++;
                        retry_fail++;
                }
                else if (
#else
                if (APR_STATUS_IS_EAGAIN(stat) ||
#endif
                    APR_STATUS_IS_ECONNRESET(stat) ||
                    APR_STATUS_IS_ETIMEDOUT(stat) ||
                    APR_STATUS_IS_EHOSTUNREACH(stat) ||
                    APR_STATUS_IS_ENETUNREACH(stat)) {
                        ;
                }
#ifdef USE_WINSOCK
                else if (APR_STATUS_IS_ENETDOWN(stat)) {
                       /*
                        * When the network layer has been shut down, there
                        * is not much use in simply exiting: the parent
                        * would simply re-create us (and we'd fail again).
                        * Use the CHILDFATAL code to tear the server down.
                        * @@@ Martin's idea for possible improvement:
                        * A different approach would be to define
                        * a new APEXIT_NETDOWN exit code, the reception
                        * of which would make the parent shutdown all
                        * children, then idle-loop until it detected that
                        * the network is up again, and restart the children.
                        * Ben Hyde noted that temporary ENETDOWN situations
                        * occur in mobile IP.
                        */
                        ap_log_error(APLOG_MARK, APLOG_EMERG, stat, ap_server_conf, APLOGNO(00218)
                            "apr_socket_accept: giving up.");
                        clean_child_exit(APEXIT_CHILDFATAL, my_worker_num, ptrans,
                                         bucket_alloc);
                }
#endif
                else {
                        ap_log_error(APLOG_MARK, APLOG_ERR, stat, ap_server_conf, APLOGNO(00219)
                            "apr_socket_accept: (client socket)");
                        clean_child_exit(1, my_worker_num, ptrans, bucket_alloc);
                }
            }
        }

        ap_create_sb_handle(&sbh, ptrans, 0, my_worker_num);
        /*
        * We now have a connection, so set it up with the appropriate
        * socket options, file descriptors, and read/write buffers.
        */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_worker_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            current_conn->current_thread = thd;
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        request_count++;
    }
    clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
}
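
The wouldblock handling above amounts to a bounded retry: on EAGAIN, yield and try the accept again a few times before falling back to select. The same logic compressed into one loop, as a hedged sketch reusing the example's names (MAX_WB_RETRIES assumed defined as above):

int tries = MAX_WB_RETRIES;
do {
    stat = apr_socket_accept(&csd, sd, ptrans);
    if (stat == APR_SUCCESS || !APR_STATUS_IS_EAGAIN(stat))
        break;               /* got a socket, or a non-retryable error */
    apr_thread_yield();      /* EAGAIN: give the listen queue a moment */
} while (--tries > 0);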
Example #22
0
nsvn_t*
nsvn_base_init (const char *config_dir)
{
  nsvn_t *nsvn;

  /* Initializing apr module. */
  if (nsvn_base__init_apr() == EXIT_FAILURE)
    return NULL;

  /* Allocating memory for NSVN internal data structure. */
  nsvn = (nsvn_t*) calloc (1, sizeof(nsvn_t));
  if (nsvn == NULL)
    return NULL;
    
  if (apr_allocator_create (&nsvn->allocator))
    return nsvn_base_uninit(nsvn);

  apr_allocator_max_free_set (nsvn->allocator,
                              SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);
  nsvn->pool = svn_pool_create_ex (NULL,
                                   nsvn->allocator);
  apr_allocator_owner_set (nsvn->allocator, nsvn->pool);

  /* Initializing auth provider array. */
  nsvn->auth_providers = NULL;

  /* Check the versions of the svn libraries used here */
  #ifdef NSVN_DEBUG
  //nsvn->err = nsvn_base__check_svnlib_ver();
  //if (nsvn->err)
  //  return nsvn_base_uninit (nsvn);
  #endif

  /* Initializing the SVN FS library. */
  nsvn->err = svn_fs_initialize (nsvn->pool);
  if (nsvn->err)
    return nsvn_base_uninit (nsvn); 

  /* TODO: Need to ignore SIGPIPE and hook SIGINT, SIGHUP, SIGTERM, and
     SIGUSR1 signals to a handler which will set a global volatile variable
     to TRUE.  By looking at that variable we can decide to cancel an
     ongoing operation.  We also need to export a function which does the
     same as the handler. */

  /* TODO: Need to decide whether to check that config_dir is a valid
     Subversion config directory; if such a check is needed, add it here. */

  if (config_dir)
    {
      nsvn->config_dir = strdup (config_dir);
      svn_config_ensure(nsvn->config_dir, nsvn->pool);
    }
  else
    nsvn->config_dir = NULL;

  /* Creating Subversion client context object. */
  nsvn->err = svn_client_create_context (&(nsvn->ctx), nsvn->pool);
  if (nsvn->err)
    return nsvn_base_uninit (nsvn);

  nsvn->err = svn_config_get_config (&(nsvn->ctx->config),
                                     nsvn->config_dir, nsvn->pool);
  if (nsvn->err)
    return nsvn_base_uninit (nsvn);

  nsvn->cfg = apr_hash_get(nsvn->ctx->config, SVN_CONFIG_CATEGORY_CONFIG,
                           APR_HASH_KEY_STRING);

  return nsvn;
}
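
The error paths above all funnel through nsvn_base_uninit, whose body is not shown in this example. A hedged sketch of the shape it plausibly has, inferred only from how it is used (every failure path returns its result, and callers treat that result as NULL): destroy the pool, which also destroys the allocator it owns, then free the wrapper struct.

nsvn_t*
nsvn_base_uninit (nsvn_t *nsvn)
{
  if (nsvn == NULL)
    return NULL;

  if (nsvn->err)
    svn_error_clear (nsvn->err);     /* assumption: pending error dropped */
  if (nsvn->pool)
    svn_pool_destroy (nsvn->pool);   /* takes the owned allocator with it */
  if (nsvn->config_dir)
    free (nsvn->config_dir);

  free (nsvn);
  return NULL;                       /* callers return this NULL directly */
}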
Example #23
0
/*
 * Why is this not an svn subcommand?  I have this vague idea that it could
 * be run as part of the build process, with the output embedded in the svn
 * program.  Obviously we don't want to have to run svn when building svn.
 */
int
main(int argc, const char *argv[])
{
  const char *wc_path, *trail_url;
  apr_allocator_t *allocator;
  apr_pool_t *pool;
  int wc_format;
  svn_wc_revision_status_t *res;
  svn_boolean_t no_newline = FALSE, committed = FALSE;
  svn_error_t *err;
  apr_getopt_t *os;
  const apr_getopt_option_t options[] =
    {
      {"no-newline", 'n', 0, N_("do not output the trailing newline")},
      {"committed",  'c', 0, N_("last changed rather than current revisions")},
      {"help", 'h', 0, N_("display this help")},
      {"version", SVNVERSION_OPT_VERSION, 0,
       N_("show program version information")},
      {0,             0,  0,  0}
    };

  /* Initialize the app. */
  if (svn_cmdline_init("svnversion", stderr) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  /* Create our top-level pool.  Use a separate mutexless allocator,
   * given this application is single threaded.
   */
  if (apr_allocator_create(&allocator))
    return EXIT_FAILURE;

  apr_allocator_max_free_set(allocator, SVN_ALLOCATOR_RECOMMENDED_MAX_FREE);

  pool = svn_pool_create_ex(NULL, allocator);
  apr_allocator_owner_set(allocator, pool);

  /* Check library versions */
  err = check_lib_versions();
  if (err)
    return svn_cmdline_handle_exit_error(err, pool, "svnversion: ");

#if defined(WIN32) || defined(__CYGWIN__)
  /* Set the working copy administrative directory name. */
  if (getenv("SVN_ASP_DOT_NET_HACK"))
    {
      err = svn_wc_set_adm_dir("_svn", pool);
      if (err)
        return svn_cmdline_handle_exit_error(err, pool, "svnversion: ");
    }
#endif

  err = svn_cmdline__getopt_init(&os, argc, argv, pool);
  if (err)
    return svn_cmdline_handle_exit_error(err, pool, "svnversion: ");

  os->interleave = 1;
  while (1)
    {
      int opt;
      const char *arg;
      apr_status_t status = apr_getopt_long(os, options, &opt, &arg);
      if (APR_STATUS_IS_EOF(status))
        break;
      if (status != APR_SUCCESS)
        {
          usage(pool);
          return EXIT_FAILURE;
        }
      switch (opt)
        {
        case 'n':
          no_newline = TRUE;
          break;
        case 'c':
          committed = TRUE;
          break;
        case 'h':
          help(options, pool);
          break;
        case SVNVERSION_OPT_VERSION:
          SVN_INT_ERR(version(pool));
          exit(0);
          break;
        default:
          usage(pool);
          return EXIT_FAILURE;
        }
    }

  if (os->ind > argc || os->ind < argc - 2)
    {
      usage(pool);
      return EXIT_FAILURE;
    }

  SVN_INT_ERR(svn_utf_cstring_to_utf8
              (&wc_path, (os->ind < argc) ? os->argv[os->ind] : ".",
               pool));
  wc_path = svn_path_internal_style(wc_path, pool);

  if (os->ind+1 < argc)
    SVN_INT_ERR(svn_utf_cstring_to_utf8
                (&trail_url, os->argv[os->ind+1], pool));
  else
    trail_url = NULL;

  SVN_INT_ERR(svn_wc_check_wc(wc_path, &wc_format, pool));
  if (! wc_format)
    {
      svn_node_kind_t kind;
      SVN_INT_ERR(svn_io_check_path(wc_path, &kind, pool));
      if (kind == svn_node_dir)
        {
          SVN_INT_ERR(svn_cmdline_printf(pool, _("exported%s"),
                                         no_newline ? "" : "\n"));
          svn_pool_destroy(pool);
          return EXIT_SUCCESS;
        }
      else
        {
          svn_error_clear
            (svn_cmdline_fprintf(stderr, pool,
                                 _("'%s' not versioned, and not exported\n"),
                                 wc_path));
          svn_pool_destroy(pool);
          return EXIT_FAILURE;
        }
    }


  SVN_INT_ERR(svn_wc_revision_status(&res, wc_path, trail_url, committed,
                                     NULL, NULL, pool));

  /* Build compact '123[:456]M?S?P?' string. */
  SVN_INT_ERR(svn_cmdline_printf(pool, "%ld", res->min_rev));
  if (res->min_rev != res->max_rev)
    SVN_INT_ERR(svn_cmdline_printf(pool, ":%ld", res->max_rev));
  if (res->modified)
    SVN_INT_ERR(svn_cmdline_fputs("M", stdout, pool));
  if (res->switched)
    SVN_INT_ERR(svn_cmdline_fputs("S", stdout, pool));
  if (res->sparse_checkout)
    SVN_INT_ERR(svn_cmdline_fputs("P", stdout, pool));

  if (! no_newline)
    SVN_INT_ERR(svn_cmdline_fputs("\n", stdout, pool));

  svn_pool_destroy(pool);

  /* Flush stdout to make sure that the user will see any printing errors. */
  SVN_INT_ERR(svn_cmdline_fflush(stdout));

  return EXIT_SUCCESS;
}
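
For reference, the compact string built above reads as follows: a single-revision working copy prints e.g. 4168, a mixed-revision one prints 4123:4168, with M appended when there are local modifications, S when the working copy is switched, and P when it is a sparse (partial) checkout. So 4123:4168MS means mixed revisions, modified, and switched.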
Example #24
0
static int32 worker_thread(void * dummy)
{
    proc_info * ti = dummy;
    int child_slot = ti->slot;
    apr_pool_t *tpool = ti->tpool;
    apr_allocator_t *allocator;
    apr_socket_t *csd = NULL;
    apr_pool_t *ptrans;		/* Pool for per-transaction stuff */
    apr_bucket_alloc_t *bucket_alloc;
    apr_socket_t *sd = NULL;
    apr_status_t rv = APR_EINIT;
    int srv, n;
    int curr_pollfd = 0, last_pollfd = 0;
    sigset_t sig_mask;
    int requests_this_child = ap_max_requests_per_thread;
    apr_pollfd_t *pollset;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0; 
    free(ti);

    mpm_state = AP_MPMQ_STARTING;

    on_exit_thread(check_restart, (void*)child_slot);
          
    /* block the signals for this thread */
    sigfillset(&sig_mask);
    sigprocmask(SIG_BLOCK, &sig_mask, NULL);

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, tpool, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);

    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count++;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_STARTING,
                                               (request_rec*)NULL);
                                  
    apr_poll_setup(&pollset, num_listening_sockets + 1, tpool);
    for (n = 0; n <= num_listening_sockets; n++)
        apr_poll_socket_add(pollset, listening_sockets[n], APR_POLLIN);

    mpm_state = AP_MPMQ_RUNNING;

    while (1) {
        /* If we're here, then chances are (unless we're the first thread created) 
         * we're going to be held up in the accept mutex, so doing this here
         * shouldn't hurt performance.
         */

        this_worker_should_exit |= (ap_max_requests_per_thread != 0) && (requests_this_child <= 0);
        
        if (this_worker_should_exit) break;

        (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_READY,
                                                   (request_rec*)NULL);

        apr_thread_mutex_lock(accept_mutex);

        while (!this_worker_should_exit) {
            apr_int16_t event;
            apr_status_t ret;

            ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);

            if (ret != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(ret)) {
                    continue;
                }
                /* poll() will only return errors in catastrophic
                 * circumstances. Let's try exiting gracefully, for now. */
                ap_log_error(APLOG_MARK, APLOG_ERR, ret, (const server_rec *)
                             ap_server_conf, "apr_poll: (listen)");
                this_worker_should_exit = 1;
            } else {
                /* if we've bailed in apr_poll what's the point of trying to use the data? */
                apr_poll_revents_get(&event, listening_sockets[0], pollset);

                if (event & APR_POLLIN){
                    apr_sockaddr_t *rec_sa;
                    apr_size_t len = 5;
                    char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
                    apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, ptrans);
                    
                    if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0, tmpbuf, &len))
                        != APR_SUCCESS){
                        ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL, 
                            "error getting data from UDP!!");
                    } else {
                        /* add checking??? */              
                    }
                    this_worker_should_exit = 1;
                }
            }
          
            if (this_worker_should_exit) break;

            if (num_listening_sockets == 1) {
                sd = ap_listeners->sd;
                goto got_fd;
            }
            else {
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;

                    if (curr_pollfd > num_listening_sockets)
                        curr_pollfd = 1;
                    
                    /* Get the revent... */
                    apr_poll_revents_get(&event, listening_sockets[curr_pollfd], pollset);
                    
                    if (event & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        sd = listening_sockets[curr_pollfd];
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);
            }
        }
    got_fd:

        if (!this_worker_should_exit) {
            rv = apr_accept(&csd, sd, ptrans);

            apr_thread_mutex_unlock(accept_mutex);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                  "apr_accept");
            } else {
                process_socket(ptrans, csd, child_slot, bucket_alloc);
                requests_this_child--;
            }
        }
        else {
            apr_thread_mutex_unlock(accept_mutex);
            break;
        }
        apr_pool_clear(ptrans);
    }

    ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD, (request_rec*)NULL);

    apr_bucket_alloc_destroy(bucket_alloc);

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
                 "worker_thread %ld exiting", find_thread(NULL));
    
    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count--;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    return (0);
}
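
The UDP shutdown trick above has a sender side that is not shown: some thread sends a small datagram to the loopback port the workers poll on, and any worker that reads it sets this_worker_should_exit. A hedged sketch of that sender using modern APR 1.x names; the function name is hypothetical, and reusing port 7772 and a 5-byte message from the receiver above is an assumption:

static apr_status_t signal_workers_to_die(apr_pool_t *pool)
{
    apr_sockaddr_t *sa;
    apr_socket_t *sock;
    apr_size_t len = 5;             /* matches the receiver's 5-byte read */
    apr_status_t rv;

    rv = apr_sockaddr_info_get(&sa, "127.0.0.1", APR_UNSPEC, 7772, 0, pool);
    if (rv != APR_SUCCESS)
        return rv;

    rv = apr_socket_create(&sock, sa->family, SOCK_DGRAM, APR_PROTO_UDP,
                           pool);
    if (rv != APR_SUCCESS)
        return rv;

    rv = apr_socket_sendto(sock, sa, 0, "die!", &len);  /* 4 chars + NUL */
    apr_socket_close(sock);
    return rv;
}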