Example #1
static void test_manyfile(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_file_t *f;

    f = make_test_file(tc, "manyfile.bin",
                       "world" "hello" "brave" " ,\n");

    apr_brigade_insert_file(bb, f, 5, 5, p);
    apr_brigade_insert_file(bb, f, 16, 1, p);
    apr_brigade_insert_file(bb, f, 15, 1, p);
    apr_brigade_insert_file(bb, f, 10, 5, p);
    apr_brigade_insert_file(bb, f, 15, 1, p);
    apr_brigade_insert_file(bb, f, 0, 5, p);
    apr_brigade_insert_file(bb, f, 17, 1, p);

    /* can you tell what it is yet? */
    flatten_match(tc, "file seek test", bb,
                  "hello, brave world\n");

    apr_file_close(f);
    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
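make_test_file() and flatten_match() are helpers from APR's test suite that are not shown on this page, and p is the suite's global test pool. The following is only a plausible sketch of flatten_match(), assuming it flattens the brigade with apr_brigade_pflatten() and compares the result against the expected string:

/* Illustrative sketch of the flatten_match() helper used throughout these
 * tests: flatten the brigade into one buffer and compare it byte-for-byte
 * with the expected string. */
static void flatten_match(abts_case *tc, const char *ctx,
                          apr_bucket_brigade *bb, const char *expect)
{
    char *buf;
    apr_size_t len;

    apr_assert_success(tc, ctx, apr_brigade_pflatten(bb, &buf, &len, p));
    ABTS_ASSERT(tc, ctx, len == strlen(expect));
    ABTS_ASSERT(tc, ctx, memcmp(buf, expect, len) == 0);
}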
Example #2
/* Regression test for PR 34708, where a file bucket will keep
 * duplicating itself on being read() when EOF is reached
 * prematurely. */
static void test_truncfile(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_file_t *f = make_test_file(tc, "testfile.txt", "hello");
    apr_bucket *e;
    const char *buf;
    apr_size_t len;

    apr_brigade_insert_file(bb, f, 0, 5, p);

    apr_file_trunc(f, 0);

    e = APR_BRIGADE_FIRST(bb);

    ABTS_ASSERT(tc, "single bucket in brigade",
                APR_BUCKET_NEXT(e) == APR_BRIGADE_SENTINEL(bb));

    apr_bucket_file_enable_mmap(e, 0);

    ABTS_ASSERT(tc, "read gave APR_EOF",
                apr_bucket_read(e, &buf, &len, APR_BLOCK_READ) == APR_EOF);

    ABTS_ASSERT(tc, "read length 0", len == 0);
    
    ABTS_ASSERT(tc, "still a single bucket in brigade",
                APR_BUCKET_NEXT(e) == APR_BRIGADE_SENTINEL(bb));

    apr_file_close(f);
    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
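make_test_file() is likewise defined elsewhere in the test file. A minimal sketch, under the assumption that it creates the named file, writes the given contents, and rewinds the handle so the file buckets above can read from offset 0:

/* Hypothetical sketch of make_test_file(); the real helper may differ. */
static apr_file_t *make_test_file(abts_case *tc, const char *fname,
                                  const char *contents)
{
    apr_file_t *f;
    apr_off_t start = 0;

    apr_assert_success(tc, "create test file",
                       apr_file_open(&f, fname,
                                     APR_FOPEN_CREATE | APR_FOPEN_TRUNCATE
                                     | APR_FOPEN_READ | APR_FOPEN_WRITE,
                                     APR_OS_DEFAULT, p));
    apr_assert_success(tc, "write test file contents",
                       apr_file_puts(contents, f));
    apr_assert_success(tc, "rewind test file",
                       apr_file_seek(f, APR_SET, &start));
    return f;
}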
Example #3
void mpm_recycle_completion_context(PCOMP_CONTEXT context)
{
    /* Recycle the completion context.
     * - clear the ptrans pool
     * - put the context on the queue to be consumed by the accept thread
     * Note:
     * context->accept_socket may be in a disconnected but reusable
     * state so -don't- close it.
     */
    if (context) {
        apr_pool_clear(context->ptrans);
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->next = NULL;
        ResetEvent(context->Overlapped.hEvent);
        apr_thread_mutex_lock(qlock);
        if (qtail) {
            qtail->next = context;
        } else {
            qhead = context;
            SetEvent(qwait_event);
        }
        qtail = context;
        apr_thread_mutex_unlock(qlock);
    }
}
Example #4
static void test_partition(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_bucket *e;

    e = apr_bucket_immortal_create(hello, strlen(hello), ba);
    APR_BRIGADE_INSERT_HEAD(bb, e);

    apr_assert_success(tc, "partition brigade",
                       apr_brigade_partition(bb, 5, &e));

    test_bucket_content(tc, APR_BRIGADE_FIRST(bb),
                        "hello", 5);

    test_bucket_content(tc, APR_BRIGADE_LAST(bb),
                        ", world", 7);

    ABTS_ASSERT(tc, "partition returns APR_INCOMPLETE",
                apr_brigade_partition(bb, 8192, &e));

    ABTS_ASSERT(tc, "APR_INCOMPLETE partition returned sentinel",
                e == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
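Here hello is presumably the string "hello, world" defined near the top of the test file, and test_bucket_content() is another helper that is not shown. A hedged sketch of the latter, assuming it reads the bucket and checks both length and contents:

/* Illustrative sketch of test_bucket_content(); not the original helper. */
static void test_bucket_content(abts_case *tc, apr_bucket *e,
                                const char *expect, apr_size_t elen)
{
    const char *data;
    apr_size_t len;

    apr_assert_success(tc, "read bucket",
                       apr_bucket_read(e, &data, &len, APR_BLOCK_READ));
    ABTS_ASSERT(tc, "bucket has expected length", len == elen);
    ABTS_ASSERT(tc, "bucket has expected content",
                memcmp(data, expect, elen) == 0);
}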
Example #5
static PCOMP_CONTEXT win9x_get_connection(PCOMP_CONTEXT context)
{
    apr_os_sock_info_t sockinfo;
    int len, salen;
#if APR_HAVE_IPV6
    salen = sizeof(struct sockaddr_in6);
#else
    salen = sizeof(struct sockaddr_in);
#endif


    if (context == NULL) {
        /* allocate the completion context and the transaction pool */
        apr_allocator_t *allocator;
        apr_thread_mutex_lock(child_lock);
        context = apr_pcalloc(pchild, sizeof(COMP_CONTEXT));
        apr_allocator_create(&allocator);
        apr_allocator_max_free_set(allocator, ap_max_mem_free);
        apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
        apr_allocator_owner_set(allocator, context->ptrans);
        apr_pool_tag(context->ptrans, "transaction");
        apr_thread_mutex_unlock(child_lock);
    }

    while (1) {
        apr_pool_clear(context->ptrans);
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->accept_socket = remove_job();
        if (context->accept_socket == INVALID_SOCKET) {
            return NULL;
        }
        len = salen;
        context->sa_server = apr_palloc(context->ptrans, len);
        if (getsockname(context->accept_socket,
                        context->sa_server, &len)== SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getsockname failed");
            continue;
        }
        len = salen;
        context->sa_client = apr_palloc(context->ptrans, len);
        if ((getpeername(context->accept_socket,
                         context->sa_client, &len)) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getpeername failed");
            memset(context->sa_client, '\0', salen);
        }
        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local   = context->sa_server;
        sockinfo.remote  = context->sa_client;
        sockinfo.family  = context->sa_server->sa_family;
        sockinfo.type    = SOCK_STREAM;
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        return context;
    }
}
Example #6
pstar_io::pstar_io(request_rec *r) : wpl_io() {
	this->r = r;
	this->rpos = 0;
	this->headers_sent = false;
	this->waiting_buckets = 0;

	this->ba = apr_bucket_alloc_create(r->pool);
	this->bb = apr_brigade_create(r->pool, this->ba);
}
Example #7
static void test_insertfile(abts_case *tc, void *ctx)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb;
    const apr_off_t bignum = (APR_INT64_C(2) << 32) + 424242;
    apr_off_t count;
    apr_file_t *f;
    apr_bucket *e;

    ABTS_ASSERT(tc, "open test file",
                apr_file_open(&f, TIF_FNAME,
                              APR_FOPEN_WRITE | APR_FOPEN_TRUNCATE
                            | APR_FOPEN_CREATE | APR_FOPEN_SPARSE,
                              APR_OS_DEFAULT, p) == APR_SUCCESS);

    if (apr_file_trunc(f, bignum)) {
        apr_file_close(f);
        apr_file_remove(TIF_FNAME, p);
        ABTS_NOT_IMPL(tc, "Skipped: could not create large file");
        return;
    }
    
    bb = apr_brigade_create(p, ba);

    e = apr_brigade_insert_file(bb, f, 0, bignum, p);
    
    ABTS_ASSERT(tc, "inserted file was not at end of brigade",
                e == APR_BRIGADE_LAST(bb));

    /* check that the total size of inserted buckets is equal to the
     * total size of the file. */
    count = 0;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        ABTS_ASSERT(tc, "bucket size sane", e->length != (apr_size_t)-1);
        count += e->length;
    }

    ABTS_ASSERT(tc, "total size of buckets incorrect", count == bignum);

    apr_brigade_destroy(bb);

    /* Truncate the file to zero size before close() so that we don't
     * actually write out the large file if we are on a non-sparse file
     * system - like Mac OS X's HFS.  Otherwise, pity the poor user who
     * has to wait for the 8GB file to be written to disk.
     */
    apr_file_trunc(f, 0);

    apr_file_close(f);
    apr_bucket_alloc_destroy(ba);
    apr_file_remove(TIF_FNAME, p);
}
Example #8
static void test_flatten(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb;

    bb = make_simple_brigade(ba, "hello, ", "world");

    flatten_match(tc, "flatten brigade", bb, "hello, world");

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);    
}
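make_simple_brigade() is used here and in Examples #14 and #17 below. A plausible sketch, assuming it builds a brigade containing the two strings as two separate buckets:

/* Illustrative sketch of make_simple_brigade(); the real helper may choose a
 * different bucket type. */
static apr_bucket_brigade *make_simple_brigade(apr_bucket_alloc_t *ba,
                                               const char *first,
                                               const char *second)
{
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);

    APR_BRIGADE_INSERT_TAIL(bb,
        apr_bucket_transient_create(first, strlen(first), ba));
    APR_BRIGADE_INSERT_TAIL(bb,
        apr_bucket_transient_create(second, strlen(second), ba));

    return bb;
}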
Example #9
static void test_create(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;

    ba = apr_bucket_alloc_create(p);
    bb = apr_brigade_create(p, ba);

    ABTS_ASSERT(tc, "new brigade not NULL", bb != NULL);
    ABTS_ASSERT(tc, "new brigade is empty", APR_BRIGADE_EMPTY(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
Example #10
/**
 * An h2_mplx needs to be thread-safe, as it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since an apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->refs = 1;
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        
        m->q = h2_tq_create(m->pool, h2_config_geti(conf, H2_CONF_MAX_STREAMS));
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->workers = workers;
        
        m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
    }
    return m;
}
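The comment above describes an idiom that recurs in several of the larger examples on this page (see also Examples #18, #20 to #22, and #25): create an allocator, hand it to a new pool, and then transfer ownership so that destroying the pool also destroys the allocator. An isolated sketch; make_owned_pool is an illustrative name, not part of any of these sources:

/* Sketch of the pool-with-its-own-allocator idiom. */
static apr_pool_t *make_owned_pool(apr_pool_t *parent)
{
    apr_allocator_t *allocator = NULL;
    apr_pool_t *pool = NULL;

    if (apr_allocator_create(&allocator) != APR_SUCCESS) {
        return NULL;
    }
    apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (!pool) {
        apr_allocator_destroy(allocator);      /* avoid leaking the allocator */
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);  /* pool now owns the allocator */
    return pool;
}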
Example #11
static void fix_event_conn(conn_rec *c, conn_rec *master) 
{
    event_conn_state_t *master_cs = ap_get_module_config(master->conn_config, 
                                                         h2_conn_mpm_module());
    event_conn_state_t *cs = apr_pcalloc(c->pool, sizeof(event_conn_state_t));
    cs->bucket_alloc = apr_bucket_alloc_create(c->pool);
    
    ap_set_module_config(c->conn_config, h2_conn_mpm_module(), cs);
    
    cs->c = c;
    cs->r = NULL;
    cs->p = master_cs->p;
    cs->pfd = master_cs->pfd;
    cs->pub = master_cs->pub;
    cs->pub.state = CONN_STATE_READ_REQUEST_LINE;
    
    c->cs = &(cs->pub);
}
Example #12
int output_init_bb (apr_pool_t* pool, output_t** output_ptr){

	output_t* output = *output_ptr = apr_pcalloc(pool, sizeof(output_t));

	//Prepare Output
	output->pool = pool;
	output->headers = apr_table_make(pool, 10);
	output->content_type = apr_pcalloc(pool, sizeof(char) * 255);
	output->bucket_allocator = apr_bucket_alloc_create(pool);

	output->error_messages = apr_pcalloc(pool, sizeof(error_messages_t));
	output->error_messages->num_errors = 0;

	output->bucket_brigade = apr_brigade_create(pool, output->bucket_allocator);
	output->length = 0;
	
	return 0;
}
Example #13
void WapacheProtocol::ProcessConnection(void) {
	// this function runs inside the worker thread
	apr_socket_t *sock;
	long thread_num = 1;
    ap_sb_handle_t *sbh;
	apr_bucket_alloc_t *ba;
	bool redirected;

	// create a fake socket
	sock = (apr_socket_t *) apr_pcalloc(Pool, sizeof(apr_socket_t));

    sock->pool = Pool;
    sock->local_port_unknown = 1;
    sock->local_interface_unknown = 1;
    sock->remote_addr_unknown = 1;
    sock->userdata = (sock_userdata_t *) this;

    ba = apr_bucket_alloc_create(Pool);

    ap_create_sb_handle(&sbh, Pool, 0, thread_num);
	Connection = ap_run_create_connection(Pool, Application.ServerConf, sock, thread_num, sbh, ba);

	if(Connection) {
		do {
			redirected = Redirecting;
			Redirecting = false;
			if(!Terminated) {
				ap_process_connection(Connection, sock);
			}
		} while(Redirecting);
	}
	else {
		// can't create the connection object for some reason
		HttpStatusCode = 500;
	}

	if(!HttpStatusCode || HttpStatusCode >= 400 || HttpStatusCode == 204) {
		ReportResult(E_ABORT, ERROR_CANCELLED);
	}
	else {
		ReportResult(S_OK, 0);
	} 
}
Example #14
static void test_splitline(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bin, *bout;

    bin = make_simple_brigade(ba, "blah blah blah-",
                              "end of line.\nfoo foo foo");
    bout = apr_brigade_create(p, ba);

    apr_assert_success(tc, "split line",
                       apr_brigade_split_line(bout, bin,
                                              APR_BLOCK_READ, 100));

    flatten_match(tc, "split line", bout, "blah blah blah-end of line.\n");
    flatten_match(tc, "remainder", bin, "foo foo foo");

    apr_brigade_destroy(bout);
    apr_brigade_destroy(bin);
    apr_bucket_alloc_destroy(ba);
}
Example #15
static void test_simple(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba;
    apr_bucket_brigade *bb;
    apr_bucket *fb, *tb;
    
    ba = apr_bucket_alloc_create(p);
    bb = apr_brigade_create(p, ba);
    
    fb = APR_BRIGADE_FIRST(bb);
    ABTS_ASSERT(tc, "first bucket of empty brigade is sentinel",
                fb == APR_BRIGADE_SENTINEL(bb));

    fb = apr_bucket_flush_create(ba);
    APR_BRIGADE_INSERT_HEAD(bb, fb);

    ABTS_ASSERT(tc, "first bucket of brigade is flush",
                APR_BRIGADE_FIRST(bb) == fb);

    ABTS_ASSERT(tc, "bucket after flush is sentinel",
                APR_BUCKET_NEXT(fb) == APR_BRIGADE_SENTINEL(bb));

    tb = apr_bucket_transient_create("aaa", 3, ba);
    APR_BUCKET_INSERT_BEFORE(fb, tb);

    ABTS_ASSERT(tc, "bucket before flush now transient",
                APR_BUCKET_PREV(fb) == tb);
    ABTS_ASSERT(tc, "bucket after transient is flush",
                APR_BUCKET_NEXT(tb) == fb);
    ABTS_ASSERT(tc, "bucket before transient is sentinel",
                APR_BUCKET_PREV(tb) == APR_BRIGADE_SENTINEL(bb));

    apr_brigade_cleanup(bb);

    ABTS_ASSERT(tc, "cleaned up brigade was empty", APR_BRIGADE_EMPTY(bb));

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
Example #16
static void test_bwrite(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb = apr_brigade_create(p, ba);
    apr_off_t length;
    int n;

    for (n = 0; n < COUNT; n++) {
        apr_assert_success(tc, "brigade_write", 
                           apr_brigade_write(bb, NULL, NULL,
                                             THESTR, sizeof THESTR));
    }

    apr_assert_success(tc, "determine brigade length",
                       apr_brigade_length(bb, 1, &length));

    ABTS_ASSERT(tc, "brigade has correct length",
                length == (COUNT * sizeof THESTR));
    
    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
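THESTR and COUNT are macros defined near the top of the test file; the values below are assumptions for illustration only. Note that sizeof THESTR counts the trailing NUL, so each apr_brigade_write() call deliberately appends the terminator as well, which the final length check reflects.

/* Assumed, illustrative definitions (not copied from the source): */
#define THESTR "hello"
#define COUNT  3000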
Example #17
static void test_split(abts_case *tc, void *data)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb, *bb2;
    apr_bucket *e;

    bb = make_simple_brigade(ba, "hello, ", "world");

    /* split at the "world" bucket */
    e = APR_BRIGADE_LAST(bb);
    bb2 = apr_brigade_split(bb, e);

    ABTS_ASSERT(tc, "split brigade contains one bucket",
                count_buckets(bb2) == 1);
    ABTS_ASSERT(tc, "original brigade contains one bucket",
                count_buckets(bb) == 1);

    flatten_match(tc, "match original brigade", bb, "hello, ");
    flatten_match(tc, "match split brigade", bb2, "world");

    apr_brigade_destroy(bb2);
    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
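count_buckets(), used here and in Example #23, is a small helper that presumably just walks the bucket ring from the first bucket to the sentinel:

/* Plausible sketch of count_buckets(). */
static int count_buckets(apr_bucket_brigade *bb)
{
    apr_bucket *e;
    int count = 0;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        count++;
    }

    return count;
}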
Example #18
/**
 * An h2_mplx needs to be thread-safe, as it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since an apr_allocator_t is
 *   not re-entrant. The separate allocator works without a
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session one with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf, 
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_pool_tag(m->pool, "h2_mplx");
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        status = apr_thread_cond_create(&m->task_thawed, m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
    
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->q = h2_iq_create(m->pool, m->max_streams);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        m->workers_max = workers->max_workers;
        m->workers_def_limit = 4;
        m->workers_limit = m->workers_def_limit;
        m->last_limit_change = m->last_idle_block = apr_time_now();
        m->limit_change_interval = apr_time_from_msec(200);
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
        
        m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
        
        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, 
                                         m->stream_max_mem);
        h2_ngn_shed_set_ctx(m->ngn_shed , m);
    }
    return m;
}
Example #19
conn_rec *h2_slave_create(conn_rec *master, apr_pool_t *p, 
                          apr_thread_t *thread, apr_socket_t *socket)
{
    conn_rec *c;
    
    AP_DEBUG_ASSERT(master);
    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
                  "h2_conn(%ld): created from master", master->id);
    
    /* This is like the slave connection creation from 2.5-DEV. A
     * very efficient way - not sure how compatible this is, since
     * the core hooks are no longer run.
     * But maybe it is better this way, not sure yet.
     */
    c = (conn_rec *) apr_palloc(p, sizeof(conn_rec));
    if (c == NULL) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, 
                      APLOGNO(02913) "h2_task: creating conn");
        return NULL;
    }
    
    memcpy(c, master, sizeof(conn_rec));
           
    /* Replace these */
    c->id                     = (master->id & (long)p);
    c->master                 = master;
    c->pool                   = p;        
    c->current_thread         = thread;
    c->conn_config            = ap_create_conn_config(p);
    c->notes                  = apr_table_make(p, 5);
    c->input_filters          = NULL;
    c->output_filters         = NULL;
    c->bucket_alloc           = apr_bucket_alloc_create(p);
    c->cs                     = NULL;
    c->data_in_input_filters  = 0;
    c->data_in_output_filters = 0;
    c->clogging_input_filters = 1;
    c->log                    = NULL;
    c->log_id                 = NULL;
    
    /* TODO: these should be unique to this thread */
    c->sbh                    = master->sbh;
    
    /* Simulate that we had already a request on this connection. */
    c->keepalives             = 1;
    
    ap_set_module_config(c->conn_config, &core_module, socket);
    
    /* This works for mpm_worker so far. Other mpm modules have 
     * different needs, unfortunately. The most interesting one 
     * being mpm_event...
     */
    switch (h2_conn_mpm_type()) {
        case H2_MPM_WORKER:
            /* all fine */
            break;
        case H2_MPM_EVENT: 
            fix_event_conn(c, master);
            break;
        default:
            /* fingers crossed */
            break;
    }
    
    return c;
}
Example #20
PCOMP_CONTEXT mpm_get_completion_context(void)
{
    apr_status_t rv;
    PCOMP_CONTEXT context = NULL;

    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
                                 "Server ran out of threads to serve requests. Consider "
                                 "raising the ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be available,
                 * since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else /* Hopefully, WAIT_TIMEOUT */
                    return NULL;
            } else {
                /* Allocate another context.
                 * Note:
                 * Multiple failures in the next two steps will cause the pchild pool
                 * to 'leak' storage. I don't think this is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (PCOMP_CONTEXT) apr_pcalloc(pchild, sizeof(COMP_CONTEXT));

                context->Overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
                if (context->Overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK,APLOG_WARNING, apr_get_os_error(), ap_server_conf,
                                 "mpm_get_completion_context: CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK,APLOG_WARNING, rv, ap_server_conf,
                                 "mpm_get_completion_context: Failed to create the transaction pool.");
                    CloseHandle(context->Overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
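The queue manipulated here and in Example #3 relies on file-scope state in the Windows MPM child process. The declarations below are reconstructed from how the code uses them and are only assumptions, not copied from the source:

static apr_thread_mutex_t *qlock;                /* guards qhead/qtail              */
static PCOMP_CONTEXT qhead, qtail;               /* singly linked recycle queue     */
static HANDLE qwait_event;                       /* signalled while queue non-empty */
static apr_uint32_t num_completion_contexts;     /* bumped via apr_atomic_inc32()   */
static apr_uint32_t max_num_completion_contexts; /* upper bound on live contexts    */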
Example #21
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_status_t status;
    int i;
    ap_listen_rec *lr;
    apr_pollset_t *pollset;
    ap_sb_handle_t *sbh;
    apr_bucket_alloc_t *bucket_alloc;
    int last_poll_idx = 0;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't initialize cross-process lock in child "
                     "(%s) (%d)", ap_lock_fname, ap_accept_lock_mech);
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    status = apr_pollset_create(&pollset, num_listensocks, pchild, 0);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't create pollset in child; check system or user limits");
        clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */
    }

    for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, &pfd);
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    /* die_now is set when AP_SIG_GRACEFUL is received in the child;
     * shutdown_pending is set when SIGTERM is received when running
     * in single process mode.  */
    while (!die_now && !shutdown_pending) {
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                /* check for termination first so we don't sleep for a while in
                 * poll if already signalled
                 */
                if (one_process && shutdown_pending) {
                    SAFE_ACCEPT(accept_mutex_off());
                    return;
                }
                else if (die_now) {
                    /* In graceful stop/restart; drop the mutex
                     * and terminate the child. */
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(0);
                }
                /* timeout == 10 seconds to avoid a hang at graceful restart/stop
                 * caused by the closing of sockets by the signal handler
                 */
                status = apr_pollset_poll(pollset, apr_time_from_sec(10), 
                                          &numdesc, &pdesc);
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_TIMEUP(status) ||
                        APR_STATUS_IS_EINTR(status)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, status,
                                 ap_server_conf, "apr_pollset_poll: (listen)");
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                goto got_fd;
            }
        }
    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
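SAFE_ACCEPT() wraps the accept-mutex calls so that serialization can be skipped when it is not needed. The definition below sketches the usual prefork arrangement and may not match this source exactly:

/* Sketch: only serialize accept() around multiple listeners on platforms
 * that can safely do unserialized single-listener accepts. */
#ifdef SINGLE_LISTEN_UNSERIALIZED_ACCEPT
#define SAFE_ACCEPT(stmt) do { if (ap_listeners->next) { stmt; } } while (0)
#else
#define SAFE_ACCEPT(stmt) do { stmt; } while (0)
#endif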
Example #22
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */
    
    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
	clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                            sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;
    
    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
	/*
	 * (Re)initialize this child to a pre-connection state.
	 */

	current_conn = NULL;

	apr_pool_clear(ptrans);

	if ((ap_max_requests_per_child > 0
	     && requests_this_child++ >= ap_max_requests_per_child)) {
	    clean_child_exit(0);
	}

	(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

	/*
	 * Wait for an acceptable connection to arrive.
	 */

	/* Lock around "accept", if necessary */
	SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
	    for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
    	            /* Single Unix documents select as returning errnos
    	             * EBADF, EINTR, and EINVAL... and in none of those
    	             * cases does it make sense to continue.  In fact
    	             * on Linux 2.0.x we seem to end up with EFAULT
    	             * occasionally, and we'd loop forever due to it.
    	             */
    	            ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                             "apr_poll: (listen)");
    	            clean_child_exit(1);
                }
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }
    got_fd:
	/* if we accept() something we don't want to die, so we have to
	 * defer the exit
	 */
        status = listensocks[offset].accept_func(&csd, 
                                                 &listensocks[offset], ptrans);
        SAFE_ACCEPT(accept_mutex_off());	/* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

	/*
	 * We now have a connection, so set it up with the appropriate
	 * socket options, file descriptors, and read/write buffers.
	 */

	current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        
        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
Example #23
static void test_splits(abts_case *tc, void *ctx)
{
    apr_bucket_alloc_t *ba = apr_bucket_alloc_create(p);
    apr_bucket_brigade *bb;
    apr_bucket *e;
    char *str = "alphabeta";
    int n;

    bb = apr_brigade_create(p, ba);

    APR_BRIGADE_INSERT_TAIL(bb,
                            apr_bucket_immortal_create(str, 9, ba));
    APR_BRIGADE_INSERT_TAIL(bb, 
                            apr_bucket_transient_create(str, 9, ba));
    APR_BRIGADE_INSERT_TAIL(bb, 
                            apr_bucket_heap_create(strdup(str), 9, free, ba));
    APR_BRIGADE_INSERT_TAIL(bb, 
                            apr_bucket_pool_create(apr_pstrdup(p, str), 9, p, 
                                                   ba));

    ABTS_ASSERT(tc, "four buckets inserted", count_buckets(bb) == 4);
    
    /* now split each of the buckets after byte 5 */
    for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) {
        ABTS_ASSERT(tc, "reached end of brigade", 
                    e != APR_BRIGADE_SENTINEL(bb));
        ABTS_ASSERT(tc, "split bucket OK",
                    apr_bucket_split(e, 5) == APR_SUCCESS);
        e = APR_BUCKET_NEXT(e);
        ABTS_ASSERT(tc, "split OK", e != APR_BRIGADE_SENTINEL(bb));
        e = APR_BUCKET_NEXT(e);
    }
    
    ABTS_ASSERT(tc, "four buckets split into eight", 
                count_buckets(bb) == 8);

    for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) {
        const char *data;
        apr_size_t len;
        
        apr_assert_success(tc, "read alpha from bucket",
                           apr_bucket_read(e, &data, &len, APR_BLOCK_READ));
        ABTS_ASSERT(tc, "read 5 bytes", len == 5);
        ABTS_STR_NEQUAL(tc, "alpha", data, 5);

        e = APR_BUCKET_NEXT(e);

        apr_assert_success(tc, "read beta from bucket",
                           apr_bucket_read(e, &data, &len, APR_BLOCK_READ));
        ABTS_ASSERT(tc, "read 4 bytes", len == 4);
        ABTS_STR_NEQUAL(tc, "beta", data, 5);

        e = APR_BUCKET_NEXT(e);
    }

    /* now delete the "alpha" buckets */
    for (n = 0, e = APR_BRIGADE_FIRST(bb); n < 4; n++) {
        apr_bucket *f;

        ABTS_ASSERT(tc, "reached end of brigade",
                    e != APR_BRIGADE_SENTINEL(bb));
        f = APR_BUCKET_NEXT(e);
        apr_bucket_delete(e);
        e = APR_BUCKET_NEXT(f);
    }    
    
    ABTS_ASSERT(tc, "eight buckets reduced to four", 
                count_buckets(bb) == 4);

    flatten_match(tc, "flatten beta brigade", bb,
                  "beta" "beta" "beta" "beta");

    apr_brigade_destroy(bb);
    apr_bucket_alloc_destroy(ba);
}
Example #24
/**
 * The output filter routine. This one gets called whenever a response is
 * generated that passes this filter. Returns APR_SUCCESS if everything works
 * out.
 *
 * @param f     The filter definition.
 * @param bb    The bucket brigade containing the data.
 */
static apr_status_t replace_output_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    request_rec *r = f->r;
    conn_rec *c = r->connection;
    replace_ctx_t *ctx = f->ctx;
    apr_bucket *b;
    apr_size_t len;
    const char *data;
    const char *header;
    apr_status_t rv;
    int re_vector[RE_VECTOR_SIZE];  // 3 elements per matched pattern
    replace_pattern_t *next;
    header_replace_pattern_t *next_header;
    int modified = 0;               // flag to determine if a replacement has
                                    // occurred.

    if (!ctx) {
        /* Initialize context */
        ctx = apr_pcalloc(f->r->pool, sizeof(replace_ctx_t));
        f->ctx = ctx;
        ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
    }

    /* parse config settings */
    
    /* look for the user-defined filter */
    ctx->filter = find_filter_def(f->r->server, f->frec->name);
    if (!ctx->filter) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
                      "couldn't find definition of filter '%s'",
                      f->frec->name);
        return APR_EINVAL;
    }
    ctx->p = f->r->pool;
    if (ctx->filter->intype &&
        ctx->filter->intype != INTYPE_ALL) {
        if (!f->r->content_type) {
            ctx->noop = 1;
        }
        else {
            const char *ctypes = f->r->content_type;
            const char *ctype = ap_getword(f->r->pool, &ctypes, ';');

            if (strcasecmp(ctx->filter->intype, ctype)) {
                /* wrong IMT for us; don't mess with the output */
                ctx->noop = 1;
            }
        }
    }

    /* exit immediately if there are indications that the filter shouldn't be
     * executed.
     */
    if (ctx->noop == 1) {
        ap_pass_brigade(f->next, bb);
        return APR_SUCCESS;
    }

    /**
     * Loop through the configured header patterns.
     */
    for (next_header = ctx->filter->header_pattern;
         next_header != NULL;
         next_header = next_header->next) {

        // create a separate table with the requested HTTP header entries and
        // unset those headers in the original request.
        apr_table_t *header_table;
        header_table = apr_table_make(r->pool, 2);
        // create a data structure for the callback function
        header_replace_cb_t *hrcb;
        hrcb = apr_palloc(r->pool, sizeof(header_replace_cb_t));
        hrcb->header_table = header_table;
        hrcb->pattern = next_header->pattern;
        hrcb->extra = next_header->extra;
        hrcb->replacement = next_header->replacement;
        hrcb->r = r;
        // pass any header that is defined to be processed to the callback
        // function and unset those headers in the original outgoing record.
        apr_table_do(replace_header_cb, hrcb, r->headers_out, 
                     next_header->header, NULL);
        // only touch the header if the changed header table is not empty.
        if (!apr_is_empty_table(header_table)) {
            apr_table_unset(r->headers_out, next_header->header);
            // overlay the original header table with the new one to reintegrate
            // the changed headers.
            r->headers_out = apr_table_overlay(r->pool, r->headers_out, 
                                               header_table);
        }
    }

    /* Not nice but necessary: unset the ETag, because we have no way to
     * adjust its value correctly after modifying the body.
     */
    apr_table_unset(f->r->headers_out, "ETag"); 

    int eos = 0;                    // flag to check if an EOS bucket is in the brigade.
    apr_bucket *eos_bucket = NULL;  // backup for the EOS bucket.

    /* Iterate through the available data. Stop if there is an EOS. */

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        if (APR_BUCKET_IS_EOS(b)) {
            eos = 1;
            ap_save_brigade(f, &ctx->bb, &bb, ctx->p);
            APR_BUCKET_REMOVE(b);
            eos_bucket = b;
            break;
        }
    }


    /* If the iteration over the brigade hasn't found an EOS bucket, just save
     * the brigade and return.
     */
    if (eos != 1) {
        ap_save_brigade(f, &ctx->bb, &bb, ctx->p);
        return APR_SUCCESS;
    }

    if ((rv = apr_brigade_pflatten(ctx->bb, (char **)&data, &len, ctx->p)) 
        != APR_SUCCESS) { 
        /* Return if the flattening didn't work. */
        return rv;
    } else {
        /* Remove the original data from the bucket brigade. Otherwise it would
         * be passed twice (original data and the processed, flattened copy) to
         * the next filter.
         */
        apr_brigade_cleanup(ctx->bb);
    }

    /* Only do the replacement work if there is any data to process. */
    if (len > 0) {

        /* start checking for the regex's. */
        for (next = ctx->filter->pattern; 
             next != NULL; 
             next = next->next)
        {
            int rc = 0;
            int offset = 0;

            /* loop through the configured patterns */
            do {
                rc = pcre_exec(next->pattern, next->extra, data, 
                               len, offset, 0,
                               re_vector, RE_VECTOR_SIZE);
                               
                if (rc < 0 && rc != PCRE_ERROR_NOMATCH) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r, 
                                  "Matching Error %d", rc);
                    return rc;
                }

                /* This shouldn't happen */
                if (rc == 0) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, rc, r,
                                  "PCRE output vector too small (%d)", 
                                  RE_VECTOR_SIZE/3-1);
                }

                /* If the result count is greater than 0 then there are
                 * matches in the data string. Thus we try to replace those
                 * strings with the user provided string.
                 */
                if (rc > 0) {
                    char *prefix;   // the string before the matching part.
                    char *postfix;  // the string after the matching part.
                    char *newdata;  // the concatenated string of prefix,
                                    // the replaced string and postfix.
                    char *replacement;
                                    // the string with the data to replace
                                    // (after the subpattern processing has
                                    // been done).
                    char *to_replace[10];
                                    // the string array containing the
                                    // strings that are to be replaced.
                    int match_diff; // the difference between the matching
                                    // string and its replacement.
                    int x;          // a simple counter.
                    char *pos;      // the starting position within the
                                    // replacement string, where there is a
                                    // subpattern to replace.

                    /* start with building the replacement string */
                    replacement = apr_pstrcat(ctx->p, next->replacement,
                                              NULL);

                    /* look for the subpatterns \0 to \9 */

                    for (x = 0; x < rc && x < 10; x++) {
                        /* extract the x'ths subpattern */
                        to_replace[x] = substr(data, re_vector[x*2],
                                               re_vector[x*2+1] -
                                               re_vector[x*2], r); 

                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                                      "Found match: %s", to_replace[x]);
                        
                        /* the token ( \0 to \9) we are looking for */
                        char *token = apr_pstrcat(ctx->p, "\\",
                                                apr_itoa(ctx->p, x), NULL);
                        /* allocate memory for the replacement operation */
                        char *tmp;
                        if (!to_replace[x] || strlen(to_replace[x]) < 2) {
                            tmp = malloc(strlen(replacement) + 1);
                        } else {
                            tmp = malloc(strlen(replacement) - 1 +
                                         strlen(to_replace[x]));
                        }
                        /* copy the replacement string to the new
                         * location.
                         */
                        memcpy(tmp, replacement, strlen(replacement) + 1);
                        replacement = tmp;
                        /* try to replace each occurrence of the token with
                         * its matched subpattern. */
                        pos = ap_strstr(replacement, token);
                        while (pos) { 
                            if (!to_replace[x]) {
                                break;
                            }
                            substr_replace(pos, to_replace[x],
                                           strlen(pos), 
                                           strlen(to_replace[x]));
                            if (strlen(to_replace[x]) < 2) {
                                tmp = malloc(strlen(replacement) + 1);
                            } else {
                                tmp = malloc(strlen(replacement) - 1 + 
                                             strlen(to_replace[x]));
                            }
                            memcpy(tmp, replacement, 
                                   strlen(replacement) + 1);
                            /* clean up. */
                            free(replacement);
                            replacement = tmp; 
                            pos = ap_strstr(replacement, token);
                        }
                    }

                    match_diff = strlen(replacement) -
                                 (re_vector[1] - re_vector[0]);

                    /* Allocate memory for a buffer to copy the first part
                     * of the data string up to (but not including) the
                     * matching pattern.
                     */
                    prefix = apr_pcalloc(ctx->p, re_vector[0] + 1);
                    if (prefix == NULL) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                            "Unable to allocate memory for prefix",
                            NULL);
                        return -1;
                    }

                    /* Copy the string from the offset (beginning of
                     * pattern matching) to the first occurrence of the
                     * pattern and add a trailing \0.
                     */
                    memcpy(prefix, data, (size_t)re_vector[0]); 

                    /* Copy the string from the end of the pattern to the
                     * end of the data string itself.
                     */
                    postfix = apr_pcalloc(ctx->p, len);
                    if (postfix == NULL) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                            "Unable to allocate memory for postfix",
                            NULL);
                        return -1;
                    }
                    
                    memcpy(postfix, 
                           (data + re_vector[1]),
                           len - re_vector[1]);
                    
                    /* Create the new data string, replace the old one
                     * and clean up.
                     */
                    newdata = apr_pstrcat(ctx->p, prefix, 
                                          replacement, postfix, 
                                          NULL);
                    /* update the data pointer and free the allocated
                     * memory for the replacement string.
                     */
                    data = newdata;
                    free(replacement);

                    /* Calculate the new offset in the data string, where
                     * the new matching round is to begin.
                     */
                    offset = re_vector[1] + match_diff; 
                    len += match_diff;
                    modified = 1;
                }
            } while (rc > 0);
        }
        /* Adjust the real length of the processed data. */
        if (apr_table_get(f->r->headers_out, "Content-Length") != NULL) {
            apr_table_set(f->r->headers_out, "Content-Length",
                apr_itoa(ctx->p, len));
        }
        /* If an Entity Tag is set, change the mtime and generate a new ETag.*/
        if (apr_table_get(f->r->headers_out, "ETag") != NULL) {
           r->mtime = time(NULL);
           ap_set_etag(r);
        }
    }
    /* Create a new bucket with the processed data, insert that one into our
     * brigade, then insert the saved EOS bucket at the end of the brigade
     * and pass the brigade to the next filter.
     */
    APR_BRIGADE_INSERT_TAIL(ctx->bb, apr_bucket_transient_create(data, len, apr_bucket_alloc_create(ctx->p)));
    APR_BRIGADE_INSERT_TAIL(ctx->bb, eos_bucket);
    ap_pass_brigade(f->next, ctx->bb);

    return APR_SUCCESS;
}
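An output filter like this is normally registered during hook registration and then attached to requests under its configured name. The snippet below is purely illustrative: the filter name "REPLACE" and the function replace_register_hooks are made up, and only ap_register_output_filter() is a real httpd API:

/* Hypothetical registration sketch, not taken from the module's source. */
static void replace_register_hooks(apr_pool_t *pool)
{
    ap_register_output_filter("REPLACE", replace_output_filter,
                              NULL, AP_FTYPE_RESOURCE);
}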
Example #25
/* This is the thread that actually does all the work. */
static int32 worker_thread(void *dummy)
{
    int worker_slot = (int)dummy;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    apr_status_t rv = APR_EINIT;
    int last_poll_idx = 0;
    sigset_t sig_mask;
    int requests_this_child = 0;
    apr_pollset_t *pollset = NULL;
    ap_listen_rec *lr = NULL;
    ap_sb_handle_t *sbh = NULL;
    int i;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0;
    /* We have 2 pools that we create/use throughout the lifetime of this
     * worker. The first and longest lived is the pworker pool. From
     * this we create the ptrans pool, the lifetime of which is the same
     * as each connection and is reset prior to each attempt to
     * process a connection.
     */
    apr_pool_t *ptrans = NULL;
    apr_pool_t *pworker = NULL;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */

    on_exit_thread(check_restart, (void*)worker_slot);

    /* block the signals for this thread only if we're not running as a
     * single process.
     */
    if (!one_process) {
        sigfillset(&sig_mask);
        sigprocmask(SIG_BLOCK, &sig_mask, NULL);
    }

    /* Each worker thread is fully in control of its destiny, so to allow
     * each thread to handle the lifetime of its own resources we create
     * and use a subcontext for every thread.
     * The subcontext is a child of the pconf pool.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pworker, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pworker);

    apr_pool_create(&ptrans, pworker);
    apr_pool_tag(ptrans, "transaction");

    ap_create_sb_handle(&sbh, pworker, 0, worker_slot);
    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* We allow for one extra descriptor here: the udp_sock we use for
     * signalling death. It gets added after the others.
     */
    apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0);

    for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        apr_pollset_add(pollset, &pfd);
    }
    {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = udp_sock;
        pfd.reqevents = APR_POLLIN;

        apr_pollset_add(pollset, &pfd);
    }

    bucket_alloc = apr_bucket_alloc_create(pworker);

    mpm_state = AP_MPMQ_RUNNING;

    while (!this_worker_should_exit) {
        conn_rec *current_conn;
        void *csd;

        /* (Re)initialize this child to a pre-connection state. */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_thread > 0
             && requests_this_child++ >= ap_max_requests_per_thread))
            clean_child_exit(0, worker_slot);

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        apr_thread_mutex_lock(accept_mutex);

        /* We always (presently) have at least 2 sockets we listen on, so
         * we don't get the single-socket fast path that some MPMs
         * allow :(
         */
        for (;;) {
            apr_int32_t numdesc = 0;
            const apr_pollfd_t *pdesc = NULL;

            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
            if (rv != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(rv)) {
                    if (one_process && shutdown_pending)
                        return 0;
                    continue;
                }
                ap_log_error(APLOG_MARK, APLOG_ERR, rv,
                             ap_server_conf, "apr_pollset_poll: (listen)");
                clean_child_exit(1, worker_slot);
            }
            /* We can always use pdesc[0], but sockets at position N
             * could end up completely starved of attention in a very
             * busy server. Therefore, we round-robin across the
             * returned set of descriptors. While it is possible that
             * the returned set of descriptors might flip around and
             * continue to starve some sockets, we happen to know the
             * internal pollset implementation retains ordering
             * stability of the sockets. Thus, the round-robin should
             * ensure that a socket will eventually be serviced.
             */
            if (last_poll_idx >= numdesc)
                last_poll_idx = 0;

            /* Grab a listener record from the client_data of the poll
             * descriptor, and advance our saved index to round-robin
             * the next fetch.
             *
             * ### hmm... this descriptor might have POLLERR rather
             * ### than POLLIN
             */

            lr = pdesc[last_poll_idx++].client_data;

            /* The only socket we add without client_data is the UDP socket
             * we listen on for restart signals. If we've gotten a hit on that
             * listener, lr will be NULL here and we know we've been told to die.
             * Before we jump to the end of the while loop with
             * this_worker_should_exit set to 1 (causing us to exit normally,
             * we hope), we release the accept_mutex, as we want every thread
             * to go through this same routine :)
             * Bit of a hack, but compared to what I had before...
             */
            if (lr == NULL) {
                this_worker_should_exit = 1;
                apr_thread_mutex_unlock(accept_mutex);
                goto got_a_black_spot;
            }
            goto got_fd;
        }
got_fd:
        /* Run beos_accept to accept the connection and set things up to
         * allow us to process it. We always release the accept_mutex here,
         * even if we fail to accept, as otherwise we'll starve other workers,
         * which would be bad.
         */
        rv = beos_accept(&csd, lr, ptrans);
        apr_thread_mutex_unlock(accept_mutex);

        if (rv == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1, worker_slot);
        } else if (rv != APR_SUCCESS)
            continue;

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                worker_slot, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            this_worker_should_exit = 1;
        }
got_a_black_spot:
    }

    apr_pool_destroy(ptrans);
    apr_pool_destroy(pworker);

    clean_child_exit(0, worker_slot);
}
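
The round-robin selection described in the comment above is what keeps low-numbered listeners from starving the rest. A standalone sketch of just that step, assuming the pollset preserves descriptor ordering between calls; pick_listener and passing last_poll_idx as a parameter are hypothetical, not httpd API.

#include "apr_poll.h"
#include "ap_listen.h"

/* Sketch: pick the next returned descriptor in round-robin order so that
 * busy listeners at low indexes cannot permanently starve the others. */
static ap_listen_rec *pick_listener(const apr_pollfd_t *pdesc,
                                    apr_int32_t numdesc,
                                    int *last_poll_idx)
{
    if (*last_poll_idx >= numdesc)
        *last_poll_idx = 0;                      /* wrap around */

    /* client_data was set to the ap_listen_rec when the socket was added */
    return pdesc[(*last_poll_idx)++].client_data;
}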

static int make_worker(int slot)
{
    thread_id tid;

    if (slot + 1 > ap_max_child_assigned)
            ap_max_child_assigned = slot + 1;

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL);

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL);
        return 0;
    }

    tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
                       (void *)slot);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
            "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec*)NULL);

        sleep(10);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
//static void child_main(int child_num_arg)
void body()
{

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator); //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild); //// removed deref
    apr_pool_tag(ptrans, 65); // "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldnt initialize crossprocess lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet(); assume(num_listensocks>0);

    lr = ap_listeners;
    i = num_listensocks; 
    while (1) {
      if ( i<=0 ) break; 
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
	i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while(1>0) {
      if (die_now>0) break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
	do_ACCEPT=1; do_ACCEPT=0;

	dummy = nondet();
	if(dummy > 0) {
          /* goto loc_return; */
          while(1>0) { int ddd; ddd=ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
	  while(1) {
	      int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process>0 && shutdown_pending>0) {
			  /* goto loc_return; */
                          while(1>0) { int ddd; ddd=ddd; }
                        }
                        goto loc_continueA;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*              ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; //pdesc[last_poll_idx++].client_data;
		break;

	    loc_continueA: {int yyy2; yyy2=yyy2; }
            }
        }
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
	  goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
	dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
	  //ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    loc_continueB: { int uuu; uuu=uuu; }
    }
    clean_child_exit(0);
 /* loc_return: */
    while(1>0) { int ddd; ddd=ddd; }
}
Beispiel #27
0
static apr_status_t readfile_heartbeats(const char *path, apr_hash_t *servers,
                                    apr_pool_t *pool)
{
    apr_finfo_t fi;
    apr_status_t rv;
    apr_file_t *fp;

    if (!path) {
        return APR_SUCCESS;
    }

    rv = apr_file_open(&fp, path, APR_READ|APR_BINARY|APR_BUFFERED,
                       APR_OS_DEFAULT, pool);

    if (rv) {
        return rv;
    }

    rv = apr_file_info_get(&fi, APR_FINFO_SIZE, fp);

    if (rv) {
        return rv;
    }

    {
        char *t;
        int lineno = 0;
        apr_bucket_alloc_t *ba = apr_bucket_alloc_create(pool);
        apr_bucket_brigade *bb = apr_brigade_create(pool, ba);
        apr_bucket_brigade *tmpbb = apr_brigade_create(pool, ba);
        apr_table_t *hbt = apr_table_make(pool, 10);

        apr_brigade_insert_file(bb, fp, 0, fi.size, pool);

        do {
            hb_server_t *server;
            char buf[4096];
            apr_size_t bsize = sizeof(buf);
            const char *ip;

            apr_brigade_cleanup(tmpbb);

            if (APR_BRIGADE_EMPTY(bb)) {
                break;
            }

            rv = apr_brigade_split_line(tmpbb, bb,
                                        APR_BLOCK_READ, sizeof(buf));
            lineno++;

            if (rv) {
                return rv;
            }

            apr_brigade_flatten(tmpbb, buf, &bsize);

            if (bsize == 0) {
                break;
            }

            buf[bsize - 1] = 0;

            /* comment */
            if (buf[0] == '#') {
                continue;
            }

            /* line format: <IP> <query_string>\n */
            t = strchr(buf, ' ');
            if (!t) {
                continue;
            }

            ip = apr_pstrmemdup(pool, buf, t - buf);
            t++;

            server = apr_hash_get(servers, ip, APR_HASH_KEY_STRING);

            if (server == NULL) {
                server = apr_pcalloc(pool, sizeof(hb_server_t));
                server->ip = ip;
                server->port = 80;
                server->seen = -1;

                apr_hash_set(servers, server->ip, APR_HASH_KEY_STRING, server);
            }

            apr_table_clear(hbt);

            argstr_to_table(pool, apr_pstrdup(pool, t), hbt);

            if (apr_table_get(hbt, "busy")) {
                server->busy = atoi(apr_table_get(hbt, "busy"));
            }

            if (apr_table_get(hbt, "ready")) {
                server->ready = atoi(apr_table_get(hbt, "ready"));
            }

            if (apr_table_get(hbt, "lastseen")) {
                server->seen = atoi(apr_table_get(hbt, "lastseen"));
            }

            if (apr_table_get(hbt, "port")) {
                server->port = atoi(apr_table_get(hbt, "port"));
            }

            if (server->busy == 0 && server->ready != 0) {
                /* Server has zero threads active, but lots of them ready;
                 * it has probably just started up, so divide the ready
                 * count by 4 to keep us from flooding it with all of the
                 * new requests.
                 */
                server->ready = server->ready / 4;
            }

        } while (1);
    }

    return APR_SUCCESS;
}
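
Each non-comment line read above has the form "<IP> <query_string>", for example "10.0.0.1 ready=75&busy=25&port=8080". Below is a self-contained sketch of that parse using plain C string functions; the real module hands the query string to argstr_to_table(), so parse_heartbeat_line and its field handling here are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch: parse one heartbeat line of the form "<IP> key=value&key=value"
 * and pull out a few integer fields. */
static void parse_heartbeat_line(const char *line)
{
    char buf[4096];
    char *sp, *pair, *last;
    int busy = 0, ready = 0, port = 80;

    strncpy(buf, line, sizeof(buf) - 1);
    buf[sizeof(buf) - 1] = '\0';

    sp = strchr(buf, ' ');
    if (!sp)
        return;                     /* malformed: no query string */
    *sp = '\0';                     /* buf now holds just the IP */

    /* walk the &-separated key=value pairs */
    for (pair = strtok_r(sp + 1, "&\n", &last); pair;
         pair = strtok_r(NULL, "&\n", &last)) {
        if (!strncmp(pair, "busy=", 5))        busy  = atoi(pair + 5);
        else if (!strncmp(pair, "ready=", 6))  ready = atoi(pair + 6);
        else if (!strncmp(pair, "port=", 5))   port  = atoi(pair + 5);
    }

    printf("ip=%s busy=%d ready=%d port=%d\n", buf, busy, ready, port);
}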
Beispiel #28
0
static winnt_conn_ctx_t *mpm_get_completion_context(int *timeout)
{
    apr_status_t rv;
    winnt_conn_ctx_t *context = NULL;

    *timeout = 0;
    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00326)
                                 "Server ran out of threads to serve "
                                 "requests. Consider raising the "
                                 "ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be
                 * available, since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else {
                    if (rv == WAIT_TIMEOUT) {
                        /* somewhat-normal condition where threads are busy */
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00327)
                                     "mpm_get_completion_context: Failed to get a "
                                     "free context within 1 second");
                        *timeout = 1;
                    }
                    else {
                        /* should be the unexpected, generic WAIT_FAILED */
                        ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                     ap_server_conf, APLOGNO(00328)
                                     "mpm_get_completion_context: "
                                     "WaitForSingleObject failed to get free context");
                    }
                    return NULL;
                }
            } else {
                /* Allocate another context.
                 * Note: Multiple failures in the next two steps will cause
                 * the pchild pool to 'leak' storage. I don't think this
                 * is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (winnt_conn_ctx_t *)apr_pcalloc(pchild,
                                                     sizeof(winnt_conn_ctx_t));


                context->overlapped.hEvent = CreateEvent(NULL, TRUE,
                                                         FALSE, NULL);
                if (context->overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                 ap_server_conf, APLOGNO(00329)
                                 "mpm_get_completion_context: "
                                 "CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL,
                                        allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(00330)
                                 "mpm_get_completion_context: Failed "
                                 "to create the transaction pool.");
                    CloseHandle(context->overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
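
The transaction-pool setup above, where a private allocator is created, its free list capped, a pool created on that allocator, and ownership of the allocator handed to the pool, is the same pattern used by several of the examples in this listing. A minimal sketch under those assumptions follows; make_private_pool is not an httpd or APR function.

#include <apr_pools.h>
#include <apr_allocator.h>

/* Sketch: create a pool with its own allocator so it can be cleared and
 * destroyed independently of the parent's allocator, with the amount of
 * memory retained on the free list capped at max_free bytes. */
static apr_status_t make_private_pool(apr_pool_t **pool, apr_pool_t *parent,
                                      apr_size_t max_free)
{
    apr_allocator_t *allocator;
    apr_status_t rv;

    rv = apr_allocator_create(&allocator);
    if (rv != APR_SUCCESS)
        return rv;
    apr_allocator_max_free_set(allocator, max_free);

    rv = apr_pool_create_ex(pool, parent, NULL, allocator);
    if (rv != APR_SUCCESS) {
        apr_allocator_destroy(allocator);
        return rv;
    }

    /* the pool now owns the allocator and destroys it along with itself */
    apr_allocator_owner_set(allocator, *pool);
    apr_pool_tag(*pool, "transaction");
    return APR_SUCCESS;
}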
Beispiel #29
0
conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
{
    apr_allocator_t *allocator;
    apr_status_t status;
    apr_pool_t *pool;
    conn_rec *c;
    void *cfg;
    module *mpm;
    
    ap_assert(master);
    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
                  "h2_stream(%ld-%d): create slave", master->id, slave_id);
    
    /* We create a pool with its own allocator to be used for
     * processing a request. This is the only way to have the processing
     * independent of its parent pool in the sense that it can work in
     * another thread. Also, the new allocator needs its own mutex to
     * synchronize sub-pools.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    status = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (status != APR_SUCCESS) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, 
                      APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
                      master->id, slave_id);
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);
    apr_pool_abort_set(abort_on_oom, pool);
    apr_pool_tag(pool, "h2_slave_conn");

    c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
    if (c == NULL) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, 
                      APLOGNO(02913) "h2_session(%ld-%d): create slave",
                      master->id, slave_id);
        apr_pool_destroy(pool);
        return NULL;
    }
    
    memcpy(c, master, sizeof(conn_rec));
        
    c->master                 = master;
    c->pool                   = pool;   
    c->conn_config            = ap_create_conn_config(pool);
    c->notes                  = apr_table_make(pool, 5);
    c->input_filters          = NULL;
    c->output_filters         = NULL;
    c->keepalives             = 0;
#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
    c->filter_conn_ctx        = NULL;
#endif
    c->bucket_alloc           = apr_bucket_alloc_create(pool);
#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
    c->data_in_input_filters  = 0;
    c->data_in_output_filters = 0;
#endif
    /* prevent mpm_event from making wrong assumptions about this connection,
     * like e.g. using its socket for an async read check. */
    c->clogging_input_filters = 1;
    c->log                    = NULL;
    c->log_id                 = apr_psprintf(pool, "%ld-%d", 
                                             master->id, slave_id);
    c->aborted                = 0;
    /* We cannot install the master connection socket on the slaves, as
     * modules mess with timeouts/blocking of the socket, with
     * unwanted side effects to the master connection processing.
     * Fortunately, since we never use the slave socket, we can just install
     * a single, process-wide dummy and everyone is happy.
     */
    ap_set_module_config(c->conn_config, &core_module, dummy_socket);
    /* TODO: these should be unique to this thread */
    c->sbh                    = master->sbh;
    /* TODO: not all mpm modules have learned about slave connections yet.
     * copy their config from master to slave.
     */
    if ((mpm = h2_conn_mpm_module()) != NULL) {
        cfg = ap_get_module_config(master->conn_config, mpm);
        ap_set_module_config(c->conn_config, mpm, cfg);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, 
                  "h2_slave(%s): created", c->log_id);
    return c;
}