/* **ONLY** alloc things with this function that **WILL NOT** outlive
   the session itself, or expect an earth-shattering KABOOM! */
SWITCH_DECLARE(void *) switch_core_perform_session_alloc(switch_core_session_t *session, switch_size_t memory, const char *file, const char *func,
														 int line)
{
	void *ptr = NULL;
	switch_assert(session != NULL);
	switch_assert(session->pool != NULL);

#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
#endif

#ifdef DEBUG_ALLOC
	if (memory > 500)
		switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p %p Session Allocate %s %d\n", 
						  (void *) session->pool, (void *) session, apr_pool_tag(session->pool, NULL), (int) memory);
#endif

	ptr = apr_palloc(session->pool, memory);
	switch_assert(ptr != NULL);

	memset(ptr, 0, memory);

#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif
#endif

	return ptr;
}
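Aside: since the allocation above is zeroed by hand, the apr_palloc()/memset() pair is equivalent to a single apr_pcalloc(). A minimal sketch of the same session-scoped contract, assuming only stock APR (the helper name is hypothetical):

#include <apr_pools.h>

/* Anything returned here lives exactly as long as the session pool. */
static void *session_scoped_alloc(apr_pool_t *session_pool, apr_size_t size)
{
    /* apr_pcalloc() == apr_palloc() + memset(0) */
    return apr_pcalloc(session_pool, size);
}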
Example #2
/*
 * This function assumes that either ctx->buffered_bb == NULL, or
 * ctx->buffered_bb is empty, or ctx->buffered_bb == bb
 */
static void setaside_remaining_output(ap_filter_t *f,
                                      core_output_filter_ctx_t *ctx,
                                      apr_bucket_brigade *bb,
                                      conn_rec *c)
{
    if (bb == NULL) {
        return;
    }
    remove_empty_buckets(bb);
    if (!APR_BRIGADE_EMPTY(bb)) {
        c->data_in_output_filters = 1;
        if (bb != ctx->buffered_bb) {
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &(ctx->buffered_bb), &bb,
                            ctx->deferred_write_pool);
        }
    }
    else if (ctx->deferred_write_pool) {
        /*
         * There are no more requests in the pipeline. We can just clear the
         * pool.
         */
        apr_pool_clear(ctx->deferred_write_pool);
    }
}
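The function above relies on httpd's set-aside idiom: buckets that must survive the current call are copied into a longer-lived sub-pool by ap_save_brigade(), and that sub-pool is cleared once the deferred data has been flushed. A reduced sketch, assuming the httpd filter API (the context struct is hypothetical):

#include "httpd.h"
#include "util_filter.h"

typedef struct {
    apr_bucket_brigade *buffered_bb;   /* where saved buckets accumulate */
    apr_pool_t *deferred_write_pool;   /* lazily created, cleared when drained */
} my_out_ctx;

static apr_status_t set_aside(ap_filter_t *f, my_out_ctx *ctx,
                              apr_bucket_brigade *bb, conn_rec *c)
{
    if (!ctx->deferred_write_pool) {
        apr_pool_create(&ctx->deferred_write_pool, c->pool);
        apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
    }
    /* copies/sets aside bb's buckets so they outlive the caller's pool */
    return ap_save_brigade(f, &ctx->buffered_bb, &bb, ctx->deferred_write_pool);
}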
Example #3
APR_DECLARE(apr_status_t) apr_initialize(void)
{
    apr_pool_t *pool;
    int err;
    void *nlmhandle = getnlmhandle();

    /* Register the NLM as using APR. If it is already
        registered then just return. */
    if (register_NLM(nlmhandle) != 0) {
        return APR_SUCCESS;
    }

    /* apr_pool_initialize() is being called from the library
        startup code since all of the memory resources belong 
        to the library rather than the application. */
    
    if (apr_pool_create(&pool, NULL) != APR_SUCCESS) {
        return APR_ENOPOOL;
    }

    apr_pool_tag(pool, "apr_initialize");

#ifdef USE_WINSOCK
    err = RegisterAppWithWinSock (nlmhandle);
    
    if (err) {
        return err;
    }
#endif

    apr_signal_init(pool);

    return APR_SUCCESS;
}
Example #4
APR_DECLARE(apr_status_t) apr_initialize(void)
{
    apr_pool_t *pool;
    apr_status_t status;

    if (initialized++) {
        return APR_SUCCESS;
    }

#if !defined(BEOS) && !defined(OS2)
    apr_proc_mutex_unix_setup_lock();
    apr_unix_setup_time();
#endif

    if ((status = apr_pool_initialize()) != APR_SUCCESS)
        return status;
    
    if (apr_pool_create(&pool, NULL) != APR_SUCCESS) {
        return APR_ENOPOOL;
    }

    apr_pool_tag(pool, "apr_initialize");

    /* apr_atomic_init() used to be called from here as well.
     * Pools rely on mutexes though, which can be backed by
     * atomics.  Due to this circular dependency
     * apr_pool_initialize() is taking care of calling
     * apr_atomic_init() at the correct time.
     */

    apr_signal_init(pool);

    return APR_SUCCESS;
}
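The initialized++ guard makes apr_initialize() reference counted, so each call must be balanced by one apr_terminate(). A usage sketch of that contract:

#include <apr_general.h>

void use_apr(void)
{
    apr_initialize();   /* count 1: performs the real setup */
    apr_initialize();   /* count 2: returns APR_SUCCESS immediately */
    /* ... use APR ... */
    apr_terminate();    /* count back to 1: APR still usable */
    apr_terminate();    /* count 0: pools and platform state torn down */
}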
Example #5
modperl_filter_t *modperl_filter_new(ap_filter_t *f,
                                     apr_bucket_brigade *bb,
                                     modperl_filter_mode_e mode,
                                     ap_input_mode_t input_mode,
                                     apr_read_type_e block,
                                     apr_off_t readbytes)
{
    apr_pool_t *p = MP_FILTER_POOL(f);
    apr_pool_t *temp_pool;
    modperl_filter_t *filter;

    /* we can't allocate memory from the pool here, since potentially
     * a filter can be called hundreds of times during the same
     * request/connection resulting in enormous memory demands
     * (sizeof(*filter)*number of invocations). so we use a sub-pool
     * which will get destroyed at the end of each modperl_filter
     * invocation.
     */
    apr_status_t rv = apr_pool_create(&temp_pool, p);
    if (rv != APR_SUCCESS) {
        /* XXX: how do we handle the error? assert? */
        return NULL;
    }
    filter = (modperl_filter_t *)apr_pcalloc(temp_pool, sizeof(*filter));

#ifdef MP_DEBUG
    apr_pool_tag(temp_pool, "mod_perl temp filter");
#endif

    filter->temp_pool = temp_pool;
    filter->mode      = mode;
    filter->f         = f;
    filter->pool      = p;
    filter->wbucket   = NULL;

    if (mode == MP_INPUT_FILTER_MODE) {
        filter->bb_in      = NULL;
        filter->bb_out     = bb;
        filter->input_mode = input_mode;
        filter->block      = block;
        filter->readbytes  = readbytes;
    }
    else {
        filter->bb_in  = bb;
        filter->bb_out = NULL;
    }

    MP_TRACE_f(MP_FUNC, MP_FILTER_NAME_FORMAT
               "new: %s %s filter (modperl_filter_t *0x%lx), "
               "f (ap_filter_t *0x%lx)",
               MP_FILTER_NAME(f),
               MP_FILTER_TYPE(filter),
               MP_FILTER_MODE(filter),
               (unsigned long)filter,
               (unsigned long)filter->f);

    return filter;
}
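The comment above describes the sub-pool-per-invocation pattern: transient state comes from a child pool that is destroyed at the end of each call, so repeated invocations cannot grow the parent pool. A minimal sketch (names are illustrative, not mod_perl API):

#include <apr_pools.h>

static void per_invocation_work(apr_pool_t *parent)
{
    apr_pool_t *temp_pool;
    void *scratch;

    if (apr_pool_create(&temp_pool, parent) != APR_SUCCESS)
        return;
    apr_pool_tag(temp_pool, "temp");

    scratch = apr_pcalloc(temp_pool, 4096);  /* per-call transient state */
    (void) scratch;                          /* ... do the work ... */

    apr_pool_destroy(temp_pool);             /* memory returned every call */
}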
Example #6
static process_rec *init_process(int *argc, const char * const * *argv)
{
    process_rec *process;
    apr_pool_t *cntx;
    apr_status_t stat;
    const char *failed = "apr_app_initialize()";

    stat = apr_app_initialize(argc, argv, NULL);
    if (stat == APR_SUCCESS) {
        failed = "apr_pool_create()";
        stat = apr_pool_create(&cntx, NULL);
    }

    if (stat != APR_SUCCESS) {
        /* For all intents and purposes, this is impossibly unlikely,
         * but since APR doesn't exist yet, we can't use it to report
         * these earliest two failures.
         */
        char ctimebuff[APR_CTIME_LEN];
        apr_ctime(ctimebuff, apr_time_now());
        fprintf(stderr, "[%s] [crit] (%d) %s: %s failed "
                        "to initialize context, exiting\n",
                        ctimebuff, stat, (*argv)[0], failed);
        apr_terminate();
        exit(1);
    }

    apr_pool_tag(cntx, "process");
    ap_open_stderr_log(cntx);

    /* Now we have initialized apr and our logger, no more
     * exceptional error reporting required for the lifetime
     * of this server process.
     */

    process = apr_palloc(cntx, sizeof(process_rec));
    process->pool = cntx;

    apr_pool_create(&process->pconf, process->pool);
    apr_pool_tag(process->pconf, "pconf");
    process->argc = *argc;
    process->argv = *argv;
    process->short_name = apr_filepath_name_get((*argv)[0]);
    return process;
}
Example #7
static PCOMP_CONTEXT win9x_get_connection(PCOMP_CONTEXT context)
{
    apr_os_sock_info_t sockinfo;
    int len, salen;
#if APR_HAVE_IPV6
    salen = sizeof(struct sockaddr_in6);
#else
    salen = sizeof(struct sockaddr_in);
#endif


    if (context == NULL) {
        /* allocate the completion context and the transaction pool */
        apr_allocator_t *allocator;
        apr_thread_mutex_lock(child_lock);
        context = apr_pcalloc(pchild, sizeof(COMP_CONTEXT));
        apr_allocator_create(&allocator);
        apr_allocator_max_free_set(allocator, ap_max_mem_free);
        apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
        apr_allocator_owner_set(allocator, context->ptrans);
        apr_pool_tag(context->ptrans, "transaction");
        apr_thread_mutex_unlock(child_lock);
    }

    while (1) {
        apr_pool_clear(context->ptrans);
        context->ba = apr_bucket_alloc_create(context->ptrans);
        context->accept_socket = remove_job();
        if (context->accept_socket == INVALID_SOCKET) {
            return NULL;
        }
        len = salen;
        context->sa_server = apr_palloc(context->ptrans, len);
        if (getsockname(context->accept_socket,
                        context->sa_server, &len)== SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getsockname failed");
            continue;
        }
        len = salen;
        context->sa_client = apr_palloc(context->ptrans, len);
        if ((getpeername(context->accept_socket,
                         context->sa_client, &len)) == SOCKET_ERROR) {
            ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_netos_error(), ap_server_conf,
                         "getpeername failed");
            memset(context->sa_client, '\0', salen); /* zero the sockaddr storage, not the pointer */
        }
        sockinfo.os_sock = &context->accept_socket;
        sockinfo.local   = context->sa_server;
        sockinfo.remote  = context->sa_client;
        sockinfo.family  = context->sa_server->sa_family;
        sockinfo.type    = SOCK_STREAM;
        apr_os_sock_make(&context->sock, &sockinfo, context->ptrans);

        return context;
    }
}
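The ptrans setup above is APR's allocator/owner idiom: the transaction pool gets a private allocator with a bounded free list, and ownership is handed to the pool so destroying the pool also destroys the allocator. A sketch under those assumptions (the max-free value is illustrative):

#include <apr_pools.h>
#include <apr_allocator.h>

static apr_pool_t *make_transaction_pool(apr_pool_t *parent)
{
    apr_allocator_t *allocator;
    apr_pool_t *ptrans = NULL;

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, 16 * 1024); /* cap retained free blocks */
    apr_pool_create_ex(&ptrans, parent, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);       /* pool now owns the allocator */
    apr_pool_tag(ptrans, "transaction");
    return ptrans;
}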
Example #8
void initialize(void) {
    if (apr_initialize() != APR_SUCCESS) {
        abort();
    }
    atexit(apr_terminate);
    
    apr_pool_create(&p, NULL);
    apr_pool_tag(p, "apr-util global test pool");
}
Example #9
APR_DECLARE(apr_status_t) apr_initialize(void)
{
    apr_pool_t *pool;
    apr_status_t status;
    int iVersionRequested;
    WSADATA wsaData;
    int err;
    apr_oslevel_e osver;

    if (initialized++) {
        return APR_SUCCESS;
    }

    /* Initialize apr_os_level global */
    if (apr_get_oslevel(&osver) != APR_SUCCESS) {
        return APR_EEXIST;
    }

    tls_apr_thread = TlsAlloc();
    if ((status = apr_pool_initialize()) != APR_SUCCESS)
        return status;

    if (apr_pool_create(&pool, NULL) != APR_SUCCESS) {
        return APR_ENOPOOL;
    }

    apr_pool_tag(pool, "apr_initialize");

    iVersionRequested = MAKEWORD(WSAHighByte, WSALowByte);
    err = WSAStartup((WORD) iVersionRequested, &wsaData);
    if (err) {
        return err;
    }
    if (LOBYTE(wsaData.wVersion) != WSAHighByte ||
        HIBYTE(wsaData.wVersion) != WSALowByte) {
        WSACleanup();
        return APR_EEXIST;
    }

    apr_signal_init(pool);

    apr_threadproc_init(pool);

    return APR_SUCCESS;
}
Example #10
SWITCH_DECLARE(char *) switch_core_perform_session_strdup(switch_core_session_t *session, const char *todup, const char *file, const char *func, int line)
{
	char *duped = NULL;
#ifdef DEBUG_ALLOC
	switch_size_t len;
#endif

	switch_assert(session != NULL);
	switch_assert(session->pool != NULL);

	if (!todup) {
		return NULL;
	}

	if (zstr(todup)) {
		return SWITCH_BLANK_STRING;
	}
#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
#endif

#ifdef DEBUG_ALLOC
	len = strlen(todup);
	if (len > 500)
		switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p %p Sess Strdup Allocate %s %ld\n", 
						  (void *) session->pool, (void *) session, apr_pool_tag(session->pool, NULL), (long) len);
#endif

	duped = apr_pstrdup(session->pool, todup);
	switch_assert(duped != NULL);

#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif
#endif

	return duped;
}
Example #11
SWITCH_DECLARE(char *) switch_core_perform_strdup(switch_memory_pool_t *pool, const char *todup, const char *file, const char *func, int line)
{
	char *duped = NULL;
	switch_size_t len;
	switch_assert(pool != NULL);

	if (!todup) {
		return NULL;
	}

	if (zstr(todup)) {
		return SWITCH_BLANK_STRING;
	}
#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
#endif

	len = strlen(todup) + 1;

#ifdef DEBUG_ALLOC
	if (len > 500)
		switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p Core Strdup Allocate %s %d\n", 
						  (void *) pool, apr_pool_tag(pool, NULL), (int)len);
#endif

	duped = apr_pstrmemdup(pool, todup, len);
	switch_assert(duped != NULL);

#ifdef LOCK_MORE
#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif
#endif

	return duped;
}
Example #12
static icl_mutex_t *
    icl_mutex_alloc_ (
    char * file,                        //  Source file for call
    size_t line                         //  Line number for call
)
{

    icl_mutex_t *
        self = NULL;                    //  Object reference

    self = (icl_mutex_t *) malloc (sizeof (icl_mutex_t));
    if (self)
        memset (self, 0, sizeof (icl_mutex_t));

#if (defined (BASE_THREADSAFE))
    icl_apr_assert (apr_pool_create (&self->pool, icl_global_pool));
    apr_pool_tag (self->pool, "icl_mutex_alloc");
    icl_apr_assert (apr_thread_mutex_create (&self->mutex, 
                    APR_THREAD_MUTEX_DEFAULT, self->pool));
#endif


    return (self);
}
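An APR mutex has to be created out of some pool, so the code above gives each mutex object its own sub-pool: one apr_pool_destroy() then frees both the mutex and its wrapper. A reduced sketch of that pattern (type and names hypothetical):

#include <apr_pools.h>
#include <apr_thread_mutex.h>

typedef struct {
    apr_pool_t *pool;
    apr_thread_mutex_t *mutex;
} my_mutex_t;

static my_mutex_t *my_mutex_alloc(apr_pool_t *parent)
{
    apr_pool_t *pool;
    my_mutex_t *self;

    if (apr_pool_create(&pool, parent) != APR_SUCCESS)
        return NULL;
    apr_pool_tag(pool, "my_mutex");

    self = apr_pcalloc(pool, sizeof(*self));  /* wrapper lives in its own pool */
    self->pool = pool;
    apr_thread_mutex_create(&self->mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    return self;      /* apr_pool_destroy(self->pool) frees mutex and wrapper */
}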
Example #13
conn_rec *h2_slave_create(conn_rec *master, int slave_id, apr_pool_t *parent)
{
    apr_allocator_t *allocator;
    apr_status_t status;
    apr_pool_t *pool;
    conn_rec *c;
    void *cfg;
    module *mpm;
    
    ap_assert(master);
    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
                  "h2_stream(%ld-%d): create slave", master->id, slave_id);
    
    /* We create a pool with its own allocator to be used for
     * processing a request. This is the only way to have the processing
     * independent of its parent pool in the sense that it can work in
     * another thread. Also, the new allocator needs its own mutex to
     * synchronize sub-pools.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    status = apr_pool_create_ex(&pool, parent, NULL, allocator);
    if (status != APR_SUCCESS) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master, 
                      APLOGNO(10004) "h2_session(%ld-%d): create slave pool",
                      master->id, slave_id);
        return NULL;
    }
    apr_allocator_owner_set(allocator, pool);
    apr_pool_abort_set(abort_on_oom, pool);
    apr_pool_tag(pool, "h2_slave_conn");

    c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
    if (c == NULL) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master, 
                      APLOGNO(02913) "h2_session(%ld-%d): create slave",
                      master->id, slave_id);
        apr_pool_destroy(pool);
        return NULL;
    }
    
    memcpy(c, master, sizeof(conn_rec));
        
    c->master                 = master;
    c->pool                   = pool;   
    c->conn_config            = ap_create_conn_config(pool);
    c->notes                  = apr_table_make(pool, 5);
    c->input_filters          = NULL;
    c->output_filters         = NULL;
    c->keepalives             = 0;
#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
    c->filter_conn_ctx        = NULL;
#endif
    c->bucket_alloc           = apr_bucket_alloc_create(pool);
#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
    c->data_in_input_filters  = 0;
    c->data_in_output_filters = 0;
#endif
    /* prevent mpm_event from making wrong assumptions about this connection,
     * like e.g. using its socket for an async read check. */
    c->clogging_input_filters = 1;
    c->log                    = NULL;
    c->log_id                 = apr_psprintf(pool, "%ld-%d", 
                                             master->id, slave_id);
    c->aborted                = 0;
    /* We cannot install the master connection socket on the slaves, as
     * modules mess with timeouts/blocking of the socket, with
     * unwanted side effects to the master connection processing.
     * Fortunately, since we never use the slave socket, we can just install
     * a single, process-wide dummy and everyone is happy.
     */
    ap_set_module_config(c->conn_config, &core_module, dummy_socket);
    /* TODO: these should be unique to this thread */
    c->sbh                    = master->sbh;
    /* TODO: not all mpm modules have learned about slave connections yet.
     * copy their config from master to slave.
     */
    if ((mpm = h2_conn_mpm_module()) != NULL) {
        cfg = ap_get_module_config(master->conn_config, mpm);
        ap_set_module_config(c->conn_config, mpm, cfg);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c, 
                  "h2_slave(%s): created", c->log_id);
    return c;
}
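The apr_pool_abort_set() call above installs an out-of-memory hook: the callback fires when an allocation from that pool fails, letting the server die loudly instead of dereferencing NULL later. A sketch of what such a hook might look like (the body is illustrative; h2's actual abort_on_oom is defined elsewhere):

#include <stdio.h>
#include <stdlib.h>

static int abort_on_oom(int retcode)
{
    fprintf(stderr, "out of memory in connection pool, aborting\n");
    abort();
    return retcode;  /* not reached; signature required by apr_abort_fn_t */
}

/* after creating the pool: apr_pool_abort_set(abort_on_oom, pool); */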
Example #14
int cstat (NXPathCtx_t ctx, char *path, struct stat *buf, unsigned long requestmap, apr_pool_t *p)
{
    apr_pool_t *gPool = (apr_pool_t *)getGlobalPool();
    apr_hash_t *statCache = NULL;
    apr_thread_rwlock_t *rwlock = NULL;

    NXPathCtx_t pathctx = 0;
    char *ptr = NULL, *tr;
    int len = 0, x;
    char *ppath = NULL;
    char *pinfo = NULL;

    if (ctx == 1) {

        /* If there isn't a global pool then just stat the file
           and return */
        if (!gPool) {
            char poolname[50];
    
            if (apr_pool_create(&gPool, NULL) != APR_SUCCESS) {
                return getstat(ctx, path, buf, requestmap);
            }
    
            setGlobalPool(gPool);
            apr_pool_tag(gPool, apr_pstrdup(gPool, "cstat_mem_pool"));
    
            statCache = apr_hash_make(gPool);
            apr_pool_userdata_set ((void*)statCache, "STAT_CACHE", stat_cache_cleanup, gPool);

            apr_thread_rwlock_create(&rwlock, gPool);
            apr_pool_userdata_set ((void*)rwlock, "STAT_CACHE_LOCK", apr_pool_cleanup_null, gPool);
        }
        else {
            apr_pool_userdata_get((void**)&statCache, "STAT_CACHE", gPool);
            apr_pool_userdata_get((void**)&rwlock, "STAT_CACHE_LOCK", gPool);
        }

        if (!gPool || !statCache || !rwlock) {
            return getstat(ctx, path, buf, requestmap);
        }
    
        for (x = 0,tr = path;*tr != '\0';tr++,x++) {
            if (*tr == '\\' || *tr == '/') {
                ptr = tr;
                len = x;
            }
            if (*tr == ':') {
                ptr = "\\";
                len = x;
            }
        }
    
        if (ptr) {
            ppath = apr_pstrndup (p, path, len);
            strlwr(ppath);
            if (ptr[1] != '\0') {
                ptr++;
            }
            /* If the path ended in a trailing slash then our result path
               will be a single slash. To avoid stat'ing the root with a
               slash, we need to make sure we stat the current directory
               with a dot */
            if (((*ptr == '/') || (*ptr == '\\')) && (*(ptr+1) == '\0')) {
                pinfo = apr_pstrdup (p, ".");
            }
            else {
                pinfo = apr_pstrdup (p, ptr);
            }
        }
    
        /* If we have a statCache then try to pull the information
           from the cache.  Otherwise just stat the file and return.*/
        if (statCache) {
            apr_thread_rwlock_rdlock(rwlock);
            pathctx = (NXPathCtx_t) apr_hash_get(statCache, ppath, APR_HASH_KEY_STRING);
            apr_thread_rwlock_unlock(rwlock);
            if (pathctx) {
                return getstat(pathctx, pinfo, buf, requestmap);
            }
            else {
                int err;

                err = NXCreatePathContext(0, ppath, 0, NULL, &pathctx);
                if (!err) {
                    apr_thread_rwlock_wrlock(rwlock);
                    apr_hash_set(statCache, apr_pstrdup(gPool,ppath) , APR_HASH_KEY_STRING, (void*)pathctx);
                    apr_thread_rwlock_unlock(rwlock);
                    return getstat(pathctx, pinfo, buf, requestmap);
                }
            }
        }
    }
    return getstat(ctx, path, buf, requestmap);
}
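cstat() combines three APR idioms: a hash table allocated from a long-lived global pool, registered as pool userdata so it can be rediscovered from the pool alone, and guarded by a reader/writer lock (many concurrent readers, one writer). A reduced sketch of the lookup-or-insert path, assuming stock APR (key/value handling is illustrative):

#include <apr_hash.h>
#include <apr_strings.h>
#include <apr_thread_rwlock.h>

static void *cache_lookup_or_insert(apr_pool_t *gpool, apr_hash_t *cache,
                                    apr_thread_rwlock_t *rwlock,
                                    const char *key, void *fresh_value)
{
    void *val;

    apr_thread_rwlock_rdlock(rwlock);   /* cheap shared lock for the common case */
    val = apr_hash_get(cache, key, APR_HASH_KEY_STRING);
    apr_thread_rwlock_unlock(rwlock);
    if (val)
        return val;

    apr_thread_rwlock_wrlock(rwlock);   /* exclusive lock only when inserting */
    /* the key must live as long as the cache, hence the copy into gpool */
    apr_hash_set(cache, apr_pstrdup(gpool, key), APR_HASH_KEY_STRING, fresh_value);
    apr_thread_rwlock_unlock(rwlock);
    return fresh_value;
}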
Example #15
/*static */
void worker_main(void *arg)
{
    ap_listen_rec *lr, *first_lr, *last_lr = NULL;
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    conn_rec *current_conn;
    apr_status_t stat = APR_EINIT;
    ap_sb_handle_t *sbh;
    apr_thread_t *thd = NULL;
    apr_os_thread_t osthd;

    int my_worker_num = (int)arg;
    apr_socket_t *csd = NULL;
    int requests_this_child = 0;
    apr_socket_t *sd = NULL;
    fd_set main_fds;

    int sockdes;
    int srv;
    struct timeval tv;
    int wouldblock_retry;

    osthd = apr_os_thread_current();
    apr_os_thread_put(&thd, &osthd, pmain);

    tv.tv_sec = 1;
    tv.tv_usec = 0;

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);

    apr_pool_create_ex(&ptrans, pmain, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);
    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    atomic_inc (&worker_thread_count);

    while (!die_now) {
        /*
        * (Re)initialize this child to a pre-connection state.
        */
        current_conn = NULL;
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
            && requests_this_child++ >= ap_max_requests_per_child)) {
            DBPRINT1 ("\n**Thread slot %d is shutting down", my_worker_num);
            clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
        }

        ap_update_child_status_from_indexes(0, my_worker_num, WORKER_READY,
                                            (request_rec *) NULL);

        /*
        * Wait for an acceptable connection to arrive.
        */

        for (;;) {
            if (shutdown_pending || restart_pending || (ap_scoreboard_image->servers[0][my_worker_num].status == WORKER_IDLE_KILL)) {
                DBPRINT1 ("\nThread slot %d is shutting down\n", my_worker_num);
                clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
            }

            /* Check the listen queue on all sockets for requests */
            memcpy(&main_fds, &listenfds, sizeof(fd_set));
            srv = select(listenmaxfd + 1, &main_fds, NULL, NULL, &tv);

            if (srv <= 0) {
                if (srv < 0) {
                    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, ap_server_conf, APLOGNO(00217)
                        "select() failed on listen socket");
                    apr_thread_yield();
                }
                continue;
            }

            /* remember the last_lr we searched last time around so that
            we don't end up starving any particular listening socket */
            if (last_lr == NULL) {
                lr = ap_listeners;
            }
            else {
                lr = last_lr->next;
                if (!lr)
                    lr = ap_listeners;
            }
            first_lr = lr;
            do {
                apr_os_sock_get(&sockdes, lr->sd);
                if (FD_ISSET(sockdes, &main_fds))
                    goto got_listener;
                lr = lr->next;
                if (!lr)
                    lr = ap_listeners;
            } while (lr != first_lr);
            /* if we get here, something unexpected happened. Go back
            into the select state and try again.
            */
            continue;
        got_listener:
            last_lr = lr;
            sd = lr->sd;

            wouldblock_retry = MAX_WB_RETRIES;

            while (wouldblock_retry) {
                if ((stat = apr_socket_accept(&csd, sd, ptrans)) == APR_SUCCESS) {
                    break;
                }
                else {
                    /* if the error is a wouldblock then maybe we were too
                        quick trying to pull the next request from the listen
                        queue.  Try a few more times, then return to our idle
                        listen state. */
                    if (!APR_STATUS_IS_EAGAIN(stat)) {
                        break;
                    }

                    if (wouldblock_retry--) {
                        apr_thread_yield();
                    }
                }
            }

            /* If we got a new socket, set it to non-blocking mode and process
                it.  Otherwise handle the error. */
            if (stat == APR_SUCCESS) {
                apr_socket_opt_set(csd, APR_SO_NONBLOCK, 0);
#ifdef DBINFO_ON
                if (wouldblock_retry < MAX_WB_RETRIES) {
                    retry_success++;
                    avg_retries += (MAX_WB_RETRIES-wouldblock_retry);
                }
#endif
                break;       /* We have a socket ready for reading */
            }
            else {
#ifdef DBINFO_ON
                if (APR_STATUS_IS_EAGAIN(stat)) {
                        would_block++;
                        retry_fail++;
                }
                else if (
#else
                if (APR_STATUS_IS_EAGAIN(stat) ||
#endif
                    APR_STATUS_IS_ECONNRESET(stat) ||
                    APR_STATUS_IS_ETIMEDOUT(stat) ||
                    APR_STATUS_IS_EHOSTUNREACH(stat) ||
                    APR_STATUS_IS_ENETUNREACH(stat)) {
                        ;
                }
#ifdef USE_WINSOCK
                else if (APR_STATUS_IS_ENETDOWN(stat)) {
                       /*
                        * When the network layer has been shut down, there
                        * is not much use in simply exiting: the parent
                        * would simply re-create us (and we'd fail again).
                        * Use the CHILDFATAL code to tear the server down.
                        * @@@ Martin's idea for possible improvement:
                        * A different approach would be to define
                        * a new APEXIT_NETDOWN exit code, the reception
                        * of which would make the parent shutdown all
                        * children, then idle-loop until it detected that
                        * the network is up again, and restart the children.
                        * Ben Hyde noted that temporary ENETDOWN situations
                        * occur in mobile IP.
                        */
                        ap_log_error(APLOG_MARK, APLOG_EMERG, stat, ap_server_conf, APLOGNO(00218)
                            "apr_socket_accept: giving up.");
                        clean_child_exit(APEXIT_CHILDFATAL, my_worker_num, ptrans,
                                         bucket_alloc);
                }
#endif
                else {
                        ap_log_error(APLOG_MARK, APLOG_ERR, stat, ap_server_conf, APLOGNO(00219)
                            "apr_socket_accept: (client socket)");
                        clean_child_exit(1, my_worker_num, ptrans, bucket_alloc);
                }
            }
        }

        ap_create_sb_handle(&sbh, ptrans, 0, my_worker_num);
        /*
        * We now have a connection, so set it up with the appropriate
        * socket options, file descriptors, and read/write buffers.
        */
        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd,
                                                my_worker_num, sbh,
                                                bucket_alloc);
        if (current_conn) {
            current_conn->current_thread = thd;
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        request_count++;
    }
    clean_child_exit(0, my_worker_num, ptrans, bucket_alloc);
}
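Like the other MPM workers in this listing, worker_main() creates ptrans once and apr_pool_clear()s it at the top of every loop iteration: all per-connection allocations vanish in one cheap operation while the underlying blocks stay cached in the allocator. The idiom in isolation, as a sketch:

#include <apr_pools.h>

static void serve_loop(apr_pool_t *ptrans, int max_requests)
{
    int served;
    void *conn_state;

    for (served = 0; served < max_requests; served++) {
        apr_pool_clear(ptrans);                 /* drop previous connection's memory */
        conn_state = apr_pcalloc(ptrans, 256);  /* fresh per-connection state */
        (void) conn_state;                      /* ... accept and process ... */
    }
}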
Example #16
int main(int argc, const char * const argv[])
{
    char c;
    const char *ap_confname = "httpd.conf";
    const char *ngx_confname = "nginx.conf";
    const char *def_server_root = NULL;
    const char *temp_error_log = NULL;
    const char *error;
    process_rec *process;
    server_rec *server_conf;
    apr_pool_t *pglobal;
    apr_pool_t *pconf;
    apr_pool_t *plog; /* Pool of log streams, reset _after_ each read of conf */
    apr_pool_t *ptemp; /* Pool for temporary config stuff, reset often */
    apr_pool_t *pcommands; /* Pool for -D, -C and -c switches */
    apr_getopt_t *opt;
    apr_status_t rv;
    const char *optarg;

    AP_MONCONTROL(0); /* turn off profiling of startup */

    process = init_process(&argc, &argv);
    pglobal = process->pool;
    pconf = process->pconf;
    ap_server_argv0 = process->short_name;

#if APR_CHARSET_EBCDIC
    if (ap_init_ebcdic(pglobal) != APR_SUCCESS) {
        destroy_and_exit_process(process, 1);
    }
#endif

    apr_pool_create(&pcommands, pglobal);
    apr_pool_tag(pcommands, "pcommands");
    ap_server_pre_read_config  = apr_array_make(pcommands, 1, sizeof(char *));
    ap_server_post_read_config = apr_array_make(pcommands, 1, sizeof(char *));
    ap_server_config_defines   = apr_array_make(pcommands, 1, sizeof(char *));

    error = ap_setup_prelinked_modules(process);
    if (error) {
        ap_log_error(APLOG_MARK, APLOG_STARTUP|APLOG_EMERG, 0, NULL, "%s: %s",
                     ap_server_argv0, error);
        destroy_and_exit_process(process, 1);
    }

    ap_run_rewrite_args(process);

    /* Maintain AP_SERVER_BASEARGS list in http_main.h to allow the MPM
     * to safely pass on our args from its rewrite_args() handler.
     */
    apr_getopt_init(&opt, pcommands, process->argc, process->argv);
    
    while ((rv = apr_getopt(opt, APN_SERVER_BASEARGS, &c, &optarg))
            == APR_SUCCESS) {
        switch (c) {

        case 'f':
            ap_confname = optarg;
            break;

        case 'o':
            ngx_confname = optarg;
            break;

        case 'd':
            def_server_root = optarg;
            break;

        case 'l':
            ap_show_modules();
            destroy_and_exit_process(process, 0);

        case 'L':
            ap_show_directives();
            destroy_and_exit_process(process, 0);

        case 'h':
        case '?':
            usage(process);
            destroy_and_exit_process(process, 0);
            
        case 'H':
            setconf = 1;
            confname = optarg;
            break;
                
        case 'i':
            defindex = 1;
            break;
  
        case 'G':
            defgzip = 1;
            break;
            
        case 'D':
            cleancfg = 1;
            break;
            
        case 'R':
            forcerm = 1;
            break;
        }    
    }

    if (rv != APR_EOF || argc < 3) {
        if (c != 'h' && c != '?') {
            usage(process);
        }
        destroy_and_exit_process(process, 1);
    }

    apr_pool_create(&plog, pglobal);
    apr_pool_tag(plog, "plog");
    apr_pool_create(&ptemp, pconf);
    apr_pool_tag(ptemp, "ptemp");

    /* we need the real path */
    char *fullpath = NULL;
    rv = apr_get_realpath(&fullpath, ap_confname, plog);
    if (rv != APR_SUCCESS) {
        apn_error("Apache conf file is not found "
                  "or the given path is invalid! Exit.\n");
        destroy_and_exit_process(process, 1);
    }
    ap_confname = apr_pstrdup(plog, fullpath);

    /* Note that we preflight the config file once
     * before reading it _again_ in the main loop.
     * This allows things, log files configuration
     * for example, to settle down.
     */
    ap_server_root = def_server_root;
    if (!ap_server_root) { // server root was not specified with -d on the command line

        if (!ap_confname) {
            apn_error("Apache conf file name is null!\n");
            destroy_and_exit_process(process, 1);
        }

        /**
         * If ap_confname is an absolute path, use its prefix as the
         * server root; otherwise use the current directory as the server root.
         */
        char* basedir;
        rv = apr_get_basedir(&basedir, ap_confname, process->pool);
        if (rv != APR_SUCCESS) {
            apn_error("Apache conf file is not found " 
                    "or the given path is invalid! Exit.\n");
            destroy_and_exit_process(process, 1);
        }

        ap_server_root = def_server_root = basedir;

        /*
         * Sometimes ap_server_root needs to be chosen more intelligently,
         * because the Apache conf depends on ServerRoot. When not running
         * on localhost the configured ServerRoot may be invalid, so we
         * have to guess ap_server_root here.
         */
        apn_fixed_server_root(plog);
    }
    if (temp_error_log) {
        ap_replace_stderr_log(process->pool, temp_error_log);
    }
 
    char *ngx_fullpath = NULL;
    rv = apr_get_realpath(&ngx_fullpath, ngx_confname, plog);
    
    if (rv == APR_SUCCESS && forcerm != 1) {
        apn_error("Config file exists: %s. Exit.\n", ngx_fullpath);
        destroy_and_exit_process(process, 1);
    } else {
        /* Create an empty nginx.conf, because mod_mime needs this file. */
        apr_file_t *f;
        rv = apr_file_open(&f, ngx_confname, 
                (forcerm == 1 ? APR_TRUNCATE : APR_CREATE ) |APR_APPEND|APR_WRITE,
                APR_OS_DEFAULT, plog);
        if (rv != APR_SUCCESS) {
            apn_error("Create file error: %s\n", ngx_fullpath);
            destroy_and_exit_process(process, 1);
        }
        apr_file_close(f);
    }

    /*
     * here just create the main server, and the vhost is not created.
     */
    server_conf = ap_read_config(process, ptemp, ap_confname, &ap_conftree);
    if (!server_conf) {
        destroy_and_exit_process(process, 1);
    }
    /* sort hooks here to make sure pre_config hooks are sorted properly */
    apr_hook_sort_all();

    if (ap_run_pre_config(pconf, plog, ptemp) != OK) {
        ap_log_error(APLOG_MARK, APLOG_STARTUP |APLOG_ERR, 0,
                     NULL, "Pre-configuration failed");
        destroy_and_exit_process(process, 1);
    }

    /* Lijinhu added: check that the configuration is valid */
    if (ap_conftree == NULL) {
        apn_error("The apache conf file is invalid! Please check it. Exit.\n");
        destroy_and_exit_process(process, 1);
    }

    rv = ap_process_config_tree(server_conf, ap_conftree,
                                process->pconf, ptemp);
    if (rv == OK) {
        /*
         * 1. merge server configs.
         * 2. merge per dir configs for each server.
         * 3. re-order the directories.
         */
        ap_fixup_virtual_hosts(pconf, server_conf);

        /* compile the tables and such we need to do the run-time vhost lookups */
        ap_fini_vhost_config(pconf, server_conf);

        /*
         * Sort hooks again because ap_process_config_tree may have added
         * modules and hence hooks. This happens with mod_perl and modules
         * written in perl.
         */
        apr_hook_sort_all();

        ap_run_test_config(pconf, server_conf);
        ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL, "Syntax OK");

        if ( ap_run_post_config(pconf, plog, ptemp, server_conf) != OK) {
            ap_log_error(APLOG_MARK, APLOG_STARTUP |APLOG_ERR, 0,
                    NULL, "Failed to merge some configurations");
            destroy_and_exit_process(process, 1);
        }


        /*
         * migrate the config to nginx conf format.
         * generate the conf file of nginx.
         */
        rv = apn_migrate_to_nginx(plog, server_conf, ngx_confname);
        if (rv != OK) {
            ap_log_error(APLOG_MARK, APLOG_STARTUP, 0, NULL, "Migrate Error!");
        }
        destroy_and_exit_process(process, 0);
    }

   return 0; /* Termination 'ok' */
}
Example #17
//static void child_main(int child_num_arg)
void body()
{

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(allocator); //// removed deref
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(pchild, pconf, NULL, allocator); //// removed deref
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(ptrans, pchild); //// removed deref
    apr_pool_tag(ptrans, 65); // "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(accept_mutex, ap_lock_fname, pchild); //// removed deref
    if (status != APR_SUCCESS) {
        /* ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf, */
        /*              "Couldnt initialize crossprocess lock in child " */
        /*              "%s %d", ap_lock_fname, ap_accept_lock_mech); */
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child() > 0) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(sbh, pchild, my_child_num, 0); //// removed deref

    ap_update_child_status(sbh, SERVER_READY, NULL);

    /* Set up the pollfd array */
    /* ### check the status */
    (void) apr_pollset_create(pollset, num_listensocks, pchild, 0); //// removed deref

    num_listensocks = nondet(); assume(num_listensocks>0);

    lr = ap_listeners;
    i = num_listensocks; 
    while (1) {
      if ( i<=0 ) break; 
        int pfd = 0;

        pfd_desc_type = APR_POLL_SOCKET;
        pfd_desc_s = 1; // lr->sd;
        pfd_reqevents = APR_POLLIN;
        pfd_client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, pfd); //// removed deref
	i--;
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    while(1>0) {
      if (die_now>0) break;
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());
	do_ACCEPT=1; do_ACCEPT=0;

	dummy = nondet();
	if(dummy > 0) {
          /* goto loc_return; */
          while(1>0) { int ddd; ddd=ddd; }
        }

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
	  while(1) {
	      int numdesc;
                const void *pdesc;

                /* timeout == -1 == wait forever */
                status = apr_pollset_poll(pollset, -1, numdesc, pdesc); //// removed deref
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(status) > 0) {
                        if (one_process>0 && shutdown_pending>0) {
			  /* goto loc_return; */
                          while(1>0) { int ddd; ddd=ddd; }
                        }
                        goto loc_continueA;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    /* ap_log_error5(APLOG_MARK, APLOG_ERR, status, */
                    /*              ap_server_conf, "apr_pollset_poll: (listen)"); */
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = 1; //pdesc[last_poll_idx++].client_data;
		break;

	    loc_continueA: {int yyy2; yyy2=yyy2; }
            }
        }
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = nondet(); // lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
	  goto loc_continueB;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn > 0) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
	dummy = nondet();
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation != dummy) {
	  //ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    loc_continueB: { int uuu; uuu=uuu; }
    }
    clean_child_exit(0);
 /* loc_return: */
    while(1>0) { int ddd; ddd=ddd; }
}
Example #18
switch_memory_pool_t *switch_core_memory_init(void)
{
#ifndef INSTANTLY_DESTROY_POOLS
	switch_threadattr_t *thd_attr;
#endif
#ifdef PER_POOL_LOCK
	apr_allocator_t *my_allocator = NULL;
	apr_thread_mutex_t *my_mutex;
#endif

	memset(&memory_manager, 0, sizeof(memory_manager));

#ifdef PER_POOL_LOCK
	if ((apr_allocator_create(&my_allocator)) != APR_SUCCESS) {
		abort();
	}

	if ((apr_pool_create_ex(&memory_manager.memory_pool, NULL, NULL, my_allocator)) != APR_SUCCESS) {
		apr_allocator_destroy(my_allocator);
		my_allocator = NULL;
		abort();
	}

	if ((apr_thread_mutex_create(&my_mutex, APR_THREAD_MUTEX_NESTED, memory_manager.memory_pool)) != APR_SUCCESS) {
		abort();
	}

	apr_allocator_mutex_set(my_allocator, my_mutex);
	apr_pool_mutex_set(memory_manager.memory_pool, my_mutex);
	apr_allocator_owner_set(my_allocator, memory_manager.memory_pool);
	apr_pool_tag(memory_manager.memory_pool, "core_pool");
#else
	apr_pool_create(&memory_manager.memory_pool, NULL);
	switch_assert(memory_manager.memory_pool != NULL);
#endif

#ifdef USE_MEM_LOCK
	switch_mutex_init(&memory_manager.mem_lock, SWITCH_MUTEX_NESTED, memory_manager.memory_pool);
#endif

#ifdef INSTANTLY_DESTROY_POOLS
	{
		void *foo;
		foo = (void *) (intptr_t) pool_thread;
	}
#else

	switch_queue_create(&memory_manager.pool_queue, 50000, memory_manager.memory_pool);
	switch_queue_create(&memory_manager.pool_recycle_queue, 50000, memory_manager.memory_pool);

	switch_threadattr_create(&thd_attr, memory_manager.memory_pool);

	switch_threadattr_stacksize_set(thd_attr, SWITCH_THREAD_STACKSIZE);
	switch_thread_create(&pool_thread_p, thd_attr, pool_thread, NULL, memory_manager.memory_pool);

	while (!memory_manager.pool_thread_running) {
		switch_cond_next();
	}
#endif

	return memory_manager.memory_pool;
}
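The PER_POOL_LOCK branch above addresses the fact that an APR allocator is not thread-safe by itself: when sub-pools of one root pool are created and destroyed from multiple threads, the allocator needs its own mutex, created from the very pool it serves and then handed back to it. The setup in isolation, assuming stock APR:

#include <apr_pools.h>
#include <apr_allocator.h>
#include <apr_thread_mutex.h>

static apr_pool_t *make_locked_root_pool(void)
{
    apr_allocator_t *allocator;
    apr_thread_mutex_t *mutex;
    apr_pool_t *pool = NULL;

    apr_allocator_create(&allocator);
    apr_pool_create_ex(&pool, NULL, NULL, allocator);
    apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_NESTED, pool);
    apr_allocator_mutex_set(allocator, mutex);  /* serialize allocator access */
    apr_allocator_owner_set(allocator, pool);   /* destroying pool destroys allocator */
    apr_pool_tag(pool, "core_pool");
    return pool;
}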
Example #19
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    apr_status_t status;
    int i;
    ap_listen_rec *lr;
    apr_pollset_t *pollset;
    ap_sb_handle_t *sbh;
    apr_bucket_alloc_t *bucket_alloc;
    int last_poll_idx = 0;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                   * child initializes
                                   */

    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    status = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't initialize cross-process lock in child "
                     "(%s) (%d)", ap_lock_fname, ap_accept_lock_mech);
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    status = apr_pollset_create(&pollset, num_listensocks, pchild, 0);
    if (status != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, status, ap_server_conf,
                     "Couldn't create pollset in child; check system or user limits");
        clean_child_exit(APEXIT_CHILDSICK); /* assume temporary resource issue */
    }

    for (lr = ap_listeners, i = num_listensocks; i--; lr = lr->next) {
        apr_pollfd_t pfd = { 0 };

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        /* ### check the status */
        (void) apr_pollset_add(pollset, &pfd);
    }

    mpm_state = AP_MPMQ_RUNNING;

    bucket_alloc = apr_bucket_alloc_create(pchild);

    /* die_now is set when AP_SIG_GRACEFUL is received in the child;
     * shutdown_pending is set when SIGTERM is received when running
     * in single process mode.  */
    while (!die_now && !shutdown_pending) {
        conn_rec *current_conn;
        void *csd;

        /*
         * (Re)initialize this child to a pre-connection state.
         */

        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_child > 0
             && requests_this_child++ >= ap_max_requests_per_child)) {
            clean_child_exit(0);
        }

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        /*
         * Wait for an acceptable connection to arrive.
         */

        /* Lock around "accept", if necessary */
        SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            /* There is only one listener record, so refer to that one. */
            lr = ap_listeners;
        }
        else {
            /* multiple listening sockets - need to poll */
            for (;;) {
                apr_int32_t numdesc;
                const apr_pollfd_t *pdesc;

                /* check for termination first so we don't sleep for a while in
                 * poll if already signalled
                 */
                if (one_process && shutdown_pending) {
                    SAFE_ACCEPT(accept_mutex_off());
                    return;
                }
                else if (die_now) {
                    /* In graceful stop/restart; drop the mutex
                     * and terminate the child. */
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(0);
                }
                /* timeout == 10 seconds to avoid a hang at graceful restart/stop
                 * caused by the closing of sockets by the signal handler
                 */
                status = apr_pollset_poll(pollset, apr_time_from_sec(10), 
                                          &numdesc, &pdesc);
                if (status != APR_SUCCESS) {
                    if (APR_STATUS_IS_TIMEUP(status) ||
                        APR_STATUS_IS_EINTR(status)) {
                        continue;
                    }
                    /* Single Unix documents select as returning errnos
                     * EBADF, EINTR, and EINVAL... and in none of those
                     * cases does it make sense to continue.  In fact
                     * on Linux 2.0.x we seem to end up with EFAULT
                     * occasionally, and we'd loop forever due to it.
                     */
                    ap_log_error(APLOG_MARK, APLOG_ERR, status,
                                 ap_server_conf, "apr_pollset_poll: (listen)");
                    SAFE_ACCEPT(accept_mutex_off());
                    clean_child_exit(1);
                }

                /* We can always use pdesc[0], but sockets at position N
                 * could end up completely starved of attention in a very
                 * busy server. Therefore, we round-robin across the
                 * returned set of descriptors. While it is possible that
                 * the returned set of descriptors might flip around and
                 * continue to starve some sockets, we happen to know the
                 * internal pollset implementation retains ordering
                 * stability of the sockets. Thus, the round-robin should
                 * ensure that a socket will eventually be serviced.
                 */
                if (last_poll_idx >= numdesc)
                    last_poll_idx = 0;

                /* Grab a listener record from the client_data of the poll
                 * descriptor, and advance our saved index to round-robin
                 * the next fetch.
                 *
                 * ### hmm... this descriptor might have POLLERR rather
                 * ### than POLLIN
                 */
                lr = pdesc[last_poll_idx++].client_data;
                goto got_fd;
            }
        }
    got_fd:
        /* if we accept() something we don't want to die, so we have to
         * defer the exit
         */
        status = lr->accept_func(&csd, lr, ptrans);

        SAFE_ACCEPT(accept_mutex_off());      /* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

        /*
         * We now have a connection, so set it up with the appropriate
         * socket options, file descriptors, and read/write buffers.
         */

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
Example #20
SWITCH_DECLARE(switch_status_t) switch_core_perform_new_memory_pool(switch_memory_pool_t **pool, const char *file, const char *func, int line)
{
	char *tmp;
#ifdef INSTANTLY_DESTROY_POOLS
	apr_pool_create(pool, NULL);
	switch_assert(*pool != NULL);
#else

#ifdef PER_POOL_LOCK
	apr_allocator_t *my_allocator = NULL;
	apr_thread_mutex_t *my_mutex;
#else
	void *pop = NULL;
#endif

#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
	switch_assert(pool != NULL);

#ifndef PER_POOL_LOCK
	if (switch_queue_trypop(memory_manager.pool_recycle_queue, &pop) == SWITCH_STATUS_SUCCESS && pop) {
		*pool = (switch_memory_pool_t *) pop;
	} else {
#endif

#ifdef PER_POOL_LOCK
		if ((apr_allocator_create(&my_allocator)) != APR_SUCCESS) {
			abort();
		}

		if ((apr_pool_create_ex(pool, NULL, NULL, my_allocator)) != APR_SUCCESS) {
			abort();
		}

		if ((apr_thread_mutex_create(&my_mutex, APR_THREAD_MUTEX_NESTED, *pool)) != APR_SUCCESS) {
			abort();
		}

		apr_allocator_mutex_set(my_allocator, my_mutex);
		apr_allocator_owner_set(my_allocator, *pool);

		apr_pool_mutex_set(*pool, my_mutex);

#else
		apr_pool_create(pool, NULL);
		switch_assert(*pool != NULL);
	}
#endif
#endif

	tmp = switch_core_sprintf(*pool, "%s:%d", file, line);
	apr_pool_tag(*pool, tmp);

#ifdef DEBUG_ALLOC2
	switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p New Pool %s\n", (void *) *pool, apr_pool_tag(*pool, NULL));
#endif


#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif

	return SWITCH_STATUS_SUCCESS;
}
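One detail worth noting in the tagging above: apr_pool_tag() stores the pointer it is given without copying the string, so a dynamically built tag (here the "file:line" string) must be allocated from the pool itself, exactly as the switch_core_sprintf() call does. The same idea with plain APR:

#include <apr_pools.h>
#include <apr_strings.h>

static void tag_pool_with_origin(apr_pool_t *pool, const char *file, int line)
{
    /* the tag lives in 'pool', so it stays valid for the pool's lifetime */
    char *tag = apr_psprintf(pool, "%s:%d", file, line);
    apr_pool_tag(pool, tag);
}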
Example #21
SWITCH_DECLARE(switch_status_t) switch_core_perform_destroy_memory_pool(switch_memory_pool_t **pool, const char *file, const char *func, int line)
{
	switch_assert(pool != NULL);

#ifdef DEBUG_ALLOC2
	switch_log_printf(SWITCH_CHANNEL_ID_LOG, file, func, line, NULL, SWITCH_LOG_CONSOLE, "%p Free Pool %s\n", (void *) *pool, apr_pool_tag(*pool, NULL));
#endif

#ifdef INSTANTLY_DESTROY_POOLS
#ifdef USE_MEM_LOCK
	switch_mutex_lock(memory_manager.mem_lock);
#endif
	apr_pool_destroy(*pool);
#ifdef USE_MEM_LOCK
	switch_mutex_unlock(memory_manager.mem_lock);
#endif
#else
	if ((memory_manager.pool_thread_running != 1) || (switch_queue_push(memory_manager.pool_queue, *pool) != SWITCH_STATUS_SUCCESS)) {
#ifdef USE_MEM_LOCK
		switch_mutex_lock(memory_manager.mem_lock);
#endif
		apr_pool_destroy(*pool);
#ifdef USE_MEM_LOCK
		switch_mutex_unlock(memory_manager.mem_lock);
#endif
	}
#endif

	*pool = NULL;

	return SWITCH_STATUS_SUCCESS;
}
Example #22
static void child_main(int child_num_arg)
{
    apr_pool_t *ptrans;
    apr_allocator_t *allocator;
    conn_rec *current_conn;
    apr_status_t status = APR_EINIT;
    int i;
    ap_listen_rec *lr;
    int curr_pollfd, last_pollfd = 0;
    apr_pollfd_t *pollset;
    int offset;
    void *csd;
    ap_sb_handle_t *sbh;
    apr_status_t rv;
    apr_bucket_alloc_t *bucket_alloc;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */
    
    my_child_num = child_num_arg;
    ap_my_pid = getpid();
    csd = NULL;
    requests_this_child = 0;

    ap_fatal_signal_child_setup(ap_server_conf);

    /* Get a sub context for global allocations in this child, so that
     * we can have cleanups occur when the child exits.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pchild, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pchild);

    apr_pool_create(&ptrans, pchild);
    apr_pool_tag(ptrans, "transaction");

    /* needs to be done before we switch UIDs so we have permissions */
    ap_reopen_scoreboard(pchild, NULL, 0);
    rv = apr_proc_mutex_child_init(&accept_mutex, ap_lock_fname, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                     "Couldn't initialize cross-process lock in child");
        clean_child_exit(APEXIT_CHILDFATAL);
    }

    if (unixd_setup_child()) {
	clean_child_exit(APEXIT_CHILDFATAL);
    }

    ap_run_child_init(pchild, ap_server_conf);

    ap_create_sb_handle(&sbh, pchild, my_child_num, 0);

    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* Set up the pollfd array */
    listensocks = apr_pcalloc(pchild,
                            sizeof(*listensocks) * (num_listensocks));
    for (lr = ap_listeners, i = 0; i < num_listensocks; lr = lr->next, i++) {
        listensocks[i].accept_func = lr->accept_func;
        listensocks[i].sd = lr->sd;
    }

    pollset = apr_palloc(pchild, sizeof(*pollset) * num_listensocks);
    pollset[0].p = pchild;
    for (i = 0; i < num_listensocks; i++) {
        pollset[i].desc.s = listensocks[i].sd;
        pollset[i].desc_type = APR_POLL_SOCKET;
        pollset[i].reqevents = APR_POLLIN;
    }

    mpm_state = AP_MPMQ_RUNNING;
    
    bucket_alloc = apr_bucket_alloc_create(pchild);

    while (!die_now) {
	/*
	 * (Re)initialize this child to a pre-connection state.
	 */

	current_conn = NULL;

	apr_pool_clear(ptrans);

	if ((ap_max_requests_per_child > 0
	     && requests_this_child++ >= ap_max_requests_per_child)) {
	    clean_child_exit(0);
	}

	(void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

	/*
	 * Wait for an acceptable connection to arrive.
	 */

	/* Lock around "accept", if necessary */
	SAFE_ACCEPT(accept_mutex_on());

        if (num_listensocks == 1) {
            offset = 0;
        }
        else {
            /* multiple listening sockets - need to poll */
	    for (;;) {
                apr_status_t ret;
                apr_int32_t n;

                ret = apr_poll(pollset, num_listensocks, &n, -1);
                if (ret != APR_SUCCESS) {
                    if (APR_STATUS_IS_EINTR(ret)) {
                        continue;
                    }
    	            /* Single Unix documents select as returning errnos
    	             * EBADF, EINTR, and EINVAL... and in none of those
    	             * cases does it make sense to continue.  In fact
    	             * on Linux 2.0.x we seem to end up with EFAULT
    	             * occasionally, and we'd loop forever due to it.
    	             */
    	            ap_log_error(APLOG_MARK, APLOG_ERR, ret, ap_server_conf,
                             "apr_poll: (listen)");
    	            clean_child_exit(1);
                }
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;
                    if (curr_pollfd >= num_listensocks) {
                        curr_pollfd = 0;
                    }
                    /* XXX: Should we check for POLLERR? */
                    if (pollset[curr_pollfd].rtnevents & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        offset = curr_pollfd;
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);

                continue;
            }
        }
    got_fd:
	/* if we accept() something we don't want to die, so we have to
	 * defer the exit
	 */
        status = listensocks[offset].accept_func(&csd, 
                                                 &listensocks[offset], ptrans);
        SAFE_ACCEPT(accept_mutex_off());	/* unlock after "accept" */

        if (status == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1);
        }
        else if (status != APR_SUCCESS) {
            continue;
        }

	/*
	 * We now have a connection, so set it up with the appropriate
	 * socket options, file descriptors, and read/write buffers.
	 */

	current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, my_child_num, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }
        
        /* Check the pod and the generation number after processing a
         * connection so that we'll go away if a graceful restart occurred
         * while we were processing the connection or we are the lucky
         * idle server process that gets to die.
         */
        if (ap_mpm_pod_check(pod) == APR_SUCCESS) { /* selected as idle? */
            die_now = 1;
        }
        else if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            die_now = 1;
        }
    }
    clean_child_exit(0);
}
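/* A condensed sketch (hypothetical helper, not part of the prefork MPM) of
 * the round-robin listener scan in the poll loop above: start just after the
 * last slot serviced and wrap, so a busy low-numbered listener cannot starve
 * the others.
 */
static int next_ready_listener(const apr_pollfd_t *pollset, int n, int last)
{
    int i = last;

    do {
        i = (i + 1) % n;
        if (pollset[i].rtnevents & APR_POLLIN) {
            return i;           /* readable listener found */
        }
    } while (i != last);
    return -1;                  /* nothing ready; caller polls again */
}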
Beispiel #23
0
SWITCH_DECLARE(void) switch_core_memory_pool_tag(switch_memory_pool_t *pool, const char *tag)
{
	apr_pool_tag(pool, tag);
}
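/* A short sketch of why retagging matters: in the bundled APR used here,
 * apr_pool_tag(pool, NULL) appears to return the current tag (see the
 * DEBUG_ALLOC logging above), so a descriptive tag makes pools identifiable
 * in log lines and leak dumps. The tag "sofia_profile" is illustrative only;
 * note the tag string must outlive the pool or be allocated from it.
 */
static void tag_subpool_example(void)
{
    switch_memory_pool_t *pool = NULL;

    switch_core_new_memory_pool(&pool);    /* default tag is "file:line" */
    switch_core_memory_pool_tag(pool, "sofia_profile");    /* friendlier name */
    /* ... allocate from pool ... */
    switch_core_destroy_memory_pool(&pool);
}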
Beispiel #24
0
/* This is the thread that actually does all the work. */
static int32 worker_thread(void *dummy)
{
    int worker_slot = (int)dummy;
    apr_allocator_t *allocator;
    apr_bucket_alloc_t *bucket_alloc;
    apr_status_t rv = APR_EINIT;
    int last_poll_idx = 0;
    sigset_t sig_mask;
    int requests_this_child = 0;
    apr_pollset_t *pollset = NULL;
    ap_listen_rec *lr = NULL;
    ap_sb_handle_t *sbh = NULL;
    int i;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0;
    /* We have 2 pools that we create/use throughout the lifetime of this
     * worker. The first and longest lived is the pworker pool. From
     * this we create the ptrans pool, the lifetime of which is the same
     * as each connection and is reset prior to each attempt to
     * process a connection.
     */
    apr_pool_t *ptrans = NULL;
    apr_pool_t *pworker = NULL;

    mpm_state = AP_MPMQ_STARTING; /* for benefit of any hooks that run as this
                                  * child initializes
                                  */

    on_exit_thread(check_restart, (void*)worker_slot);

    /* block the signals for this thread only if we're not running as a
     * single process.
     */
    if (!one_process) {
        sigfillset(&sig_mask);
        sigprocmask(SIG_BLOCK, &sig_mask, NULL);
    }

    /* Each worker thread is fully in control of its own destiny, so to
     * allow each thread to handle the lifetime of its own resources
     * we create and use a subcontext for every thread.
     * The subcontext is a child of the pconf pool.
     */
    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&pworker, pconf, NULL, allocator);
    apr_allocator_owner_set(allocator, pworker);

    apr_pool_create(&ptrans, pworker);
    apr_pool_tag(ptrans, "transaction");

    ap_create_sb_handle(&sbh, pworker, 0, worker_slot);
    (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

    /* We add an extra socket here as we add the udp_sock we use for signalling
     * death. This gets added after the others.
     */
    apr_pollset_create(&pollset, num_listening_sockets + 1, pworker, 0);

    for (lr = ap_listeners, i = num_listening_sockets; i--; lr = lr->next) {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = lr->sd;
        pfd.reqevents = APR_POLLIN;
        pfd.client_data = lr;

        apr_pollset_add(pollset, &pfd);
    }
    {
        apr_pollfd_t pfd = {0};

        pfd.desc_type = APR_POLL_SOCKET;
        pfd.desc.s = udp_sock;
        pfd.reqevents = APR_POLLIN;

        apr_pollset_add(pollset, &pfd);
    }

    bucket_alloc = apr_bucket_alloc_create(pworker);

    mpm_state = AP_MPMQ_RUNNING;

    while (!this_worker_should_exit) {
        conn_rec *current_conn;
        void *csd;

        /* (Re)initialize this child to a pre-connection state. */
        apr_pool_clear(ptrans);

        if ((ap_max_requests_per_thread > 0
             && requests_this_child++ >= ap_max_requests_per_thread))
            clean_child_exit(0, worker_slot);

        (void) ap_update_child_status(sbh, SERVER_READY, (request_rec *) NULL);

        apr_thread_mutex_lock(accept_mutex);

        /* We always (presently) have at least 2 sockets we listen on, so
         * we can't take the fast path for a single socket that some
         * MPMs allow :(
         */
        for (;;) {
            apr_int32_t numdesc = 0;
            const apr_pollfd_t *pdesc = NULL;

            rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc);
            if (rv != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(rv)) {
                    if (one_process && shutdown_pending)
                        return 0;
                    continue;
                }
                ap_log_error(APLOG_MARK, APLOG_ERR, rv,
                             ap_server_conf, "apr_pollset_poll: (listen)");
                clean_child_exit(1, worker_slot);
            }
            /* We can always use pdesc[0], but sockets at position N
             * could end up completely starved of attention in a very
             * busy server. Therefore, we round-robin across the
             * returned set of descriptors. While it is possible that
             * the returned set of descriptors might flip around and
             * continue to starve some sockets, we happen to know the
             * internal pollset implementation retains ordering
             * stability of the sockets. Thus, the round-robin should
             * ensure that a socket will eventually be serviced.
             */
            if (last_poll_idx >= numdesc)
                last_poll_idx = 0;

            /* Grab a listener record from the client_data of the poll
             * descriptor, and advance our saved index to round-robin
             * the next fetch.
             *
             * ### hmm... this descriptor might have POLLERR rather
             * ### than POLLIN
             */

            lr = pdesc[last_poll_idx++].client_data;

            /* The only socket we add without client_data is the first, the UDP socket
             * we listen on for restart signals. If we've therefore gotten a hit on that
             * listener lr will be NULL here and we know we've been told to die.
             * Before we jump to the end of the while loop with this_worker_should_exit
             * set to 1 (causing us to exit normally we hope) we release the accept_mutex
             * as we want every thread to go through this same routine :)
             * Bit of a hack, but compared to what I had before...
             */
            if (lr == NULL) {
                this_worker_should_exit = 1;
                apr_thread_mutex_unlock(accept_mutex);
                goto got_a_black_spot;
            }
            goto got_fd;
        }
got_fd:
        /* Run beos_accept to accept the connection and set things up to
         * allow us to process it. We always release the accept_mutex here,
         * even if we fail to accept, as otherwise we'd starve other workers,
         * which would be bad.
         */
        rv = beos_accept(&csd, lr, ptrans);
        apr_thread_mutex_unlock(accept_mutex);

        if (rv == APR_EGENERAL) {
            /* resource shortage or should-not-occur occurred */
            clean_child_exit(1, worker_slot);
        } else if (rv != APR_SUCCESS)
            continue;

        current_conn = ap_run_create_connection(ptrans, ap_server_conf, csd, worker_slot, sbh, bucket_alloc);
        if (current_conn) {
            ap_process_connection(current_conn, csd);
            ap_lingering_close(current_conn);
        }

        if (ap_my_generation !=
                 ap_scoreboard_image->global->running_generation) { /* restart? */
            /* yeah, this could be non-graceful restart, in which case the
             * parent will kill us soon enough, but why bother checking?
             */
            this_worker_should_exit = 1;
        }
got_a_black_spot:
        ;               /* a label must be followed by a statement in C */
    }

    apr_pool_destroy(ptrans);
    apr_pool_destroy(pworker);

    clean_child_exit(0, worker_slot);
}

static int make_worker(int slot)
{
    thread_id tid;

    if (slot + 1 > ap_max_child_assigned)
        ap_max_child_assigned = slot + 1;

    (void) ap_update_child_status_from_indexes(0, slot, SERVER_STARTING, (request_rec*)NULL);

    if (one_process) {
        set_signals();
        ap_scoreboard_image->parent[0].pid = getpid();
        ap_scoreboard_image->servers[0][slot].tid = find_thread(NULL);
        return 0;
    }

    tid = spawn_thread(worker_thread, "apache_worker", B_NORMAL_PRIORITY,
                       (void *)slot);
    if (tid < B_NO_ERROR) {
        ap_log_error(APLOG_MARK, APLOG_ERR, errno, NULL,
            "spawn_thread: Unable to start a new thread");
        /* In case system resources are maxed out, we don't want
         * Apache running away with the CPU trying to fork over and
         * over and over again.
         */
        (void) ap_update_child_status_from_indexes(0, slot, SERVER_DEAD,
                                                   (request_rec*)NULL);

        sleep(10);
        return -1;
    }
    resume_thread(tid);

    ap_scoreboard_image->servers[0][slot].tid = tid;
    return 0;
}
Beispiel #25
0
apr_status_t modsecurity_tx_init(modsec_rec *msr) 
{
    const char *s = NULL;

    /* Register TX cleanup */
    apr_pool_cleanup_register(msr->mp, msr, modsecurity_tx_cleanup, apr_pool_cleanup_null);

    /* Initialise C-L */
    msr->request_content_length = -1;
    s = apr_table_get(msr->request_headers, "Content-Length");
    if (s != NULL) {
        msr->request_content_length = strtol(s, NULL, 10);
    }

    /* Figure out whether this request has a body */
    msr->reqbody_chunked = 0;
    msr->reqbody_should_exist = 0;
    if (msr->request_content_length == -1) {
        /* There's no C-L, but is chunked encoding used? */
        char *transfer_encoding = (char *)apr_table_get(msr->request_headers, "Transfer-Encoding");
        if ((transfer_encoding != NULL) && (strstr(transfer_encoding, "chunked") != NULL)) {
            msr->reqbody_should_exist = 1;
            msr->reqbody_chunked = 1;
        }
    } else {
        /* C-L found */
        msr->reqbody_should_exist = 1;
    }

    /* Initialise C-T */
    msr->request_content_type = NULL;
    s = apr_table_get(msr->request_headers, "Content-Type");
    if (s != NULL) {
        msr->request_content_type = s;
    }
    
    /* Decide what to do with the request body. */
    if ((msr->request_content_type != NULL) 
            && (strncasecmp(msr->request_content_type, "application/x-www-form-urlencoded", 33) == 0)) {
        /* Always place POST requests with "application/x-www-form-urlencoded" payloads in memory. */
        msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
        msr->msc_reqbody_spilltodisk = 1;
        msr->msc_reqbody_processor = "URLENCODED";
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "msc reqbody processor change to URLENCODED");
    } else {
        /* In all other cases, try using the memory first but switch over to disk for larger bodies. */
        msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
        msr->msc_reqbody_spilltodisk = 1;

        /* If the C-L is known and there's more data than our limit go to disk straight away. */
        if ((msr->request_content_length != -1) 
             && (msr->request_content_length > msr->txcfg->reqbody_inmemory_limit)) {
            msr->msc_reqbody_storage = MSC_REQBODY_DISK;
        }

        if (msr->request_content_type != NULL) {
            if (strncasecmp(msr->request_content_type, "multipart/form-data", 19) == 0) {
                msr->msc_reqbody_processor = "MULTIPART";
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "msc reqbody processor change to MULTIPART");
            } else if (strncasecmp(msr->request_content_type, "text/xml", 8) == 0) {
                msr->msc_reqbody_processor = "XML";
                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, NULL, "msc reqbody processor change to XML");
            }
        }
    }

    /* Check if we are forcing buffering, then use memory only. */
    if (msr->txcfg->reqbody_buffering != REQUEST_BODY_FORCEBUF_OFF) {
        msr->msc_reqbody_storage = MSC_REQBODY_MEMORY;
        msr->msc_reqbody_spilltodisk = 0;
    }

    /* Initialise arguments */
    msr->arguments = apr_table_make(msr->mp, 32);
    if (msr->arguments == NULL) {
        return -1;
    }
    if (msr->query_string != NULL) {
        int invalid_count = 0;
        /* Parse the query-string arguments */
        if (parse_arguments(msr, msr->query_string, strlen(msr->query_string),
            msr->txcfg->argument_separator, "QUERY_STRING", msr->arguments, &invalid_count) < 0) {
            msr_log(msr, 1, "Initialisation: Error occurred while parsing QUERY_STRING arguments.");
            return -1;
        }

        if (invalid_count) {
            msr->urlencoded_error = 1;
        }
    }
     
    if (msr->request_uri != NULL) {  
        /* Parse request_uri */
        msr->request_uri = (const char *)parse_uri(msr->mp, msr->request_uri);
        if (msr->request_uri == NULL) {
            msr_log(msr, 1, "Initialisation: Error occurred while parsing REQUEST_URI arguments.");
            return -1;
        }
    } 

    if (msr->request_line != NULL) {
        /* Parse request_line */
        msr->request_line = (const char *)parse_request_line(msr->mp, msr->request_line);
        if (msr->request_line == NULL) {
            msr_log(msr, 1, "Initialisation: Error occurred while parsing REQUEST_LINE arguments.");
            return -1;
        }
    }

    msr->arguments_to_sanitize = apr_table_make(msr->mp, 16);
    if (msr->arguments_to_sanitize == NULL) {
        return -1;
    }
    
    msr->request_headers_to_sanitize = apr_table_make(msr->mp, 16);
    if (msr->request_headers_to_sanitize == NULL) {
        return -1;
    }
    
    msr->response_headers_to_sanitize = apr_table_make(msr->mp, 16);
    if (msr->response_headers_to_sanitize == NULL) {
        return -1;
    }
    
    msr->pattern_to_sanitize = apr_table_make(msr->mp, 32);
    if (msr->pattern_to_sanitize == NULL) {
        return -1;
    }

    /* Initialise cookies */
    msr->request_cookies = apr_table_make(msr->mp, 5);
    if (msr->request_cookies == NULL) {
        return -1;
    }

    /* Initialize matched vars */
    msr->matched_vars = apr_table_make(msr->mp, 8);
    if (msr->matched_vars == NULL) {
        return -1;
    }
    apr_table_clear(msr->matched_vars);

    /* Locate the cookie headers and parse them */
    if (parse_request_cookie(msr) == -1) {
        return -1;
    }

    /* Collections. */
    msr->tx_vars = apr_table_make(msr->mp, 32);
    if (msr->tx_vars == NULL) {
        return -1;
    }

    msr->geo_vars = apr_table_make(msr->mp, 8);
    if (msr->geo_vars == NULL) {
        return -1;
    }

    msr->collections_original = apr_table_make(msr->mp, 8);
    if (msr->collections_original == NULL) {
        return -1;
    }
    msr->collections = apr_table_make(msr->mp, 8);
    if (msr->collections == NULL) {
        return -1;
    }
    msr->collections_dirty = apr_table_make(msr->mp, 8);
    if (msr->collections_dirty == NULL) {
        return -1;
    }

    /* Other */
    msr->tcache = NULL;
    msr->tcache_items = 0;
    
    /* Initialise the variable-fetch cache memory pool */
#ifdef VAR_FETCH_CACHE
    if (apr_pool_create(&msr->var_fetch_cache_mptmp, msr->mp) != APR_SUCCESS) {
        return -1;
    }
    apr_pool_tag(msr->var_fetch_cache_mptmp, "varfetchcache");
#endif

    msr->matched_rules = apr_array_make(msr->mp, 16, sizeof(void *));
    if (msr->matched_rules == NULL) {
        return -1;
    }

    msr->matched_var = (msc_string *)apr_pcalloc(msr->mp, sizeof(msc_string));
    if (msr->matched_var == NULL) {
        return -1;
    }

    msr->highest_severity = 255; /* high, invalid value */

    msr->removed_rules = apr_array_make(msr->mp, 16, sizeof(char *));
    if (msr->removed_rules == NULL) {
        return -1;
    }

    msr->removed_rules_tag = apr_array_make(msr->mp, 16, sizeof(char *));
    if (msr->removed_rules_tag == NULL) {
        return -1;
    }

    return 1;
}
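/* A minimal sketch of the cleanup registration done at the top of
 * modsecurity_tx_init(): the callback fires when msr->mp is destroyed or
 * cleared, which is how the transaction releases resources that are not
 * themselves pool-allocated. tx_cleanup_example is a hypothetical stand-in
 * for modsecurity_tx_cleanup().
 */
static apr_status_t tx_cleanup_example(void *data)
{
    modsec_rec *msr = (modsec_rec *) data;

    /* close files, free malloc'd buffers, release handles, ... */
    (void) msr;
    return APR_SUCCESS;     /* cleanups must report a status */
}

static void register_tx_cleanup_example(modsec_rec *msr)
{
    /* The third argument runs at pool destroy/clear; the fourth runs in a
     * forked child before exec (apr_pool_cleanup_null means "do nothing"
     * there).
     */
    apr_pool_cleanup_register(msr->mp, msr, tx_cleanup_example,
                              apr_pool_cleanup_null);
}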
Beispiel #26
0
static request_rec *h2_task_create_request(h2_task_env *env)
{
    conn_rec *conn = &env->c;
    request_rec *r;
    apr_pool_t *p;
    int access_status = HTTP_OK;    
    
    apr_pool_create(&p, conn->pool);
    apr_pool_tag(p, "request");
    r = apr_pcalloc(p, sizeof(request_rec));
    AP_READ_REQUEST_ENTRY((intptr_t)r, (uintptr_t)conn);
    r->pool            = p;
    r->connection      = conn;
    r->server          = conn->base_server;
    
    r->user            = NULL;
    r->ap_auth_type    = NULL;
    
    r->allowed_methods = ap_make_method_list(p, 2);
    
    r->headers_in = apr_table_copy(r->pool, env->headers);
    r->trailers_in     = apr_table_make(r->pool, 5);
    r->subprocess_env  = apr_table_make(r->pool, 25);
    r->headers_out     = apr_table_make(r->pool, 12);
    r->err_headers_out = apr_table_make(r->pool, 5);
    r->trailers_out    = apr_table_make(r->pool, 5);
    r->notes           = apr_table_make(r->pool, 5);
    
    r->request_config  = ap_create_request_config(r->pool);
    /* Must be set before we run create request hook */
    
    r->proto_output_filters = conn->output_filters;
    r->output_filters  = r->proto_output_filters;
    r->proto_input_filters = conn->input_filters;
    r->input_filters   = r->proto_input_filters;
    ap_run_create_request(r);
    r->per_dir_config  = r->server->lookup_defaults;
    
    r->sent_bodyct     = 0;                      /* bytect isn't for body */
    
    r->read_length     = 0;
    r->read_body       = REQUEST_NO_BODY;
    
    r->status          = HTTP_OK;  /* Until further notice */
    r->header_only     = 0;
    r->the_request     = NULL;
    
    /* Begin by presuming any module can make its own path_info assumptions,
     * until some module interjects and changes the value.
     */
    r->used_path_info = AP_REQ_DEFAULT_PATH_INFO;
    
    r->useragent_addr = conn->client_addr;
    r->useragent_ip = conn->client_ip;
    
    ap_run_pre_read_request(r, conn);
    
    /* Time to populate r with the data we have. */
    r->request_time = apr_time_now();
    r->the_request = apr_psprintf(r->pool, "%s %s HTTP/1.1", 
                                  env->method, env->path);
    r->method = env->method;
    /* Provide quick information about the request method as soon as known */
    r->method_number = ap_method_number_of(r->method);
    if (r->method_number == M_GET && r->method[0] == 'H') {
        r->header_only = 1;
    }

    ap_parse_uri(r, env->path);
    r->protocol = (char*)"HTTP/1.1";
    r->proto_num = HTTP_VERSION(1, 1);
    
    r->hostname = env->authority;
    
    /* update what we think the virtual host is based on the headers we've
     * now read. may update status.
     */
    ap_update_vhost_from_headers(r);
    
    /* we may have switched to another server */
    r->per_dir_config = r->server->lookup_defaults;
    
    /*
     * Add the HTTP_IN filter here to ensure that ap_discard_request_body
     * called by ap_die and by ap_send_error_response works correctly on
     * status codes that do not cause the connection to be dropped and
     * in situations where the connection should be kept alive.
     */
    ap_add_input_filter_handle(ap_http_input_filter_handle,
                               NULL, r, r->connection);
    
    if (access_status != HTTP_OK
        || (access_status = ap_run_post_read_request(r))) {
        /* Request check post hooks failed. An example of this would be a
         * request for a vhost where h2 is disabled --> 421.
         */
        h2_task_die(env, access_status, r);
        ap_update_child_status(conn->sbh, SERVER_BUSY_LOG, r);
        ap_run_log_transaction(r);
        r = NULL;
        goto traceout;
    }
    
    AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method, 
                            (char *)r->uri, (char *)r->server->defn_name, 
                            r->status);
    return r;
traceout:
    AP_READ_REQUEST_FAILURE((uintptr_t)r);
    return r;
}
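/* h2_task_create_request() follows the stock httpd pattern of giving each
 * request_rec its own tagged subpool of the connection pool. A stripped-down
 * sketch of just that pattern (make_request_shell is hypothetical):
 */
static request_rec *make_request_shell(conn_rec *conn)
{
    apr_pool_t *p;
    request_rec *r;

    apr_pool_create(&p, conn->pool);    /* dies with the connection at latest */
    apr_pool_tag(p, "request");         /* visible in pool debugging output */

    r = apr_pcalloc(p, sizeof(*r));     /* zeroed, pool-allocated */
    r->pool       = p;
    r->connection = conn;
    r->server     = conn->base_server;
    return r;    /* the whole request is freed by destroying p */
}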
Beispiel #27
0
apr_status_t ap_core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
{
    apr_status_t rv;
    apr_bucket_brigade *more;
    conn_rec *c = f->c;
    core_net_rec *net = f->ctx;
    core_output_filter_ctx_t *ctx = net->out_ctx;
    apr_read_type_e eblock = APR_NONBLOCK_READ;
    apr_pool_t *input_pool = b->p;

    /* Fail quickly if the connection has already been aborted. */
    if (c->aborted) {
        apr_brigade_cleanup(b);
        return APR_ECONNABORTED;
    }

    if (ctx == NULL) {
        ctx = apr_pcalloc(c->pool, sizeof(*ctx));
        net->out_ctx = ctx;
    }

    /* If we have a saved brigade, concatenate the new brigade to it */
    if (ctx->b) {
        APR_BRIGADE_CONCAT(ctx->b, b);
        b = ctx->b;
        ctx->b = NULL;
    }

    /* Perform multiple passes over the brigade, sending batches of output
       to the connection. */
    while (b && !APR_BRIGADE_EMPTY(b)) {
        apr_size_t nbytes = 0;
        apr_bucket *last_e = NULL; /* initialized for debugging */
        apr_bucket *e;

        /* one group of iovecs per pass over the brigade */
        apr_size_t nvec = 0;
        apr_size_t nvec_trailers = 0;
        struct iovec vec[MAX_IOVEC_TO_WRITE];
        struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];

        /* one file per pass over the brigade */
        apr_file_t *fd = NULL;
        apr_size_t flen = 0;
        apr_off_t foffset = 0;

        /* keep track of buckets that we've concatenated
         * to avoid small writes
         */
        apr_bucket *last_merged_bucket = NULL;

        /* tail of brigade if we need another pass */
        more = NULL;

        /* Iterate over the brigade: collect iovecs and/or a file */
        for (e = APR_BRIGADE_FIRST(b);
                e != APR_BRIGADE_SENTINEL(b);
                e = APR_BUCKET_NEXT(e))
        {
            /* keep track of the last bucket processed */
            last_e = e;
            if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
                break;
            }
            else if (APR_BUCKET_IS_FLUSH(e)) {
                if (e != APR_BRIGADE_LAST(b)) {
                    more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
                }
                break;
            }

            /* It doesn't make any sense to use sendfile for a file bucket
             * that represents 10 bytes.
             */
            else if (APR_BUCKET_IS_FILE(e)
                     && (e->length >= AP_MIN_SENDFILE_BYTES)) {
                apr_bucket_file *a = e->data;

                /* We can't handle more than one file bucket at a time
                 * so we split here and send the file we have already
                 * found.
                 */
                if (fd) {
                    more = apr_brigade_split(b, e);
                    break;
                }

                fd = a->fd;
                flen = e->length;
                foffset = e->start;
            }
            else {
                const char *str;
                apr_size_t n;

                rv = apr_bucket_read(e, &str, &n, eblock);
                if (APR_STATUS_IS_EAGAIN(rv)) {
                    /* send what we have so far since we shouldn't expect more
                     * output for a while...  next time we read, block
                     */
                    more = apr_brigade_split(b, e);
                    eblock = APR_BLOCK_READ;
                    break;
                }
                eblock = APR_NONBLOCK_READ;
                if (n) {
                    if (!fd) {
                        if (nvec == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. buffer them up, for use later. */
                            apr_bucket *temp, *next;
                            apr_bucket_brigade *temp_brig;

                            if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
                                /* We have enough data in the iovec
                                 * to justify doing a writev
                                 */
                                more = apr_brigade_split(b, e);
                                break;
                            }

                            /* Create a temporary brigade as a means
                             * of concatenating a bunch of buckets together
                             */
                            temp_brig = apr_brigade_create(f->c->pool,
                                                           f->c->bucket_alloc);
                            if (last_merged_bucket) {
                                /* If we've concatenated together small
                                 * buckets already in a previous pass,
                                 * the initial buckets in this brigade
                                 * are heap buckets that may have extra
                                 * space left in them (because they
                                 * were created by apr_brigade_write()).
                                 * We can take advantage of this by
                                 * building the new temp brigade out of
                                 * these buckets, so that the content
                                 * in them doesn't have to be copied again.
                                 */
                                APR_BRIGADE_PREPEND(b, temp_brig);
                                brigade_move(temp_brig, b, APR_BUCKET_NEXT(last_merged_bucket));
                            }

                            temp = APR_BRIGADE_FIRST(b);
                            while (temp != e) {
                                apr_bucket *d;
                                rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                                apr_brigade_write(temp_brig, NULL, NULL, str, n);
                                d = temp;
                                temp = APR_BUCKET_NEXT(temp);
                                apr_bucket_delete(d);
                            }

                            nvec = 0;
                            nbytes = 0;
                            temp = APR_BRIGADE_FIRST(temp_brig);
                            APR_BUCKET_REMOVE(temp);
                            APR_BRIGADE_INSERT_HEAD(b, temp);
                            apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;

                            /* Just in case the temporary brigade has
                             * multiple buckets, recover the rest of
                             * them and put them in the brigade that
                             * we're sending.
                             */
                            for (next = APR_BRIGADE_FIRST(temp_brig);
                                    next != APR_BRIGADE_SENTINEL(temp_brig);
                                    next = APR_BRIGADE_FIRST(temp_brig)) {
                                APR_BUCKET_REMOVE(next);
                                APR_BUCKET_INSERT_AFTER(temp, next);
                                temp = next;
                                apr_bucket_read(next, &str, &n,
                                                APR_BLOCK_READ);
                                vec[nvec].iov_base = (char*) str;
                                vec[nvec].iov_len = n;
                                nvec++;
                            }

                            apr_brigade_destroy(temp_brig);

                            last_merged_bucket = temp;
                            e = temp;
                            last_e = e;
                        }
                        else {
                            vec[nvec].iov_base = (char*) str;
                            vec[nvec].iov_len = n;
                            nvec++;
                        }
                    }
                    else {
                        /* The bucket is a trailer to a file bucket */

                        if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
                            /* woah! too many. stop now. */
                            more = apr_brigade_split(b, e);
                            break;
                        }

                        vec_trailers[nvec_trailers].iov_base = (char*) str;
                        vec_trailers[nvec_trailers].iov_len = n;
                        nvec_trailers++;
                    }

                    nbytes += n;
                }
            }
        }


        /* Completed iterating over the brigade, now determine if we want
         * to buffer the brigade or send the brigade out on the network.
         *
         * Save if we haven't accumulated enough bytes to send, the connection
         * is not about to be closed, and:
         *
         *   1) we didn't see a file, we don't have more passes over the
         *      brigade to perform,  AND we didn't stop at a FLUSH bucket.
         *      (IOW, we will save plain old bytes such as HTTP headers)
         * or
         *   2) we hit the EOS and have a keep-alive connection
         *      (IOW, this response is a bit more complex, but we save it
         *       with the hope of concatenating with another response)
         */
        if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
                && !AP_BUCKET_IS_EOC(last_e)
                && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
                    || (APR_BUCKET_IS_EOS(last_e)
                        && c->keepalive == AP_CONN_KEEPALIVE))) {

            /* NEVER save an EOS in here.  If we are saving a brigade with
             * an EOS bucket, then we are doing keepalive connections, and
             * we want to process the second request fully.
             */
            if (APR_BUCKET_IS_EOS(last_e)) {
                apr_bucket *bucket;
                int file_bucket_saved = 0;
                apr_bucket_delete(last_e);
                for (bucket = APR_BRIGADE_FIRST(b);
                        bucket != APR_BRIGADE_SENTINEL(b);
                        bucket = APR_BUCKET_NEXT(bucket)) {

                    /* Do a read on each bucket to pull in the
                     * data from pipe and socket buckets, so
                     * that we don't leave their file descriptors
                     * open indefinitely.  Do the same for file
                     * buckets, with one exception: allow the
                     * first file bucket in the brigade to remain
                     * a file bucket, so that we don't end up
                     * doing an mmap+memcpy every time a client
                     * requests a <8KB file over a keepalive
                     * connection.
                     */
                    if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
                        file_bucket_saved = 1;
                    }
                    else {
                        const char *buf;
                        apr_size_t len = 0;
                        rv = apr_bucket_read(bucket, &buf, &len,
                                             APR_BLOCK_READ);
                        if (rv != APR_SUCCESS) {
                            ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
                                          c, "core_output_filter:"
                                          " Error reading from bucket.");
                            return HTTP_INTERNAL_SERVER_ERROR;
                        }
                    }
                }
            }
            if (!ctx->deferred_write_pool) {
                apr_pool_create(&ctx->deferred_write_pool, c->pool);
                apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
            }
            ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);

            return APR_SUCCESS;
        }

        if (fd) {
            apr_hdtr_t hdtr;
            apr_size_t bytes_sent;

#if APR_HAS_SENDFILE
            apr_int32_t flags = 0;
#endif

            memset(&hdtr, '\0', sizeof(hdtr));
            if (nvec) {
                hdtr.numheaders = nvec;
                hdtr.headers = vec;
            }

            if (nvec_trailers) {
                hdtr.numtrailers = nvec_trailers;
                hdtr.trailers = vec_trailers;
            }

#if APR_HAS_SENDFILE
            if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {

                if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
                    /* Prepare the socket to be reused */
                    flags |= APR_SENDFILE_DISCONNECT_SOCKET;
                }

                rv = sendfile_it_all(net,      /* the network information   */
                                     fd,       /* the file to send          */
                                     &hdtr,    /* header and trailer iovecs */
                                     foffset,  /* offset in the file to begin
                                                  sending from              */
                                     flen,     /* length of file            */
                                     nbytes + flen, /* total length including
                                                       headers              */
                                     &bytes_sent,   /* how many bytes were
                                                       sent                 */
                                     flags);   /* apr_sendfile flags        */
            }
            else
#endif
            {
                rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
                                      &bytes_sent);
            }

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);

            fd = NULL;
        }
        else {
            apr_size_t bytes_sent;

            rv = writev_it_all(net->client_socket,
                               vec, nvec,
                               nbytes, &bytes_sent);

            if (logio_add_bytes_out && bytes_sent > 0)
                logio_add_bytes_out(c, bytes_sent);
        }

        apr_brigade_cleanup(b);

        /* drive cleanups for resources which were set aside
         * this may occur before or after termination of the request which
         * created the resource
         */
        if (ctx->deferred_write_pool) {
            if (more && more->p == ctx->deferred_write_pool) {
                /* "more" belongs to the deferred_write_pool,
                 * which is about to be cleared.
                 */
                if (APR_BRIGADE_EMPTY(more)) {
                    more = NULL;
                }
                else {
                    /* uh oh... change more's lifetime
                     * to the input brigade's lifetime
                     */
                    apr_bucket_brigade *tmp_more = more;
                    more = NULL;
                    ap_save_brigade(f, &more, &tmp_more, input_pool);
                }
            }
            apr_pool_clear(ctx->deferred_write_pool);
        }

        if (rv != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
                          "core_output_filter: writing data to the network");

            if (more)
                apr_brigade_cleanup(more);

            /* No need to check for SUCCESS, we did that above. */
            if (!APR_STATUS_IS_EAGAIN(rv)) {
                c->aborted = 1;
                return APR_ECONNABORTED;
            }

            return APR_SUCCESS;
        }

        b = more;
        more = NULL;
    }  /* end while () */

    return APR_SUCCESS;
}
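/* A compact sketch of the set-aside step the filter above relies on: buckets
 * that must survive past this call are copied into a private, tagged pool
 * with ap_save_brigade(), and that pool is cleared once the data has been
 * written out. save_for_later is a hypothetical helper.
 */
static apr_status_t save_for_later(ap_filter_t *f,
                                   core_output_filter_ctx_t *ctx,
                                   apr_bucket_brigade **bb, conn_rec *c)
{
    if (!ctx->deferred_write_pool) {
        apr_pool_create(&ctx->deferred_write_pool, c->pool);
        apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
    }
    /* Reads/copies the buckets so they no longer reference request-lifetime
     * memory; on success *bb has been emptied into ctx->b.
     */
    return ap_save_brigade(f, &ctx->b, bb, ctx->deferred_write_pool);
}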
Beispiel #28
0
static int32 worker_thread(void * dummy)
{
    proc_info * ti = dummy;
    int child_slot = ti->slot;
    apr_pool_t *tpool = ti->tpool;
    apr_allocator_t *allocator;
    apr_socket_t *csd = NULL;
    apr_pool_t *ptrans;		/* Pool for per-transaction stuff */
    apr_bucket_alloc_t *bucket_alloc;
    apr_socket_t *sd = NULL;
    apr_status_t rv = APR_EINIT;
    int srv, n;
    int curr_pollfd = 0, last_pollfd = 0;
    sigset_t sig_mask;
    int requests_this_child = ap_max_requests_per_thread;
    apr_pollfd_t *pollset;
    /* each worker thread is in control of its own destiny...*/
    int this_worker_should_exit = 0; 
    free(ti);

    mpm_state = AP_MPMQ_STARTING;

    on_exit_thread(check_restart, (void*)child_slot);
          
    /* block the signals for this thread */
    sigfillset(&sig_mask);
    sigprocmask(SIG_BLOCK, &sig_mask, NULL);

    apr_allocator_create(&allocator);
    apr_allocator_max_free_set(allocator, ap_max_mem_free);
    apr_pool_create_ex(&ptrans, tpool, NULL, allocator);
    apr_allocator_owner_set(allocator, ptrans);

    apr_pool_tag(ptrans, "transaction");

    bucket_alloc = apr_bucket_alloc_create_ex(allocator);

    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count++;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_STARTING,
                                               (request_rec*)NULL);
                                  
    apr_poll_setup(&pollset, num_listening_sockets + 1, tpool);
    for (n = 0; n <= num_listening_sockets; n++)
        apr_poll_socket_add(pollset, listening_sockets[n], APR_POLLIN);

    mpm_state = AP_MPMQ_RUNNING;

    while (1) {
        /* If we're here, then chances are (unless we're the first thread created) 
         * we're going to be held up in the accept mutex, so doing this here
         * shouldn't hurt performance.
         */

        this_worker_should_exit |= (ap_max_requests_per_thread != 0) && (requests_this_child <= 0);
        
        if (this_worker_should_exit) break;

        (void) ap_update_child_status_from_indexes(0, child_slot, SERVER_READY,
                                                   (request_rec*)NULL);

        apr_thread_mutex_lock(accept_mutex);

        while (!this_worker_should_exit) {
            apr_int16_t event;
            apr_status_t ret;

            ret = apr_poll(pollset, num_listening_sockets + 1, &srv, -1);

            if (ret != APR_SUCCESS) {
                if (APR_STATUS_IS_EINTR(ret)) {
                    continue;
                }
                /* poll() will only return errors in catastrophic
                 * circumstances. Let's try exiting gracefully, for now. */
                ap_log_error(APLOG_MARK, APLOG_ERR, ret, (const server_rec *)
                             ap_server_conf, "apr_poll: (listen)");
                this_worker_should_exit = 1;
            } else {
                /* if we've bailed in apr_poll what's the point of trying to use the data? */
                apr_poll_revents_get(&event, listening_sockets[0], pollset);

                if (event & APR_POLLIN){
                    apr_sockaddr_t *rec_sa;
                    apr_size_t len = 5;
                    char *tmpbuf = apr_palloc(ptrans, sizeof(char) * 5);
                    apr_sockaddr_info_get(&rec_sa, "127.0.0.1", APR_UNSPEC, 7772, 0, ptrans);
                    
                    if ((ret = apr_recvfrom(rec_sa, listening_sockets[0], 0, tmpbuf, &len))
                        != APR_SUCCESS){
                        ap_log_error(APLOG_MARK, APLOG_ERR, ret, NULL, 
                            "error getting data from UDP!!");
                    } else {
                        /* add checking??? */              
                    }
                    this_worker_should_exit = 1;
                }
            }
          
            if (this_worker_should_exit) break;

            if (num_listening_sockets == 1) {
                sd = ap_listeners->sd;
                goto got_fd;
            }
            else {
                /* find a listener */
                curr_pollfd = last_pollfd;
                do {
                    curr_pollfd++;

                    if (curr_pollfd > num_listening_sockets)
                        curr_pollfd = 1;
                    
                    /* Get the revent... */
                    apr_poll_revents_get(&event, listening_sockets[curr_pollfd], pollset);
                    
                    if (event & APR_POLLIN) {
                        last_pollfd = curr_pollfd;
                        sd = listening_sockets[curr_pollfd];
                        goto got_fd;
                    }
                } while (curr_pollfd != last_pollfd);
            }
        }
    got_fd:

        if (!this_worker_should_exit) {
            rv = apr_accept(&csd, sd, ptrans);

            apr_thread_mutex_unlock(accept_mutex);
            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_ERR, rv, ap_server_conf,
                  "apr_accept");
            } else {
                process_socket(ptrans, csd, child_slot, bucket_alloc);
                requests_this_child--;
            }
        }
        else {
            apr_thread_mutex_unlock(accept_mutex);
            break;
        }
        apr_pool_clear(ptrans);
    }

    ap_update_child_status_from_indexes(0, child_slot, SERVER_DEAD, (request_rec*)NULL);

    apr_bucket_alloc_destroy(bucket_alloc);

    ap_log_error(APLOG_MARK, APLOG_NOTICE, 0, NULL,
                 "worker_thread %ld exiting", find_thread(NULL));
    
    apr_thread_mutex_lock(worker_thread_count_mutex);
    worker_thread_count--;
    apr_thread_mutex_unlock(worker_thread_count_mutex);

    return (0);
}
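/* A sketch of the other half of the UDP shutdown signal consumed above: the
 * controlling process wakes the workers by sending a few bytes to the
 * loopback port they poll on. Written against the APR 1.x socket names
 * (apr_socket_create/apr_socket_sendto); the port 7772 matches the
 * apr_recvfrom() above, everything else is hypothetical.
 */
static apr_status_t wake_workers(apr_pool_t *p)
{
    apr_socket_t *sock;
    apr_sockaddr_t *sa;
    apr_size_t len = 4;
    apr_status_t rv;

    rv = apr_sockaddr_info_get(&sa, "127.0.0.1", APR_INET, 7772, 0, p);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_socket_create(&sock, sa->family, SOCK_DGRAM, APR_PROTO_UDP, p);
    if (rv != APR_SUCCESS) {
        return rv;
    }
    rv = apr_socket_sendto(sock, sa, 0, "die!", &len);
    apr_socket_close(sock);
    return rv;
}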
Beispiel #29
0
/*
   =============================================================================
    dbacquire(dbType, dbString): Opens a new connection to a database of type 
    _dbType_ and with the connection parameters _dbString_. If successful, 
    returns a table with functions for using the database handle. If an error 
    occurs, returns nil as the first parameter and the error message as the 
    second. See the APR_DBD documentation for a list of database types and 
    connection strings supported.
   =============================================================================
 */
AP_LUA_DECLARE(int) lua_db_acquire(lua_State *L)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
    const char      *type;
    const char      *arguments;
    const char      *error = 0;
    request_rec     *r;
    lua_db_handle   *db = 0;
    apr_status_t     rc = 0;
    ap_dbd_t        *dbdhandle = NULL;
    apr_pool_t      *pool = NULL;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~*/

    r = ap_lua_check_request_rec(L, 1);
    if (r) {
        type = luaL_optstring(L, 2, "mod_dbd"); /* Defaults to mod_dbd */
        
        if (!strcmp(type, "mod_dbd")) {

            lua_settop(L, 0);
            lua_ap_dbd_open = APR_RETRIEVE_OPTIONAL_FN(ap_dbd_open);
            if (lua_ap_dbd_open)
                dbdhandle = (ap_dbd_t *) lua_ap_dbd_open(
                        r->server->process->pool, r->server);

            if (dbdhandle) {
                db = lua_push_db_handle(L, r, LUA_DBTYPE_MOD_DBD, dbdhandle->pool);
                db->driver = dbdhandle->driver;
                db->handle = dbdhandle->handle;
                db->dbdhandle = dbdhandle;
                return 1;
            }
            else {
                lua_pushnil(L);
                if ( lua_ap_dbd_open == NULL )
                    lua_pushliteral(L,
                                    "mod_dbd doesn't seem to have been loaded.");
                else
                    lua_pushliteral(
                        L,
                        "Could not acquire connection from mod_dbd. If your database is running, this may indicate a permission problem.");
                return 2;
            }
        }
        else {
            rc = apr_pool_create(&pool, NULL);
            if (rc != APR_SUCCESS) {
                lua_pushnil(L);
                lua_pushliteral(L, "Could not allocate memory for database!");
                return 2;
            }
            apr_pool_tag(pool, "lua_dbd_pool");
            apr_dbd_init(pool);
            dbdhandle = apr_pcalloc(pool, sizeof(ap_dbd_t));
            rc = apr_dbd_get_driver(pool, type, &dbdhandle->driver);
            if (rc == APR_SUCCESS) {
                luaL_checktype(L, 3, LUA_TSTRING);
                arguments = lua_tostring(L, 3);
                lua_settop(L, 0);
                
                if (strlen(arguments)) {
                    rc = apr_dbd_open_ex(dbdhandle->driver, pool, 
                            arguments, &dbdhandle->handle, &error);
                    if (rc == APR_SUCCESS) {
                        db = lua_push_db_handle(L, r, LUA_DBTYPE_APR_DBD, pool);
                        db->driver = dbdhandle->driver;
                        db->handle = dbdhandle->handle;
                        db->dbdhandle = dbdhandle;
                        return 1;
                    }
                    else {
                        lua_pushnil(L);
                        if (error) {
                            lua_pushstring(L, error);
                            return 2;
                        }

                        return 1;
                    }
                }

                lua_pushnil(L);
                lua_pushliteral(L,
                                "No database connection string was specified.");
                apr_pool_destroy(pool);
                return (2);
            }
            else {
                lua_pushnil(L);
                if (APR_STATUS_IS_ENOTIMPL(rc)) {
                    lua_pushfstring(L, 
                         "driver for %s not available", type);
                }
                else if (APR_STATUS_IS_EDSOOPEN(rc)) {
                    lua_pushfstring(L, 
                                "can't find driver for %s", type);
                }
                else if (APR_STATUS_IS_ESYMNOTFOUND(rc)) {
                    lua_pushfstring(L, 
                                "driver for %s is invalid or corrupted",
                                type);
                }
                else {
                    lua_pushliteral(L, 
                                "mod_lua not compatible with APR in get_driver");
                }
                lua_pushinteger(L, rc);
                apr_pool_destroy(pool);
                return 3;
            }
        }

        lua_pushnil(L);
        return 1;
    }

    return 0;
}
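/* A standalone sketch of the apr_dbd sequence the non-mod_dbd branch above
 * performs (init, driver lookup, open), plus one throwaway query. The driver
 * name "sqlite3" and the connection string are illustrative only.
 */
static apr_status_t dbd_roundtrip(apr_pool_t *parent)
{
    apr_pool_t *pool;
    const apr_dbd_driver_t *driver;
    apr_dbd_t *handle;
    const char *error = NULL;
    int nrows = 0;
    apr_status_t rc;

    apr_pool_create(&pool, parent);
    apr_pool_tag(pool, "dbd_example");

    apr_dbd_init(pool);    /* must run before any other apr_dbd call */
    rc = apr_dbd_get_driver(pool, "sqlite3", &driver);
    if (rc == APR_SUCCESS) {
        rc = apr_dbd_open_ex(driver, pool, "/tmp/test.db", &handle, &error);
    }
    if (rc == APR_SUCCESS) {
        apr_dbd_query(driver, handle, &nrows,
                      "CREATE TABLE IF NOT EXISTS t (x INT)");
        apr_dbd_close(driver, handle);
    }
    apr_pool_destroy(pool);    /* releases the driver resources bound to it */
    return rc;
}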
Beispiel #30
0
/**
 * An h2_mplx needs to be thread-safe, as it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t are 
 *   not re-entrant. The separate allocator works without a 
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session allocator with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, 
                        const h2_config *conf, 
                        apr_interval_time_t stream_timeout,
                        h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_pool_tag(m->pool, "h2_mplx");
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        status = apr_thread_cond_create(&m->task_thawed, m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
    
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        m->max_streams = h2_config_geti(conf, H2_CONF_MAX_STREAMS);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->q = h2_iq_create(m->pool, m->max_streams);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->stream_timeout = stream_timeout;
        m->workers = workers;
        m->workers_max = workers->max_workers;
        m->workers_def_limit = 4;
        m->workers_limit = m->workers_def_limit;
        m->last_limit_change = m->last_idle_block = apr_time_now();
        m->limit_change_interval = apr_time_from_msec(200);
        
        m->tx_handles_reserved = 0;
        m->tx_chunk_size = 4;
        
        m->spare_slaves = apr_array_make(m->pool, 10, sizeof(conn_rec*));
        
        m->ngn_shed = h2_ngn_shed_create(m->pool, m->c, m->max_streams, 
                                         m->stream_max_mem);
        h2_ngn_shed_set_ctx(m->ngn_shed, m);
    }
    return m;
}
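/* If, unlike h2_mplx above, a pool must itself be usable from several
 * threads with no external lock, the allocator can be given its own mutex,
 * the same arrangement as the PER_POOL_LOCK branch in the first example.
 * A sketch only, with error handling reduced to bail-outs:
 */
static apr_pool_t *create_threadsafe_pool(void)
{
    apr_allocator_t *alloc = NULL;
    apr_thread_mutex_t *mutex = NULL;
    apr_pool_t *pool = NULL;

    if (apr_allocator_create(&alloc) != APR_SUCCESS) {
        return NULL;
    }
    if (apr_pool_create_ex(&pool, NULL, NULL, alloc) != APR_SUCCESS) {
        apr_allocator_destroy(alloc);
        return NULL;
    }
    if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool)
        != APR_SUCCESS) {
        apr_pool_destroy(pool);        /* owner not set yet, so... */
        apr_allocator_destroy(alloc);  /* ...free the allocator ourselves */
        return NULL;
    }
    apr_allocator_mutex_set(alloc, mutex);
    apr_allocator_owner_set(alloc, pool);  /* pool destroy frees allocator */
    apr_pool_tag(pool, "threadsafe_pool");
    return pool;
}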