Example #1
static apr_status_t ntp_cleanup(void *data)
{
    tcn_ntp_conn_t *con = (tcn_ntp_conn_t *)data;

    if (con) {
        if (con->h_pipe) {
            FlushFileBuffers(con->h_pipe);
            CloseHandle(con->h_pipe);
            con->h_pipe = NULL;
        }
        if (con->rd_event) {
            CloseHandle(con->rd_event);
            con->rd_event = NULL;
        }
        if (con->wr_event) {
            CloseHandle(con->wr_event);
            con->wr_event = NULL;
        }
    }

#ifdef TCN_DO_STATISTICS
    apr_atomic_inc32(&ntp_cleared);
#endif
    return APR_SUCCESS;
}
Example #2
apr_status_t apu_dso_init(apr_pool_t *pool)
{
    apr_status_t ret = APR_SUCCESS;
    apr_pool_t *parent;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;

        return APR_SUCCESS;
    }

    /* Top level pool scope, need process-scope lifetime */
    for (parent = apr_pool_parent_get(pool);
         parent && parent != pool;
         parent = apr_pool_parent_get(pool))
        pool = parent;

    dsos = apr_hash_make(pool);

#if APR_HAS_THREADS
    ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    /* This already registers a pool cleanup */
#endif

    apr_pool_cleanup_register(pool, NULL, apu_dso_term,
                              apr_pool_cleanup_null);

    apr_atomic_dec32(&in_init);

    return ret;
}
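
The inc-then-spin sequence above is a common APR once-only initialization pattern: the one caller for whom apr_atomic_inc32() returns 0 performs the setup, while every later caller pins the counter back to 1 (preventing wrap-around) and spins until the busy flag drops. A minimal standalone sketch of the same idea, assuming a guard that starts at 0 and a busy flag that starts at 1 (the names init_guard and init_busy are illustrative, not from apr-util):

#include <apr_atomic.h>

static volatile apr_uint32_t init_guard = 0;  /* 0 until someone starts init */
static volatile apr_uint32_t init_busy  = 1;  /* cleared once init completes */

static void init_once(void)
{
    if (apr_atomic_inc32(&init_guard)) {       /* non-zero: someone beat us */
        apr_atomic_set32(&init_guard, 1);      /* pin counter, prevent wrap-around */
        while (apr_atomic_read32(&init_busy))  /* spin until the winner finishes */
            ;
        return;
    }

    /* First caller: perform the one-time setup here ... */

    apr_atomic_dec32(&init_busy);              /* publish: fully initialized */
}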
Example #3
apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
                                    apr_pool_t * pool_to_recycle)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    ap_push_pool(queue_info, pool_to_recycle);

    /* Atomically increment the count of idle workers */
    prev_idlers = apr_atomic_inc32(&(queue_info->idlers)) - zero_pt;

    /* If other threads are waiting on a worker, wake one up */
    if (prev_idlers < 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
Example #4
SWITCH_DECLARE(void) switch_atomic_inc(volatile switch_atomic_t *mem)
{
#ifdef apr_atomic_t
	apr_atomic_inc((apr_atomic_t *)mem);
#else
	apr_atomic_inc32((apr_uint32_t *)mem);
#endif
}
Example #5
static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex)
{
    if (mutex->pthread_refcounting) {
        apr_atomic_inc32(&proc_pthread_mutex_refcount(mutex));
        return 1;
    }
    return 0;
}
Example #6
static apr_status_t APR_THREAD_FUNC
ntp_socket_close(apr_socket_t *sock)
{
#ifdef TCN_DO_STATISTICS
    apr_atomic_inc32(&ntp_closed);
#endif
    return ntp_cleanup(sock);
}
Example #7
static void test_inc32(abts_case *tc, void *data)
{
    apr_uint32_t oldval;
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 23);
    oldval = apr_atomic_inc32(&y32);
    ABTS_INT_EQUAL(tc, 23, oldval);
    ABTS_INT_EQUAL(tc, 24, y32);
}
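
The test above pins down the return convention: apr_atomic_inc32() returns the value held *before* the increment (fetch-and-add semantics), which is what lets callers such as Example #15 below treat a returned 0 as "I was first". For comparison, a rough C11 equivalent of that contract (a sketch, not APR's actual per-platform implementation):

#include <stdatomic.h>
#include <stdint.h>

/* Roughly what apr_atomic_inc32() promises: atomically add 1
 * and hand back the previous value. */
static uint32_t inc32(_Atomic uint32_t *mem)
{
    return atomic_fetch_add(mem, 1);
}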
Example #8
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
    apr_int32_t new_idlers;
    new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
    if (--new_idlers <= 0) {
        apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
        return APR_EAGAIN;
    }
    return APR_SUCCESS;
}
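
Examples #3 and #8 above (and #22 below) share one trick: queue_info->idlers is an unsigned counter biased by the constant zero_pt, so "negative" logical values (threads blocked waiting for an idler) stay representable without unsigned underflow, and subtracting zero_pt from the raw value recovers the signed logical count. A worked sketch of the encoding, with an illustrative bias (httpd defines its own zero_pt):

#include <apr_atomic.h>

#define ZERO_PT (1u << 16)   /* illustrative bias: logical zero maps to 65536 */

static volatile apr_uint32_t idlers = ZERO_PT;  /* logical idle count starts at 0 */

/* Logical decrement: returns the signed logical count held *before* the
 * decrement, mirroring Example #22's prev_idlers computation. */
static apr_int32_t dec_idlers(void)
{
    /* apr_atomic_add32() returns the old raw value; passing -1 wraps to
     * 0xFFFFFFFF, which is a decrement in modulo-2^32 arithmetic. */
    return (apr_int32_t)(apr_atomic_add32(&idlers, (apr_uint32_t)-1) - ZERO_PT);
}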
Example #9
static void test_set_add_inc_sub(abts_case *tc, void *data)
{
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 0);
    apr_atomic_add32(&y32, 20);
    apr_atomic_inc32(&y32);
    apr_atomic_sub32(&y32, 10);

    ABTS_INT_EQUAL(tc, 11, y32);
}
Example #10
void update_and_notify_health_check(px_config *conf) {
    if (!conf->px_health_check) {
        return;
    }
    apr_uint32_t old_value = apr_atomic_inc32(&conf->px_errors_count);
    apr_thread_mutex_lock(conf->health_check_cond_mutex);
    if (old_value >= conf->px_errors_threshold) {
        apr_thread_cond_signal(conf->health_check_cond);
    }
    apr_thread_mutex_unlock(conf->health_check_cond_mutex);
}
Example #11
static void busyloop_inc32(tbox_t *tbox)
{
    apr_uint32_t val;

    do {
        busyloop_read32(tbox);
        val = apr_atomic_inc32(tbox->mem);
        apr_thread_mutex_lock(thread_lock);
        ABTS_INT_EQUAL(tbox->tc, val, tbox->preval);
        apr_thread_mutex_unlock(thread_lock);
    } while (--tbox->loop);
}
Example #12
static void test_inc_neg1(abts_case *tc, void *data)
{
    apr_uint32_t y32 = -1;
    apr_uint32_t minus1 = -1;
    apr_uint32_t rv;
    char *str;

    rv = apr_atomic_inc32(&y32);

    ABTS_ASSERT(tc, "apr_atomic_inc32 on zero returned zero.", rv == minus1);
    str = apr_psprintf(p, "zero wrap failed: -1 + 1 = %d", y32);
    ABTS_ASSERT(tc, str, y32 == 0);
}
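
This test fixes the wrap-around behavior: incrementing a counter that holds (apr_uint32_t)-1 returns the old all-ones value and leaves 0 behind, i.e. plain modulo-2^32 unsigned arithmetic. That is precisely the wrap that Example #2 defends against with apr_atomic_set32(&initialised, 1). A self-contained restatement of the guarantee (a sketch for illustration):

#include <apr_atomic.h>
#include <assert.h>

static volatile apr_uint32_t c32 = 0xFFFFFFFFu;   /* (apr_uint32_t)-1 */

static void demo_wrap(void)
{
    apr_uint32_t old = apr_atomic_inc32(&c32); /* returns the pre-increment value */
    assert(old == 0xFFFFFFFFu);
    assert(apr_atomic_read32(&c32) == 0);      /* -1 + 1 wraps to 0 */
}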
Example #13
void * APR_THREAD_FUNC thread_func_atomic(apr_thread_t *thd, void *data)
{
    int i;

    for (i = 0; i < NUM_ITERATIONS ; i++) {
        apr_atomic_inc32(&y);
        apr_atomic_add32(&y, 2);
        apr_atomic_dec32(&y);
        apr_atomic_dec32(&y);
    }
    apr_thread_exit(thd, exit_ret_val);
    return NULL;
}
Example #14
static apr_status_t ntp_socket_cleanup(void *data)
{
    tcn_socket_t *s = (tcn_socket_t *)data;

    if (s->net->cleanup) {
        (*s->net->cleanup)(s->opaque);
        s->net->cleanup = NULL;
    }
#ifdef TCN_DO_STATISTICS
    apr_atomic_inc32(&ntp_cleared);
#endif
    return APR_SUCCESS;
}
Example #15
static void femto_child_init(apr_pool_t *p, server_rec *s)
{
  int rc;
  apr_uint32_t was = apr_atomic_inc32( &femto_num_running );
  //fprintf(stderr, "FEMTO starting was %i\n", (int) was);
  if( was == 0 ) {
    rc = femto_start_server(&femto_server);
    if( rc ) {
      ap_log_error(APLOG_MARK, APLOG_ERR, rc, s,
                   "FEMTO could not start");
      return;
    }
    apr_pool_cleanup_register(p, s, femto_child_exit, femto_child_willexec);
  }
}
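
femto_child_init() elects whichever caller sees was == 0, i.e. performed the very first increment, to start the server. When the counter is never decremented, apr_atomic_cas32() expresses the same one-winner election more directly; a hypothetical sketch (the name claim_startup is illustrative, not from this module):

#include <apr_atomic.h>

static volatile apr_uint32_t started = 0;

/* Returns 1 for exactly one caller, 0 for everyone else. */
static int claim_startup(void)
{
    /* apr_atomic_cas32(mem, with, cmp) stores `with` only if *mem == cmp
     * and always returns the old value, so only one caller can see 0. */
    return apr_atomic_cas32(&started, 1, 0) == 0;
}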
Example #16
static int open_entity(cache_handle_t *h, request_rec *r, const char *key)
{
    cache_object_t *obj;

    /* Look up entity keyed to 'url' */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
    if (obj) {
        if (obj->complete) {
            request_rec *rmain = r, *rtmp;
            apr_atomic_inc32(&obj->refcount);
            /* cache is worried about overall counts, not 'open' ones */
            cache_update(sconf->cache_cache, obj);

            /* If this is a subrequest, register the cleanup against
             * the main request. This will prevent the cache object
             * from being cleaned up from under the request after the
             * subrequest is destroyed.
             */
            rtmp = r;
            while (rtmp) {
                rmain = rtmp;
                rtmp = rmain->main;
            }
            apr_pool_cleanup_register(rmain->pool, obj, decrement_refcount,
                                      apr_pool_cleanup_null);
        }
        else {
            obj = NULL;
        }
    }

    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (!obj) {
        return DECLINED;
    }

    /* Initialize the cache_handle */
    h->cache_obj = obj;
    h->req_hdrs = NULL;  /* Pick these up in recall_headers() */
    return OK;
}
Example #17
static winnt_conn_ctx_t *winnt_get_connection(winnt_conn_ctx_t *context)
{
    int rc;
    DWORD BytesRead;
    LPOVERLAPPED pol;
#ifdef _WIN64
    ULONG_PTR CompKey;
#else
    DWORD CompKey;
#endif

    mpm_recycle_completion_context(context);

    apr_atomic_inc32(&g_blocked_threads);
    while (1) {
        if (workers_may_exit) {
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        }
        rc = GetQueuedCompletionStatus(ThreadDispatchIOCP, &BytesRead,
                                       &CompKey, &pol, INFINITE);
        if (!rc) {
            rc = apr_get_os_error();
            ap_log_error(APLOG_MARK, APLOG_DEBUG, rc, ap_server_conf, APLOGNO(00349)
                         "Child: GetQueuedComplationStatus returned %d",
                         rc);
            continue;
        }

        switch (CompKey) {
        case IOCP_CONNECTION_ACCEPTED:
            context = CONTAINING_RECORD(pol, winnt_conn_ctx_t, overlapped);
            break;
        case IOCP_SHUTDOWN:
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        default:
            apr_atomic_dec32(&g_blocked_threads);
            return NULL;
        }
        break;
    }
    apr_atomic_dec32(&g_blocked_threads);

    return context;
}
Example #18
static void lock_and_wait(toolbox_t *box)
{
    apr_status_t rv;
    abts_case *tc = box->tc;
    apr_uint32_t *count = box->data;

    rv = apr_thread_mutex_lock(box->mutex);
    ABTS_SUCCESS(rv);

    apr_atomic_inc32(count);

    rv = apr_thread_cond_wait(box->cond, box->mutex);
    ABTS_SUCCESS(rv);

    apr_atomic_dec32(count);

    rv = apr_thread_mutex_unlock(box->mutex);
    ABTS_SUCCESS(rv);
}
Example #19
static apr_status_t uxp_cleanup(void *data)
{
    tcn_uxp_conn_t *con = (tcn_uxp_conn_t *)data;

    if (con) {
        if (con->sock) {
            apr_socket_close(con->sock);
            con->sock = NULL;
        }
        if (con->mode == TCN_UXP_SERVER) {
            unlink(con->name);
            con->mode = TCN_UXP_UNKNOWN;
        }
    }

#ifdef TCN_DO_STATISTICS
    apr_atomic_inc32(&uxp_cleared);
#endif
    return APR_SUCCESS;
}
Example #20
void ap_push_pool(fd_queue_info_t * queue_info,
                  apr_pool_t * pool_to_recycle)
{
    struct recycled_pool *new_recycle;
    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (!pool_to_recycle)
        return;

    if (queue_info->max_recycled_pools >= 0) {
        apr_uint32_t cnt = apr_atomic_read32(&queue_info->recycled_pools_count);
        if (cnt >= queue_info->max_recycled_pools) {
            apr_pool_destroy(pool_to_recycle);
            return;
        }
        apr_atomic_inc32(&queue_info->recycled_pools_count);
    }

    apr_pool_clear(pool_to_recycle);
    new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
                                                      sizeof (*new_recycle));
    new_recycle->pool = pool_to_recycle;
    for (;;) {
        /*
         * Save queue_info->recycled_pool in local variable next because
         * new_recycle->next can be changed after apr_atomic_casptr
         * function call. For gory details see PR 44402.
         */
        struct recycled_pool *next = queue_info->recycled_pools;
        new_recycle->next = next;
        if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
                              new_recycle, next) == next)
            break;
    }
}
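
Note that the cap check in ap_push_pool() is deliberately approximate: the apr_atomic_read32()/apr_atomic_inc32() pair is not atomic as a unit, so concurrent pushers can briefly overshoot max_recycled_pools. If an exact bound mattered, the increment could be folded into a CAS loop; a hedged sketch of that alternative (not what httpd does):

#include <apr_atomic.h>

/* Atomically increment *mem, but only while it stays below `limit`.
 * Returns 1 on success, 0 if the cap had already been reached. */
static int inc_below(volatile apr_uint32_t *mem, apr_uint32_t limit)
{
    apr_uint32_t val = apr_atomic_read32(mem);
    while (val < limit) {
        apr_uint32_t old = apr_atomic_cas32(mem, val + 1, val);
        if (old == val)
            return 1;   /* we installed val + 1 */
        val = old;      /* lost a race; retry against the fresh value */
    }
    return 0;
}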
Example #21
static apr_status_t handle_response(serf_request_t *request,
                                    serf_bucket_t *response,
                                    void *handler_baton,
                                    apr_pool_t *pool)
{
    serf_status_line sl;
    apr_status_t status;
    handler_baton_t *ctx = handler_baton;

    if (!response) {
        /* A NULL response probably means that the connection was closed while
           this request was already written. Just requeue it. */
        serf_connection_t *conn = serf_request_get_conn(request);

        serf_connection_request_create(conn, setup_request, handler_baton);
        return APR_SUCCESS;
    }

    status = serf_bucket_response_status(response, &sl);
    if (status) {
        return status;
    }

    while (1) {
        struct iovec vecs[64];
        int vecs_read;
        apr_size_t bytes_written;

        status = serf_bucket_read_iovec(response, 8000, 64, vecs, &vecs_read);
        if (SERF_BUCKET_READ_ERROR(status))
            return status;

        /* got some data. print it out. */
        if (vecs_read) {
            apr_file_writev(ctx->output_file, vecs, vecs_read, &bytes_written);
        }

        /* are we done yet? */
        if (APR_STATUS_IS_EOF(status)) {
            if (ctx->print_headers) {
                serf_bucket_t *hdrs;
                hdrs = serf_bucket_response_get_headers(response);
                while (1) {
                    status = serf_bucket_read_iovec(hdrs, 8000, 64, vecs,
                                                    &vecs_read);

                    if (SERF_BUCKET_READ_ERROR(status))
                        return status;

                    if (vecs_read) {
                        apr_file_writev(ctx->output_file, vecs, vecs_read,
                                        &bytes_written);
                    }
                    if (APR_STATUS_IS_EOF(status)) {
                        break;
                    }
                }
            }

            apr_atomic_inc32(&ctx->completed_requests);
            return APR_EOF;
        }

        /* have we drained the response so far? */
        if (APR_STATUS_IS_EAGAIN(status))
            return status;

        /* loop to read some more. */
    }
    /* NOTREACHED */
}
Example #22
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
                                          int *had_to_block)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    /* Atomically decrement the idle worker count, saving the old value */
    /* See TODO in ap_queue_info_set_idle() */
    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;

    /* Block if there weren't any idle workers */
    if (prev_idlers <= 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            /* See TODO in ap_queue_info_set_idle() */
            apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still negative, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is non-negative, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now non-negative, it's safe for this function to
         *     return immediately.
         *
         *     A "negative value" (relative to zero_pt) in
         *     queue_info->idlers tells how many
         *     threads are waiting on an idle worker.
         */
        if (queue_info->idlers < zero_pt) {
            *had_to_block = 1;
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                AP_DEBUG_ASSERT(0);
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
Example #23
static apr_status_t impl_pollset_poll(apr_pollset_t *pollset,
                                      apr_interval_time_t timeout,
                                      apr_int32_t *num,
                                      const apr_pollfd_t **descriptors)
{
    apr_os_sock_t fd;
    int ret, i, j;
    unsigned int nget;
    pfd_elem_t *ep;
    apr_status_t rv = APR_SUCCESS;
    apr_pollfd_t fp;

    nget = 1;

    pollset_lock_rings();

    apr_atomic_inc32(&pollset->p->waiting);

    while (!APR_RING_EMPTY(&(pollset->p->add_ring), pfd_elem_t, link)) {
        ep = APR_RING_FIRST(&(pollset->p->add_ring));
        APR_RING_REMOVE(ep, link);

        if (ep->pfd.desc_type == APR_POLL_SOCKET) {
            fd = ep->pfd.desc.s->socketdes;
        }
        else {
            fd = ep->pfd.desc.f->filedes;
        }

        ret = port_associate(pollset->p->port_fd, PORT_SOURCE_FD, 
                             fd, get_event(ep->pfd.reqevents), ep);
        if (ret < 0) {
            rv = apr_get_netos_error();
            APR_RING_INSERT_TAIL(&(pollset->p->free_ring), ep, pfd_elem_t, link);
            break;
        }

        ep->on_query_ring = 1;
        APR_RING_INSERT_TAIL(&(pollset->p->query_ring), ep, pfd_elem_t, link);
    }

    pollset_unlock_rings();

    if (rv != APR_SUCCESS) {
        apr_atomic_dec32(&pollset->p->waiting);
        return rv;
    }

    rv = call_port_getn(pollset->p->port_fd, pollset->p->port_set, 
                        pollset->nalloc, &nget, timeout);

    /* decrease the waiting ASAP to reduce the window for calling 
       port_associate within apr_pollset_add() */
    apr_atomic_dec32(&pollset->p->waiting);

    (*num) = nget;
    if (nget) {

        pollset_lock_rings();

        for (i = 0, j = 0; i < nget; i++) {
            fp = (((pfd_elem_t*)(pollset->p->port_set[i].portev_user))->pfd);
            if ((pollset->flags & APR_POLLSET_WAKEABLE) &&
                fp.desc_type == APR_POLL_FILE &&
                fp.desc.f == pollset->wakeup_pipe[0]) {
                apr_pollset_drain_wakeup_pipe(pollset);
                rv = APR_EINTR;
            }
            else {
                pollset->p->result_set[j] = fp;            
                pollset->p->result_set[j].rtnevents =
                    get_revent(pollset->p->port_set[i].portev_events);

                /* If the ring element is still on the query ring, move it
                 * to the add ring for re-association with the event port
                 * later.  (It may have already been moved to the dead ring
                 * by a call to pollset_remove on another thread.)
                 */
                ep = (pfd_elem_t *)pollset->p->port_set[i].portev_user;
                if (ep->on_query_ring) {
                    APR_RING_REMOVE(ep, link);
                    ep->on_query_ring = 0;
                    APR_RING_INSERT_TAIL(&(pollset->p->add_ring), ep,
                                         pfd_elem_t, link);
                }
                j++;
            }
        }
        pollset_unlock_rings();
        if ((*num = j)) { /* any event besides wakeup pipe? */
            rv = APR_SUCCESS;
            if (descriptors) {
                *descriptors = pollset->p->result_set;
            }
        }
    }

    pollset_lock_rings();

    /* Shift all PFDs in the Dead Ring to the Free Ring */
    APR_RING_CONCAT(&(pollset->p->free_ring), &(pollset->p->dead_ring), pfd_elem_t, link);

    pollset_unlock_rings();

    return rv;
}
Example #24
static winnt_conn_ctx_t *mpm_get_completion_context(int *timeout)
{
    apr_status_t rv;
    winnt_conn_ctx_t *context = NULL;

    *timeout = 0;
    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_ERR, 0, ap_server_conf, APLOGNO(00326)
                                 "Server ran out of threads to serve "
                                 "requests. Consider raising the "
                                 "ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be
                 * available, since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else {
                    if (rv == WAIT_TIMEOUT) {
                        /* somewhat-normal condition where threads are busy */
                        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(00327)
                                     "mpm_get_completion_context: Failed to get a "
                                     "free context within 1 second");
                        *timeout = 1;
                    }
                    else {
                        /* should be the unexpected, generic WAIT_FAILED */
                        ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                     ap_server_conf, APLOGNO(00328)
                                     "mpm_get_completion_context: "
                                     "WaitForSingleObject failed to get free context");
                    }
                    return NULL;
                }
            } else {
                /* Allocate another context.
                 * Note: Multiple failures in the next two steps will cause
                 * the pchild pool to 'leak' storage. I don't think this
                 * is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (winnt_conn_ctx_t *)apr_pcalloc(pchild,
                                                     sizeof(winnt_conn_ctx_t));


                context->overlapped.hEvent = CreateEvent(NULL, TRUE,
                                                         FALSE, NULL);
                if (context->overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK, APLOG_WARNING, apr_get_os_error(),
                                 ap_server_conf, APLOGNO(00329)
                                 "mpm_get_completion_context: "
                                 "CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL,
                                        allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, rv, ap_server_conf, APLOGNO(00330)
                                 "mpm_get_completion_context: Failed "
                                 "to create the transaction pool.");
                    CloseHandle(context->overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
Example #25
PCOMP_CONTEXT mpm_get_completion_context(void)
{
    apr_status_t rv;
    PCOMP_CONTEXT context = NULL;

    while (1) {
        /* Grab a context off the queue */
        apr_thread_mutex_lock(qlock);
        if (qhead) {
            context = qhead;
            qhead = qhead->next;
            if (!qhead)
                qtail = NULL;
        } else {
            ResetEvent(qwait_event);
        }
        apr_thread_mutex_unlock(qlock);

        if (!context) {
            /* We failed to grab a context off the queue, consider allocating
             * a new one out of the child pool. There may be up to
             * (ap_threads_per_child + num_listeners) contexts in the system
             * at once.
             */
            if (num_completion_contexts >= max_num_completion_contexts) {
                /* All workers are busy, need to wait for one */
                static int reported = 0;
                if (!reported) {
                    ap_log_error(APLOG_MARK, APLOG_WARNING, 0, ap_server_conf,
                                 "Server ran out of threads to serve requests. Consider "
                                 "raising the ThreadsPerChild setting");
                    reported = 1;
                }

                /* Wait for a worker to free a context. Once per second, give
                 * the caller a chance to check for shutdown. If the wait
                 * succeeds, get the context off the queue. It must be available,
                 * since there's only one consumer.
                 */
                rv = WaitForSingleObject(qwait_event, 1000);
                if (rv == WAIT_OBJECT_0)
                    continue;
                else /* Hopefully, WAIT_TIMEOUT */
                    return NULL;
            } else {
                /* Allocate another context.
                 * Note:
                 * Multiple failures in the next two steps will cause the pchild pool
                 * to 'leak' storage. I don't think this is worth fixing...
                 */
                apr_allocator_t *allocator;

                apr_thread_mutex_lock(child_lock);
                context = (PCOMP_CONTEXT) apr_pcalloc(pchild, sizeof(COMP_CONTEXT));

                context->Overlapped.hEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
                if (context->Overlapped.hEvent == NULL) {
                    /* Hopefully this is a temporary condition ... */
                    ap_log_error(APLOG_MARK,APLOG_WARNING, apr_get_os_error(), ap_server_conf,
                                 "mpm_get_completion_context: CreateEvent failed.");

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }

                /* Create the transaction pool */
                apr_allocator_create(&allocator);
                apr_allocator_max_free_set(allocator, ap_max_mem_free);
                rv = apr_pool_create_ex(&context->ptrans, pchild, NULL, allocator);
                if (rv != APR_SUCCESS) {
                    ap_log_error(APLOG_MARK,APLOG_WARNING, rv, ap_server_conf,
                                 "mpm_get_completion_context: Failed to create the transaction pool.");
                    CloseHandle(context->Overlapped.hEvent);

                    apr_thread_mutex_unlock(child_lock);
                    return NULL;
                }
                apr_allocator_owner_set(allocator, context->ptrans);
                apr_pool_tag(context->ptrans, "transaction");

                context->accept_socket = INVALID_SOCKET;
                context->ba = apr_bucket_alloc_create(context->ptrans);
                apr_atomic_inc32(&num_completion_contexts);

                apr_thread_mutex_unlock(child_lock);
                break;
            }
        } else {
            /* Got a context from the queue */
            break;
        }
    }

    return context;
}
Example #26
void tee_update_discarded(void)
{
	apr_atomic_inc32(&process_stats->discarded);
}
Example #27
static apr_status_t dbm_open_type(apr_dbm_type_t const* * vtable,
                                  const char *type, 
                                  apr_pool_t *pool)
{
#if !APU_DSO_BUILD

    *vtable = NULL;
    if (!strcasecmp(type, "default"))     *vtable = &DBM_VTABLE;
#if APU_HAVE_DB
    else if (!strcasecmp(type, "db"))     *vtable = &apr_dbm_type_db;
#endif
    else if (*type && !strcasecmp(type + 1, "dbm")) {
#if APU_HAVE_GDBM
        if (*type == 'G' || *type == 'g') *vtable = &apr_dbm_type_gdbm;
#endif
#if APU_HAVE_NDBM
        if (*type == 'N' || *type == 'n') *vtable = &apr_dbm_type_ndbm;
#endif
#if APU_HAVE_SDBM
        if (*type == 'S' || *type == 's') *vtable = &apr_dbm_type_sdbm;
#endif
        /* avoid empty block */ ;
    }
    if (*vtable)
        return APR_SUCCESS;
    return APR_ENOTIMPL;

#else /* APU_DSO_BUILD */

    char modname[32];
    char symname[34];
    apr_dso_handle_sym_t symbol;
    apr_status_t rv;
    int usertype = 0;

    if (!strcasecmp(type, "default"))        type = DBM_NAME;
    else if (!strcasecmp(type, "db"))        type = "db";
    else if (*type && !strcasecmp(type + 1, "dbm")) {
        if      (*type == 'G' || *type == 'g') type = "gdbm"; 
        else if (*type == 'N' || *type == 'n') type = "ndbm"; 
        else if (*type == 'S' || *type == 's') type = "sdbm"; 
    }
    else usertype = 1;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;
    }
    else {
        apr_pool_t *parent;

        /* Top level pool scope, need process-scope lifetime */
        for (parent = pool;  parent; parent = apr_pool_parent_get(pool))
             pool = parent;

        /* deprecate in 2.0 - permit implicit initialization */
        apu_dso_init(pool);

        drivers = apr_hash_make(pool);
        apr_hash_set(drivers, "sdbm", APR_HASH_KEY_STRING, &apr_dbm_type_sdbm);

        apr_pool_cleanup_register(pool, NULL, dbm_term,
                                  apr_pool_cleanup_null);

        apr_atomic_dec32(&in_init);
    }

    rv = apu_dso_mutex_lock();
    if (rv) {
        *vtable = NULL;
        return rv;
    }

    *vtable = apr_hash_get(drivers, type, APR_HASH_KEY_STRING);
    if (*vtable) {
        apu_dso_mutex_unlock();
        return APR_SUCCESS;
    }

    /* The driver DSO must have exactly the same lifetime as the
     * drivers hash table; ignore the passed-in pool */
    pool = apr_hash_pool_get(drivers);

#if defined(NETWARE)
    apr_snprintf(modname, sizeof(modname), "dbm%s.nlm", type);
#elif defined(WIN32)
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".dll", type);
#else
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".so", type);
#endif
    apr_snprintf(symname, sizeof(symname), "apr_dbm_type_%s", type);

    rv = apu_dso_load(NULL, &symbol, modname, symname, pool);
    if (rv == APR_SUCCESS || rv == APR_EINIT) { /* previously loaded?!? */
        *vtable = symbol;
        if (usertype)
            type = apr_pstrdup(pool, type);
        apr_hash_set(drivers, type, APR_HASH_KEY_STRING, *vtable);
        rv = APR_SUCCESS;
    }
    else
        *vtable = NULL;

    apu_dso_mutex_unlock();
    return rv;

#endif /* APU_DSO_BUILD */
}
Example #28
static int create_entity(cache_handle_t *h, cache_type_e type_e,
                         request_rec *r, const char *key, apr_off_t len)
{
    apr_status_t rv;
    apr_pool_t *pool;
    cache_object_t *obj, *tmp_obj;
    mem_cache_object_t *mobj;

    if (len == -1) {
        /* Caching a streaming response. Assume the response is
         * less than or equal to max_streaming_buffer_size. We will
         * correct all the cache size counters in store_body once
         * we know exactly how much we are caching.
         */
        len = sconf->max_streaming_buffer_size;
    }

    /* Note: cache_insert() will automatically garbage collect
     * objects from the cache if the max_cache_size threshold is
     * exceeded. This means mod_mem_cache does not need to implement
     * max_cache_size checks.
     */
    if (len < sconf->min_cache_object_size ||
        len > sconf->max_cache_object_size) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "mem_cache: URL %s failed the size check and will not be cached.",
                     key);
        return DECLINED;
    }

    if (type_e == CACHE_TYPE_FILE) {
        /* CACHE_TYPE_FILE is only valid for local content handled by the
         * default handler. Need a better way to check if the file is
         * local or not.
         */
        if (!r->filename) {
            return DECLINED;
        }
    }

    rv = apr_pool_create(&pool, NULL);

    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
                     "mem_cache: Failed to create memory pool.");
        return DECLINED;
    }

    /* Allocate and initialize cache_object_t */
    obj = apr_pcalloc(pool, sizeof(*obj));
    obj->key = apr_pstrdup(pool, key);

    /* Allocate and init mem_cache_object_t */
    mobj = apr_pcalloc(pool, sizeof(*mobj));
    mobj->pool = pool;

    if (threaded_mpm) {
        apr_thread_mutex_create(&mobj->lock, APR_THREAD_MUTEX_DEFAULT, pool);
    }

    /* Finish initing the cache object */
    apr_atomic_set32(&obj->refcount, 1);
    mobj->total_refs = 1;
    obj->complete = 0;
    obj->vobj = mobj;
    /* Safe cast: We tested < sconf->max_cache_object_size above */
    mobj->m_len = (apr_size_t)len;
    mobj->type = type_e;

    /* Place the cache_object_t into the hash table.
     * Note: Perhaps we should wait to put the object in the
     * hash table when the object is complete?  I add the object here to
     * avoid multiple threads attempting to cache the same content only
     * to discover at the very end that only one of them will succeed.
     * Furthermore, adding the cache object to the table at the end could
     * open up a subtle but easy to exploit DoS hole: someone could request
     * a very large file with multiple requests. Better to detect this here
     * rather than after the cache object has been completely built and
     * initialized...
     * XXX Need a way to insert into the cache w/o such coarse grained locking
     */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);

    if (!tmp_obj) {
        cache_insert(sconf->cache_cache, obj);
        /* Add a refcount to account for the reference by the
         * hashtable in the cache. Refcount should be 2 now, one
         * for this thread, and one for the cache.
         */
        apr_atomic_inc32(&obj->refcount);
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (tmp_obj) {
        /* This thread collided with another thread loading the same object
         * into the cache at the same time. Defer to the other thread which
         * is further along.
         */
        cleanup_cache_object(obj);
        return DECLINED;
    }

    apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
                              apr_pool_cleanup_null);

    /* Populate the cache handle */
    h->cache_obj = obj;

    return OK;
}
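
Examples #16 and #28 pair apr_atomic_inc32() on obj->refcount with a decrement_refcount pool cleanup that is not shown here. The usual companion idiom relies on apr_atomic_dec32() returning zero exactly when the stored count reaches zero, so the last holder, and only the last holder, destroys the object. A hypothetical sketch of that pattern (types and names are illustrative):

#include <apr_atomic.h>

typedef struct refobj {
    volatile apr_uint32_t refcount;
    /* ... payload ... */
} refobj_t;

static void refobj_ref(refobj_t *o)
{
    apr_atomic_inc32(&o->refcount);
}

static void refobj_unref(refobj_t *o, void (*destroy)(refobj_t *))
{
    /* apr_atomic_dec32() returns zero iff the new value is zero, so
     * exactly one caller takes the destroy path. */
    if (apr_atomic_dec32(&o->refcount) == 0)
        destroy(o);
}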
Example #29
static apr_status_t create_request(const char *hostinfo,
                                   const char *path,
                                   const char *query,
                                   const char *fragment,
                                   parser_baton_t *ctx,
                                   apr_pool_t *tmppool)
{
    handler_baton_t *new_ctx;

    if (hostinfo) {
        /* Yes, this is a pointer comparison; not a string comparison. */
        if (hostinfo != ctx->hostinfo) {
            /* Not on the same host; ignore */
            return APR_SUCCESS;
        }
    }

    new_ctx = (handler_baton_t*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
                                                      sizeof(handler_baton_t));
    new_ctx->allocator = ctx->app_ctx->bkt_alloc;
    new_ctx->requests_outstanding = ctx->requests_outstanding;
    new_ctx->app_ctx = ctx->app_ctx;

    /* See above: this example restricts ourselves to the same vhost. */
    new_ctx->hostinfo = ctx->hostinfo;

    /* we need to copy it so it falls under the request's scope. */
    new_ctx->path_len = strlen(path);
    new_ctx->path = (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
                                                 new_ctx->path_len + 1);
    memcpy(new_ctx->path, path, new_ctx->path_len + 1);

    /* we need to copy it so it falls under the request's scope. */
    if (query) {
        new_ctx->query_len = strlen(query);
        new_ctx->query = (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
                                                      new_ctx->query_len + 1);
        memcpy(new_ctx->query, query, new_ctx->query_len + 1);
    }
    else {
        new_ctx->query = NULL;
        new_ctx->query_len = 0;
    }

    /* we need to copy it so it falls under the request's scope. */
    if (fragment) {
        new_ctx->fragment_len = strlen(fragment);
        new_ctx->fragment =
            (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
                                         new_ctx->fragment_len + 1);
        memcpy(new_ctx->fragment, fragment, new_ctx->fragment_len + 1);
    }
    else {
        new_ctx->fragment = NULL;
        new_ctx->fragment_len = 0;
    }

    if (!new_ctx->query) {
        new_ctx->full_path = new_ctx->path;
        new_ctx->full_path_len = new_ctx->path_len;
    }
    else {
        new_ctx->full_path_len = new_ctx->path_len + new_ctx->query_len;
        new_ctx->full_path =
            (char*)serf_bucket_mem_alloc(ctx->app_ctx->bkt_alloc,
                                         new_ctx->full_path_len + 1);
        memcpy(new_ctx->full_path, new_ctx->path, new_ctx->path_len);
        memcpy(new_ctx->full_path + new_ctx->path_len, new_ctx->query,
               new_ctx->query_len + 1);
    }

    new_ctx->hdr_read = 0;

    new_ctx->doc_queue_condvar = ctx->condvar;
    new_ctx->doc_queue = ctx->doc_queue;
    new_ctx->doc_queue_alloc = ctx->doc_queue_alloc;

    new_ctx->acceptor = accept_response;
    new_ctx->acceptor_baton = &ctx->app_ctx;
    new_ctx->handler = handle_response;

    apr_atomic_inc32(ctx->requests_outstanding);

    serf_connection_request_create(ctx->connection, setup_request, new_ctx);

    return APR_SUCCESS;
}
Example #30
static apr_status_t store_body(cache_handle_t *h, request_rec *r, apr_bucket_brigade *b)
{
    apr_status_t rv;
    cache_object_t *obj = h->cache_obj;
    cache_object_t *tobj = NULL;
    mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
    apr_read_type_e eblock = APR_BLOCK_READ;
    apr_bucket *e;
    char *cur;
    int eos = 0;

    if (mobj->type == CACHE_TYPE_FILE) {
        apr_file_t *file = NULL;
        int fd = 0;
        int other = 0;

        /* We can cache an open file descriptor if:
         * - the brigade contains one and only one file_bucket &&
         * - the brigade is complete &&
         * - the file_bucket is the last data bucket in the brigade
         */
        for (e = APR_BRIGADE_FIRST(b);
             e != APR_BRIGADE_SENTINEL(b);
             e = APR_BUCKET_NEXT(e))
        {
            if (APR_BUCKET_IS_EOS(e)) {
                eos = 1;
            }
            else if (APR_BUCKET_IS_FILE(e)) {
                apr_bucket_file *a = e->data;
                fd++;
                file = a->fd;
            }
            else {
                other++;
            }
        }
        if (fd == 1 && !other && eos) {
            apr_file_t *tmpfile;
            const char *name;
            /* Open a new XTHREAD handle to the file */
            apr_file_name_get(&name, file);
            mobj->flags = ((APR_SENDFILE_ENABLED & apr_file_flags_get(file))
                           | APR_READ | APR_BINARY | APR_XTHREAD | APR_FILE_NOCLEANUP);
            rv = apr_file_open(&tmpfile, name, mobj->flags,
                               APR_OS_DEFAULT, r->pool);
            if (rv != APR_SUCCESS) {
                return rv;
            }
            apr_file_inherit_unset(tmpfile);
            apr_os_file_get(&(mobj->fd), tmpfile);

            /* Open for business */
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "mem_cache: Cached file: %s with key: %s", name, obj->key);
            obj->complete = 1;
            return APR_SUCCESS;
        }

        /* Content not suitable for fd caching. Cache in-memory instead. */
        mobj->type = CACHE_TYPE_HEAP;
    }

    /*
     * FD caching is not enabled or the content was not
     * suitable for fd caching.
     */
    if (mobj->m == NULL) {
        mobj->m = malloc(mobj->m_len);
        if (mobj->m == NULL) {
            return APR_ENOMEM;
        }
        obj->count = 0;
    }
    cur = (char*) mobj->m + obj->count;

    /* Iterate across the brigade and populate the cache storage */
    for (e = APR_BRIGADE_FIRST(b);
         e != APR_BRIGADE_SENTINEL(b);
         e = APR_BUCKET_NEXT(e))
    {
        const char *s;
        apr_size_t len;

        if (APR_BUCKET_IS_EOS(e)) {
            const char *cl_header = apr_table_get(r->headers_out, "Content-Length");
            if (cl_header) {
                apr_int64_t cl = apr_atoi64(cl_header);
                if ((errno == 0) && (obj->count != cl)) {
                    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                                 "mem_cache: URL %s didn't receive complete response, not caching",
                                 h->cache_obj->key);
                    return APR_EGENERAL;
                }
            }
            if (mobj->m_len > obj->count) {
                /* Caching a streamed response. Reallocate a buffer of the
                 * correct size and copy the streamed response into that
                 * buffer */
                mobj->m = realloc(mobj->m, obj->count);
                if (!mobj->m) {
                    return APR_ENOMEM;
                }

                /* Now comes the crufty part... there is no way to tell the
                 * cache that the size of the object has changed. We need
                 * to remove the object, update the size and re-add the
                 * object, all under protection of the lock.
                 */
                if (sconf->lock) {
                    apr_thread_mutex_lock(sconf->lock);
                }
                /* Has the object been ejected from the cache?
                 */
                tobj = (cache_object_t *) cache_find(sconf->cache_cache, obj->key);
                if (tobj == obj) {
                    /* Object is still in the cache, remove it, update the len field then
                     * replace it under protection of sconf->lock.
                     */
                    cache_remove(sconf->cache_cache, obj);
                    /* For illustration, cache no longer has reference to the object
                     * so decrement the refcount
                     * apr_atomic_dec32(&obj->refcount);
                     */
                    mobj->m_len = obj->count;

                    cache_insert(sconf->cache_cache, obj);
                    /* For illustration, cache now has reference to the object, so
                     * increment the refcount
                     * apr_atomic_inc32(&obj->refcount);
                     */
                }
                else if (tobj) {
                    /* Different object with the same key found in the cache. Doing nothing
                     * here will cause the object refcount to drop to 0 in decrement_refcount
                     * and the object will be cleaned up.
                     */

                } else {
                    /* Object has been ejected from the cache, add it back to the cache */
                    mobj->m_len = obj->count;
                    cache_insert(sconf->cache_cache, obj);
                    apr_atomic_inc32(&obj->refcount);
                }

                if (sconf->lock) {
                    apr_thread_mutex_unlock(sconf->lock);
                }
            }
            /* Open for business */
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                         "mem_cache: Cached url: %s", obj->key);
            obj->complete = 1;
            break;
        }
        rv = apr_bucket_read(e, &s, &len, eblock);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        if (len) {
            /* Check for buffer (max_streaming_buffer_size) overflow  */
           if ((obj->count + len) > mobj->m_len) {
               ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                            "mem_cache: URL %s exceeds the MCacheMaxStreamingBuffer (%" APR_SIZE_T_FMT ") limit and will not be cached.", 
                            obj->key, mobj->m_len);
               return APR_ENOMEM;
           }
           else {
               memcpy(cur, s, len);
               cur+=len;
               obj->count+=len;
           }
        }
        /* This should not fail, but if it does, we are in BIG trouble
         * cause we just stomped all over the heap.
         */
        AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
    }
    return APR_SUCCESS;
}