Example #1
static void busyloop_set32(tbox_t *tbox)
{
    do {
        busyloop_read32(tbox);
        apr_atomic_set32(tbox->mem, tbox->postval);
    } while (--tbox->loop);
}
Example #2
/** 
 * Returns the interrupted status and clears the interrupted flag.
 *
 * @param[in] thread the thread whose interrupted flag is to be cleared
 * @returns TM_ERROR_INTERRUPT if the thread was interrupted, TM_ERROR_NONE otherwise
 * @see java.lang.Thread.interrupted()
 */
UDATA VMCALL hythread_clear_interrupted_other(hythread_t thread) {
    int interrupted;
    assert(thread);
    interrupted = thread->interrupted;
    apr_atomic_set32(&thread->interrupted, FALSE);
    return interrupted ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
} // hythread_clear_interrupted_other
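The function above both reports and consumes the flag in one call. A minimal caller sketch, assuming hythread_self() from the same thread library is available (it does not appear in the example itself):

static int consume_own_interrupt_status(void)
{
    /* hythread_self() is assumed here: it returns the calling thread's hythread_t. */
    hythread_t self = hythread_self();

    /* TM_ERROR_INTERRUPT is returned at most once per interrupt; the flag is
     * cleared as a side effect, so an immediate second call returns TM_ERROR_NONE. */
    return hythread_clear_interrupted_other(self) == TM_ERROR_INTERRUPT;
}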
Example #3
apr_status_t apu_dso_init(apr_pool_t *pool)
{
    apr_status_t ret = APR_SUCCESS;
    apr_pool_t *parent;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;

        return APR_SUCCESS;
    }

    /* Top level pool scope, need process-scope lifetime */
    for (parent = apr_pool_parent_get(pool);
         parent && parent != pool;
         parent = apr_pool_parent_get(pool))
        pool = parent;

    dsos = apr_hash_make(pool);

#if APR_HAS_THREADS
    ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    /* This already registers a pool cleanup */
#endif

    apr_pool_cleanup_register(pool, NULL, apu_dso_term,
                              apr_pool_cleanup_null);

    apr_atomic_dec32(&in_init);

    return ret;
}
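The guard at the top of apu_dso_init is easier to see in isolation. A minimal sketch of the same once-only pattern, under the assumption that (as in apr-util) initialised starts at 0 and in_init starts at 1:

#include <apr_atomic.h>

static apr_uint32_t initialised = 0, in_init = 1;   /* assumed initial values */

static void init_once(void (*do_init)(void))
{
    if (apr_atomic_inc32(&initialised)) {     /* old value non-zero: not the first caller */
        apr_atomic_set32(&initialised, 1);    /* prevent wrap-around */
        while (apr_atomic_read32(&in_init))   /* spin until the first caller finishes */
            ;
        return;
    }
    do_init();                                /* first caller performs the real work */
    apr_atomic_dec32(&in_init);               /* release any spinning callers */
}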
Example #4
static void memcache_set_pos(void *a, apr_ssize_t pos)
{
    cache_object_t *obj = (cache_object_t *)a;
    mem_cache_object_t *mobj = obj->vobj;

    apr_atomic_set32(&mobj->pos, pos);
}
Example #5
SWITCH_DECLARE(void) switch_atomic_set(volatile switch_atomic_t *mem, uint32_t val)
{
#ifdef apr_atomic_t
	apr_atomic_set((apr_atomic_t *)mem, val);
#else
	apr_atomic_set32((apr_uint32_t *)mem, val);
#endif
}
Example #6
static void test_inc32(abts_case *tc, void *data)
{
    apr_uint32_t oldval;
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 23);
    oldval = apr_atomic_inc32(&y32);
    ABTS_INT_EQUAL(tc, 23, oldval);
    ABTS_INT_EQUAL(tc, 24, y32);
}
Example #7
static void test_add32_neg(abts_case *tc, void *data)
{
    apr_uint32_t oldval;
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 23);
    oldval = apr_atomic_add32(&y32, -10);
    ABTS_INT_EQUAL(tc, 23, oldval);
    ABTS_INT_EQUAL(tc, 13, y32);
}
Example #8
static void test_xchg32(abts_case *tc, void *data)
{
    apr_uint32_t oldval;
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 100);
    oldval = apr_atomic_xchg32(&y32, 50);

    ABTS_INT_EQUAL(tc, 100, oldval);
    ABTS_INT_EQUAL(tc, 50, y32);
}
Example #9
static void test_set_add_inc_sub(abts_case *tc, void *data)
{
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 0);
    apr_atomic_add32(&y32, 20);
    apr_atomic_inc32(&y32);
    apr_atomic_sub32(&y32, 10);

    ABTS_INT_EQUAL(tc, 11, y32);
}
Example #10
static void test_wrap_zero(abts_case *tc, void *data)
{
    apr_uint32_t y32;
    apr_uint32_t rv;
    apr_uint32_t minus1 = -1;
    char *str;

    apr_atomic_set32(&y32, 0);
    rv = apr_atomic_dec32(&y32);

    ABTS_ASSERT(tc, "apr_atomic_dec32 on zero returned zero.", rv != 0);
    str = apr_psprintf(p, "zero wrap failed: 0 - 1 = %d", y32);
    ABTS_ASSERT(tc, str, y32 == minus1);
}
Example #11
static void test_dec32(abts_case *tc, void *data)
{
    apr_uint32_t y32;
    int rv;

    apr_atomic_set32(&y32, 2);

    rv = apr_atomic_dec32(&y32);
    ABTS_INT_EQUAL(tc, 1, y32);
    ABTS_ASSERT(tc, "atomic_dec returned zero when it shouldn't", rv != 0);

    rv = apr_atomic_dec32(&y32);
    ABTS_INT_EQUAL(tc, 0, y32);
    ABTS_ASSERT(tc, "atomic_dec didn't returned zero when it should", rv == 0);
}
Example #12
/**
 * An h2_mplx needs to be thread-safe, as it will be called by both
 * the h2_session thread *and* the h2_worker threads. Therefore:
 * - calls are protected by a mutex lock, m->lock
 * - the pool needs its own allocator, since apr_allocator_t is 
 *   not re-entrant. The separate allocator works without a 
 *   separate lock since we already protect h2_mplx itself.
 *   Since HTTP/2 connections can be expected to live longer than
 *   their HTTP/1 cousins, the separate allocator seems to work better
 *   than protecting a shared h2_session allocator with its own lock.
 */
h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent, h2_workers *workers)
{
    apr_status_t status = APR_SUCCESS;
    h2_config *conf = h2_config_get(c);
    apr_allocator_t *allocator = NULL;
    h2_mplx *m;
    AP_DEBUG_ASSERT(conf);
    
    status = apr_allocator_create(&allocator);
    if (status != APR_SUCCESS) {
        return NULL;
    }

    m = apr_pcalloc(parent, sizeof(h2_mplx));
    if (m) {
        m->id = c->id;
        APR_RING_ELEM_INIT(m, link);
        apr_atomic_set32(&m->refs, 1);
        m->c = c;
        apr_pool_create_ex(&m->pool, parent, NULL, allocator);
        if (!m->pool) {
            return NULL;
        }
        apr_allocator_owner_set(allocator, m->pool);
        
        status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
                                         m->pool);
        if (status != APR_SUCCESS) {
            h2_mplx_destroy(m);
            return NULL;
        }
        
        m->bucket_alloc = apr_bucket_alloc_create(m->pool);
        
        m->q = h2_tq_create(m->id, m->pool);
        m->stream_ios = h2_io_set_create(m->pool);
        m->ready_ios = h2_io_set_create(m->pool);
        m->closed = h2_stream_set_create(m->pool);
        m->stream_max_mem = h2_config_geti(conf, H2_CONF_STREAM_MAX_MEM);
        m->workers = workers;
        
        m->file_handles_allowed = h2_config_geti(conf, H2_CONF_SESSION_FILES);
    }
    return m;
}
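The comment above argues for a private, unlocked allocator per h2_mplx. For contrast, here is a minimal sketch of the rejected alternative: one allocator used from several threads, which APR supports only when the allocator carries its own mutex. Names are illustrative, not taken from mod_h2:

static apr_status_t create_pool_with_locked_allocator(apr_pool_t **out)
{
    apr_allocator_t *alloc = NULL;
    apr_thread_mutex_t *alloc_mutex = NULL;
    apr_status_t rv;

    rv = apr_allocator_create(&alloc);
    if (rv != APR_SUCCESS) return rv;

    rv = apr_pool_create_ex(out, NULL, NULL, alloc);
    if (rv != APR_SUCCESS) return rv;
    apr_allocator_owner_set(alloc, *out);

    rv = apr_thread_mutex_create(&alloc_mutex, APR_THREAD_MUTEX_DEFAULT, *out);
    if (rv != APR_SUCCESS) return rv;

    /* With its own mutex the allocator can serve several threads; without
     * one (as in h2_mplx_create above) callers must serialize themselves. */
    apr_allocator_mutex_set(alloc, alloc_mutex);
    return APR_SUCCESS;
}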
Example #13
/** 
 * Interrupt a thread.
 * 
 * If the thread is currently blocked (i.e. waiting in a monitor wait or sleeping),
 * resume the thread and cause it to return from the blocking function with
 * HYTHREAD_INTERRUPTED.
 * 
 * @param[in] thread the thread to be interrupted
 * @return none
 */
void VMCALL hythread_interrupt(hythread_t thread) {
    IDATA status;
    hythread_monitor_t mon;

    apr_atomic_set32(&thread->interrupted, TRUE);

    mon = thread->waited_monitor;
    if (mon) {
        // If thread was doing any kind of wait, notify it.
        if (hythread_monitor_try_enter(mon) == TM_ERROR_NONE) {
            status = hycond_notify_all(&mon->condition);
            assert(status == TM_ERROR_NONE);
            status = hythread_monitor_exit(mon);
            assert(status == TM_ERROR_NONE);
        } else {
            status = hythread_create(NULL, 0, 0, 0,
                hythread_interrupter, (void *)mon);
            assert (status == TM_ERROR_NONE);
        }
    }
} // hythread_interrupt
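Together with Example #2, this gives both halves of the protocol: the flag is stored with apr_atomic_set32 here and consumed atomically there. A small sketch combining only the two calls already shown on this page:

static void interrupt_then_consume(hythread_t other)
{
    hythread_interrupt(other);   /* sets other->interrupted and wakes any waiter */

    if (hythread_clear_interrupted_other(other) == TM_ERROR_INTERRUPT) {
        /* The flag was observed and cleared; a second call would now
         * return TM_ERROR_NONE until the thread is interrupted again. */
    }
}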
Example #14
int main(int argc, const char **argv)
{
    apr_status_t status;
    apr_pool_t *pool;
    apr_sockaddr_t *address;
    serf_context_t *context;
    serf_connection_t *connection;
    app_baton_t app_ctx;
    handler_baton_t *handler_ctx;
    apr_uri_t url;
    const char *raw_url, *method;
    int count;
    apr_getopt_t *opt;
    char opt_c;
    char *authn = NULL;
    const char *opt_arg;

    /* For the parser threads */
    apr_thread_t *thread[3];
    apr_threadattr_t *tattr;
    apr_status_t parser_status;
    parser_baton_t *parser_ctx;

    apr_initialize();
    atexit(apr_terminate);

    apr_pool_create(&pool, NULL);
    apr_atomic_init(pool);
    /* serf_initialize(); */

    /* Default to one round of fetching. */
    count = 1;
    /* Default to GET. */
    method = "GET";

    apr_getopt_init(&opt, pool, argc, argv);

    while ((status = apr_getopt(opt, "a:hv", &opt_c, &opt_arg)) ==
           APR_SUCCESS) {
        int srclen, enclen;

        switch (opt_c) {
        case 'a':
            srclen = strlen(opt_arg);
            enclen = apr_base64_encode_len(srclen);
            authn = apr_palloc(pool, enclen + 6);
            strcpy(authn, "Basic ");
            (void) apr_base64_encode(&authn[6], opt_arg, srclen);
            break;
        case 'h':
            print_usage(pool);
            exit(0);
            break;
        case 'v':
            puts("Serf version: " SERF_VERSION_STRING);
            exit(0);
        default:
            break;
        }
    }

    if (opt->ind != opt->argc - 1) {
        print_usage(pool);
        exit(-1);
    }

    raw_url = argv[opt->ind];

    apr_uri_parse(pool, raw_url, &url);
    if (!url.port) {
        url.port = apr_uri_port_of_scheme(url.scheme);
    }
    if (!url.path) {
        url.path = "/";
    }

    if (strcasecmp(url.scheme, "https") == 0) {
        app_ctx.using_ssl = 1;
    }
    else {
        app_ctx.using_ssl = 0;
    }

    status = apr_sockaddr_info_get(&address,
                                   url.hostname, APR_UNSPEC, url.port, 0,
                                   pool);
    if (status) {
        printf("Error creating address: %d\n", status);
        exit(1);
    }

    context = serf_context_create(pool);

    /* ### Connection or Context should have an allocator? */
    app_ctx.bkt_alloc = serf_bucket_allocator_create(pool, NULL, NULL);
    app_ctx.ssl_ctx = NULL;
    app_ctx.authn = authn;

    connection = serf_connection_create(context, address,
                                        conn_setup, &app_ctx,
                                        closed_connection, &app_ctx,
                                        pool);

    handler_ctx = (handler_baton_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                                      sizeof(handler_baton_t));
    handler_ctx->allocator = app_ctx.bkt_alloc;
    handler_ctx->doc_queue = apr_array_make(pool, 1, sizeof(doc_path_t*));
    handler_ctx->doc_queue_alloc = app_ctx.bkt_alloc;

    handler_ctx->requests_outstanding =
        (apr_uint32_t*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                             sizeof(apr_uint32_t));
    apr_atomic_set32(handler_ctx->requests_outstanding, 0);
    handler_ctx->hdr_read = 0;

    parser_ctx = (void*)serf_bucket_mem_alloc(app_ctx.bkt_alloc,
                                       sizeof(parser_baton_t));

    parser_ctx->requests_outstanding = handler_ctx->requests_outstanding;
    parser_ctx->connection = connection;
    parser_ctx->app_ctx = &app_ctx;
    parser_ctx->doc_queue = handler_ctx->doc_queue;
    parser_ctx->doc_queue_alloc = handler_ctx->doc_queue_alloc;
    /* Restrict ourselves to this host. */
    parser_ctx->hostinfo = url.hostinfo;

    status = apr_thread_mutex_create(&parser_ctx->mutex,
                                     APR_THREAD_MUTEX_DEFAULT, pool);
    if (status) {
        printf("Couldn't create mutex %d\n", status);
        return status;
    }

    status = apr_thread_cond_create(&parser_ctx->condvar, pool);
    if (status) {
        printf("Couldn't create condvar: %d\n", status);
        return status;
    }

    /* Let the handler know which condvar to use. */
    handler_ctx->doc_queue_condvar = parser_ctx->condvar;

    apr_threadattr_create(&tattr, pool);

    /* Start the parser thread. */
    apr_thread_create(&thread[0], tattr, parser_thread, parser_ctx, pool);

    /* Deliver the first request. */
    create_request(url.hostinfo, url.path, NULL, NULL, parser_ctx, pool);

    /* Go run our normal thread. */
    while (1) {
        int tries = 0;

        status = serf_context_run(context, SERF_DURATION_FOREVER, pool);
        if (APR_STATUS_IS_TIMEUP(status))
            continue;
        if (status) {
            char buf[200];

            printf("Error running context: (%d) %s\n", status,
                   apr_strerror(status, buf, sizeof(buf)));
            exit(1);
        }

        /* We run this check to allow our parser threads to add more
         * requests to our queue.
         */
        for (tries = 0; tries < 3; tries++) {
            if (!apr_atomic_read32(handler_ctx->requests_outstanding)) {
#ifdef SERF_VERBOSE
                printf("Waiting...");
#endif
                apr_sleep(100000);
#ifdef SERF_VERBOSE
                printf("Done\n");
#endif
            }
            else {
                break;
            }
        }
        if (tries >= 3) {
            break;
        }
        /* Debugging purposes only! */
        serf_debug__closed_conn(app_ctx.bkt_alloc);
    }

    printf("Quitting...\n");
    serf_connection_close(connection);

    /* wake up the parser via condvar signal */
    apr_thread_cond_signal(parser_ctx->condvar);

    status = apr_thread_join(&parser_status, thread[0]);
    if (status) {
        printf("Error joining thread: %d\n", status);
        return status;
    }

    serf_bucket_mem_free(app_ctx.bkt_alloc, handler_ctx->requests_outstanding);
    serf_bucket_mem_free(app_ctx.bkt_alloc, parser_ctx);

    apr_pool_destroy(pool);
    return 0;
}
Example #15
void h2_task_set_finished(h2_task *task)
{
    apr_atomic_set32(&task->has_finished, 1);
}
Example #16
void h2_task_set_started(h2_task *task)
{
    AP_DEBUG_ASSERT(task);
    apr_atomic_set32(&task->has_started, 1);
}
Example #17
void tee_update_queue_len(int length)
{
	ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, tee_server, "tee: Queue length %d (pid %d)", length, getpid());
	apr_atomic_set32(&process_stats->queue_len, length);
}
Example #18
static void test_read32(abts_case *tc, void *data)
{
    apr_uint32_t y32;
    apr_atomic_set32(&y32, 2);
    ABTS_INT_EQUAL(tc, 2, apr_atomic_read32(&y32));
}
Example #19
APR_DECLARE(apr_status_t) apr_dbd_init(apr_pool_t *pool)
{
    apr_status_t ret = APR_SUCCESS;
    apr_pool_t *parent;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;

        return APR_SUCCESS;
    }

    /* Top level pool scope, need process-scope lifetime */
    for (parent = pool;  parent; parent = apr_pool_parent_get(pool))
         pool = parent;
#if APR_HAVE_MODULAR_DSO
    /* deprecate in 2.0 - permit implicit initialization */
    apu_dso_init(pool);
#endif

    drivers = apr_hash_make(pool);

#if APR_HAS_THREADS
    ret = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
    /* This already registers a pool cleanup */
#endif

#if !APR_HAVE_MODULAR_DSO

    /* Load statically-linked drivers: */
#if APU_HAVE_MYSQL
    DRIVER_LOAD("mysql", apr_dbd_mysql_driver, pool);
#endif
#if APU_HAVE_PGSQL
    DRIVER_LOAD("pgsql", apr_dbd_pgsql_driver, pool);
#endif
#if APU_HAVE_SQLITE3
    DRIVER_LOAD("sqlite3", apr_dbd_sqlite3_driver, pool);
#endif
#if APU_HAVE_SQLITE2
    DRIVER_LOAD("sqlite2", apr_dbd_sqlite2_driver, pool);
#endif
#if APU_HAVE_ORACLE
    DRIVER_LOAD("oracle", apr_dbd_oracle_driver, pool);
#endif
#if APU_HAVE_FREETDS
    DRIVER_LOAD("freetds", apr_dbd_freetds_driver, pool);
#endif
#if APU_HAVE_ODBC
    DRIVER_LOAD("odbc", apr_dbd_odbc_driver, pool);
#endif
#if APU_HAVE_SOME_OTHER_BACKEND
    DRIVER_LOAD("firebird", apr_dbd_other_driver, pool);
#endif
#endif /* APR_HAVE_MODULAR_DSO */

    apr_pool_cleanup_register(pool, NULL, apr_dbd_term,
                              apr_pool_cleanup_null);

    apr_atomic_dec32(&in_init);

    return ret;
}
Example #20
static apr_status_t dbm_open_type(apr_dbm_type_t const* * vtable,
                                  const char *type, 
                                  apr_pool_t *pool)
{
#if !APU_DSO_BUILD

    *vtable = NULL;
    if (!strcasecmp(type, "default"))     *vtable = &DBM_VTABLE;
#if APU_HAVE_DB
    else if (!strcasecmp(type, "db"))     *vtable = &apr_dbm_type_db;
#endif
    else if (*type && !strcasecmp(type + 1, "dbm")) {
#if APU_HAVE_GDBM
        if (*type == 'G' || *type == 'g') *vtable = &apr_dbm_type_gdbm;
#endif
#if APU_HAVE_NDBM
        if (*type == 'N' || *type == 'n') *vtable = &apr_dbm_type_ndbm;
#endif
#if APU_HAVE_SDBM
        if (*type == 'S' || *type == 's') *vtable = &apr_dbm_type_sdbm;
#endif
        /* avoid empty block */ ;
    }
    if (*vtable)
        return APR_SUCCESS;
    return APR_ENOTIMPL;

#else /* APU_DSO_BUILD */

    char modname[32];
    char symname[34];
    apr_dso_handle_sym_t symbol;
    apr_status_t rv;
    int usertype = 0;

    if (!strcasecmp(type, "default"))        type = DBM_NAME;
    else if (!strcasecmp(type, "db"))        type = "db";
    else if (*type && !strcasecmp(type + 1, "dbm")) {
        if      (*type == 'G' || *type == 'g') type = "gdbm"; 
        else if (*type == 'N' || *type == 'n') type = "ndbm"; 
        else if (*type == 'S' || *type == 's') type = "sdbm"; 
    }
    else usertype = 1;

    if (apr_atomic_inc32(&initialised)) {
        apr_atomic_set32(&initialised, 1); /* prevent wrap-around */

        while (apr_atomic_read32(&in_init)) /* wait until we get fully inited */
            ;
    }
    else {
        apr_pool_t *parent;

        /* Top level pool scope, need process-scope lifetime */
        for (parent = pool;  parent; parent = apr_pool_parent_get(pool))
             pool = parent;

        /* deprecate in 2.0 - permit implicit initialization */
        apu_dso_init(pool);

        drivers = apr_hash_make(pool);
        apr_hash_set(drivers, "sdbm", APR_HASH_KEY_STRING, &apr_dbm_type_sdbm);

        apr_pool_cleanup_register(pool, NULL, dbm_term,
                                  apr_pool_cleanup_null);

        apr_atomic_dec32(&in_init);
    }

    rv = apu_dso_mutex_lock();
    if (rv) {
        *vtable = NULL;
        return rv;
    }

    *vtable = apr_hash_get(drivers, type, APR_HASH_KEY_STRING);
    if (*vtable) {
        apu_dso_mutex_unlock();
        return APR_SUCCESS;
    }

    /* The driver DSO must have exactly the same lifetime as the
     * drivers hash table; ignore the passed-in pool */
    pool = apr_hash_pool_get(drivers);

#if defined(NETWARE)
    apr_snprintf(modname, sizeof(modname), "dbm%s.nlm", type);
#elif defined(WIN32)
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".dll", type);
#else
    apr_snprintf(modname, sizeof(modname),
                 "apr_dbm_%s-" APU_STRINGIFY(APU_MAJOR_VERSION) ".so", type);
#endif
    apr_snprintf(symname, sizeof(symname), "apr_dbm_type_%s", type);

    rv = apu_dso_load(NULL, &symbol, modname, symname, pool);
    if (rv == APR_SUCCESS || rv == APR_EINIT) { /* previously loaded?!? */
        *vtable = symbol;
        if (usertype)
            type = apr_pstrdup(pool, type);
        apr_hash_set(drivers, type, APR_HASH_KEY_STRING, *vtable);
        rv = APR_SUCCESS;
    }
    else
        *vtable = NULL;

    apu_dso_mutex_unlock();
    return rv;

#endif /* APU_DSO_BUILD */
}
Example #21
void tee_update_status(status_t status)
{
	apr_atomic_set32(&process_stats->status, status);
}
Example #22
static int create_entity(cache_handle_t *h, cache_type_e type_e,
                         request_rec *r, const char *key, apr_off_t len)
{
    apr_status_t rv;
    apr_pool_t *pool;
    cache_object_t *obj, *tmp_obj;
    mem_cache_object_t *mobj;

    if (len == -1) {
        /* Caching a streaming response. Assume the response is
         * less than or equal to max_streaming_buffer_size. We will
         * correct all the cache size counters in store_body once
         * we know exactly how much we are caching.
         */
        len = sconf->max_streaming_buffer_size;
    }

    /* Note: cache_insert() will automatically garbage collect
     * objects from the cache if the max_cache_size threshold is
     * exceeded. This means mod_mem_cache does not need to implement
     * max_cache_size checks.
     */
    if (len < sconf->min_cache_object_size ||
        len > sconf->max_cache_object_size) {
        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
                     "mem_cache: URL %s failed the size check and will not be cached.",
                     key);
        return DECLINED;
    }

    if (type_e == CACHE_TYPE_FILE) {
        /* CACHE_TYPE_FILE is only valid for local content handled by the
         * default handler. Need a better way to check if the file is
         * local or not.
         */
        if (!r->filename) {
            return DECLINED;
        }
    }

    rv = apr_pool_create(&pool, NULL);

    if (rv != APR_SUCCESS) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, rv, r->server,
                     "mem_cache: Failed to create memory pool.");
        return DECLINED;
    }

    /* Allocate and initialize cache_object_t */
    obj = apr_pcalloc(pool, sizeof(*obj));
    obj->key = apr_pstrdup(pool, key);

    /* Allocate and init mem_cache_object_t */
    mobj = apr_pcalloc(pool, sizeof(*mobj));
    mobj->pool = pool;

    if (threaded_mpm) {
        apr_thread_mutex_create(&mobj->lock, APR_THREAD_MUTEX_DEFAULT, pool);
    }

    /* Finish initing the cache object */
    apr_atomic_set32(&obj->refcount, 1);
    mobj->total_refs = 1;
    obj->complete = 0;
    obj->vobj = mobj;
    /* Safe cast: We tested < sconf->max_cache_object_size above */
    mobj->m_len = (apr_size_t)len;
    mobj->type = type_e;

    /* Place the cache_object_t into the hash table.
     * Note: Perhaps we should wait to put the object in the
     * hash table when the object is complete?  I add the object here to
     * avoid multiple threads attempting to cache the same content only
     * to discover at the very end that only one of them will succeed.
     * Furthermore, adding the cache object to the table at the end could
     * open up a subtle but easy to exploit DoS hole: someone could request
     * a very large file with multiple requests. Better to detect this here
     * rather than after the cache object has been completely built and
     * initialized...
     * XXX Need a way to insert into the cache w/o such coarse grained locking
     */
    if (sconf->lock) {
        apr_thread_mutex_lock(sconf->lock);
    }
    tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);

    if (!tmp_obj) {
        cache_insert(sconf->cache_cache, obj);
        /* Add a refcount to account for the reference by the
         * hashtable in the cache. Refcount should be 2 now, one
         * for this thread, and one for the cache.
         */
        apr_atomic_inc32(&obj->refcount);
    }
    if (sconf->lock) {
        apr_thread_mutex_unlock(sconf->lock);
    }

    if (tmp_obj) {
        /* This thread collided with another thread loading the same object
         * into the cache at the same time. Defer to the other thread which
         * is further along.
         */
        cleanup_cache_object(obj);
        return DECLINED;
    }

    apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
                              apr_pool_cleanup_null);

    /* Populate the cache handle */
    h->cache_obj = obj;

    return OK;
}
Example #23
static void test_atomics_busyloop_threaded(abts_case *tc, void *data)
{
    unsigned int i;
    apr_status_t rv;
    apr_uint32_t count = 0;
    tbox_t tbox[NUM_THREADS];
    apr_thread_t *thread[NUM_THREADS];

    rv = apr_thread_mutex_create(&thread_lock, APR_THREAD_MUTEX_DEFAULT, p);
    APR_ASSERT_SUCCESS(tc, "Could not create lock", rv);

    /* get ready */
    for (i = 0; i < NUM_THREADS; i++) {
        tbox[i].tc = tc;
        tbox[i].mem = &count;
        tbox[i].loop = 50;
    }

    tbox[0].preval = 98;
    tbox[0].postval = 3891;
    tbox[0].func = busyloop_add32;

    tbox[1].preval = 3989;
    tbox[1].postval = 1010;
    tbox[1].func = busyloop_sub32;

    tbox[2].preval = 2979;
    tbox[2].postval = 0; /* not used */
    tbox[2].func = busyloop_inc32;

    tbox[3].preval = 2980;
    tbox[3].postval = 16384;
    tbox[3].func = busyloop_set32;

    tbox[4].preval = 16384;
    tbox[4].postval = 0; /* not used */
    tbox[4].func = busyloop_dec32;

    tbox[5].preval = 16383;
    tbox[5].postval = 1048576;
    tbox[5].func = busyloop_cas32;

    tbox[6].preval = 1048576;
    tbox[6].postval = 98; /* goto tbox[0] */
    tbox[6].func = busyloop_xchg32;

    /* get set */
    for (i = 0; i < NUM_THREADS; i++) {
        rv = apr_thread_create(&thread[i], NULL, thread_func_busyloop,
                               &tbox[i], p);
        ABTS_ASSERT(tc, "Failed creating thread", rv == APR_SUCCESS);
    }

    /* go! */
    apr_atomic_set32(tbox->mem, 98);

    for (i = 0; i < NUM_THREADS; i++) {
        apr_status_t retval;
        rv = apr_thread_join(&retval, thread[i]);
        ABTS_ASSERT(tc, "Thread join failed", rv == APR_SUCCESS);
        ABTS_ASSERT(tc, "Invalid return value from thread_join", retval == 0);
    }

    ABTS_INT_EQUAL(tbox->tc, 98, count);

    rv = apr_thread_mutex_destroy(thread_lock);
    ABTS_ASSERT(tc, "Failed creating threads", rv == APR_SUCCESS);
}