void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
{
    /* This function is safe only as long as it is single threaded because
     * it reaches into the queue and accesses "next" which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */

    *recycled_pool = NULL;

    /* Atomically pop a pool from the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
                              first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            if (queue_info->max_recycled_pools >= 0)
                apr_atomic_dec32(&queue_info->recycled_pools_count);
            break;
        }
    }
}
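/*
 * Hedged sketch (not taken from the sources above): a minimal, self-contained
 * program showing the same apr_atomic_casptr stack pattern in isolation --
 * a multi-producer CAS push that snapshots the head into a local "next"
 * (the same idea as the PR 44402 fix) and a single-consumer CAS pop.
 * The names push_node, pop_node, head and the node layout are illustrative
 * assumptions, not part of the original code.
 */
#include <stdio.h>
#include <apr_general.h>
#include <apr_atomic.h>

struct node {
    struct node *next;
    int value;
};

static struct node *volatile head;   /* hypothetical stack head */

static void push_node(struct node *n)
{
    for (;;) {
        /* Read the current head into a local so the value we compare against
         * and the value stored in n->next are guaranteed to be the same. */
        struct node *next = head;
        n->next = next;
        if (apr_atomic_casptr((void *) &head, n, next) == next)
            break;              /* CAS succeeded: n is the new head */
    }
}

/* Safe only with a single popping thread: first_node->next is read
 * outside any lock, just as in ap_pop_pool above. */
static struct node *pop_node(void)
{
    for (;;) {
        struct node *first_node = head;
        if (first_node == NULL)
            return NULL;
        if (apr_atomic_casptr((void *) &head, first_node->next,
                              first_node) == first_node)
            return first_node;
    }
}

int main(void)
{
    struct node a = { NULL, 1 }, b = { NULL, 2 };

    apr_initialize();
    push_node(&a);
    push_node(&b);
    printf("%d %d\n", pop_node()->value, pop_node()->value);   /* prints "2 1" */
    apr_terminate();
    return 0;
}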
apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
                                    apr_pool_t *pool_to_recycle)
{
    apr_status_t rv;
    int prev_idlers;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (pool_to_recycle) {
        struct recycled_pool *new_recycle;
        new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
                                                          sizeof (*new_recycle));
        new_recycle->pool = pool_to_recycle;
        for (;;) {
            /* Save queue_info->recycled_pool in local variable next because
             * new_recycle->next can be changed after apr_atomic_casptr
             * function call. For gory details see PR 44402.
             */
            struct recycled_pool *next = queue_info->recycled_pools;
            new_recycle->next = next;
            if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
                                  new_recycle, next) == next) {
                break;
            }
        }
    }

    /* Atomically increment the count of idle workers */
    for (;;) {
        prev_idlers = queue_info->idlers;
        if (apr_atomic_cas32(&(queue_info->idlers), prev_idlers + 1,
                             prev_idlers) == prev_idlers) {
            break;
        }
    }

    /* If this thread just made the idle worker count nonzero,
     * wake up the listener. */
    if (prev_idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        rv = apr_thread_cond_signal(queue_info->wait_for_idler);
        if (rv != APR_SUCCESS) {
            apr_thread_mutex_unlock(queue_info->idlers_mutex);
            return rv;
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    return APR_SUCCESS;
}
static void test_casptr_notequal(abts_case *tc, void *data)
{
    int a, b;
    volatile void *target_ptr = &a;
    void *old_ptr;

    /* The comparand (&b) does not match the current value (&a), so no swap
     * occurs: the old value is returned and target_ptr is unchanged. */
    old_ptr = apr_atomic_casptr(&target_ptr, &a, &b);
    ABTS_PTR_EQUAL(tc, &a, old_ptr);
    ABTS_PTR_EQUAL(tc, &a, (void *) target_ptr);
}
static void test_casptr_equal(abts_case *tc, void *data)
{
    int a;
    volatile void *target_ptr = NULL;
    void *old_ptr;

    /* The comparand (NULL) matches the current value, so the swap happens:
     * the old value (NULL) is returned and target_ptr now holds &a. */
    old_ptr = apr_atomic_casptr(&target_ptr, &a, NULL);
    ABTS_PTR_EQUAL(tc, NULL, old_ptr);
    ABTS_PTR_EQUAL(tc, &a, (void *) target_ptr);
}
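/*
 * Hedged sketch (not part of the test file above): one way the two casptr
 * tests could be wired into APR's abts harness.  The suite function name
 * testatomic_casptr is an illustrative assumption; abts_add_suite() and
 * abts_run_test() are the standard abts entry points, and this assumes the
 * same "abts.h" include the tests themselves need.
 */
abts_suite *testatomic_casptr(abts_suite *suite)
{
    suite = abts_add_suite(suite, __FILE__);

    /* Expect no swap when the comparand does not match the target. */
    abts_run_test(suite, test_casptr_notequal, NULL);
    /* Expect a swap when the comparand matches the target. */
    abts_run_test(suite, test_casptr_equal, NULL);

    return suite;
}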
static apr_status_t queue_info_cleanup(void *data_)
{
    fd_queue_info_t *qi = data_;

    apr_thread_cond_destroy(qi->wait_for_idler);
    apr_thread_mutex_destroy(qi->idlers_mutex);

    /* Clean up any pools in the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = qi->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((void*) &(qi->recycled_pools), first_pool->next,
                              first_pool) == first_pool) {
            apr_pool_destroy(first_pool->pool);
        }
    }

    return APR_SUCCESS;
}
void ap_push_pool(fd_queue_info_t * queue_info, apr_pool_t * pool_to_recycle)
{
    struct recycled_pool *new_recycle;

    /* If we have been given a pool to recycle, atomically link
     * it into the queue_info's list of recycled pools
     */
    if (!pool_to_recycle)
        return;

    if (queue_info->max_recycled_pools >= 0) {
        apr_uint32_t cnt = apr_atomic_read32(&queue_info->recycled_pools_count);
        if (cnt >= queue_info->max_recycled_pools) {
            apr_pool_destroy(pool_to_recycle);
            return;
        }
        apr_atomic_inc32(&queue_info->recycled_pools_count);
    }

    apr_pool_clear(pool_to_recycle);
    new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
                                                      sizeof (*new_recycle));
    new_recycle->pool = pool_to_recycle;
    for (;;) {
        /*
         * Save queue_info->recycled_pool in local variable next because
         * new_recycle->next can be changed after apr_atomic_casptr
         * function call. For gory details see PR 44402.
         */
        struct recycled_pool *next = queue_info->recycled_pools;
        new_recycle->next = next;
        if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
                              new_recycle, next) == next)
            break;
    }
}
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable, BUT
         *     we need to check for idle worker count again
         *     when we are signaled since it can happen that
         *     we are signaled by a worker thread that went idle
         *     but received a context switch before it could
         *     tell us.  If it does signal us later once it is on
         *     CPU again there might be no idle worker left.
         *     See
         *     https://issues.apache.org/bugzilla/show_bug.cgi?id=45605#c4
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        while (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */

    /* This function is safe only as long as it is single threaded because
     * it reaches into the queue and accesses "next" which can change.
     * We are OK today because it is only called from the listener thread.
     * cas-based pushes do not have the same limitation - any number can
     * happen concurrently with a single cas-based pop.
     */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((void*) &(queue_info->recycled_pools),
                              first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
                                          apr_pool_t **recycled_pool)
{
    apr_status_t rv;

    *recycled_pool = NULL;

    /* Block if the count of idle workers is zero */
    if (queue_info->idlers == 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still zero, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is nonzero, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now nonzero, it's safe for this function to
         *     return immediately.
         */
        if (queue_info->idlers == 0) {
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    /* Atomically decrement the idle worker count */
    apr_atomic_dec32(&(queue_info->idlers));

    /* Atomically pop a pool from the recycled list */
    for (;;) {
        struct recycled_pool *first_pool = queue_info->recycled_pools;
        if (first_pool == NULL) {
            break;
        }
        if (apr_atomic_casptr((volatile void**) &(queue_info->recycled_pools),
                              first_pool->next, first_pool) == first_pool) {
            *recycled_pool = first_pool->pool;
            break;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
void *apr_atomic_casptr_wrapper(volatile void **mem, void *with, const void *cmp)
{
    /* Perform the CAS, then call NoiseMaker() -- an external hook not defined
     * here, presumably used to perturb thread scheduling during testing --
     * before returning the old value. */
    void *ret = apr_atomic_casptr(mem, with, cmp);
    NoiseMaker(2, 0);
    return ret;
}