Example #1
rpc_state* add_cb(rpc_state *state) {
    // Recover the client index from the raw RPC reply payload.
    uint32_t *res = (uint32_t*) state->raw_input;
    uint32_t k = *res;

    n_callbacks_[k] += 1;
    LOG_DEBUG("client callback executed. rpc no: %u", n_rpc_);

    // Last client to finish records the end time and wakes the waiter.
    if (n_callbacks_[k] == max_rpc_) {
        if (apr_atomic_dec32(&n_active_cli_) == 0) {
            tm_end_ = apr_time_now();
            apr_thread_mutex_lock(mx_rpc_);
            apr_thread_cond_signal(cd_rpc_);
            apr_thread_mutex_unlock(mx_rpc_);
        }
    }

    // Every millionth callback, fold the batch into the shared counter
    // and report the throughput so far.
    if (n_callbacks_[k] % 1000000 == 0) {
        apr_atomic_add32(&n_rpc_, 1000000);
        tm_middle_ = apr_time_now();
        uint64_t p = tm_middle_ - tm_begin_;  // elapsed microseconds
        double rate = n_rpc_ * 1.0 / p;       // rpc/us == millions of rpc/s
        LOG_INFO("rpc rate: %0.2f million per second", rate);
    }

    // Issue another rpc unless this client has reached its quota.
    if (max_rpc_ < 0 || n_issues_[k] < max_rpc_) {
        n_issues_[k]++;
        call_add(clis_[k], k);
    }
    return NULL;
}
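Note the split between plain and atomic updates above: n_callbacks_[k] and n_issues_[k] are presumably only touched from client k's own callback, so ordinary increments suffice, while n_rpc_ and n_active_cli_ are shared across all clients and are therefore updated with apr_atomic_add32() and apr_atomic_dec32(). The rate math works out because apr_time_now() returns microseconds: RPCs per microsecond equals millions of RPCs per second.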
Example #2
SWITCH_DECLARE(void) switch_atomic_add(volatile switch_atomic_t *mem, uint32_t val)
{
#ifdef apr_atomic_t
	/* Legacy APR: type-generic apr_atomic_add() on apr_atomic_t. */
	apr_atomic_add((apr_atomic_t *)mem, val);
#else
	/* Modern APR: width-specific apr_atomic_add32() on apr_uint32_t. */
	apr_atomic_add32((apr_uint32_t *)mem, val);
#endif
}
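This FreeSWITCH wrapper appears to bridge two generations of the APR atomics API: older APR releases exposed a type-generic apr_atomic_add() (with apr_atomic_t available as a macro, which is what makes the #ifdef test possible), while modern APR provides only width-specific operations such as apr_atomic_add32(), so the shim compiles against whichever API the installed headers define.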
Example #3
static void test_add32(abts_case *tc, void *data)
{
    apr_uint32_t oldval;
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 23);
    oldval = apr_atomic_add32(&y32, 4);
    ABTS_INT_EQUAL(tc, 23, oldval);
    ABTS_INT_EQUAL(tc, 27, y32);
}
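Example #3 hinges on apr_atomic_add32() returning the value the variable held before the addition. A minimal standalone sketch of that contract (hypothetical file; assumes an installed APR and that apr_initialize() also initializes the atomics layer, as it does on common builds):

#include <stdio.h>
#include <apr_general.h>
#include <apr_atomic.h>

int main(void)
{
    apr_uint32_t counter;
    apr_uint32_t old;

    apr_initialize();                      /* set up APR, including atomics */

    apr_atomic_set32(&counter, 23);
    old = apr_atomic_add32(&counter, 4);   /* returns the pre-add value */

    printf("old=%u new=%u\n", (unsigned) old,
           (unsigned) apr_atomic_read32(&counter));
    /* expected output: old=23 new=27 */

    apr_terminate();
    return 0;
}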
Example #4
apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
{
    apr_int32_t new_idlers;
    /* apr_atomic_add32() returns the previous raw value; subtracting
     * zero_pt yields the logical idler count before our decrement. */
    new_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;
    /* Step down to the post-decrement count: if it went negative there
     * was no idle worker to reserve, so undo and report EAGAIN. */
    if (--new_idlers < 0) {
        apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
        return APR_EAGAIN;
    }
    return APR_SUCCESS;
}
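The idlers field here is a biased counter: the stored unsigned value is zero_pt plus the logical idler count, so the logical count can dip below zero (listeners waiting) without unsigned wraparound at zero. A rough, hypothetical sketch of that decode (ZERO_PT and reserve_idler are illustrative names, and the bias value is assumed rather than taken from the source):

#include <stdio.h>
#include <apr.h>
#include <apr_general.h>
#include <apr_atomic.h>

#define ZERO_PT (APR_UINT32_MAX / 2)            /* assumed bias midpoint */

static volatile apr_uint32_t idlers = ZERO_PT;  /* raw = ZERO_PT + logical count */

/* Take one idler; returns the logical count held *before* the decrement. */
static apr_int32_t reserve_idler(void)
{
    /* (apr_uint32_t)-1 wraps to all-ones, so the add decrements the raw value. */
    return (apr_int32_t)(apr_atomic_add32(&idlers, (apr_uint32_t)-1) - ZERO_PT);
}

int main(void)
{
    apr_initialize();
    printf("%d\n", (int) reserve_idler());   /* 0: no idler was available */
    printf("%d\n", (int) reserve_idler());   /* -1: one waiter already queued */
    apr_terminate();
    return 0;
}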
Example #5
static void test_set_add_inc_sub(abts_case *tc, void *data)
{
    apr_uint32_t y32;

    apr_atomic_set32(&y32, 0);
    apr_atomic_add32(&y32, 20);
    apr_atomic_inc32(&y32);
    apr_atomic_sub32(&y32, 10);

    ABTS_INT_EQUAL(tc, 11, y32);
}
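The arithmetic checks out: 0 + 20 + 1 - 10 = 11, which is exactly what the final assertion expects.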
Example #6
static void busyloop_add32(tbox_t *tbox)
{
    apr_uint32_t val;

    do {
        busyloop_read32(tbox);
        val = apr_atomic_add32(tbox->mem, tbox->postval);
        apr_thread_mutex_lock(thread_lock);
        ABTS_INT_EQUAL(tbox->tc, val, tbox->preval);
        apr_thread_mutex_unlock(thread_lock);
    } while (--tbox->loop);
}
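Here val is the pre-addition value returned by apr_atomic_add32(), compared against the expected value recorded by busyloop_read32(); the mutex guards only the assertion, presumably because the ABTS test machinery is not thread-safe, not the atomic operation itself.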
Example #7
void * APR_THREAD_FUNC thread_func_atomic(apr_thread_t *thd, void *data)
{
    int i;

    for (i = 0; i < NUM_ITERATIONS ; i++) {
        apr_atomic_inc32(&y);
        apr_atomic_add32(&y, 2);
        apr_atomic_dec32(&y);
        apr_atomic_dec32(&y);
    }
    apr_thread_exit(thd, exit_ret_val);
    return NULL;
}
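Each iteration nets +1 (plus one, plus two, minus one, minus one), so after the threads are joined y should have grown by exactly NUM_ITERATIONS per thread, which is presumably what the surrounding test asserts.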
Example #8
void
server_add_stat(dav_rawx_server_conf *conf, const char *n, apr_uint32_t value, apr_uint32_t duration)
{
	struct shm_stats_s *shm_stats;

	if (!n)
		return;

	if (!conf->shm.handle || !conf->lock.handle) { /* This should never happen! */
#ifdef HAVE_EXTRA_DEBUG
		abort();
#else
		return;
#endif
	}

	if (!n[0] || !n[1] || n[2]!='\0') { /* strlen(n)!=2 */
#ifdef HAVE_EXTRA_DEBUG
		abort();
#else
		return;
#endif
	}

	apr_global_mutex_lock(conf->lock.handle);
	shm_stats = apr_shm_baseaddr_get(conf->shm.handle);
	apr_global_mutex_unlock(conf->lock.handle);

	/* increment the appropriate counter */
	if (shm_stats) {
		switch (*n) {
			case 'q':
				switch (n[1]) {
					case '0':
						apr_atomic_add32(&(shm_stats->body.req_all), value);
						if(duration > 0) {
							apr_atomic_add32(&(shm_stats->body.time_all), duration);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_req_sec), shm_stats->body.req_all);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_duration), shm_stats->body.time_all);
						}
						break;
					case '1':
						apr_atomic_add32(&(shm_stats->body.req_chunk_get), value);
						if(duration > 0) {
							apr_atomic_add32(&(shm_stats->body.time_get), duration);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_req_get_sec), shm_stats->body.req_chunk_get);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_get_duration), shm_stats->body.time_get);
						}
						break;
					case '2':
						apr_atomic_add32(&(shm_stats->body.req_chunk_put), value);
						if(duration > 0) {
							apr_atomic_add32(&(shm_stats->body.time_put), duration);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_req_put_sec), shm_stats->body.req_chunk_put);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_put_duration), shm_stats->body.time_put);
						}
						break;
					case '3':
						apr_atomic_add32(&(shm_stats->body.req_chunk_del), value);
						if(duration > 0) {
							apr_atomic_add32(&(shm_stats->body.time_del), duration);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_req_del_sec), shm_stats->body.req_chunk_del);
							rawx_stats_rrd_push(&(shm_stats->body.rrd_del_duration), shm_stats->body.time_del);
						}
						break;
					case '4': apr_atomic_add32(&(shm_stats->body.req_stat), value); break;
					case '5': apr_atomic_add32(&(shm_stats->body.req_info), value); break;
					case '6': apr_atomic_add32(&(shm_stats->body.req_raw), value); break;
					case '7': apr_atomic_add32(&(shm_stats->body.req_other), value); break;
				}
				break;
			case 'r':
				switch (n[1]) {
					case '1': apr_atomic_add32(&(shm_stats->body.rep_2XX), value); break;
					case '2': apr_atomic_add32(&(shm_stats->body.rep_4XX), value); break;
					case '3': apr_atomic_add32(&(shm_stats->body.rep_5XX), value); break;
					case '4': apr_atomic_add32(&(shm_stats->body.rep_other), value); break;
					case '5': apr_atomic_add32(&(shm_stats->body.rep_403), value); break;
					case '6': apr_atomic_add32(&(shm_stats->body.rep_404), value); break;
					case '7': apr_atomic_add32(&(shm_stats->body.rep_bread), value); break;
					case '8': apr_atomic_add32(&(shm_stats->body.rep_bwritten), value); break;
				}
				break;
		}
	}
}
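The stat name n is a two-character key: 'q0' through 'q7' select request counters (all, chunk GET, chunk PUT, chunk DELETE, stat, info, raw, other), each with a matching time_* accumulator bumped when a duration is supplied, while 'r1' through 'r8' select reply counters (by status class, 403, 404, and bytes read/written).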
Example #9
void
server_add_stat(dav_rainx_server_conf *conf, const char *n, apr_uint32_t value, apr_uint32_t duration)
{
	EXTRA_ASSERT(NULL != conf->shm.handle);
	EXTRA_ASSERT(NULL != conf->lock.handle);
	EXTRA_ASSERT(n && n[0] && n[1]);

	apr_global_mutex_lock(conf->lock.handle);
	struct shm_stats_s *shm_stats = apr_shm_baseaddr_get(conf->shm.handle);
	apr_global_mutex_unlock(conf->lock.handle);

	if (!shm_stats)
		return;

	switch (*n) {
		case 'q':
			switch (n[1]) {
				case '0':
					apr_atomic_add32(&(shm_stats->body.req_all), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_all), duration);
					break;
				case '1':
					apr_atomic_add32(&(shm_stats->body.req_chunk_get), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_get), duration);
					break;
				case '2':
					apr_atomic_add32(&(shm_stats->body.req_chunk_put), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_put), duration);
					break;
				case '3':
					apr_atomic_add32(&(shm_stats->body.req_chunk_del), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_del), duration);
					break;
				case '4':
					apr_atomic_add32(&(shm_stats->body.req_stat), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_stat), duration);
					break;
				case '5':
					apr_atomic_add32(&(shm_stats->body.req_info), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_info), duration);
					break;
				case '6':
					apr_atomic_add32(&(shm_stats->body.req_raw), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_raw), duration);
					break;
				case '7':
					apr_atomic_add32(&(shm_stats->body.req_other), value);
					if (duration > 0)
						apr_atomic_add32(&(shm_stats->body.time_other), duration);
					break;
			}
			break;
		case 'r':
			switch (n[1]) {
				case '1': apr_atomic_add32(&(shm_stats->body.rep_2XX), value); break;
				case '2': apr_atomic_add32(&(shm_stats->body.rep_4XX), value); break;
				case '3': apr_atomic_add32(&(shm_stats->body.rep_5XX), value); break;
				case '4': apr_atomic_add32(&(shm_stats->body.rep_other), value); break;
				case '5': apr_atomic_add32(&(shm_stats->body.rep_403), value); break;
				case '6': apr_atomic_add32(&(shm_stats->body.rep_404), value); break;
				case '7': apr_atomic_add32(&(shm_stats->body.rep_bread), value); break;
				case '8': apr_atomic_add32(&(shm_stats->body.rep_bwritten), value); break;
			}
			break;
	}
}
Example #10
APR_DECLARE(apr_uint32_t) apr_atomic_inc32(volatile apr_uint32_t *mem)
{
    return apr_atomic_add32(mem, 1);
}
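Since apr_atomic_add32() returns the pre-add value, this generic implementation means apr_atomic_inc32() likewise returns the value the variable held before the increment.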
Example #11
apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
                                          int *had_to_block)
{
    apr_status_t rv;
    apr_int32_t prev_idlers;

    /* Atomically decrement the idle worker count, saving the old value */
    /* See TODO in ap_queue_info_set_idle() */
    prev_idlers = apr_atomic_add32(&(queue_info->idlers), -1) - zero_pt;

    /* Block if there weren't any idle workers */
    if (prev_idlers <= 0) {
        rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            AP_DEBUG_ASSERT(0);
            /* See TODO in ap_queue_info_set_idle() */
            apr_atomic_inc32(&(queue_info->idlers));    /* back out dec */
            return rv;
        }
        /* Re-check the idle worker count to guard against a
         * race condition.  Now that we're in the mutex-protected
         * region, one of two things may have happened:
         *   - If the idle worker count is still negative, the
         *     workers are all still busy, so it's safe to
         *     block on a condition variable.
         *   - If the idle worker count is non-negative, then a
         *     worker has become idle since the first check
         *     of queue_info->idlers above.  It's possible
         *     that the worker has also signaled the condition
         *     variable--and if so, the listener missed it
         *     because it wasn't yet blocked on the condition
         *     variable.  But if the idle worker count is
         *     now non-negative, it's safe for this function to
         *     return immediately.
         *
         *     A "negative value" (relative to zero_pt) in
         *     queue_info->idlers tells how many
         *     threads are waiting on an idle worker.
         */
        if (queue_info->idlers < zero_pt) {
            *had_to_block = 1;
            rv = apr_thread_cond_wait(queue_info->wait_for_idler,
                                      queue_info->idlers_mutex);
            if (rv != APR_SUCCESS) {
                apr_status_t rv2;
                AP_DEBUG_ASSERT(0);
                rv2 = apr_thread_mutex_unlock(queue_info->idlers_mutex);
                if (rv2 != APR_SUCCESS) {
                    return rv2;
                }
                return rv;
            }
        }
        rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
        if (rv != APR_SUCCESS) {
            return rv;
        }
    }

    if (queue_info->terminated) {
        return APR_EOF;
    }
    else {
        return APR_SUCCESS;
    }
}
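Together with Example #4 this shows the classic fast-path/slow-path split: an optimistic atomic decrement reserves an idler without locking, and only when the biased count says none were available does the listener take the mutex, re-check the counter, and block on the condition variable, closing the race with a worker that went idle in between.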