Code example #1
static void *thr_enqueuer(void *_count)
{
	unsigned long long *count = _count;

	printf_verbose("thread_begin %s, tid %lu\n",
			"enqueuer", urcu_get_thread_id());

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct test *node = malloc(sizeof(*node));
		if (!node)
			goto fail;
		cds_lfs_node_init(&node->list);
		cds_lfs_push(&s, &node->list);
		URCU_TLS(nr_successful_enqueues)++;

		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
fail:
		URCU_TLS(nr_enqueues)++;
		if (caa_unlikely(!test_duration_enqueue()))
			break;
	}

	rcu_unregister_thread();

	count[0] = URCU_TLS(nr_enqueues);
	count[1] = URCU_TLS(nr_successful_enqueues);
	printf_verbose("enqueuer thread_end, tid %lu, "
			"enqueues %llu successful_enqueues %llu\n",
			urcu_get_thread_id(),
			URCU_TLS(nr_enqueues),
			URCU_TLS(nr_successful_enqueues));
	return ((void*)1);

}
Code example #2
File: futex.c  Project: MarkZ3/lttng-tools
/*
 * Wake 1 futex.
 */
LTTNG_HIDDEN
void futex_nto1_wake(int32_t *futex)
{
	if (caa_unlikely(uatomic_read(futex) == -1)) {
		uatomic_set(futex, 0);
		futex_async(futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}

	DBG("Futex n to 1 wake done");
}
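
The snippet above shows only the wake side of the n-to-1 futex pattern. As a complement, here is a minimal sketch of what a matching waiter typically does: advertise itself by setting the futex word to -1, then block with FUTEX_WAIT while the word is still -1. The helper name futex_nto1_wait_sketch is made up for illustration, and the sketch assumes the project's futex_async() and uatomic helpers are visible here; the real wait-side helpers in lttng-tools may differ in detail.

/* Hypothetical wait side pairing with futex_nto1_wake() above. */
static void futex_nto1_wait_sketch(int32_t *futex)
{
	/* Advertise a waiter before blocking. */
	uatomic_set(futex, -1);
	cmm_smp_mb();

	/*
	 * Block only while the word is still -1. futex_nto1_wake()
	 * resets it to 0 before issuing FUTEX_WAKE, so if the wake
	 * already happened the kernel returns immediately.
	 */
	futex_async(futex, FUTEX_WAIT, -1, NULL, NULL, 0);
	uatomic_set(futex, 0);
}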
Code example #3
File: test_urcu_wfq.c  Project: avis/rcu
void *thr_enqueuer(void *_count)
{
	unsigned long long *count = _count;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"enqueuer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_wfq_node *node = malloc(sizeof(*node));
		if (!node)
			goto fail;
		cds_wfq_node_init(node);
		cds_wfq_enqueue(&q, node);
		URCU_TLS(nr_successful_enqueues)++;

		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
fail:
		URCU_TLS(nr_enqueues)++;
		if (caa_unlikely(!test_duration_enqueue()))
			break;
	}

	count[0] = URCU_TLS(nr_enqueues);
	count[1] = URCU_TLS(nr_successful_enqueues);
	printf_verbose("enqueuer thread_end, thread id : %lx, tid %lu, "
		       "enqueues %llu successful_enqueues %llu\n",
		       pthread_self(),
			(unsigned long) gettid(),
		       URCU_TLS(nr_enqueues), URCU_TLS(nr_successful_enqueues));
	return ((void*)1);

}
Code example #4
File: lttng-context-vpid.c  Project: lttng/lttng-ust
static inline
pid_t wrapper_getvpid(void)
{
	pid_t vpid;

	vpid = CMM_LOAD_SHARED(cached_vpid);
	if (caa_unlikely(!vpid)) {
		vpid = getpid();
		CMM_STORE_SHARED(cached_vpid, vpid);
	}
	return vpid;
}
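
The cached value in wrapper_getvpid() goes stale across fork(), because the child inherits the parent's cached PID, so a cache like this is typically reset from a fork handler. The sketch below illustrates such a reset against the cached_vpid variable assumed by the example; the helper name and the way it is hooked into fork handling in lttng-ust may differ.

/* Hypothetical fork-time reset for the cached vpid above. */
static void vpid_cache_reset_sketch(void)
{
	/* The next wrapper_getvpid() call re-reads the PID via getpid(). */
	CMM_STORE_SHARED(cached_vpid, 0);
}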
Code example #5
void *thr_writer(void *data)
{
	unsigned long wtidx = (unsigned long)data;
	long tidx;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		for (tidx = 0; tidx < nr_readers; tidx++) {
			pthread_mutex_lock(&per_thread_lock[tidx].lock);
		}
		test_array.a = 0;
		test_array.a = 8;
		if (caa_unlikely(wduration))
			loop_sleep(wduration);
		for (tidx = (long)nr_readers - 1; tidx >= 0; tidx--) {
			pthread_mutex_unlock(&per_thread_lock[tidx].lock);
		}
		URCU_TLS(nr_writes)++;
		if (caa_unlikely(!test_duration_write()))
			break;
		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
	}

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", (unsigned long) pthread_self(),
			(unsigned long) gettid());
	tot_nr_writes[wtidx] = URCU_TLS(nr_writes);
	return ((void*)2);
}
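
In the benchmark above, the writer takes every reader's per-thread lock before touching test_array, so a reader only ever needs its own lock to observe a consistent value and never sees the intermediate 0. A minimal sketch of a matching reader loop follows; it assumes the same per_thread_lock, test_array and test_go globals as the writer, nr_reads / test_duration_read() counterparts to the writer's counters, that test_array.a starts out at 8, and <assert.h> for the consistency check.

/* Hypothetical reader pairing with thr_writer() above. */
void *thr_reader_sketch(void *data)
{
	unsigned long tidx = (unsigned long)data;

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		/* Own lock suffices: the writer holds all of them while writing. */
		pthread_mutex_lock(&per_thread_lock[tidx].lock);
		assert(test_array.a == 8);
		pthread_mutex_unlock(&per_thread_lock[tidx].lock);
		URCU_TLS(nr_reads)++;
		if (caa_unlikely(!test_duration_read()))
			break;
	}
	return ((void*)1);
}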
Code example #6
File: test_urcu_wfq.c  Project: avis/rcu
void *thr_dequeuer(void *_count)
{
	unsigned long long *count = _count;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"dequeuer", (unsigned long) pthread_self(),
			(unsigned long) gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		struct cds_wfq_node *node = cds_wfq_dequeue_blocking(&q);

		if (node) {
			free(node);
			URCU_TLS(nr_successful_dequeues)++;
		}

		URCU_TLS(nr_dequeues)++;
		if (caa_unlikely(!test_duration_dequeue()))
			break;
		if (caa_unlikely(rduration))
			loop_sleep(rduration);
	}

	printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
		       "dequeues %llu, successful_dequeues %llu\n",
		       pthread_self(),
			(unsigned long) gettid(),
		       URCU_TLS(nr_dequeues), URCU_TLS(nr_successful_dequeues));
	count[0] = URCU_TLS(nr_dequeues);
	count[1] = URCU_TLS(nr_successful_dequeues);
	return ((void*)2);
}
Code example #7
File: futex.c  Project: RomainNaour/lttng-tools
/*
 * Wake 1 futex.
 */
LTTNG_HIDDEN
void futex_nto1_wake(int32_t *futex)
{
	if (caa_unlikely(uatomic_read(futex) != -1))
		goto end;
	uatomic_set(futex, 0);
	if (futex_async(futex, FUTEX_WAKE, 1, NULL, NULL, 0) < 0) {
		PERROR("futex_async");
		abort();
	}
end:
	DBG("Futex n to 1 wake done");
}
Code example #8
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the ring buffer backend structure
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 * @handle: shared memory handle for this buffer's channel
 * @shmobj: shared memory object in which the backend is allocated
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;
	long page_size;

	chanb = &shmp(handle, bufb->chan)->backend;
	if (!chanb)
		return -EINVAL;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb)
		num_subbuf_alloc++;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size <= 0) {
		goto page_size_error;
	}

	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on page size.
	 */
	align_shm(shmobj, page_size);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
				* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;

	for (i = 0; i < num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;

		sb = shmp_index(handle, bufb->buf_wsb, i);
		if (!sb)
			goto free_array;
		sb->id = subbuffer_id(config, 0, 1, i);
	}

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
				* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
		struct lttng_ust_lib_ring_buffer_backend_pages *pages;
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		sbp = shmp_index(handle, bufb->array, i);
		if (!sbp)
			goto free_array;
		pages = shmp(handle, sbp->shmp);
		if (!pages)
			goto free_array;
		set_shmp(pages->p, ref);
		if (config->output == RING_BUFFER_MMAP) {
			pages->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_wsb:
	/* bufb->buf_wsb will be freed by shm teardown */
free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
page_size_error:
	return -ENOMEM;
}
Code example #9
void *test_hash_rw_thr_writer(void *_count)
{
	struct lfht_test_node *node;
	struct cds_lfht_node *ret_node;
	struct cds_lfht_iter iter;
	struct wr_count *count = _count;
	int ret;

	printf_verbose("thread_begin %s, tid %lu\n",
			"writer", urcu_get_thread_id());

	URCU_TLS(rand_lookup) = urcu_get_thread_id() ^ time(NULL);

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		if ((addremove == AR_ADD || add_only)
				|| (addremove == AR_RANDOM && rand_r(&URCU_TLS(rand_lookup)) & 1)) {
			node = malloc(sizeof(struct lfht_test_node));
			lfht_test_node_init(node,
				(void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
				sizeof(void *));
			rcu_read_lock();
			if (add_unique) {
				ret_node = cds_lfht_add_unique(test_ht,
					test_hash(node->key, node->key_len, TEST_HASH_SEED),
					test_match, node->key, &node->node);
			} else {
				if (add_replace)
					ret_node = cds_lfht_add_replace(test_ht,
							test_hash(node->key, node->key_len, TEST_HASH_SEED),
							test_match, node->key, &node->node);
				else
					cds_lfht_add(test_ht,
						test_hash(node->key, node->key_len, TEST_HASH_SEED),
						&node->node);
			}
			rcu_read_unlock();
			if (add_unique && ret_node != &node->node) {
				free(node);
				URCU_TLS(nr_addexist)++;
			} else {
				if (add_replace && ret_node) {
					call_rcu(&to_test_node(ret_node)->head,
							free_node_cb);
					URCU_TLS(nr_addexist)++;
				} else {
					URCU_TLS(nr_add)++;
				}
			}
		} else {
			/* May delete */
			rcu_read_lock();
			cds_lfht_test_lookup(test_ht,
				(void *)(((unsigned long) rand_r(&URCU_TLS(rand_lookup)) % write_pool_size) + write_pool_offset),
				sizeof(void *), &iter);
			ret = cds_lfht_del(test_ht, cds_lfht_iter_get_node(&iter));
			rcu_read_unlock();
			if (ret == 0) {
				node = cds_lfht_iter_get_test_node(&iter);
				call_rcu(&node->head, free_node_cb);
				URCU_TLS(nr_del)++;
			} else
				URCU_TLS(nr_delnoent)++;
		}
#if 0
		//if (URCU_TLS(nr_writes) % 100000 == 0) {
		if (URCU_TLS(nr_writes) % 1000 == 0) {
			rcu_read_lock();
			if (rand_r(&URCU_TLS(rand_lookup)) & 1) {
				ht_resize(test_ht, 1);
			} else {
				ht_resize(test_ht, -1);
			}
			rcu_read_unlock();
		}
#endif //0
		URCU_TLS(nr_writes)++;
		if (caa_unlikely(!test_duration_write()))
			break;
		if (caa_unlikely(wdelay))
			loop_sleep(wdelay);
		if (caa_unlikely((URCU_TLS(nr_writes) & ((1 << 10) - 1)) == 0))
			rcu_quiescent_state();
	}

	rcu_unregister_thread();

	printf_verbose("thread_end %s, tid %lu\n",
			"writer", urcu_get_thread_id());
	printf_verbose("info tid %lu: nr_add %lu, nr_addexist %lu, nr_del %lu, "
			"nr_delnoent %lu\n", urcu_get_thread_id(),
			URCU_TLS(nr_add),
			URCU_TLS(nr_addexist),
			URCU_TLS(nr_del),
			URCU_TLS(nr_delnoent));
	count->update_ops = URCU_TLS(nr_writes);
	count->add = URCU_TLS(nr_add);
	count->add_exist = URCU_TLS(nr_addexist);
	count->remove = URCU_TLS(nr_del);
	return ((void*)2);
}
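
Example #9 exercises only the update side of the RCU lock-free hash table. For contrast, here is a minimal sketch of the read side: an RCU-protected lookup through cds_lfht_lookup(), reusing the writer's test_ht, test_hash(), test_match and TEST_HASH_SEED; the helper name and its return convention are illustrative rather than taken from the cited test.

/* Hypothetical RCU read-side lookup pairing with the writer above. */
static int lookup_one_key_sketch(void *key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;
	int found;

	rcu_read_lock();
	cds_lfht_lookup(test_ht,
		test_hash(key, sizeof(void *), TEST_HASH_SEED),
		test_match, key, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	/* The node may only be dereferenced inside the read-side critical section. */
	found = (ht_node != NULL);
	rcu_read_unlock();
	return found;
}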
Code example #10
File: dynamic.c  Project: 5kg/lttng-gsoc
/* Runtime tracepoint */
void tracepoint_of(const struct tracepoint* __tracepoint)
{
    if (caa_unlikely(__tracepoint->state))
        __tracepoint_cb(__tracepoint);
}
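
All of the examples above route their rare or error paths through caa_unlikely(). In liburcu this macro comes from urcu/compiler.h and is essentially a branch-prediction hint for the compiler; the definition sketched below is the conventional __builtin_expect form, paraphrased rather than copied from the installed headers.

/* Typical definition (paraphrased); see urcu/compiler.h for the authoritative one. */
#define caa_likely(x)	__builtin_expect(!!(x), 1)
#define caa_unlikely(x)	__builtin_expect(!!(x), 0)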