int main(int argc, char *argv[])
{
	int i;
	long long nc;
	long long nf;
	int nkids = 1;
	int runlength = 1;
	int totbefore;

	smp_init();
	initpools();

	if (argc > 1) {
		nkids = strtoul(argv[1], NULL, 0);
		if (nkids > NR_THREADS) {
			fprintf(stderr, "nkids = %d too large, max = %d\n",
				nkids, NR_THREADS);
			usage(argv[0]);
		}
	}
	if (argc > 2) {
		runlength = strtoul(argv[2], NULL, 0);
		if (runlength > MAX_RUN) {
			fprintf(stderr, "nkids = %d too large, max = %d\n",
				runlength, MAX_RUN);
			usage(argv[0]);
		}
	}
	printf("%d %d ", nkids, runlength);

	init_per_thread(results, 0L);
	init_per_thread(failures, 0L);
	totbefore = memblocks_available();

	goflag = 1;
	for (i = 0; i < nkids; i++)
		create_thread(memblock_test, (void *)(long)runlength);

	sleep(1);
	goflag = 0;

	wait_all_threads();
	nf = nc = 0;
	for (i = 0; i < NR_THREADS; i++) {
		nc += per_thread(results, i);
		nf += per_thread(failures, i);
	}
	printf("a/f: %Ld  fail: %Ld\n", nc, nf);
	if (memblocks_available() != totbefore) {
		printf("memblocks: before: %d after: %d\n",
		       totbefore, memblocks_available());
	}

	exit(0);
}
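
For reference, a minimal sketch of what each child thread might look like, assuming memblock_alloc() and memblock_free() are the allocator entry points under test; the per-thread results and failures counters are the ones summed in main() above, but the loop body itself is an illustration, not the test's actual code.

void *memblock_test(void *arg)
{
	struct memblock *p[MAX_RUN];
	long runlength = (long)arg;
	int i;

	while (!goflag)
		barrier();		/* Wait for the parent's go signal. */
	while (goflag) {
		for (i = 0; i < runlength; i++)
			p[i] = memblock_alloc();
		for (i = 0; i < runlength; i++) {
			if (p[i] == NULL) {
				__get_thread_var(failures)++;
			} else {
				memblock_free(p[i]);
				__get_thread_var(results)++;
			}
		}
	}
	return NULL;
}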
void synchronize_rcu(void)
{
	int t;
	long oldval;

	/* Memory barrier ensures mutation seen before grace period. */

	smp_mb();

	/*
	 * Atomically advance to a new grace-period number, enforce ordering.
	 * Failure is OK -- someone else will have advanced the
	 * grace-period number for us.
	 */

	oldval = rcu_gp_ctr;
	(void)cmpxchg(&rcu_gp_ctr, oldval, oldval - 1);
	smp_mb();

	/*
	 * Wait until all threads are either out of their RCU read-side
	 * critical sections or are aware of the new grace period.
	 */

	for_each_thread(t) {
		while (per_thread(rcu_reader_gp, t) <= -oldval) {
			/*@@@ poll(NULL, 0, 10); */
			barrier();
		}
	}

	/* Ensure that any subsequent free()s happen -after- above checks. */

	smp_mb();
}
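
Whichever grace-period implementation is in use, the updater-side pattern it enables is the same; a minimal sketch, assuming a global pointer gptr published to readers (struct foo, gptr, and update_foo() are hypothetical names):

struct foo *gptr;

void update_foo(struct foo *newp)
{
	struct foo *oldp = gptr;

	rcu_assign_pointer(gptr, newp);	/* Publish the new version. */
	synchronize_rcu();		/* Wait for pre-existing readers. */
	free(oldp);			/* Now safe to reclaim the old version. */
}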
Example No. 3
void initpools(void)
{
	int i;
	int j;

	for (i = 0; i < NR_THREADS; i++) {
		per_thread(perthreadmem, i).cur = -1;	/* -1 means "pool empty". */
		for (j = 0; j < 2 * TARGET_POOL_SIZE; j++) {
			per_thread(perthreadmem, i).pool[j] = NULL;
		}
	}
	spin_lock_init(&globalmem.mutex);
	globalmem.cur = -1;
	for (i = 0; i < GLOBAL_POOL_SIZE; i++) {
		memblock_free(&memblocks[i]);	/* Stock the global pool. */
	}
}
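
The pool structures themselves are not shown; judging from the accesses above, they presumably look something like the following (the exact field layout is an assumption):

struct perthreadmempool {
	int cur;			/* Index of topmost free block, -1 if empty. */
	struct memblock *pool[2 * TARGET_POOL_SIZE];
};

struct globalmempool {
	spinlock_t mutex;		/* Serializes access to the global pool. */
	int cur;			/* Index of topmost free block, -1 if empty. */
	struct memblock *pool[GLOBAL_POOL_SIZE];
} globalmem;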
Example No. 4
unsigned long read_count(void)
{
	int t;
	unsigned long sum = 0;

	for_each_thread(t)
		sum += atomic_read(&per_thread(counter, t));
	return sum;
}
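
The corresponding update side needs no locking, because each thread increments only its own counter; a sketch assuming the same per-thread atomic_t used above:

void inc_count(void)
{
	atomic_inc(&__get_thread_var(counter));	/* Touch only our own counter. */
}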
Example No. 5
int memblocks_available(void)
{
	int i;
	int sum = globalmem.cur + 1;	/* cur is a top-of-stack index, so cur + 1 is a count. */

	for_each_thread(i)
		sum += per_thread(perthreadmem, i).cur + 1;
	return sum;
}
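
The cur fields act as top-of-stack indices (-1 when empty), which is why adding one converts each into a block count. A deliberately simplified fast path illustrating the convention; a real memblock_alloc() would refill from globalmem rather than failing outright:

struct memblock *memblock_alloc(void)
{
	struct perthreadmempool *p = &__get_thread_var(perthreadmem);

	if (p->cur >= 0)
		return p->pool[p->cur--];	/* Pop the topmost free block. */
	return NULL;		/* Simplified: refill from globalmem omitted. */
}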
Example No. 6
void synchronize_rcu(void)
{
	int t;
	long rcu_gp_ctr;

	/* Memory barrier ensures mutation seen before grace period. */

	smp_mb();

	/* Only one synchronize_rcu() at a time. */

	spin_lock(&rcu_gp_lock);

	/*
	 * Pick up a new grace-period timestamp, clearing low-order bit,
	 * and enforcing ordering.
	 */

	rcu_gp_ctr = get_timestamp() & ~0x1;
	smp_mb();

	/*
	 * Wait until all threads are either out of their RCU read-side
	 * critical sections or are aware of the new grace period.
	 */

	for_each_thread(t) {
		while ((per_thread(rcu_reader_gp, t) & 0x1) &&
		       ((per_thread(rcu_reader_gp, t) - rcu_gp_ctr) < 0)) {
			/*@@@ poll(NULL, 0, 10); */
			barrier();
		}
	}

	/* Let other synchronize_rcu() instances move ahead. */

	spin_unlock(&rcu_gp_lock);

	/* Ensure that any subsequent free()s happen -after- above checks. */

	smp_mb();
}
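
The wait condition above implies a reader that publishes an odd timestamp on entry and an even value on exit; a sketch under that assumption (nesting is deliberately ignored here):

void rcu_read_lock(void)
{
	__get_thread_var(rcu_reader_gp) = get_timestamp() | 0x1;	/* Odd: in critical section. */
	smp_mb();
}

void rcu_read_unlock(void)
{
	smp_mb();
	__get_thread_var(rcu_reader_gp) = get_timestamp() & ~0x1;	/* Even: quiescent. */
}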
	/*
	 * Quiescent-state wait loop from the signal-based
	 * synchronize_rcu() (the full function appears below).
	 */
	for_each_tid(t, tid) {
		p = per_thread(urcu_statep, t);
		if (p == NULL)
			continue;
		while (p->urcu_qs != URCU_QS_DONE) {
			poll(NULL, 0, 1);
			if (p->urcu_qs == URCU_QS_REQ)
				pthread_kill(tid, SIGUSR1);
		}
		p->urcu_qs = URCU_QS_IDLE;
	}
void perftestrun(int nthreads, int nreaders, int nwriters)
{
	int t;
	int duration = 240;
	long long n_reads = 0LL;
	long long n_read_retries = 0LL;
	long long n_read_errs = 0LL;
	long long n_writes = 0LL;

	smp_mb();
	while (atomic_read(&nthreadsrunning) < nthreads)
		poll(NULL, 0, 1);
	goflag = GOFLAG_RUN;
	smp_mb();
	poll(NULL, 0, duration);
	smp_mb();
	goflag = GOFLAG_STOP;
	smp_mb();
	wait_all_threads();
	for_each_thread(t) {
		n_reads += per_thread(n_reads_pt, t);
		n_read_retries += per_thread(n_read_retries_pt, t);
		n_read_errs += per_thread(n_read_errs_pt, t);
		n_writes += per_thread(n_writes_pt, t);
	}
	if (n_read_errs != 0)
		printf("!!! read-side errors detected: %lld\n", n_read_errs);
	printf("n_reads: %lld n_read_retries: %lld n_writes: %lld nreaders: %d  nwriters: %d n_elems: %d duration: %d\n",
	       n_reads, n_read_retries, n_writes, nreaders, nwriters, n_elems, duration);
	printf("ns/read: %g  ns/write: %g\n",
	       ((duration * 1000*1000.*(double)nreaders) /
	        (double)n_reads),
	       ((duration * 1000*1000.*(double)nwriters) /
	        (double)n_writes));
	exit(0);
}
Example No. 9
void *eventual(void *arg)
{
	int t;
	int sum;

	while (stopflag < 3) {
		sum = 0;
		for_each_thread(t)
			sum += ACCESS_ONCE(per_thread(counter, t));
		ACCESS_ONCE(global_count) = sum;
		poll(NULL, 0, 1);
		if (stopflag) {
			smp_mb();
			stopflag++;
		}
	}
	return NULL;
}
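
In this eventually-consistent scheme, updaters touch only their own counter and readers return the cached sum maintained by eventual(); sketches of both, assuming the variables above:

void inc_count(void)
{
	ACCESS_ONCE(__get_thread_var(counter))++;	/* No atomics, no barriers. */
}

unsigned long read_count(void)
{
	return ACCESS_ONCE(global_count);	/* May lag the true sum slightly. */
}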
void synchronize_rcu(void)
{
	int t;

	/* Memory barrier ensures mutation seen before grace period. */

	smp_mb();

	/* Only one synchronize_rcu() at a time. */

	spin_lock(&rcu_gp_lock);

	/* Advance to a new grace-period number, enforce ordering. */

	rcu_gp_ctr += RCU_GP_CTR_BOTTOM_BIT;
	smp_mb();

	/*
	 * Wait until all threads are either out of their RCU read-side
	 * critical sections or are aware of the new grace period.
	 */

	for_each_thread(t) {
		while (rcu_gp_ongoing(t) &&
		       ((per_thread(rcu_reader_gp, t) - rcu_gp_ctr) < 0)) {
			/*@@@ poll(NULL, 0, 10); */
			barrier();
		}
	}

	/* Let other synchronize_rcu() instances move ahead. */

	spin_unlock(&rcu_gp_lock);

	/* Ensure that any subsequent free()s happen -after- above checks. */

	smp_mb();
}
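
The wait condition above relies on readers keeping the grace-period number in the upper bits of rcu_reader_gp and a nesting count in the bits below RCU_GP_CTR_BOTTOM_BIT; a sketch of matching read-side primitives under that assumption (RCU_GP_CTR_NEST_MASK is defined here for illustration):

#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BOTTOM_BIT - 1)

void rcu_read_lock(void)
{
	long tmp = __get_thread_var(rcu_reader_gp);

	if ((tmp & RCU_GP_CTR_NEST_MASK) == 0)
		tmp = ACCESS_ONCE(rcu_gp_ctr);	/* Outermost: snapshot the global counter. */
	__get_thread_var(rcu_reader_gp) = tmp + 1;	/* Bump the nesting count. */
	smp_mb();
}

void rcu_read_unlock(void)
{
	smp_mb();
	__get_thread_var(rcu_reader_gp)--;	/* Drop one nesting level. */
}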
void synchronize_rcu(void)
{
	struct urcu_state *p;
	int t;
	thread_id_t tid;

	/* Memory barrier ensures mutation seen before grace period. */

	smp_mb();

	/* Only one synchronize_rcu() at a time. */

	spin_lock(&rcu_gp_lock);

	/* Request a quiescent state from each thread. */

	for_each_tid(t, tid) {
		p = per_thread(urcu_statep, t);
		if (p != NULL) {
			p->urcu_qs = URCU_QS_REQ;
			pthread_kill(tid, SIGUSR1);
		}
	}

	/* Wait for each thread to pass through a quiescent state. */

	for_each_tid(t, tid) {
		p = per_thread(urcu_statep, t);
		if (p == NULL)
			continue;
		while (p->urcu_qs != URCU_QS_DONE) {
			poll(NULL, 0, 1);
			if (p->urcu_qs == URCU_QS_REQ)
				pthread_kill(tid, SIGUSR1);
		}
		p->urcu_qs = URCU_QS_IDLE;
	}

	/* Let other synchronize_rcu() instances move ahead. */

	spin_unlock(&rcu_gp_lock);

	/* Ensure that any subsequent free()s happen -after- above checks. */

	smp_mb();
}
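
Not shown in this snippet is the reader-side SIGUSR1 handler; a minimal sketch, assuming a hypothetical per-thread nesting count (urcu_nesting) so that a quiescent state is reported only when the thread is outside all read-side critical sections:

void sigusr1_handler(int unused)
{
	struct urcu_state *p = __get_thread_var(urcu_statep);

	if (p != NULL && p->urcu_qs == URCU_QS_REQ &&
	    __get_thread_var(urcu_nesting) == 0) {
		smp_mb();			/* Order prior accesses first. */
		p->urcu_qs = URCU_QS_DONE;	/* Report the quiescent state. */
	}
}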
Example No. 12
int main(int argc, char *argv[])
{
	int done;
	int i;
	int iter;

	smp_init();

	if (argc > 1) {
		nkids = strtoul(argv[1], NULL, 0);
		if (nkids > NR_THREADS) {
			fprintf(stderr, "nkids = %d too large, max = %d\n",
				nkids, NR_THREADS);
			usage(argv[0]);
		}
	}
	printf("Number of threads: %d\n", nkids);

	spin_lock_init(&mutex);
	goflag = GOFLAG_INIT;
	for (i = 0; i < nkids; i++)
		create_thread(init_test, NULL);

	for (iter = 0; iter < 100; iter++) {
		spin_lock(&mutex);
		for_each_thread(i) {
			per_thread(doneflag, i) = 0;
		}
		__get_thread_var(doneflag) = 1;
		atomic_set(&counter, 0);
		atomic_set(&start_count, 0);
		initialized = 0;
		spin_unlock(&mutex);
		spin_lock(&mutex);
		goflag = GOFLAG_START;
		spin_unlock(&mutex);
		poll(NULL, 0, 1);
		done = 0;
		while (!done) {
			done = 1;
			for (i = 0; i < nkids; i++)
				if (!per_thread(doneflag, i)) {
					done = 0;
					break;
				}
			poll(NULL, 0, 1);
		}
		if (atomic_read(&counter) != 1) {
			printf("Double initialization, counter = %d\n",
			       atomic_read(&counter));
			exit(-1);
		} else {
			printf("Iteration %d succeeded\n", iter);
		}
		spin_lock(&mutex);
		atomic_set(&counter, 0);
		spin_unlock(&mutex);
		spin_lock(&mutex);
		goflag = GOFLAG_INIT;
		while (atomic_read(&counter) < nkids)
			poll(NULL, 0, 1);
		spin_unlock(&mutex);
		spin_lock(&mutex);
		atomic_set(&counter, 0);
		spin_unlock(&mutex);
	}

	goflag = GOFLAG_STOP;

	wait_all_threads();

	exit(0);
}
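
The property this loop checks is that exactly one child performs initialization per iteration (counter must equal exactly 1). A sketch of the double-checked pattern the children presumably race through; the atomic_inc() stands in for the counted initialization work:

void init_once(void)
{
	if (initialized) {
		smp_mb();		/* Pair with the update side's barrier. */
		return;			/* Common case: already initialized. */
	}
	spin_lock(&mutex);
	if (!initialized) {		/* Re-check under the lock. */
		atomic_inc(&counter);	/* The initialization, counted by the test. */
		smp_mb();		/* Init happens before the flag is set. */
		initialized = 1;
	}
	spin_unlock(&mutex);
}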