Example #1
File: act.c  Project: aanguss/act
int main(int argc, char* argv[]) {
	signal(SIGSEGV, as_sig_handle_segv);
	signal(SIGTERM, as_sig_handle_term);

	fprintf(stdout, "\nAerospike act - device IO test\n");
	fprintf(stdout, "Copyright 2011 by Aerospike. All rights reserved.\n\n");

	if (! configure(argc, argv)) {
		exit(-1);
	}

	set_schedulers();
	srand(time(NULL));
//	rand_seed(g_rand_64_buffer);

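	// Stack-allocate the salter array (at least one element, even when no
	// write buffers are configured); create_salters() initializes it.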
	salter salters[g_num_write_buffers ? g_num_write_buffers : 1];

	g_salters = salters;

	if (! create_salters()) {
		exit(-1);
	}

	device devices[g_num_devices];
	readq readqs[g_num_queues];

	g_devices = devices;
	g_readqs = readqs;

	// TODO - 'salt' drive?

	g_p_large_block_read_histogram = histogram_create();
	g_p_large_block_write_histogram = histogram_create();
	g_p_raw_read_histogram = histogram_create();
	g_p_read_histogram = histogram_create();

	g_run_start_us = cf_getus();

	uint64_t run_stop_us = g_run_start_us + g_run_us;

	g_running = 1;

	for (int n = 0; n < g_num_devices; n++) {
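		// For each device: discover its size, create an fd queue and a
		// large-block read buffer, then start dedicated large-block read
		// and write threads.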
		device* p_device = &g_devices[n];

		p_device->name = g_device_names[n];
		p_device->p_fd_queue = cf_queue_create(sizeof(int), true);
		discover_num_blocks(p_device);
		create_large_block_read_buffer(p_device);
		p_device->p_raw_read_histogram = histogram_create();
		sprintf(p_device->histogram_tag, "%-18s", p_device->name);

		if (pthread_create(&p_device->large_block_read_thread, NULL,
				run_large_block_reads, (void*)p_device)) {
			fprintf(stdout, "ERROR: create large block read thread %d\n", n);
			exit(-1);
		}

		if (pthread_create(&p_device->large_block_write_thread, NULL,
				run_large_block_writes, (void*)p_device)) {
			fprintf(stdout, "ERROR: create write thread %d\n", n);
			exit(-1);
		}
	}

	for (int i = 0; i < g_num_queues; i++) {
		readq* p_readq = &g_readqs[i];

		p_readq->p_req_queue = cf_queue_create(sizeof(readreq*), true);
		p_readq->threads = malloc(sizeof(pthread_t) * g_threads_per_queue);

		for (int j = 0; j < g_threads_per_queue; j++) {
			if (pthread_create(&p_readq->threads[j], NULL, run_reads,
					(void*)p_readq->p_req_queue)) {
				fprintf(stdout, "ERROR: create read thread %d:%d\n", i, j);
				exit(-1);
			}
		}
	}

	pthread_t thr_add_readreqs;

	if (pthread_create(&thr_add_readreqs, NULL, run_add_readreqs, NULL)) {
		fprintf(stdout, "ERROR: create thread thr_add_readreqs\n");
		exit(-1);
	}

	fprintf(stdout, "\n");

	uint64_t now_us;
	uint64_t count = 0;

	while ((now_us = cf_getus()) < run_stop_us && g_running) {
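		// One iteration per report interval: sleep until the next interval
		// boundary (measured from run start, so reports don't drift), then
		// dump all histograms.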
		count++;

		int sleep_us = (int)
			((count * g_report_interval_us) - (now_us - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}

		fprintf(stdout, "After %" PRIu64 " sec:\n",
			(count * g_report_interval_us) / 1000000);

		fprintf(stdout, "read-reqs queued: %" PRIu64 "\n",
			cf_atomic_int_get(g_read_reqs_queued));

		histogram_dump(g_p_large_block_read_histogram,  "LARGE BLOCK READS ");
		histogram_dump(g_p_large_block_write_histogram, "LARGE BLOCK WRITES");
		histogram_dump(g_p_raw_read_histogram,          "RAW READS         ");

		for (int d = 0; d < g_num_devices; d++) {
			histogram_dump(g_devices[d].p_raw_read_histogram,
				g_devices[d].histogram_tag);
		}

		histogram_dump(g_p_read_histogram,              "READS             ");
		fprintf(stdout, "\n");
		fflush(stdout);
	}

	g_running = 0;

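	// Join all worker threads, then release queues, buffers, and histograms.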
	void* pv_value;

	pthread_join(thr_add_readreqs, &pv_value);

	for (int i = 0; i < g_num_queues; i++) {
		readq* p_readq = &g_readqs[i];

		for (int j = 0; j < g_threads_per_queue; j++) {
			pthread_join(p_readq->threads[j], &pv_value);
		}

		cf_queue_destroy(p_readq->p_req_queue);
		free(p_readq->threads);
	}

	for (int d = 0; d < g_num_devices; d++) {
		device* p_device = &g_devices[d];

		pthread_join(p_device->large_block_read_thread, &pv_value);
		pthread_join(p_device->large_block_write_thread, &pv_value);

		fd_close_all(p_device);
		cf_queue_destroy(p_device->p_fd_queue);
		free(p_device->p_large_block_read_buffer);
		free(p_device->p_raw_read_histogram);
	}

	free(g_p_large_block_read_histogram);
	free(g_p_large_block_write_histogram);
	free(g_p_raw_read_histogram);
	free(g_p_read_histogram);

	destroy_salters();

	return (0);
}
Example #2
File: act_storage.c  Project: aerospike/act
int
main(int argc, char* argv[])
{
	signal_setup();

	fprintf(stdout, "\nACT version %s\n", VERSION);
	fprintf(stdout, "Storage device IO test\n");
	fprintf(stdout, "Copyright 2018 by Aerospike. All rights reserved.\n\n");

	if (! storage_configure(argc, argv)) {
		exit(-1);
	}

	device devices[g_scfg.num_devices];
	queue* trans_qs[g_scfg.num_queues];

	g_devices = devices;
	g_trans_qs = trans_qs;

	histogram_scale scale =
			g_scfg.us_histograms ? HIST_MICROSECONDS : HIST_MILLISECONDS;

	if (! (g_large_block_read_hist = histogram_create(scale)) ||
		! (g_large_block_write_hist = histogram_create(scale)) ||
		! (g_raw_read_hist = histogram_create(scale)) ||
		! (g_read_hist = histogram_create(scale)) ||
		! (g_raw_write_hist = histogram_create(scale)) ||
		! (g_write_hist = histogram_create(scale))) {
		exit(-1);
	}

	for (uint32_t n = 0; n < g_scfg.num_devices; n++) {
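		// Per-device setup: optionally set the IO scheduler, create the fd
		// queue, discover the device, and create its raw-latency histograms.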
		device* dev = &g_devices[n];

		dev->name = (const char*)g_scfg.device_names[n];

		if (g_scfg.file_size == 0) { // normally 0
			set_scheduler(dev->name, g_scfg.scheduler_mode);
		}

		if (! (dev->fd_q = queue_create(sizeof(int), true)) ||
			! discover_device(dev) ||
			! (dev->raw_read_hist = histogram_create(scale)) ||
			! (dev->raw_write_hist = histogram_create(scale))) {
			exit(-1);
		}

		sprintf(dev->read_hist_tag, "%s-reads", dev->name);
		sprintf(dev->write_hist_tag, "%s-writes", dev->name);
	}

	rand_seed();

	g_run_start_us = get_us();

	uint64_t run_stop_us = g_run_start_us + g_scfg.run_us;

	g_running = true;

	if (g_scfg.write_reqs_per_sec != 0) {
		for (uint32_t n = 0; n < g_scfg.num_devices; n++) {
			device* dev = &g_devices[n];

			if (pthread_create(&dev->large_block_read_thread, NULL,
					run_large_block_reads, (void*)dev) != 0) {
				fprintf(stdout, "ERROR: create large op read thread\n");
				exit(-1);
			}

			if (pthread_create(&dev->large_block_write_thread, NULL,
					run_large_block_writes, (void*)dev) != 0) {
				fprintf(stdout, "ERROR: create large op write thread\n");
				exit(-1);
			}
		}
	}

	if (g_scfg.tomb_raider) {
		for (uint32_t n = 0; n < g_scfg.num_devices; n++) {
			device* dev = &g_devices[n];

			if (pthread_create(&dev->tomb_raider_thread, NULL,
					run_tomb_raider, (void*)dev) != 0) {
				fprintf(stdout, "ERROR: create tomb raider thread\n");
				exit(-1);
			}
		}
	}

	uint32_t n_trans_tids = g_scfg.num_queues * g_scfg.threads_per_queue;
	pthread_t trans_tids[n_trans_tids];

	for (uint32_t i = 0; i < g_scfg.num_queues; i++) {
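		// Each transaction queue is serviced by its own pool of
		// threads_per_queue threads running run_transactions.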
		if (! (g_trans_qs[i] = queue_create(sizeof(trans_req), true))) {
			exit(-1);
		}

		for (uint32_t j = 0; j < g_scfg.threads_per_queue; j++) {
			if (pthread_create(&trans_tids[(i * g_scfg.threads_per_queue) + j],
					NULL, run_transactions, (void*)g_trans_qs[i]) != 0) {
				fprintf(stdout, "ERROR: create transaction thread\n");
				exit(-1);
			}
		}
	}

	// Equivalent: g_scfg.internal_read_reqs_per_sec != 0.
	bool do_reads = g_scfg.read_reqs_per_sec != 0;

	pthread_t read_req_tids[g_scfg.read_req_threads];

	if (do_reads) {
		for (uint32_t k = 0; k < g_scfg.read_req_threads; k++) {
			if (pthread_create(&read_req_tids[k], NULL, run_generate_read_reqs,
					NULL) != 0) {
				fprintf(stdout, "ERROR: create read request thread\n");
				exit(-1);
			}
		}
	}

	// Equivalent: g_scfg.internal_write_reqs_per_sec != 0.
	bool do_commits = g_scfg.commit_to_device && g_scfg.write_reqs_per_sec != 0;

	pthread_t write_req_tids[g_scfg.write_req_threads];

	if (do_commits) {
		for (uint32_t k = 0; k < g_scfg.write_req_threads; k++) {
			if (pthread_create(&write_req_tids[k], NULL,
					run_generate_write_reqs, NULL) != 0) {
				fprintf(stdout, "ERROR: create write request thread\n");
				exit(-1);
			}
		}
	}

	fprintf(stdout, "\nHISTOGRAM NAMES\n");

	if (do_reads) {
		fprintf(stdout, "reads\n");
		fprintf(stdout, "device-reads\n");

		for (uint32_t d = 0; d < g_scfg.num_devices; d++) {
			fprintf(stdout, "%s\n", g_devices[d].read_hist_tag);
		}
	}

	if (g_scfg.write_reqs_per_sec != 0) {
		fprintf(stdout, "large-block-reads\n");
		fprintf(stdout, "large-block-writes\n");
	}

	if (do_commits) {
		fprintf(stdout, "writes\n");
		fprintf(stdout, "device-writes\n");

		for (uint32_t d = 0; d < g_scfg.num_devices; d++) {
			fprintf(stdout, "%s\n", g_devices[d].write_hist_tag);
		}
	}

	fprintf(stdout, "\n");

	uint64_t now_us = 0;
	uint64_t count = 0;

	while (g_running && (now_us = get_us()) < run_stop_us) {
		count++;

		int64_t sleep_us = (int64_t)
				((count * g_scfg.report_interval_us) -
						(now_us - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}

		fprintf(stdout, "after %" PRIu64 " sec:\n",
				(count * g_scfg.report_interval_us) / 1000000);

		fprintf(stdout, "requests-queued: %" PRIu32 "\n",
				atomic32_get(g_reqs_queued));

		if (do_reads) {
			histogram_dump(g_read_hist, "reads");
			histogram_dump(g_raw_read_hist, "device-reads");

			for (uint32_t d = 0; d < g_scfg.num_devices; d++) {
				histogram_dump(g_devices[d].raw_read_hist,
						g_devices[d].read_hist_tag);
			}
		}

		if (g_scfg.write_reqs_per_sec != 0) {
			histogram_dump(g_large_block_read_hist, "large-block-reads");
			histogram_dump(g_large_block_write_hist, "large-block-writes");
		}

		if (do_commits) {
			histogram_dump(g_write_hist, "writes");
			histogram_dump(g_raw_write_hist, "device-writes");

			for (uint32_t d = 0; d < g_scfg.num_devices; d++) {
				histogram_dump(g_devices[d].raw_write_hist,
						g_devices[d].write_hist_tag);
			}
		}

		fprintf(stdout, "\n");
		fflush(stdout);
	}

	g_running = false;

	if (do_reads) {
		for (uint32_t k = 0; k < g_scfg.read_req_threads; k++) {
			pthread_join(read_req_tids[k], NULL);
		}
	}

	if (do_commits) {
		for (uint32_t k = 0; k < g_scfg.write_req_threads; k++) {
			pthread_join(write_req_tids[k], NULL);
		}
	}

	for (uint32_t j = 0; j < n_trans_tids; j++) {
		pthread_join(trans_tids[j], NULL);
	}

	for (uint32_t i = 0; i < g_scfg.num_queues; i++) {
		queue_destroy(g_trans_qs[i]);
	}

	for (uint32_t d = 0; d < g_scfg.num_devices; d++) {
		device* dev = &g_devices[d];

		if (g_scfg.tomb_raider) {
			pthread_join(dev->tomb_raider_thread, NULL);
		}

		if (g_scfg.write_reqs_per_sec != 0) {
			pthread_join(dev->large_block_read_thread, NULL);
			pthread_join(dev->large_block_write_thread, NULL);
		}

		fd_close_all(dev);
		queue_destroy(dev->fd_q);
		free(dev->raw_read_hist);
		free(dev->raw_write_hist);
	}

	free(g_large_block_read_hist);
	free(g_large_block_write_hist);
	free(g_raw_read_hist);
	free(g_read_hist);
	free(g_raw_write_hist);
	free(g_write_hist);

	return 0;
}
Example #3
File: read.c  Project: jlgale/dogfs2
static void
read_dump_stats(void)
{
    log("Read sizes:");
    histogram_dump(&reads);
}
Example #4
File: aio.c  Project: sameerapadhye/act
int main(int argc, char* argv[]) {
	signal(SIGSEGV, as_sig_handle_segv);
	signal(SIGTERM, as_sig_handle_term);

	fprintf(stdout, "\nAerospike act - device IO test\n");
	fprintf(stdout, "Copyright 2011 by Aerospike. All rights reserved.\n\n");

	if (! configure(argc, argv)) {
		exit(-1);
	}

	set_schedulers();
	srand(time(NULL));
	//	rand_seed(g_rand_64_buffer);

	salter salters[g_num_write_buffers ? g_num_write_buffers : 1];

	g_salters = salters;

	if (! create_salters()) {
		exit(-1);
	}

	device devices[g_num_devices];
	g_devices = devices;

	g_p_large_block_read_histogram = histogram_create();
	g_p_large_block_write_histogram = histogram_create();
	g_p_raw_read_histogram = histogram_create();
	g_p_read_histogram = histogram_create();

	g_run_start_ms = cf_getms();

	uint64_t run_stop_ms = g_run_start_ms + g_run_ms;

	g_running = 1;

	for (int n = 0; n < g_num_devices; n++) {
		device* p_device = &g_devices[n];
		p_device->name = g_device_names[n];
		p_device->p_fd_queue = cf_queue_create(sizeof(int), true);
		discover_num_blocks(p_device);
		create_large_block_read_buffer(p_device);
		p_device->p_raw_read_histogram = histogram_create();
		sprintf(p_device->histogram_tag, "%-18s", p_device->name);

		if (pthread_create(&p_device->large_block_read_thread, NULL,
				run_large_block_reads, (void*)p_device)) {
			fprintf(stdout, "Error: create large block read thread %d\n", n);
			exit(-1);
		}

		if (pthread_create(&p_device->large_block_write_thread, NULL,
				run_large_block_writes, (void*)p_device)) {
			fprintf(stdout, "Error: create write thread %d\n", n);
			exit(-1);
		}
	}

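	// Set up the kernel AIO context; MAXEVENTS caps the number of
	// concurrent in-flight requests for this context.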
	aio_context_t aio_context = 0;

	if (io_setup(MAXEVENTS, &aio_context) != 0) {
		fprintf(stdout, "Error: AIO context not set up\n");
		exit(-1);
	}
	create_async_info_queue();

	/* read events generating thread */
	pthread_t read_generator;

	if (pthread_create(&read_generator, NULL, &generate_async_reads,
			(void*)&aio_context)) {
		fprintf(stdout, "Error: create read generator thread\n");
		exit(-1);
	}

	/* Create the worker threads */
	pthread_t workers[g_worker_threads];

	for (int j = 0; j < g_worker_threads; j++) {
		if (pthread_create(&workers[j], NULL, &worker_func,
				(void*)&aio_context)) {
			fprintf(stdout, "Error: creating worker thread %d failed\n", j);
			exit(-1);
		}
	}
 
	fprintf(stdout, "\n");
	uint64_t now_ms;
	uint64_t time_count = 0;
	int nanosleep_ret = -1;
	struct timespec initial, remaining;

	while ((now_ms = cf_getms()) < run_stop_ms && g_running) {
		time_count++;
		int sleep_ms = (int)
			((time_count * g_report_interval_ms) - (now_ms - g_run_start_ms));
		if (sleep_ms > 0) {
			initial.tv_sec = sleep_ms / 1000;
			initial.tv_nsec = (sleep_ms % 1000) * 1000000;
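			// If a signal interrupts nanosleep(), retry with the remaining
			// time it reports, so the full interval is slept.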
		retry:
			memset(&remaining, 0, sizeof(remaining));
			nanosleep_ret = nanosleep(&initial, &remaining);
			if (nanosleep_ret == -1 && errno == EINTR)
			{
				/* Interrupted by a signal */
				initial.tv_sec = remaining.tv_sec;
				initial.tv_nsec = remaining.tv_nsec;
				goto retry;
			}
		}

		fprintf(stdout, "After %" PRIu64 " sec:\n",
				(time_count * g_report_interval_ms) / 1000);

		fprintf(stdout, "read-reqs queued: %" PRIu64 "\n",
				cf_atomic_int_get(g_read_reqs_queued));

		histogram_dump(g_p_large_block_read_histogram,  "LARGE BLOCK READS ");
		histogram_dump(g_p_large_block_write_histogram, "LARGE BLOCK WRITES");
		histogram_dump(g_p_raw_read_histogram,          "RAW READS         ");
		for (int d = 0; d < g_num_devices; d++) {
			histogram_dump(g_devices[d].p_raw_read_histogram,
					g_devices[d].histogram_tag);
		}

		histogram_dump(g_p_read_histogram,              "READS             ");

		fprintf(stdout, "\n");
		fflush(stdout);
	}
	fprintf(stdout, "\nTEST COMPLETED\n");

	g_running = 0;

	// TODO - aio_destroy?

	/* Freeing resources used by async */
	void* ret_value;
	for (int i = 0; i < g_worker_threads; i++) {
		pthread_join(workers[i], &ret_value);
	}
	destroy_async_info_queue();

	for (int d = 0; d < g_num_devices; d++) {
		device* p_device = &g_devices[d];

		pthread_join(p_device->large_block_read_thread, &ret_value);
		pthread_join(p_device->large_block_write_thread, &ret_value);

		fd_close_all(p_device);
		cf_queue_destroy(p_device->p_fd_queue);
		free(p_device->p_large_block_read_buffer);
		free(p_device->p_raw_read_histogram);
	}

	free(g_p_large_block_read_histogram);
	free(g_p_large_block_write_histogram);
	free(g_p_raw_read_histogram);
	free(g_p_read_histogram);

	destroy_salters();

	return (0);
}