void cf_queue_priority_destroy(cf_queue_priority *q)
{
    cf_queue_destroy(q->high_q);
    cf_queue_destroy(q->medium_q);
    cf_queue_destroy(q->low_q);
    if (q->threadsafe) {
        pthread_mutex_destroy(&q->LOCK);
        pthread_cond_destroy(&q->CV);
    }
    cf_free(q);
}
Example #2
void cf_queue_priority_destroy(cf_queue_priority *q) {
	cf_queue_destroy(q->high_q);
	cf_queue_destroy(q->medium_q);
	cf_queue_destroy(q->low_q);
	if (q->threadsafe) {
#ifdef EXTERNAL_LOCKS
		cf_hooked_mutex_free(q->LOCK);
#else
		pthread_mutex_destroy(&q->LOCK);
		pthread_cond_destroy(&q->CV);
#endif // EXTERNAL_LOCKS
	}
	free(q);
}
cf_queue_priority *cf_queue_priority_create(size_t element_sz, bool threadsafe)
{
	cf_queue_priority *q = (cf_queue_priority*)cf_malloc(sizeof(cf_queue_priority));

	if (! q) {
		return NULL;
	}

	q->threadsafe = threadsafe;

	if (! (q->low_q = cf_queue_create(element_sz, false))) {
		goto Fail1;
	}

	if (! (q->medium_q = cf_queue_create(element_sz, false))) {
		goto Fail2;
	}

	if (! (q->high_q = cf_queue_create(element_sz, false))) {
		goto Fail3;
	}

	if (! threadsafe) {
		return q;
	}

	if (0 != pthread_mutex_init(&q->LOCK, NULL)) {
		goto Fail4;
	}

	if (0 != pthread_cond_init(&q->CV, NULL)) {
		goto Fail5;
	}

	return q;

Fail5:
	pthread_mutex_destroy(&q->LOCK);
Fail4:
	cf_queue_destroy(q->high_q);
Fail3:
	cf_queue_destroy(q->medium_q);
Fail2:
	cf_queue_destroy(q->low_q);
Fail1:
	cf_free(q);

	return NULL;
}
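For context, a minimal usage sketch tying the create and destroy examples above together. It assumes the companion functions cf_queue_priority_push() and cf_queue_priority_pop() and the CF_QUEUE_PRIORITY_* constants declared in citrusleaf's cf_queue.h; treat it as a sketch, not part of the original snippets.

static int priority_queue_demo(void)
{
	// Thread-safe priority queue of ints.
	cf_queue_priority *q = cf_queue_priority_create(sizeof(int), true);

	if (! q) {
		return -1;
	}

	int low = 1, high = 2;

	// Assumed signatures: cf_queue_priority_push(q, ptr, pri).
	cf_queue_priority_push(q, &low, CF_QUEUE_PRIORITY_LOW);
	cf_queue_priority_push(q, &high, CF_QUEUE_PRIORITY_HIGH);

	int v;

	// Higher priority elements pop first - this should yield 2.
	cf_queue_priority_pop(q, &v, CF_QUEUE_NOWAIT);

	cf_queue_priority_destroy(q);

	return v == 2 ? 0 : -1;
}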
Example #4
void
cl_cluster_scan_shutdown(cl_cluster* asc)
{
	// Check whether we ever (lazily) initialized scan machinery.
	if (cf_atomic32_get(asc->scan_initialized) == 0 && ! asc->scan_q) {
		return;
	}

	// This tells the worker threads to stop. We do this (instead of using a
	// "running" flag) to allow the workers to "wait forever" on processing the
	// work dispatch queue, which has minimum impact when the queue is empty.
	// This also means all queued requests get processed when shutting down.
	for (int i = 0; i < NUM_SCAN_THREADS; i++) {
		cl_scan_task task;
		task.asc = NULL;
		cf_queue_push(asc->scan_q, &task);
	}

	for (int i = 0; i < NUM_SCAN_THREADS; i++) {
		pthread_join(asc->scan_threads[i], NULL);
	}

	cf_queue_destroy(asc->scan_q);
	asc->scan_q = NULL;
	cf_atomic32_set(&asc->scan_initialized, 0);
}
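The shutdown above works only because each scan worker treats a task whose asc pointer is NULL as a stop signal. A hedged sketch of such a worker loop follows; the real worker performs the actual scan where the comment indicates.

static void *scan_worker_sketch(void *arg)
{
	cl_cluster *asc = (cl_cluster*)arg;

	for (;;) {
		cl_scan_task task;

		// Block indefinitely; costs nothing while the queue is empty.
		cf_queue_pop(asc->scan_q, &task, CF_QUEUE_FOREVER);

		// A NULL cluster pointer is the shutdown sentinel pushed above.
		if (! task.asc) {
			break;
		}

		// ... execute the scan described by task ...
	}

	return NULL;
}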
Example #5
//
// Close async worker threads gracefully.
//
void
citrusleaf_async_shutdown()
{
	if (g_cl_async_q == 0)
		return;

	/*
	 * If a process forks, the threads of the parent are not duplicated in
	 * the child. citrusleaf_init() records the pid (g_init_pid) of the
	 * process that spawned the background threads; a process that did not
	 * spawn them must not call pthread_join() on threads that do not exist
	 * in it.
	 */
	if(g_init_pid == getpid()) {
		// Send shutdown message to each worker thread.
		cl_async_work *workitem = malloc(sizeof(cl_async_work));
		memset(workitem, 0, sizeof(cl_async_work));
		workitem->fd = -1;

		uint i;

		for (i = 0; i < g_async_num_threads; i++) {
			cf_queue_push(g_cl_async_q, &workitem);
		}

		for (i = 0; i < g_async_num_threads; i++) {
			pthread_join(g_async_reciever[i], NULL);
		}

		free(workitem);
		cf_queue_destroy(g_cl_async_q);
		g_cl_async_q = 0;
	}
}
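The comment above alludes to init-side bookkeeping. A hypothetical sketch of it, reusing the same global names, might look like this (the actual citrusleaf_init() does more; getpid() is from <unistd.h>):

static pid_t g_init_pid;

void citrusleaf_async_init_sketch(void)
{
	// Record which process spawned the background threads. A forked
	// child inherits the globals but not the threads, so it must not
	// pthread_join() them in citrusleaf_async_shutdown().
	g_init_pid = getpid();

	// ... create g_cl_async_q and spawn the g_async_reciever[] threads ...
}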
Example #6
cf_queue_priority * cf_queue_priority_create(size_t elementsz, bool threadsafe) {
	cf_queue_priority *q = (cf_queue_priority*)malloc(sizeof(cf_queue_priority));
	if (!q)	return(0);
	
	q->threadsafe = threadsafe;
	q->low_q = cf_queue_create(elementsz, false);
	if (!q->low_q) 		goto Fail1;
	q->medium_q = cf_queue_create(elementsz, false);
	if (!q->medium_q)	goto Fail2;
	q->high_q = cf_queue_create(elementsz, false);
	if (!q->high_q)		goto Fail3;
	
	if (threadsafe == false)
		return(q);
#ifdef EXTERNAL_LOCKS
	q->LOCK = cf_hooked_mutex_alloc();
	// On allocation failure there is no lock to free - skip to Fail4.
	if (! q->LOCK) goto Fail4;
#else
	if (0 != pthread_mutex_init(&q->LOCK, NULL))
		goto Fail4;

	if (0 != pthread_cond_init(&q->CV, NULL))
		goto Fail5;
#endif // EXTERNAL_LOCKS

	return(q);

Fail5:
#ifndef EXTERNAL_LOCKS
	pthread_mutex_destroy(&q->LOCK);
#endif // EXTERNAL_LOCKS
Fail4:
	cf_queue_destroy(q->high_q);
Fail3:	
	cf_queue_destroy(q->medium_q);
Fail2:	
	cf_queue_destroy(q->low_q);
Fail1:	
	free(q);
	return(0);
}
Example #7
cf_queue_priority *
cf_queue_priority_create(size_t elementsz, bool threadsafe)
{
    cf_queue_priority *q = malloc(sizeof(cf_queue_priority));
    if (!q)	return(0);

    q->threadsafe = threadsafe;
    q->low_q = cf_queue_create(elementsz, false);
    if (!q->low_q) 		goto Fail1;
    q->medium_q = cf_queue_create(elementsz, false);
    if (!q->medium_q)	goto Fail2;
    q->high_q = cf_queue_create(elementsz, false);
    if (!q->high_q)		goto Fail3;

    if (threadsafe == false)
        return(q);

    if (0 != pthread_mutex_init(&q->LOCK, NULL))
        goto Fail4;

    if (0 != pthread_cond_init(&q->CV, NULL))
        goto Fail5;

    return(q);

Fail5:
    pthread_mutex_destroy(&q->LOCK);
Fail4:
    cf_queue_destroy(q->high_q);
Fail3:
    cf_queue_destroy(q->medium_q);
Fail2:
    cf_queue_destroy(q->low_q);
Fail1:
    free(q);
    return(0);
}
Example #8
void
as_node_destroy(as_node* node)
{
	// Drain out the queue and close the FDs
	int rv;
	do {
		int	fd;
		rv = cf_queue_pop(node->conn_q, &fd, CF_QUEUE_NOWAIT);
		if (rv == CF_QUEUE_OK)
			cf_close(fd);
	} while (rv == CF_QUEUE_OK);
	
	/*
	 do {
	 int	fd;
	 rv = cf_queue_pop(node->conn_q_asyncfd, &fd, CF_QUEUE_NOWAIT);
	 if (rv == CF_QUEUE_OK)
	 cf_close(fd);
	 } while (rv == CF_QUEUE_OK);
	 */
	
	/*
	 do {
	 //When we reach this point, ideally there should not be any workitems.
	 cl_async_work *aw;
	 rv = cf_queue_pop(node->asyncwork_q, &aw, CF_QUEUE_NOWAIT);
	 if (rv == CF_QUEUE_OK) {
	 free(aw);
	 }
	 } while (rv == CF_QUEUE_OK);
	 
	 //We want to delete all the workitems of this node
	 if (g_cl_async_hashtab) {
	 shash_reduce_delete(g_cl_async_hashtab, cl_del_node_asyncworkitems, node);
	 }
	 */
	
	as_vector_destroy(&node->addresses);
	cf_queue_destroy(node->conn_q);
	//cf_queue_destroy(node->conn_q_asyncfd);
	//cf_queue_destroy(node->asyncwork_q);
	
	if (node->info_fd >= 0) {
		cf_close(node->info_fd);
	}

	cf_free(node);
}
Example #9
static void create_async_info_queue()
{
	int i;
	uintptr_t info;
	as_async_info_t *temp_info;
	async_info_queue = cf_queue_create(sizeof(uintptr_t), true);

	async_info_array = (as_async_info_t*)malloc(MAX_READ_REQS_QUEUED * sizeof(as_async_info_t));
	if (async_info_array == NULL)
	{
		fprintf(stdout, "Error: malloc of async info structs failed. Exiting.\n");
		cf_queue_destroy(async_info_queue);
		exit(-1);
	}

	for (i = 0; i < MAX_READ_REQS_QUEUED; i++)
	{
		temp_info = async_info_array + i;
		info = (uintptr_t)temp_info;
		cf_queue_push(async_info_queue, (void*)&info);
	}
}
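The queue built above serves as a free list of pre-allocated as_async_info_t slots: pointers are pushed at startup, borrowed per request, and returned when the request completes. A hedged sketch of the borrow/return pair (the real call sites may differ):

static as_async_info_t *borrow_async_info(void)
{
	uintptr_t p;

	// Non-blocking pop; failure means all MAX_READ_REQS_QUEUED slots are in use.
	if (cf_queue_pop(async_info_queue, &p, CF_QUEUE_NOWAIT) != CF_QUEUE_OK) {
		return NULL;
	}

	return (as_async_info_t*)p;
}

static void return_async_info(as_async_info_t *info)
{
	uintptr_t p = (uintptr_t)info;

	// Hand the slot back to the free list.
	cf_queue_push(async_info_queue, (void*)&p);
}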
Example #10
int
cf_queue_test_1()
{
    pthread_t write_th;
    pthread_t read_th;
    cf_queue *q;

    q = cf_queue_create(sizeof(int), true);

    pthread_create(&write_th, 0, cf_queue_test_1_write, q);
    pthread_create(&read_th, 0, cf_queue_test_1_read, q);

    void *th_return;

    if (0 != pthread_join(write_th, &th_return)) {
        fprintf(stderr, "queue test 1: could not join write thread %d\n", errno);
        return(-1);
    }

    if (0 != th_return) {
        fprintf(stderr, "queue test 1: write thread returned error %p\n", th_return);
        return(-1);
    }

    if (0 != pthread_join(read_th, &th_return)) {
        fprintf(stderr, "queue test 1: could not join read thread %d\n", errno);
        return(-1);
    }

    if (0 != th_return) {
        fprintf(stderr, "queue test 1: read thread returned error %p\n", th_return);
        return(-1);
    }

    cf_queue_destroy(q);

    return(0);
}
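The writer and reader thread functions used by this test are not shown on this page. A plausible sketch of what they do, assuming a hypothetical element count TEST1_COUNT and a NULL-on-success return convention matching the checks above:

#define TEST1_COUNT 10000  // hypothetical count, not from the original test

void *cf_queue_test_1_write(void *arg)
{
    cf_queue *q = (cf_queue*)arg;

    for (int i = 0; i < TEST1_COUNT; i++) {
        // Push copies the int out of the loop variable immediately.
        if (0 != cf_queue_push(q, &i)) {
            return (void*)-1;
        }
    }

    return NULL;
}

void *cf_queue_test_1_read(void *arg)
{
    cf_queue *q = (cf_queue*)arg;

    for (int i = 0; i < TEST1_COUNT; i++) {
        int v;

        // Block until the writer catches up, then verify FIFO order.
        if (CF_QUEUE_OK != cf_queue_pop(q, &v, CF_QUEUE_FOREVER) || v != i) {
            return (void*)-1;
        }
    }

    return NULL;
}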
Example #11
void cl_scan_destroy(cl_scan *scan) {

    if ( scan == NULL ) return;

    cl_scan_udf_destroy(&scan->udf);
    if (scan->ns)      free(scan->ns);
    if (scan->setname) free(scan->setname);

    if ( scan->res_streamq ) {
        as_val *val = NULL;
        while (CF_QUEUE_OK == cf_queue_pop (scan->res_streamq, 
                    &val, CF_QUEUE_NOWAIT)) {
            as_val_destroy(val);
            val = NULL;
        }

        cf_queue_destroy(scan->res_streamq);
        scan->res_streamq = NULL;
    }

    free(scan);
    scan = NULL;
}
Example #12
cf_vector * cl_scan_execute(cl_cluster * cluster, const cl_scan * scan, char * node_name, cl_rv * res, int (* callback)(as_val *, void *), void * udata) {

    cl_rv           rc                          = CITRUSLEAF_OK;
    uint8_t         wr_stack_buf[STACK_BUF_SZ]  = { 0 };
    uint8_t *       wr_buf                      = wr_stack_buf;
    size_t          wr_buf_sz                   = sizeof(wr_stack_buf);
    int             node_count                  = 0;
    cl_node_response  response;
    rc = scan_compile(scan, &wr_buf, &wr_buf_sz);

    if ( rc != CITRUSLEAF_OK ) {
        LOG("[ERROR] cl_scan_execute: scan compile failed: \n");
        *res = rc;
        return NULL;
    }

    // Setup worker
    cl_scan_task task = {
        .asc      = cluster,
        .ns       = scan->ns,
        .scan_buf = wr_buf,
        .scan_sz  = wr_buf_sz,
        .udata    = udata,
        .callback = callback,
        .job_id   = scan->job_id,
        .type     = scan->udf.type,
    };

    task.complete_q      = cf_queue_create(sizeof(cl_node_response), true);
    cf_vector * result_v = NULL;

    // If node_name is not null, we are executing scan on a particular node
    if (node_name) {
        // Copy the node name into the task and push it onto the global scan queue - one task per node.
        strcpy(task.node_name, node_name);
        cf_queue_push(cluster->scan_q, &task);
        node_count = 1;
    }
    else {
        // Node name is NULL, we have to scan all nodes 
        char *node_names    = NULL;    

        // Get a list of the node names, so we can send work to each node
        cl_cluster_get_node_names(cluster, &node_count, &node_names);
        if ( node_count == 0 ) {
            LOG("[ERROR] cl_scan_execute: don't have any nodes?\n");
            *res = CITRUSLEAF_FAIL_CLIENT;
            goto Cleanup;
        }

        // Dispatch work to the worker queue so the per-node transactions run in parallel
        // NOTE: if a new node is introduced in the middle, it is NOT taken care of
        node_name = node_names;
        for ( int i=0; i < node_count; i++ ) {
            // fill in per-request specifics
            strcpy(task.node_name, node_name);
            cf_queue_push(cluster->scan_q, &task);
            node_name += NODE_NAME_SIZE;                    
        }
        free(node_names);
        node_names = NULL;
    }

    // Wait for the work to complete from all the nodes.
    // For every node, fill in the return value in the result vector
    result_v = cf_vector_create(sizeof(cl_node_response), node_count, 0);
    for ( int i=0; i < node_count; i++ ) {
        // Pop the response structure
        cf_queue_pop(task.complete_q, &response, CF_QUEUE_FOREVER);
        cf_vector_append(result_v, &response);
    }

Cleanup:
    if ( wr_buf && (wr_buf != wr_stack_buf) ) { 
        free(wr_buf); 
        wr_buf = 0;
    }
    cf_queue_destroy(task.complete_q);

    return result_v;
}

/**
 * Allocates and initializes a new cl_scan.
 */
cl_scan * cl_scan_new(const char * ns, const char * setname, uint64_t *job_id) {
    cl_scan * scan = (cl_scan*) malloc(sizeof(cl_scan));

    if (! scan) {
        return NULL; // avoid passing NULL to memset()
    }

    memset(scan, 0, sizeof(cl_scan));
    return cl_scan_init(scan, ns, setname, job_id);
}
Example #13
File: act.c Project: aanguss/act
int main(int argc, char* argv[]) {
	signal(SIGSEGV, as_sig_handle_segv);
	signal(SIGTERM, as_sig_handle_term);

	fprintf(stdout, "\nAerospike act - device IO test\n");
	fprintf(stdout, "Copyright 2011 by Aerospike. All rights reserved.\n\n");

	if (! configure(argc, argv)) {
		exit(-1);
	}

	set_schedulers();
	srand(time(NULL));
//	rand_seed(g_rand_64_buffer);

	salter salters[g_num_write_buffers ? g_num_write_buffers : 1];

	g_salters = salters;

	if (! create_salters()) {
		exit(-1);
	}

	device devices[g_num_devices];
	readq readqs[g_num_queues];

	g_devices = devices;
	g_readqs = readqs;

	// TODO - 'salt' drive?

	g_p_large_block_read_histogram = histogram_create();
	g_p_large_block_write_histogram = histogram_create();
	g_p_raw_read_histogram = histogram_create();
	g_p_read_histogram = histogram_create();

	g_run_start_us = cf_getus();

	uint64_t run_stop_us = g_run_start_us + g_run_us;

	g_running = 1;

	for (int n = 0; n < g_num_devices; n++) {
		device* p_device = &g_devices[n];

		p_device->name = g_device_names[n];
		p_device->p_fd_queue = cf_queue_create(sizeof(int), true);
		discover_num_blocks(p_device);
		create_large_block_read_buffer(p_device);
		p_device->p_raw_read_histogram = histogram_create();
		sprintf(p_device->histogram_tag, "%-18s", p_device->name);

		if (pthread_create(&p_device->large_block_read_thread, NULL,
				run_large_block_reads, (void*)p_device)) {
			fprintf(stdout, "ERROR: create large block read thread %d\n", n);
			exit(-1);
		}

		if (pthread_create(&p_device->large_block_write_thread, NULL,
				run_large_block_writes, (void*)p_device)) {
			fprintf(stdout, "ERROR: create write thread %d\n", n);
			exit(-1);
		}
	}

	for (int i = 0; i < g_num_queues; i++) {
		readq* p_readq = &g_readqs[i];

		p_readq->p_req_queue = cf_queue_create(sizeof(readreq*), true);
		p_readq->threads = malloc(sizeof(pthread_t) * g_threads_per_queue);

		for (int j = 0; j < g_threads_per_queue; j++) {
			if (pthread_create(&p_readq->threads[j], NULL, run_reads,
					(void*)p_readq->p_req_queue)) {
				fprintf(stdout, "ERROR: create read thread %d:%d\n", i, j);
				exit(-1);
			}
		}
	}

	pthread_t thr_add_readreqs;

	if (pthread_create(&thr_add_readreqs, NULL, run_add_readreqs, NULL)) {
		fprintf(stdout, "ERROR: create thread thr_add_readreqs\n");
		exit(-1);
	}

	fprintf(stdout, "\n");

	uint64_t now_us;
	uint64_t count = 0;

	while ((now_us = cf_getus()) < run_stop_us && g_running) {	
		count++;

		int sleep_us = (int)
			((count * g_report_interval_us) - (now_us - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}

		fprintf(stdout, "After %" PRIu64 " sec:\n",
			(count * g_report_interval_us) / 1000000);

		fprintf(stdout, "read-reqs queued: %" PRIu64 "\n",
			cf_atomic_int_get(g_read_reqs_queued));

		histogram_dump(g_p_large_block_read_histogram,  "LARGE BLOCK READS ");
		histogram_dump(g_p_large_block_write_histogram, "LARGE BLOCK WRITES");
		histogram_dump(g_p_raw_read_histogram,          "RAW READS         ");

		for (int d = 0; d < g_num_devices; d++) {			
			histogram_dump(g_devices[d].p_raw_read_histogram,
				g_devices[d].histogram_tag);	
		}

		histogram_dump(g_p_read_histogram,              "READS             ");
		fprintf(stdout, "\n");
		fflush(stdout);
	}

	g_running = 0;

	void* pv_value;

	pthread_join(thr_add_readreqs, &pv_value);

	for (int i = 0; i < g_num_queues; i++) {
		readq* p_readq = &g_readqs[i];

		for (int j = 0; j < g_threads_per_queue; j++) {
			pthread_join(p_readq->threads[j], &pv_value);
		}

		cf_queue_destroy(p_readq->p_req_queue);
		free(p_readq->threads);
	}

	for (int d = 0; d < g_num_devices; d++) {
		device* p_device = &g_devices[d];

		pthread_join(p_device->large_block_read_thread, &pv_value);
		pthread_join(p_device->large_block_write_thread, &pv_value);

		fd_close_all(p_device);
		cf_queue_destroy(p_device->p_fd_queue);
		free(p_device->p_large_block_read_buffer);
		free(p_device->p_raw_read_histogram);
	}

	free(g_p_large_block_read_histogram);
	free(g_p_large_block_write_histogram);
	free(g_p_raw_read_histogram);
	free(g_p_read_histogram);

	destroy_salters();

	return (0);
}
Example #14
static as_status
as_scan_generic(
	aerospike* as, as_error* err, const as_policy_scan* policy, const as_scan* scan,
	aerospike_scan_foreach_callback callback, void* udata, uint64_t* task_id_ptr)
{
	as_error_reset(err);
	
	if (! policy) {
		policy = &as->config.policies.scan;
	}
	
	as_cluster* cluster = as->cluster;
	as_nodes* nodes = as_nodes_reserve(cluster);
	uint32_t n_nodes = nodes->size;
	
	if (n_nodes == 0) {
		as_nodes_release(nodes);
		return as_error_set_message(err, AEROSPIKE_ERR_SERVER, "Scan command failed because cluster is empty.");
	}
	
	// Reserve each node in cluster.
	for (uint32_t i = 0; i < n_nodes; i++) {
		as_node_reserve(nodes->array[i]);
	}
	
	uint64_t task_id;
	if (task_id_ptr) {
		if (*task_id_ptr == 0) {
			*task_id_ptr = cf_get_rand64() / 2;
		}
		task_id = *task_id_ptr;
	}
	else {
		task_id = cf_get_rand64() / 2;
	}

	// Create scan command
	as_buffer argbuffer;
	uint16_t n_fields = 0;
	size_t size = as_scan_command_size(scan, &n_fields, &argbuffer);
	uint8_t* cmd = as_command_init(size);
	size = as_scan_command_init(cmd, policy, scan, task_id, n_fields, &argbuffer);
	
	// Initialize task.
	uint32_t error_mutex = 0;
	as_scan_task task;
	task.cluster = as->cluster;
	task.policy = policy;
	task.scan = scan;
	task.callback = callback;
	task.udata = udata;
	task.err = err;
	task.error_mutex = &error_mutex;
	task.task_id = task_id;
	task.cmd = cmd;
	task.cmd_size = size;
	
	as_status status = AEROSPIKE_OK;
	
	if (scan->concurrent) {
		uint32_t n_wait_nodes = n_nodes;
		task.complete_q = cf_queue_create(sizeof(as_scan_complete_task), true);

		// Run node scans in parallel.
		for (uint32_t i = 0; i < n_nodes; i++) {
			// Stack allocate task for each node.  It should be fine since the task
			// only needs to be valid within this function.
			as_scan_task* task_node = alloca(sizeof(as_scan_task));
			memcpy(task_node, &task, sizeof(as_scan_task));
			task_node->node = nodes->array[i];
			
			int rc = as_thread_pool_queue_task(&cluster->thread_pool, as_scan_worker, task_node);
			
			if (rc) {
				// Thread could not be added. Abort entire scan.
				if (ck_pr_fas_32(task.error_mutex, 1) == 0) {
					status = as_error_update(task.err, AEROSPIKE_ERR_CLIENT, "Failed to add scan thread: %d", rc);
				}
				
				// Reset node count to threads that were run.
				n_wait_nodes = i;
				break;
			}
		}

		// Wait for tasks to complete.
		for (uint32_t i = 0; i < n_wait_nodes; i++) {
			as_scan_complete_task complete;
			cf_queue_pop(task.complete_q, &complete, CF_QUEUE_FOREVER);
			
			if (complete.result != AEROSPIKE_OK && status == AEROSPIKE_OK) {
				status = complete.result;
			}
		}
		
		// Release temporary queue.
		cf_queue_destroy(task.complete_q);
	}
	else {
		task.complete_q = 0;
		
		// Run node scans in series.
		for (uint32_t i = 0; i < n_nodes && status == AEROSPIKE_OK; i++) {
			task.node = nodes->array[i];
			status = as_scan_command_execute(&task);
		}
	}
	
	// Release each node in cluster.
	for (uint32_t i = 0; i < n_nodes; i++) {
		as_node_release(nodes->array[i]);
	}
	
	// Release nodes array.
	as_nodes_release(nodes);

	// Free command memory.
	as_command_free(cmd, size);

	// If user aborts query, command is considered successful.
	if (status == AEROSPIKE_ERR_CLIENT_ABORT) {
		status = AEROSPIKE_OK;
	}

	// If completely successful, make the callback that signals completion.
	if (callback && status == AEROSPIKE_OK) {
		callback(NULL, udata);
	}
	return status;
}
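The concurrent branch above is a fan-out/fan-in pattern: every successfully queued worker must push exactly one as_scan_complete_task onto complete_q, success or failure, so the dispatcher's pop loop counts down correctly. A hedged sketch of the worker-side completion push (fields other than result are assumptions; the real as_scan_worker also runs the node scan itself):

static void as_scan_worker_sketch(void* udata)
{
	as_scan_task* task = (as_scan_task*)udata;

	// ... execute the scan command against task->node ...
	as_status result = AEROSPIKE_OK;  // outcome of this node's scan

	// Fan-in: always report completion, even on failure, so the
	// dispatcher's cf_queue_pop() loop terminates.
	as_scan_complete_task complete;
	complete.node = task->node;        // assumed field
	complete.task_id = task->task_id;  // assumed field
	complete.result = result;

	cf_queue_push(task->complete_q, &complete);
}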
Example #15
int main(int argc, char* argv[]) {
	signal(SIGSEGV, as_sig_handle_segv);
	signal(SIGTERM, as_sig_handle_term);
	

	fprintf(stdout, "\nAerospike act - device IO test\n");
	fprintf(stdout, "Copyright 2011 by Aerospike. All rights reserved.\n\n");

	if (! configure(argc, argv)) {
		exit(-1);
	}

	set_schedulers();
	srand(time(NULL));
	//	rand_seed(g_rand_64_buffer);

	salter salters[g_num_write_buffers ? g_num_write_buffers : 1];

	g_salters = salters;

	if (! create_salters()) {
		exit(-1);
	}

	device devices[g_num_devices];
	g_devices = devices;

	g_p_large_block_read_histogram = histogram_create();
	g_p_large_block_write_histogram = histogram_create();
	g_p_raw_read_histogram = histogram_create();
	g_p_read_histogram = histogram_create();

	g_run_start_ms = cf_getms();

	uint64_t run_stop_ms = g_run_start_ms + g_run_ms;

	g_running = 1;
	int n;
	for (n = 0; n < g_num_devices; n++) 
	{
		device* p_device = &g_devices[n];
		p_device->name = g_device_names[n];
		p_device->p_fd_queue = cf_queue_create(sizeof(int), true);
		discover_num_blocks(p_device);
		create_large_block_read_buffer(p_device);
		p_device->p_raw_read_histogram = histogram_create();
		sprintf(p_device->histogram_tag, "%-18s", p_device->name);

		if (pthread_create(&p_device->large_block_read_thread, NULL,
					run_large_block_reads, (void*)p_device)) 
		{
			fprintf(stdout, "Error: create large block read thread %d\n", n);
			exit(-1);
		}

		if (pthread_create(&p_device->large_block_write_thread, NULL,
					run_large_block_writes, (void*)p_device)) 
		{
			fprintf(stdout, "Error: create write thread %d\n", n);
			exit(-1);
		}

	}

	aio_context_t aio_context = 0;
	if (io_setup(MAXEVENTS, &aio_context) != 0)
	{
		fprintf(stdout, "Error: AIO context not set up\n");
		exit(-1);
	}
	create_async_info_queue();

	/* read events generating thread */
	pthread_t read_generator;
	if (pthread_create(&read_generator, NULL, &generate_async_reads, (void*)&aio_context)) 
	{
		fprintf(stdout, "Error: create read generator thread\n");
		exit(-1);
	}
	
	/* Create the worker threads */
	pthread_t workers[g_worker_threads];
	int j;
	for (j = 0; j < g_worker_threads; j++) 
	{ 
		if (pthread_create(&workers[j], NULL, &worker_func, (void *)(&aio_context)))
		{
			fprintf(stdout, "Error: creating worker thread %d failed\n", j);
			exit(-1);
		}	
	}
 
	fprintf(stdout, "\n");
	uint64_t now_ms;
	uint64_t time_count = 0;
	int nanosleep_ret = -1;
	struct timespec initial, remaining;
	while ((now_ms = cf_getms()) < run_stop_ms && g_running) 
	{	
		time_count++;
		int sleep_ms = (int)
			((time_count * g_report_interval_ms) - (now_ms - g_run_start_ms));
		if (sleep_ms > 0) 
		{
			initial.tv_sec = sleep_ms / 1000;
			initial.tv_nsec = (sleep_ms % 1000) * 1000000;
		retry:
			memset(&remaining, 0, sizeof(remaining));
			nanosleep_ret = nanosleep(&initial, &remaining);
			if (nanosleep_ret == -1 && errno == EINTR)
			{
				/* Interrupted by a signal */
				initial.tv_sec = remaining.tv_sec;
				initial.tv_nsec = remaining.tv_nsec;	
				goto retry;	
			}
		}

		fprintf(stdout, "After %" PRIu64 " sec:\n",
				(time_count * g_report_interval_ms) / 1000);

		fprintf(stdout, "read-reqs queued: %" PRIu64 "\n",
				cf_atomic_int_get(g_read_reqs_queued));

		histogram_dump(g_p_large_block_read_histogram,  "LARGE BLOCK READS ");
		histogram_dump(g_p_large_block_write_histogram, "LARGE BLOCK WRITES");
		histogram_dump(g_p_raw_read_histogram,          "RAW READS         ");
		int d;
		for (d = 0; d < g_num_devices; d++) {			
			histogram_dump(g_devices[d].p_raw_read_histogram,
					g_devices[d].histogram_tag);	
		}

		histogram_dump(g_p_read_histogram,              "READS             ");

		fprintf(stdout, "\n");
		fflush(stdout);
	}
	fprintf(stdout, "\nTEST COMPLETED \n");
	g_running = 0;
	int i;
// TODO - tear down the AIO context with io_destroy(aio_context)?

	/* Freeing resources used by async */
	void* ret_value;
	for (i = 0; i < g_worker_threads; i++) 
	{
		pthread_join(workers[i], &ret_value);	
	}
	destroy_async_info_queue();

	int d;
	for (d = 0; d < g_num_devices; d++) {
		device* p_device = &g_devices[d];

		pthread_join(p_device->large_block_read_thread, &ret_value);
		pthread_join(p_device->large_block_write_thread, &ret_value);

		fd_close_all(p_device);
		cf_queue_destroy(p_device->p_fd_queue);
		free(p_device->p_large_block_read_buffer);
		free(p_device->p_raw_read_histogram);
	}

	free(g_p_large_block_read_histogram);
	free(g_p_large_block_write_histogram);
	free(g_p_raw_read_histogram);
	free(g_p_read_histogram);

	destroy_salters();

	return (0);
}
Example #16
static void destroy_async_info_queue()
{
	free(async_info_array);
	cf_queue_destroy(async_info_queue);
}