Example #1
//------------------------------------------------
// Runs in every transaction thread, pops
// trans_req objects, does the transaction and
// reports the duration.
//
static void*
run_transactions(void* pv_req_q)
{
	rand_seed_thread();

	queue* req_q = (queue*)pv_req_q;
	trans_req req;

	while (g_running) {
		if (queue_pop(req_q, (void*)&req, 100) != QUEUE_OK) {
			continue;
		}

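		// Over-allocate on the stack, then align up to a 4096-byte
		// boundary (likely required for direct device I/O).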
		uint8_t stack_buffer[req.size + 4096];
		uint8_t* buf = align_4096(stack_buffer);

		if (req.is_write) {
			write_and_report(&req, buf);
		}
		else {
			read_and_report(&req, buf);
		}

		atomic32_decr(&g_reqs_queued);
	}

	return NULL;
}
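All three examples rely on an align_4096() helper that isn't shown. A minimal sketch, assuming it simply rounds a pointer up to the next 4096-byte boundary (which is why the stack buffers are over-allocated by 4096 bytes):

#include <stdint.h>

static inline uint8_t*
align_4096(uint8_t* p)
{
	return (uint8_t*)(((uintptr_t)p + 4095) & ~(uintptr_t)4095);
}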
Example #2
File: act.c Project: aanguss/act
//------------------------------------------------
// Runs in every thread of every read queue, pops
// readreq objects, does the read and reports the
// read transaction duration.
//
static void* run_reads(void* pv_req_queue) {
	cf_queue* p_req_queue = (cf_queue*)pv_req_queue;
	readreq* p_readreq;

	while (g_running) {
		if (cf_queue_pop(p_req_queue, (void*)&p_readreq, 100) != CF_QUEUE_OK) {
			continue;
		}

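		// Two buffer strategies: a page-aligned heap buffer from
		// cf_valloc(), or an over-sized stack buffer aligned by hand.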
		if (g_use_valloc) {
			uint8_t* p_buffer = cf_valloc(p_readreq->size);

			if (p_buffer) {
				read_and_report(p_readreq, p_buffer);
				free(p_buffer);
			}
			else {
				fprintf(stdout, "ERROR: read buffer cf_valloc()\n");
			}
		}
		else {
			uint8_t stack_buffer[p_readreq->size + 4096];
			uint8_t* p_buffer = align_4096(stack_buffer);

			read_and_report(p_readreq, p_buffer);
		}

		free(p_readreq);
		cf_atomic_int_decr(&g_read_reqs_queued);
	}

	return NULL;
}
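cf_valloc() isn't shown either; judging by the name and the manual 4096-byte alignment in the fallback path, it appears to return page-aligned memory that free() can release, as direct I/O on the device would require. A rough stand-in under that assumption:

#include <stdlib.h>

static void*
cf_valloc(size_t size)
{
	void* p = NULL;
	return posix_memalign(&p, 4096, size) == 0 ? p : NULL;
}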
Example #3
static void* generate_async_reads(void* aio_context)
{
	uint64_t count = 0;
	while (g_running)
	{
		/* Create the struct of info needed at the process_read end */
		uintptr_t info_ptr;
		if (cf_queue_pop(async_info_queue, (void*)&info_ptr, CF_QUEUE_NOWAIT) !=
				CF_QUEUE_OK) 
		{
			fprintf(stdout, "Error: Could not pop info struct \n");
			return (void*)(-1);
		}
		as_async_info_t *info = (as_async_info_t*)info_ptr;
		memset(info, 0, sizeof(as_async_info_t));
		/* Generate the actual read request */
		uint32_t random_device_index = rand_32() % g_num_devices;
		device* p_random_device = &g_devices[random_device_index];
		readreq* p_readreq = &(info->p_readreq);
		if(p_readreq == NULL)
		{
			fprintf(stdout, "Error: preadreq null \n");
			goto fail;
		}
		p_readreq->p_device = p_random_device;
		p_readreq->offset = random_read_offset(p_random_device);
		p_readreq->size = g_read_req_num_512_blocks * MIN_BLOCK_BYTES;
		p_readreq->start_time = cf_getms();

		/* Async read */
		if (g_use_valloc) 
		{
			uint8_t* p_buffer = cf_valloc(p_readreq->size);
			info->p_buffer = p_buffer;
			if (p_buffer) 
			{
				uint64_t raw_start_time = cf_getms();
				info->raw_start_time = raw_start_time;
				if (read_async_from_device(info, *(aio_context_t*)aio_context) < 0)
				{
					fprintf(stdout, "Error: Async read failed\n");
					free(p_buffer);
					goto fail;
				}
			}
			else
			{
				fprintf(stdout, "ERROR: read buffer cf_valloc()\n");
				goto fail; // no read was issued; return the info struct to the pool
			}
		}
		else 
		{
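			// Caution: stack_buffer's lifetime ends with this loop
			// iteration, so the async read must be done with it by then;
			// the cf_valloc() path above avoids that hazard.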
			uint8_t stack_buffer[p_readreq->size + 4096];
			uint8_t* p_buffer = align_4096(stack_buffer);
			info->p_buffer = p_buffer;
			uint64_t raw_start_time = cf_getms();
			info->raw_start_time = raw_start_time;
			if (read_async_from_device(info, *(aio_context_t*)aio_context) < 0)
			{
				fprintf(stdout, "Error: Async read failed\n");
				goto fail;
			}
		}
		if (cf_atomic_int_incr(&g_read_reqs_queued) > MAX_READ_REQS_QUEUED)
		{
			fprintf(stdout, "ERROR: too many read reqs queued\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
			return (void*)-1;
		}

		count++;

		int sleep_ms = (int)
			(((count * 1000) / g_read_reqs_per_sec) -
				(cf_getms() - g_run_start_ms));

		if (sleep_ms > 0) {
			usleep((uint32_t)sleep_ms * 1000);
		}

		continue;

		/* Rollback for failure */
fail:
		if (info)
		{
			uintptr_t temp = (uintptr_t)info;
			cf_queue_push(async_info_queue, (void*)&temp);
		}
	}
	return NULL;
}
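The sleep at the bottom of Example #3 is an open-loop rate limiter: request number count is due (count * 1000) / g_read_reqs_per_sec milliseconds after g_run_start_ms, and the thread sleeps off whatever part of that deadline has not yet elapsed. The same pattern in isolation (names hypothetical):

#include <stdint.h>
#include <unistd.h>

// Sleep so that event number 'count' lands on schedule, given a target
// of 'rate' events per second starting at 'start_ms'.
static void
pace(uint64_t count, uint64_t rate, uint64_t start_ms, uint64_t now_ms)
{
	int64_t sleep_ms =
			(int64_t)((count * 1000) / rate) - (int64_t)(now_ms - start_ms);

	if (sleep_ms > 0) {
		usleep((uint32_t)(sleep_ms * 1000));
	}
}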