Example #1
File: act.c Project: aanguss/act
//------------------------------------------------
// Do one large block write operation and report.
//
static void write_and_report_large_block(device* p_device) {
	salter* p_salter;

	if (g_num_write_buffers > 1) {
		p_salter = &g_salters[rand_32() % g_num_write_buffers];

		pthread_mutex_lock(&p_salter->lock);
		*(uint32_t*)p_salter->p_buffer = p_salter->stamp++;
	}
	else {
		p_salter = &g_salters[0];
	}

	uint64_t offset = random_large_block_offset(p_device);
	uint64_t start_time = cf_getus();
	uint64_t stop_time = write_to_device(p_device, offset,
		g_large_block_ops_bytes, p_salter->p_buffer);

	if (g_num_write_buffers > 1) {
		pthread_mutex_unlock(&p_salter->lock);
	}

	if (stop_time != -1) {
		histogram_insert_data_point(g_p_large_block_write_histogram,
			safe_delta_us(start_time, stop_time));
	}
}
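For context, the salter used above appears to be a shared, pre-filled write buffer: before each large-block write the first four bytes are overwritten with an incrementing stamp so that successive writes are not byte-identical, and a mutex serializes that stamping when buffers are shared between threads. A minimal sketch of such a struct, inferred only from the usage above (the actual definition in act.c may differ):

#include <pthread.h>
#include <stdint.h>

typedef struct salter_s {
	uint8_t* p_buffer;      // pre-allocated large-block write buffer
	pthread_mutex_t lock;   // serializes stamp + write when buffers are shared
	uint32_t stamp;         // incremented before each write to vary the data
} salter;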
Example #2
static inline uint32_t
random_write_size(const device* dev)
{
	if (dev->n_write_sizes == 1) {
		return dev->write_bytes;
	}

	return dev->write_bytes +
			(dev->min_commit_bytes * (rand_32() % dev->n_write_sizes));
}
Example #3
static inline uint32_t
random_read_size(const device* dev)
{
	if (dev->n_read_sizes == 1) {
		return dev->read_bytes;
	}

	return dev->read_bytes +
			(dev->min_op_bytes * (rand_32() % dev->n_read_sizes));
}
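Every example on this page draws its randomness from rand_32(), whose implementation is not shown here. A self-contained stand-in, assuming a simple thread-local xorshift32 generator (hypothetical; the real act.c generator may differ), could look like:

#include <stdint.h>

static __thread uint32_t rand_state = 2463534242u;  // per-thread seed; must stay nonzero

static inline uint32_t
rand_32(void)
{
	// xorshift32 (Marsaglia): fast, non-cryptographic, fine for load generation
	uint32_t x = rand_state;
	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	rand_state = x;
	return x;
}

Note that expressions like rand_32() % dev->n_read_sizes carry a slight modulo bias whenever the divisor does not evenly divide 2^32; for spreading benchmark I/O that bias is negligible.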
Example #4
File: act.c Project: aanguss/act
//------------------------------------------------
// Runs in thr_add_readreqs, adds readreq objects
// to all read queues in an even, random spread.
//
static void* run_add_readreqs(void* pv_unused) {
	uint64_t count = 0;

	while (g_running) {
		if (cf_atomic_int_incr(&g_read_reqs_queued) > MAX_READ_REQS_QUEUED) {
			fprintf(stdout, "ERROR: too many read reqs queued\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
			break;
		}

		uint32_t random_queue_index = rand_32() % g_num_queues;
		uint32_t random_device_index =
			g_queue_per_device ? random_queue_index : rand_32() % g_num_devices;

		device* p_random_device = &g_devices[random_device_index];
		readreq* p_readreq = malloc(sizeof(readreq));

		p_readreq->p_device = p_random_device;
		p_readreq->offset = random_read_offset(p_random_device);
		p_readreq->size = g_read_req_num_512_blocks * MIN_BLOCK_BYTES;
		p_readreq->start_time = cf_getus();

		cf_queue_push(g_readqs[random_queue_index].p_req_queue, &p_readreq);

		count++;

		int sleep_us = (int)
			(((count * 1000000) / g_read_reqs_per_sec) -
				(cf_getus() - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}

		if (sleep_us != 0) {
			fprintf(stdout, "%" PRIu64 ", sleep_us = %d\n", count, sleep_us);
		}
	}

	return (0);
}
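The pacing logic above targets a fixed request rate: after queuing the count-th request, the thread should be count * 1,000,000 / g_read_reqs_per_sec microseconds into the run, so it sleeps for the difference between that target and the elapsed time. A tiny standalone illustration of the same arithmetic, with hypothetical numbers (2000 requests per second, 500th request, 240,000 us elapsed):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t reqs_per_sec = 2000;
	uint64_t count = 500;
	uint64_t elapsed_us = 240000;

	// the count-th request is due this many microseconds into the run
	uint64_t target_us = (count * 1000000) / reqs_per_sec;  // 250000
	int64_t sleep_us = (int64_t)(target_us - elapsed_us);   // 10000 - sleep this long

	printf("target %lu us, sleep %ld us\n",
			(unsigned long)target_us, (long)sleep_us);
	return 0;
}

When the difference is zero or negative, the generator is behind schedule and issues the next request immediately (Example #7 below additionally stops the test if the lag exceeds a configured maximum).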
Example #5
/*
 * Set up expiration timers for an SA.  This is used for ISAKMP SAs, but
 * can also be used for application SAs if the application does not handle
 * expirations itself.  An example is the Linux FreeS/WAN KLIPS IPsec
 * stack.
 */
int
sa_setup_expirations(struct sa *sa)
{
	struct timeval  expiration;
	u_int64_t       seconds = sa->seconds;

	/*
	 * Set the soft timeout to a random percentage between 85 & 95 of
	 * the negotiated lifetime to break strictly synchronized
	 * renegotiations.  This works better when the randomization is on the
	 * order of processing plus network-roundtrip times, or larger.
	 * I.e. it depends on configuration and negotiated lifetimes.
	 * It is not good to do the decrease on the hard timeout, because then
	 * we may drop our SA before our peer.
	 * XXX Better scheme to come?
	 */
	if (!sa->soft_death) {
		gettimeofday(&expiration, 0);
		/*
		 * XXX This should probably be configuration controlled
		 * somehow.
		 */
		seconds = sa->seconds * (850 + rand_32() % 100) / 1000;
		LOG_DBG((LOG_TIMER, 95,
		    "sa_setup_expirations: SA %p soft timeout in %llu seconds",
		    sa, seconds));
		expiration.tv_sec += seconds;
		sa->soft_death = timer_add_event("sa_soft_expire",
		    sa_soft_expire, sa, &expiration);
		if (!sa->soft_death) {
			/* If we don't give up we might start leaking...  */
			sa_delete(sa, 1);
			return -1;
		}
		sa_reference(sa);
	}
	if (!sa->death) {
		gettimeofday(&expiration, 0);
		LOG_DBG((LOG_TIMER, 95,
		    "sa_setup_expirations: SA %p hard timeout in %llu seconds",
		    sa, sa->seconds));
		expiration.tv_sec += sa->seconds;
		sa->death = timer_add_event("sa_hard_expire", sa_hard_expire,
		    sa, &expiration);
		if (!sa->death) {
			/* If we don't give up we might start leaking...  */
			sa_delete(sa, 1);
			return -1;
		}
		sa_reference(sa);
	}
	return 0;
}
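The soft-timeout expression above, sa->seconds * (850 + rand_32() % 100) / 1000, scales the negotiated lifetime by a factor between 0.850 and 0.949. With a hypothetical 3600-second lifetime:

soft timeout:  3600 * 850 / 1000 = 3060 s  (minimum)
               3600 * 949 / 1000 = 3416 s  (maximum, integer division)
hard timeout:  3600 s  (unchanged)

so renegotiation starts somewhere in the last 5-15% of the lifetime, while the hard expiration is never shortened.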
Example #6
void rand_test (long int max) {

  /* perform a test of our random number generator.  Mean value is mapped
     to the range 0 -> 1, and thus we aim for a mean of 0.500000.  The
     standard deviation we are aiming for is 1/sqrt(12) ~=  0.288675 */

  float list[LIST_BUFF+1], mean, std_dev, sum = 0, sum_sq = 0;
  unsigned long int holder;
  long int i;
  FILE *output;

  if (max > LIST_BUFF || max <= 0) {
    fprintf (stderr, "Please choose size in range: "
	     "0 < size <= %d!\n", LIST_BUFF);
    exit (EXIT_FAILURE);
  }

  if ((output = fopen ("out.dat", "w")) == NULL) {
    fprintf (stderr, "Couldn't open file out.dat!\n");
    exit (EXIT_FAILURE);
  }

  srand (173);

  for (i = 1; i <= max; i++) {
    holder = rand_32 ();
    fprintf (output, "%10lu ", holder);
    if (!(i%5) && i)
      fprintf (output, "\n");
    list[i] = (float) holder/0xffffffff;
    sum += list[i];
    sum_sq += list[i]*list[i];
  }

  mean = sum/max;
  printf ("Mean Value: %f\n", mean);

  if (max > 1) {
    std_dev = sqrt ((sum_sq - max*(mean*mean))/(max-1));
    printf ("Standard Deviation: %f\n", std_dev);
  }
  else {
    printf ("Standard Deviation: N/A\n");
  }

  fclose (output);

}
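The target values quoted in the comment follow from the continuous uniform distribution on [0, 1]; for reference:

E[X] = \int_0^1 x \, dx = \tfrac{1}{2}, \qquad
E[X^2] = \int_0^1 x^2 \, dx = \tfrac{1}{3}, \qquad
\operatorname{Var}(X) = \tfrac{1}{3} - \tfrac{1}{4} = \tfrac{1}{12}, \qquad
\sigma = \tfrac{1}{\sqrt{12}} \approx 0.288675

so a sample mean near 0.5 and a sample standard deviation near 0.288675 indicate that rand_32(), mapped onto [0, 1], behaves roughly uniformly.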
Example #7
//------------------------------------------------
// Runs in service threads, adds read trans_req
// objects to transaction queues in round-robin
// fashion.
//
static void*
run_generate_read_reqs(void* pv_unused)
{
	rand_seed_thread();

	uint64_t count = 0;
	uint64_t internal_read_reqs_per_sec =
			g_scfg.internal_read_reqs_per_sec / g_scfg.read_req_threads;

	while (g_running) {
		if (atomic32_incr(&g_reqs_queued) > g_scfg.max_reqs_queued) {
			fprintf(stdout, "ERROR: too many requests queued\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
			break;
		}

		uint32_t q_index = count % g_scfg.num_queues;
		uint32_t random_dev_index = rand_32() % g_scfg.num_devices;
		device* random_dev = &g_devices[random_dev_index];

		trans_req read_req = {
				.dev = random_dev,
				.offset = random_read_offset(random_dev),
				.size = random_read_size(random_dev),
				.is_write = false,
				.start_time = get_ns()
		};

		queue_push(g_trans_qs[q_index], &read_req);

		count++;

		int64_t sleep_us = (int64_t)
				(((count * 1000000) / internal_read_reqs_per_sec) -
						(get_us() - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}
		else if (sleep_us < -(int64_t)g_scfg.max_lag_usec) {
			fprintf(stdout, "ERROR: read request generator can't keep up\n");
			fprintf(stdout, "ACT can't do requested load - test stopped\n");
			g_running = false;
		}
	}

	return NULL;
}

//------------------------------------------------
// Runs in service threads, adds write trans_req
// objects to transaction queues in round-robin
// fashion.
//
static void*
run_generate_write_reqs(void* pv_unused)
{
	rand_seed_thread();

	uint64_t count = 0;
	uint64_t internal_write_reqs_per_sec =
			g_scfg.internal_write_reqs_per_sec / g_scfg.write_req_threads;

	while (g_running) {
		if (atomic32_incr(&g_reqs_queued) > g_scfg.max_reqs_queued) {
			fprintf(stdout, "ERROR: too many requests queued\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
			break;
		}

		uint32_t q_index = count % g_scfg.num_queues;
		uint32_t random_dev_index = rand_32() % g_scfg.num_devices;
		device* random_dev = &g_devices[random_dev_index];

		trans_req write_req = {
				.dev = random_dev,
				.offset = random_write_offset(random_dev),
				.size = random_write_size(random_dev),
				.is_write = true,
				.start_time = get_ns()
		};

		queue_push(g_trans_qs[q_index], &write_req);

		count++;

		int64_t sleep_us = (int64_t)
				(((count * 1000000) / internal_write_reqs_per_sec) -
						(get_us() - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}
		else if (sleep_us < -(int64_t)g_scfg.max_lag_usec) {
			fprintf(stdout, "ERROR: write request generator can't keep up\n");
			fprintf(stdout, "ACT can't do requested load - test stopped\n");
			g_running = false;
		}
	}

	return NULL;
}

//------------------------------------------------
// Runs in every device large-block read thread,
// executes large-block reads at a constant rate.
//
static void*
run_large_block_reads(void* pv_dev)
{
	rand_seed_thread();

	device* dev = (device*)pv_dev;

	uint8_t* buf = act_valloc(g_scfg.large_block_ops_bytes);

	if (! buf) {
		fprintf(stdout, "ERROR: large block read buffer act_valloc()\n");
		g_running = false;
		return NULL;
	}

	uint64_t count = 0;

	while (g_running) {
		read_and_report_large_block(dev, buf);

		count++;

		uint64_t target_us = (uint64_t)
				((double)(count * 1000000 * g_scfg.num_devices) /
						g_scfg.large_block_reads_per_sec);

		int64_t sleep_us = (int64_t)(target_us - (get_us() - g_run_start_us));

		if (sleep_us > 0) {
			usleep((uint32_t)sleep_us);
		}
		else if (sleep_us < -(int64_t)g_scfg.max_lag_usec) {
			fprintf(stdout, "ERROR: large block reads can't keep up\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
		}
	}

	free(buf);

	return NULL;
}
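In run_large_block_reads() each device has its own thread and g_scfg.large_block_reads_per_sec is the total rate across all devices, so the per-thread target time is scaled by num_devices. With hypothetical values of 4 devices and 1000 large-block reads per second overall:

per-thread rate:   1000 / 4 = 250 reads per second
target_us(count):  count * 1000000 * 4 / 1000 = count * 4000 us

i.e. each device thread paces itself to one read every 4000 microseconds.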
Example #8
static void* generate_async_reads(void* aio_context)
{
	uint64_t count = 0;
	while(g_running)
	{
		/* Create the struct of info needed at the process_read end */
		uintptr_t info_ptr;
		if (cf_queue_pop(async_info_queue, (void*)&info_ptr, CF_QUEUE_NOWAIT) !=
				CF_QUEUE_OK) 
		{
			fprintf(stdout, "Error: Could not pop info struct \n");
			return (void*)(-1);
		}
		as_async_info_t *info = (as_async_info_t*)info_ptr;
		memset(info, 0, sizeof(as_async_info_t));
		/* Generate the actual read request */
		uint32_t random_device_index = rand_32() % g_num_devices;
		device* p_random_device = &g_devices[random_device_index];
		readreq* p_readreq = &(info->p_readreq);
		if(p_readreq == NULL)
		{
			fprintf(stdout, "Error: preadreq null \n");
			goto fail;
		}
		p_readreq->p_device = p_random_device;
		p_readreq->offset = random_read_offset(p_random_device);
		p_readreq->size = g_read_req_num_512_blocks * MIN_BLOCK_BYTES;
		p_readreq->start_time = cf_getms();

		/* Async read */
		if (g_use_valloc) 
		{
			uint8_t* p_buffer = cf_valloc(p_readreq->size);
			info->p_buffer = p_buffer;
			if (p_buffer) 
			{
				uint64_t raw_start_time = cf_getms();
				info->raw_start_time = raw_start_time;
				if(read_async_from_device(info, *(aio_context_t *)aio_context) < 0)
				{
					fprintf(stdout, "Error: Async read failed \n");
					free(p_buffer);
					goto fail; 
				}
			}
			else 
			{
				fprintf(stdout, "ERROR: read buffer cf_valloc()\n");
			}
		}
		else 
		{
			uint8_t stack_buffer[p_readreq->size + 4096];
			uint8_t* p_buffer = align_4096(stack_buffer);
			info->p_buffer = p_buffer;
			uint64_t raw_start_time = cf_getms();
			info->raw_start_time = raw_start_time;
			if(read_async_from_device(info, *(aio_context_t*)aio_context) < 0)
			{
				fprintf(stdout, "Error: Async read failed \n");
				goto fail;
			}
		}
		if (cf_atomic_int_incr(&g_read_reqs_queued) > MAX_READ_REQS_QUEUED)
		{
			fprintf(stdout, "ERROR: too many read reqs queued\n");
			fprintf(stdout, "drive(s) can't keep up - test stopped\n");
			g_running = false;
			return (void*)-1;
		}

		count++;

		int sleep_ms = (int)
			(((count * 1000) / g_read_reqs_per_sec) -
				(cf_getms() - g_run_start_ms));

		if (sleep_ms > 0) {
			usleep((uint32_t)sleep_ms * 1000);
		}

		continue;

		/* Rollback for failure */
fail:
		if(info)
		{
			uintptr_t temp = (uintptr_t)info;
			cf_queue_push(async_info_queue, (void*)&temp);
		}
	}
	return (0);
}
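The non-valloc branch above over-allocates a stack buffer by 4096 bytes and aligns it with align_4096() before submitting the async read, since direct I/O typically requires 4 KiB-aligned buffers. The helper is not shown on this page; a minimal sketch of the usual pointer-rounding idiom (an assumption, not necessarily this project's version):

#include <stdint.h>

static inline uint8_t*
align_4096(uint8_t* p)
{
	// round the pointer up to the next 4096-byte boundary
	return (uint8_t*)(((uintptr_t)p + 4095) & ~(uintptr_t)4095);
}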