Example #1
File: verify.c Project: apexearth/fio
unsigned long fill_buffer(struct thread_data *td, void *p, unsigned int len)
{
	struct frand_state *fs = &td->verify_state;
	struct thread_options *o = &td->o;

	return fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
}
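
Note: fill_random_buf_percentage() fills the buffer so that roughly compress_percentage percent of it compresses well, with the rest left pseudo-random. The standalone sketch below only illustrates that idea; the function name, the single-split layout, and the use of rand() are assumptions for illustration, not fio's segmented implementation.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative sketch only: make roughly 'perc' percent of 'len' bytes
 * trivially compressible (one repeated byte) and fill the remainder
 * with pseudo-random data. fio's real helper works per segment and
 * carries its own PRNG state (struct frand_state) rather than rand().
 */
static void fill_buf_compress_perc(void *buf, unsigned int len, unsigned int perc)
{
	unsigned char *p = buf;
	unsigned int compressible = (unsigned int)((uint64_t)len * perc / 100);
	unsigned int i;

	memset(p, 0x5a, compressible);		/* compressible head */
	for (i = compressible; i < len; i++)	/* incompressible tail */
		p[i] = (unsigned char)rand();
}
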
Example #2
File: verify.c Project: pallavija/fio
void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
			 struct io_u *io_u, unsigned long seed, int use_seed)
{
	struct thread_options *o = &td->o;

	if (!o->verify_pattern_bytes) {
		dprint(FD_VERIFY, "fill random bytes len=%u\n", len);

		if (use_seed)
			__fill_random_buf_percentage(seed, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
		else {
			struct frand_state *fs = &td->verify_state;

			io_u->rand_seed = fill_random_buf_percentage(fs, p, o->compress_percentage, len, len, o->buffer_pattern, o->buffer_pattern_bytes);
		}
		return;
	}

	if (io_u->buf_filled_len >= len) {
		dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
			o->verify_pattern_bytes, len);
		return;
	}

	fill_pattern(p, len, o->verify_pattern, o->verify_pattern_bytes);
	io_u->buf_filled_len = len;
}
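
In the non-random branch, fill_pattern() tiles the user-supplied verify pattern across the buffer, and buf_filled_len caches how much of the buffer already holds it so repeat fills can be skipped. A minimal sketch of such a tiling helper follows, under a hypothetical name; fio's own fill_pattern may differ in detail.

#include <string.h>

/*
 * Hypothetical sketch of a pattern-tiling helper: copy 'pattern_bytes'
 * bytes of 'pattern' repeatedly until 'len' bytes of 'p' are covered,
 * truncating the final copy if it does not fit evenly. Assumes
 * pattern_bytes is non-zero, as the caller above guarantees.
 */
static void fill_pattern_sketch(void *p, unsigned int len,
				const void *pattern, unsigned int pattern_bytes)
{
	unsigned int off = 0;

	while (off < len) {
		unsigned int chunk = len - off;

		if (chunk > pattern_bytes)
			chunk = pattern_bytes;
		memcpy((char *)p + off, pattern, chunk);
		off += chunk;
	}
}
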
Example #3
File: io_u.c Project: vsharma13/fio
/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
			 uint64_t *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);

	if (!min_evts)
		tvp = &ts;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}
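
When min_evts is 0, tvp points at a zeroed timespec, so the event reap returns immediately with whatever has already completed instead of blocking. A minimal sketch of that polling pattern, assuming the libaio engine (other fio engines implement getevents differently):

#include <libaio.h>
#include <string.h>
#include <time.h>

/*
 * Poll for completions without blocking: a min_nr of 0 plus a zero
 * timeout makes io_getevents() return right away with whatever is
 * ready, possibly nothing. Assumes a libaio context; illustration only.
 */
static int poll_completions(io_context_t ctx, struct io_event *evs, long max)
{
	struct timespec zero;

	memset(&zero, 0, sizeof(zero));
	return io_getevents(ctx, 0, max, evs, &zero);
}
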

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
	}
}
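
The submission latency recorded above is simply issue_time minus start_time, converted to microseconds by utime_since(). A sketch of that arithmetic, assuming struct timespec timestamps (the actual fio helper and its timestamp type may differ):

#include <stdint.h>
#include <time.h>

/*
 * Elapsed microseconds from 'start' to 'end'. Assumes end is not
 * earlier than start; an illustrative stand-in for fio's utime_since(),
 * not its implementation.
 */
static uint64_t usec_between(const struct timespec *start,
			     const struct timespec *end)
{
	int64_t sec = end->tv_sec - start->tv_sec;
	int64_t nsec = end->tv_nsec - start->tv_nsec;

	return (uint64_t)(sec * 1000000LL + nsec / 1000);
}
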

void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	if (td->o.buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;

		if (perc) {
			unsigned int seg = min_write;

			seg = min(min_write, td->o.compress_chunk);
			if (!seg)
				seg = min_write;

			fill_random_buf_percentage(&td->buf_state, buf,
						perc, seg, max_bs);
		} else
			fill_random_buf(&td->buf_state, buf, max_bs);
	} else
		memset(buf, 0, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}