static int ufs_test_run_write_read_test(struct test_data *td)
{
	int ret = 0;
	unsigned int start_sec;
	unsigned int num_bios;
	struct request_queue *q = td->req_q;

	start_sec = td->start_sector + sizeof(int) * BIO_U32_SIZE
			* td->num_of_write_bios;
	if (utd->random_test_seed != 0)
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	else
		num_bios = DEFAULT_NUM_OF_BIOS;

	/* Adding a write request */
	test_pr_info("%s: Adding a write request with %d bios to Q, req_id=%d",
		     __func__, num_bios, td->wr_rd_next_req_id);

	utd->write_completed = false;
	ret = test_iosched_add_wr_rd_test_req(0, WRITE, start_sec,
					num_bios, TEST_PATTERN_5A,
					ufs_test_write_read_test_end_io_fn);

	if (ret) {
		test_pr_err("%s: failed to add a write request", __func__);
		return ret;
	}

	/* waiting for the write request to finish */
	blk_run_queue(q);
	wait_event(utd->wait_q, utd->write_completed);

	/* Adding a read request */
	test_pr_info("%s: Adding a read request to Q", __func__);

	ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec,
			num_bios, TEST_PATTERN_5A, NULL);

	if (ret) {
		test_pr_err("%s: failed to add a read request", __func__);
		return ret;
	}

	blk_run_queue(q);
	return ret;
}
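/*
 * ufs_test_run_write_read_test() above blocks on utd->wait_q until
 * utd->write_completed is set, so its write request must be queued with a
 * completion callback that sets the flag and wakes the waiter. A minimal
 * sketch of such a callback, assuming the two-argument rq_end_io_fn
 * prototype of this kernel and the file-scope utd pointer; the real
 * ufs_test_write_read_test_end_io_fn() in this tree may carry extra
 * bookkeeping.
 */
static void ufs_test_write_read_test_end_io_fn(struct request *rq, int err)
{
	if (err)
		test_pr_err("%s: request completed with err=%d",
			    __func__, err);

	/* unblock the waiter in ufs_test_run_write_read_test() */
	utd->write_completed = true;
	wake_up(&utd->wait_q);
}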
static int run_long_seq_test(struct test_data *td)
{
	int ret = 0;
	int direction;
	static unsigned int inserted_requests;

	BUG_ON(!td);
	td->test_count = 0;
	utd->completed_req_count = 0;
	inserted_requests = 0;

	if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_READ)
		direction = READ;
	else
		direction = WRITE;

	test_pr_info("%s: Adding %d requests, first req_id=%d",
		     __func__, LONG_SEQ_TEST_NUM_REQS,
		     td->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity.
		 * Therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			blk_run_queue(td->req_q);
			continue;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction,
			td->start_sector, TEST_MAX_BIOS_PER_REQ,
			TEST_PATTERN_5A,
			long_seq_test_free_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		td->test_info.test_byte_count +=
			(TEST_MAX_BIOS_PER_REQ * sizeof(unsigned int) *
			BIO_U32_SIZE);

	} while (inserted_requests < LONG_SEQ_TEST_NUM_REQS);

	/* in this case the queue will not run in the above loop */
	if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
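/*
 * run_long_seq_test() relies on long_seq_test_free_end_io_fn() to recycle
 * completed requests so the 128-entry pool is never exhausted. A minimal
 * sketch of the shape such a callback could take, assuming the framework
 * stashes its test_request in rq->elv.priv[0] and that
 * utd->completed_req_count tracks progress; the actual callback in this
 * tree also unlinks the request from the queue under the queue lock.
 */
static void long_seq_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq =
		(struct test_request *)rq->elv.priv[0];

	if (err)
		test_pr_err("%s: request %d completed with err=%d",
			    __func__, test_rq->req_id, err);

	/* release the pool entry so new requests can be allocated */
	kfree(test_rq->bios_buffer);
	kfree(test_rq);

	utd->completed_req_count++;
}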
static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
{
	struct test_scenario *ts = (struct test_scenario *)data;
	struct test_iosched *test_iosched;
	struct ufs_test_data *utd;
	int start_sec;
	int i;
	int ret = 0;

	/* validate the pointer before it is first dereferenced */
	BUG_ON(!ts);
	test_iosched = ts->test_iosched;
	utd = test_iosched->blk_dev_test_data;
	start_sec = test_iosched->start_sector;

	for (i = 0; i < ts->total_req; i++) {
		int num_bios = DEFAULT_NUM_OF_BIOS;
		int direction;

		if (ufs_test_toggle_direction(ts->toggle_direction, i))
			direction = (ts->direction == WRITE) ? READ : WRITE;
		else
			direction = ts->direction;

		/* use randomly generated requests */
		if (ts->rnd_req && utd->random_test_seed != 0)
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				ts->test_iosched->start_sector, &start_sec,
				&num_bios);

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
			direction, start_sec, num_bios, TEST_PATTERN_5A,
			scenario_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}

		/*
		 * Run the queue every run_q requests, or when the
		 * request pool is exhausted.
		 */
		if (test_iosched->dispatched_count >= QUEUE_MAX_REQUESTS ||
				(ts->run_q && !(i % ts->run_q)))
			blk_post_runtime_resume(test_iosched->req_q, 0);
	}

	blk_post_runtime_resume(test_iosched->req_q, 0);
	ufs_test_thread_complete(utd, ret);
}
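/*
 * ufs_test_run_scenario() flips the I/O direction on iterations selected
 * by ts->toggle_direction. A minimal sketch of the predicate, assuming
 * toggle_direction acts as a modulus (0 meaning never toggle); the real
 * ufs_test_toggle_direction() may define the policy differently.
 */
static bool ufs_test_toggle_direction(int toggle_factor, int iteration)
{
	/* a factor of 0 disables toggling altogether */
	if (!toggle_factor)
		return false;

	/* toggle on every toggle_factor-th iteration */
	return !(iteration % toggle_factor);
}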
/**
 * run_long_seq_test - main function for long sequential test
 * @td: test specific data
 *
 * This function is used to fill up (and keep full) the test queue with
 * requests. There are two scenarios this function works with:
 * 1. Only read/write (STAGE_1 or no stage)
 * 2. Simultaneous read and write to the same LBAs (STAGE_2)
 */
static int run_long_seq_test(struct test_data *td)
{
	int ret = 0;
	int direction;
	static unsigned int inserted_requests;
	u32 sector;

	BUG_ON(!td);
	sector = td->start_sector;
	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
		td->test_count = 0;
		utd->completed_req_count = 0;
		inserted_requests = 0;
	}

	/* Set the direction */
	switch (td->test_info.testcase) {
	case UFS_TEST_LONG_SEQUENTIAL_READ:
		direction = READ;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
	default:
		direction = WRITE;
	}
	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     LONG_SEQ_TEST_NUM_REQS, td->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity.
		 * Therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			blk_run_queue(td->req_q);
			continue;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction, sector,
				TEST_MAX_BIOS_PER_REQ, TEST_PATTERN_5A,
				long_seq_test_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
			ret = test_iosched_add_wr_rd_test_req(0, READ, sector,
					TEST_MAX_BIOS_PER_REQ, TEST_PATTERN_5A,
					long_seq_test_free_end_io_fn);
			if (ret) {
				pr_err("%s: failed to create request",
						__func__);
				break;
			}
			inserted_requests++;
		}
		/* one request's span: bios per req * (PAGE_SIZE / block size) */
		sector += TEST_MAX_BIOS_PER_REQ * (PAGE_SIZE /
				td->req_q->limits.logical_block_size);

	} while (inserted_requests < LONG_SEQ_TEST_NUM_REQS);

	/* in this case the queue will not run in the above loop */
	if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
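/*
 * Worked example of the sector stride computed above, as a hypothetical
 * helper (not part of the original file): each bio covers one page, so
 * if, for illustration, TEST_MAX_BIOS_PER_REQ were 128 with PAGE_SIZE =
 * 4096 and a 512-byte logical block, every request would advance
 * 128 * (4096 / 512) = 1024 sectors.
 */
static inline u32 long_seq_test_sector_stride(struct request_queue *q)
{
	/* bios per request, converted from pages to logical sectors */
	return TEST_MAX_BIOS_PER_REQ *
		(PAGE_SIZE / q->limits.logical_block_size);
}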
/**
 * run_long_test - main function for long sequential and long random tests
 * @test_iosched: test iosched descriptor
 *
 * This function is used to fill up (and keep full) the test queue with
 * requests. There are two scenarios this function works with:
 * 1. Only read/write (STAGE_1 or no stage)
 * 2. Simultaneous read and write to the same LBAs (STAGE_2)
 */
static int run_long_test(struct test_iosched *test_iosched)
{
	int ret = 0;
	int direction, long_test_num_requests, num_bios_per_request;
	static unsigned int inserted_requests;
	u32 sector, seed, num_bios, seq_sector_delta;
	struct ufs_test_data *utd;

	/* validate the pointer before it is first dereferenced */
	BUG_ON(!test_iosched);
	utd = test_iosched->blk_dev_test_data;
	sector = test_iosched->start_sector;
	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
		test_iosched->test_count = 0;
		utd->completed_req_count = 0;
		inserted_requests = 0;
	}

	/* Set test parameters */
	switch (test_iosched->test_info.testcase) {
	case UFS_TEST_LONG_RANDOM_READ:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_RANDOM_WRITE:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = WRITE;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_READ:
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
	default:
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = WRITE;
	}

	seq_sector_delta = num_bios_per_request * (TEST_BIO_SIZE / SECTOR_SIZE);

	seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;

	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     long_test_num_requests, test_iosched->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity.
		 * Therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (test_iosched->test_count >= QUEUE_MAX_REQUESTS) {
			blk_post_runtime_resume(test_iosched->req_q, 0);
			continue;
		}

		switch (test_iosched->test_info.testcase) {
		case UFS_TEST_LONG_SEQUENTIAL_READ:
		case UFS_TEST_LONG_SEQUENTIAL_WRITE:
		case UFS_TEST_LONG_SEQUENTIAL_MIXED:
			/* don't need to increment on the first iteration */
			if (inserted_requests)
				sector += seq_sector_delta;
			break;
		case UFS_TEST_LONG_RANDOM_READ:
		case UFS_TEST_LONG_RANDOM_WRITE:
			/*
			 * only the sector is randomized here; random
			 * requests are one bio each, so the num_bios
			 * draw is not used for the request size
			 */
			pseudo_rnd_sector_and_size(&seed,
				test_iosched->start_sector, &sector,
				&num_bios);
			break;
		default:
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
			direction, sector, num_bios_per_request,
			TEST_PATTERN_5A, long_test_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
			ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
				READ, sector, num_bios_per_request,
				TEST_PATTERN_5A, long_test_free_end_io_fn);
			if (ret) {
				pr_err("%s: failed to create request",
						__func__);
				break;
			}
			inserted_requests++;
		}

	} while (inserted_requests < long_test_num_requests);

	/* in this case the queue will not run in the above loop */
	if (long_test_num_requests < QUEUE_MAX_REQUESTS)
		blk_post_runtime_resume(test_iosched->req_q, 0);

	return ret;
}
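/*
 * The long random tests above (and the scenario runner) draw a pseudo-
 * random start sector and bio count from a caller-held seed so a failing
 * run can be replayed from the same seed. A minimal sketch of such a
 * generator, using an illustrative linear congruential step; the LCG
 * constants, the TEST_MAX_SECTOR_RANGE clamp, and the _sketch name are
 * assumptions, not the file's actual pseudo_rnd_sector_and_size()
 * implementation.
 */
static void pseudo_rnd_sector_and_size_sketch(unsigned int *seed,
		unsigned int min_start_sector, unsigned int *start_sec,
		unsigned int *num_bios)
{
	/* hypothetical LCG step; any full-period constants would do */
	*seed = *seed * 1103515245U + 12345U;

	/* stay inside the test area, at or above min_start_sector */
	*start_sec = min_start_sector + (*seed % TEST_MAX_SECTOR_RANGE);

	/* draw 1..TEST_MAX_BIOS_PER_REQ bios for the request */
	*num_bios = (*seed % TEST_MAX_BIOS_PER_REQ) + 1;
}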
static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
{
	int ret = 0;
	int i, j;
	unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
	struct request_queue *q = test_iosched->req_q;
	int sectors[QUEUE_MAX_REQUESTS] = {0};
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	start_sec = test_iosched->start_sector;
	utd->queue_complete = false;

	if (utd->random_test_seed != 0) {
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	} else {
		num_bios = DEFAULT_NUM_OF_BIOS;
		utd->random_test_seed = MAGIC_SEED;
	}

	/* Adding write requests */
	pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		/* make sure that we didn't draw the same start_sector twice */
		while (retries--) {
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				test_iosched->start_sector, &start_sec,
				&num_bios);
			sectors[i] = start_sec;
			for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
				/* just increment j */;
			if (j == i)
				break;
		}
		/*
		 * retries is unsigned and wraps to UINT_MAX when the loop
		 * exhausts it, so test the scan index instead: j < i means
		 * every draw collided with an earlier sector.
		 */
		if (j != i) {
			pr_err("%s: too many unlucky start_sector draw retries",
			       __func__);
			return -EINVAL;
		}
		retries = NUM_UNLUCKY_RETRIES;

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE,
			start_sec, 1, i, long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a write request", __func__);
			return ret;
		}
	}

	/* wait for all write requests to finish */
	blk_post_runtime_resume(q, 0);
	wait_event(utd->wait_q, utd->queue_complete);

	/* Adding read requests */
	pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ,
			sectors[i], 1, i, long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a read request", __func__);
			return ret;
		}
	}

	blk_post_runtime_resume(q, 0);
	return ret;
}
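/*
 * The duplicate-sector scan inside ufs_test_run_data_integrity_test() is
 * easy to misread. The same check as a hypothetical standalone helper
 * (not in the original file), returning true when a candidate sector was
 * already drawn for an earlier write:
 */
static bool ufs_test_sector_already_used(const int *sectors, int count,
					 int candidate)
{
	int j;

	for (j = 0; j < count; j++)
		if (sectors[j] == candidate)
			return true;

	return false;
}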