static void long_seq_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_data *ptd = test_get_test_data();

	if (!rq) {
		test_pr_err("%s: error: NULL request", __func__);
		return;
	}

	test_rq = (struct test_request *)rq->elv.priv[0];

	BUG_ON(!test_rq);

	spin_lock_irq(&ptd->lock);
	ptd->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	/* log before freeing test_rq so test_rq->req_id is still valid */
	if (err)
		test_pr_err("%s: request %d completed, err=%d",
			__func__, test_rq->req_id, err);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);
	utd->completed_req_count++;

	check_test_completion();
}
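
/*
 * long_test_free_end_io_fn() - completion callback for the long test
 * requests. Removes the request from the dispatched list, verifies the
 * read-back pattern during stage 2 of the mixed sequential test, and
 * frees the request resources.
 */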
static void long_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_iosched *test_iosched;
	struct ufs_test_data *utd;

	/* check rq before rq->q is dereferenced below */
	if (!rq) {
		pr_err("%s: error: NULL request", __func__);
		return;
	}

	test_iosched = rq->q->elevator->elevator_data;
	utd = test_iosched->blk_dev_test_data;
	test_rq = (struct test_request *)rq->elv.priv[0];

	BUG_ON(!test_rq);

	spin_lock_irq(&test_iosched->lock);
	test_iosched->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(test_iosched->req_q, test_rq->rq);
	spin_unlock_irq(&test_iosched->lock);

	if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2 &&
			rq_data_dir(rq) == READ &&
			compare_buffer_to_pattern(test_rq)) {
		/* the read-back pattern does not match what was written */
		pr_err("%s: read pattern not as expected", __func__);
		utd->test_stage = UFS_TEST_ERROR;
		/* free the request data before bailing out */
		test_iosched_free_test_req_data_buffer(test_rq);
		kfree(test_rq);
		check_test_completion(test_iosched);
		return;
	}

	/* log before freeing test_rq so test_rq->req_id is still valid */
	if (err)
		pr_err("%s: request %d completed, err=%d", __func__,
			test_rq->req_id, err);

	test_iosched_free_test_req_data_buffer(test_rq);
	kfree(test_rq);
	utd->completed_req_count++;

	check_test_completion(test_iosched);
}
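
/*
 * ufs_test_run_parallel_read_and_write_test() - schedules
 * RANDOM_REQUEST_THREADS asynchronous threads, half running a random
 * read scenario and half a random write scenario, then waits (with a
 * timeout) for all of them to finish.
 */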
static int ufs_test_run_parallel_read_and_write_test(
	struct test_iosched *test_iosched)
{
	struct test_scenario *read_data, *write_data;
	int i;
	bool changed_seed = false;
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_50);
	write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_50);

	/* fall back to a default seed if the user did not set one */
	if (utd->random_test_seed <= 0) {
		changed_seed = true;
		utd->random_test_seed = 1;
	}

	atomic_set(&utd->outstanding_threads, 0);
	utd->fail_threads = 0;
	init_completion(&utd->outstanding_complete);

	for (i = 0; i < (RANDOM_REQUEST_THREADS / 2); i++) {
		/* account for both threads before either can complete */
		atomic_add(2, &utd->outstanding_threads);
		async_schedule(ufs_test_run_scenario, read_data);
		async_schedule(ufs_test_run_scenario, write_data);
	}

	if (!wait_for_completion_timeout(&utd->outstanding_complete,
				THREADS_COMPLETION_TIMOUT)) {
		pr_err("%s: multi-thread test timed out, %d threads left",
			__func__, atomic_read(&utd->outstanding_threads));
	}
	check_test_completion(test_iosched);

	/* clear random seed if changed */
	if (changed_seed)
		utd->random_test_seed = 0;

	return 0;
}
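
/*
 * scenario_free_end_io_fn() - generic completion callback for
 * scenario-based requests: removes the request from the dispatched
 * list, frees its resources and checks for overall test completion.
 */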
static void scenario_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_data *ptd = test_get_test_data();

	BUG_ON(!rq);
	test_rq = (struct test_request *)rq->elv.priv[0];
	BUG_ON(!test_rq);

	spin_lock_irq(&ptd->lock);
	ptd->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	/* log before freeing test_rq so test_rq->req_id is still valid */
	if (err)
		pr_err("%s: request %d completed, err=%d", __func__,
			test_rq->req_id, err);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);

	check_test_completion();
}
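
/*
 * ufs_test_run_lun_depth_test() - issues batches of random reads and
 * writes whose sizes straddle the LUN queue depth (lun_qdepth) and the
 * host's transfer request slot count (nutrs), to exercise queue-full
 * corner cases.
 */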
static int ufs_test_run_lun_depth_test(struct test_data *td)
{
	struct test_scenario *read_data, *write_data;
	struct scsi_device *sdev;
	bool changed_seed = false;
	int i = 0, num_req[LUN_DEPTH_TEST_SIZE];
	int lun_qdepth, nutrs, num_scenarios;

	BUG_ON(!td || !td->req_q || !td->req_q->queuedata);
	sdev = (struct scsi_device *)td->req_q->queuedata;
	lun_qdepth = sdev->max_queue_depth;
	nutrs = sdev->host->can_queue;

	/* fall back to a default seed if the user did not set one */
	if (utd->random_test_seed <= 0) {
		changed_seed = true;
		utd->random_test_seed = 1;
	}

	/* initialize the number of requests for each iteration */
	num_req[i++] = ufs_test_pseudo_random_seed(
			&utd->random_test_seed, 1, lun_qdepth - 2);
	num_req[i++] = lun_qdepth - 1;
	num_req[i++] = lun_qdepth;
	num_req[i++] = lun_qdepth + 1;
	/* if (nutrs-lun_qdepth-2 <= 0), do not run this scenario */
	if (nutrs - lun_qdepth - 2 > 0)
		num_req[i++] = lun_qdepth + 1 + ufs_test_pseudo_random_seed(
			&utd->random_test_seed, 1, nutrs - lun_qdepth - 2);

	/* if nutrs == lun_qdepth, do not run these three scenarios */
	if (nutrs != lun_qdepth) {
		num_req[i++] = nutrs - 1;
		num_req[i++] = nutrs;
		num_req[i++] = nutrs + 1;
	}

	/* up to 10 more requests, small enough to avoid overflow/timeout */
	num_req[i++] = nutrs + 1 + ufs_test_pseudo_random_seed(
			&utd->random_test_seed, 1, 10);

	num_scenarios = i;
	utd->test_stage = UFS_TEST_LUN_DEPTH_TEST_RUNNING;
	utd->fail_threads = 0;
	read_data = get_scenario(td, SCEN_RANDOM_READ_32_NO_FLUSH);
	write_data = get_scenario(td, SCEN_RANDOM_WRITE_32_NO_FLUSH);

	for (i = 0; i < num_scenarios; i++) {
		int reqs = num_req[i];

		read_data->total_req = reqs;
		write_data->total_req = reqs;

		ufs_test_run_synchronous_scenario(read_data);
		ufs_test_run_synchronous_scenario(write_data);
	}

	utd->test_stage = UFS_TEST_LUN_DEPTH_DONE_ISSUING_REQ;
	check_test_completion();

	/* clear random seed if changed */
	if (changed_seed)
		utd->random_test_seed = 0;

	return 0;
}
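
/*
 * A minimal usage sketch (not part of this driver) of how an end_io
 * callback such as scenario_free_end_io_fn() is handed to the
 * test-iosched framework. The helper test_iosched_add_wr_rd_test_req()
 * and the TEST_PATTERN_5A constant are assumptions about the framework
 * API; verify the exact signature in test-iosched.h before enabling.
 */
#if 0
static int ufs_test_example_issue_one_req(void)
{
	/* queue one 8-bio write, completed via scenario_free_end_io_fn() */
	return test_iosched_add_wr_rd_test_req(0 /* is_err_expected */,
			WRITE, 0 /* start_sec */, 8 /* num_bios */,
			TEST_PATTERN_5A, scenario_free_end_io_fn);
}
#endif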