Code example #1
static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
{
	struct test_scenario *ts = (struct test_scenario *)data;
	struct test_iosched *test_iosched;
	struct ufs_test_data *utd;
	int start_sec;
	int i;
	int ret = 0;

	/* validate the scenario before dereferencing it */
	BUG_ON(!ts);
	test_iosched = ts->test_iosched;
	utd = test_iosched->blk_dev_test_data;
	start_sec = test_iosched->start_sector;

	for (i = 0; i < ts->total_req; i++) {
		int num_bios = DEFAULT_NUM_OF_BIOS;
		int direction;

		if (ufs_test_toggle_direction(ts->toggle_direction, i))
			direction = (ts->direction == WRITE) ? READ : WRITE;
		else
			direction = ts->direction;

		/* use randomly generated requests */
		if (ts->rnd_req && utd->random_test_seed != 0)
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				ts->test_iosched->start_sector, &start_sec,
				&num_bios);

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
			direction, start_sec, num_bios, TEST_PATTERN_5A,
			scenario_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request" , __func__);
			break;
		}

		/*
		 * Run the queue every run_q requests, or when the
		 * request pool is exhausted.
		 */
		if (test_iosched->dispatched_count >= QUEUE_MAX_REQUESTS ||
				(ts->run_q && !(i % ts->run_q)))
			blk_post_runtime_resume(test_iosched->req_q, 0);
	}

	blk_post_runtime_resume(test_iosched->req_q, 0);
	ufs_test_thread_complete(utd, ret);
}
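The direction-toggling helper called at the top of the loop is not shown on this page. Below is a minimal sketch of what ufs_test_toggle_direction() plausibly looks like, assuming ts->toggle_direction acts as a period (toggle once every N requests, 0 meaning never); this is an assumption, not the project's verified implementation:

static bool ufs_test_toggle_direction(int toggle_factor, int iteration)
{
	/* assumed behavior: a factor of 0 disables direction toggling */
	if (!toggle_factor)
		return false;

	/* toggle on every iteration that is a multiple of the factor */
	return !(iteration % toggle_factor);
}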
Code example #2
static int ufs_test_run_write_read_test(struct test_iosched *test_iosched)
{
	int ret = 0;
	unsigned int start_sec;
	unsigned int num_bios;
	struct request_queue *q = test_iosched->req_q;
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	start_sec = test_iosched->start_sector + sizeof(int) * BIO_U32_SIZE
			* test_iosched->num_of_write_bios;
	if (utd->random_test_seed != 0)
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	else
		num_bios = DEFAULT_NUM_OF_BIOS;

	/* Adding a write request */
	pr_info("%s: Adding a write request with %d bios to Q, req_id=%d",
		__func__, num_bios, test_iosched->wr_rd_next_req_id);

	utd->queue_complete = false;
	ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE, start_sec,
		num_bios, TEST_PATTERN_5A, NULL);
	if (ret) {
		pr_err("%s: failed to add a write request", __func__);
		return ret;
	}

	/* waiting for the write request to finish */
	blk_post_runtime_resume(q, 0);
	wait_event(utd->wait_q, utd->queue_complete);

	/* Adding a read request */
	pr_info("%s: Adding a read request to Q", __func__);

	ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ, start_sec,
			num_bios, TEST_PATTERN_5A, NULL);
	if (ret) {
		pr_err("%s: failed to add a read request", __func__);
		return ret;
	}

	blk_post_runtime_resume(q, 0);
	return ret;
}
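The test above blocks in wait_event() until utd->queue_complete is set, but the code that sets it is not part of this example. Below is a minimal sketch of the completion side, assuming a hypothetical helper that runs once the test I/O scheduler has no dispatched requests left; the helper name and the dispatched_count check are assumptions:

/*
 * Hypothetical completion hook: once no test requests remain in
 * flight, mark the queue as drained and wake the waiting thread.
 */
static void ufs_test_check_completion(struct test_iosched *test_iosched)
{
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	if (test_iosched->dispatched_count)
		return;		/* requests are still in flight */

	utd->queue_complete = true;
	wake_up(&utd->wait_q);
}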
Code example #3
File: scsi_pm.c, Project: Niisp/MT6795.kernel
static int sdev_blk_runtime_resume(struct scsi_device *sdev,
                                   int (*cb)(struct device *))
{
    int err = 0;

    blk_pre_runtime_resume(sdev->request_queue);
    if (cb)
        err = cb(&sdev->sdev_gendev);
    blk_post_runtime_resume(sdev->request_queue, err);

    return err;
}
Code example #4
File: scsi_pm.c, Project: 020gzh/linux
static int sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	blk_pre_runtime_resume(sdev->request_queue);
	if (pm && pm->runtime_resume)
		err = pm->runtime_resume(dev);
	blk_post_runtime_resume(sdev->request_queue, err);

	return err;
}
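Examples #3 and #4 show the canonical pairing: call blk_pre_runtime_resume() before invoking the device's resume callback, then hand the callback's return value to blk_post_runtime_resume(), which on success marks the queue active again and restarts request processing. Below is a minimal sketch of the same pattern in a generic block driver's runtime-resume callback; my_blk_dev and my_hw_power_up() are hypothetical names, not taken from the examples on this page:

static int my_blk_runtime_resume(struct device *dev)
{
	struct my_blk_dev *mydev = dev_get_drvdata(dev);
	int err;

	/* tell the block layer a resume is in progress */
	blk_pre_runtime_resume(mydev->queue);

	/* power the hardware back up (hypothetical helper) */
	err = my_hw_power_up(mydev);

	/* report the result; on success the queue is run again */
	blk_post_runtime_resume(mydev->queue, err);

	return err;
}

Such a callback would typically be plugged into the driver's dev_pm_ops via SET_RUNTIME_PM_OPS().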
Code example #5
/**
 * run_long_test - main function for long sequential test
 * @test_iosched: test specific data
 *
 * This function is used to fill up (and keep full) the test queue with
 * requests. There are two scenarios this function works with:
 * 1. Only read/write (STAGE_1 or no stage)
 * 2. Simultaneous read and write to the same LBAs (STAGE_2)
 */
static int run_long_test(struct test_iosched *test_iosched)
{
	int ret = 0;
	int direction, long_test_num_requests, num_bios_per_request;
	static unsigned int inserted_requests;
	u32 sector, seed, num_bios, seq_sector_delta;
	struct ufs_test_data *utd;

	/* validate the argument before dereferencing it */
	BUG_ON(!test_iosched);
	utd = test_iosched->blk_dev_test_data;
	sector = test_iosched->start_sector;
	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
		test_iosched->test_count = 0;
		utd->completed_req_count = 0;
		inserted_requests = 0;
	}

	/* Set test parameters */
	switch (test_iosched->test_info.testcase) {
	case UFS_TEST_LONG_RANDOM_READ:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_RANDOM_WRITE:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = WRITE;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_READ:
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
	default:
		/*
		 * Sequential write, the mixed test and the default case all
		 * use full-size sequential write requests.
		 */
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = WRITE;
	}

	seq_sector_delta = num_bios_per_request * (TEST_BIO_SIZE / SECTOR_SIZE);

	seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;

	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     long_test_num_requests, test_iosched->wr_rd_next_req_id);

	do {
		/*
		 * Our requests come from a pool of 128, so don't exhaust it:
		 * queue up to QUEUE_MAX_REQUESTS (which includes a safety
		 * margin) and then let the block layer fetch them.
		 */
		if (test_iosched->test_count >= QUEUE_MAX_REQUESTS) {
			blk_post_runtime_resume(test_iosched->req_q, 0);
			continue;
		}

		switch (test_iosched->test_info.testcase) {
		case UFS_TEST_LONG_SEQUENTIAL_READ:
		case UFS_TEST_LONG_SEQUENTIAL_WRITE:
		case UFS_TEST_LONG_SEQUENTIAL_MIXED:
			/* don't need to increment on the first iteration */
			if (inserted_requests)
				sector += seq_sector_delta;
			break;
		case UFS_TEST_LONG_RANDOM_READ:
		case UFS_TEST_LONG_RANDOM_WRITE:
			pseudo_rnd_sector_and_size(&seed,
				test_iosched->start_sector, &sector, &num_bios);
			break;
		default:
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
			direction, sector, num_bios_per_request,
			TEST_PATTERN_5A, long_test_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request" , __func__);
			break;
		}
		inserted_requests++;
		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
			ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
				READ, sector, num_bios_per_request,
				TEST_PATTERN_5A, long_test_free_end_io_fn);
			if (ret) {
				pr_err("%s: failed to create request" ,
						__func__);
				break;
			}
			inserted_requests++;
		}

	} while (inserted_requests < long_test_num_requests);

	/*
	 * If fewer than QUEUE_MAX_REQUESTS requests were queued, the loop
	 * above never ran the queue, so run it now.
	 */
	if (long_test_num_requests < QUEUE_MAX_REQUESTS)
		blk_post_runtime_resume(test_iosched->req_q, 0);

	return ret;
}
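run_long_test() hands every request to long_test_free_end_io_fn, whose body is not included here. Below is a hedged sketch of what such a completion hook might do, assuming it simply accounts for the finished request in utd->completed_req_count and wakes any waiter; the real driver's bookkeeping (request freeing, locking) may differ:

/*
 * Hypothetical sketch of the per-request completion hook used by the
 * long tests: count the completed request and wake the test thread.
 */
static void long_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_iosched *test_iosched = rq->q->elevator->elevator_data;
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	if (err)
		pr_err("%s: request completed with error %d", __func__, err);

	utd->completed_req_count++;
	wake_up(&utd->wait_q);
}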
Code example #6
static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
{
	int ret = 0;
	int i, j;
	unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
	struct request_queue *q = test_iosched->req_q;
	int sectors[QUEUE_MAX_REQUESTS] = {0};
	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;

	start_sec = test_iosched->start_sector;
	utd->queue_complete = false;

	if (utd->random_test_seed != 0) {
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	} else {
		num_bios = DEFAULT_NUM_OF_BIOS;
		utd->random_test_seed = MAGIC_SEED;
	}

	/* Adding write requests */
	pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		/* make sure that we didn't draw the same start_sector twice */
		while (retries--) {
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				test_iosched->start_sector, &start_sec,
				&num_bios);
			sectors[i] = start_sec;
			for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
				/* just increment j */;
			if (j == i)
				break;
		}
		/* negative means every retry drew a duplicate sector */
		if (retries < 0) {
			pr_err("%s: too many unlucky start_sector draw retries",
			       __func__);
			ret = -EINVAL;
			return ret;
		}
		retries = NUM_UNLUCKY_RETRIES;

		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE,
			start_sec, 1, i, long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a write request", __func__);
			return ret;
		}
	}

	/* waiting for the write requests to finish */
	blk_post_runtime_resume(q, 0);
	wait_event(utd->wait_q, utd->queue_complete);

	/* Adding read requests */
	pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ,
			sectors[i], 1, i, long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a read request", __func__);
			return ret;
		}
	}

	blk_post_runtime_resume(q, 0);
	return ret;
}