static int run_long_seq_test(struct test_data *td)
{
	int ret = 0;
	int direction;
	static unsigned int inserted_requests;

	BUG_ON(!td);
	td->test_count = 0;
	utd->completed_req_count = 0;
	inserted_requests = 0;

	if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_READ)
		direction = READ;
	else
		direction = WRITE;

	test_pr_info("%s: Adding %d requests, first req_id=%d",
		     __func__, LONG_SEQ_TEST_NUM_REQS,
		     td->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity,
		 * therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			blk_run_queue(td->req_q);
			continue;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction,
			td->start_sector, TEST_MAX_BIOS_PER_REQ,
			TEST_PATTERN_5A,
			long_seq_test_free_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		td->test_info.test_byte_count +=
			(TEST_MAX_BIOS_PER_REQ * sizeof(unsigned int) *
			BIO_U32_SIZE);

	} while (inserted_requests < LONG_SEQ_TEST_NUM_REQS);

	/* in this case the queue will not run in the above loop */
	if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
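The callback long_seq_test_free_end_io_fn is wired into every request above but not shown. A hedged sketch of what such an end_io handler could look like, built only from the names visible at the call sites (test_get_test_data(), the elv.priv slot, and the test_request fields are taken from the test-iosched framework and are assumptions here; the real body in ufs_test.c may differ):

static void long_seq_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_data *ptd = test_get_test_data();

	BUG_ON(!rq);
	test_rq = (struct test_request *)rq->elv.priv[0];
	BUG_ON(!test_rq);

	/* Return the request to the pool so the insertion loop above
	 * can keep refilling without exhausting it. */
	spin_lock_irq(&ptd->lock);
	ptd->test_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	if (err)
		pr_err("%s: request %d completed with err %d",
		       __func__, test_rq->req_id, err);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);

	/* Consumed by the test to decide when all I/O has finished. */
	utd->completed_req_count++;
}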
static int ufs_test_run_write_read_test(struct test_data *td)
{
	int ret = 0;
	unsigned int start_sec;
	unsigned int num_bios;
	struct request_queue *q = td->req_q;

	start_sec = td->start_sector + sizeof(int) * BIO_U32_SIZE
			* td->num_of_write_bios;
	if (utd->random_test_seed != 0)
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	else
		num_bios = DEFAULT_NUM_OF_BIOS;

	/* Adding a write request */
	test_pr_info("%s: Adding a write request with %d bios to Q, req_id=%d",
		     __func__, num_bios, td->wr_rd_next_req_id);

	utd->write_completed = false;
	ret = test_iosched_add_wr_rd_test_req(0, WRITE, start_sec,
					num_bios, TEST_PATTERN_5A,
					ufs_test_write_read_test_end_io_fn);

	if (ret) {
		test_pr_err("%s: failed to add a write request", __func__);
		return ret;
	}

	/* waiting for the write request to finish */
	blk_run_queue(q);
	wait_event(utd->wait_q, utd->write_completed);

	/* Adding a read request */
	test_pr_info("%s: Adding a read request to Q", __func__);

	ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec,
			num_bios, TEST_PATTERN_5A, NULL);

	if (ret) {
		test_pr_err("%s: failed to add a read request", __func__);
		return ret;
	}

	blk_run_queue(q);
	return ret;
}
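The write half of this test blocks on utd->wait_q until the end_io callback flips utd->write_completed. A minimal sketch of that callback, using only names visible at the call sites here (request recycling is omitted; it would mirror the long_seq_test_free_end_io_fn sketch earlier):

static void ufs_test_write_read_test_end_io_fn(struct request *rq, int err)
{
	if (err)
		pr_err("%s: write request completed with err %d",
		       __func__, err);

	/* Release the waiter in ufs_test_run_write_read_test(). */
	utd->write_completed = true;
	wake_up(&utd->wait_q);
}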
/******************** Static helper functions ***********************/
static void kick_queue(struct work_struct *work)
{
	struct idling_data *read_data =
		container_of(work, struct idling_data, idle_work);
	struct row_data *rd =
		container_of(read_data, struct row_data, rd_idle_data);

	blk_run_queue(rd->dispatch_queue);
}
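kick_queue() is only the bottom half of the idling mechanism; something has to arm it when the read-idle period expires. A sketch of the assumed wiring, deferring the queue run from an hrtimer to process context (the hr_timer field name and the init call site are assumptions):

/* Hypothetical expiry handler: hand blk_run_queue() off to a work
 * item instead of running the queue from hrtimer context. */
static enum hrtimer_restart rd_idle_timer_fn(struct hrtimer *timer)
{
	struct idling_data *read_data =
		container_of(timer, struct idling_data, hr_timer);

	schedule_work(&read_data->idle_work);
	return HRTIMER_NORESTART;
}

/* Assumed init-time wiring, e.g. in the scheduler's init_queue hook: */
hrtimer_init(&rd->rd_idle_data.hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rd->rd_idle_data.hr_timer.function = rd_idle_timer_fn;
INIT_WORK(&rd->rd_idle_data.idle_work, kick_queue);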
static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
{
	struct test_scenario *ts = (struct test_scenario *)data;
	int ret = 0, i, start_sec;

	BUG_ON(!ts);
	start_sec = ts->td->start_sector;

	for (i = 0; i < ts->total_req; i++) {
		int num_bios = DEFAULT_NUM_OF_BIOS;
		int direction;

		if (ufs_test_toggle_direction(ts->toggle_direction, i))
			direction = (ts->direction == WRITE) ? READ : WRITE;
		else
			direction = ts->direction;

		/* use randomly generated requests */
		if (ts->rnd_req && utd->random_test_seed != 0)
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				ts->td->start_sector, &start_sec, &num_bios);

		ret = test_iosched_add_wr_rd_test_req(0, direction, start_sec,
					num_bios, TEST_PATTERN_5A,
					scenario_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}

		/*
		 * We want to run the queue every run_q requests, or when
		 * the request pool is exhausted.
		 */
		if (ts->td->dispatched_count >= QUEUE_MAX_REQUESTS ||
				(ts->run_q && !(i % ts->run_q)))
			blk_run_queue(ts->td->req_q);
	}

	blk_run_queue(ts->td->req_q);
	ufs_test_thread_complete(ret);
}
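The (void *data, async_cookie_t cookie) signature matches the kernel's async infrastructure (linux/async.h), so a caller can fan several scenarios out to concurrent threads. A hedged usage sketch, assuming the test_scenario objects are already populated:

#include <linux/async.h>

/* Launch two prepared scenarios concurrently; async_synchronize_full()
 * blocks until all outstanding async work, including both scenarios,
 * has returned. */
static void ufs_test_run_scenarios_in_parallel(struct test_scenario *ts_a,
					       struct test_scenario *ts_b)
{
	async_schedule(ufs_test_run_scenario, ts_a);
	async_schedule(ufs_test_run_scenario, ts_b);
	async_synchronize_full();
}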
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request_queue *q = drive->queue;
	struct request *rq;
	int rc;

	timeout += jiffies;
	spin_lock_irq(&hwif->lock);
	if (drive->dev_flags & IDE_DFLAG_PARKED) {
		int reset_timer = time_before(timeout, drive->sleep);
		int start_queue = 0;

		drive->sleep = timeout;
		wake_up_all(&ide_park_wq);
		if (reset_timer && del_timer(&hwif->timer))
			start_queue = 1;
		spin_unlock_irq(&hwif->lock);

		if (start_queue)
			blk_run_queue(q);
		return;
	}
	spin_unlock_irq(&hwif->lock);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd[0] = REQ_PARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->special = &timeout;
	rc = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (rc)
		goto out;

	/*
	 * Make sure that *some* command is sent to the drive after the
	 * timeout has expired, so power management will be reenabled.
	 */
	rq = blk_get_request(q, READ, GFP_NOWAIT);
	if (unlikely(!rq))
		goto out;

	rq->cmd[0] = REQ_UNPARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);

out:
	return;
}
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request_queue *q = drive->queue;
	struct request *rq;
	int rc;

	timeout += jiffies;
	spin_lock_irq(&hwif->lock);
	if (drive->dev_flags & IDE_DFLAG_PARKED) {
		int reset_timer = time_before(timeout, drive->sleep);
		int start_queue = 0;

		drive->sleep = timeout;
		wake_up_all(&ide_park_wq);
		if (reset_timer && del_timer(&hwif->timer))
			start_queue = 1;
		spin_unlock_irq(&hwif->lock);

		if (start_queue)
			blk_run_queue(q);
		return;
	}
	spin_unlock_irq(&hwif->lock);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd[0] = REQ_PARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->special = &timeout;
	rc = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (rc)
		goto out;

	/*
	 * Make sure that *some* command is sent to the drive after the
	 * timeout has expired, so power management will be reenabled.
	 */
	rq = blk_get_request(q, READ, GFP_NOWAIT);
	if (IS_ERR(rq))
		goto out;

	rq->cmd[0] = REQ_UNPARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);

out:
	return;
}
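The two copies of issue_park_cmd() above differ only in kernel-version details: REQ_TYPE_SPECIAL was later renamed REQ_TYPE_DRV_PRIV, and blk_get_request() changed from returning NULL on failure to returning an ERR_PTR(). Out-of-tree code that must build against both conventions sometimes hedges the failure check; a sketch:

	rq = blk_get_request(q, READ, GFP_NOWAIT);
	if (IS_ERR_OR_NULL(rq))	/* tolerates both failure conventions */
		goto out;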
/**
 * run_long_seq_test - main function for long sequential test
 * @td: test specific data
 *
 * This function is used to fill up (and keep full) the test queue with
 * requests. There are two scenarios this function works with:
 * 1. Only read/write (STAGE_1 or no stage)
 * 2. Simultaneous read and write to the same LBAs (STAGE_2)
 */
static int run_long_seq_test(struct test_data *td)
{
	int ret = 0;
	int direction;
	static unsigned int inserted_requests;
	u32 sector;

	BUG_ON(!td);
	sector = td->start_sector;
	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
		td->test_count = 0;
		utd->completed_req_count = 0;
		inserted_requests = 0;
	}

	/* Set the direction */
	switch (td->test_info.testcase) {
	case UFS_TEST_LONG_SEQUENTIAL_READ:
		direction = READ;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
	default:
		direction = WRITE;
	}
	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     LONG_SEQ_TEST_NUM_REQS, td->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity,
		 * therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			blk_run_queue(td->req_q);
			continue;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction, sector,
				TEST_MAX_BIOS_PER_REQ, TEST_PATTERN_5A,
				long_seq_test_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
			ret = test_iosched_add_wr_rd_test_req(0, READ, sector,
					TEST_MAX_BIOS_PER_REQ, TEST_PATTERN_5A,
					long_seq_test_free_end_io_fn);
			if (ret) {
				pr_err("%s: failed to create request",
				       __func__);
				break;
			}
			inserted_requests++;
		}
		/* NUM_OF_BLOCK * (BLOCK_SIZE / SECTOR_SIZE) */
		sector += TEST_MAX_BIOS_PER_REQ * (PAGE_SIZE /
				td->req_q->limits.logical_block_size);

	} while (inserted_requests < LONG_SEQ_TEST_NUM_REQS);

	/* in this case the queue will not run in the above loop */
	if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
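The STAGE2 branch only does something if a caller invokes run_long_seq_test() twice with utd->test_stage flipped in between, so the second pass interleaves a read per write over the same LBAs while the counters carry over. A hypothetical driver for the mixed case (the STAGE1 enumerator name is an assumption):

static int run_long_seq_mixed_test(struct test_data *td)
{
	int ret;

	/* Stage 1: plain sequential writes lay down TEST_PATTERN_5A. */
	utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE1;
	ret = run_long_seq_test(td);
	if (ret)
		return ret;

	/* Stage 2: same LBAs again, now with a read added per write;
	 * test_count and inserted_requests deliberately carry over. */
	utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2;
	return run_long_seq_test(td);
}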
/**
 * run_long_test - main function for long sequential test
 * @td: test specific data
 *
 * This function is used to fill up (and keep full) the test queue with
 * requests. There are two scenarios this function works with:
 * 1. Only read/write (STAGE_1 or no stage)
 * 2. Simultaneous read and write to the same LBAs (STAGE_2)
 */
static int run_long_test(struct test_data *td)
{
	int ret = 0;
	int direction, long_test_num_requests, num_bios_per_request;
	static unsigned int inserted_requests;
	u32 sector, seed, num_bios, seq_sector_delta;

	BUG_ON(!td);
	sector = td->start_sector;
	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
		td->test_count = 0;
		utd->completed_req_count = 0;
		inserted_requests = 0;
	}

	/* Set test parameters */
	switch (td->test_info.testcase) {
	case UFS_TEST_LONG_RANDOM_READ:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_RANDOM_WRITE:
		num_bios_per_request = 1;
		long_test_num_requests = LONG_RAND_TEST_NUM_REQS;
		direction = WRITE;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_READ:
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = READ;
		break;
	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
	default:
		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
		long_test_num_requests = LONG_SEQ_TEST_NUM_REQS;
		direction = WRITE;
	}

	seq_sector_delta = num_bios_per_request * (TEST_BIO_SIZE / SECTOR_SIZE);

	seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;

	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     long_test_num_requests, td->wr_rd_next_req_id);

	do {
		/*
		 * Since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity,
		 * therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the block
		 * layer to fetch them.
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			blk_run_queue(td->req_q);
			continue;
		}

		switch (td->test_info.testcase) {
		case UFS_TEST_LONG_SEQUENTIAL_READ:
		case UFS_TEST_LONG_SEQUENTIAL_WRITE:
		case UFS_TEST_LONG_SEQUENTIAL_MIXED:
			/* don't need to increment on the first iteration */
			if (inserted_requests)
				sector += seq_sector_delta;
			break;
		case UFS_TEST_LONG_RANDOM_READ:
		case UFS_TEST_LONG_RANDOM_WRITE:
			pseudo_rnd_sector_and_size(&seed, td->start_sector,
						   &sector, &num_bios);
			break;
		default:
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction, sector,
				num_bios_per_request, TEST_PATTERN_5A,
				long_test_free_end_io_fn);
		if (ret) {
			pr_err("%s: failed to create request", __func__);
			break;
		}
		inserted_requests++;
		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
			ret = test_iosched_add_wr_rd_test_req(0, READ, sector,
					num_bios_per_request, TEST_PATTERN_5A,
					long_test_free_end_io_fn);
			if (ret) {
				pr_err("%s: failed to create request",
				       __func__);
				break;
			}
			inserted_requests++;
		}

	} while (inserted_requests < long_test_num_requests);

	/* in this case the queue will not run in the above loop */
	if (long_test_num_requests < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
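pseudo_rnd_sector_and_size() is central to the random variants but not shown here. A minimal sketch of a reproducible implementation, assuming a plain linear congruential step; the clamping constant TEST_AREA_SECTORS and the helper name pseudo_random() are hypothetical:

/* Hypothetical LCG step: any fixed multiplier/increment pair works,
 * as long as the sequence is reproducible from the seed. */
static unsigned int pseudo_random(unsigned int *seed)
{
	*seed = *seed * 1103515245 + 12345;
	return *seed;
}

static void pseudo_rnd_sector_and_size(unsigned int *seed,
				       unsigned int min_start_sector,
				       unsigned int *start_sector,
				       unsigned int *num_of_bios)
{
	/* Keep the draw inside the test area, past its first sector. */
	*start_sector = min_start_sector +
			(pseudo_random(seed) % TEST_AREA_SECTORS);
	*num_of_bios = (pseudo_random(seed) % TEST_MAX_BIOS_PER_REQ) + 1;
}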
static int ufs_test_run_data_integrity_test(struct test_data *td)
{
	int ret = 0;
	int i, j;
	unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
	struct request_queue *q = td->req_q;
	int sectors[QUEUE_MAX_REQUESTS] = {0};

	start_sec = td->start_sector;
	utd->queue_complete = false;

	if (utd->random_test_seed != 0) {
		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
	} else {
		num_bios = DEFAULT_NUM_OF_BIOS;
		utd->random_test_seed = MAGIC_SEED;
	}

	/* Adding write requests */
	pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, td->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		/* make sure that we didn't draw the same start_sector twice */
		while (retries--) {
			pseudo_rnd_sector_and_size(&utd->random_test_seed,
				td->start_sector, &start_sec, &num_bios);
			sectors[i] = start_sec;
			for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
				/* just increment j */;
			if (j == i)
				break;
		}
		if (j != i) {
			/* ran out of retries without a unique start_sector */
			pr_err("%s: too many unlucky start_sector draw retries",
			       __func__);
			return -EINVAL;
		}
		retries = NUM_UNLUCKY_RETRIES;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE, start_sec, 1, i,
						      long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a write request", __func__);
			return ret;
		}
	}

	/* wait for all the write requests to finish */
	blk_run_queue(q);
	wait_event(utd->wait_q, utd->queue_complete);

	/* Adding read requests */
	pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
		     QUEUE_MAX_REQUESTS, td->wr_rd_next_req_id);

	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
		ret = test_iosched_add_wr_rd_test_req(0, READ, sectors[i],
				1, i, long_test_free_end_io_fn);

		if (ret) {
			pr_err("%s: failed to add a read request", __func__);
			return ret;
		}
	}

	blk_run_queue(q);

	return ret;
}
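utd->queue_complete plays the same role for this batch test that utd->write_completed plays for the single write/read pair earlier: the completion path flips it once the last outstanding request drains. A hedged sketch of that check, reusing the dispatched_count field seen in ufs_test_run_scenario() above (the helper name is hypothetical):

/* Hypothetical tail of the end_io path for this test: wake the
 * wait_event() callers once the whole batch has completed. */
static void ufs_test_check_queue_complete(struct test_data *ptd)
{
	if (ptd->dispatched_count > 0)
		return;

	utd->queue_complete = true;
	wake_up(&utd->wait_q);
}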