static struct gendisk *ufs_test_get_rq_disk(void) { struct request_queue *req_q = test_iosched_get_req_queue(); struct scsi_device *sd; struct device *dev; struct scsi_disk *sdkp; struct gendisk *gd; if (!req_q) { test_pr_info("%s: Could not fetch request_queue", __func__); gd = NULL; goto exit; } sd = (struct scsi_device *)req_q->queuedata; dev = &sd->sdev_gendev; sdkp = scsi_disk_get_from_dev(dev); if (!sdkp) { test_pr_info("%s: Could not fatch scsi disk", __func__); gd = NULL; goto exit; } gd = sdkp->disk; exit: return gd; }
static int ufs_test_run_write_read_test(struct test_data *td) { int ret = 0; unsigned int start_sec; unsigned int num_bios; struct request_queue *q = td->req_q; start_sec = td->start_sector + sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios; if (utd->random_test_seed != 0) ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios); else num_bios = DEFAULT_NUM_OF_BIOS; /* Adding a write request */ test_pr_info( "%s: Adding a write request with %d bios to Q, req_id=%d" , __func__, num_bios, td->wr_rd_next_req_id); utd->write_completed = false; ret = test_iosched_add_wr_rd_test_req(0, WRITE, start_sec, num_bios, TEST_PATTERN_5A, ufs_test_write_read_test_end_io_fn); if (ret) { test_pr_err("%s: failed to add a write request", __func__); return ret; } /* waiting for the write request to finish */ blk_run_queue(q); wait_event(utd->wait_q, utd->write_completed); /* Adding a read request*/ test_pr_info("%s: Adding a read request to Q", __func__); ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, num_bios, TEST_PATTERN_5A, NULL); if (ret) { test_pr_err("%s: failed to add a read request", __func__); return ret; } blk_run_queue(q); return ret; }
/*
 * ufs_test_write_read_test_open_cb() - debugfs open hook for the
 * write/read test node.
 *
 * Exposes the inode's private data through the file handle and re-arms
 * the one-shot message flag.
 *
 * Return: always 0.
 */
static int ufs_test_write_read_test_open_cb(struct inode *inode,
		struct file *file)
{
	message_repeat = 1;
	file->private_data = inode->i_private;
	test_pr_info("%s:UFS test initialized", __func__);
	return 0;
}
/*
 * long_sequential_write_test_write() - debugfs write handler that runs the
 * long sequential write test @number times.
 *
 * The user-supplied buffer holds the iteration count; non-positive or
 * unparsable input defaults to a single iteration.
 *
 * Return: @count on completion (even if an iteration failed — the per-cycle
 * error only stops further cycles), matching the original contract.
 *
 * NOTE(review): @buf is a __user pointer parsed directly with sscanf();
 * this relies on the debugfs buffer being kernel-accessible — consider
 * simple_write_to_buffer()/kstrtoint. TODO confirm.
 */
static ssize_t long_sequential_write_test_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	int ret = 0;
	int i;
	int number = -1;
	unsigned long mtime, byte_count;

	test_pr_info("%s: -- UFS Long Sequential Write TEST --", __func__);

	/* fix: check the sscanf match count instead of ignoring it */
	if (sscanf(buf, "%d", &number) != 1 || number <= 0)
		number = 1;

	memset(&utd->test_info, 0, sizeof(struct test_info));

	utd->test_info.data = utd;
	utd->test_info.get_rq_disk_fn = ufs_test_get_rq_disk;
	utd->test_info.get_test_case_str_fn = ufs_test_get_test_case_str;
	utd->test_info.run_test_fn = run_long_seq_test;
	utd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		utd->test_info.test_byte_count = 0;
		ret = test_iosched_start_test(&utd->test_info);
		if (ret)
			break;

		mtime = ktime_to_ms(utd->test_info.test_duration);
		byte_count = utd->test_info.test_byte_count;

		long_seq_test_calc_throughput(mtime, byte_count);

		/* Allow FS requests to be dispatched */
		msleep(1000);
	}

	return count;
}
/*
 * long_seq_test_calc_throughput() - log MiB/sec throughput with one decimal
 * digit of precision.
 * @mtime:	test duration in milliseconds
 * @byte_count:	bytes transferred during the test
 */
void long_seq_test_calc_throughput(unsigned long mtime,
		unsigned long byte_count)
{
	unsigned long fraction, integer;

	test_pr_info("%s: time is %lu msec, size is %lu.%lu MiB",
		__func__, mtime, LONG_TEST_SIZE_INTEGER(byte_count),
		LONG_TEST_SIZE_FRACTION(byte_count));

	/* multiply first so the division below keeps one decimal digit */
	mtime *= MB_MSEC_RATIO_APPROXIMATION;

	/* fix: a sub-millisecond duration would otherwise divide by zero */
	if (!mtime) {
		test_pr_err("%s: duration is 0 msec, cannot compute throughput",
			__func__);
		return;
	}

	fraction = integer = (byte_count * 10) / mtime;
	integer /= 10;
	fraction -= integer * 10;

	test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n", __func__, integer,
		fraction);
}
/*
 * run_long_seq_test() - insert LONG_SEQ_TEST_NUM_REQS sequential requests
 * and dispatch them in batches of at most QUEUE_MAX_REQUESTS.
 *
 * Direction is READ for TEST_LONG_SEQUENTIAL_READ, WRITE otherwise.
 * test_byte_count is accumulated per inserted request so the caller can
 * compute throughput afterwards.
 *
 * Return: 0 on success, error code from request insertion otherwise.
 */
static int run_long_seq_test(struct test_data *td)
{
	int ret = 0;
	int direction;
	/* static: persists across runs, but reset below before each run */
	static unsigned int inserted_requests;

	BUG_ON(!td);
	td->test_count = 0;
	utd->completed_req_count = 0;
	inserted_requests = 0;

	if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_READ)
		direction = READ;
	else
		direction = WRITE;

	test_pr_info("%s: Adding %d requests, first req_id=%d", __func__,
		     LONG_SEQ_TEST_NUM_REQS, td->wr_rd_next_req_id);

	do {
		/*
		 * since our requests come from a pool containing 128
		 * requests, we don't want to exhaust this quantity,
		 * therefore we add up to QUEUE_MAX_REQUESTS (which
		 * includes a safety margin) and then call the mmc layer
		 * to fetch them
		 */
		if (td->test_count >= QUEUE_MAX_REQUESTS) {
			/* drain the queued batch, then retry insertion */
			blk_run_queue(td->req_q);
			continue;
		}

		ret = test_iosched_add_wr_rd_test_req(0, direction,
			td->start_sector, TEST_MAX_BIOS_PER_REQ,
			TEST_PATTERN_5A, long_seq_test_free_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to create request" , __func__);
			break;
		}
		inserted_requests++;
		td->test_info.test_byte_count +=
			(TEST_MAX_BIOS_PER_REQ * sizeof(unsigned int) *
			BIO_U32_SIZE);
	} while (inserted_requests < LONG_SEQ_TEST_NUM_REQS);

	/* in this case the queue will not run in the above loop */
	if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS)
		blk_run_queue(td->req_q);

	return ret;
}
/*
 * long_seq_test_calc_throughput() - report throughput in MiB/sec with a
 * single fractional digit.
 * @mtime:	elapsed test time in milliseconds
 * @byte_count:	total bytes moved by the test
 */
void long_seq_test_calc_throughput(unsigned long mtime,
		unsigned long byte_count)
{
	unsigned long fraction, integer;

	test_pr_info("%s: time is %lu msec, size is %lu.%lu MiB",
		__func__, mtime, LONG_TEST_SIZE_INTEGER(byte_count),
		LONG_TEST_SIZE_FRACTION(byte_count));

	/* we first multiply in order not to lose precision */
	mtime *= MB_MSEC_RATIO_APPROXIMATION;

	/* fix: guard against dividing by a zero duration */
	if (!mtime) {
		test_pr_err("%s: duration is 0 msec, cannot compute throughput",
			__func__);
		return;
	}

	/*
	 * divide values to get a MiB/sec integer value with one
	 * digit of precision
	 */
	fraction = integer = (byte_count * 10) / mtime;
	integer /= 10;
	/* and calculate the MiB value fraction */
	fraction -= integer * 10;

	test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n", __func__, integer,
		fraction);
}
/*
 * ufs_test_write_read_test_end_io_fn() - completion callback for the
 * write half of the write/read test.
 *
 * Records the result on the test request, then signals the waiter in
 * ufs_test_run_write_read_test() via utd->write_completed / utd->wait_q.
 */
static void ufs_test_write_read_test_end_io_fn(struct request *rq, int err)
{
	struct test_request *trq;

	trq = (struct test_request *)rq->elv.priv[0];
	BUG_ON(!trq);

	/* publish the result before flagging completion */
	trq->req_result = err;
	trq->req_completed = 1;

	test_pr_info("%s: request %d completed, err=%d",
		     __func__, trq->req_id, err);

	utd->write_completed = true;
	wake_up(&utd->wait_q);
}
/*
 * ufs_test_write_read_test_write_cb() - debugfs write handler that runs
 * the write/read test @number times.
 *
 * Parses the iteration count from the user buffer; unparsable or
 * non-positive input defaults to one iteration.
 *
 * Return: @count on success, or the first failing iteration's error code.
 *
 * NOTE(review): @buf is a __user pointer parsed directly with sscanf();
 * confirm the debugfs buffer is kernel-accessible here, or switch to
 * simple_write_to_buffer()/kstrtoint.
 */
static ssize_t ufs_test_write_read_test_write_cb(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	int ret = 0;
	int i;
	/* fix: was uninitialized — a failed sscanf left garbage in it */
	int number = 0;

	if (sscanf(buf, "%d", &number) != 1 || number <= 0)
		number = 1;

	test_pr_info("%s:the test will run for %d iterations.",
			__func__, number);

	memset(&utd->test_info, 0, sizeof(struct test_info));

	/* Initializing test */
	utd->test_info.data = utd;
	utd->test_info.get_test_case_str_fn = ufs_test_get_test_case_str;
	utd->test_info.testcase = UFS_TEST_WRITE_READ_TEST;
	utd->test_info.get_rq_disk_fn = ufs_test_get_rq_disk;
	utd->test_info.run_test_fn = ufs_test_run_write_read_test;

	/* Running the test multiple times */
	for (i = 0; i < number; ++i) {
		ret = test_iosched_start_test(&utd->test_info);
		if (ret) {
			test_pr_err("%s: Test failed.", __func__);
			return ret;
		}
	}

	test_pr_info("%s: Completed all the ufs test iterations.", __func__);

	return count;
}
static int run_long_seq_test(struct test_data *td) { int ret = 0; int direction; static unsigned int inserted_requests; BUG_ON(!td); td->test_count = 0; utd->completed_req_count = 0; inserted_requests = 0; if (td->test_info.testcase == TEST_LONG_SEQUENTIAL_READ) direction = READ; else direction = WRITE; test_pr_info("%s: Adding %d requests, first req_id=%d", __func__, LONG_SEQ_TEST_NUM_REQS, td->wr_rd_next_req_id); do { if (td->test_count >= QUEUE_MAX_REQUESTS) { blk_run_queue(td->req_q); continue; } ret = test_iosched_add_wr_rd_test_req(0, direction, td->start_sector, TEST_MAX_BIOS_PER_REQ, TEST_PATTERN_5A, long_seq_test_free_end_io_fn); if (ret) { test_pr_err("%s: failed to create request" , __func__); break; } inserted_requests++; td->test_info.test_byte_count += (TEST_MAX_BIOS_PER_REQ * sizeof(unsigned int) * BIO_U32_SIZE); } while (inserted_requests < LONG_SEQ_TEST_NUM_REQS); if (LONG_SEQ_TEST_NUM_REQS < QUEUE_MAX_REQUESTS) blk_run_queue(td->req_q); return ret; }