Example #1
void
split_test(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	void			*payload;
	uint64_t		lba, cmd_lba;
	uint32_t		lba_count, cmd_lba_count;
	int			rc;
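
	/*
	 * A single-block read is well below the controller's 128 KB max
	 * transfer size, so the request should not be split (no children).
	 */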

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
	payload = malloc(512);
	lba = 0;
	lba_count = 1;

	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);

	CU_ASSERT(rc == 0);
	CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 0);
	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(cmd_lba == lba);
	CU_ASSERT(cmd_lba_count == lba_count);

	free(payload);
	nvme_free_request(g_request);
}
Example #2
void
test_nvme_ns_cmd_deallocate(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	nvme_cb_fn_t		cb_fn = NULL;
	void			*cb_arg = NULL;
	uint8_t			num_ranges = 1;
	void			*payload = NULL;
	int			rc = 0;
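
	/*
	 * A valid deallocate builds a Dataset Management command: cdw10 holds
	 * the 0-based number of ranges and cdw11 selects the deallocate
	 * attribute. A request with zero ranges should be rejected.
	 */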

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
	payload = malloc(num_ranges * sizeof(struct nvme_dsm_range));

	nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
	CU_ASSERT(g_request->cmd.opc == NVME_OPC_DATASET_MANAGEMENT);
	CU_ASSERT(g_request->cmd.nsid == ns.id);
	CU_ASSERT(g_request->cmd.cdw10 == num_ranges - 1);
	CU_ASSERT(g_request->cmd.cdw11 == NVME_DSM_ATTR_DEALLOCATE);
	free(payload);
	nvme_free_request(g_request);

	payload = NULL;
	num_ranges = 0;
	rc = nvme_ns_cmd_deallocate(&ns, payload, num_ranges, cb_fn, cb_arg);
	CU_ASSERT(rc != 0);
}
Example #3
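/*
 * Split an oversized request into child requests of at most
 * sectors_per_max_io blocks each, ending each child on a sector_mask
 * (stripe) boundary. On allocation failure the parent request is freed
 * and NULL is returned.
 */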
static struct nvme_request *
_nvme_ns_cmd_split_request(struct spdk_nvme_ns *ns,
			   const struct nvme_payload *payload,
			   uint64_t lba, uint32_t lba_count,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
			   uint32_t io_flags, struct nvme_request *req,
			   uint32_t sectors_per_max_io, uint32_t sector_mask)
{
	uint32_t		sector_size = ns->sector_size;
	uint32_t		remaining_lba_count = lba_count;
	uint32_t		offset = 0;
	struct nvme_request	*child;

	while (remaining_lba_count > 0) {
		lba_count = sectors_per_max_io - (lba & sector_mask);
		lba_count = nvme_min(remaining_lba_count, lba_count);

		child = _nvme_ns_cmd_rw(ns, payload, lba, lba_count, cb_fn,
					cb_arg, opc, io_flags);
		if (child == NULL) {
			nvme_free_request(req);
			return NULL;
		}
		child->payload_offset = offset;
		nvme_request_add_child(req, child);
		remaining_lba_count -= lba_count;
		lba += lba_count;
		offset += lba_count * sector_size;
	}

	return req;
}
Example #4
void
split_test3(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	struct nvme_request	*child;
	void			*payload;
	uint64_t		lba, cmd_lba;
	uint32_t		lba_count, cmd_lba_count;
	int			rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks).
	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
	 * into two I/Os:
	 *  1) LBA = 10, count = 256 blocks
	 *  2) LBA = 266, count = 256 blocks
	 */

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
	payload = malloc(256 * 1024);
	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
	lba_count = (256 * 1024) / 512;

	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);

	CU_ASSERT(rc == 0);
	CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT_FATAL(g_request->num_children == 2);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 10);
	CU_ASSERT(cmd_lba_count == 256);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 266);
	CU_ASSERT(cmd_lba_count == 256);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
}
Example #5
void
split_test2(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	struct nvme_request	*child;
	void			*payload;
	uint64_t		lba, cmd_lba;
	uint32_t		lba_count, cmd_lba_count;
	int			rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks).
	 * Submit an I/O of 256 KB starting at LBA 0, which should be split
	 * on the max I/O boundary into two I/Os of 128 KB.
	 */

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);
	payload = malloc(256 * 1024);
	lba = 0;
	lba_count = (256 * 1024) / 512;

	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);

	CU_ASSERT(rc == 0);
	CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT(g_request->num_children == 2);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 0);
	CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 256);
	CU_ASSERT(cmd_lba_count == 256);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
}
Example #6
void
test_nvme_ns_cmd_flush(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	nvme_cb_fn_t		cb_fn = NULL;
	void			*cb_arg = NULL;

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 0);

	nvme_ns_cmd_flush(&ns, cb_fn, cb_arg);
	CU_ASSERT(g_request->cmd.opc == NVME_OPC_FLUSH);
	CU_ASSERT(g_request->cmd.nsid == ns.id);

	nvme_free_request(g_request);
}
Example #7
File: nvme_qpair_ut.c  Project: spdk/spdk
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}
Example #8
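/*
 * Completion callback for a child request: detach it from the parent,
 * record any error status, and complete the parent once its last child
 * has finished.
 */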
static void
nvme_cb_complete_child(void *child_arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_request *child = child_arg;
	struct nvme_request *parent = child->parent;

	parent->num_children--;
	TAILQ_REMOVE(&parent->children, child, child_tailq);

	if (spdk_nvme_cpl_is_error(cpl)) {
		memcpy(&parent->parent_status, cpl, sizeof(*cpl));
	}

	if (parent->num_children == 0) {
		if (parent->cb_fn) {
			parent->cb_fn(parent->cb_arg, &parent->parent_status);
		}
		nvme_free_request(parent);
	}
}
Example #9
void
test3(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(NULL, 0, expected_success_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);
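
	/* Submitting a single request should advance sq_tail from 0 to 1. */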

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 1);

	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
Example #10
void
split_test4(void)
{
	struct nvme_namespace	ns;
	struct nvme_controller	ctrlr;
	struct nvme_request	*child;
	void			*payload;
	uint64_t		lba, cmd_lba;
	uint32_t		lba_count, cmd_lba_count;
	int			rc;

	/*
	 * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
	 * (Same as split_test3 except with driver-assisted striping enabled.)
	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
	 * into three I/Os:
	 *  1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
	 *  2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
	 *  3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
	 */

	prepare_for_test(&ns, &ctrlr, 512, 128 * 1024, 128 * 1024);
	payload = malloc(256 * 1024);
	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
	lba_count = (256 * 1024) / 512;

	rc = nvme_ns_cmd_read(&ns, payload, lba, lba_count, NULL, NULL);

	CU_ASSERT(rc == 0);
	CU_ASSERT_FATAL(g_request != NULL);

	CU_ASSERT_FATAL(g_request->num_children == 3);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == (256 - 10) * 512);
	CU_ASSERT(cmd_lba == 10);
	CU_ASSERT(cmd_lba_count == 256 - 10);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 128 * 1024);
	CU_ASSERT(cmd_lba == 256);
	CU_ASSERT(cmd_lba_count == 256);

	child = TAILQ_FIRST(&g_request->children);
	TAILQ_REMOVE(&g_request->children, child, child_tailq);
	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
	CU_ASSERT(child->num_children == 0);
	CU_ASSERT(child->payload_size == 10 * 512);
	CU_ASSERT(cmd_lba == 512);
	CU_ASSERT(cmd_lba_count == 10);

	CU_ASSERT(TAILQ_EMPTY(&g_request->children));

	free(payload);
	nvme_free_request(g_request);
}
Example #11
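/*
 * Drain the host completion queue: match each completion entry with its
 * pending request, forward guest-originated completions to the guest
 * completion queue (rewriting the command ID, phase bit, and queue head),
 * and acknowledge host-internal requests directly.
 */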
static void
process_comp_queue (struct nvme_host *host,
		    u16 comp_queue_id,
		    struct nvme_queue_info *h_comp_queue_info,
		    struct nvme_queue_info *g_comp_queue_info)
{
	struct nvme_request_hub *hub;
	hub = host->h_queue.request_hub[comp_queue_id];

	u16 h_cur_head = h_comp_queue_info->cur_pos.head;
	u16 g_cur_head = g_comp_queue_info->cur_pos.head;

	struct nvme_comp first_h_comp = {0}, *first_g_comp = NULL;

	struct nvme_comp *h_comp, *g_comp;
	for (h_comp = nvme_comp_queue_at_idx (h_comp_queue_info, h_cur_head),
	     g_comp = nvme_comp_queue_at_idx (g_comp_queue_info, g_cur_head);
	     NVME_COMP_GET_PHASE (h_comp) == h_comp_queue_info->phase;
	     h_comp = nvme_comp_queue_at_idx (h_comp_queue_info, h_cur_head),
	     g_comp = nvme_comp_queue_at_idx (g_comp_queue_info, g_cur_head)) {

		/* The queue ID in the completion entry is a submission queue ID */
		u16 subm_queue_id = h_comp->queue_id;

		struct nvme_request *req;
		req = get_request (host, hub, subm_queue_id, h_comp->cmd_id);

		ASSERT (req);

		u64 time_taken = get_time () - req->submit_time;
		if (time_taken > NVME_TIME_TAKEN_WATERMARK) {
			printf ("Long time controller response: %llu\n",
				time_taken);
			printf ("Submission Queue ID: %u opcode: %u\n",
				subm_queue_id, req->cmd.std.opcode);
		}

		if (subm_queue_id == 0)
			process_admin_comp (host, h_comp, req);
		else
			process_io_comp (host, h_comp, req);

		h_cur_head++;

		if (h_cur_head >= h_comp_queue_info->n_entries) {
			h_comp_queue_info->phase ^= 1;
			h_cur_head = 0;
		}

		if (!req->is_h_req) {
			struct nvme_comp comp = *h_comp;
			comp.cmd_id = req->orig_cmd_id;
			comp.status &= ~0x1;
			comp.status |= g_comp_queue_info->phase;

			/*
			 * Replace the value reported by the controller with the
			 * host's value. This is necessary when guest commands and
			 * host commands share the same queues.
			 */
			comp.queue_head = g_subm_cur_tail (host,
							   subm_queue_id);

			if (first_g_comp) {
				*g_comp = comp;
			} else {
				/* Copy the first completion entry later */
				first_g_comp = g_comp;
				first_h_comp = comp;
			}

			g_cur_head++;
			if (g_cur_head >= g_comp_queue_info->n_entries) {
				g_comp_queue_info->phase ^= 1;
				g_cur_head = 0;
			}

			spinlock_lock (&hub->lock);
			g_comp_queue_info->cur_pos.head = g_cur_head;
			h_comp_queue_info->cur_pos.head = h_cur_head;
			spinlock_unlock (&hub->lock);
		} else {
			spinlock_lock (&hub->lock);
			nvme_write_comp_db (host, comp_queue_id, h_cur_head);
			hub->n_not_ack_h_reqs--;
			h_comp_queue_info->cur_pos.head = h_cur_head;
			spinlock_unlock (&hub->lock);
		}

		nvme_free_request (hub, req);
	}

	if (first_g_comp) {
		first_g_comp->cmd_specific = first_h_comp.cmd_specific;
		first_g_comp->rsvd = first_h_comp.rsvd;
		first_g_comp->queue_head = first_h_comp.queue_head;
		first_g_comp->queue_id = first_h_comp.queue_id;
		first_g_comp->cmd_id = first_h_comp.cmd_id;
		/*
		 * Make sure everything is stored in memory properly before we
		 * copy the status field. This is to avoid data corruption.
		 */
		cpu_sfence ();
		first_g_comp->status = first_h_comp.status;
	}
}
Example #12
File: nvme_qpair_ut.c  Project: spdk/spdk
static void
test_hw_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;
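
	/*
	 * A one-page write should produce a single SGL data-block descriptor;
	 * a maximum-size write should fill all NVME_MAX_SGL_DESCRIPTORS
	 * entries, with SGL1 pointing at the last segment.
	 */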

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
	CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 2023 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
		CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
		CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
		CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
	}
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
Example #13
File: nvme_qpair_ut.c  Project: spdk/spdk
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;
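
	/*
	 * Negative cases first: an unaligned payload offset or a failing
	 * next_sge callback must cause submission to fail without advancing
	 * sq_tail. The final case checks the PRP list built for a large
	 * transfer.
	 */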

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 1;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 15 | 0;
	req->payload_offset = 2;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 4095 | 0;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
Example #14
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct spdk_nvme_registers	regs = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 1;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(req->cmd.psdt == SPDK_NVME_PSDT_PRP);
	CU_ASSERT(req->cmd.dptr.prp.prp1 == 7);
	CU_ASSERT(req->cmd.dptr.prp.prp2 == 4096);

	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	LIST_REMOVE(sgl_tr, list);
	free(sgl_tr);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 2;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 33 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < 32; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		LIST_REMOVE(sgl_tr, list);
		free(sgl_tr);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}