Example #1
void
test4(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};
	char			payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	/* Force vtophys to return a failure.  This should
	 *  result in the nvme_qpair manually failing
	 *  the request with error status to signify
	 *  a bad payload buffer.
	 */
	fail_vtophys = true;
	outbuf[0] = '\0';

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 0);
	/* Assert that command/completion data was printed to the log. */
	CU_ASSERT(strlen(outbuf) > 0);

	cleanup_submit_request_test(&qpair);
}
Example #2
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
			    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn,
			    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries - 1) << 16) | io_que->id;
	/*
	 * 0x2 = interrupts enabled
	 * 0x1 = physically contiguous
	 */
	cmd->cdw11 = (io_que->id << 16) | 0x1;
	cmd->dptr.prp.prp1 = io_que->cpl_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
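The shift-and-or packing above follows the NVMe Create I/O Completion Queue layout: cdw10 carries a 0's-based queue size in bits 31:16 and the queue identifier in bits 15:0. A minimal standalone check of that arithmetic, illustrative only and not part of the driver:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t num_entries = 256;	/* hypothetical queue depth */
	uint16_t qid = 1;		/* hypothetical queue id */
	uint32_t cdw10 = ((num_entries - 1) << 16) | qid;

	/* QSIZE is 0's based, so 256 entries encode as 255 (0xff). */
	assert(cdw10 == 0x00ff0001);
	return 0;
}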
Example #3
void
test_ctrlr_failed(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};
	char			payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	/* Disable the queue and set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	qpair.is_enabled = false;
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	outbuf[0] = '\0';

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 0);
	/* Assert that command/completion data was printed to the log. */
	CU_ASSERT(strlen(outbuf) > 0);

	cleanup_submit_request_test(&qpair);
}
Example #4
static struct nvme_request *
_nvme_ns_cmd_rw(struct spdk_nvme_ns *ns, const struct nvme_payload *payload,
		uint64_t lba, uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t opc,
		uint32_t io_flags)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;
	uint64_t		*tmp_lba;
	uint32_t		sector_size;
	uint32_t		sectors_per_max_io;
	uint32_t		sectors_per_stripe;

	if (io_flags & 0xFFFF) {
		/* The bottom 16 bits must be empty */
		return NULL;
	}

	sector_size = ns->sector_size;
	sectors_per_max_io = ns->sectors_per_max_io;
	sectors_per_stripe = ns->sectors_per_stripe;

	req = nvme_allocate_request(payload, lba_count * sector_size, cb_fn, cb_arg);
	if (req == NULL) {
		return NULL;
	}

	/*
	 * Intel DC P3*00 NVMe controllers benefit from driver-assisted striping.
	 * If this controller defines a stripe boundary and this I/O spans a stripe
	 *  boundary, split the request into multiple requests and submit each
	 *  separately to hardware.
	 */
	if (sectors_per_stripe > 0 &&
	    (((lba & (sectors_per_stripe - 1)) + lba_count) > sectors_per_stripe)) {

		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
						  io_flags, req, sectors_per_stripe, sectors_per_stripe - 1);
	} else if (lba_count > sectors_per_max_io) {
		return _nvme_ns_cmd_split_request(ns, payload, lba, lba_count, cb_fn, cb_arg, opc,
						  io_flags, req, sectors_per_max_io, 0);
	} else {
		cmd = &req->cmd;
		cmd->opc = opc;
		cmd->nsid = ns->id;

		tmp_lba = (uint64_t *)&cmd->cdw10;
		*tmp_lba = lba;

		cmd->cdw12 = lba_count - 1;
		cmd->cdw12 |= io_flags;
	}

	return req;
}
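The stripe check above adds the starting offset within the current stripe, lba & (sectors_per_stripe - 1) (valid because the stripe size is a power of two), to the transfer length and splits the request if the sum spills past the stripe boundary. A worked example of that condition, illustrative only:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t sectors_per_stripe = 128;	/* hypothetical power-of-two stripe size */

	/* LBA 100, 64 sectors: (100 & 127) + 64 = 164 > 128, so the I/O is split. */
	assert(((100 & (sectors_per_stripe - 1)) + 64) > sectors_per_stripe);

	/* LBA 0, 64 sectors: 0 + 64 = 64 <= 128, so it goes out as a single request. */
	assert(((0 & (sectors_per_stripe - 1)) + 64) <= sectors_per_stripe);
	return 0;
}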
Example #5
struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
			     void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;

	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
}
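These examples never show the definition of struct nvme_payload, but the fields they touch (type, u.contig, and the u.sgl callbacks used in the tests further down) imply a tagged union. The following is a reconstructed sketch based only on those references; the callback signatures are placeholders and the real header may differ:

#include <stdint.h>

enum nvme_payload_type {
	NVME_PAYLOAD_TYPE_CONTIG,
	NVME_PAYLOAD_TYPE_SGL,
};

struct nvme_payload {
	enum nvme_payload_type	type;
	union {
		/* virtually contiguous buffer (nvme_allocate_request_contig) */
		void	*contig;
		/* scatter-gather callbacks (test_sgl_req / test_hw_sgl_req) */
		struct {
			void	(*reset_sgl_fn)(void *cb_arg, uint32_t offset);
			int	(*next_sge_fn)(void *cb_arg, void **address, uint32_t *length);
			void	*cb_arg;
		} sgl;
	} u;
};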
Example #6
void
nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ASYNC_EVENT_REQUEST;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #7
void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
		     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_ABORT;
	cmd->cdw10 = (cid << 16) | sqid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
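For the Abort command, cdw10 carries the submission queue identifier in bits 15:0 and the command identifier to abort in bits 31:16, which is exactly what (cid << 16) | sqid produces; aborting CID 5 on submission queue 2, for example, encodes as (5 << 16) | 2 = 0x00050002.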
Example #8
void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
			   uint32_t cdw11, void *payload, uint32_t payload_size,
			   nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #9
void
nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
    uint32_t nsid, struct nvme_health_information_page *payload,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(payload, sizeof(*payload), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = ((sizeof(*payload)/sizeof(uint32_t)) - 1) << 16;
	cmd->cdw10 |= NVME_LOG_HEALTH_INFORMATION;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #10
void
nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
			    uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
			    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(payload, payload_size, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_LOG_PAGE;
	cmd->nsid = nsid;
	cmd->cdw10 = ((payload_size / sizeof(uint32_t)) - 1) << 16;
	cmd->cdw10 |= log_page;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
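Both Get Log Page helpers encode the transfer length as NUMD, a 0's-based dword count, in bits 31:16 of cdw10, with the log page identifier in the low byte. A quick standalone check of that arithmetic for a 512-byte page, illustrative only (0x02 is the SMART / health information page identifier):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t payload_size = 512;	/* hypothetical log page size in bytes */
	uint8_t log_page = 0x02;	/* SMART / health information */
	uint32_t cdw10 = (((payload_size / sizeof(uint32_t)) - 1) << 16) | log_page;

	/* 512 bytes = 128 dwords, encoded 0's-based as 127 (0x7f). */
	assert(cdw10 == 0x007f0002);
	return 0;
}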
Example #11
int
nvme_ctrlr_cmd_io_raw(struct nvme_controller *ctrlr,
		      struct nvme_command *cmd,
		      void *buf, uint32_t len,
		      nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request(buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	nvme_ctrlr_submit_io_request(ctrlr, req);
	return 0;
}
Example #12
void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = io_que->id;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #13
void
nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
	void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(payload,
	    sizeof(struct nvme_namespace_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure.  CNS is left
	 *  at its zero default here, which selects the Identify Namespace
	 *  data structure for the nsid set below.
	 */
	cmd->nsid = nsid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #14
void
nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
	nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request(payload,
	    sizeof(struct nvme_controller_data), cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_IDENTIFY;

	/*
	 * TODO: create an identify command data structure, which
	 *  includes this CNS field in cdw10.  CNS = 1 selects the
	 *  Identify Controller data structure.
	 */
	cmd->cdw10 = 1;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #15
void
test3(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(NULL, 0, expected_success_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 1);

	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
Example #16
static void
test_hw_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
	CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 2023 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
		CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
		CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
		CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
	}
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
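In this test, cdw12 holds the 0's-based block count (NLB): assuming the usual 512-byte blocks (the test never states the block size), the first request's cdw12 = 7 describes 8 blocks = 4096 bytes, matching the PAGE_SIZE payload, and the second request's cdw12 = 2023 follows the same 0's-based pattern for the larger NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE payload.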
Example #17
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 1;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 15 | 0;
	req->payload_offset = 2;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 4095 | 0;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
Example #18
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct spdk_nvme_registers	regs = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 1;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(req->cmd.psdt == SPDK_NVME_PSDT_PRP);
	CU_ASSERT(req->cmd.dptr.prp.prp1 == 7);
	CU_ASSERT(req->cmd.dptr.prp.prp2 == 4096);

	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	LIST_REMOVE(sgl_tr, list);
	free(sgl_tr);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 2;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 33 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < 32; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		LIST_REMOVE(sgl_tr, list);
		free(sgl_tr);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}