Example No. 1
void
test_ctrlr_failed(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};
	char			payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	/* Disable the queue and set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	qpair.is_enabled = false;
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	outbuf[0] = '\0';

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 0);
	/* Assert that command/completion data was printed to log. */
	CU_ASSERT(strlen(outbuf) > 0);

	cleanup_submit_request_test(&qpair);
}
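
What "failed" means in the assertions above: because the controller is marked failed (and resetting, so the qpair is not re-enabled), the qpair completes the request in software with an error status instead of writing it to the hardware submission queue, which is why sq_tail stays at 0 while expected_failure_callback still runs. A minimal sketch of such a manual completion, using a hypothetical helper and the newer spdk_-prefixed type names (not the actual SPDK internals):

/* Hypothetical sketch: complete a request in software with an error status,
 * never touching the hardware submission queue. */
static void
ut_manual_fail_request(struct nvme_request *req)
{
	struct spdk_nvme_cpl cpl = {};

	cpl.status.sct = SPDK_NVME_SCT_GENERIC;
	cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;	/* any error code works here */

	if (req->cb_fn) {
		req->cb_fn(req->cb_arg, &cpl);	/* the callback sees an error completion */
	}
	nvme_free_request(req);
}
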
Example No. 2
	/*
	 * Only memset up to (but not including) the children
	 *  TAILQ_ENTRY.  children, and the members that follow it, are
	 *  only used as part of I/O splitting, so we avoid
	 *  memsetting them until they are actually needed.
	 *  They will be initialized in nvme_request_add_child()
	 *  if the request is split.
	 */
	memset(req, 0, offsetof(struct nvme_request, children));
	req->cb_fn = cb_fn;
	req->cb_arg = cb_arg;
	req->payload = *payload;
	req->payload_size = payload_size;
	req->pid = getpid();

	return req;
}
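
/* Illustration (assumption, not part of the original file): the
 * offsetof-bounded memset in nvme_allocate_request() zeroes only the
 * leading "hot" members. The same pattern on a hypothetical struct: */
struct ut_example_req {
	int	hot_a;		/* always used: zeroed on every allocation */
	void	*hot_b;		/* always used: zeroed on every allocation */
	long	cold[64];	/* rarely used: skipped by the memset below */
};

static void
ut_example_reset(struct ut_example_req *r)
{
	/* Zero every member before 'cold'; the cold array stays untouched. */
	memset(r, 0, offsetof(struct ut_example_req, cold));
}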

struct nvme_request *
nvme_allocate_request_contig(void *buffer, uint32_t payload_size, spdk_nvme_cmd_cb cb_fn,
			     void *cb_arg)
{
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;

	return nvme_allocate_request(&payload, payload_size, cb_fn, cb_arg);
}

struct nvme_request *
nvme_allocate_request_null(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return nvme_allocate_request_contig(NULL, 0, cb_fn, cb_arg);
}

void
nvme_free_request(struct nvme_request *req)
{
	spdk_mempool_put(_g_nvme_driver.request_mempool, req);
}

void
nvme_request_remove_child(struct nvme_request *parent,
			  struct nvme_request *child)
{
	parent->num_children--;
	TAILQ_REMOVE(&parent->children, child, child_tailq);
}
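
/* Illustration (assumption, not part of the original file): the TAILQ
 * macros used above come from sys/queue.h. A standalone sketch of the
 * same insert/remove pattern, with a hypothetical node type: */
struct ut_node {
	int			value;
	TAILQ_ENTRY(ut_node)	link;	/* embedded prev/next linkage */
};

TAILQ_HEAD(ut_node_list, ut_node);

static void
ut_tailq_demo(struct ut_node_list *list, struct ut_node *n)
{
	TAILQ_INIT(list);			/* start with an empty list */
	TAILQ_INSERT_HEAD(list, n, link);	/* O(1) insert at the head */
	TAILQ_REMOVE(list, n, link);		/* O(1) removal, no list walk */
}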

int
nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	/* TODO */
	return 0;
}

int32_t
nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	/* TODO */
	return 0;
}
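
/* Illustration (assumption, not in the original file): the stubs above
 * return success so the nvme_qpair logic under test is isolated from any
 * real transport. If a test needed to assert how often the transport was
 * invoked, an alternative stub could record calls in a global, e.g.: */
static uint32_t g_ut_submit_count;	/* hypothetical counter */

static int
ut_counting_submit_stub(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	g_ut_submit_count++;	/* a test can assert on this afterwards */
	return 0;
}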

static void
prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
			    struct spdk_nvme_ctrlr *ctrlr)
{
	memset(ctrlr, 0, sizeof(*ctrlr));
	ctrlr->free_io_qids = NULL;
	TAILQ_INIT(&ctrlr->active_io_qpairs);
	TAILQ_INIT(&ctrlr->active_procs);
	nvme_qpair_construct(qpair, 1, 128, ctrlr, 0);

	ut_fail_vtophys = false;
}

static void
cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
{
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
{
	struct nvme_request	*req;
	struct nvme_tracker 	*tr;
	struct spdk_nvme_cpl	*cpl;

	req = spdk_mempool_get(_g_nvme_driver.request_mempool);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	memset(req, 0, sizeof(*req));

	tr = TAILQ_FIRST(&qpair->free_tr);
	TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
	TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
	req->cmd.cid = tr->cid;
	tr->req = req;
	qpair->tr[tr->cid].active = true;

	cpl = &qpair->cpl[slot];
	cpl->status.p = qpair->phase;
	cpl->cid = tr->cid;
}
#endif

static void
expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
}

static void
expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
{
	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
}

static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}

#if 0 /* TODO: move to PCIe-specific unit test */
static void
test4(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Force vtophys to return a failure.  This should
	 *  result in the nvme_qpair manually failing
	 *  the request with error status to signify
	 *  a bad payload buffer.
	 */
	ut_fail_vtophys = true;

	CU_ASSERT(qpair.sq_tail == 0);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	CU_ASSERT(qpair.sq_tail == 0);

	cleanup_submit_request_test(&qpair);
}
#endif
Example No. 3
void
test4(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};
	char			payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(payload, sizeof(payload), expected_failure_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	/* Force vtophys to return a failure.  This should
	 *  result in the nvme_qpair manually failing
	 *  the request with error status to signify
	 *  a bad payload buffer.
	 */
	fail_vtophys = true;
	outbuf[0] = '\0';

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 0);
	/* Assert that command/completion data was printed to log. */
	CU_ASSERT(strlen(outbuf) > 0);

	cleanup_submit_request_test(&qpair);
}
Example No. 4
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}
Example No. 5
void
test3(void)
{
	struct nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct nvme_controller	ctrlr = {};
	struct nvme_registers	regs = {};

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request(NULL, 0, expected_success_callback, NULL);
	CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 1);

	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
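
Throughout these tests, sq_tail is the submission queue's tail index: it advances by one for each command actually written to the queue and wraps at the queue size, so a successful submit moves it from 0 to 1 while a rejected submit leaves it at 0. A minimal sketch of that ring arithmetic (illustrative only, not the SPDK implementation):

/* Illustrative ring-index update for a submission queue of num_entries slots. */
static uint16_t
ut_sq_tail_advance(uint16_t sq_tail, uint16_t num_entries)
{
	if (++sq_tail == num_entries) {
		sq_tail = 0;	/* wrap around to the start of the ring */
	}
	return sq_tail;
}
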
Example No. 6
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct nvme_tracker		*tr;
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct spdk_nvme_registers	regs = {};
	uint16_t			cid;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 1);

	/*
	 * Since sq_tail was 0 when the command was submitted, it is in cmd[0].
	 * Extract its command ID to retrieve its tracker.
	 */
	cid = qpair.cmd[0].cid;
	tr = qpair.act_tr[cid];
	SPDK_CU_ASSERT_FATAL(tr != NULL);

	/*
	 * Complete the tracker so that it is returned to the free list.
	 * This also frees the request.
	 */
	nvme_qpair_manual_complete_tracker(&qpair, tr, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS, 0,
					   false);

	cleanup_submit_request_test(&qpair);
}
Example No. 7
static void
test_ctrlr_failed(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};
	char				payload[4096];

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	/* Set the controller to failed.
	 * Set the controller to resetting so that the qpair won't get re-enabled.
	 */
	ctrlr.is_failed = true;
	ctrlr.is_resetting = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);

	cleanup_submit_request_test(&qpair);
}
Example No. 8
static void
test_hw_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
	CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
	CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 2023 | 0;
	req->payload_offset = 0;
	ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;

	nvme_qpair_submit_request(&qpair, req);

	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	CU_ASSERT(sgl_tr != NULL);
	for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
		CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
		CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
		CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
		CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
	}
	CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
	TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
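
The assertions above check the NVMe SGL layout: each entry in the tracker's sgl array is an unkeyed data block descriptor carrying a physical address and a byte length, and when the descriptor list no longer fits in the command itself (the NVME_MAX_SGL_DESCRIPTORS case), sgl1 in the command becomes a last-segment descriptor pointing at that list. A sketch of filling one data block descriptor, matching the fields asserted above (illustrative helper, not SPDK code):

/* Illustrative fill of one unkeyed SGL data block descriptor. */
static void
ut_fill_sgl_data_block(struct spdk_nvme_sgl_descriptor *desc, uint64_t phys_addr, uint32_t len)
{
	desc->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;	/* 0x0 */
	desc->generic.subtype = 0;				/* address subtype */
	desc->unkeyed.length = len;				/* bytes in this block */
	desc->address = phys_addr;				/* physical address of the data */
}
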
Example No. 9
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	req->payload_offset = 1;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 7 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 15 | 0;
	req->payload_offset = 2;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr);
	req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 4095 | 0;

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}
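
The final case above exercises the PRP path: prp1 holds the physical address of the first page and, because the transfer spans more than two pages, the remaining page addresses go into a PRP list (the tracker's u.prp array, which prp2 points at). The stubbed payload here evidently starts at physical address 0, which is why prp1 is 0 and entry i of the list is PAGE_SIZE * (i + 1). A minimal sketch of that layout for a page-aligned, physically contiguous buffer (illustrative, not the SPDK implementation):

/* Illustrative PRP construction for a page-aligned buffer of npages pages.
 * prp_list stands in for the tracker's u.prp array. */
static void
ut_build_prp(uint64_t buf_phys, uint32_t npages, uint64_t *prp1, uint64_t *prp_list)
{
	uint32_t i;

	*prp1 = buf_phys;	/* the first page goes directly in the command */
	for (i = 1; i < npages; i++) {
		prp_list[i - 1] = buf_phys + (uint64_t)i * PAGE_SIZE;	/* one entry per page */
	}
}
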
Example No. 10
static void
test_sgl_req(void)
{
	struct spdk_nvme_qpair	qpair = {};
	struct nvme_request	*req;
	struct spdk_nvme_ctrlr	ctrlr = {};
	struct spdk_nvme_registers	regs = {};
	struct nvme_payload	payload = {};
	struct nvme_tracker 	*sgl_tr = NULL;
	uint64_t 		i;
	struct io_request	io_req = {};

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = nvme_request_reset_sgl;
	payload.u.sgl.next_sge_fn = nvme_request_next_sge;
	payload.u.sgl.cb_arg = &io_req;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 1;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(req->cmd.psdt == SPDK_NVME_PSDT_PRP);
	CU_ASSERT(req->cmd.dptr.prp.prp1 == 7);
	CU_ASSERT(req->cmd.dptr.prp.prp2 == 4096);

	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	LIST_REMOVE(sgl_tr, list);
	free(sgl_tr);
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	spdk_nvme_retry_count = 1;
	fail_next_sge = true;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	fail_next_sge = false;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 2 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;
	req->payload_offset = 2;

	nvme_qpair_submit_request(&qpair, req);
	CU_ASSERT(qpair.sq_tail == 0);
	cleanup_submit_request_test(&qpair);

	prepare_submit_request_test(&qpair, &ctrlr, &regs);
	req = nvme_allocate_request(&payload, 33 * PAGE_SIZE, NULL, &io_req);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	req->cmd.opc = SPDK_NVME_OPC_WRITE;
	req->cmd.cdw10 = 10000;
	req->cmd.cdw12 = 255 | 0;

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
	CU_ASSERT(qpair.sq_tail == 1);
	sgl_tr = LIST_FIRST(&qpair.outstanding_tr);
	if (sgl_tr != NULL) {
		for (i = 0; i < 32; i++) {
			CU_ASSERT(sgl_tr->u.prp[i] == (PAGE_SIZE * (i + 1)));
		}

		LIST_REMOVE(sgl_tr, list);
		free(sgl_tr);
	}
	cleanup_submit_request_test(&qpair);
	nvme_free_request(req);
}