Example #1
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;
	uint64_t		*tmp_lba;

	if (lba_count == 0) {
		return EINVAL;
	}

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= io_flags;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
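
Two details in this example are easy to miss: the 64-bit starting LBA spans CDW10 and CDW11, which is why the code writes it through a uint64_t pointer aliased to cdw10 (a trick that assumes a little-endian host), and the NLB field in CDW12 is zero-based, so lba_count - 1 goes on the wire with io_flags OR'd into the upper bits of the same dword. The standalone sketch below packs the dwords explicitly under those NVMe-spec assumptions; the helper name is hypothetical and not part of SPDK.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper (not part of SPDK): pack the Write Zeroes command
 * dwords the way the example does, but explicitly instead of through a
 * uint64_t pointer cast.  CDW10/CDW11 carry the 64-bit starting LBA and
 * CDW12 carries the zero-based block count plus any I/O flags. */
static void
pack_write_zeroes(uint32_t cdw[16], uint64_t lba, uint32_t lba_count,
		  uint32_t io_flags)
{
	cdw[10] = (uint32_t)(lba & 0xFFFFFFFFu);	/* SLBA[31:0]  */
	cdw[11] = (uint32_t)(lba >> 32);		/* SLBA[63:32] */
	cdw[12] = (lba_count - 1) | io_flags;		/* zero-based NLB */
}

int
main(void)
{
	uint32_t cdw[16] = { 0 };

	pack_write_zeroes(cdw, 0x123456789ULL, 8, 0);
	assert(cdw[10] == 0x23456789u);	/* low 32 bits of the LBA */
	assert(cdw[11] == 0x1u);	/* high 32 bits of the LBA */
	assert(cdw[12] == 7u);		/* 8 blocks -> NLB field of 7 */
	printf("cdw10=0x%x cdw11=0x%x cdw12=0x%x\n", cdw[10], cdw[11], cdw[12]);
	return 0;
}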
Example #2
int
nvme_ctrlr_cmd_create_io_sq(struct spdk_nvme_ctrlr *ctrlr,
			    struct spdk_nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries - 1) << 16) | io_que->id;
	/* 0x1 = physically contiguous */
	cmd->cdw11 = (io_que->id << 16) | (io_que->qprio << 1) | 0x1;
	cmd->dptr.prp.prp1 = io_que->cmd_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
	return 0;
}
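
The dword packing here follows the NVMe Create I/O Submission Queue definition: CDW10 carries the zero-based queue size in its upper 16 bits and the queue ID in the lower 16, while CDW11 carries the paired completion queue ID, the queue priority, and the PC (physically contiguous) bit. The standalone sketch below illustrates that layout under those spec assumptions; the struct and function names are hypothetical, not SPDK APIs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the Create I/O SQ dword layout assumed by
 * the example (NVMe spec): CDW10 = (QSIZE - 1) << 16 | QID, and CDW11
 * packs the paired CQ ID, the queue priority, and the PC (physically
 * contiguous) bit. */
struct create_io_sq_dwords {
	uint32_t cdw10;
	uint32_t cdw11;
};

static struct create_io_sq_dwords
pack_create_io_sq(uint16_t qid, uint16_t num_entries, uint16_t cqid,
		  uint8_t qprio)
{
	struct create_io_sq_dwords d;

	d.cdw10 = ((uint32_t)(num_entries - 1) << 16) | qid;
	d.cdw11 = ((uint32_t)cqid << 16) | ((uint32_t)qprio << 1) | 0x1u;
	return d;
}

int
main(void)
{
	/* Queue 1, 256 entries, paired with CQ 1, priority 0. */
	struct create_io_sq_dwords d = pack_create_io_sq(1, 256, 1, 0);

	printf("cdw10=0x%08x cdw11=0x%08x\n", d.cdw10, d.cdw11);
	/* Expected: cdw10=0x00ff0001 (zero-based QSIZE=255, QID=1),
	 *           cdw11=0x00010001 (CQID=1, QPRIO=0, PC=1).        */
	return 0;
}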
Example #3
void
nvme_ctrlr_cmd_create_io_cq(struct spdk_nvme_ctrlr *ctrlr,
			    struct nvme_qpair *io_que, spdk_nvme_cmd_cb cb_fn,
			    void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries - 1) << 16) | io_que->id;
	/*
	 * 0x1 = physically contiguous.  Interrupts (0x2) are intentionally
	 * left disabled; SPDK polls the completion queue rather than
	 * relying on an interrupt.
	 */
	cmd->cdw11 = (io_que->id << 16) | 0x1;
	cmd->dptr.prp.prp1 = io_que->cpl_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #4
int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
	nvme_mutex_unlock(&ctrlr->ctrlr_lock);

	return 0;
}
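
Because the request is allocated with nvme_allocate_request_null, no data buffer travels with the command; for features like this the result comes back in dword 0 of the completion, which the cb_fn callback receives. As a hedged illustration, the sketch below decodes the Number of Queues feature (FID 0x07), whose completion dword 0 holds zero-based counts of allocated I/O submission and completion queues per the NVMe spec; the helper name and the hard-coded value are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical decode of the Number of Queues feature (FID 0x07).  Per
 * the NVMe spec, completion dword 0 holds the number of I/O submission
 * queues allocated in bits 15:0 and completion queues in bits 31:16,
 * both zero-based. */
static void
decode_number_of_queues(uint32_t cdw0, uint32_t *num_io_sq, uint32_t *num_io_cq)
{
	*num_io_sq = (cdw0 & 0xFFFFu) + 1;
	*num_io_cq = (cdw0 >> 16) + 1;
}

int
main(void)
{
	/* Example completion dword 0 a callback might see: 31 SQs and
	 * 31 CQs allocated (zero-based 0x001e001e). */
	uint32_t cdw0 = 0x001e001eu;
	uint32_t nsq, ncq;

	decode_number_of_queues(cdw0, &nsq, &ncq);
	printf("I/O SQs: %u, I/O CQs: %u\n", nsq, ncq);
	return 0;
}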
Example #5
static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_nvmeio	*nvmeio = &ccb->nvmeio;
	struct nvme_request	*req;
	void			*payload;
	uint32_t		size;
	struct nvme_controller *ctrlr;

	ctrlr = sim2ctrlr(sim);
	payload = nvmeio->data_ptr;
	size = nvmeio->dxfer_len;
	/* SG LIST ??? */
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

	nvme_ctrlr_submit_io_request(ctrlr, req);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
}
Example #6
void
nvme_ctrlr_cmd_abort(struct spdk_nvme_ctrlr *ctrlr, uint16_t cid,
		     uint16_t sqid, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct spdk_nvme_cmd *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_ABORT;
	cmd->cdw10 = (cid << 16) | sqid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #7
void
nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
    uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_ABORT);
	cmd->cdw10 = htole32((cid << 16) | sqid);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
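
Example #6 and Example #7 build the same Abort command (CDW10 = CID << 16 | SQID), but the FreeBSD version wraps the dword in htole32() because its command structure must be little-endian on the wire regardless of host byte order, while the SPDK version fills the structure in host order, which is little-endian on the platforms SPDK targets. The sketch below is a portable stand-in for that conversion; the helper is illustrative and not taken from either driver.

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for htole32() (illustrative only): on a little-endian
 * host this is a no-op, on a big-endian host it swaps the bytes so the
 * dword lands in the little-endian layout NVMe expects. */
static uint32_t
to_le32(uint32_t v)
{
	const union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

	if (probe.u8[0] == 1)		/* little-endian host */
		return v;
	return ((v & 0x000000FFu) << 24) |
	       ((v & 0x0000FF00u) << 8)  |
	       ((v & 0x00FF0000u) >> 8)  |
	       ((v & 0xFF000000u) >> 24);
}

int
main(void)
{
	uint16_t cid = 0x0012, sqid = 0x0001;
	uint32_t cdw10 = to_le32(((uint32_t)cid << 16) | sqid);

	printf("abort cdw10 = 0x%08x\n", cdw10);
	return 0;
}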
Example #8
void
nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_GET_FEATURES;
	cmd->cdw10 = feature;
	cmd->cdw11 = cdw11;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #9
void
nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
    uint32_t cdw11, void *payload, uint32_t payload_size,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_SET_FEATURES);
	cmd->cdw10 = htole32(feature);
	cmd->cdw11 = htole32(cdw11);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #10
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct spdk_nvme_ctrlr		ctrlr = {};

	prepare_submit_request_test(&qpair, &ctrlr);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);

	nvme_free_request(req);

	cleanup_submit_request_test(&qpair);
}
Example #11
int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #12
int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
Example #13
void
nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DELETE_IO_SQ;

	/*
	 * TODO: create a delete io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = io_que->id;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #14
void
nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc_fuse = NVME_CMD_SET_OPC(NVME_OPC_DELETE_IO_CQ);

	/*
	 * TODO: create a delete io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = htole32(io_que->id);

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #15
static void
test3(void)
{
	struct spdk_nvme_qpair		qpair = {};
	struct nvme_request		*req;
	struct nvme_tracker		*tr;
	struct spdk_nvme_ctrlr		ctrlr = {};
	struct spdk_nvme_registers	regs = {};
	uint16_t			cid;

	prepare_submit_request_test(&qpair, &ctrlr, &regs);

	req = nvme_allocate_request_null(expected_success_callback, NULL);
	SPDK_CU_ASSERT_FATAL(req != NULL);

	CU_ASSERT(qpair.sq_tail == 0);

	nvme_qpair_submit_request(&qpair, req);

	CU_ASSERT(qpair.sq_tail == 1);

	/*
	 * Since sq_tail was 0 when the command was submitted, it is in cmd[0].
	 * Extract its command ID to retrieve its tracker.
	 */
	cid = qpair.cmd[0].cid;
	tr = qpair.act_tr[cid];
	SPDK_CU_ASSERT_FATAL(tr != NULL);

	/*
	 * Complete the tracker so that it is returned to the free list.
	 * This also frees the request.
	 */
	nvme_qpair_manual_complete_tracker(&qpair, tr, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS, 0,
					   false);

	cleanup_submit_request_test(&qpair);
}
Example #16
void
nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_SQ;

	/*
	 * TODO: create a create io submission queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x1 = physically contiguous */
	cmd->cdw11 = (io_que->id << 16) | 0x1;
	cmd->prp1 = io_que->cmd_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
Example #17
void
nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
    struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
    void *cb_arg)
{
	struct nvme_request *req;
	struct nvme_command *cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_CREATE_IO_CQ;

	/*
	 * TODO: create a create io completion queue command data
	 *  structure.
	 */
	cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
	/* 0x3 = interrupts enabled | physically contiguous */
	cmd->cdw11 = (vector << 16) | 0x3;
	cmd->prp1 = io_que->cpl_bus_addr;

	nvme_ctrlr_submit_admin_request(ctrlr, req);
}
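
Unlike the SPDK version in Example #3, this FreeBSD code enables interrupts on the completion queue: CDW11 carries the MSI-X vector in its upper 16 bits plus the IEN (0x2) and PC (0x1) bits, whereas the SPDK driver leaves IEN clear and polls the queue. The sketch below illustrates that CDW11 layout under the NVMe-spec field definitions assumed here; the function name is hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the Create I/O CQ CDW11 layout assumed by
 * the example (NVMe spec): interrupt vector in bits 31:16, IEN
 * (interrupts enabled) in bit 1, PC (physically contiguous) in bit 0. */
static uint32_t
pack_create_io_cq_cdw11(uint16_t vector, bool interrupts, bool contiguous)
{
	return ((uint32_t)vector << 16) |
	       ((interrupts ? 1u : 0u) << 1) |
	       (contiguous ? 1u : 0u);
}

int
main(void)
{
	/* FreeBSD-style interrupt-driven CQ vs. an SPDK-style polled CQ. */
	printf("interrupt-driven: cdw11=0x%08x\n",
	       pack_create_io_cq_cdw11(2, true, true));		/* 0x00020003 */
	printf("polled:           cdw11=0x%08x\n",
	       pack_create_io_cq_cdw11(0, false, true));	/* 0x00000001 */
	return 0;
}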
Example #18
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	struct nvme_request			*req;
	struct spdk_nvme_cmd			*cmd;

	nvme_mutex_lock(&ctrlr->ctrlr_lock);
	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		nvme_mutex_unlock(&ctrlr->ctrlr_lock);
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_NS_MANAGEMENT;
	cmd->cdw10 = SPDK_NVME_NS_MANAGEMENT_DELETE;
	cmd->nsid = nsid;

	nvme_ctrlr_submit_admin_request(ctrlr, req);

	nvme_mutex_unlock(&ctrlr->ctrlr_lock);
	return 0;
}
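
For the Namespace Management command, CDW10 carries the SEL field that selects the operation; the NVMe spec defines 0 for create and 1 for delete, which is presumably what SPDK_NVME_NS_MANAGEMENT_DELETE evaluates to here. The short sketch below only records that mapping; the enum name is illustrative, not the SPDK definition.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the SEL values the NVMe spec defines for the
 * Namespace Management command's CDW10 (bits 3:0). */
enum ns_management_sel {
	NS_MGMT_SEL_CREATE = 0,
	NS_MGMT_SEL_DELETE = 1,
};

int
main(void)
{
	uint32_t nsid = 2;
	uint32_t cdw10 = NS_MGMT_SEL_DELETE;

	printf("delete nsid %u: cdw10=0x%x\n", nsid, cdw10);
	return 0;
}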