Example #1
int
spdk_nvme_ns_cmd_reservation_acquire(struct spdk_nvme_ns *ns,
				     struct spdk_nvme_reservation_acquire_data *payload,
				     bool ignore_key,
				     enum spdk_nvme_reservation_acquire_action action,
				     enum spdk_nvme_reservation_type type,
				     spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	req = nvme_allocate_request_contig(payload,
					   sizeof(struct spdk_nvme_reservation_acquire_data),
					   cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_ACQUIRE;
	cmd->nsid = ns->id;

	/* Bits 0-2 */
	cmd->cdw10 = action;
	/* Bit 3 */
	cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
	/* Bits 8-15 */
	cmd->cdw10 |= (uint32_t)type << 8;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #2
static int
nvme_write_uio(struct nvme_namespace *ns, struct uio *uio)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;
	uint64_t		lba;

	req = nvme_allocate_request_uio(uio, nvme_uio_done, uio);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_WRITE;
	cmd->nsid = ns->id;
	lba = uio->uio_offset / nvme_ns_get_sector_size(ns);

	*(uint64_t *)&cmd->cdw10 = lba;
	/*
	 * Store the sector size in cdw12 (where the LBA count normally goes).
	 *  We'll adjust cdw12 in the map_uio callback based on the mapsize
	 *  parameter.  This allows us to not have to store the namespace
	 *  in the request simply to get the sector size in the map_uio
	 *  callback.
	 */
	cmd->cdw12 = nvme_ns_get_sector_size(ns);

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
Example #3
int
spdk_nvme_ns_cmd_reservation_report(struct spdk_nvme_ns *ns, void *payload,
				    uint32_t len, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	uint32_t		num_dwords;
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	if (len % 4)
		return EINVAL;
	num_dwords = len / 4;

	req = nvme_allocate_request_contig(payload, len, cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REPORT;
	cmd->nsid = ns->id;

	cmd->cdw10 = num_dwords;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #4
int
spdk_nvme_ns_cmd_write_zeroes(struct spdk_nvme_ns *ns, uint64_t lba,
			      uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
			      uint32_t io_flags)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;
	uint64_t		*tmp_lba;

	if (lba_count == 0) {
		return EINVAL;
	}

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_WRITE_ZEROES;
	cmd->nsid = ns->id;

	/* cdw10-11 hold the 64-bit starting LBA. */
	tmp_lba = (uint64_t *)&cmd->cdw10;
	*tmp_lba = lba;
	/* cdw12: 0's based block count in the low bits, I/O flags (e.g. FUA) in the upper bits. */
	cmd->cdw12 = lba_count - 1;
	cmd->cdw12 |= io_flags;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #5
int
spdk_nvme_ns_cmd_reservation_register(struct spdk_nvme_ns *ns,
				      struct spdk_nvme_reservation_register_data *payload,
				      bool ignore_key,
				      enum spdk_nvme_reservation_register_action action,
				      enum spdk_nvme_reservation_register_cptpl cptpl,
				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	req = nvme_allocate_request_contig(payload,
					   sizeof(struct spdk_nvme_reservation_register_data),
					   cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_RESERVATION_REGISTER;
	cmd->nsid = ns->id;

	/* Bits 0-2 */
	cmd->cdw10 = action;
	/* Bit 3 */
	cmd->cdw10 |= ignore_key ? 1 << 3 : 0;
	/* Bits 30-31 */
	cmd->cdw10 |= (uint32_t)cptpl << 30;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #6
int
spdk_nvme_ns_cmd_deallocate(struct spdk_nvme_ns *ns, void *payload,
			    uint16_t num_ranges, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	if (num_ranges == 0 || num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES) {
		return EINVAL;
	}

	req = nvme_allocate_request_contig(payload,
					   num_ranges * sizeof(struct spdk_nvme_dsm_range),
					   cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	/* TODO: create a delete command data structure */
	cmd->cdw10 = num_ranges - 1;
	cmd->cdw11 = SPDK_NVME_DSM_ATTR_DEALLOCATE;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #7
int
nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
    uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;

	req = nvme_allocate_request_vaddr(payload,
	    lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_WRITE;
	cmd->nsid = ns->id;

	/* TODO: create a write command data structure */
	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count-1;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
Example #8
int
spdk_nvme_ns_cmd_writev(struct spdk_nvme_ns *ns, uint64_t lba, uint32_t lba_count,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg, uint32_t io_flags,
			spdk_nvme_req_reset_sgl_cb reset_sgl_fn,
			spdk_nvme_req_next_sge_cb next_sge_fn)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	if (reset_sgl_fn == NULL || next_sge_fn == NULL)
		return EINVAL;

	payload.type = NVME_PAYLOAD_TYPE_SGL;
	payload.u.sgl.reset_sgl_fn = reset_sgl_fn;
	payload.u.sgl.next_sge_fn = next_sge_fn;
	payload.u.sgl.cb_arg = cb_arg;

	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
	if (req != NULL) {
		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
		return 0;
	} else {
		return ENOMEM;
	}
}
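
The scatter-gather variant above never sees a flat buffer; the payload is produced entirely through the two callbacks, and note that this tree hands the completion cb_arg to the SGL callbacks as well, so one context object serves both roles. Below is a minimal, hypothetical sketch of such callbacks over an iovec array. The iov_ctx type and both functions are illustrations only, and the callback signatures, reset_sgl(cb_arg, offset) and next_sge(cb_arg, &address, &length), are assumed to match what spdk_nvme_req_reset_sgl_cb and spdk_nvme_req_next_sge_cb expect in this tree.

#include <stdint.h>
#include <sys/uio.h>

struct iov_ctx {
	struct iovec	*iov;		/* caller-provided segment array */
	int		iovcnt;
	int		cur;		/* next element to hand out */
};

static void
iov_reset_sgl(void *cb_arg, uint32_t offset)
{
	struct iov_ctx *ctx = cb_arg;

	/* This sketch only supports restarting from the beginning of the payload. */
	(void)offset;
	ctx->cur = 0;
}

static int
iov_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct iov_ctx *ctx = cb_arg;

	if (ctx->cur >= ctx->iovcnt) {
		return -1;	/* no segments left */
	}

	*address = ctx->iov[ctx->cur].iov_base;
	*length = (uint32_t)ctx->iov[ctx->cur].iov_len;
	ctx->cur++;
	return 0;
}

A caller would fill an iov_ctx, then pass iov_reset_sgl, iov_next_sge, and the context as cb_arg to spdk_nvme_ns_cmd_writev; the segments themselves must live in DMA-safe memory.
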
Example #9
int
nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
    nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;
	uint64_t		lba;
	uint64_t		lba_count;

	req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);
	cmd = &req->cmd;
	cmd->opc = NVME_OPC_READ;
	cmd->nsid = ns->id;

	lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
	lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);

	/* TODO: create a read command data structure */
	*(uint64_t *)&cmd->cdw10 = lba;
	cmd->cdw12 = lba_count-1;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
Example #10
int
nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
    uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;

	req = nvme_allocate_request_vaddr(payload,
	    num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
	cmd->nsid = ns->id;

	/* TODO: create a delete command data structure */
	cmd->cdw10 = num_ranges - 1;
	cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}
Example #11
static void
nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
{
	struct ccb_nvmeio	*nvmeio = &ccb->nvmeio;
	struct nvme_request	*req;
	void			*payload;
	uint32_t		size;
	struct nvme_controller *ctrlr;

	ctrlr = sim2ctrlr(sim);
	payload = nvmeio->data_ptr;
	size = nvmeio->dxfer_len;
	/* SG LIST ??? */
	if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
		req = nvme_allocate_request_bio((struct bio *)payload,
		    nvme_sim_nvmeio_done, ccb);
	else if (payload == NULL)
		req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
	else
		req = nvme_allocate_request_vaddr(payload, size,
		    nvme_sim_nvmeio_done, ccb);

	if (req == NULL) {
		nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	memcpy(&req->cmd, &ccb->nvmeio.cmd, sizeof(ccb->nvmeio.cmd));

	nvme_ctrlr_submit_io_request(ctrlr, req);

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
}
Example #12
int
spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
			   struct spdk_nvme_cmd *cmd,
			   void *buf, uint32_t len,
			   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;

	req = nvme_allocate_request_contig(buf, len, cb_fn, cb_arg);

	if (req == NULL) {
		return ENOMEM;
	}

	memcpy(&req->cmd, cmd, sizeof(req->cmd));

	nvme_ctrlr_submit_io_request(ctrlr, req);
	return 0;
}
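
Because the raw path copies the caller-built command verbatim, the caller owns the whole command encoding. As a sketch, the hypothetical helper below pushes a Flush through it, reusing only names that already appear in these examples (struct spdk_nvme_cmd, SPDK_NVME_OPC_FLUSH, spdk_nvme_cmd_cb); flush_via_raw itself is illustrative, not an SPDK API, and it assumes the public header spdk/nvme.h plus that a NULL buffer with zero length is accepted for commands that transfer no data.

#include <string.h>

#include "spdk/nvme.h"

static int
flush_via_raw(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
	      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opc = SPDK_NVME_OPC_FLUSH;
	cmd.nsid = nsid;

	/* Flush carries no data, so no buffer is attached to the request;
	 * io_raw copies the command, so a stack-allocated cmd is fine. */
	return spdk_nvme_ctrlr_cmd_io_raw(ctrlr, &cmd, NULL, 0, cb_fn, cb_arg);
}
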
Example #13
int
spdk_nvme_ns_cmd_flush(struct spdk_nvme_ns *ns, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct spdk_nvme_cmd	*cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);
	if (req == NULL) {
		return ENOMEM;
	}

	cmd = &req->cmd;
	cmd->opc = SPDK_NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return 0;
}
Example #14
int
spdk_nvme_ns_cmd_write(struct spdk_nvme_ns *ns, void *buffer, uint64_t lba,
		       uint32_t lba_count, spdk_nvme_cmd_cb cb_fn, void *cb_arg,
		       uint32_t io_flags)
{
	struct nvme_request *req;
	struct nvme_payload payload;

	payload.type = NVME_PAYLOAD_TYPE_CONTIG;
	payload.u.contig = buffer;

	req = _nvme_ns_cmd_rw(ns, &payload, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE, io_flags);
	if (req != NULL) {
		nvme_ctrlr_submit_io_request(ns->ctrlr, req);
		return 0;
	} else {
		return ENOMEM;
	}
}
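
From the application side, the contiguous write above needs only a buffer, an LBA range, and a completion callback. The sketch below shows a minimal hypothetical caller: write_complete, write_one_block, and the done flag are illustrations; the callback signature (void *cb_arg, const struct spdk_nvme_cpl *cpl) is assumed to match spdk_nvme_cmd_cb; and the data buffer must come from DMA-safe memory obtained through SPDK's allocator, not plain malloc. The completion-polling loop is omitted because it depends on this tree's controller-level API.

#include <stdbool.h>
#include <stdio.h>

#include "spdk/nvme.h"

static volatile bool write_done;

/* Completion callback (assumed spdk_nvme_cmd_cb signature). */
static void
write_complete(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	(void)cb_arg;
	(void)cpl;
	write_done = true;
}

static int
write_one_block(struct spdk_nvme_ns *ns, void *dma_buf, uint64_t lba)
{
	int rc;

	write_done = false;

	/* One logical block starting at 'lba', no special I/O flags. */
	rc = spdk_nvme_ns_cmd_write(ns, dma_buf, lba, 1, write_complete, NULL, 0);
	if (rc != 0) {
		fprintf(stderr, "write submit failed: %d\n", rc);
		return rc;
	}

	/* The caller would now poll the controller's I/O completions until
	 * write_done becomes true; that loop is omitted here. */
	return 0;
}
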
Example #15
int
nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request	*req;
	struct nvme_command	*cmd;

	req = nvme_allocate_request_null(cb_fn, cb_arg);

	if (req == NULL)
		return (ENOMEM);

	cmd = &req->cmd;
	cmd->opc = NVME_OPC_FLUSH;
	cmd->nsid = ns->id;

	nvme_ctrlr_submit_io_request(ns->ctrlr, req);

	return (0);
}