Example 1
static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin,
	 * i.e. where the CDB was received for X-COPY PUSH vs. X-COPY PULL.
	 */
	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	cmd->tag = 0;
	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release the SGLs + physical memory allocated
		 * by target_alloc_sgl() above.
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}
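The alloc_mem flag above is what lets one helper serve both halves of a local EXTENDED COPY: the READ direction gets freshly allocated SGLs, while the WRITE direction reuses them zero-copy. Below is a minimal sketch of that pairing, not taken from the kernel tree: the helper name is hypothetical, xop->src_dev / xop->dst_dev and the single remote_port value are assumptions, and in the real flow the READ executes and its t_data_sg is saved into xop->xop_data_sg between the two calls.

static int xcopy_setup_read_write(struct xcopy_pt_cmd *read_cmd,
				  struct xcopy_pt_cmd *write_cmd,
				  struct xcopy_op *xop,
				  unsigned char *read_cdb,
				  unsigned char *write_cdb,
				  bool remote_port)
{
	int rc;

	/* READ half: alloc_mem=true, fresh SGLs come from target_alloc_sgl() */
	rc = target_xcopy_setup_pt_cmd(read_cmd, xop, xop->src_dev,
				       read_cdb, remote_port, true);
	if (rc < 0)
		return rc;

	/*
	 * WRITE half: alloc_mem=false, so the READ's now-filled SGLs are
	 * handed over zero-copy via xop->xop_data_sg / xop->xop_data_nents.
	 * (The READ must have completed, and its t_data_sg been saved into
	 * xop, before this call.)
	 */
	return target_xcopy_setup_pt_cmd(write_cmd, xop, xop->dst_dev,
					 write_cdb, remote_port, false);
}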
Example 2
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non-zero return value
 * here signals the caller to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret != 0)
		return ret;
	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below.
	 */
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}
	/*
	 * Because some userspace code using scsi-generic does not memset
	 * its associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf;

		/* kmap() of a valid page cannot fail in process context */
		buf = kmap(sg_page(sg)) + sg->offset;
		memset(buf, 0, sg->length);
		kunmap(sg_page(sg));
	}

	/* Tell the core about our preallocated memory */
	return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
}
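The zeroing above leans on the single-SGL guarantee spelled out in the comment. As a hedged alternative, assuming the kernel's sg_miter iterator from <linux/scatterlist.h> is available in the tree at hand, the same zeroing generalized to a multi-entry list could look like the sketch below; the helper name is hypothetical.

#include <linux/scatterlist.h>
#include <linux/string.h>

/*
 * Sketch: zero every entry of a scatterlist without relying on the
 * single-SGL guarantee.  SG_MITER_TO_SG tells the iterator that we are
 * writing into the SG pages, so they are flushed on sg_miter_stop().
 */
static void tcm_loop_zero_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);
	sg_miter_stop(&miter);
}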
Example 3
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non-zero return value
 * here signals the caller to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION with sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below.
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
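This variant translates the errnos from transport_generic_allocate_tasks() into PYX_TRANSPORT_* status codes inline. A hedged sketch of that translation factored into its own helper follows (the helper name is hypothetical; the symbols are those used above); it would be called only when the allocation returned non-zero:

static int tcm_loop_translate_alloc_err(struct se_cmd *se_cmd, int ret)
{
	if (ret == -ENOMEM)
		return PYX_TRANSPORT_LU_COMM_FAILURE;	/* out of resources */
	/* -EINVAL: distinguish a reservation conflict from a check condition */
	if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
		return PYX_TRANSPORT_RESERVATION_CONFLICT;
	/* SAM_STAT_CHECK_CONDITION; sense data has already been set up */
	return PYX_TRANSPORT_USE_SENSE_REASON;
}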
Example 4
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	if (transport_lookup_cmd_lun(se_cmd, sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code using scsi-generic does not memset
	 * its associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf;

		/* kmap() of a valid page cannot fail in process context */
		buf = kmap(sg_page(sg)) + sg->offset;
		memset(buf, 0, sg->length);
		kunmap(sg_page(sg));
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
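This work function is driven from the LLD's ->queuecommand() path. For context, a hedged sketch of that producer side is shown below; tcm_loop_workqueue is an assumption here (only tcm_loop_cmd_cache appears in the examples), and the exact upstream body may differ.

static int tcm_loop_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	/* Defer to process context: the work function may sleep (kmap, allocations) */
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}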
Example 5
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non-zero return value
 * here signals the caller to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -EINVAL) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION with sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}

	/*
	 * For BIDI commands, pass in the extra READ buffer
	 * to transport_generic_map_mem_to_cmd() below.
	 */
	if (se_cmd->t_tasks_bidi) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
	}
	/*
	 * Because some userspace code using scsi-generic does not memset
	 * its associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf;

		/* kmap() of a valid page cannot fail in process context */
		buf = kmap(sg_page(sg)) + sg->offset;
		memset(buf, 0, sg->length);
		kunmap(sg_page(sg));
	}

	/* Tell the core about our preallocated memory */
	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
/*
 * Called by struct target_core_fabric_ops->new_cmd_map()
 *
 * Always called in process context.  A non-zero return value
 * here signals the caller to handle an exception based on the return code.
 */
static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;
	void *mem_ptr, *mem_bidi_ptr = NULL;
	u32 sg_no_bidi = 0;
	int ret;
	/*
	 * Allocate the necessary tasks to complete the received CDB+data
	 */
	ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
	if (ret == -1) {
		/* Out of Resources */
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	} else if (ret == -2) {
		/*
		 * Handle case for SAM_STAT_RESERVATION_CONFLICT
		 */
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			return PYX_TRANSPORT_RESERVATION_CONFLICT;
		/*
		 * Otherwise, return SAM_STAT_CHECK_CONDITION with sense data.
		 */
		return PYX_TRANSPORT_USE_SENSE_REASON;
	}
	/*
	 * Setup the struct scatterlist memory from the received
	 * struct scsi_cmnd.
	 */
	if (scsi_sg_count(sc)) {
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
		mem_ptr = (void *)scsi_sglist(sc);
		/*
		 * For BIDI commands, pass in the extra READ buffer
		 * to transport_generic_map_mem_to_cmd() below.
		 */
		if (T_TASK(se_cmd)->t_tasks_bidi) {
			struct scsi_data_buffer *sdb = scsi_in(sc);

			mem_bidi_ptr = (void *)sdb->table.sgl;
			sg_no_bidi = sdb->table.nents;
		}
	} else {
		/*
		 * Used for DMA_NONE
		 */
		mem_ptr = NULL;
	}
	/*
	 * Map the SG memory into the struct se_mem->page linked list,
	 * reusing the same physical memory at sg->page_link.
	 */
	ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
			scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	return 0;
}
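The closing comment stresses that transport_generic_map_mem_to_cmd() reuses the physical pages behind sg->page_link rather than copying the payload. A hedged illustration of what actually crosses that boundary, via a hypothetical debug helper: nothing but page/offset/length descriptors.

#include <linux/scatterlist.h>
#include <linux/printk.h>

/* Sketch: walk an SG list and show the descriptors being handed over. */
static void tcm_loop_dump_sgl(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		pr_debug("sg[%d]: page %p offset %u length %u\n",
			 i, sg_page(sg), sg->offset, sg->length);
}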