Example #1
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	/* Release the descriptor so early-error paths do not leak tl_cmd. */
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
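Example #1 calls tcm_loop_sam_attr() without showing it; a minimal sketch, reconstructed from the tag-mapping switch in Example #9 (the in-tree helper may differ in detail):

static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
	/* Map the midlayer's queue tag onto a SAM task attribute;
	 * untagged devices always get SIMPLE. */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			return MSG_HEAD_TAG;
		case ORDERED_QUEUE_TAG:
			return MSG_ORDERED_TAG;
		default:
			break;
		}
	}
	return MSG_SIMPLE_TAG;
}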
Example #2
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
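The examples above rely on the packed sc->result word. A minimal sketch of the classic encoding (SAM status in the low byte, host byte in bits 16-23, driver byte in bits 24-31); the example_* names are illustrative, the real helpers live in <scsi/scsi_cmnd.h>:

static inline void example_set_host_byte(struct scsi_cmnd *cmd, char status)
{
	/* Replace bits 16-23 (the DID_* host byte), keep everything else. */
	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
}

static inline void example_set_driver_byte(struct scsi_cmnd *cmd, char status)
{
	/* Replace bits 24-31 (the DRIVER_* byte), keep everything else. */
	cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
}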
Example #3
/**
 * zfcp_scsi_dif_sense_error - Report DIF/DIX error as driver sense error
 * @scmd: The SCSI command to report the error for
 * @ascq: The ASCQ to put in the sense buffer
 *
 * See the error handling in sd_done for the sense codes used here.
 * Set DID_SOFT_ERROR to retry the request, if possible.
 */
void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
{
	scsi_build_sense_buffer(1, scmd->sense_buffer,
				ILLEGAL_REQUEST, 0x10, ascq);
	set_driver_byte(scmd, DRIVER_SENSE);
	scmd->result |= SAM_STAT_CHECK_CONDITION;
	set_host_byte(scmd, DID_SOFT_ERROR);
}
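zfcp passes desc = 1, so scsi_build_sense_buffer() emits descriptor-format sense data. A rough sketch of the layout the helper produces (a reconstruction, not the in-tree source):

static void example_build_sense(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
{
	if (desc) {
		buf[0] = 0x72;	/* descriptor-format response code */
		buf[1] = key;
		buf[2] = asc;
		buf[3] = ascq;
	} else {
		buf[0] = 0x70;	/* fixed-format response code */
		buf[2] = key;
		buf[7] = 0xa;	/* additional sense length */
		buf[12] = asc;
		buf[13] = ascq;
	}
}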
Example #4
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	sc->scsi_done(sc);
	return 0;
}
Example #5
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
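The residual recorded via scsi_set_resid() is what the initiator side later reads back with scsi_get_resid(); a hypothetical completion-side check (illustrative names):

static void example_check_residual(struct scsi_cmnd *sc)
{
	int resid = scsi_get_resid(sc);

	/* A positive residual means an underflow: fewer bytes were
	 * moved than the buffer allowed for. */
	if (resid)
		pr_debug("short transfer: %d of %u bytes not moved\n",
			 resid, scsi_bufflen(sc));
}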
Example #6
/*
 * Main entry point from struct scsi_host_template for incoming SCSI CDB+Data
 * from Linux/SCSI subsystem for SCSI low level device drivers (LLDs)
 */
static int tcm_loop_queuecommand(
	struct Scsi_Host *sh,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));
	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		sc->scsi_done(sc);
		return 0;
	}
	se_tpg = &tl_tpg->tl_se_tpg;
	/*
	 * Determine the SAM Task Attribute and allocate tl_cmd and
	 * tl_cmd->tl_se_cmd from TCM infrastructure
	 */
	se_cmd = tcm_loop_allocate_core_cmd(tl_hba, se_tpg, sc);
	if (!se_cmd) {
		sc->scsi_done(sc);
		return 0;
	}
	/*
	 * Queue up the newly allocated command to be processed in TCM
	 * thread context.
	 */
	transport_generic_handle_cdb_map(se_cmd);
	return 0;
}
Example #7
/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}
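This version assumes tcm_loop_workqueue already exists; a minimal sketch of the module-load-time setup it depends on (the function name here is assumed, not taken from the driver):

static struct workqueue_struct *tcm_loop_workqueue;

static int __init example_tcm_loop_setup_workqueue(void)
{
	/* An ordinary workqueue is enough: the submission work sleeps,
	 * so it must run in process context rather than the IRQ path. */
	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		return -ENOMEM;
	return 0;
}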
Example #8
static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
{
	set_host_byte(scpnt, result);
	zfcp_dbf_scsi_fail_send(scpnt);
	scpnt->scsi_done(scpnt);
}
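A typical caller is zfcp's queuecommand path, failing a command before it ever reaches the adapter; a hedged sketch (the call site shown is hypothetical):

static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *scpnt)
{
	/* Fail fast when the device is gone; the helper sets the host
	 * byte, traces the failure, and completes the command. */
	if (unlikely(!scsi_device_online(scpnt->device))) {
		zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
		return 0;
	}
	/* ... the normal submission path would follow here ... */
	return 0;
}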
Example #9
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc))
		se_cmd->se_cmd_flags |= SCF_BIDI;

	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

	return se_cmd;
}
Example #10
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code via scsi-generic do not memset their
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg));

		/* Check the mapping before applying the offset; the old
		 * NULL test could never fire once sg->offset was added. */
		if (buf) {
			memset(buf + sg->offset, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
Example #11
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
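	/* Release the descriptor before completing sc so no path leaks tl_cmd. */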
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
Example #12
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	/*
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
		T_TASK(se_cmd)->t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}
	/*
	 * Because some userspace code via scsi-generic do not memset their
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg));

		/* Check the mapping before applying the offset; the old
		 * NULL test could never fire once sg->offset was added. */
		if (buf) {
			memset(buf + sg->offset, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	transport_device_setup_cmd(se_cmd);
	return se_cmd;
}
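For context, the T_TASK() accessor used above predates the 3.x target-core cleanup; to the best of recollection it was a plain accessor macro over the command's embedded task state (shown here as an assumption, not verified against the old tree):

/* Assumed pre-3.1 target_core_base.h definition (sketch from memory). */
#define T_TASK(cmd)	((cmd)->t_task)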