int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
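The headroom test in cxgb3i_conn_alloc_pdu() open-codes a "does this PDU carry data toward the target?" check. A minimal sketch of that predicate, with a hypothetical helper name purely for illustration:

/*
 * Hypothetical helper (not part of cxgb3i): a PDU needs extra data-out
 * headroom if it is an explicit DATA_OUT PDU, or a SCSI command that is
 * bidirectional or whose data direction is toward the device.
 */
static inline bool pdu_carries_data_out(struct scsi_cmnd *sc, u8 opcode)
{
	if (opcode == ISCSI_OP_SCSI_DATA_OUT)
		return true;
	return opcode == ISCSI_OP_SCSI_CMD &&
	       (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE);
}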
Example #2
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	/* tl_cmd was never handed to target core on these paths; free it */
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
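The scsi_bidi_cmnd()/scsi_in() pairing above is the recurring pattern in these examples: scsi_sglist() always names the primary (data-out) buffer, while the second, data-in scatterlist of a bidirectional command sits behind scsi_in(). A condensed sketch of that step, using a hypothetical helper name:

/*
 * Hypothetical helper: return the data-in scatterlist of a BIDI command
 * via scsi_in(); yields a NULL list and 0 entries for unidirectional I/O.
 */
static u32 get_bidi_sgl(struct scsi_cmnd *sc, struct scatterlist **sgl)
{
	struct scsi_data_buffer *sdb;

	*sgl = NULL;
	if (!scsi_bidi_cmnd(sc))
		return 0;
	sdb = scsi_in(sc);
	*sgl = sdb->table.sgl;
	return sdb->table.nents;
}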
Example #3
File: fetch26.c Project: arcapix/mhvtl
/*
 * Copy data from SCSI command buffer to device buffer
 *  (SCSI command buffer -> user space)
 *
 * Returns number of bytes fetched into 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, char __user *arr, int len)
{
	struct scsi_data_buffer *sdb = scsi_out(scp);

	if (!scsi_bufflen(scp))
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
		return -1;

	return vtl_copy_to_user(sdb->table.sgl, sdb->table.nents, arr, len);
}
Example #4
File: fetch26.c Project: arcapix/mhvtl
/* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid. */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{
	int act_len;
	struct scsi_data_buffer *sdb = scsi_in(scp);

	if (!sdb->length)
		return 0;
	if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
		return DID_ERROR << 16;

	act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
					arr, arr_len);
	if (sdb->resid)
		sdb->resid -= act_len;
	else
		sdb->resid = scsi_bufflen(scp) - act_len;

	return 0;
}
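fetch_to_dev_buffer() and fill_from_dev_buffer() are mirror images: each first verifies that the command is allowed to move data in the intended direction (BIDI commands qualify either way), then operates on the scatterlist from scsi_out() or scsi_in() respectively. The shared check can be sketched as one hypothetical predicate:

/*
 * Hypothetical predicate: true if 'scp' may legally transfer data in
 * the given direction; BIDI commands qualify for both directions.
 */
static bool direction_ok(struct scsi_cmnd *scp, enum dma_data_direction dir)
{
	return scsi_bidi_cmnd(scp) || scp->sc_data_direction == dir;
}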
Example #5
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc))
		se_cmd->se_cmd_flags |= SCF_BIDI;

	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

	return se_cmd;
}
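Examples #2 and #7 call tcm_loop_sam_attr(sc) instead of open-coding this tag switch. Its definition is not shown on this page, but judging from the switch above it presumably reduces to something like:

/*
 * Sketch of what tcm_loop_sam_attr() likely does, inferred from the
 * open-coded switch in tcm_loop_allocate_core_cmd() above.
 */
static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			return MSG_HEAD_TAG;
		case ORDERED_QUEUE_TAG:
			return MSG_ORDERED_TAG;
		}
	}
	return MSG_SIMPLE_TAG;
}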
Example #6
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code using scsi-generic does not memset
	 * its associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	/* tl_cmd was never handed to target core on these paths; free it */
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
Example #7
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;

	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	/*
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
		T_TASK(se_cmd)->t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}
	/*
	 * Because some userspace code using scsi-generic does not memset
	 * its associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	transport_device_setup_cmd(se_cmd);
	return se_cmd;
}