Example #1
int iscsit_get_lun_for_tmr(
	struct iscsi_cmd *cmd,
	u64 lun)
{
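	/*
	 * The iSCSI TMR PDU carries the LUN in SAM wire format; unpack it
	 * into a plain integer index before the target-core lookup below.
	 */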
	u32 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

	return transport_lookup_tmr_lun(&cmd->se_cmd, unpacked_lun);
}
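For context on the cast above: a minimal sketch of the round trip through the 8-byte SAM wire format that scsilun_to_int() undoes. demo_lun_roundtrip() is a hypothetical helper, not a kernel symbol; int_to_scsilun() and scsilun_to_int() are the kernel's own pair, available via <scsi/scsi.h> in the era of these examples.

#include <scsi/scsi.h>	/* struct scsi_lun, int_to_scsilun(), scsilun_to_int() */

/*
 * Hypothetical illustration only. Packing LUN 5 with peripheral device
 * addressing yields the bytes { 0x00, 0x05, 0, 0, 0, 0, 0, 0 }, so
 * unpacking the u64 field exactly as iscsit_get_lun_for_tmr() does
 * returns 5 again.
 */
static u32 demo_lun_roundtrip(void)
{
	u64 wire_lun;

	int_to_scsilun(5, (struct scsi_lun *)&wire_lun);	/* pack */
	return scsilun_to_int((struct scsi_lun *)&wire_lun);	/* unpack */
}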
Example #2
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
    struct se_cmd *se_cmd = NULL;
    struct se_portal_group *se_tpg;
    struct se_session *se_sess;
    struct tcm_loop_cmd *tl_cmd = NULL;
    struct tcm_loop_hba *tl_hba;
    struct tcm_loop_nexus *tl_nexus;
    struct tcm_loop_tmr *tl_tmr = NULL;
    struct tcm_loop_tpg *tl_tpg;
    int ret = FAILED;
    /*
     * Locate the struct tcm_loop_hba pointer
     */
    tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
    /*
     * Locate the tl_nexus and se_sess pointers
     */
    tl_nexus = tl_hba->tl_nexus;
    if (!tl_nexus) {
        pr_err("Unable to perform device reset without"
               " active I_T Nexus\n");
        return FAILED;
    }
    se_sess = tl_nexus->se_sess;
    /*
     * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
     */
    tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
    se_tpg = &tl_tpg->tl_se_tpg;

    tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
    if (!tl_cmd) {
        pr_err("Unable to allocate memory for tl_cmd\n");
        return FAILED;
    }

    tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
    if (!tl_tmr) {
        pr_err("Unable to allocate memory for tl_tmr\n");
        goto release;
    }
    init_waitqueue_head(&tl_tmr->tl_tmr_wait);

    se_cmd = &tl_cmd->tl_se_cmd;
    /*
     * Initialize struct se_cmd descriptor from target_core_mod infrastructure
     */
    transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
                          DMA_NONE, MSG_SIMPLE_TAG,
                          &tl_cmd->tl_sense_buf[0]);
    /*
     * Allocate the LUN_RESET TMR
     */
    se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
                                            TMR_LUN_RESET, GFP_KERNEL);
    if (IS_ERR(se_cmd->se_tmr_req))
        goto release;
    /*
     * Locate the underlying TCM struct se_lun from sc->device->lun
     */
    if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
        goto release;
    /*
     * Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
     * to wake us up.
     */
    transport_generic_handle_tmr(se_cmd);
    wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
    /*
     * The TMR LUN_RESET has completed, check the response status and
     * then release allocations.
     */
    ret = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
          SUCCESS : FAILED;
release:
    if (se_cmd)
        transport_generic_free_cmd(se_cmd, 1);
    else
        kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
    kfree(tl_tmr);
    return ret;
}
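Examples #2 and #3 both sleep in wait_event() until tcm_loop_queue_tm_rsp() flags completion. Below is a minimal sketch of that wake-up side, modeled on the same-era tcm_loop code; the fabric_tmr_ptr member of struct se_tmr_req is assumed to hold the tl_tmr passed to core_tmr_alloc_req() above.

static int tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;

	/*
	 * Mark the TMR complete and wake the SCSI EH thread sleeping
	 * in wait_event() in tcm_loop_device_reset().
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
	return 0;
}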
Example #3
/*
 * Called from SCSI EH process context to build and issue a TMR
 * (e.g. LUN_RESET or ABORT_TASK) to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      struct tcm_loop_nexus *tl_nexus,
			      int lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, MSG_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}
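With this generic helper in place, the Example #2 entry point shrinks to a thin wrapper. A sketch of that caller, modeled on the kernel's refactoring and reusing the lookup steps from Example #2:

static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret;

	/*
	 * Locate the struct tcm_loop_hba, the active I_T nexus and the
	 * TPG from the TargetID in sc->device->id, as in Example #2.
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus)
		return FAILED;
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}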
Example #4
/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct se_tmr_req *tmr;
	struct fcp_cmnd *fcp;
	struct ft_sess *sess;
	u8 tm_func;

	transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
			cmd->sess->se_sess, 0, DMA_NONE, 0,
			&cmd->ft_sense_buffer[0]);
	target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP4r01 indicates having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	pr_debug("alloc tm cmd fn %d\n", tm_func);
	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);
	if (IS_ERR(tmr)) {	/* alloc returns ERR_PTR() on failure, cf. Example #2 */
		pr_debug("alloc failed\n");
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
		return;
	}
	cmd->se_cmd.se_tmr_req = tmr;

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
		if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
			/*
			 * Clean up the newly allocated TMR request, since we
			 * could not handle it after failing to locate the LUN.
			 */
			pr_debug("Failed to get LUN for TMR func %d, "
				  "se_cmd %p, unpacked_lun %d\n",
				  tm_func, &cmd->se_cmd, cmd->lun);
			ft_dump_cmd(cmd, __func__);
			sess = cmd->sess;
			transport_send_check_condition_and_sense(&cmd->se_cmd,
				cmd->se_cmd.scsi_sense_reason, 0);
			ft_sess_put(sess);
			return;
		}
		break;
	case FCP_TMF_TGT_RESET:
	case FCP_TMF_CLR_TASK_SET:
	case FCP_TMF_ABT_TASK_SET:
	case FCP_TMF_CLR_ACA:
		break;
	default:
		return;
	}
	transport_generic_handle_tmr(&cmd->se_cmd);
}
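All four fabrics drive transport_lookup_tmr_lun() through the same sequence. Below is a condensed skeleton of that shared pattern; every my_fabric_* name is a placeholder rather than a real kernel symbol, and the target-core calls and include paths are assumed from the 3.x-era API shown in Examples #2-#4.

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <scsi/scsi_tcq.h>	/* MSG_SIMPLE_TAG */

static void my_fabric_issue_tmr(struct se_cmd *se_cmd,
				struct target_core_fabric_ops *tfo,
				struct se_session *se_sess,
				unsigned char *sense_buf,
				void *fabric_tmr_ptr,
				u8 tm_func, u32 unpacked_lun)
{
	/* 1. Describe the command to target-core; TMRs move no data. */
	transport_init_se_cmd(se_cmd, tfo, se_sess, 0, DMA_NONE,
			      MSG_SIMPLE_TAG, sense_buf);

	/* 2. Attach a TMR request for the wanted function. */
	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr,
						tm_func, GFP_KERNEL);
	if (IS_ERR(se_cmd->se_tmr_req))
		return;	/* send a fabric-specific failure response here */

	/* 3. Resolve the unpacked LUN to the backing struct se_lun. */
	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0)
		return;	/* LUN does not exist for this I_T nexus */

	/* 4. Hand the TMR to target-core for asynchronous execution. */
	transport_generic_handle_tmr(se_cmd);
}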