Example 1
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	u8 *mac;
	u8 op;

	if (IS_ERR(fp))
		goto done;

	mac = fr_cb(fp)->granted_mac;
	if (is_zero_ether_addr(mac)) {
		op = fc_frame_payload_op(fp);
		if (lport->vport) {
			if (op == ELS_LS_RJT) {
				printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
				fc_vport_terminate(lport->vport);
				fc_frame_free(fp);
				return;
			}
		}
		fcoe_ctlr_recv_flogi(fip, lport, fp);
	}
	if (!is_zero_ether_addr(mac))
		fip->update_mac(lport, mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}
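
The branch above keys off is_zero_ether_addr(): a FLOGI response carries a granted MAC only once the fabric has assigned one, so an all-zero address means "not granted yet". A minimal user-space sketch of that check (the kernel's real helper lives in etherdevice.h; this stand-in only models the all-zero-octets semantics):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* User-space stand-in for the kernel helper of the same name. */
static bool is_zero_ether_addr(const uint8_t addr[6])
{
	/* True only when all six octets are zero (no MAC granted yet). */
	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
}

int main(void)
{
	uint8_t no_mac[6] = { 0 };
	uint8_t granted[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };

	printf("%d %d\n", is_zero_ether_addr(no_mac),
	       is_zero_ether_addr(granted));	/* prints "1 0" */
	return 0;
}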
Example 2
/*
 * Handle and clean up any HW-specific resources when
 * ABORTs, errors, or timeouts are received.
 */
void ft_invl_hw_context(struct ft_cmd *cmd)
{
	struct fc_seq *seq;
	struct fc_exch *ep = NULL;
	struct fc_lport *lport = NULL;

	BUG_ON(!cmd);
	seq = cmd->seq;

	/* Cleanup the DDP context in HW if DDP was setup */
	if (cmd->was_ddp_setup && seq) {
		ep = fc_seq_exch(seq);
		if (ep) {
			lport = ep->lp;
			if (lport && (ep->xid <= lport->lro_xid)) {
				/*
				 * "ddp_done" triggers invalidation of the
				 * HW-specific DDP context
				 */
				cmd->write_data_len = lport->tt.ddp_done(lport,
								      ep->xid);

				/*
				 * Reset the same flag to indicate that the
				 * HW DDP context has been invalidated, to
				 * avoid re-invalidating the same context
				 * (the context is identified by ep->xid)
				 */
				cmd->was_ddp_setup = 0;
			}
		}
	}
}
Example 3
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	unsigned int len;
	void *pp;
	void *dp;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		sp = lport->tt.seq_start_next(sp);
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);
	}
	fc_frame_free(in_fp);
}
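
The accept path above overwrites the first payload word with htonl(ELS_LS_ACC << 24) so that the ELS command code lands in the first byte on the wire. A hedged user-space sketch of that byte layout, assuming ELS_LS_ACC is 0x02 as in the FC-LS definitions libfc uses:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define ELS_LS_ACC 0x02	/* assumption: matches enum fc_els_cmd */

int main(void)
{
	uint8_t payload[4];
	uint32_t first_word = htonl(ELS_LS_ACC << 24);

	memcpy(payload, &first_word, sizeof(first_word));
	/* Byte 0 carries the ELS command; bytes 1-3 stay zero. */
	printf("payload[0] = 0x%02x\n", payload[0]);	/* prints 0x02 */
	return 0;
}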
Example 4
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
Example 5
/*
 * Dump cmd state for debugging.
 */
static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->sess, cmd->seq, se_cmd);

	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		caller, cmd, se_cmd->t_data_nents,
	       se_cmd->data_length, se_cmd->se_cmd_flags);

	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
			caller, cmd, sg,
			sg_page(sg), sg->length, sg->offset);

	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
}
Example 6
u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);

	if (cmd->aborted)
		return ~0;
	return fc_seq_exch(cmd->seq)->rxid;
}
Example 7
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to the 'exchange initiator'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * cmd may have been broken up into multiple
				 * tasks. Link their sgs together so we can
				 * operate on them all at once.
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt =
					se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
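
ft_write_pending reads F_CTL back out of the header it just filled in; F_CTL is a 3-byte big-endian field, hence the ntoh24() helper. A user-space model of that load and the responder test, with the bit values taken from libfc's fc_fs.h (treat them as assumptions here):

#include <stdint.h>
#include <stdio.h>

#define FC_FC_EX_CTX	(1 << 23)	/* exchange responder */
#define FC_FC_END_SEQ	(1 << 19)	/* last frame of sequence */
#define FC_FC_SEQ_INIT	(1 << 16)	/* transfer sequence initiative */

static uint32_t ntoh24(const uint8_t p[3])
{
	/* 24-bit big-endian field: most significant byte first. */
	return (p[0] << 16) | (p[1] << 8) | p[2];
}

int main(void)
{
	/* 0x89 in the top byte = EX_CTX | END_SEQ | SEQ_INIT, as above */
	uint8_t fh_f_ctl[3] = { 0x89, 0x00, 0x00 };
	uint32_t f_ctl = ntoh24(fh_f_ctl);

	if (f_ctl & FC_FC_EX_CTX)
		printf("sent as exchange responder (f_ctl 0x%06x)\n", f_ctl);
	return 0;
}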
Example 8
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;		/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;	/* TBD not supported by tcm_fc yet */
	}
	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE: /* Fallthrough */
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
	target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
			ntohl(fcp->fc_dl), task_attr, data_dir, 0);
	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
Example 9
static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			sp = lport->tt.seq_start_next(sp);
			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
				       FC_TYPE_ELS, f_ctl, 0);
			lport->tt.seq_send(lport, sp, fp);
		}
	}
	fc_frame_free(in_fp);
}
Example 10
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to the 'exchange initiator'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * Map the se_mem list to a scatterlist so
				 * that DDP can be set up; the DDP setup
				 * function requires a scatterlist.
				 * se_mem_list is internal to the TCM/LIO
				 * target
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
				cmd->sg_cnt =
					T_TASK(se_cmd)->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
						    cmd->sg, cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
Example 11
/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}
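
ft_queue_status packs the sense bytes immediately after the FCP response header: (fcp + 1) points to the first byte past the struct, and the frame is sized as sizeof(*fcp) + scsi_sense_length up front. A small user-space sketch of the same layout trick, using a simplified stand-in struct rather than the kernel's struct fcp_resp_with_ext:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_fcp_resp {		/* assumption: simplified stand-in layout */
	uint8_t fr_status;
	uint8_t fr_flags;
	uint32_t fr_sns_len;	/* kernel stores this big-endian via htonl() */
};

int main(void)
{
	const uint8_t sense[8] = { 0x70, 0, 0x05, 0, 0, 0, 0, 0 };
	size_t len = sizeof(struct fake_fcp_resp) + sizeof(sense);
	struct fake_fcp_resp *fcp = calloc(1, len);

	if (!fcp)
		return 1;
	fcp->fr_status = 2;			/* CHECK CONDITION */
	fcp->fr_sns_len = sizeof(sense);
	memcpy(fcp + 1, sense, sizeof(sense));	/* sense follows the header */
	printf("total payload %zu bytes\n", len);
	free(fcp);
	return 0;
}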
Example 12
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}
Example 13
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt =
					se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
Example 14
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = fc_seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to the 'exchange initiator'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
			    lport->tt.ddp_target(lport, ep->xid,
						 se_cmd->t_data_sg,
						 se_cmd->t_data_nents))
				cmd->was_ddp_setup = 1;
		}
	}
	fc_seq_send(lport, cmd->seq, fp);
	return 0;
}
Example 15
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}
Example 16
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct se_transport_task *task;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct se_mem *mem;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len;
	size_t tlen;
	size_t off_in_page;
	struct page *page;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	task = T_TASK(se_cmd);
	BUG_ON(!task);
	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry if any.
	 */
	if (task->t_tasks_se_num) {
		mem = list_first_entry(task->t_mem_list,
			 struct se_mem, se_list);
		mem_len = mem->se_len;
		mem_off = mem->se_off;
		page = mem->se_page;
	} else {
Example 17
/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct se_mem *mem;
	struct se_transport_task *task;

	if (!(ft_debug_logging & FT_DEBUG_IO))
		return;

	se_cmd = &cmd->se_cmd;
	printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
	       caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
	printk(KERN_INFO "%s: cmd %p cdb %p\n",
	       caller, cmd, cmd->cdb);
	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

	task = T_TASK(se_cmd);
	printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u "
	       "se_cmd_flags <0x%x>\n",
	       caller, cmd, task, task->t_tasks_se_num,
	       task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
	if (task->t_mem_list)
		list_for_each_entry(mem, task->t_mem_list, se_list)
			printk(KERN_INFO "%s: cmd %p mem %p page %p "
			       "len 0x%x off 0x%x\n",
			       caller, cmd, mem,
			       mem->se_page, mem->se_len, mem->se_off);
	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		printk(KERN_INFO "%s: cmd %p sid %x did %x "
		       "ox_id %x rx_id %x seq_id %x e_stat %x\n",
		       caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
		       sp->id, ep->esb_stat);
	}
	print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
Example 18
/*
 * Dump cmd state for debugging.
 */
void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
{
	struct fc_exch *ep;
	struct fc_seq *sp;
	struct se_cmd *se_cmd;
	struct scatterlist *sg;
	int count;

	se_cmd = &cmd->se_cmd;
	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n",
		caller, cmd, cmd->sess, cmd->seq, se_cmd);
	pr_debug("%s: cmd %p cdb %p\n",
		caller, cmd, cmd->cdb);
	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);

	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
		caller, cmd, se_cmd->t_data_nents,
	       se_cmd->data_length, se_cmd->se_cmd_flags);

	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
		pr_debug("%s: cmd %p sg %p page %p "
			"len 0x%x off 0x%x\n",
			caller, cmd, sg,
			sg_page(sg), sg->length, sg->offset);

	sp = cmd->seq;
	if (sp) {
		ep = fc_seq_exch(sp);
		pr_debug("%s: cmd %p sid %x did %x "
			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
			sp->id, ep->esb_stat);
	}
	print_hex_dump(KERN_INFO, "ft_dump_cmd ", DUMP_PREFIX_NONE,
		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);
}
Example 19
/*
 * Send read data back to initiator.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;
	u32 fh_off = 0;
	u32 frame_off;
	size_t frame_len = 0;
	size_t mem_len;
	u32 mem_off;
	size_t tlen;
	struct page *page;
	int use_sg;
	int error;
	void *to = NULL;
	u8 *from = NULL;
	int loop_limit = 10000;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		 ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence.  Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit.  remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			page = virt_to_page(from + mem_off);
			get_page(page);
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;

		if (frame_len)
			continue;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}
Example 20
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as large as
			 * 'lso_max'; otherwise cap it at the session's
			 * max_frame size.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set the frame's max payload, which the base driver
			 * uses to tell the HW the maximum frame size so that
			 * the HW can fragment appropriately based on the
			 * "gso_max_size" of the underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status ignoring the
			 * rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce its
			 * per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
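
The heart of ft_queue_data_in is double bookkeeping between sg-entry space and frame space: each pass consumes tlen = min(mem_len, frame_len), a frame is sent whenever frame_len reaches zero, and FC_FC_END_SEQ goes on the last one. A self-contained sketch of just that arithmetic, with made-up sizes:

#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t sg_lens[] = { 4096, 4096, 512 };	/* hypothetical sg entries */
	size_t remaining = 8704;	/* total READ payload (sums the sg list) */
	size_t frame_max = 2048;	/* stand-in for max_frame/lso_max */
	size_t i = 0, mem_len = sg_lens[0];
	size_t frame_len = 0, fh_off = 0, off = 0, tlen;

	while (remaining) {
		if (!mem_len)			/* current sg entry consumed */
			mem_len = sg_lens[++i];
		if (!frame_len) {		/* start a new frame */
			frame_len = min_sz(frame_max, remaining);
			fh_off = off;		/* header's relative offset */
		}
		tlen = min_sz(mem_len, frame_len);
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;
		off += tlen;
		if (!frame_len)			/* frame full: "send" it */
			printf("frame rel_off %zu%s\n", fh_off,
			       remaining ? "" : " (END_SEQ)");
	}
	return 0;
}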
Example 21
/*
 * Receive write data frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct scatterlist *sg = NULL;
	u32 mem_off = 0;
	u32 rel_off;
	size_t frame_len;
	size_t mem_len = 0;
	size_t tlen;
	struct page *page = NULL;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	fh = fc_frame_header_get(fp);
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;

	f_ctl = ntoh24(fh->fh_f_ctl);
	ep = fc_seq_exch(seq);
	lport = ep->lp;
	if (cmd->was_ddp_setup) {
		BUG_ON(!ep);
		BUG_ON(!lport);
		/*
		 * Since DDP (Large Rx offload) was setup for this request,
		 * payload is expected to be copied directly to user buffers.
		 */
		buf = fc_frame_payload_get(fp, 1);
		if (buf)
			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
				"cmd->sg_cnt 0x%x. DDP was set up, hence no "
				"frame with payload is expected; the frame "
				"will be dropped if the 'Sequence Initiative' "
				"bit in f_ctl is not set\n", __func__, ep->xid,
				f_ctl, se_cmd->t_data_sg, se_cmd->t_data_nents);
		/*
		 * Invalidate the HW DDP context if it was set up for this
		 * command.  Invalidation of the HW DDP context is required
		 * in both the success and error cases.
		 */
		ft_invl_hw_context(cmd);

		/*
		 * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the
		 * last write data frame was received successfully: the payload
		 * was posted directly to the user buffer and only the last
		 * frame's header was posted to the receive queue.
		 *
		 * If the "Sequence Initiative (TSI)" bit is not set, there was
		 * a DDP error, so drop the packet and let explicit ABORTs from
		 * the other end of the exchange trigger the recovery.
		 */
		if (f_ctl & FC_FC_SEQ_INIT)
			goto last_frame;
		else
			goto drop;
	}

	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	if (rel_off >= se_cmd->data_length)
		goto drop;
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(frame_len && !se_cmd->t_data_sg);
	if (frame_len) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	while (frame_len) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = sg->length;
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
		page_addr = to;
		to += mem_off & ~PAGE_MASK;
		tlen = min(tlen, (size_t)(PAGE_SIZE -
					  (mem_off & ~PAGE_MASK)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr);

		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
last_frame:
	if (cmd->write_data_len == se_cmd->data_length) {
		INIT_WORK(&cmd->work, ft_execute_work);
		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
	}
drop:
	fc_frame_free(fp);
}
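
The non-DDP path above has to honor fh_parm_offset (rel_off): whole sg entries are skipped while the offset still covers them, then the copy lands mid-entry. A self-contained sketch of just that walk, with made-up entry sizes:

#include <stddef.h>
#include <stdio.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t sg_lens[] = { 4096, 4096, 4096 };	/* hypothetical sg entries */
	size_t rel_off = 6000;		/* fh_parm_offset from the frame header */
	size_t frame_len = 2048;	/* payload bytes carried by this frame */
	size_t i;

	for (i = 0; frame_len && i < 3; i++) {
		size_t mem_len = sg_lens[i];
		size_t mem_off = 0;
		size_t tlen;

		if (rel_off >= mem_len) {	/* frame starts past this entry */
			rel_off -= mem_len;
			continue;
		}
		mem_off += rel_off;		/* land inside this entry */
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min_sz(mem_len, frame_len);
		printf("copy %zu bytes into sg[%zu] at offset %zu\n",
		       tlen, i, mem_off);
		frame_len -= tlen;
	}
	return 0;
}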
Example 22
static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
				    struct fc_frame *rx_fp,
				    struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_seq *sp;
	struct fc_exch *ep;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	fh = fc_frame_header_get(rx_fp);
	remote_fid = ntoh24(fh->fh_s_id);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response.  If this fails, the originator should
		 * repeat the sequence.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, remote_fid, local_fid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));

out:
	sp = fr_seq(rx_fp);
	fc_frame_free(rx_fp);
}
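
The point-to-point FID selection above is a tie-break on WWPNs: the port with the higher WWPN claims the high FID, and a colliding or missing remote FID is pushed to the other value. A user-space sketch of that rule; the two FID constants are assumed to match libfc's FC_LOCAL_PTP_FID_LO/HI:

#include <stdint.h>
#include <stdio.h>

#define PTP_FID_LO 0x010101	/* assumption: libfc FC_LOCAL_PTP_FID_LO */
#define PTP_FID_HI 0x010102	/* assumption: libfc FC_LOCAL_PTP_FID_HI */

static void pick_ptp_fids(uint64_t local_wwpn, uint64_t remote_wwpn,
			  uint32_t *local_fid, uint32_t *remote_fid)
{
	*local_fid = PTP_FID_LO;
	if (remote_wwpn < local_wwpn) {
		*local_fid = PTP_FID_HI;	/* we win the tie-break */
		if (!*remote_fid || *remote_fid == *local_fid)
			*remote_fid = PTP_FID_LO;
	} else if (!*remote_fid) {
		*remote_fid = PTP_FID_HI;
	}
}

int main(void)
{
	uint32_t lfid, rfid = 0;

	pick_ptp_fids(0x20000000c9abcdefULL, 0x20000000c9000001ULL,
		      &lfid, &rfid);
	printf("local %06x remote %06x\n", lfid, rfid);	/* 010102 010101 */
	return 0;
}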
Example 23
/*
 * Send new command to target.
 */
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;		/* not handling longer CDBs yet */

	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;	/* TBD not supported by tcm_fc yet */
	}
	/*
	 * Locate the SAM Task Attr from fc_pri_ta
	 */
	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE: /* Fallthrough */
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
	/*
	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
	 * directly from ft_check_stop_free callback in response path.
	 */
	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			      &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
			      ntohl(fcp->fc_dl), task_attr, data_dir, 0))
		goto err;

	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
Example 24
/*
 * Send new command to target.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct se_cmd *se_cmd;
	struct fcp_cmnd *fcp;
	int data_dir;
	u32 data_len;
	int task_attr;
	int ret;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;		/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		task_attr = FCP_PTA_SIMPLE;
		data_dir = DMA_NONE;
		data_len = 0;
	} else {
		switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
		case 0:
			data_dir = DMA_NONE;
			break;
		case FCP_CFL_RDDATA:
			data_dir = DMA_FROM_DEVICE;
			break;
		case FCP_CFL_WRDATA:
			data_dir = DMA_TO_DEVICE;
			break;
		case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
			goto err;	/* TBD not supported by tcm_fc yet */
		}
		/*
		 * Locate the SAM Task Attr from fc_pri_ta
		 */
		switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
		case FCP_PTA_HEADQ:
			task_attr = MSG_HEAD_TAG;
			break;
		case FCP_PTA_ORDERED:
			task_attr = MSG_ORDERED_TAG;
			break;
		case FCP_PTA_ACA:
			task_attr = MSG_ACA_TAG;
			break;
		case FCP_PTA_SIMPLE: /* Fallthrough */
		default:
			task_attr = MSG_SIMPLE_TAG;
		}

		data_len = ntohl(fcp->fc_dl);
		cmd->cdb = fcp->fc_cdb;
	}

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
			      data_len, data_dir, task_attr,
			      &cmd->ft_sense_buffer[0]);
	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
	ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
	if (ret < 0) {
		ft_dump_cmd(cmd, __func__);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
			cmd->se_cmd.scsi_sense_reason, 0);
		return;
	}

	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
	ft_dump_cmd(cmd, __func__);

	if (ret == -1) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	if (ret == -2) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			ft_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	transport_generic_handle_cdb(se_cmd);
	return;

err:
	ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
	return;
}
Example 25
static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
				    struct fc_frame *rx_fp,
				    struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_seq *sp;
	struct fc_exch *ep;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	fh = fc_frame_header_get(rx_fp);
	remote_fid = ntoh24(fh->fh_s_id);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "libfc: Received FLOGI from port "
		       "with same WWPN %llx\n", remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_host_port_id(lport->host) = local_fid;

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		sp = lport->tt.seq_start_next(fr_seq(rx_fp));
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response.  If this fails, the originator should
		 * repeat the sequence.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));

out:
	sp = fr_seq(rx_fp);
	fc_frame_free(rx_fp);
}
Example 26
/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;
	int rc;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}

	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	rc = lport->tt.seq_send(lport, cmd->seq, fp);
	if (rc) {
		pr_info_ratelimited("%s: Failed to send response frame %p, "
				    "xid <0x%x>\n", __func__, fp, ep->xid);
		/*
		 * Generate a TASK_SET_FULL status to notify the initiator
		 * to reduce its queue_depth after the se_cmd response has
		 * been re-queued by target-core.
		 */
		se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		return -ENOMEM;
	}
	lport->tt.exch_done(cmd->seq);
	return 0;
}