/**
 * fc_lport_recv_echo_req() - Handle received ECHO request
 * @lport: The local port receiving the ECHO
 * @in_fp: ECHO request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_echo_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	unsigned int len;
	void *pp;
	void *dp;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
		lport->tt.frame_send(lport, fp);
	}
	fc_frame_free(in_fp);
}
static void fc_lport_recv_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	unsigned int len;
	void *pp;
	void *dp;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
		     fc_lport_state(lport));

	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
	pp = fc_frame_payload_get(in_fp, len);

	if (len < sizeof(__be32))
		len = sizeof(__be32);

	fp = fc_frame_alloc(lport, len);
	if (fp) {
		dp = fc_frame_payload_get(fp, len);
		memcpy(dp, pp, len);
		*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
		sp = lport->tt.seq_start_next(sp);
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);
	}
	fc_frame_free(in_fp);
}
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);
	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));
out:
	fc_frame_free(rx_fp);
}
static void fc_lport_recv_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
				   struct fc_lport *lport)
{
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;
	u32 f_ctl;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			sp = lport->tt.seq_start_next(sp);
			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
				       FC_TYPE_ELS, f_ctl, 0);
			lport->tt.seq_send(lport, sp, fp);
		}
	}
	fc_frame_free(in_fp);
}
/**
 * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
 * @lport: The local port receiving the RNID
 * @in_fp: The RNID request frame
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
				   struct fc_frame *in_fp)
{
	struct fc_frame *fp;
	struct fc_els_rnid *req;
	struct {
		struct fc_els_rnid_resp rnid;
		struct fc_els_rnid_cid cid;
		struct fc_els_rnid_gen gen;
	} *rp;
	struct fc_seq_els_data rjt_data;
	u8 fmt;
	size_t len;

	FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
		     fc_lport_state(lport));

	req = fc_frame_payload_get(in_fp, sizeof(*req));
	if (!req) {
		rjt_data.reason = ELS_RJT_LOGIC;
		rjt_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
	} else {
		fmt = req->rnid_fmt;
		len = sizeof(*rp);
		if (fmt != ELS_RNIDF_GEN ||
		    ntohl(lport->rnid_gen.rnid_atype) == 0) {
			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
			len -= sizeof(rp->gen);
		}
		fp = fc_frame_alloc(lport, len);
		if (fp) {
			rp = fc_frame_payload_get(fp, len);
			memset(rp, 0, len);
			rp->rnid.rnid_cmd = ELS_LS_ACC;
			rp->rnid.rnid_fmt = fmt;
			rp->rnid.rnid_cid_len = sizeof(rp->cid);
			rp->cid.rnid_wwpn = htonll(lport->wwpn);
			rp->cid.rnid_wwnn = htonll(lport->wwnn);
			if (fmt == ELS_RNIDF_GEN) {
				rp->rnid.rnid_sid_len = sizeof(rp->gen);
				memcpy(&rp->gen, &lport->rnid_gen,
				       sizeof(rp->gen));
			}
			fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
			lport->tt.frame_send(lport, fp);
		}
	}
	fc_frame_free(in_fp);
}
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;

	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;
	}

	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE:
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			  &cmd->ft_sense_buffer[0],
			  scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
			  task_attr, data_dir, 0);
	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to 'exchange initiator (initiator)'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * cmd may have been broken up into multiple
				 * tasks. Link their sgs together so we can
				 * operate on them all at once.
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to 'exchange initiator (initiator)'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				/*
				 * Map se_mem list to scatterlist, so that
				 * DDP can be setup. DDP setup function require
				 * scatterlist. se_mem_list is internal to
				 * TCM/LIO target
				 */
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
				cmd->sg_cnt =
					T_TASK(se_cmd)->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
						cmd->sg, cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:	      The sequence for the FC Passthrough response
 * @fp:	      The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}
/*
 * Send response.
 */
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp) {
		/* XXX shouldn't just drop it - requeue and retry? */
		return 0;
	}
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	/*
	 * Test underflow and overflow with one mask.  Usually both are off.
	 * Bidirectional commands are not handled yet.
	 */
	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	/*
	 * Send response.
	 */
	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}
/**
 * fc_lport_ct_request() - Send CT Passthrough request
 * @job:   The BSG Passthrough job
 * @lport: The local port sending the request
 * @did:   The destination FC-ID
 * @tov:   The timeout period to wait for the response
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this routine.
 */
static int fc_lport_ct_request(struct fc_bsg_job *job,
			       struct fc_lport *lport, u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_ct_req *ct;
	size_t len;

	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
			    job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	ct = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ct, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_CT;
	hton24(fh->fh_f_ctl, FC_FCTL_REQ);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = FC_FS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		kfree(info);
		return -ECOMM;
	}
	return 0;
}
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM;

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	if (f_ctl & FC_FC_EX_CTX) {
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
				transport_do_task_sg_chain(se_cmd);
				cmd->sg = se_cmd->t_tasks_sg_chained;
				cmd->sg_cnt = se_cmd->t_tasks_sg_chained_no;
			}
			if (cmd->sg && lport->tt.ddp_target(lport, ep->xid,
							    cmd->sg,
							    cmd->sg_cnt))
				cmd->was_ddp_setup = 1;
		}
	}
	lport->tt.seq_send(lport, cmd->seq, fp);
	return 0;
}
static int fc_lport_els_request(struct fc_bsg_job *job,
				struct fc_lport *lport,
				u32 did, u32 tov)
{
	struct fc_bsg_info *info;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	char *pp;
	int len;

	fp = fc_frame_alloc(lport, job->request_payload.payload_len);
	if (!fp)
		return -ENOMEM;

	len = job->request_payload.payload_len;
	pp = fc_frame_payload_get(fp, len);

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pp, len);

	fh = fc_frame_header_get(fp);
	fh->fh_r_ctl = FC_RCTL_ELS_REQ;
	hton24(fh->fh_d_id, did);
	hton24(fh->fh_s_id, lport->port_id);
	fh->fh_type = FC_TYPE_ELS;
	hton24(fh->fh_f_ctl, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT);
	fh->fh_cs_ctl = 0;
	fh->fh_df_ctl = 0;
	fh->fh_parm_offset = 0;

	info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
	if (!info) {
		fc_frame_free(fp);
		return -ENOMEM;
	}

	info->job = job;
	info->lport = lport;
	info->rsp_code = ELS_LS_ACC;
	info->nents = job->reply_payload.sg_cnt;
	info->sg = job->reply_payload.sg_list;

	if (!lport->tt.exch_seq_send(lport, fp, fc_lport_bsg_resp,
				     NULL, info, tov)) {
		/* Don't leak the callback context if the send fails. */
		kfree(info);
		return -ECOMM;
	}
	return 0;
}
/*
 * Send TX_RDY (transfer ready).
 */
int ft_write_pending(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_txrdy *txrdy;
	struct fc_lport *lport;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	u32 f_ctl;

	ft_dump_cmd(cmd, __func__);

	if (cmd->aborted)
		return 0;
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	fp = fc_frame_alloc(lport, sizeof(*txrdy));
	if (!fp)
		return -ENOMEM; /* Signal QUEUE_FULL */

	txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
	memset(txrdy, 0, sizeof(*txrdy));
	txrdy->ft_burst_len = htonl(se_cmd->data_length);

	cmd->seq = fc_seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_DATA_DESC, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);

	/* Only if it is 'Exchange Responder' */
	if (f_ctl & FC_FC_EX_CTX) {
		/* Target is 'exchange responder' and sending XFER_READY
		 * to 'exchange initiator (initiator)'
		 */
		if ((ep->xid <= lport->lro_xid) &&
		    (fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
			if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
			    lport->tt.ddp_target(lport, ep->xid,
						 se_cmd->t_data_sg,
						 se_cmd->t_data_nents))
				cmd->was_ddp_setup = 1;
		}
	}
	fc_seq_send(lport, cmd->seq, fp);
	return 0;
}
int ft_queue_status(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp;
	struct fcp_resp_with_ext *fcp;
	struct fc_lport *lport;
	struct fc_exch *ep;
	size_t len;

	if (cmd->aborted)
		return 0;
	ft_dump_cmd(cmd, __func__);
	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	len = sizeof(*fcp) + se_cmd->scsi_sense_length;
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return 0;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = se_cmd->scsi_status;

	len = se_cmd->scsi_sense_length;
	if (len) {
		fcp->resp.fr_flags |= FCP_SNS_LEN_VAL;
		fcp->ext.fr_sns_len = htonl(len);
		memcpy((fcp + 1), se_cmd->sense_buffer, len);
	}

	if (se_cmd->se_cmd_flags & (SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT)) {
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
			fcp->resp.fr_flags |= FCP_RESID_OVER;
		else
			fcp->resp.fr_flags |= FCP_RESID_UNDER;
		fcp->ext.fr_resid = cpu_to_be32(se_cmd->residual_count);
	}

	cmd->seq = lport->tt.seq_start_next(cmd->seq);
	fc_fill_fc_hdr(fp, FC_RCTL_DD_CMD_STATUS, ep->did, ep->sid, FC_TYPE_FCP,
		       FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ, 0);

	lport->tt.seq_send(lport, cmd->seq, fp);
	lport->tt.exch_done(cmd->seq);
	return 0;
}
static void fc_lport_rpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
				 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_RPN_ID) {
		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));
	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		fc_lport_enter_rft_id(lport);
	else
		fc_lport_error(lport, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
/*
 * Handle Task Management Request.
 */
static void ft_send_tm(struct ft_cmd *cmd)
{
	struct fcp_cmnd *fcp;
	int rc;
	u8 tm_func;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));

	switch (fcp->fc_tm_flags) {
	case FCP_TMF_LUN_RESET:
		tm_func = TMR_LUN_RESET;
		break;
	case FCP_TMF_TGT_RESET:
		tm_func = TMR_TARGET_WARM_RESET;
		break;
	case FCP_TMF_CLR_TASK_SET:
		tm_func = TMR_CLEAR_TASK_SET;
		break;
	case FCP_TMF_ABT_TASK_SET:
		tm_func = TMR_ABORT_TASK_SET;
		break;
	case FCP_TMF_CLR_ACA:
		tm_func = TMR_CLEAR_ACA;
		break;
	default:
		/*
		 * FCP4r01 indicates having a combination of
		 * tm_flags set is invalid.
		 */
		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
		ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
		return;
	}

	/* FIXME: Add referenced task tag for ABORT_TASK */
	rc = target_submit_tmr(&cmd->se_cmd, cmd->sess->se_sess,
			       &cmd->ft_sense_buffer[0],
			       scsilun_to_int(&fcp->fc_lun),
			       cmd, tm_func, GFP_KERNEL, 0, 0);
	if (rc < 0)
		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED);
}
/*
 * Send a FCP response including SCSI status and optional FCP rsp_code.
 * status is SAM_STAT_GOOD (zero) iff code is valid.
 * This is used in error cases, such as allocation failures.
 */
static void ft_send_resp_status(struct fc_lport *lport,
				const struct fc_frame *rx_fp,
				u32 status, enum fcp_resp_rsp_codes code)
{
	struct fc_frame *fp;
	struct fc_seq *sp;
	const struct fc_frame_header *fh;
	size_t len;
	struct fcp_resp_with_ext *fcp;
	struct fcp_resp_rsp_info *info;

	fh = fc_frame_header_get(rx_fp);
	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
		 ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
	len = sizeof(*fcp);
	if (status == SAM_STAT_GOOD)
		len += sizeof(*info);
	fp = fc_frame_alloc(lport, len);
	if (!fp)
		return;
	fcp = fc_frame_payload_get(fp, len);
	memset(fcp, 0, len);
	fcp->resp.fr_status = status;
	if (status == SAM_STAT_GOOD) {
		fcp->ext.fr_rsp_len = htonl(sizeof(*info));
		fcp->resp.fr_flags |= FCP_RSP_LEN_VAL;
		info = (struct fcp_resp_rsp_info *)(fcp + 1);
		info->rsp_code = code;
	}

	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0);
	sp = fr_seq(fp);
	if (sp) {
		lport->tt.seq_send(lport, sp, fp);
		lport->tt.exch_done(sp);
	} else {
		lport->tt.frame_send(lport, fp);
	}
}
int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct fc_lport *lport = fcport->qedf->lport;
	struct qedf_els_cb_arg *cb_arg = NULL;
	struct qedf_ctx *qedf;
	uint32_t r_a_tov = lport->r_a_tov;
	int rc;

	qedf = fcport->qedf;
	fh = fc_frame_header_get(fp);

	cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
			 "ADISC\n");
		rc = -ENOMEM;
		goto adisc_err;
	}
	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		  "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);

	adisc = fc_frame_payload_get(fp, sizeof(*adisc));

	rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
			       qedf_l2_els_compl, cb_arg, r_a_tov);

adisc_err:
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
		kfree(cb_arg);
	}
	return rc;
}
/**
 * fc_lport_recv_flogi_req() - Receive a FLOGI request
 * @lport: The local port that received the request
 * @rx_fp: The FLOGI frame
 *
 * A received FLOGI request indicates a point-to-point connection.
 * Accept it with the common service parameters indicating our N port.
 * Set up to do a PLOGI if we have the higher-number WWPN.
 *
 * Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
				    struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct fc_els_flogi *flp;
	struct fc_els_flogi *new_flp;
	u64 remote_wwpn;
	u32 remote_fid;
	u32 local_fid;

	FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
		     fc_lport_state(lport));

	remote_fid = fc_frame_sid(rx_fp);
	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
	if (!flp)
		goto out;
	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
	if (remote_wwpn == lport->wwpn) {
		printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
		       "with same WWPN %16.16llx\n",
		       lport->host->host_no, remote_wwpn);
		goto out;
	}
	FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);

	/*
	 * XXX what is the right thing to do for FIDs?
	 * The originator might expect our S_ID to be 0xfffffe.
	 * But if so, both of us could end up with the same FID.
	 */
	local_fid = FC_LOCAL_PTP_FID_LO;
	if (remote_wwpn < lport->wwpn) {
		local_fid = FC_LOCAL_PTP_FID_HI;
		if (!remote_fid || remote_fid == local_fid)
			remote_fid = FC_LOCAL_PTP_FID_LO;
	} else if (!remote_fid) {
		remote_fid = FC_LOCAL_PTP_FID_HI;
	}

	fc_lport_set_port_id(lport, local_fid, rx_fp);

	fp = fc_frame_alloc(lport, sizeof(*flp));
	if (fp) {
		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
		fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
		new_flp->fl_cmd = (u8) ELS_LS_ACC;

		/*
		 * Send the response.  If this fails, the originator should
		 * repeat the sequence.
		 */
		fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
		fh = fc_frame_header_get(fp);
		hton24(fh->fh_s_id, local_fid);
		hton24(fh->fh_d_id, remote_fid);
		lport->tt.frame_send(lport, fp);
	} else {
		fc_lport_error(lport, fp);
	}
	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
			   get_unaligned_be64(&flp->fl_wwnn));
out:
	fc_frame_free(rx_fp);
}
/*
 * Receive write data frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct fc_seq *seq = cmd->seq;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct fc_frame_header *fh;
	struct scatterlist *sg = NULL;
	u32 mem_off = 0;
	u32 rel_off;
	size_t frame_len;
	size_t mem_len = 0;
	size_t tlen;
	struct page *page = NULL;
	void *page_addr;
	void *from;
	void *to;
	u32 f_ctl;
	void *buf;

	fh = fc_frame_header_get(fp);
	if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
		goto drop;

	f_ctl = ntoh24(fh->fh_f_ctl);
	ep = fc_seq_exch(seq);
	lport = ep->lp;
	if (cmd->was_ddp_setup) {
		BUG_ON(!ep);
		BUG_ON(!lport);
		/*
		 * Since DDP (Large Rx offload) was setup for this request,
		 * payload is expected to be copied directly to user buffers.
		 */
		buf = fc_frame_payload_get(fp, 1);
		if (buf)
			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "
			       "cmd->sg_cnt 0x%x. DDP was setup, so a frame "
			       "with payload was not expected. The frame will "
			       "be dropped if the 'Sequence Initiative' bit "
			       "in f_ctl is not set\n",
			       __func__, ep->xid, f_ctl,
			       se_cmd->t_data_sg, se_cmd->t_data_nents);
		/*
		 * Invalidate the HW DDP context if it was setup for this
		 * command. Invalidation of the HW DDP context is required in
		 * both situations (success and error).
		 */
		ft_invl_hw_context(cmd);

		/*
		 * If the "Sequence Initiative (TSI)" bit is set in f_ctl, the
		 * last write data frame was received successfully: the payload
		 * was posted directly to the user buffer and only the last
		 * frame's header is posted in the receive queue.
		 *
		 * If the "Sequence Initiative (TSI)" bit is not set, it is an
		 * error condition w.r.t. DDP, so drop the packet and let
		 * explicit ABORTS from the other end of the exchange (or the
		 * exchange timer) trigger the recovery.
		 */
		if (f_ctl & FC_FC_SEQ_INIT)
			goto last_frame;
		else
			goto drop;
	}

	rel_off = ntohl(fh->fh_parm_offset);
	frame_len = fr_len(fp);
	if (frame_len <= sizeof(*fh))
		goto drop;
	frame_len -= sizeof(*fh);
	from = fc_frame_payload_get(fp, 0);
	if (rel_off >= se_cmd->data_length)
		goto drop;
	if (frame_len + rel_off > se_cmd->data_length)
		frame_len = se_cmd->data_length - rel_off;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(frame_len && !se_cmd->t_data_sg);
	if (frame_len) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	while (frame_len) {
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = sg->length;
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (rel_off >= mem_len) {
			rel_off -= mem_len;
			mem_len = 0;
			continue;
		}
		mem_off += rel_off;
		mem_len -= rel_off;
		rel_off = 0;

		tlen = min(mem_len, frame_len);

		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
		page_addr = to;
		to += mem_off & ~PAGE_MASK;
		tlen = min(tlen, (size_t)(PAGE_SIZE -
					  (mem_off & ~PAGE_MASK)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr);

		from += tlen;
		frame_len -= tlen;
		mem_off += tlen;
		mem_len -= tlen;
		cmd->write_data_len += tlen;
	}
last_frame:
	if (cmd->write_data_len == se_cmd->data_length) {
		INIT_WORK(&cmd->work, ft_execute_work);
		queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
	}
drop:
	fc_frame_free(fp);
}
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, then allow 'frame_len' to be as big as
			 * 'lso_max' when the indicated transfer length is
			 * >= lport->lso_max.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							   cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Setup the frame's max payload, which is used by the
			 * base driver to indicate the max frame size to the
			 * HW, so that the HW can do fragmentation
			 * appropriately based on "gso_max_size" of the
			 * underlying netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						  (mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
					    "xid <0x%x>, remaining %zu, "
					    "lso_max <0x%x>\n",
					    __func__, fp, ep->xid,
					    remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status, ignoring the
			 * rest of the DataIN, and immediately attempt to
			 * send the response via ft_queue_status() in order
			 * to notify the initiator that it should reduce its
			 * per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp:	    The sequence that the FLOGI was on
 * @fp:	    The FLOGI response frame
 * @lp_arg: The lport port that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	did = fc_frame_did(fp);

	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6.6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
		fc_lport_error(lport, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
/**
 * fc_lport_ns_resp() - Handle response to a name server
 *			registration exchange
 * @sp:	    current sequence in exchange
 * @fp:	    response frame
 * @lp_arg: Fibre Channel host port instance
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
			     void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));

	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
		FC_LPORT_DBG(lport, "Received a name server response, "
			     "but in state %s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	fh = fc_frame_header_get(fp);
	ct = fc_frame_payload_get(fp, sizeof(*ct));

	if (fh && ct && fh->fh_type == FC_TYPE_CT &&
	    ct->ct_fs_type == FC_FST_DIR &&
	    ct->ct_fs_subtype == FC_NS_SUBTYPE &&
	    ntohs(ct->ct_cmd) == FC_FS_ACC)
		switch (lport->state) {
		case LPORT_ST_RNN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
			break;
		case LPORT_ST_RSNN_NN:
			fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
			break;
		case LPORT_ST_RSPN_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
			break;
		case LPORT_ST_RFT_ID:
			fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
			break;
		case LPORT_ST_RFF_ID:
			fc_lport_enter_scr(lport);
			break;
		default:
			/* should have already been caught by state checks */
			break;
		}
	else
		fc_lport_error(lport, fp);
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
/*
 * Send new command to target.
 */
static void ft_send_work(struct work_struct *work)
{
	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct fcp_cmnd *fcp;
	int data_dir = 0;
	int task_attr;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
	case 0:
		data_dir = DMA_NONE;
		break;
	case FCP_CFL_RDDATA:
		data_dir = DMA_FROM_DEVICE;
		break;
	case FCP_CFL_WRDATA:
		data_dir = DMA_TO_DEVICE;
		break;
	case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
		goto err;	/* TBD not supported by tcm_fc yet */
	}
	/*
	 * Locate the SAM Task Attr from fc_pri_ta
	 */
	switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_HEADQ:
		task_attr = MSG_HEAD_TAG;
		break;
	case FCP_PTA_ORDERED:
		task_attr = MSG_ORDERED_TAG;
		break;
	case FCP_PTA_ACA:
		task_attr = MSG_ACA_TAG;
		break;
	case FCP_PTA_SIMPLE: /* Fallthrough */
	default:
		task_attr = MSG_SIMPLE_TAG;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
	/*
	 * Use a single se_cmd->cmd_kref as we expect to release se_cmd
	 * directly from ft_check_stop_free callback in response path.
	 */
	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
			      &cmd->ft_sense_buffer[0],
			      scsilun_to_int(&fcp->fc_lun), ntohl(fcp->fc_dl),
			      task_attr, data_dir, 0))
		goto err;

	pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
	return;

err:
	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);
}
/**
 * fc_els_resp_type() - Return a string describing the ELS response
 * @fp: The frame pointer or possible error code
 */
const char *fc_els_resp_type(struct fc_frame *fp)
{
	const char *msg;
	struct fc_frame_header *fh;
	struct fc_ct_hdr *ct;

	if (IS_ERR(fp)) {
		switch (-PTR_ERR(fp)) {
		case FC_NO_ERR:
			msg = "response no error";
			break;
		case FC_EX_TIMEOUT:
			msg = "response timeout";
			break;
		case FC_EX_CLOSED:
			msg = "response closed";
			break;
		default:
			msg = "response unknown error";
			break;
		}
	} else {
		fh = fc_frame_header_get(fp);
		switch (fh->fh_type) {
		case FC_TYPE_ELS:
			switch (fc_frame_payload_op(fp)) {
			case ELS_LS_ACC:
				msg = "accept";
				break;
			case ELS_LS_RJT:
				msg = "reject";
				break;
			default:
				msg = "response unknown ELS";
				break;
			}
			break;
		case FC_TYPE_CT:
			ct = fc_frame_payload_get(fp, sizeof(*ct));
			if (ct) {
				switch (ntohs(ct->ct_cmd)) {
				case FC_FS_ACC:
					msg = "CT accept";
					break;
				case FC_FS_RJT:
					msg = "CT reject";
					break;
				default:
					msg = "response unknown CT";
					break;
				}
			} else {
				msg = "short CT response";
			}
			break;
		default:
			msg = "response not ELS or CT";
			break;
		}
	}
	return msg;
}
/*
 * Send new command to target.
 */
static void ft_send_cmd(struct ft_cmd *cmd)
{
	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
	struct se_cmd *se_cmd;
	struct fcp_cmnd *fcp;
	int data_dir;
	u32 data_len;
	int task_attr;
	int ret;

	fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
	if (!fcp)
		goto err;

	if (fcp->fc_flags & FCP_CFL_LEN_MASK)
		goto err;	/* not handling longer CDBs yet */

	if (fcp->fc_tm_flags) {
		task_attr = FCP_PTA_SIMPLE;
		data_dir = DMA_NONE;
		data_len = 0;
	} else {
		switch (fcp->fc_flags & (FCP_CFL_RDDATA | FCP_CFL_WRDATA)) {
		case 0:
			data_dir = DMA_NONE;
			break;
		case FCP_CFL_RDDATA:
			data_dir = DMA_FROM_DEVICE;
			break;
		case FCP_CFL_WRDATA:
			data_dir = DMA_TO_DEVICE;
			break;
		case FCP_CFL_WRDATA | FCP_CFL_RDDATA:
			goto err;	/* TBD not supported by tcm_fc yet */
		}
		/*
		 * Locate the SAM Task Attr from fc_pri_ta
		 */
		switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
		case FCP_PTA_HEADQ:
			task_attr = MSG_HEAD_TAG;
			break;
		case FCP_PTA_ORDERED:
			task_attr = MSG_ORDERED_TAG;
			break;
		case FCP_PTA_ACA:
			task_attr = MSG_ACA_TAG;
			break;
		case FCP_PTA_SIMPLE: /* Fallthrough */
		default:
			task_attr = MSG_SIMPLE_TAG;
		}

		data_len = ntohl(fcp->fc_dl);
		cmd->cdb = fcp->fc_cdb;
	}

	se_cmd = &cmd->se_cmd;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod
	 * infrastructure
	 */
	transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
			      data_len, data_dir, task_attr,
			      &cmd->ft_sense_buffer[0]);
	/*
	 * Check for FCP task management flags
	 */
	if (fcp->fc_tm_flags) {
		ft_send_tm(cmd);
		return;
	}

	fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);

	cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
	ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
	if (ret < 0) {
		ft_dump_cmd(cmd, __func__);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
			cmd->se_cmd.scsi_sense_reason, 0);
		return;
	}

	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);

	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
	ft_dump_cmd(cmd, __func__);

	if (ret == -1) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	if (ret == -2) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			ft_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0, 1, 0);
		return;
	}
	transport_generic_handle_cdb(se_cmd);
	return;

err:
	ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
	return;
}