static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg) { struct fcoe_ctlr *fip = arg; struct fc_exch *exch = fc_seq_exch(seq); struct fc_lport *lport = exch->lp; u8 *mac; u8 op; if (IS_ERR(fp)) goto done; mac = fr_cb(fp)->granted_mac; if (is_zero_ether_addr(mac)) { op = fc_frame_payload_op(fp); if (lport->vport) { if (op == ELS_LS_RJT) { printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n"); fc_vport_terminate(lport->vport); fc_frame_free(fp); return; } } fcoe_ctlr_recv_flogi(fip, lport, fp); } if (!is_zero_ether_addr(mac)) fip->update_mac(lport, mac); done: fc_lport_flogi_resp(seq, fp, lport); }
/*
 * fc_lport_recv_req() - dispatch an incoming request frame to a handler
 * @lport: local port the frame arrived on
 * @sp:	   exchange sequence the frame belongs to
 * @fp:	   the received frame (consumed by the chosen handler or freed here)
 *
 * NOTE(review): older libfc variant that passes the sequence through to the
 * handlers and completes the exchange itself; compare with the two-argument
 * fc_lport_recv_req()/fc_lport_recv_els_req() forms elsewhere in this file.
 */
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here. These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);	/* link down: drop the frame */
	else if (fh->fh_type == FC_TYPE_ELS &&
		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
		/*
		 * Check opcode.  Default to the rport layer and override
		 * for the ELS commands the lport handles itself.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			fh = fc_frame_header_get(fp);
			/* only a LOGO from the FLOGI well-known FID */
			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(sp, fp, lport);
	} else {
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
	mutex_unlock(&lport->lp_mutex);

	/*
	 * The common exch_done for all requests may not be good
	 * if any request requires a longer hold on the exchange. XXX
	 */
	lport->tt.exch_done(sp);
}
/**
 * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
 * @sp:	      The sequence for the FC Passthrough response
 * @fp:	      The response frame
 * @info_arg: The BSG info that the response is for
 */
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *info_arg)
{
	struct fc_bsg_info *info = info_arg;
	struct fc_bsg_job *job = info->job;
	struct fc_lport *lport = info->lport;
	struct fc_frame_header *fh;
	size_t len;
	void *buf;

	if (IS_ERR(fp)) {
		/* exchange error: complete the job with an errno and bail */
		job->reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
			-ECONNABORTED : -ETIMEDOUT;
		job->reply_len = sizeof(uint32_t);
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
		return;
	}

	mutex_lock(&lport->lp_mutex);
	fh = fc_frame_header_get(fp);
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
		/* Get the response code from the first frame payload */
		unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
			ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
			(unsigned short)fc_frame_payload_op(fp);

		/* Save the reply status of the job */
		job->reply->reply_data.ctels_reply.status =
			(cmd == info->rsp_code) ?
			FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
	}

	/* accumulate this frame's payload into the job's scatterlist */
	job->reply->reply_payload_rcv_len +=
		fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
					 &info->offset, KM_BIO_SRC_IRQ, NULL);

	if (fr_eof(fp) == FC_EOF_T &&
	    (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		/* last frame of the last sequence: clamp length, finish job */
		if (job->reply->reply_payload_rcv_len >
		    job->reply_payload.payload_len)
			job->reply->reply_payload_rcv_len =
				job->reply_payload.payload_len;
		job->reply->result = 0;
		job->state_flags |= FC_RQST_STATE_DONE;
		job->job_done(job);
		kfree(info);
	}
	fc_frame_free(fp);
	mutex_unlock(&lport->lp_mutex);
}
/**
 * fc_lport_recv_req() - The generic lport request handler
 * @lport: The local port that received the request
 * @fp: The request frame
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it will grab the lock.
 */
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here. These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);	/* link down: drop the request */
	else if (fh->fh_type == FC_TYPE_ELS &&
		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
		/*
		 * Check opcode.  Default to the rport layer and override
		 * for the ELS commands the lport handles itself.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			/* in point-to-multipoint, FLOGI goes to the rport layer */
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			/* only a LOGO from the FLOGI well-known FID */
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	} else {
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
	mutex_unlock(&lport->lp_mutex);
}
static void fc_lport_recv_req(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *); mutex_lock(&lport->lp_mutex); if (!lport->link_up) fc_frame_free(fp); else if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) { recv = lport->tt.rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: recv = fc_lport_recv_flogi_req; break; case ELS_LOGO: fh = fc_frame_header_get(fp); if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) recv = fc_lport_recv_logo_req; break; case ELS_RSCN: recv = lport->tt.disc_recv_req; break; case ELS_ECHO: recv = fc_lport_recv_echo_req; break; case ELS_RLIR: recv = fc_lport_recv_rlir_req; break; case ELS_RNID: recv = fc_lport_recv_rnid_req; break; } recv(sp, fp, lport); } else { FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", fr_eof(fp)); fc_frame_free(fp); } mutex_unlock(&lport->lp_mutex); lport->tt.exch_done(sp); }
/**
 * fc_lport_recv_els_req() - The generic lport ELS request handler
 * @lport: The local port that received the request
 * @fp: The request frame
 *
 * This function will see if the lport handles the request or
 * if an rport should handle the request.
 *
 * Locking Note: This function should not be called with the lport
 *		 lock held because it will grab the lock.
 */
static void fc_lport_recv_els_req(struct fc_lport *lport,
				  struct fc_frame *fp)
{
	void (*recv)(struct fc_lport *, struct fc_frame *);

	mutex_lock(&lport->lp_mutex);

	/*
	 * Handle special ELS cases like FLOGI, LOGO, and
	 * RSCN here. These don't require a session.
	 * Even if we had a session, it might not be ready.
	 */
	if (!lport->link_up)
		fc_frame_free(fp);	/* link down: drop the request */
	else {
		/*
		 * Check opcode.  Default to the rport layer and override
		 * for the ELS commands the lport handles itself.
		 */
		recv = lport->tt.rport_recv_req;
		switch (fc_frame_payload_op(fp)) {
		case ELS_FLOGI:
			/* in point-to-multipoint, FLOGI goes to the rport layer */
			if (!lport->point_to_multipoint)
				recv = fc_lport_recv_flogi_req;
			break;
		case ELS_LOGO:
			/* only a LOGO from the FLOGI well-known FID */
			if (fc_frame_sid(fp) == FC_FID_FLOGI)
				recv = fc_lport_recv_logo_req;
			break;
		case ELS_RSCN:
			recv = lport->tt.disc_recv_req;
			break;
		case ELS_ECHO:
			recv = fc_lport_recv_echo_req;
			break;
		case ELS_RLIR:
			recv = fc_lport_recv_rlir_req;
			break;
		case ELS_RNID:
			recv = fc_lport_recv_rnid_req;
			break;
		}

		recv(lport, fp);
	}
	mutex_unlock(&lport->lp_mutex);
}
static void fc_lport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { void (*recv)(struct fc_lport *, struct fc_frame *); mutex_lock(&lport->lp_mutex); if (!lport->link_up) fc_frame_free(fp); else { recv = lport->tt.rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: if (!lport->point_to_multipoint) recv = fc_lport_recv_flogi_req; break; case ELS_LOGO: if (fc_frame_sid(fp) == FC_FID_FLOGI) recv = fc_lport_recv_logo_req; break; case ELS_RSCN: recv = lport->tt.disc_recv_req; break; case ELS_ECHO: recv = fc_lport_recv_echo_req; break; case ELS_RLIR: recv = fc_lport_recv_rlir_req; break; case ELS_RNID: recv = fc_lport_recv_rnid_req; break; } recv(lport, fp); } mutex_unlock(&lport->lp_mutex); }
/** * fc_lport_logo_resp() - Handle response to LOGO request * @sp: The sequence that the LOGO was on * @fp: The LOGO frame * @lp_arg: The lport port that received the LOGO request * * Locking Note: This function will be called without the lport lock * held, but it will lock, call an _enter_* function or fc_lport_error() * and then unlock the lport. */ void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg) { struct fc_lport *lport = lp_arg; u8 op; FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp)); if (fp == ERR_PTR(-FC_EX_CLOSED)) return; mutex_lock(&lport->lp_mutex); if (lport->state != LPORT_ST_LOGO) { FC_LPORT_DBG(lport, "Received a LOGO response, but in state " "%s\n", fc_lport_state(lport)); if (IS_ERR(fp)) goto err; goto out; } if (IS_ERR(fp)) { fc_lport_error(lport, fp); goto err; } op = fc_frame_payload_op(fp); if (op == ELS_LS_ACC) fc_lport_enter_disabled(lport); else fc_lport_error(lport, fp); out: fc_frame_free(fp); err: mutex_unlock(&lport->lp_mutex); }
/** * fc_els_resp_type() - Return a string describing the ELS response * @fp: The frame pointer or possible error code */ const char *fc_els_resp_type(struct fc_frame *fp) { const char *msg; struct fc_frame_header *fh; struct fc_ct_hdr *ct; if (IS_ERR(fp)) { switch (-PTR_ERR(fp)) { case FC_NO_ERR: msg = "response no error"; break; case FC_EX_TIMEOUT: msg = "response timeout"; break; case FC_EX_CLOSED: msg = "response closed"; break; default: msg = "response unknown error"; break; } } else { fh = fc_frame_header_get(fp); switch (fh->fh_type) { case FC_TYPE_ELS: switch (fc_frame_payload_op(fp)) { case ELS_LS_ACC: msg = "accept"; break; case ELS_LS_RJT: msg = "reject"; break; default: msg = "response unknown ELS"; break; } break; case FC_TYPE_CT: ct = fc_frame_payload_get(fp, sizeof(*ct)); if (ct) { switch (ntohs(ct->ct_cmd)) { case FC_FS_ACC: msg = "CT accept"; break; case FC_FS_RJT: msg = "CT reject"; break; default: msg = "response unknown CT"; break; } } else { msg = "short CT response"; } break; default: msg = "response not ELS or CT"; break; } } return msg; }
/**
 * fc_lport_flogi_resp() - Handle response to FLOGI request
 * @sp: The sequence that the FLOGI was on
 * @fp: The FLOGI response frame
 * @lp_arg: The lport port that received the FLOGI response
 *
 * Locking Note: This function will be called without the lport lock
 * held, but it will lock, call an _enter_* function or fc_lport_error()
 * and then unlock the lport.
 */
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
			 void *lp_arg)
{
	struct fc_lport *lport = lp_arg;
	struct fc_els_flogi *flp;
	u32 did;
	u16 csp_flags;
	unsigned int r_a_tov;
	unsigned int e_d_tov;
	u16 mfs;

	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));

	/* a closed exchange needs no further handling */
	if (fp == ERR_PTR(-FC_EX_CLOSED))
		return;

	mutex_lock(&lport->lp_mutex);

	if (lport->state != LPORT_ST_FLOGI) {
		/* stale response: free the frame unless it is an error code */
		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
			     "%s\n", fc_lport_state(lport));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_lport_error(lport, fp);
		goto err;
	}

	did = fc_frame_did(fp);
	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
		flp = fc_frame_payload_get(fp, sizeof(*flp));
		if (flp) {
			/* negotiate max frame size down from the accept */
			mfs = ntohs(flp->fl_csp.sp_bb_data) &
				FC_SP_BB_DATA_MASK;
			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
			    mfs < lport->mfs)
				lport->mfs = mfs;
			csp_flags = ntohs(flp->fl_csp.sp_features);
			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
			/* E_D_TOV resolution flag: value is in nanoseconds */
			if (csp_flags & FC_SP_FT_EDTR)
				e_d_tov /= 1000000;

			lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);

			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
				/* responder is not an F_Port: point-to-point */
				if (e_d_tov > lport->e_d_tov)
					lport->e_d_tov = e_d_tov;
				lport->r_a_tov = 2 * e_d_tov;
				fc_lport_set_port_id(lport, did, fp);
				printk(KERN_INFO "host%d: libfc: "
				       "Port (%6.6x) entered "
				       "point-to-point mode\n",
				       lport->host->host_no, did);
				fc_lport_ptp_setup(lport, fc_frame_sid(fp),
						   get_unaligned_be64(
							   &flp->fl_wwpn),
						   get_unaligned_be64(
							   &flp->fl_wwnn));
			} else {
				/* fabric login: adopt the fabric's timeouts */
				lport->e_d_tov = e_d_tov;
				lport->r_a_tov = r_a_tov;
				fc_host_fabric_name(lport->host) =
					get_unaligned_be64(&flp->fl_wwnn);
				fc_lport_set_port_id(lport, did, fp);
				fc_lport_enter_dns(lport);
			}
		}
	} else {
		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
		fc_lport_error(lport, fp);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&lport->lp_mutex);
}
/*
 * bnx2fc_rec_compl() - completion handler for a REC (Read Exchange
 *			Concise) ELS issued to recover a stalled I/O.
 * @cb_arg: carries the REC command (io_req) and the original I/O it was
 *	    issued for (aborted_io_req); freed here
 *
 * Runs with tgt->tgt_lock held by the caller (the lock is dropped
 * around bnx2fc_send_rec()/bnx2fc_send_srr() calls below).  Decides,
 * based on the REC response, whether to retry the REC, abort the
 * original I/O, repost it, recover the sequence, or send an SRR.
 */
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			/* retry the REC; drop the lock around the send */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	/* original I/O already completed or being aborted: nothing to do */
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		goto rec_compl_done;
	}

	/* reassemble the firmware-delivered response into an fc_frame */
	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			/* target lost the command: repost on a new exchange */
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			/* target holds sequence initiative: nothing to drive */
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			      e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			/* sequence cleanup failed: fall back to ABTS */
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			/* FCP_RSP lost: ask for retransmission via SRR */
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					      " IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	/* drop the reference taken when the REC was issued */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
/*
 * bnx2fc_srr_compl() - completion handler for an SRR (Sequence
 *			Retransmission Request) ELS.
 * @cb_arg: carries the SRR command (io_req) and the original I/O it was
 *	    issued for (aborted_io_req)
 *
 * Runs with tgt->tgt_lock held by the caller (dropped around the
 * bnx2fc_send_srr() retry).  On timeout the SRR is retried up to
 * SRR_RETRY_COUNT times, then the original I/O is aborted; on LS_RJT
 * the original I/O is aborted; on LS_ACC nothing more is needed.
 * NOTE(review): unlike bnx2fc_rec_compl(), cb_arg is not freed here —
 * presumably the caller owns it; confirm against the call site.
 */
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
			      "orig_io - 0x%x\n",
			      orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		/* original I/O already done or aborting: stop here */
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			/* retry the SRR; drop the lock around the send */
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}

	/* reassemble the firmware-delivered response into an fc_frame */
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		/* target refused retransmission: abort the original I/O */
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				      "failed xid = 0x%x. issue cleanup\n",
				      orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			      opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	/* drop the reference taken when the SRR was issued */
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
/*
 * qedf_rec_compl() - completion handler for a REC (Read Exchange
 *		      Concise) ELS issued to recover a stalled I/O.
 * @cb_arg: carries the REC command (io_req) and the original I/O it was
 *	    issued for (aborted_io_req); freed here
 *
 * On LS_RJT with an "unknown OX_ID/RX_ID" explanation the original I/O
 * is silently requeued on a new exchange; on LS_ACC the REC status is
 * used to decide between sending an SRR and initiating sequence
 * cleanup.  A REC timeout just releases resources.
 */
static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *rec_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	enum fc_rctl r_ctl;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	u8 opcode;
	u32 offset, e_stat;
	struct scsi_cmnd *sc_cmd;
	bool srr_needed = false;

	rec_req = cb_arg->io_req;
	qedf = rec_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	/* a real response arrived: stop the original I/O's timeout work */
	if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, rec_req->xid, refcount);

	/* If a REC times out, free resources */
	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(rec_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_RJT for REC: er_reason=0x%x, "
		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
		/*
		 * The following response(s) mean that we need to reissue the
		 * request on another exchange.  We need to do this without
		 * informing the upper layers lest it cause an application
		 * error.
		 */
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Handle CMD LOST case.\n");
			qedf_requeue_io_req(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
		    offset, e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			/* target holds sequence initiative: nothing to drive */
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "Target has the seq init\n");
			goto out_free_frame;
		}
		sc_cmd = orig_io_req->sc_cmd;
		if (!sc_cmd) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
			    "sc_cmd is NULL for xid=0x%x.\n",
			    orig_io_req->xid);
			goto out_free_frame;
		}
		/* SCSI write case */
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			if (offset == orig_io_req->data_xfer_len) {
				/* all data made it: only FCP_RSP was lost */
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - response lost.\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				srr_needed = true;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "WRITE - XFER_RDY/DATA lost.\n");
				r_ctl = FC_RCTL_DD_DATA_DESC;
				/* Use data from warning CQE instead of REC */
				offset = orig_io_req->tx_buf_off;
			}
		/* SCSI read case */
		} else {
			if (orig_io_req->rx_buf_off ==
			    orig_io_req->data_xfer_len) {
				/* all data made it: only FCP_RSP was lost */
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - response lost.\n");
				srr_needed = true;
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else {
				QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				    "READ - DATA lost.\n");
				/*
				 * For read case we always set the offset to 0
				 * for sequence recovery task.
				 */
				offset = 0;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
		}

		if (srr_needed)
			qedf_send_srr(orig_io_req, offset, r_ctl);
		else
			qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
	}

out_free_frame:
	fc_frame_free(fp);
out_put:
	/* Put reference for original command since REC completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/*
 * qedf_srr_compl() - completion handler for an SRR (Sequence
 *		      Retransmission Request) ELS.
 * @cb_arg: carries the SRR command (io_req) and the original I/O it was
 *	    issued for (aborted_io_req); freed here
 *
 * On LS_ACC nothing more is needed; on LS_RJT the original I/O is
 * aborted.  An SRR timeout just releases resources.
 */
static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
{
	struct qedf_ioreq *orig_io_req;
	struct qedf_ioreq *srr_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *mp_fc_hdr, *fh;
	struct fc_frame *fp;
	void *resp_buf, *fc_payload;
	u32 resp_len;
	struct fc_lport *lport;
	struct qedf_ctx *qedf;
	int refcount;
	u8 opcode;

	srr_req = cb_arg->io_req;
	qedf = srr_req->fcport->qedf;
	lport = qedf->lport;

	orig_io_req = cb_arg->aborted_io_req;

	if (!orig_io_req)
		goto out_free;

	/* the SRR is no longer outstanding on the original I/O */
	clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);

	/* a real response arrived: stop the original I/O's timeout work */
	if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
	    srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
		cancel_delayed_work_sync(&orig_io_req->timeout_work);

	refcount = kref_read(&orig_io_req->refcount);
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
		  " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
		  orig_io_req, orig_io_req->xid, srr_req->xid, refcount);

	/* If a SRR times out, simply free resources */
	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
		goto out_put;

	/* Normalize response data into struct fc_frame */
	mp_req = &(srr_req->mp_req);
	mp_fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	fp = fc_frame_alloc(lport, resp_len);
	if (!fp) {
		QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failure.\n");
		goto out_put;
	}

	/* Copy frame header from firmware into fp */
	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
	memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));

	/* Copy payload from firmware into fp */
	fc_payload = fc_frame_payload_get(fp, resp_len);
	memcpy(fc_payload, resp_buf, resp_len);

	opcode = fc_frame_payload_op(fp);
	/* NOTE(review): opcodes other than LS_ACC/LS_RJT fall through
	 * silently here — intentional per the code, but undocumented. */
	switch (opcode) {
	case ELS_LS_ACC:
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
		    "SRR success.\n");
		break;
	case ELS_LS_RJT:
		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
		    "SRR rejected.\n");
		qedf_initiate_abts(orig_io_req, true);
		break;
	}

	fc_frame_free(fp);
out_put:
	/* Put reference for original command since SRR completed */
	kref_put(&orig_io_req->refcount, qedf_release_cmd);
out_free:
	kfree(cb_arg);
}
/*
 * fnic_rq_cmpl_frame_recv() - receive-queue completion handler for one
 *			       inbound frame.
 * @rq:	     the vNIC receive queue the buffer came from
 * @cq_desc: raw completion descriptor (FCP or enet type)
 * @buf:     the RQ buffer holding the skb for this frame
 * @skipped: unused
 * @opaque:  unused
 *
 * Decodes the completion descriptor, validates CRC/FCS, converts the
 * skb into an fc_frame, intercepts a FLOGI response that matches the
 * cached FLOGI OX_ID, and otherwise queues the frame for deferred
 * processing by the frame work handler.  The skb is either queued,
 * handed to fnic_handle_flogi_resp(), or freed on the drop path.
 */
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	unsigned int eth_hdrs_stripped;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	/* take the DMA mapping down and claim the skb from the RQ buffer */
	pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
			 PCI_DMA_FROMDEVICE);
	skb = buf->os_buf;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		/* hardware already stripped the Ethernet/FCoE headers */
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		eth_hdrs_stripped = 1;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		/* raw Ethernet completion: headers still on the packet */
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		eth_hdrs_stripped = 0;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	/* drop anything that failed hardware FCS/CRC/encap checks */
	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	/* turn the skb into an FC frame, per completion flavor */
	if (eth_hdrs_stripped)
		fnic_import_rq_fc_frame(skb, fcp_bytes_written, sof, eof);
	else if (fnic_import_rq_eth_pkt(skb, bytes_written))
		goto drop;

	fp = (struct fc_frame *)skb;

	/*
	 * If frame is an ELS response that matches the cached FLOGI OX_ID,
	 * and is accept, issue flogi_reg_request copy wq request to firmware
	 * to register the S_ID and determine whether FC_OUI mode or GW mode.
	 */
	if (is_matching_flogi_resp_frame(fnic, fp)) {
		if (!eth_hdrs_stripped) {
			if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
				/* skb ownership passes to the FLOGI handler */
				fnic_handle_flogi_resp(fnic, fp);
				return;
			}
			/*
			 * Recd. Flogi reject. No point registering
			 * with fw, but forward to libFC
			 */
			goto forward;
		}
		goto drop;
	}
	if (!eth_hdrs_stripped)
		goto drop;

forward:
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	/* Use fr_flags to indicate whether succ. flogi resp or not */
	fr_flags(fp) = 0;
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* defer libFC processing to the frame work handler */
	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}