/** * qla2x00_lock_nvram_access() - Acquire the NVRAM access semaphore. * @ha: HA context */ void qla2x00_lock_nvram_access(scsi_qla_host_t *ha) { uint16_t data; device_reg_t *reg; reg = ha->iobase; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { data = RD_REG_WORD(&reg->nvram); while (data & NVR_BUSY) { udelay(100); data = RD_REG_WORD(&reg->nvram); } /* Lock resource */ WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1); udelay(5); data = RD_REG_WORD(&reg->u.isp2300.host_semaphore); while ((data & BIT_0) == 0) { /* Lock failed */ udelay(100); WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0x1); udelay(5); data = RD_REG_WORD(&reg->u.isp2300.host_semaphore); } } }
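/** * qla2x00_fdmi_register() - FDMI registration of the HBA with the fabric switch (RHBA then RPA; an existing registration is dropped with DHBA and RHBA is retried). * @vha: HA context * * Returns 0 on success. */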
int qla2x00_fdmi_register(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return QLA_FUNCTION_FAILED; rval = qla2x00_mgmt_svr_login(vha); if (rval) return rval; rval = qla2x00_fdmi_rhba(vha); if (rval) { if (rval != QLA_ALREADY_REGISTERED) return rval; rval = qla2x00_fdmi_dhba(vha); if (rval) return rval; rval = qla2x00_fdmi_rhba(vha); if (rval) return rval; } rval = qla2x00_fdmi_rpa(vha); return rval; }
/** * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. * @ha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { return (qla2x00_sns_gpn_id(ha, list)); } for (i = 0; i < MAX_FIBRE_DEVICES; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ ms_pkt = qla2x00_prep_ms_iocb(ha, GPN_ID_REQ_SIZE, GPN_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GPN_ID_CMD, GPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " "(%d).\n", ha->host_no, rval)); } else if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected " "request, gpn_id_rsp:\n", ha->host_no)); DEBUG2_3(qla2x00_dump_buffer((uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); rval = QLA_FUNCTION_FAILED; } else { /* Save portname */ memcpy(list[i].port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); }
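/** * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. * @vha: HA context * * Returns 0 on success. */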
int qla2x00_rsnn_nn(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2050, "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name); ct_req->req.rsnn_nn.name_len = (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); ms_pkt->req_bytecount = cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2051, "RSNN_NN issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2052, "RSNN_NN exiting normally.\n"); } return (rval); }
/** * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rsnn_nn(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2050, "RSNN_ID call unsupported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } /* Issue RSNN_NN */ /* Prepare common MS IOCB */ /* Request size adjusted after CT preparation */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- node_name, symbolic node_name, size */ memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); /* Prepare the Symbolic Node Name */ qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name); /* Calculate SNN length */ ct_req->req.rsnn_nn.name_len = (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name); /* Update MS IOCB request */ ms_pkt->req_bytecount = cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2051, "RSNN_NN issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2052, "RSNN_NN exiting normally.\n"); } return (rval); }
/** * qla2x00_unlock_nvram_access() - Release the NVRAM access semaphore. * @ha: HA context */ void qla2x00_unlock_nvram_access(scsi_qla_host_t *ha) { device_reg_t *reg; reg = ha->iobase; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) WRT_REG_WORD(&reg->u.isp2300.host_semaphore, 0); }
/** * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. * @ha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gpn_id(vha, list); for (i = 0; i < ha->max_fibre_devices; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE, GPN_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GPN_ID_CMD, GPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2056, "GPN_ID issue IOCB failed (%d).\n", rval); break; } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } else { /* Save portname */ memcpy(list[i].port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); }
/** * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query. * @ha: HA context * @list: switch info entries to populate * * Returns 0 on success. */ int qla2x00_gpn_id(scsi_qla_host_t *ha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { return (qla2x00_sns_gpn_id(ha, list)); } for (i = 0; i < MAX_FIBRE_DEVICES; i++) { /* Issue GPN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops.prep_ms_iocb(ha, GPN_ID_REQ_SIZE, GPN_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GPN_ID_CMD, GPN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed " "(%d).\n", ha->host_no, rval)); } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "GPN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { /* Save portname */ memcpy(list[i].port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); } /* Last device exit. */ if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); }
/** * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rff_id(scsi_qla_host_t *ha) { int rval; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " "ISP2100/ISP2200.\n", ha->host_no)); return (QLA_SUCCESS); } /* Issue RFF_ID */ /* Prepare common MS IOCB */ ms_pkt = qla2x00_prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, RFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = ha->d_id.b.area; ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa; ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", ha->host_no, rval)); } else if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): RFF_ID failed, rejected " "request, rff_id_rsp:\n", ha->host_no)); DEBUG2_3(qla2x00_dump_buffer((uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); rval = QLA_FUNCTION_FAILED; } else { DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", ha->host_no)); } return (rval); }
/** * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rff_id(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } /* Issue RFF_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, RFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; qlt_rff_id(vha, ct_req); ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2047, "RFF_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2048, "RFF_ID exiting normally.\n"); } return (rval); }
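/** * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. * @vha: HA context * * Returns 0 on success. */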
int qla2x00_rff_id(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { ql_dbg(ql_dbg_disc, vha, 0x2046, "RFF_ID call not supported on ISP2100/ISP2200.\n"); return (QLA_SUCCESS); } ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE); ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, RFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; ct_req->req.rff_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; ct_req->req.rff_id.fc4_feature = BIT_1; ct_req->req.rff_id.fc4_type = 0x08; rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2047, "RFF_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x2048, "RFF_ID exiting normally.\n"); } return (rval); }
/** * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rff_id(scsi_qla_host_t *ha) { int rval; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on " "ISP2100/ISP2200.\n", ha->host_no)); return (QLA_SUCCESS); } /* Issue RFF_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops.prep_ms_iocb(ha, RFF_ID_REQ_SIZE, RFF_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RFF_ID_CMD, RFF_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ ct_req->req.rff_id.port_id[0] = ha->d_id.b.domain; ct_req->req.rff_id.port_id[1] = ha->d_id.b.area; ct_req->req.rff_id.port_id[2] = ha->d_id.b.al_pa; ct_req->req.rff_id.fc4_feature = BIT_1; ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n", ha->host_no, rval)); } else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RFF_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n", ha->host_no)); } return (rval); }
/** * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rnn_id(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_rnn_id(vha); /* Issue RNN_ID */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE, RNN_ID_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RNN_ID_CMD, RNN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id, node_name */ ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area; ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa; memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x204d, "RNN_ID issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { ql_dbg(ql_dbg_disc, vha, 0x204e, "RNN_ID exiting normally.\n"); } return (rval); }
/** * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command. * @vha: HA context * @fcport: fcport entry to update * * Returns 0 on success. */ int qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) { int rval; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_ga_nxt(vha, fcport); /* Issue GA_NXT */ /* Prepare common MS IOCB */ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD, GA_NXT_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_disc, vha, 0x2062, "GA_NXT issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1]; fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2]; memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, WWN_SIZE); memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, WWN_SIZE); if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; ql_dbg(ql_dbg_disc, vha, 0x2063, "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x " "pn %02x%02x%02x%02x%02x%02x%02x%02x " "port_id=%02x%02x%02x.\n", fcport->node_name[0], fcport->node_name[1], fcport->node_name[2], fcport->node_name[3], fcport->node_name[4], fcport->node_name[5], fcport->node_name[6], fcport->node_name[7], fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); } return (rval); }
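/** * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. * @vha: HA context * @list: switch info entries to populate * * NOTE: Non-Nx_Ports are not requested. * * Returns 0 on success. */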
int qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_sns_gid_pt_data *gid_data; struct qla_hw_data *ha = vha->hw; uint16_t gid_pt_rsp_size; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gid_pt(vha, list); gid_data = NULL; gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha); ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE, gid_pt_rsp_size); ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, gid_pt_rsp_size); ct_rsp = &ha->ct_sns->p.rsp; ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2055, "GID_PT issue IOCB failed (%d).\n", rval); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; } else { for (i = 0; i < ha->max_fibre_devices; i++) { gid_data = &ct_rsp->rsp.gid_pt.entries[i]; list[i].d_id.b.domain = gid_data->port_id[0]; list[i].d_id.b.area = gid_data->port_id[1]; list[i].d_id.b.al_pa = gid_data->port_id[2]; memset(list[i].fabric_port_name, 0, WWN_SIZE); list[i].fp_speed = PORT_SPEED_UNKNOWN; if (gid_data->control_byte & BIT_7) { list[i].d_id.b.rsvd_1 = gid_data->control_byte; break; } } if (i == ha->max_fibre_devices) rval = QLA_FUNCTION_FAILED; } return (rval); }
/** * qla2x00_process_response_queue() - Process response queue entries. * @ha: SCSI driver HA context */ void qla2x00_process_response_queue(struct scsi_qla_host *ha) { device_reg_t *reg = ha->iobase; sts_entry_t *pkt; uint16_t handle_cnt; uint16_t cnt; if (!ha->flags.online) return; while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) { pkt = (sts_entry_t *)ha->response_ring_ptr; ha->rsp_ring_index++; if (ha->rsp_ring_index == ha->response_q_length) { ha->rsp_ring_index = 0; ha->response_ring_ptr = ha->response_ring; } else { ha->response_ring_ptr++; } if (pkt->entry_status != 0) { DEBUG3(printk(KERN_INFO "scsi(%ld): Process error entry.\n", ha->host_no)); qla2x00_error_entry(ha, pkt); ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); continue; } switch (pkt->entry_type) { case STATUS_TYPE: qla2x00_status_entry(ha, pkt); break; case STATUS_TYPE_21: handle_cnt = ((sts21_entry_t *)pkt)->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) { qla2x00_process_completed_request(ha, ((sts21_entry_t *)pkt)->handle[cnt]); } break; case STATUS_TYPE_22: handle_cnt = ((sts22_entry_t *)pkt)->handle_count; for (cnt = 0; cnt < handle_cnt; cnt++) { qla2x00_process_completed_request(ha, ((sts22_entry_t *)pkt)->handle[cnt]); } break; case STATUS_CONT_TYPE: qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt); break; case MS_IOCB_TYPE: qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt); break; case MBX_IOCB_TYPE: if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA6312(ha) && !IS_QLA6322(ha)) { if (pkt->sys_define == SOURCE_ASYNC_IOCB) { qla2x00_process_iodesc(ha, (struct mbx_entry *)pkt); } else { /* MBX IOCB Type Not Supported. */ DEBUG4(printk(KERN_WARNING "scsi(%ld): Received unknown MBX " "IOCB response pkt type=%x " "source=%x entry status=%x.\n", ha->host_no, pkt->entry_type, pkt->sys_define, pkt->entry_status)); } break; } /* Fallthrough. */ default: /* Type Not Supported. */ DEBUG4(printk(KERN_WARNING "scsi(%ld): Received unknown response pkt type %x " "entry status=%x.\n", ha->host_no, pkt->entry_type, pkt->entry_status)); break; } ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); } /* Adjust ring index */ WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index); }
/** * qla2x00_intr_handler() - Process interrupts for the ISP. * @irq: interrupt number * @dev_id: SCSI driver HA context * @regs: processor registers (unused) * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. */ irqreturn_t qla2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs) { scsi_qla_host_t *ha; device_reg_t *reg; uint32_t mbx; int status = 0; unsigned long flags = 0; unsigned long mbx_flags = 0; unsigned long intr_iter; uint32_t stat; uint16_t hccr; /* Don't loop forever, interrupts are OFF */ intr_iter = 50; ha = (scsi_qla_host_t *) dev_id; if (!ha) { printk(KERN_INFO "%s(): NULL host pointer\n", __func__); return (IRQ_NONE); } reg = ha->iobase; spin_lock_irqsave(&ha->hardware_lock, flags); for (;;) { /* Relax CPU! */ if (!(intr_iter--)) break; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) break; if (RD_REG_WORD(&reg->semaphore) & BIT_0) { WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); /* Get mailbox data. */ mbx = RD_MAILBOX_REG(ha, reg, 0); if (mbx > 0x3fff && mbx < 0x8000) { qla2x00_mbx_completion(ha, (uint16_t)mbx); status |= MBX_INTERRUPT; } else if (mbx > 0x7fff && mbx < 0xc000) { qla2x00_async_event(ha, mbx); } else { /*EMPTY*/ DEBUG2(printk("scsi(%ld): Unrecognized " "interrupt type (%d)\n", ha->host_no, mbx)); } /* Release mailbox registers. */ WRT_REG_WORD(&reg->semaphore, 0); /* Workaround for ISP2100 chip. */ if (IS_QLA2100(ha)) RD_REG_WORD(&reg->semaphore); } else { qla2x00_process_response_queue(ha); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } } else /* IS_QLA23XX(ha) */ { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if ((stat & HSR_RISC_INT) == 0) break; mbx = MSW(stat); switch (stat & 0xff) { case 0x13: qla2x00_process_response_queue(ha); break; case 0x1: case 0x2: case 0x10: case 0x11: qla2x00_mbx_completion(ha, (uint16_t)mbx); status |= MBX_INTERRUPT; /* Release mailbox registers. */ WRT_REG_WORD(&reg->semaphore, 0); break; case 0x12: qla2x00_async_event(ha, mbx); break; case 0x15: mbx = mbx << 16 | MBA_CMPLT_1_16BIT; qla2x00_async_event(ha, mbx); break; case 0x16: mbx = mbx << 16 | MBA_SCSI_COMPLETION; qla2x00_async_event(ha, mbx); break; default: hccr = RD_REG_WORD(&reg->hccr); if (hccr & HCCR_RISC_PAUSE) { qla_printk(KERN_INFO, ha, "RISC paused, dumping HCCR=%x\n", hccr); /* * Issue a "HARD" reset in order for * the RISC interrupt bit to be * cleared. Schedule a big hammer to * get out of the RISC PAUSED state. */ WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); RD_REG_WORD(&reg->hccr); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; } else { DEBUG2(printk("scsi(%ld): Unrecognized " "interrupt type (%d)\n", ha->host_no, stat & 0xff)); } break; } WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } } spin_unlock_irqrestore(&ha->hardware_lock, flags); qla2x00_next(ha); ha->last_irq_cpu = smp_processor_id(); ha->total_isr_cnt++; if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && (status & MBX_INTERRUPT) && ha->flags.mbox_int) { /* There was a mailbox completion */ DEBUG3(printk("%s(%ld): Going to get mbx reg lock.\n", __func__, ha->host_no)); spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags); if (ha->mcp == NULL) { DEBUG3(printk("%s(%ld): Error mbx pointer.\n", __func__, ha->host_no)); } else { DEBUG3(printk("%s(%ld): Going to set mbx intr flags. " "cmd=%x.\n", __func__, ha->host_no, ha->mcp->mb[0])); } set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); DEBUG3(printk("%s(%ld): Going to wake up mbx function for " "completion.\n", __func__, ha->host_no)); up(&ha->mbx_intr_sem); DEBUG3(printk("%s(%ld): Going to release mbx reg lock.\n", __func__, ha->host_no)); spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags); } if (!list_empty(&ha->done_queue)) qla2x00_done(ha); /* Wakeup the DPC routine */ if ((!ha->flags.mbox_busy && (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) || test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) || test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))) && ha->dpc_wait && !ha->dpc_active) { up(ha->dpc_wait); } return (IRQ_HANDLED); }
/** * qla2x00_async_event() - Process asynchronous events. * @ha: SCSI driver HA context * @mbx: Mailbox register data */ static void qla2x00_async_event(scsi_qla_host_t *ha, uint32_t mbx) { static char *link_speeds[5] = { "1", "2", "4", "?", "10" }; char *link_speed; uint16_t mb[4]; uint16_t handle_cnt; uint16_t cnt; uint32_t handles[5]; device_reg_t *reg = ha->iobase; uint32_t rscn_entry, host_pid; uint8_t rscn_queue_index; /* Setup to process RIO completion. */ handle_cnt = 0; mb[0] = LSW(mbx); switch (mb[0]) { case MBA_SCSI_COMPLETION: if (IS_QLA2100(ha) || IS_QLA2200(ha)) handles[0] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) | RD_MAILBOX_REG(ha, reg, 1)); else handles[0] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) | MSW(mbx)); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: if (IS_QLA2100(ha) || IS_QLA2200(ha)) handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1); else handles[0] = MSW(mbx); handle_cnt = 1; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_16BIT: handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1); handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_3_16BIT: handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1); handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2); handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3); handle_cnt = 3; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_4_16BIT: handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1); handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2); handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3); handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handle_cnt = 4; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_5_16BIT: handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1); handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2); handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3); handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); handle_cnt = 5; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: handles[0] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) | RD_MAILBOX_REG(ha, reg, 1)); handles[1] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; default: break; } mb[0] = LSW(mbx); switch (mb[0]) { case MBA_SCSI_COMPLETION: /* Fast Post */ if (!ha->flags.online) break; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(ha, handles[cnt]); break; case MBA_RESET: /* Reset */ DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla_printk(KERN_INFO, ha, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", mb[1], mb[2], mb[3]); if (IS_QLA2100(ha) || IS_QLA2200(ha)) qla2100_fw_dump(ha, 1); else qla2300_fw_dump(ha, 1); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", ha->host_no)); qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", ha->host_no)); qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case 
MBA_WAKEUP_THRES: /* Request Queue Wake-up */ DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", ha->host_no)); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha); } set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); ha->flags.management_server_logged_in = 0; /* Update AEN queue. */ qla2x00_enqueue_aen(ha, MBA_LIP_OCCURRED, NULL); ha->total_lip_cnt++; break; case MBA_LOOP_UP: /* Loop Up Event */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); ha->link_data_rate = 0; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { link_speed = link_speeds[0]; } else { link_speed = link_speeds[3]; if (mb[1] < 5) link_speed = link_speeds[mb[1]]; ha->link_data_rate = mb[1]; } DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", ha->host_no, link_speed)); qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", link_speed); ha->flags.management_server_logged_in = 0; /* Update AEN queue. */ qla2x00_enqueue_aen(ha, MBA_LOOP_UP, NULL); break; case MBA_LOOP_DOWN: /* Loop Down Event */ DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN.\n", ha->host_no)); qla_printk(KERN_INFO, ha, "LOOP DOWN detected.\n"); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); ha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(ha); } ha->flags.management_server_logged_in = 0; ha->link_data_rate = 0; /* Update AEN queue. */ qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL); break; case MBA_LIP_RESET: /* LIP reset occurred */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", ha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha); } set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); ha->operating_mode = LOOP; ha->flags.management_server_logged_in = 0; /* Update AEN queue. */ qla2x00_enqueue_aen(ha, MBA_LIP_RESET, NULL); ha->total_lip_cnt++; break; case MBA_POINT_TO_POINT: /* Point-to-Point */ if (IS_QLA2100(ha)) break; DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", ha->host_no)); /* * Until there's a transition from loop down to loop up, treat * this as loop down only. 
*/ if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); if (!atomic_read(&ha->loop_down_timer)) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha); } if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); } set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); break; case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ if (IS_QLA2100(ha)) break; mb[1] = RD_MAILBOX_REG(ha, reg, 1); DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " "received.\n", ha->host_no)); qla_printk(KERN_INFO, ha, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); if (!atomic_read(&ha->loop_down_timer)) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha); } set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); break; case MBA_PORT_UPDATE: /* Port database update */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); /* * If a single remote port just logged into (or logged out of) * us, create a new entry in our rscn fcports list and handle * the event like an RSCN. */ if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA6312(ha) && !IS_QLA6322(ha) && ha->flags.init_done && mb[1] != 0xffff && ((ha->operating_mode == P2P && mb[1] != 0) || (ha->operating_mode != P2P && mb[1] != SNS_FIRST_LOOP_ID)) && (mb[2] == 6 || mb[2] == 7)) { int rval; fc_port_t *rscn_fcport; /* Create new fcport for login. */ rscn_fcport = qla2x00_alloc_rscn_fcport(ha, GFP_ATOMIC); if (rscn_fcport) { DEBUG14(printk("scsi(%ld): Port Update -- " "creating RSCN fcport %p for login.\n", ha->host_no, rscn_fcport)); rscn_fcport->loop_id = mb[1]; rscn_fcport->d_id.b24 = INVALID_PORT_ID; atomic_set(&rscn_fcport->state, FCS_DEVICE_LOST); list_add_tail(&rscn_fcport->list, &ha->rscn_fcports); rval = qla2x00_handle_port_rscn(ha, 0, rscn_fcport, 1); if (rval == QLA_SUCCESS) break; } else { DEBUG14(printk("scsi(%ld): Port Update -- " "unable to allocate RSCN fcport for " "login.\n", ha->host_no)); } } /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. earlier indicating loop is down) then process * it. Otherwise ignore it and wait for RSCN to come in. */ if (atomic_read(&ha->loop_state) != LOOP_DOWN) { DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " "ignored.\n", ha->host_no)); break; } DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): Port database changed %04x %04x.\n", ha->host_no, mb[1], mb[2])); /* * Mark all devices as missing so we will login again. */ atomic_set(&ha->loop_state, LOOP_UP); atomic_set(&ha->loop_down_timer, 0); qla2x00_mark_all_devices_lost(ha); ha->flags.rscn_queue_overflow = 1; set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); /* Update AEN queue. 
*/ qla2x00_enqueue_aen(ha, MBA_PORT_UPDATE, NULL); break; case MBA_RSCN_UPDATE: /* State Change Registration */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): RSCN database changed -- %04x %04x.\n", ha->host_no, mb[1], mb[2])); rscn_entry = (mb[1] << 16) | mb[2]; host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | ha->d_id.b.al_pa; if (rscn_entry == host_pid) { DEBUG(printk(KERN_INFO "scsi(%ld): Ignoring RSCN update to local host " "port ID (%06x)\n", ha->host_no, host_pid)); break; } rscn_queue_index = ha->rscn_in_ptr + 1; if (rscn_queue_index == MAX_RSCN_COUNT) rscn_queue_index = 0; if (rscn_queue_index != ha->rscn_out_ptr) { ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; ha->rscn_in_ptr = rscn_queue_index; } else { ha->flags.rscn_queue_overflow = 1; } atomic_set(&ha->loop_state, LOOP_UPDATE); atomic_set(&ha->loop_down_timer, 0); ha->flags.management_server_logged_in = 0; set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(RSCN_UPDATE, &ha->dpc_flags); /* Update AEN queue. */ qla2x00_enqueue_aen(ha, MBA_RSCN_UPDATE, &mb[0]); break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): [R|Z]IO update completion.\n", ha->host_no)); qla2x00_process_response_queue(ha); break; } }
/** * qla2x00_async_event() - Process asynchronous events. * @ha: SCSI driver HA context * @mb: Mailbox registers (0 - 3) */ static void qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb) { #define LS_UNKNOWN 2 static char *link_speeds[5] = { "1", "2", "?", "4", "10" }; char *link_speed; uint16_t handle_cnt; uint16_t cnt; uint32_t handles[5]; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t rscn_entry, host_pid; uint8_t rscn_queue_index; /* Setup to process RIO completion. */ handle_cnt = 0; switch (mb[0]) { case MBA_SCSI_COMPLETION: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: handles[0] = mb[1]; handle_cnt = 1; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_3_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handle_cnt = 3; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_4_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handle_cnt = 4; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_5_16BIT: handles[0] = mb[1]; handles[1] = mb[2]; handles[2] = mb[3]; handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); handle_cnt = 5; mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handles[1] = le32_to_cpu( ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; default: break; } switch (mb[0]) { case MBA_SCSI_COMPLETION: /* Fast Post */ if (!ha->flags.online) break; for (cnt = 0; cnt < handle_cnt; cnt++) qla2x00_process_completed_request(ha, handles[cnt]); break; case MBA_RESET: /* Reset */ DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no)); set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); break; case MBA_SYSTEM_ERR: /* System Error */ mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); mb[3] = RD_MAILBOX_REG(ha, reg, 3); qla_printk(KERN_INFO, ha, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", mb[1], mb[2], mb[3]); ha->isp_ops.fw_dump(ha, 1); if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) { if (mb[1] == 0 && mb[2] == 0) { qla_printk(KERN_ERR, ha, "Unrecoverable Hardware Error: adapter " "marked OFFLINE!\n"); ha->flags.online = 0; } else set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); } else if (mb[1] == 0) { qla_printk(KERN_INFO, ha, "Unrecoverable Hardware Error: adapter marked " "OFFLINE!\n"); ha->flags.online = 0; } else set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", ha->host_no)); qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", ha->host_no)); qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags); break; case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", ha->host_no)); break; case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); if (atomic_read(&ha->loop_state) != 
LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha, 1); } set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); ha->flags.management_server_logged_in = 0; break; case MBA_LOOP_UP: /* Loop Up Event */ if (IS_QLA2100(ha) || IS_QLA2200(ha)) { link_speed = link_speeds[0]; ha->link_data_rate = LDR_1GB; } else { link_speed = link_speeds[LS_UNKNOWN]; if (mb[1] < 5) link_speed = link_speeds[mb[1]]; ha->link_data_rate = mb[1]; } DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", ha->host_no, link_speed)); qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", link_speed); ha->flags.management_server_logged_in = 0; break; case MBA_LOOP_DOWN: /* Loop Down Event */ DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n", ha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); ha->device_flags |= DFLG_NO_CABLE; qla2x00_mark_all_devices_lost(ha, 1); } ha->flags.management_server_logged_in = 0; ha->link_data_rate = LDR_UNKNOWN; if (ql2xfdmienable) set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags); break; case MBA_LIP_RESET: /* LIP reset occurred */ DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", ha->host_no, mb[1])); qla_printk(KERN_INFO, ha, "LIP reset occurred (%x).\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha, 1); } set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); ha->operating_mode = LOOP; ha->flags.management_server_logged_in = 0; break; case MBA_POINT_TO_POINT: /* Point-to-Point */ if (IS_QLA2100(ha)) break; DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n", ha->host_no)); /* * Until there's a transition from loop down to loop up, treat * this as loop down only. */ if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); if (!atomic_read(&ha->loop_down_timer)) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha, 1); } if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) { set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags); } set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); break; case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ if (IS_QLA2100(ha)) break; DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " "received.\n", ha->host_no)); qla_printk(KERN_INFO, ha, "Configuration change detected: value=%x.\n", mb[1]); if (atomic_read(&ha->loop_state) != LOOP_DOWN) { atomic_set(&ha->loop_state, LOOP_DOWN); if (!atomic_read(&ha->loop_down_timer)) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_all_devices_lost(ha, 1); } set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); break; case MBA_PORT_UPDATE: /* Port database update */ /* * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET * event etc. earlier indicating loop is down) then process * it. Otherwise ignore it and wait for RSCN to come in. 
*/ atomic_set(&ha->loop_down_timer, 0); if (atomic_read(&ha->loop_state) != LOOP_DOWN && atomic_read(&ha->loop_state) != LOOP_DEAD) { DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); break; } DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): Port database changed %04x %04x %04x.\n", ha->host_no, mb[1], mb[2], mb[3])); /* * Mark all devices as missing so we will login again. */ atomic_set(&ha->loop_state, LOOP_UP); qla2x00_mark_all_devices_lost(ha, 1); ha->flags.rscn_queue_overflow = 1; set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); break; case MBA_RSCN_UPDATE: /* State Change Registration */ DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): RSCN database changed -- %04x %04x.\n", ha->host_no, mb[1], mb[2])); rscn_entry = (mb[1] << 16) | mb[2]; host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) | ha->d_id.b.al_pa; if (rscn_entry == host_pid) { DEBUG(printk(KERN_INFO "scsi(%ld): Ignoring RSCN update to local host " "port ID (%06x)\n", ha->host_no, host_pid)); break; } rscn_queue_index = ha->rscn_in_ptr + 1; if (rscn_queue_index == MAX_RSCN_COUNT) rscn_queue_index = 0; if (rscn_queue_index != ha->rscn_out_ptr) { ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry; ha->rscn_in_ptr = rscn_queue_index; } else { ha->flags.rscn_queue_overflow = 1; } atomic_set(&ha->loop_state, LOOP_UPDATE); atomic_set(&ha->loop_down_timer, 0); ha->flags.management_server_logged_in = 0; set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); set_bit(RSCN_UPDATE, &ha->dpc_flags); break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n", ha->host_no)); DEBUG(printk(KERN_INFO "scsi(%ld): [R|Z]IO update completion.\n", ha->host_no)); if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) qla24xx_process_response_queue(ha); else qla2x00_process_response_queue(ha); break; case MBA_DISCARD_RND_FRAME: DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " "%04x.\n", ha->host_no, mb[1], mb[2], mb[3])); break; case MBA_TRACE_NOTIFICATION: DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", ha->host_no, mb[1], mb[2])); break; } }
/** * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. * @ha: HA context * @hardware_locked: Called with the hardware_lock */ void qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked) { int rval; uint32_t cnt, timer; uint16_t risc_address; uint16_t mb0, mb2; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint16_t __iomem *dmp_reg; unsigned long flags; struct qla2100_fw_dump *fw; risc_address = 0; mb0 = mb2 = 0; flags = 0; if (!hardware_locked) spin_lock_irqsave(&ha->hardware_lock, flags); if (!ha->fw_dump) { qla_printk(KERN_WARNING, ha, "No buffer available for dump!!!\n"); goto qla2100_fw_dump_failed; } if (ha->fw_dumped) { qla_printk(KERN_WARNING, ha, "Firmware has been previously dumped (%p) -- ignoring " "request...\n", ha->fw_dump); goto qla2100_fw_dump_failed; } fw = &ha->fw_dump->isp.isp21; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; fw->hccr = htons(RD_REG_WORD(&reg->hccr)); /* Pause RISC. */ WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) { dmp_reg = &reg->flash_address; for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); dmp_reg = &reg->u.isp2100.mailbox0; for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (cnt == 8) dmp_reg = &reg->u_end.isp2200.mailbox8; fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); } dmp_reg = &reg->u.isp2100.unused_2[0]; for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); WRT_REG_WORD(&reg->ctrl_status, 0x00); dmp_reg = &reg->risc_hw; for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++)); WRT_REG_WORD(&reg->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); WRT_REG_WORD(&reg->pcr, 0x2100); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); WRT_REG_WORD(&reg->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); WRT_REG_WORD(&reg->pcr, 0x2300); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); WRT_REG_WORD(&reg->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); WRT_REG_WORD(&reg->pcr, 0x2500); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); WRT_REG_WORD(&reg->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); WRT_REG_WORD(&reg->pcr, 0x2700); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); WRT_REG_WORD(&reg->ctrl_status, 0x10); qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); WRT_REG_WORD(&reg->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); WRT_REG_WORD(&reg->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset the ISP. */ WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET); } for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } /* Pause RISC. */ if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) { WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); else rval = QLA_FUNCTION_TIMEOUT; } if (rval == QLA_SUCCESS) { /* Set memory configuration and timing. */ if (IS_QLA2100(ha)) WRT_REG_WORD(&reg->mctr, 0xf1); else WRT_REG_WORD(&reg->mctr, 0xf2); RD_REG_WORD(&reg->mctr); /* PCI Posting. */ /* Release RISC. */ WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); } } if (rval == QLA_SUCCESS) { /* Get RISC SRAM. 
*/ risc_address = 0x1000; WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); } for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; cnt++, risc_address++) { WRT_MAILBOX_REG(ha, reg, 1, risc_address); WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer != 0; timer--) { /* Check for pending interrupts. */ if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) { if (RD_REG_WORD(&reg->semaphore) & BIT_0) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); mb2 = RD_MAILBOX_REG(ha, reg, 2); WRT_REG_WORD(&reg->semaphore, 0); WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); break; } WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); RD_REG_WORD(&reg->hccr); } udelay(5); } if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; fw->risc_ram[cnt] = htons(mb2); } else { rval = QLA_FUNCTION_FAILED; } } if (rval == QLA_SUCCESS) qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); if (rval != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, "Failed to dump firmware (%x)!!!\n", rval); ha->fw_dumped = 0; } else { qla_printk(KERN_INFO, ha, "Firmware dump saved to temp buffer (%ld/%p).\n", ha->host_no, ha->fw_dump); ha->fw_dumped = 1; } qla2100_fw_dump_failed: if (!hardware_locked) spin_unlock_irqrestore(&ha->hardware_lock, flags); }
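/** * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query. * @vha: HA context * @list: switch info entries to populate * * Returns 0 on success. */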
int qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) { int rval = QLA_SUCCESS; uint16_t i; struct qla_hw_data *ha = vha->hw; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) return qla2x00_sns_gnn_id(vha, list); for (i = 0; i < ha->max_fibre_devices; i++) { ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, GNN_ID_RSP_SIZE); ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GNN_ID_CMD, GNN_ID_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_disc, vha, 0x2057, "GNN_ID issue IOCB failed (%d).\n", rval); break; } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GNN_ID") != QLA_SUCCESS) { rval = QLA_FUNCTION_FAILED; break; } else { memcpy(list[i].node_name, ct_rsp->rsp.gnn_id.node_name, WWN_SIZE); ql_dbg(ql_dbg_disc, vha, 0x2058, "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x " "pn %02x%02x%02x%02x%02x%02x%02X%02x " "portid=%02x%02x%02x.\n", list[i].node_name[0], list[i].node_name[1], list[i].node_name[2], list[i].node_name[3], list[i].node_name[4], list[i].node_name[5], list[i].node_name[6], list[i].node_name[7], list[i].port_name[0], list[i].port_name[1], list[i].port_name[2], list[i].port_name[3], list[i].port_name[4], list[i].port_name[5], list[i].port_name[6], list[i].port_name[7], list[i].d_id.b.domain, list[i].d_id.b.area, list[i].d_id.b.al_pa); } if (list[i].d_id.b.rsvd_1 != 0) break; } return (rval); }
/** * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command. * @ha: HA context * @fcport: fcport entry to update * * Returns 0 on success. */ int qla2x00_ga_nxt(scsi_qla_host_t *ha, fc_port_t *fcport) { int rval; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { return (qla2x00_sns_ga_nxt(ha, fcport)); } /* Issue GA_NXT */ /* Prepare common MS IOCB */ ms_pkt = qla2x00_prep_ms_iocb(ha, GA_NXT_REQ_SIZE, GA_NXT_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GA_NXT_CMD, GA_NXT_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n", ha->host_no, rval)); } else if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, " "ga_nxt_rsp:\n", ha->host_no)); DEBUG2_3(qla2x00_dump_buffer((uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1]; fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2]; memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, WWN_SIZE); memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name, WWN_SIZE); if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) fcport->d_id.b.domain = 0xf0; DEBUG2_3(printk("scsi(%ld): GA_NXT entry - " "nn %02x%02x%02x%02x%02x%02x%02x%02x " "pn %02x%02x%02x%02x%02x%02x%02x%02x " "portid=%02x%02x%02x.\n", ha->host_no, fcport->node_name[0], fcport->node_name[1], fcport->node_name[2], fcport->node_name[3], fcport->node_name[4], fcport->node_name[5], fcport->node_name[6], fcport->node_name[7], fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], fcport->port_name[7], fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa)); } return (rval); }
/** * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command. * @ha: HA context * @list: switch info entries to populate * * NOTE: Non-Nx_Ports are not requested. * * Returns 0 on success. */ int qla2x00_gid_pt(scsi_qla_host_t *ha, sw_info_t *list) { int rval; uint16_t i; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct ct_sns_gid_pt_data *gid_data; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { return (qla2x00_sns_gid_pt(ha, list)); } gid_data = NULL; /* Issue GID_PT */ /* Prepare common MS IOCB */ ms_pkt = qla2x00_prep_ms_iocb(ha, GID_PT_REQ_SIZE, GID_PT_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD, GID_PT_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_type */ ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n", ha->host_no, rval)); } else if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, " "gid_pt_rsp:\n", ha->host_no)); DEBUG2_3(qla2x00_dump_buffer((uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); rval = QLA_FUNCTION_FAILED; } else { /* Set port IDs in switch info list. */ for (i = 0; i < MAX_FIBRE_DEVICES; i++) { gid_data = &ct_rsp->rsp.gid_pt.entries[i]; list[i].d_id.b.domain = gid_data->port_id[0]; list[i].d_id.b.area = gid_data->port_id[1]; list[i].d_id.b.al_pa = gid_data->port_id[2]; /* Last one exit. */ if (gid_data->control_byte & BIT_7) { list[i].d_id.b.rsvd_1 = gid_data->control_byte; break; } } /* * If we've used all available slots, then the switch is * reporting back more devices than we can handle with this * single call. Return a failed status, and let GA_NXT handle * the overload. */ if (i == MAX_FIBRE_DEVICES) rval = QLA_FUNCTION_FAILED; } return (rval); }
/** * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA. * @ha: HA context * * Returns 0 on success. */ int qla2x00_rsnn_nn(scsi_qla_host_t *ha) { int rval; uint8_t *snn; uint8_t version[20]; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; if (IS_QLA2100(ha) || IS_QLA2200(ha)) { DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on " "ISP2100/ISP2200.\n", ha->host_no)); return (QLA_SUCCESS); } /* Issue RSNN_NN */ /* Prepare common MS IOCB */ /* Request size adjusted after CT preparation */ ms_pkt = qla2x00_prep_ms_iocb(ha, 0, RSNN_NN_RSP_SIZE); /* Prepare CT request */ ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, RSNN_NN_CMD, RSNN_NN_RSP_SIZE); ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- node_name, symbolic node_name, size */ memcpy(ct_req->req.rsnn_nn.node_name, ha->init_cb->node_name, WWN_SIZE); /* Prepare the Symbolic Node Name */ /* Board type */ snn = ct_req->req.rsnn_nn.sym_node_name; strcpy(snn, ha->model_number); /* Firmware version */ strcat(snn, " FW:v"); sprintf(version, "%d.%02d.%02d", ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); strcat(snn, version); /* Driver version */ strcat(snn, " DVR:v"); strcat(snn, qla2x00_version_str); /* Calculate SNN length */ ct_req->req.rsnn_nn.name_len = (uint8_t)strlen(snn); /* Update MS IOCB request */ ms_pkt->req_bytecount = cpu_to_le32(24 + 1 + ct_req->req.rsnn_nn.name_len); ms_pkt->dseg_req_length = ms_pkt->req_bytecount; /* Execute MS IOCB */ rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma, sizeof(ms_iocb_entry_t)); if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n", ha->host_no, rval)); } else if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): RSNN_NN failed, rejected " "request, rsnn_id_rsp:\n", ha->host_no)); DEBUG2_3(qla2x00_dump_buffer((uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); rval = QLA_FUNCTION_FAILED; } else { DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n", ha->host_no)); } return (rval); }
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long	flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * A delay is necessary here since the card doesn't respond
		 * to PCI reads during a reset. On some architectures this
		 * will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
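Most steps above repeat one idiom: write a control value, read the same register back to flush PCI write posting, then poll for the expected state with a bounded number of udelay() iterations rather than spinning forever. A hedged sketch of that polling pattern in isolation; the function-pointer parameters stand in for RD_REG_WORD()/udelay() and are not driver interfaces:

#include <stdint.h>
#include <stdbool.h>

/*
 * Illustration only: bounded register poll in the style used above.
 * read_reg/delay_us are caller-supplied stand-ins, not driver calls.
 */
static bool poll_reg_bits(uint16_t (*read_reg)(void), uint16_t mask,
    unsigned int max_iters, void (*delay_us)(unsigned int))
{
	unsigned int cnt;

	for (cnt = 0; cnt < max_iters; cnt++) {
		if (read_reg() & mask)
			return true;	/* e.g. HCCR_RISC_PAUSE observed */
		delay_us(100);		/* ~100 us per pass, as above */
	}
	return false;			/* timed out; caller picks recovery */
}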
/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @ha: HA context
 * @hardware_locked: Called with the hardware_lock
 */
void
qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
	int		rval;
	uint32_t	cnt, timer;
	uint16_t	risc_address;
	uint16_t	mb0, mb2;
	device_reg_t __iomem *reg = ha->iobase;
	uint16_t __iomem *dmp_reg;
	unsigned long	flags;
	struct qla2100_fw_dump	*fw;

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->fw_dump != NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}

	/* Allocate (large) dump buffer. */
	ha->fw_dump_order = get_order(sizeof(struct qla2100_fw_dump));
	ha->fw_dump = (struct qla2100_fw_dump *) __get_free_pages(GFP_ATOMIC,
	    ha->fw_dump_order);
	if (ha->fw_dump == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for firmware dump (%d/%Zd).\n",
		    ha->fw_dump_order, sizeof(struct qla2100_fw_dump));
		goto qla2100_fw_dump_failed;
	}
	fw = ha->fw_dump;

	rval = QLA_SUCCESS;
	fw->hccr = RD_REG_WORD(&reg->hccr);

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = (uint16_t __iomem *)(reg + 0);
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++);

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8) {
				dmp_reg = (uint16_t __iomem *)
				    ((uint8_t __iomem *)reg + 0xe0);
			}
			fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++);
		}

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20);
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2000);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++)
			fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++)
			fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++)
			fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++)
			fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++)
			fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++)
			fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++)
			fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++)
			fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++)
			fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++)
			fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++)
			fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = mb2;
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to dump firmware (%x)!!!\n", rval);

		free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);
		ha->fw_dump = NULL;
	} else {
		qla_printk(KERN_INFO, ha,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    ha->host_no, ha->fw_dump);
	}

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
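The dump buffer above is allocated in whole pages: get_order() rounds sizeof(struct qla2100_fw_dump) up to a power-of-two page count, and exactly the same order value is handed back to free_pages() on the failure path. A hedged approximation of that rounding, assuming 4 KiB pages purely for illustration (not the kernel implementation):

#include <stddef.h>

#define PAGE_SIZE_ASSUMED	4096UL	/* illustration only */

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static unsigned int order_for_size(size_t size)
{
	size_t pages = (size + PAGE_SIZE_ASSUMED - 1) / PAGE_SIZE_ASSUMED;
	unsigned int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;	/* pass to __get_free_pages() and free_pages() alike */
}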
/**
 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
 * @vha: HA context
 * @list: switch info entries to populate
 *
 * NOTE: Non-Nx_Ports are not requested.
 *
 * Returns 0 on success.
 */
int
qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
{
	int		rval;
	uint16_t	i;

	ms_iocb_entry_t	*ms_pkt;
	struct ct_sns_req	*ct_req;
	struct ct_sns_rsp	*ct_rsp;

	struct ct_sns_gid_pt_data *gid_data;
	struct qla_hw_data *ha = vha->hw;
	uint16_t gid_pt_rsp_size;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return qla2x00_sns_gid_pt(vha, list);

	gid_data = NULL;
	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);

	/* Issue GID_PT */
	/* Prepare common MS IOCB */
	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
	    gid_pt_rsp_size);

	/* Prepare CT request */
	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
	    gid_pt_rsp_size);
	ct_rsp = &ha->ct_sns->p.rsp;

	/* Prepare CT arguments -- port_type */
	ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;

	/* Execute MS IOCB */
	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
	    sizeof(ms_iocb_entry_t));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x2055,
		    "GID_PT issue IOCB failed (%d).\n", rval);
	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
	    QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
	} else {
		/* Set port IDs in switch info list. */
		for (i = 0; i < ha->max_fibre_devices; i++) {
			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
			list[i].d_id.b.domain = gid_data->port_id[0];
			list[i].d_id.b.area = gid_data->port_id[1];
			list[i].d_id.b.al_pa = gid_data->port_id[2];
			memset(list[i].fabric_port_name, 0, WWN_SIZE);
			list[i].fp_speed = PORT_SPEED_UNKNOWN;

			/* Last one exit. */
			if (gid_data->control_byte & BIT_7) {
				list[i].d_id.b.rsvd_1 = gid_data->control_byte;
				break;
			}
		}

		/*
		 * If we've used all available slots, then the switch is
		 * reporting back more devices than we can handle with this
		 * single call.  Return a failed status, and let GA_NXT handle
		 * the overload.
		 */
		if (i == ha->max_fibre_devices)
			rval = QLA_FUNCTION_FAILED;
	}

	return (rval);
}
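Callers of this scan can rely on the termination convention visible in the loop above: the switch flags the final accepted entry with BIT_7 in the control byte, which is copied into d_id.b.rsvd_1, while every earlier entry leaves rsvd_1 at zero. A hedged sketch of how a consumer might walk the populated list; walk_gid_pt_list() and the handle_port() callback are hypothetical, not driver functions:

/*
 * Hypothetical consumer of the sw_info_t list filled in above.
 * Stops at the entry carrying the "last entry" control byte.
 */
static void walk_gid_pt_list(struct qla_hw_data *ha, sw_info_t *list,
    void (*handle_port)(sw_info_t *))
{
	uint16_t i;

	for (i = 0; i < ha->max_fibre_devices; i++) {
		handle_port(&list[i]);

		/* Last device: control byte (BIT_7 set) was stored here. */
		if (list[i].d_id.b.rsvd_1 != 0)
			break;
	}
}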