/*
 * qla4xxx_dump_registers - dump adapter registers to the console
 * @ha: adapter state
 *
 * Prints mailbox and ISP register contents via printk for debugging. For
 * ISP82xx adapters only the outbound mailbox registers are dumped; for
 * ISP40xx adapters the offset of each register within struct isp_reg is
 * printed alongside its value.
 */
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
	uint8_t i;

	if (is_qla8022(ha)) {
		for (i = 1; i < MBOX_REG_COUNT; i++)
			printk(KERN_INFO "mailbox[%d] = 0x%08X\n",
			       i, readl(&ha->qla4_82xx_reg->mailbox_out[i]));
		return;
	}

	for (i = 0; i < MBOX_REG_COUNT; i++) {
		printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
		       readw(&ha->reg->mailbox[i]));
	}

	printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_address),
	       readw(&ha->reg->flash_address));
	printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_data),
	       readw(&ha->reg->flash_data));
	printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, ctrl_status),
	       readw(&ha->reg->ctrl_status));

	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
		       readw(&ha->reg->u1.isp4010.nvram));
	} else if (is_qla4022(ha) | is_qla4032(ha)) {
		printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
		       readw(&ha->reg->u1.isp4022.intr_mask));
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
		       readw(&ha->reg->u1.isp4022.nvram));
		printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
		       readw(&ha->reg->u1.isp4022.semaphore));
	}

	printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, req_q_in),
	       readw(&ha->reg->req_q_in));
	printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, rsp_q_out),
	       readw(&ha->reg->rsp_q_out));

	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
		       readw(&ha->reg->u2.isp4010.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
		       readw(&ha->reg->u2.isp4010.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
		       readw(&ha->reg->u2.isp4010.port_status));
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
		       readw(&ha->reg->u2.isp4010.req_q_out));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
		       readw(&ha->reg->u2.isp4010.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
		       readw(&ha->reg->u2.isp4010.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4010.port_err_status),
		       readw(&ha->reg->u2.isp4010.port_err_status));
	} else if (is_qla4022(ha) | is_qla4032(ha)) {
		printk(KERN_INFO "Page 0 Registers:\n");
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
		       readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
		       readw(&ha->reg->u2.isp4022.p0.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_status),
		       readw(&ha->reg->u2.isp4022.p0.port_status));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
		       readw(&ha->reg->u2.isp4022.p0.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
		       readw(&ha->reg->u2.isp4022.p0.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t)
		       offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
		       readw(&ha->reg->u2.isp4022.p0.port_err_status));
		printk(KERN_INFO "Page 1 Registers:\n");
		/* Select the host memory configuration page, dump the page 1
		 * request-queue-out register, then restore the port
		 * control/status page. */
		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
		       readw(&ha->reg->u2.isp4022.p1.req_q_out));
		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
	}
}
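
/**
 * qla4xxx_update_nvram - write NVRAM contents supplied via a BSG request
 * @bsg_job: iSCSI transport BSG job carrying the vendor command
 *
 * Handles the update-NVRAM vendor command for ISP4010/4022/4032 adapters:
 * checks that the adapter is accessible and not resetting, bounds-checks
 * offset + length against the chip's NVRAM size, copies the payload from
 * the BSG scatter-gather list into a DMA-coherent buffer and writes it out
 * with qla4xxx_set_nvram(). The BSG job is completed with DID_OK or
 * DID_ERROR; the function returns 0 on success or a negative errno.
 */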
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}