static int qla4xxx_get_acb_state(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t status[MBOX_REG_COUNT]; uint32_t acb_idx; uint32_t ip_idx; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; /* Only 4022 and above adapters are supported */ if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } if (bsg_job->reply_payload.payload_len < sizeof(status)) { ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n", __func__, bsg_job->reply_payload.payload_len); rval = -EINVAL; goto leave; } acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, status, sizeof(status)); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: return rval; }
/*
 * qla4_83xx_flash_read_u32 - read @u32_word_count dwords of flash starting
 * at @flash_addr into @p_data, holding the hardware flash lock for the
 * duration of the transfer.
 *
 * Fix: the FLASH_DIRECT_WINDOW error message ended in "\n!" (newline
 * before the bang); corrected to "!\n" so the log line is properly
 * terminated.
 *
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
			     uint8_t *p_data, int u32_word_count)
{
	int i;
	uint32_t u32_word;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	ret_val = qla4_83xx_flash_lock(ha);
	if (ret_val == QLA_ERROR)
		goto exit_lock_error;

	/* Flash accesses must be 32-bit aligned. */
	if (addr & 0x03) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_flash_read;
	}

	for (i = 0; i < u32_word_count; i++) {
		/* Re-aim the 64K direct window at the current address. */
		ret_val = qla4_83xx_wr_reg_indirect(ha,
						    QLA83XX_FLASH_DIRECT_WINDOW,
						    (addr & 0xFFFF0000));
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha,
				   "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha,
				   "%s: failed to read addr 0x%x!\n",
				   __func__, addr);
			goto exit_flash_read;
		}

		*(__le32 *)p_data = le32_to_cpu(u32_word);
		p_data = p_data + 4;
		addr = addr + 4;
	}

exit_flash_read:
	qla4_83xx_flash_unlock(ha);

exit_lock_error:
	return ret_val;
}
static int qla4_83xx_flash_lock(struct scsi_qla_host *ha) { int lock_owner; int timeout = 0; uint32_t lock_status = 0; int ret_val = QLA_SUCCESS; while (lock_status == 0) { lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK); if (lock_status) break; if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) { lock_owner = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK_ID); ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n", __func__, ha->func_num, lock_owner); ret_val = QLA_ERROR; break; } msleep(20); } qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num); return ret_val; }
static int qla4xxx_restore_defaults(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t region = 0; uint32_t field0 = 0; uint32_t field1 = 0; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: return rval; }
/*
 * qla4_83xx_rom_lock_recovery - forcefully release a stale flash/ROM lock.
 * @ha: Pointer to adapter structure
 *
 * Attempts to take the flash lock; whether or not that succeeds, the lock
 * is unconditionally released so a reset can proceed with a clean lock.
 */
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n",
			   __func__);

	/*
	 * We got the lock, or someone else is holding the lock
	 * since we are resetting, forcefully unlock
	 */
	qla4_83xx_flash_unlock(ha);
}
/*
 * qla4_83xx_lock_recovery - recover the IDC driver lock when its holder
 * appears stuck.
 * @ha: Pointer to adapter structure
 *
 * Advertises intent-to-recover in QLA83XX_DRV_LOCKRECOVERY, waits, verifies
 * no other function raced us, then force-unlocks and attempts to take the
 * lock for this function.  Returns QLA_SUCCESS only if the lock was
 * re-acquired, QLA_ERROR otherwise.
 */
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{
	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover: bits 2-5 carry our function number, bits 0-1
	 * the recovery phase. */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is advertised - another function may have
	 * overwritten the register while we slept. */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock: reading QLA83XX_DRV_LOCK is the acquire attempt. */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		/* Lock-ID layout: bits 8-31 are a grab counter, bits 0-7 the
		 * owning function number - bump the counter, claim ownership. */
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
/*
 * qla4_83xx_wr_reg_indirect - write @data to CRB address @addr through the
 * per-function register window.  Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t data)
{
	int ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
			   __func__, addr, data);
		return ret_val;
	}

	qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
	return ret_val;
}
/*
 * qla4_83xx_rd_reg_indirect - read CRB address @addr through the
 * per-function register window into *@data.
 * Returns QLA_SUCCESS or QLA_ERROR; *@data is untouched on failure.
 */
int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
			      uint32_t *data)
{
	int ret_val = qla4_83xx_set_win_base(ha, addr);

	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
			   __func__, addr);
		return ret_val;
	}

	*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
	return ret_val;
}
/*
 * qla4_83xx_set_win_base - program the per-function CRB window register
 * to @addr and read it back to verify the write took effect.
 * Returns QLA_SUCCESS on verified write, QLA_ERROR on mismatch.
 */
static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
{
	uint32_t readback;

	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
	readback = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));

	if (readback == addr)
		return QLA_SUCCESS;

	ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
		   __func__, addr, readback);
	return QLA_ERROR;
}
/*
 * qla4_8xxx_alloc_sysfs_attr - register every entry of bin_file_entries as
 * a sysfs binary attribute under the host's gendev kobject.  Failures are
 * logged but do not abort registration of the remaining entries.
 */
void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
{
	struct sysfs_entry *iter;

	for (iter = bin_file_entries; iter->name; iter++) {
		int ret = sysfs_create_bin_file(&ha->host->shost_gendev.kobj,
						iter->attr);

		if (ret)
			ql4_printk(KERN_ERR, ha,
				   "Unable to create sysfs %s binary attribute (%d).\n",
				   iter->name, ret);
	}
}
/** * qla4xxx_bsg_request - handle bsg request from ISCSI transport * @job: iscsi_bsg_job to handle */ int qla4xxx_bsg_request(struct bsg_job *bsg_job) { struct iscsi_bsg_request *bsg_req = bsg_job->request; struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); switch (bsg_req->msgcode) { case ISCSI_BSG_HST_VENDOR: return qla4xxx_process_vendor_specific(bsg_job); default: ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n", __func__, bsg_req->msgcode); } return -ENOSYS; }
/** * qla4xxx_process_vendor_specific - handle vendor specific bsg request * @job: iscsi_bsg_job to handle **/ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job) { struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; struct iscsi_bsg_request *bsg_req = bsg_job->request; struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case QLISCSI_VND_READ_FLASH: return qla4xxx_read_flash(bsg_job); case QLISCSI_VND_UPDATE_FLASH: return qla4xxx_update_flash(bsg_job); case QLISCSI_VND_GET_ACB_STATE: return qla4xxx_get_acb_state(bsg_job); case QLISCSI_VND_READ_NVRAM: return qla4xxx_read_nvram(bsg_job); case QLISCSI_VND_UPDATE_NVRAM: return qla4xxx_update_nvram(bsg_job); case QLISCSI_VND_RESTORE_DEFAULTS: return qla4xxx_restore_defaults(bsg_job); case QLISCSI_VND_GET_ACB: return qla4xxx_bsg_get_acb(bsg_job); default: ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: " "0x%x\n", __func__, bsg_req->msgcode); bsg_reply->result = (DID_ERROR << 16); bsg_reply->reply_payload_rcv_len = 0; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return -ENOSYS; } }
/*
 * qla4_83xx_lockless_flash_read_u32 - read @u32_word_count dwords from
 * flash starting at @flash_addr into @p_data, without taking the hardware
 * flash lock (unlike qla4_83xx_flash_read_u32).
 * Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	/* Offset of the first dword within its flash sector. */
	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	/* Only 32-bit aligned reads are legal. */
	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	/* Aim the direct-access window at the starting address. */
	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read: the window must be re-programmed each
		 * time the read crosses a sector boundary. */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_WINDOW,
						addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read: one window programming suffices. */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
int qla4_83xx_drv_lock(struct scsi_qla_host *ha) { int timeout = 0; uint32_t status = 0; int ret_val = QLA_SUCCESS; uint32_t first_owner = 0; uint32_t tmo_owner = 0; uint32_t lock_id; uint32_t func_num; uint32_t lock_cnt; while (status == 0) { status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK); if (status) { /* Increment Counter (8-31) and update func_num (0-7) on * getting a successful lock */ lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num; qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id); break; } if (timeout == 0) /* Save counter + ID of function holding the lock for * first failure */ first_owner = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID); if (++timeout >= (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) { tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID); func_num = tmo_owner & 0xFF; lock_cnt = tmo_owner >> 8; ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", __func__, ha->func_num, func_num, lock_cnt, (first_owner & 0xFF)); if (first_owner != tmo_owner) { /* Some other driver got lock, OR same driver * got lock again (counter value changed), when * we were waiting for lock. * Retry for another 2 sec */ ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n", __func__, ha->func_num); timeout = 0; } else { /* Same driver holding lock > 2sec. * Force Recovery */ ret_val = qla4_83xx_lock_recovery(ha); if (ret_val == QLA_SUCCESS) { /* Recovered and got lock */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n", __func__, ha->func_num); break; } /* Recovery Failed, some other function * has the lock, wait for 2secs and retry */ ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timout\n", __func__, ha->func_num); timeout = 0; } } msleep(QLA83XX_DRV_LOCK_MSLEEP); }
/**
 * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
 * @ha: Pointer to adapter structure
 * @addr: Flash address to write to
 * @data: Data to be written
 * @count: word_count to be written
 *
 * Writes @count 128-bit chunks (four consecutive dwords from @data each)
 * through the MIU test-agent registers, holding ha->hw_lock across the
 * whole transfer and polling the agent's BUSY bit after every chunk.
 *
 * Return: On success return QLA_SUCCESS
 *	   On error return QLA_ERROR
 **/
static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
				       uint32_t *data, uint32_t count)
{
	int i, j;
	uint32_t agt_ctrl;
	unsigned long flags;
	int ret_val = QLA_SUCCESS;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_ERROR;
		goto exit_ms_mem_write;
	}

	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
			   __func__);
		goto exit_ms_mem_write_unlock;
	}

	for (i = 0; i < count; i++, addr += 16) {
		/* Every chunk must land in QDR or DDR net address space. */
		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}

		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
						    addr);
		/* Write data */
		/* NOTE(review): OR-ing return codes assumes QLA_SUCCESS is 0
		 * so any failure sticks - confirm against the defines. */
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_LO,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_HI,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_ULO,
						     *data++);
		ret_val |= qla4_83xx_wr_reg_indirect(ha,
						     MD_MIU_TEST_AGT_WRDATA_UHI,
						     *data++);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Check write status */
		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						    MIU_TA_CTL_WRITE_ENABLE);
		ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						     MIU_TA_CTL_WRITE_START);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Poll until the test agent reports not-busy. */
		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
							MD_MIU_TEST_AGT_CTRL,
							&agt_ctrl);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
					   __func__);
				goto exit_ms_mem_write_unlock;
			}
			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		/* Status check failed */
		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
					   __func__);
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}
	}

exit_ms_mem_write_unlock:
	write_unlock_irqrestore(&ha->hw_lock, flags);

exit_ms_mem_write:
	return ret_val;
}
/*
 * qla4xxx_update_nvram - BSG vendor command: write NVRAM at the offset in
 * vendor_cmd[1] with the request payload.  Only valid for 4010/4022/4032
 * adapters, and offset+len must fit in the adapter's NVRAM.
 * Returns 0 on success or a negative errno.
 */
static int qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx-family adapters carry this NVRAM layout. */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	/* DMA bounce buffer for the firmware set-NVRAM mailbox command. */
	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
/** * qla4xxx_mailbox_command - issues mailbox commands * @ha: Pointer to host adapter structure. * @inCount: number of mailbox registers to load. * @outCount: number of mailbox registers to return. * @mbx_cmd: data pointer for mailbox in registers. * @mbx_sts: data pointer for mailbox out registers. * * This routine isssue mailbox commands and waits for completion. * If outCount is 0, this routine completes successfully WITHOUT waiting * for the mailbox command to complete. **/ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts) { int status = QLA_ERROR; uint8_t i; u_long wait_count; uint32_t intr_status; unsigned long flags = 0; /* Make sure that pointers are valid */ if (!mbx_cmd || !mbx_sts) { DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts " "pointer\n", ha->host_no, __func__)); return status; } if (is_qla8022(ha) && test_bit(AF_FW_RECOVERY, &ha->flags)) { DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: prematurely " "completing mbx cmd as firmware recovery detected\n", ha->host_no, __func__)); return status; } if ((is_aer_supported(ha)) && (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " "timeout MBX Exiting.\n", ha->host_no, __func__)); return status; } /* Mailbox code active */ wait_count = MBOX_TOV * 100; while (wait_count--) { mutex_lock(&ha->mbox_sem); if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) { set_bit(AF_MBOX_COMMAND, &ha->flags); mutex_unlock(&ha->mbox_sem); break; } mutex_unlock(&ha->mbox_sem); if (!wait_count) { DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n", ha->host_no, __func__)); return status; } msleep(10); } /* To prevent overwriting mailbox registers for a command that has * not yet been serviced, check to see if an active command * (AEN, IOCB, etc.) is interrupting, then service it. 
* ----------------------------------------------------------------- */ spin_lock_irqsave(&ha->hardware_lock, flags); if (is_qla8022(ha)) { intr_status = readl(&ha->qla4_8xxx_reg->host_int); if (intr_status & ISRX_82XX_RISC_INT) { /* Service existing interrupt */ DEBUG2(printk("scsi%ld: %s: " "servicing existing interrupt\n", ha->host_no, __func__)); intr_status = readl(&ha->qla4_8xxx_reg->host_status); ha->isp_ops->interrupt_service_routine(ha, intr_status); clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); if (test_bit(AF_INTERRUPTS_ON, &ha->flags) && test_bit(AF_INTx_ENABLED, &ha->flags)) qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); } } else { intr_status = readl(&ha->reg->ctrl_status); if (intr_status & CSR_SCSI_PROCESSOR_INTR) { /* Service existing interrupt */ ha->isp_ops->interrupt_service_routine(ha, intr_status); clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags); } } ha->mbox_status_count = outCount; for (i = 0; i < outCount; i++) ha->mbox_status[i] = 0; if (is_qla8022(ha)) { /* Load all mailbox registers, except mailbox 0. */ DEBUG5( printk("scsi%ld: %s: Cmd ", ha->host_no, __func__); for (i = 0; i < inCount; i++) printk("mb%d=%04x ", i, mbx_cmd[i]); printk("\n")); for (i = 1; i < inCount; i++) writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]); writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]); readl(&ha->qla4_8xxx_reg->mailbox_in[0]); writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint); } else { /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < inCount; i++)
static int qla4xxx_update_flash(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; struct iscsi_bsg_request *bsg_req = bsg_job->request; uint32_t length = 0; uint32_t offset = 0; uint32_t options = 0; dma_addr_t flash_dma; uint8_t *flash = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } if (ha->flash_state != QLFLASH_WAITING) { ql4_printk(KERN_ERR, ha, "%s: another flash operation " "active\n", __func__); rval = -EBUSY; goto leave; } ha->flash_state = QLFLASH_WRITING; length = bsg_job->request_payload.payload_len; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; options = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, GFP_KERNEL); if (!flash) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " "data\n", __func__); rval = -ENOMEM; goto leave; } sg_copy_to_buffer(bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, flash, length); rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options); if (rval) { ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else bsg_reply->result = DID_OK << 16; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); leave: ha->flash_state = QLFLASH_WAITING; return rval; }
static int qla4xxx_bsg_get_acb(struct bsg_job *bsg_job) { struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); struct scsi_qla_host *ha = to_qla_host(host); struct iscsi_bsg_request *bsg_req = bsg_job->request; struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; uint32_t acb_type = 0; uint32_t len = 0; dma_addr_t acb_dma; uint8_t *acb = NULL; int rval = -EINVAL; bsg_reply->reply_payload_rcv_len = 0; if (unlikely(pci_channel_offline(ha->pdev))) goto leave; /* Only 4022 and above adapters are supported */ if (is_qla4010(ha)) goto leave; if (ql4xxx_reset_active(ha)) { ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); rval = -EBUSY; goto leave; } acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->reply_payload.payload_len; if (len < sizeof(struct addr_ctrl_blk)) { ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n", __func__, len); rval = -EINVAL; goto leave; } acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL); if (!acb) { ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb " "data\n", __func__); rval = -ENOMEM; goto leave; } rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len); if (rval) { ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__); bsg_reply->result = DID_ERROR << 16; rval = -EIO; } else { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, acb, len); bsg_reply->result = DID_OK << 16; } bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma); leave: return rval; }
/*
 * qla4_8xxx_sysfs_write_fw_dump - sysfs write handler controlling firmware
 * dump collection on 8xxx adapters.
 *
 * Values accepted from userspace:
 *   0 - clear dump-reading state and reload the minidump template
 *   1 - mark an already-captured dump ready for reading
 *   2 - request an adapter reset so firmware collects a fresh dump
 * Any other value is silently ignored.  Returns @count (whole write
 * consumed), or -EINVAL on 40XX adapters which have no fw dump support.
 */
static ssize_t
qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
			      struct bin_attribute *ba, char *buf, loff_t off,
			      size_t count)
{
	struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
							struct device, kobj)));
	uint32_t dev_state;
	int reading;

	if (is_qla40XX(ha))
		return -EINVAL;

	/* Only honour a write at the start of the file. */
	if (off != 0)
		return 0;

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		/* clear dump collection flags */
		if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
			/* Reload minidump template */
			qla4xxx_alloc_fw_dump(ha);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Firmware template reloaded\n"));
		}
		break;
	case 1:
		/* Set flag to read dump */
		if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
		    !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
			set_bit(AF_82XX_DUMP_READING, &ha->flags);
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "Raw firmware dump ready for read on (%ld).\n",
					  ha->host_no));
		}
		break;
	case 2:
		/* Reset HBA and collect FW dump */
		ha->isp_ops->idc_lock(ha);
		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
		if (dev_state == QLA8XXX_DEV_READY) {
			ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
				   __func__);
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_NEED_RESET);
			/* 8022 always owns the reset; 8032 only when the IDC
			 * protocol allows this function to perform it. */
			if (is_qla8022(ha) ||
			    (is_qla8032(ha) &&
			     qla4_83xx_can_perform_reset(ha))) {
				set_bit(AF_8XXX_RST_OWNER, &ha->flags);
				set_bit(AF_FW_RECOVERY, &ha->flags);
				ql4_printk(KERN_INFO, ha,
					   "%s: Reset owner is 0x%x\n",
					   __func__, ha->func_num);
			}
		} else
			ql4_printk(KERN_INFO, ha,
				   "%s: Reset not performed as device state is 0x%x\n",
				   __func__, dev_state);

		ha->isp_ops->idc_unlock(ha);
		break;
	default:
		/* do nothing */
		break;
	}

	return count;
}
/** * qla4xxx_mailbox_command - issues mailbox commands * @ha: Pointer to host adapter structure. * @inCount: number of mailbox registers to load. * @outCount: number of mailbox registers to return. * @mbx_cmd: data pointer for mailbox in registers. * @mbx_sts: data pointer for mailbox out registers. * * This routine issue mailbox commands and waits for completion. * If outCount is 0, this routine completes successfully WITHOUT waiting * for the mailbox command to complete. **/ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts) { int status = QLA_ERROR; uint8_t i; u_long wait_count; uint32_t intr_status; unsigned long flags = 0; uint32_t dev_state; /* Make sure that pointers are valid */ if (!mbx_cmd || !mbx_sts) { DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts " "pointer\n", ha->host_no, __func__)); return status; } if (is_qla8022(ha)) { if (test_bit(AF_FW_RECOVERY, &ha->flags)) { DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: " "prematurely completing mbx cmd as firmware " "recovery detected\n", ha->host_no, __func__)); return status; } /* Do not send any mbx cmd if h/w is in failed state*/ qla4_8xxx_idc_lock(ha); dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); qla4_8xxx_idc_unlock(ha); if (dev_state == QLA82XX_DEV_FAILED) { ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in " "failed state, do not send any mailbox commands\n", ha->host_no, __func__); return status; } } if ((is_aer_supported(ha)) && (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " "timeout MBX Exiting.\n", ha->host_no, __func__)); return status; } /* Mailbox code active */ wait_count = MBOX_TOV * 100; while (wait_count--) { mutex_lock(&ha->mbox_sem); if (!test_bit(AF_MBOX_COMMAND, &ha->flags)) { set_bit(AF_MBOX_COMMAND, &ha->flags); mutex_unlock(&ha->mbox_sem); break; } mutex_unlock(&ha->mbox_sem); if (!wait_count) { 
DEBUG2(printk("scsi%ld: %s: mbox_sem failed\n", ha->host_no, __func__)); return status; } msleep(10); } spin_lock_irqsave(&ha->hardware_lock, flags); ha->mbox_status_count = outCount; for (i = 0; i < outCount; i++) ha->mbox_status[i] = 0; if (is_qla8022(ha)) { /* Load all mailbox registers, except mailbox 0. */ DEBUG5( printk("scsi%ld: %s: Cmd ", ha->host_no, __func__); for (i = 0; i < inCount; i++) printk("mb%d=%04x ", i, mbx_cmd[i]); printk("\n")); for (i = 1; i < inCount; i++) writel(mbx_cmd[i], &ha->qla4_8xxx_reg->mailbox_in[i]); writel(mbx_cmd[0], &ha->qla4_8xxx_reg->mailbox_in[0]); readl(&ha->qla4_8xxx_reg->mailbox_in[0]); writel(HINT_MBX_INT_PENDING, &ha->qla4_8xxx_reg->hint); } else { /* Load all mailbox registers, except mailbox 0. */ for (i = 1; i < inCount; i++)