/* Scsi_Host attributes. */
static ssize_t
qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
                      char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
}
static ssize_t
qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
}
static ssize_t
qla4xxx_fw_ext_timestamp_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%s\n",
                        ha->fw_info.extended_timestamp);
}
static ssize_t
qla4xxx_serial_num_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%s\n", ha->serial_number);
}
static ssize_t
qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
}
static ssize_t
qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
                        ha->fw_info.iscsi_minor);
}
static ssize_t
qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        qla4xxx_about_firmware(ha);

        return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
                        ha->fw_uptime_msecs);
}
static ssize_t
qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
                        ha->bootload_major, ha->bootload_minor,
                        ha->bootload_patch, ha->bootload_build);
}
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t status[MBOX_REG_COUNT];
        uint32_t acb_idx;
        uint32_t ip_idx;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 4022 and above adapters are supported */
        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        if (bsg_job->reply_payload.payload_len < sizeof(status)) {
                ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
                           __func__, bsg_job->reply_payload.payload_len);
                rval = -EINVAL;
                goto leave;
        }

        acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

        rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
                           __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            status, sizeof(status));
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
leave:
        return rval;
}
static ssize_t
qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
                      char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        qla4xxx_get_firmware_state(ha);
        return snprintf(buf, PAGE_SIZE, "0x%08X %08X\n", ha->firmware_state,
                        ha->addl_fw_state);
}
static ssize_t
qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        if (is_qla40XX(ha))
                return -ENOSYS;

        return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
}
static ssize_t
qla4xxx_fw_version_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));

        if (is_qla80XX(ha))
                return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
                                ha->fw_info.fw_major, ha->fw_info.fw_minor,
                                ha->fw_info.fw_patch, ha->fw_info.fw_build);
        else
                return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
                                ha->fw_info.fw_major, ha->fw_info.fw_minor,
                                ha->fw_info.fw_patch, ha->fw_info.fw_build);
}
/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);

        switch (bsg_req->msgcode) {
        case ISCSI_BSG_HST_VENDOR:
                return qla4xxx_process_vendor_specific(bsg_job);

        default:
                ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
                           __func__, bsg_req->msgcode);
        }
        return -ENOSYS;
}
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t region = 0;
        uint32_t field0 = 0;
        uint32_t field1 = 0;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 4022 and above adapters are supported */
        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
        field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

        rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: restore factory defaults "
                           "failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
leave:
        return rval;
}
static ssize_t
qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
        char *load_src = NULL;

        switch (ha->fw_info.fw_load_source) {
        case 1:
                load_src = "Flash Primary";
                break;
        case 2:
                load_src = "Flash Secondary";
                break;
        case 3:
                load_src = "Host Download";
                break;
        default:
                /* Avoid handing a NULL string to snprintf() below. */
                load_src = "Unknown";
                break;
        }

        return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
}
static ssize_t
qla4xxx_store_host_reset(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
        int ret = -EINVAL;
        char str[10];
        int type;

        /* Bound the scan so a long sysfs write cannot overflow str[]. */
        sscanf(buf, "%9s", str);
        type = qla4xxx_check_reset_type(str);

        if (!type)
                goto exit_store_host_reset;

        ret = qla4xxx_host_reset(ha, type);

exit_store_host_reset:
        if (ret == 0)
                ret = count;

        return ret;
}
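/*
 * The show/store callbacks above only become visible under
 * /sys/class/scsi_host/host<n>/ once they are declared as device
 * attributes and hooked into the SCSI host template.  A minimal sketch of
 * that wiring, assuming the conventional DEVICE_ATTR pattern; the subset
 * of attributes listed here is illustrative, not taken from this file:
 */
static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(host_reset, S_IWUSR, NULL, qla4xxx_store_host_reset);

/*
 * NULL-terminated list referenced from scsi_host_template.shost_attrs;
 * "cat /sys/class/scsi_host/host0/fw_version" then invokes
 * qla4xxx_fw_version_show().
 */
struct device_attribute *qla4xxx_host_attrs[] = {
        &dev_attr_board_id,
        &dev_attr_fw_version,
        &dev_attr_host_reset,
        NULL,
};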
/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);

        switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
        case QLISCSI_VND_READ_FLASH:
                return qla4xxx_read_flash(bsg_job);

        case QLISCSI_VND_UPDATE_FLASH:
                return qla4xxx_update_flash(bsg_job);

        case QLISCSI_VND_GET_ACB_STATE:
                return qla4xxx_get_acb_state(bsg_job);

        case QLISCSI_VND_READ_NVRAM:
                return qla4xxx_read_nvram(bsg_job);

        case QLISCSI_VND_UPDATE_NVRAM:
                return qla4xxx_update_nvram(bsg_job);

        case QLISCSI_VND_RESTORE_DEFAULTS:
                return qla4xxx_restore_defaults(bsg_job);

        case QLISCSI_VND_GET_ACB:
                return qla4xxx_bsg_get_acb(bsg_job);

        default:
                /* Report the unknown vendor opcode, not the BSG msgcode. */
                ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
                           "0x%x\n", __func__,
                           bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
                bsg_reply->result = (DID_ERROR << 16);
                bsg_reply->reply_payload_rcv_len = 0;
                bsg_job_done(bsg_job, bsg_reply->result,
                             bsg_reply->reply_payload_rcv_len);
                return -ENOSYS;
        }
}
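/*
 * For reference, a user-space sketch of how a vendor command dispatched
 * above (here QLISCSI_VND_GET_ACB_STATE) reaches this handler: an
 * iscsi_bsg_request with msgcode ISCSI_BSG_HST_VENDOR is submitted with
 * the SG_IO ioctl on the host's bsg node.  The request layout follows
 * include/scsi/scsi_bsg_iscsi.h and the vendor word positions match the
 * vendor_cmd[] indices read in qla4xxx_get_acb_state(); the node path,
 * buffer sizing, and error handling are illustrative assumptions, not a
 * definitive ABI reference.
 *
 *	int fd = open("/dev/bsg/iscsi_host0", O_RDWR);	// path assumed
 *	uint32_t rqst[16] = { 0 };			// header + vendor words
 *	uint32_t status[8];		// receives the driver's status[] copy
 *	struct iscsi_bsg_request *hdr = (void *)rqst;
 *	struct sg_io_v4 io = { 0 };
 *
 *	hdr->msgcode = ISCSI_BSG_HST_VENDOR;
 *	hdr->rqst_data.h_vendor.vendor_cmd[0] = QLISCSI_VND_GET_ACB_STATE;
 *	hdr->rqst_data.h_vendor.vendor_cmd[1] = acb_idx;
 *	hdr->rqst_data.h_vendor.vendor_cmd[2] = ip_idx;
 *
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request_len = sizeof(rqst);
 *	io.request = (uintptr_t)hdr;
 *	io.din_xfer_len = sizeof(status);	// becomes the reply payload
 *	io.din_xferp = (uintptr_t)status;
 *	ioctl(fd, SG_IO, &io);
 */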
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t acb_type = 0;
        uint32_t len = 0;
        dma_addr_t acb_dma;
        uint8_t *acb = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        /* Only 4022 and above adapters are supported */
        if (is_qla4010(ha))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        len = bsg_job->reply_payload.payload_len;
        if (len < sizeof(struct addr_ctrl_blk)) {
                ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
                           __func__, len);
                rval = -EINVAL;
                goto leave;
        }

        acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
        if (!acb) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else {
                bsg_reply->reply_payload_rcv_len =
                        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
                                            bsg_job->reply_payload.sg_cnt,
                                            acb, len);
                bsg_reply->result = DID_OK << 16;
        }

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
        return rval;
}
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        uint32_t length = 0;
        uint32_t offset = 0;
        uint32_t options = 0;
        dma_addr_t flash_dma;
        uint8_t *flash = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        if (ha->flash_state != QLFLASH_WAITING) {
                ql4_printk(KERN_ERR, ha, "%s: another flash operation "
                           "active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        ha->flash_state = QLFLASH_WRITING;

        length = bsg_job->request_payload.payload_len;
        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

        flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
                                   GFP_KERNEL);
        if (!flash) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto restore_state;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                          bsg_job->request_payload.sg_cnt, flash, length);

        rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
restore_state:
        /*
         * Release the flash only on paths that claimed it above; the
         * early exits must not clobber a concurrent QLFLASH_WRITING state.
         */
        ha->flash_state = QLFLASH_WAITING;
leave:
        return rval;
}
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
        struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
        struct scsi_qla_host *ha = to_qla_host(host);
        struct iscsi_bsg_request *bsg_req = bsg_job->request;
        struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
        uint32_t offset = 0;
        uint32_t len = 0;
        uint32_t total_len = 0;
        dma_addr_t nvram_dma;
        uint8_t *nvram = NULL;
        int rval = -EINVAL;

        bsg_reply->reply_payload_rcv_len = 0;

        if (unlikely(pci_channel_offline(ha->pdev)))
                goto leave;

        if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
                goto leave;

        if (ql4xxx_reset_active(ha)) {
                ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
                rval = -EBUSY;
                goto leave;
        }

        offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
        len = bsg_job->request_payload.payload_len;
        total_len = offset + len;

        /* total len should not be greater than max NVRAM size */
        if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
            ((is_qla4022(ha) || is_qla4032(ha)) &&
             total_len > QL40X2_NVRAM_SIZE)) {
                ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
                           " nvram size, offset=%d len=%d\n",
                           __func__, offset, len);
                goto leave;
        }

        nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
                                   GFP_KERNEL);
        if (!nvram) {
                ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
                           "data\n", __func__);
                rval = -ENOMEM;
                goto leave;
        }

        sg_copy_to_buffer(bsg_job->request_payload.sg_list,
                          bsg_job->request_payload.sg_cnt, nvram, len);

        rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
        if (rval) {
                ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
                bsg_reply->result = DID_ERROR << 16;
                rval = -EIO;
        } else
                bsg_reply->result = DID_OK << 16;

        bsg_job_done(bsg_job, bsg_reply->result,
                     bsg_reply->reply_payload_rcv_len);
        dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
        return rval;
}