/*
 * Name: qla_fw_cmd
 * Function: Issues firmware control commands on the Tx Ring.
 */
static int
qla_fw_cmd(qla_host_t *ha, void *fw_cmd, uint32_t size)
{
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *desc;
	int retries = 100;

	QLA_TX_LOCK(ha);

	/*
	 * Ensure the ring has room: reap completed descriptors and, when
	 * that is not enough, drop the lock and sleep in 10ms steps for
	 * up to 100 attempts before giving up.
	 */
	if (hw->txr_free <= QLA_TX_MIN_FREE) {
		do {
			qla_hw_tx_done_locked(ha);

			if (hw->txr_free > QLA_TX_MIN_FREE)
				break;

			QLA_TX_UNLOCK(ha);
			qla_mdelay(__func__, 10);
			QLA_TX_LOCK(ha);
		} while (--retries > 0);

		if (hw->txr_free <= QLA_TX_MIN_FREE) {
			QLA_TX_UNLOCK(ha);
			device_printf(ha->pci_dev, "%s: xmit queue full\n",
			    __func__);
			return (-1);
		}
	}

	/* Stage the command into the next free Tx descriptor. */
	desc = &hw->tx_ring_base[hw->txr_next];

	bzero((void *)desc, sizeof(q80_tx_cmd_t));
	bcopy(fw_cmd, desc, size);

	/* Advance the (power-of-two sized) ring and notify the hardware. */
	hw->txr_next = (hw->txr_next + 1) & (NUM_TX_DESCRIPTORS - 1);
	hw->txr_free--;

	QL_UPDATE_TX_PRODUCER_INDEX(ha, hw->txr_next);

	QLA_TX_UNLOCK(ha);

	return (0);
}
/*
 * ql_eioctl: character-device ioctl entry point for the driver's
 * diagnostic/management interface.  Dispatches on 'cmd' to provide
 * register access, flash read/write/erase, off-chip memory access,
 * firmware minidump retrieval, driver state / slowpath log retrieval
 * and PCI id queries.  Returns 0 on success or an errno value.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;

	/* per-command view of the ioctl data buffer */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev = ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		/* direct BAR register access, or indirect via helper */
		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg,
			    &u.rv->val, u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		/* flash access requires a valid flash descriptor table */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		/* refuse flash writes while the interface is running */
		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:
		/* same preconditions as QLA_WR_FLASH */
		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		/* read/write off-chip memory via a local staging value */
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}
		break;

	case QLA_RD_FW_DUMP_SIZE:
		/* report the buffer size a minidump retrieval will need */
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
		    ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;
		break;

	case QLA_RD_FW_DUMP:
		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev,
			    "%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* caller must supply a buffer sized template + dump */
		if ((fw_dump->minidump == NULL) ||
		    (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
		    ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: minidump buffer [%p] size = [%d, %d] invalid\n",
			    __func__, fw_dump->minidump,
			    fw_dump->minidump_size,
			    (ha->hw.mdump_buffer_size +
			    ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		/* only permitted on an even PCI function (port 0) */
		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
			    "%s: mindump allowed only on Port0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			/* port already offline: capture synchronously */
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
		} else {
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			/*
			 * Port online: if no dump exists yet, initiate a
			 * recovery under the driver lock so one is taken.
			 */
			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
					    "%s: recovery initiated "
					    " to trigger minidump\n",
					    __func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS	30
			/* poll in 100ms steps, up to QLNX_DUMP_WAIT_SECS */
			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port not offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}

			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			/* clear the done flag so a new dump can be taken */
			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed1\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		/* copy out the template followed by the dump buffer */
		if ((rval = copyout(ha->hw.mdump_template,
		    fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
		    ((uint8_t *)fw_dump->minidump +
		    ha->hw.mdump_template_size),
		    ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev,
			    "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		/* report the device's PCI identity registers */
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}
	return rval;
}
/*
 * Name: qla_issue_cmd
 * Function: Issues commands on the CDRP interface and returns responses.
 */
static int
qla_issue_cmd(qla_host_t *ha, qla_cdrp_t *cdrp)
{
	device_t dev = ha->pci_dev;
	uint32_t signature;
	uint32_t attempts;
	uint32_t data;
	int ret;

	/* Signature written ahead of the command; includes the PCI func. */
	signature = 0xcafe0000 | 0x0100 | ha->pci_func;

	/* Serialize CDRP access via hardware semaphore 5. */
	ret = qla_sem_lock(ha, Q8_SEM5_LOCK, 0, (uint32_t)ha->pci_func);
	if (ret) {
		device_printf(dev, "%s: SEM5_LOCK lock failed\n", __func__);
		return (ret);
	}

	/* Post the request: signature, three arguments, then the command. */
	WRITE_OFFSET32(ha, Q8_NX_CDRP_SIGNATURE, signature);
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG1, (cdrp->cmd_arg1));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG2, (cdrp->cmd_arg2));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_ARG3, (cdrp->cmd_arg3));
	WRITE_OFFSET32(ha, Q8_NX_CDRP_CMD_RSP, cdrp->cmd);

	/*
	 * Poll until the busy bit (bit 31) clears, in 10ms steps for up
	 * to 400 attempts (4 seconds total).
	 */
	for (attempts = 400; attempts != 0; attempts--) {
		qla_mdelay(__func__, 10);
		data = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
		if ((data & 0x80000000) == 0)
			break;
	}

	/* Timed out, or any response other than 1, is a failure. */
	if ((attempts == 0) || (data != 1))
		ret = -1;

	/* Collect the response words regardless of outcome. */
	cdrp->rsp = READ_REG32(ha, Q8_NX_CDRP_CMD_RSP);
	cdrp->rsp_arg1 = READ_REG32(ha, Q8_NX_CDRP_ARG1);
	cdrp->rsp_arg2 = READ_REG32(ha, Q8_NX_CDRP_ARG2);
	cdrp->rsp_arg3 = READ_REG32(ha, Q8_NX_CDRP_ARG3);

	qla_sem_unlock(ha, Q8_SEM5_UNLOCK);

	/* On failure, log both the request and the response registers. */
	if (ret) {
		device_printf(dev, "%s: "
		    "cmd[0x%08x] = 0x%08x\n"
		    "\tsig[0x%08x] = 0x%08x\n"
		    "\targ1[0x%08x] = 0x%08x\n"
		    "\targ2[0x%08x] = 0x%08x\n"
		    "\targ3[0x%08x] = 0x%08x\n", __func__,
		    Q8_NX_CDRP_CMD_RSP, cdrp->cmd,
		    Q8_NX_CDRP_SIGNATURE, signature,
		    Q8_NX_CDRP_ARG1, cdrp->cmd_arg1,
		    Q8_NX_CDRP_ARG2, cdrp->cmd_arg2,
		    Q8_NX_CDRP_ARG3, cdrp->cmd_arg3);

		device_printf(dev, "%s: exit (ret = 0x%x)\n"
		    "\t\t rsp = 0x%08x\n"
		    "\t\t arg1 = 0x%08x\n"
		    "\t\t arg2 = 0x%08x\n"
		    "\t\t arg3 = 0x%08x\n", __func__, ret, cdrp->rsp,
		    cdrp->rsp_arg1, cdrp->rsp_arg2, cdrp->rsp_arg3);
	}

	return (ret);
}