/*
 * qla_eioctl: ioctl entry point for the driver's diagnostic character
 * device.  Dispatches device register read/write (direct or indirect)
 * and 32-bit flash read requests.
 *
 * Returns 0 on success, ENXIO when the softc is missing or a helper
 * reports failure.  Unrecognized commands fall through and return 0.
 */
int
qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	qla_host_t *ha;
	qla_reg_val_t *rv;
	qla_rd_flash_t *rdf;
	int rval = 0;

	/* si_drv1 carries the per-device softc; NULL means no device. */
	ha = (qla_host_t *)dev->si_drv1;
	if (ha == NULL)
		return (ENXIO);

	switch (cmd) {
	case QLA_RDWR_REG:
		rv = (qla_reg_val_t *)data;

		if (rv->direct) {
			/* Direct (BAR-mapped) register access. */
			if (rv->rd)
				rv->val = READ_OFFSET32(ha, rv->reg);
			else
				WRITE_OFFSET32(ha, rv->reg, rv->val);
		} else {
			/* Indirect register access through the helper;
			 * any nonzero helper status is mapped to ENXIO. */
			if ((rval = qla_rdwr_indreg32(ha, rv->reg, &rv->val,
			    rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		rdf = (qla_rd_flash_t *)data;

		if ((rval = qla_rd_flash32(ha, rdf->off, &rdf->data)))
			rval = ENXIO;
		break;

	default:
		break;
	}

	return (rval);
}
/*
 * qla_eioctl: ioctl handler for the driver's diagnostic character device.
 *
 * Supported commands:
 *   QLA_RDWR_REG   - read/write a device register, directly (BAR-mapped)
 *                    or indirectly via qla_rdwr_indreg32().
 *   QLA_RD_FLASH   - read a 32-bit word from flash.
 *   QLA_WR_FLASH   - write a buffer to flash.
 *   QLA_ERASE_FLASH- erase a flash region.
 *   QLA_RD_PCI_IDS - report PCI vendor/device/subsystem/revision IDs.
 *
 * Returns 0 on success, ENXIO when the softc is missing or a helper
 * fails, and ENOTTY for unrecognized commands.
 */
int
qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	qla_reg_val_t *rv;
	qla_rd_flash_t *rdf;
	qla_wr_flash_t *wrf;
	qla_rd_pci_ids_t *pci_ids;
	device_t pci_dev;

	/* si_drv1 carries the per-device softc; NULL means no device. */
	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return (ENXIO);

	pci_dev = ha->pci_dev;

	switch (cmd) {
	case QLA_RDWR_REG:
		rv = (qla_reg_val_t *)data;

		if (rv->direct) {
			/* Direct (BAR-mapped) register access. */
			if (rv->rd)
				rv->val = READ_OFFSET32(ha, rv->reg);
			else
				WRITE_OFFSET32(ha, rv->reg, rv->val);
		} else {
			/* Indirect register access; any nonzero helper
			 * status is mapped to ENXIO. */
			if ((rval = qla_rdwr_indreg32(ha, rv->reg, &rv->val,
			    rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		rdf = (qla_rd_flash_t *)data;

		if ((rval = qla_rd_flash32(ha, rdf->off, &rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		wrf = (qla_wr_flash_t *)data;

		if ((rval = qla_wr_flash_buffer(ha, wrf->off, wrf->size,
		    wrf->buffer, wrf->pattern)))
			rval = ENXIO;
		break;

	case QLA_ERASE_FLASH:
		if (qla_erase_flash(ha, ((qla_erase_flash_t *)data)->off,
		    ((qla_erase_flash_t *)data)->size))
			rval = ENXIO;
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		/* Revision ID is a single byte in PCI config space. */
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		/*
		 * Bug fix: previously an unrecognized command fell through
		 * and returned 0 (success).  Report "inappropriate ioctl"
		 * instead, per standard ioctl(2) semantics.
		 */
		rval = ENOTTY;
		break;
	}

	return (rval);
}