/*
 * qib_bad_intrstatus - handle a failed read of the chip interrupt status
 * @dd: the qlogic_ib device
 *
 * Separate routine, for better optimization of qib_intr().
 *
 * We print the message and disable interrupts, in hope of having a
 * better chance of debugging the problem.  Escalates across calls via
 * the static counter: 2nd failure disables interrupt delivery, 3rd
 * failure unregisters the IRQ entirely and marks the device bad.
 *
 * Fix: the original message read "status failed disabling interrupts",
 * which parses as "the disable failed"; add the missing comma.
 */
void qib_bad_intrstatus(struct qib_devdata *dd)
{
	static int allbits;

	qib_dev_err(dd,
		"Read of chip interrupt status failed, disabling interrupts\n");
	if (allbits++) {
		/* disable interrupt delivery, something is very wrong */
		if (allbits == 2)
			dd->f_set_intr_state(dd, 0);
		if (allbits == 3) {
			qib_dev_err(dd,
				"2nd bad interrupt status, unregistering interrupts\n");
			dd->flags |= QIB_BADINTR;
			dd->flags &= ~QIB_INITTED;
			dd->f_free_irq(dd);
		}
	}
}
/**
 * qib_msi_setup - enable MSI and snapshot the MSI config registers
 * @dd: the qlogic_ib device
 * @pos: config-space offset of the MSI capability
 *
 * We save the msi lo and hi values, so we can restore them after
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly.
 *
 * Returns the pci_enable_msi() result (0 on success, negative errno
 * otherwise); the register snapshot is taken either way.
 */
static int qib_msi_setup(struct qib_devdata *dd, int pos)
{
	struct pci_dev *pdev = dd->pcidev;
	u16 control;
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		qib_dev_err(dd, "pci_enable_msi failed: %d, "
			    "interrupts may not work\n", ret);
	/* continue even if it fails, we may still be OK... */

	/* snapshot address registers for restore after chip reset */
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
			      &dd->msi_lo);
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
			      &dd->msi_hi);
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	/* now save the data (vector) info */
	/* data register is at offset 12 for 64-bit MSI, 8 for 32-bit */
	pci_read_config_word(pdev, pos +
			     ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			     &dd->msi_data);
	qib_cdbg(VERBOSE, "Read msi data 0x%x from config offset "
		 "0x%x, control=0x%x\n", dd->msi_data,
		 pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
		 control);
	return ret;
}
/*
 * Enable MSI-X, clamping the requested vector count to what the
 * hardware's MSI-X table supports.  On failure, reports zero vectors
 * through *msixcnt and falls back to INTx.
 */
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct msix_entry *msix_entry)
{
	int ret;
	u32 tabsize = 0;
	u16 msix_flags;

	/* pos is the config-space offset of the MSI-X capability */
	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
	/* the QSIZE field encodes (table size - 1) */
	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
	if (tabsize > *msixcnt)
		tabsize = *msixcnt;
	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	if (ret > 0) {
		/* positive return is the number of vectors available: retry */
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	}
	if (ret) {
		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
			    "falling back to INTx\n", tabsize, ret);
		tabsize = 0;
	}
	*msixcnt = tabsize;

	if (ret)
		qib_enable_intx(dd->pcidev);
}
/*
 * Setup pcie interrupt stuff again after a reset. I'd like to just call
 * pci_enable_msi() again for msi, but when I do that,
 * the MSI enable bit doesn't get set in the command word, and
 * we switch to to a different interrupt vector, which is confusing,
 * so I instead just do it all inline. Perhaps somehow can tie this
 * into the PCIe hotplug support at some point
 *
 * Returns 1 if interrupt delivery was restored (MSI or INTx), else 0.
 */
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* we weren't using MSI, so don't restore it */
	if (!dd->msi_lo)
		goto bail;

#ifdef CONFIG_PCI_MSI
	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
	if (!pos) {
		qib_dev_err(dd, "Can't find MSI capability, "
			    "can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}
	qib_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
		 dd->msi_lo, pos + PCI_MSI_ADDRESS_LO);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	/* NOTE(review): message text says "msi_lo" but this writes msi_hi */
	qib_cdbg(VERBOSE, "Writing msi_lo 0x%x to config offset 0x%x\n",
		 dd->msi_hi, pos + PCI_MSI_ADDRESS_HI);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		qib_cdbg(INIT, "MSI control at off %x was %x, "
			 "setting MSI enable (%x)\n", pos + PCI_MSI_FLAGS,
			 control, control | PCI_MSI_FLAGS_ENABLE);
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info */
	/* data register is at offset 12 for 64-bit MSI, 8 for 32-bit */
	pci_write_config_word(dd->pcidev,
			      pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;
#endif /* CONFIG_PCI_MSI */

bail:
	/* fall back to INTx if MSI restore was not possible */
	if (!ret && (dd->flags & QIB_HAS_INTX)) {
		qib_cdbg(INIT, "Using INTx, MSI disabled or not configured\n");
		qib_enable_intx(dd->pcidev);
		ret = 1;
	}

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);

	return ret;
}
/*
 * Handle a failed read of the chip interrupt status register.
 * Escalates on repeated failures: the second call shuts off interrupt
 * delivery, the third unregisters the IRQ and marks the device bad.
 */
void qib_bad_intrstatus(struct qib_devdata *dd)
{
	static int allbits;

	qib_dev_err(dd,
		"Read of chip interrupt status failed disabling interrupts\n");

	++allbits;
	switch (allbits) {
	case 1:
		/* first failure: report only */
		break;
	case 2:
		/* disable interrupt delivery, something is very wrong */
		dd->f_set_intr_state(dd, 0);
		break;
	case 3:
		qib_dev_err(dd,
			"2nd bad interrupt status, unregistering interrupts\n");
		dd->flags |= QIB_BADINTR;
		dd->flags &= ~QIB_INITTED;
		dd->f_free_irq(dd);
		break;
	default:
		/* already unregistered; nothing more to do */
		break;
	}
}
/*
 * sysfs store: parse an unsigned short and apply it as the LED
 * override value for this port.  Returns the parse error on failure,
 * otherwise the full write count.
 */
static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
				  size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	u16 val;
	int ret = qib_parse_ushort(buf, &val);

	if (ret <= 0) {
		/* nothing parsed, or parse error */
		qib_dev_err(dd, "attempt to set invalid LED override\n");
		return (ret < 0) ? ret : count;
	}

	qib_set_led_override(ppd, val);
	return count;
}
/*
 * Read a span of the on-board flash (EEPROM) into a userspace buffer.
 * Clamps the request to the flash size; returns bytes copied, 0 at
 * end-of-flash, or a negative errno.
 */
static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct qib_devdata *dd;
	loff_t pos = *ppos;
	char *tmp;
	ssize_t ret;

	if (pos < 0)
		return -EINVAL;
	if (pos >= sizeof(struct qib_flash))
		return 0;	/* nothing left to read */
	if (count > sizeof(struct qib_flash) - pos)
		count = sizeof(struct qib_flash) - pos;

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	dd = private2dd(file);
	if (qib_eeprom_read(dd, pos, tmp, count)) {
		qib_dev_err(dd, "failed to read from flash\n");
		ret = -ENXIO;
	} else if (copy_to_user(buf, tmp, count)) {
		ret = -EFAULT;
	} else {
		*ppos = pos + count;
		ret = count;
	}

	kfree(tmp);
	return ret;
}
/*
 * Actually update the eeprom, first doing write enable if
 * needed, then restoring write enable state.
 * Must be called with eep_lock held
 */
static int eeprom_write_with_enable(struct qib_devdata *dd, u8 offset,
				    const void *buf, int len)
{
	int prev_wen;
	int rc;

	/* assert write-enable, remembering previous state to restore */
	prev_wen = dd->f_eeprom_wen(dd, 1);

	rc = qib_twsi_reset(dd);
	if (!rc)
		rc = qib_twsi_blk_wr(dd, dd->twsi_eeprom_dev, offset,
				     buf, len);
	else
		qib_dev_err(dd, "EEPROM Reset for write failed\n");

	/* put write-enable back the way we found it */
	dd->f_eeprom_wen(dd, prev_wen);
	return rc;
}
/*
 * Enable MSI-X via pci_enable_msix_range(), accepting anywhere from 1
 * up to the requested number of vectors.  On any failure, reports zero
 * vectors through *msixcnt and falls back to INTx.
 */
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct qib_msix_entry *qib_msix_entry)
{
	int ret;
	int nvec = *msixcnt;
	struct msix_entry *msix_entry;
	int i;

	ret = pci_msix_vec_count(dd->pcidev);
	if (ret < 0)
		goto do_intx;

	nvec = min(nvec, ret);

	/* We can't pass qib_msix_entry array to qib_msix_setup
	 * so use a dummy msix_entry array and copy the allocated
	 * irq back to the qib_msix_entry array. */
	msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
	if (!msix_entry)
		goto do_intx;
		/* NOTE(review): on this path the error message below prints
		 * the (positive) vector count from pci_msix_vec_count() as
		 * the "failure" code — slightly misleading but harmless */

	for (i = 0; i < nvec; i++)
		msix_entry[i] = qib_msix_entry[i].msix;

	ret = pci_enable_msix_range(dd->pcidev, msix_entry, 1, nvec);
	if (ret < 0)
		goto free_msix_entry;
	else
		nvec = ret;	/* actual number of vectors granted */

	for (i = 0; i < nvec; i++)
		qib_msix_entry[i].msix = msix_entry[i];

	kfree(msix_entry);
	*msixcnt = nvec;
	return;

free_msix_entry:
	kfree(msix_entry);

do_intx:
	qib_dev_err(
		dd,
		"pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
		nvec, ret);
	*msixcnt = 0;
	qib_enable_intx(dd->pcidev);
}
/*
 * sysfs store: parse an unsigned short and apply it as the LED
 * override value for this port.  Returns the kstrtou16() error on
 * parse failure, otherwise the full write count.
 */
static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
				  size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	u16 val;
	int rc = kstrtou16(buf, 0, &val);

	if (!rc) {
		qib_set_led_override(ppd, val);
		return count;
	}

	qib_dev_err(dd, "attempt to set invalid LED override\n");
	return rc;
}
/*
 * Write the entire on-board flash (EEPROM) image from userspace.
 * Only whole-image writes starting at offset 0 are accepted.
 * Returns the byte count written or a negative errno.
 */
static ssize_t flash_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct qib_devdata *dd;
	loff_t pos = *ppos;
	char *tmp;
	ssize_t ret;

	/* must be a complete image, written from the start */
	if (pos != 0 || count != sizeof(struct qib_flash))
		return -EINVAL;

	tmp = kmalloc(count, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	if (copy_from_user(tmp, buf, count)) {
		ret = -EFAULT;
		goto out;
	}

	dd = private2dd(file);
	if (qib_eeprom_write(dd, pos, tmp, count)) {
		ret = -ENXIO;
		qib_dev_err(dd, "failed to write to flash\n");
		goto out;
	}

	*ppos = pos + count;
	ret = count;

out:
	kfree(tmp);
	return ret;
}
/*
 * Enable MSI-X, clamping the requested vector count to what the
 * hardware's MSI-X table supports.  On failure, reports zero vectors
 * through *msixcnt and falls back to INTx.
 *
 * Fix: the scratch-array allocation used an open-coded
 * `*msixcnt * sizeof(*msix_entry)` kmalloc, which can overflow for a
 * caller-supplied count; kcalloc() performs the same allocation with
 * the multiplication checked (and matches the later variant of this
 * function that uses kcalloc).
 */
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
			   struct qib_msix_entry *qib_msix_entry)
{
	int ret;
	u32 tabsize = 0;
	u16 msix_flags;
	struct msix_entry *msix_entry;
	int i;

	/* We can't pass qib_msix_entry array to qib_msix_setup
	 * so use a dummy msix_entry array and copy the allocated
	 * irq back to the qib_msix_entry array. */
	msix_entry = kcalloc(*msixcnt, sizeof(*msix_entry), GFP_KERNEL);
	if (!msix_entry) {
		ret = -ENOMEM;
		goto do_intx;
	}
	for (i = 0; i < *msixcnt; i++)
		msix_entry[i] = qib_msix_entry[i].msix;

	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
	/* the QSIZE field encodes (table size - 1) */
	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
	if (tabsize > *msixcnt)
		tabsize = *msixcnt;
	ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	if (ret > 0) {
		/* positive return is the number of vectors available: retry */
		tabsize = ret;
		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
	}
do_intx:
	if (ret) {
		qib_dev_err(dd,
			"pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
			tabsize, ret);
		tabsize = 0;
	}
	/* copy the granted vectors back out (no-op on failure) */
	for (i = 0; i < tabsize; i++)
		qib_msix_entry[i].msix = msix_entry[i];
	kfree(msix_entry);	/* kfree(NULL) is a no-op on the ENOMEM path */
	*msixcnt = tabsize;

	if (ret)
		qib_enable_intx(dd->pcidev);
}
/*
 * Re-synchronize the IB SerDes EPB bus after a reset, by repeatedly
 * writing/reading alternating test patterns (0xF0/0x55) to a per-channel
 * scratch register and checking the CMUDONE register for each of the
 * four channels in turn.  Returns 0 on success, negative on failure.
 */
static int qib_resync_ibepb(struct qib_devdata *dd)
{
	int ret, pat, tries, chn;
	u32 loc;

	ret = -1;
	chn = 0;
	for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
		loc = IB_PGUDP(chn);
		/* read-modify-write with zero mask == plain read */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed read in resync\n");
			continue;
		}
		if (ret != 0xF0 && ret != 0x55 && tries == 0)
			qib_dev_err(dd, "unexpected pattern in resync\n");
		pat = ret ^ 0xA5; /* alternate F0 and 55 */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
		if (ret < 0) {
			qib_dev_err(dd, "Failed write in resync\n");
			continue;
		}
		/* read back and verify the pattern survived */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed re-read in resync\n");
			continue;
		}
		if (ret != pat) {
			qib_dev_err(dd, "Failed compare1 in resync\n");
			continue;
		}
		loc = IB_CMUDONE(chn);
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
			continue;
		}
		/* expect channel number in bits 5:4 and DONE bit 6 set */
		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
				    ret, chn);
			continue;
		}
		if (++chn == 4)
			break; /* Success */
	}
	/* final ret is the last register value (>0) on success */
	return (ret > 0) ? 0 : ret;
}
/**
 * qib_eeprom_read - receives bytes from the eeprom via I2C
 * @dd: the qlogic_ib device
 * @eeprom_offset: address to read from
 * @buff: where to store result
 * @len: number of bytes to receive
 *
 * Serialized by eep_lock; returns 0 on success, negative errno or
 * TWSI error otherwise.
 */
int qib_eeprom_read(struct qib_devdata *dd, u8 eeprom_offset,
		    void *buff, int len)
{
	int ret = mutex_lock_interruptible(&dd->eep_lock);

	if (ret)
		return ret;	/* interrupted while waiting for the lock */

	ret = qib_twsi_reset(dd);
	if (ret)
		qib_dev_err(dd, "EEPROM Reset for read failed\n");
	else
		ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev,
				      eeprom_offset, buff, len);

	mutex_unlock(&dd->eep_lock);
	return ret;
}
/*
 * Restore MSI configuration after a chip reset, rewriting the saved
 * address/data registers directly rather than calling pci_enable_msi()
 * again (which would move us to a different vector).
 * Returns 1 if interrupt delivery was restored (MSI or INTx), else 0.
 */
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* we weren't using MSI, so don't restore it */
	if (!dd->msi_lo)
		goto bail;

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
	if (!pos) {
		qib_dev_err(dd, "Can't find MSI capability, "
			    "can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}
	/* restore the address registers saved by qib_msi_setup() */
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		/* reset cleared the enable bit; set it again */
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info; offset depends on 64-bit MSI */
	pci_write_config_word(dd->pcidev,
			      pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;

bail:
	/* fall back to INTx if MSI restore was not possible */
	if (!ret && (dd->flags & QIB_HAS_INTX)) {
		qib_enable_intx(dd->pcidev);
		ret = 1;
	}

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);
	return ret;
}
/*
 * Setup pcie interrupt stuff again after a reset. I'd like to just call
 * pci_enable_msi() again for msi, but when I do that,
 * the MSI enable bit doesn't get set in the command word, and
 * we switch to to a different interrupt vector, which is confusing,
 * so I instead just do it all inline. Perhaps somehow can tie this
 * into the PCIe hotplug support at some point
 *
 * Returns 1 if interrupt delivery was restored (MSI or INTx), else 0.
 */
int qib_reinit_intr(struct qib_devdata *dd)
{
	int pos;
	u16 control;
	int ret = 0;

	/* If we aren't using MSI, don't restore it */
	if (!dd->msi_lo)
		goto bail;

	/* cached MSI capability offset (set at probe time) */
	pos = dd->pcidev->msi_cap;
	if (!pos) {
		qib_dev_err(dd,
			"Can't find MSI capability, can't restore MSI settings\n");
		ret = 0;
		/* nothing special for MSIx, just MSI */
		goto bail;
	}
	/* restore the address registers saved by qib_msi_setup() */
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
			       dd->msi_lo);
	pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
			       dd->msi_hi);
	pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE)) {
		/* reset cleared the enable bit; set it again */
		control |= PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
				      control);
	}
	/* now rewrite the data (vector) info */
	pci_write_config_word(dd->pcidev,
			      pos + ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			      dd->msi_data);
	ret = 1;

bail:
	/* fall back to INTx if MSI restore was not possible */
	if (!ret && (dd->flags & QIB_HAS_INTX)) {
		/* note: this variant's qib_enable_intx() takes dd, not pcidev */
		qib_enable_intx(dd);
		ret = 1;
	}

	/* and now set the pci master bit again */
	pci_set_master(dd->pcidev);
	return ret;
}
/*
 * Allocate interrupt vectors for the device, preferring MSI-X, then
 * MSI, then legacy INTx, based on the capabilities the PCI core
 * discovered.  Returns the pci_alloc_irq_vectors() result.
 */
static int qib_allocate_irqs(struct qib_devdata *dd, u32 maxvec)
{
	unsigned int flags = PCI_IRQ_LEGACY;

	/* Check our capabilities */
	if (dd->pcidev->msix_cap) {
		flags |= PCI_IRQ_MSIX;
	} else if (dd->pcidev->msi_cap) {
		flags |= PCI_IRQ_MSI;
		/* Get msi_lo and msi_hi */
		qib_msi_setup(dd, dd->pcidev->msi_cap);
	} else {
		/* neither capability present; legacy INTx only */
		qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}

	return pci_alloc_irq_vectors(dd->pcidev, 1, maxvec, flags);
}
/*
 * sysfs store: parse and apply the Heartbeat enable setting.
 *
 * Set the "intentional" heartbeat enable per either of
 * "Enable" and "Auto", as these are normally set together.
 * This bit is consulted when leaving loopback mode,
 * because entering loopback mode overrides it and automatically
 * disables heartbeat.
 */
static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
			       size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	u16 val;
	int rc = qib_parse_ushort(buf, &val);

	if (rc >= 0)
		rc = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);

	if (rc < 0) {
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
		return rc;
	}
	return count;
}
/*
 * Clear an IB SerDes parity error, then re-arm detection: pulse the
 * uC parity-clear bit in MPREG6, then clear the chip-level hwerr
 * status bit.  Scratch reads flush the writes to the chip.
 */
void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
	int ret;

	/* clear, then re-enable parity errs */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
				 UC_PAR_CLR_D, UC_PAR_CLR_M);
	if (ret < 0) {
		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
		goto bail;
	}
	/* drop the clear bit again to re-arm */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
				 UC_PAR_CLR_M);
	qib_read_kreg32(dd, kr_scratch);	/* flush */
	udelay(4);
	qib_write_kreg(dd, kr_hwerrclear,
		       QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
	qib_read_kreg32(dd, kr_scratch);	/* flush */
bail:
	return;
}
/*
 * Enable MSI and snapshot the MSI address/data config registers so
 * they can be restored after a chip reset (the PCI core does not do
 * that for us).  @pos is the config-space offset of the MSI
 * capability.  Returns the pci_enable_msi() result; the snapshot is
 * taken either way.
 */
static int qib_msi_setup(struct qib_devdata *dd, int pos)
{
	struct pci_dev *pdev = dd->pcidev;
	u16 control;
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		qib_dev_err(dd, "pci_enable_msi failed: %d, "
			    "interrupts may not work\n", ret);
	/* continue even if it fails; we may still be OK with INTx */

	/* snapshot address registers for restore after chip reset */
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
			      &dd->msi_lo);
	pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
			      &dd->msi_hi);
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
	/* now save the data (vector) info; offset depends on 64-bit MSI */
	pci_read_config_word(pdev, pos +
			     ((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
			     &dd->msi_data);
	return ret;
}
/*
 * qib_pcie_params - allocate interrupt vectors and record link params
 * @dd: the qlogic_ib device
 * @minw: minimum expected link width; complain if the trained width is less
 * @nent: in: requested vector count; out: vectors actually allocated
 *
 * Returns 0 on success, negative on failure; lbus_speed/width/info are
 * filled in even on error paths.
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)
{
	u16 linkstat, speed;
	int nvec;
	int maxvec;
	int ret = 0;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		ret = -1;
		goto bail;
	}

	maxvec = (nent && *nent) ? *nent : 1;
	nvec = qib_allocate_irqs(dd, maxvec);
	if (nvec < 0) {
		ret = nvec;
		goto bail;
	}

	/*
	 * If nent exists, make sure to record how many vectors were allocated
	 */
	if (nent) {
		*nent = nvec;

		/*
		 * If we requested (nent) MSIX, but msix_enabled is not set,
		 * pci_alloc_irq_vectors() enabled INTx.
		 */
		if (!dd->pcidev->msix_enabled)
			qib_dev_err(dd,
				    "no msix vectors allocated, using INTx\n");
	}

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);
	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
/*
 * qsfp_read - read bytes from the QSFP module over the shared TWSI bus
 * @ppd: port data (selects which port's module via GPIO)
 * @addr: starting byte address within the module
 * @bp: destination buffer
 * @len: number of bytes to read
 *
 * Serialized by eep_lock (shared with EEPROM access).  Returns the
 * number of bytes read, or a negative errno.
 */
static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
{
	struct qib_devdata *dd = ppd->dd;
	u32 out, mask;
	int ret, cnt, pass = 0;
	int stuck = 0;
	u8 *buff = bp;

	qib_cdbg(VERBOSE, "Grabbing Mutex for QSFP in %d:%d\n", dd->unit,
		 ppd->port);
	ret = mutex_lock_interruptible(&dd->eep_lock);
	if (ret)
		goto no_unlock;

	if (dd->twsi_eeprom_dev == QIB_TWSI_NO_DEV) {
		qib_dbg("QSFP read on board without QSFP\n");
		ret = -ENXIO;
		goto bail;
	}

	/*
	 * We presume, if we are called at all, that this board has
	 * QSFP. This is on the same i2c chain as the legacy parts,
	 * but only responds if the module is selected via GPIO pins.
	 * Further, there are very long setup and hold requirements
	 * on MODSEL.
	 */
	mask = QSFP_GPIO_MOD_SEL_N | QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	/* MOD_SEL_N left low in "out" asserts module select (active low) */
	out = QSFP_GPIO_MOD_RST_N | QSFP_GPIO_LP_MODE;
	if (ppd->hw_pidx) {
		/* second port's QSFP GPIOs are shifted up */
		mask <<= QSFP_GPIO_PORT2_SHIFT;
		out <<= QSFP_GPIO_PORT2_SHIFT;
	}

	dd->f_gpio_mod(dd, out, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL, and there
	 * is no way to tell if it is ready, so we must wait.
	 */
	msleep(2);

	/* Make sure TWSI bus is in sane state. */
	ret = qib_twsi_reset(dd);
	if (ret) {
		qib_dev_porterr(dd, ppd->port,
				"QSFP interface Reset for read failed\n");
		ret = -EIO;
		stuck = 1;
		goto deselect;
	}

	/* All QSFP modules are at A0 */

	cnt = 0;
	while (cnt < len) {
		unsigned in_page;
		int wlen = len - cnt;

		/* clamp each transfer so it does not cross a page boundary */
		in_page = addr % QSFP_PAGESIZE;
		if ((in_page + wlen) > QSFP_PAGESIZE)
			wlen = QSFP_PAGESIZE - in_page;
		ret = qib_twsi_blk_rd(dd, QSFP_DEV, addr, buff + cnt, wlen);
		/* Some QSFP's fail first try. Retry as experiment */
		if (ret && cnt == 0 && ++pass < QSFP_MAX_RETRY)
			continue;
		if (ret) {
			/* qib_twsi_blk_rd() 1 for error, else 0 */
			ret = -EIO;
			goto deselect;
		}
		addr += wlen;
		cnt += wlen;
	}
	ret = cnt;

deselect:
	/*
	 * Module could take up to 10 uSec after transfer before
	 * ready to respond to MOD_SEL negation, and there is no way
	 * to tell if it is ready, so we must wait.
	 */
	udelay(10);
	/* set QSFP MODSEL, RST, LP all high (deselect the module) */
	dd->f_gpio_mod(dd, mask, mask, mask);

	/*
	 * Module could take up to 2 Msec to respond to MOD_SEL
	 * going away, and there is no way to tell if it is ready.
	 * so we must wait.
	 */
	if (stuck)
		qib_dev_err(dd, "QSFP interface bus stuck non-idle\n");

	if (pass >= QSFP_MAX_RETRY && ret)
		qib_dev_porterr(dd, ppd->port, "QSFP failed even retrying\n");
	else if (pass)
		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);

	msleep(2);

bail:
	mutex_unlock(&dd->eep_lock);
	qib_cdbg(VERBOSE, "Released Mutex for QSFP %d:%d, ret %d\n", dd->unit,
		 ppd->port, ret);

no_unlock:
	return ret;
}
/*
 * qib_pcie_params - set up interrupts (MSI-X, MSI, or INTx) and record
 * the negotiated PCIe link speed/width for this device.
 * @minw: minimum expected link width; complain if the trained width is less
 * @nent: in/out MSI-X vector count (may be NULL/zero to skip MSI-X)
 *
 * Returns 0 if MSI-X was set up, otherwise the MSI setup result (or 1).
 * lbus_speed/width/info are filled in even on error paths.
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct qib_msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, pose, ret = 1;

	pose = pci_pcie_cap(dd->pcidev);
	if (!pose) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something anyway, so lbus_info is plausible */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8;
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);
	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
/*
 * Localize the stuff that should be done to change IB uC reset
 * returns <0 for errors.
 * @assert_rst: nonzero to assert uC reset, zero to de-assert it.
 */
static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
	u64 rst_val;
	int ret = 0;
	unsigned long flags;

	rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
	if (assert_rst) {
		/*
		 * Vendor recommends "interrupting" uC before reset, to
		 * minimize possible glitches.
		 */
		spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
		epb_access(dd, IB_7220_SERDES, 1);
		rst_val |= 1ULL;	/* bit 0 is the uC reset */
		/* Squelch possible parity error from _asserting_ reset */
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay to ensure it took effect */
		qib_read_kreg32(dd, kr_scratch);
		udelay(2);
		/* once it's reset, can remove interrupt */
		epb_access(dd, IB_7220_SERDES, -1);
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	} else {
		/*
		 * Before we de-assert reset, we need to deal with
		 * possible glitch on the Parity-error line.
		 * Suppress it around the reset, both in chip-level
		 * hwerrmask and in IB uC control reg. uC will allow
		 * it again during startup.
		 */
		u64 val;

		rst_val &= ~(1ULL);
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);

		ret = qib_resync_ibepb(dd);
		if (ret < 0)
			qib_dev_err(dd, "unable to re-sync IB EPB\n");

		/* set uC control regs to suppress parity errs */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
		if (ret < 0)
			goto bail;
		/* IB uC code past Version 1.32.17 allow suppression of wdog */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
					 0x80);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to set WDOG disable\n");
			goto bail;
		}
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay for startup */
		qib_read_kreg32(dd, kr_scratch);
		udelay(1);
		/* clear, then re-enable parity errs */
		qib_sd7220_clr_ibpar(dd);
		val = qib_read_kreg64(dd, kr_hwerrstatus);
		if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
			qib_dev_err(dd, "IBUC Parity still set after RST\n");
			/* leave it masked if it will not stay clear */
			dd->cspec->hwerrmask &=
				~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
		}
		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
	}

bail:
	return ret;
}
/*
 * Check TRIMDONE on all four SerDes channels after a reset (identified
 * in logs by @where), re-setting the per-channel TRIMDONE bit where it
 * was found cleared.  Diagnostic only: errors are logged, not returned.
 */
static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where)
{
	int ret, chn, baduns;
	u64 val;

	if (!where)
		where = "?";

	/* give time for reset to settle out in EPB */
	udelay(2);

	ret = qib_resync_ibepb(dd);
	if (ret < 0)
		qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);

	/* Do "sacrificial read" to get EPB in sane state after reset */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
	if (ret < 0)
		qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);

	/* Check/show "summary" Trim-done bit in IBCStatus */
	val = qib_read_kreg64(dd, kr_ibcstatus);
	if (!(val & (1ULL << 11)))
		qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
	/*
	 * Do "dummy read/mod/wr" to get EPB in sane state after reset
	 * The default value for MPREG6 is 0.
	 */
	udelay(2);

	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
	if (ret < 0)
		qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
	udelay(10);

	baduns = 0;

	for (chn = 3; chn >= 0; --chn) {
		/* Read CTRL reg for each channel to check TRIMDONE */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
					 IB_CTRL2(chn), 0, 0);
		if (ret < 0)
			qib_dev_err(dd,
				    "Failed checking TRIMDONE, chn %d (%s)\n",
				    chn, where);

		/* bit 4 of CTRL2 is the TRIMDONE flag */
		if (!(ret & 0x10)) {
			int probe;

			baduns |= (1 << chn);
			qib_dev_err(dd,
				    "TRIMDONE cleared on chn %d (%02X). (%s)\n",
				    chn, ret, where);
			/* diagnostic probes to see what the bus returns */
			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						   IB_PGUDP(0), 0, 0);
			qib_dev_err(dd, "probe is %d (%02X)\n",
				    probe, probe);
			probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						   IB_CTRL2(chn), 0, 0);
			qib_dev_err(dd, "re-read: %d (%02X)\n",
				    probe, probe);
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 IB_CTRL2(chn), 0x10, 0x10);
			if (ret < 0)
				qib_dev_err(dd,
					    "Err on TRIMDONE rewrite1\n");
		}
	}
	for (chn = 3; chn >= 0; --chn) {
		/* Read CTRL reg for each channel to check TRIMDONE */
		if (baduns & (1 << chn)) {
			qib_dev_err(dd,
				    "Resetting TRIMDONE on chn %d (%s)\n",
				    chn, where);
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 IB_CTRL2(chn), 0x10, 0x10);
			if (ret < 0)
				qib_dev_err(dd,
					    "Failed re-setting TRIMDONE, chn %d (%s)\n",
					    chn, where);
		}
	}
}
/*
 * Below is portion of IBA7220-specific bringup_serdes() that actually
 * deals with registers and memory within the SerDes itself.
 * Post IB uC code version 1.32.17, was_reset being 1 is not really
 * informative, so we double-check.
 *
 * Returns 0 on success, 1 on failure (note: NOT negative-errno style).
 */
int qib_sd7220_init(struct qib_devdata *dd)
{
	const struct firmware *fw;
	int ret = 1; /* default to failure */
	int first_reset, was_reset;

	/* SERDES MPU reset recorded in D0 */
	was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
	if (!was_reset) {
		/* entered with reset not asserted, we need to do it */
		qib_ibsd_reset(dd, 1);
		qib_sd_trimdone_monitor(dd, "Driver-reload");
	}

	/*
	 * NOTE(review): on failure request_firmware() is documented to leave
	 * *fw NULL, which makes the unconditional release_firmware(fw) at
	 * "done" safe — confirm against the target kernel version.
	 */
	ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
	if (ret)
		goto done;

	/* Substitute our deduced value for was_reset */
	ret = qib_ibsd_ucode_loaded(dd->pport, fw);
	if (ret < 0)
		goto bail;

	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
	/*
	 * Alter some regs per vendor latest doc, reset-defaults
	 * are not right for IB.
	 */
	ret = qib_sd_early(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
		goto bail;
	}
	/*
	 * Set DAC manual trim IB.
	 * We only do this once after chip has been reset (usually
	 * same as once per system boot).
	 */
	if (first_reset) {
		ret = qib_sd_dactrim(dd);
		if (ret < 0) {
			qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
			goto bail;
		}
	}
	/*
	 * Set various registers (DDS and RXEQ) that will be
	 * controlled by IBC (in 1.2 mode) to reasonable preset values
	 * Calling the "internal" version avoids the "check for needed"
	 * and "trimdone monitor" that might be counter-productive.
	 */
	ret = qib_internal_presets(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES presets\n");
		goto bail;
	}
	ret = qib_sd_trimself(dd, 0x80);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
		goto bail;
	}

	/* Load image, then try to verify */
	ret = 0; /* Assume success */
	if (first_reset) {
		int vfy;
		int trim_done;

		ret = qib_sd7220_ib_load(dd, fw);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to load IB SERDES image\n");
			goto bail;
		} else {
			/* Loaded image, try to verify */
			vfy = qib_sd7220_ib_vfy(dd, fw);
			if (vfy != ret) {
				qib_dev_err(dd, "SERDES PRAM VFY failed\n");
				goto bail;
			} /* end if verified */
		} /* end if loaded */

		/*
		 * Loaded and verified. Almost good...
		 * hold "success" in ret
		 */
		ret = 0;
		/*
		 * Prev steps all worked, continue bringup
		 * De-assert RESET to uC, only in first reset, to allow
		 * trimming.
		 *
		 * Since our default setup sets START_EQ1 to
		 * PRESET, we need to clear that for this very first run.
		 */
		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
		if (ret < 0) {
			qib_dev_err(dd, "Failed clearing START_EQ1\n");
			goto bail;
		}

		qib_ibsd_reset(dd, 0);
		/*
		 * If this is not the first reset, trimdone should be set
		 * already. We may need to check about this.
		 */
		trim_done = qib_sd_trimdone_poll(dd);
		/*
		 * Whether or not trimdone succeeded, we need to put the
		 * uC back into reset to avoid a possible fight with the
		 * IBC state-machine.
		 */
		qib_ibsd_reset(dd, 1);

		if (!trim_done) {
			qib_dev_err(dd, "No TRIMDONE seen\n");
			goto bail;
		}
		/*
		 * DEBUG: check each time we reset if trimdone bits have
		 * gotten cleared, and re-set them.
		 */
		qib_sd_trimdone_monitor(dd, "First-reset");
		/* Remember so we do not re-do the load, dactrim, etc. */
		dd->cspec->serdes_first_init_done = 1;
	}
	/*
	 * setup for channel training and load values for
	 * RxEq and DDS in tables used by IBC in IB1.2 mode
	 */
	ret = 0;
	if (qib_sd_setvals(dd) >= 0)
		goto done;
bail:
	ret = 1;
done:
	/* start relock timer regardless, but start at 1 second */
	set_7220_relock_poll(dd, -1);

	release_firmware(fw);
	return ret;
}
/*
 * qib_pcie_params - set up interrupts (MSI-X, MSI, or INTx) and record
 * the negotiated PCIe link speed/width for this device.
 * @minw: minimum expected link width; complain if the trained width is less
 * @nent: in/out MSI-X vector count (may be NULL/zero to skip MSI-X)
 *
 * Returns 0 if MSI-X was set up, otherwise the MSI setup result (or 1).
 * lbus_speed/width/info are filled in even on error paths.
 */
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
		    struct qib_msix_entry *entry)
{
	u16 linkstat, speed;
	int pos = 0, ret = 1;

	if (!pci_is_pcie(dd->pcidev)) {
		qib_dev_err(dd, "Can't find PCI Express capability!\n");
		/* set up something... */
		dd->lbus_width = 1;
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		goto bail;
	}

	pos = dd->pcidev->msix_cap;
	if (nent && *nent && pos) {
		qib_msix_setup(dd, pos, nent, entry);
		ret = 0; /* did it, either MSIx or INTx */
	} else {
		pos = dd->pcidev->msi_cap;
		if (pos)
			ret = qib_msi_setup(dd, pos);
		else
			qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
	}
	if (!pos)
		qib_enable_intx(dd->pcidev);

	pcie_capability_read_word(dd->pcidev, PCI_EXP_LNKSTA, &linkstat);
	/*
	 * speed is bits 0-3, linkwidth is bits 4-8
	 * no defines for them in headers
	 */
	speed = linkstat & 0xf;
	linkstat >>= 4;
	linkstat &= 0x1f;
	dd->lbus_width = linkstat;

	switch (speed) {
	case 1:
		dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
		break;
	case 2:
		dd->lbus_speed = 5000; /* Gen2, 5GHz */
		break;
	default: /* not defined, assume gen1 */
		dd->lbus_speed = 2500;
		break;
	}

	/*
	 * Check against expected pcie width and complain if "wrong"
	 * on first initialization, not afterwards (i.e., reset).
	 */
	if (minw && linkstat < minw)
		qib_dev_err(dd,
			    "PCIe width %u (x%u HCA), performance reduced\n",
			    linkstat, minw);

	qib_tune_pcie_caps(dd);
	qib_tune_pcie_coalesce(dd);

bail:
	/* fill in string, even on errors */
	snprintf(dd->lbus_info, sizeof(dd->lbus_info),
		 "PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
	return ret;
}
/**
 * qib_get_eeprom_info- get the GUID et al. from the TSWI EEPROM device
 * @dd: the qlogic_ib device
 *
 * We have the capability to use the nguid field, and get
 * the guid from the first chip's flash, to use for all of them.
 */
void qib_get_eeprom_info(struct qib_devdata *dd)
{
	void *buf;
	struct qib_flash *ifp;
	__be64 guid;
	int len, eep_stat;
	u8 csum, *bguid;
	int t = dd->unit;
	struct qib_devdata *dd0 = qib_lookup(0);

	/*
	 * NOTE(review): dd0 is dereferenced without a NULL check below;
	 * confirm qib_lookup(0) cannot return NULL here (t != 0 implies
	 * a unit-0 device exists?) — otherwise this can oops.
	 */
	if (t && dd0->nguid > 1 && t <= dd0->nguid) {
		u8 oguid;
		/* derive this unit's GUID from unit 0's base GUID + t */
		dd->base_guid = dd0->base_guid;
		bguid = (u8 *) &dd->base_guid;

		oguid = bguid[7];
		bguid[7] += t;
		if (oguid > bguid[7]) {
			/* low byte wrapped; carry into the next bytes */
			if (bguid[6] == 0xff) {
				if (bguid[5] == 0xff) {
					qib_dev_err(dd, "Can't set %s GUID"
						    " from base, wraps to"
						    " OUI!\n",
						    qib_get_unit_name(t));
					dd->base_guid = 0;
					goto bail;
				}
				bguid[5]++;
			}
			bguid[6]++;
		}
		dd->nguid = 1;
		goto bail;
	}

	/*
	 * Read full flash, not just currently used part, since it may have
	 * been written with a newer definition.
	 */
	len = sizeof(struct qib_flash);
	buf = vmalloc(len);
	if (!buf) {
		qib_dev_err(dd, "Couldn't allocate memory to read %u "
			    "bytes from eeprom for GUID\n", len);
		goto bail;
	}

	/*
	 * Use "public" eeprom read function, which does locking and
	 * figures out device. This will migrate to chip-specific.
	 */
	eep_stat = qib_eeprom_read(dd, 0, buf, len);

	if (eep_stat) {
		qib_dev_err(dd, "Failed reading GUID from eeprom\n");
		goto done;
	}
	ifp = (struct qib_flash *)buf;

	csum = flash_csum(ifp, 0);
	if (csum != ifp->if_csum) {
		qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
			    "0x%x, not 0x%x\n", csum, ifp->if_csum);
		goto done;
	}
	if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
	    *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
		qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
			    *(unsigned long long *) ifp->if_guid);
		/* don't allow GUID if all 0 or all 1's */
		goto done;
	}

	/* complain, but allow it */
	if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
		qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
			    "default, probably not correct!\n",
			    *(unsigned long long *) ifp->if_guid);

	bguid = ifp->if_guid;
	if (!bguid[0] && !bguid[1] && !bguid[2]) {
		/*
		 * Original incorrect GUID format in flash; fix in
		 * core copy, by shifting up 2 octets; don't need to
		 * change top octet, since both it and shifted are 0.
		 */
		bguid[1] = bguid[3];
		bguid[2] = bguid[4];
		bguid[3] = 0;
		bguid[4] = 0;
		guid = *(__be64 *) ifp->if_guid;
	} else
		guid = *(__be64 *) ifp->if_guid;
	dd->base_guid = guid;
	dd->nguid = ifp->if_numguid;
	/*
	 * Things are slightly complicated by the desire to transparently
	 * support both the Pathscale 10-digit serial number and the QLogic
	 * 13-character version.
	 */
	if ((ifp->if_fversion > 1) && ifp->if_sprefix[0] &&
	    ((u8 *) ifp->if_sprefix)[0] != 0xFF) {
		char *snp = dd->serial;

		/*
		 * This board has a Serial-prefix, which is stored
		 * elsewhere for backward-compatibility.
		 */
		memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
		snp[sizeof ifp->if_sprefix] = '\0';
		len = strlen(snp);
		snp += len;
		/* append as much of the serial as fits in dd->serial */
		len = (sizeof dd->serial) - len;
		if (len > sizeof ifp->if_serial)
			len = sizeof ifp->if_serial;
		memcpy(snp, ifp->if_serial, len);
	} else
		memcpy(dd->serial, ifp->if_serial,
		       sizeof ifp->if_serial);
	if (!strstr(ifp->if_comment, "Tested successfully"))
		qib_dev_err(dd, "Board SN %s did not pass functional "
			    "test: %s\n", dd->serial, ifp->if_comment);

	memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
	/*
	 * Power-on (actually "active") hours are kept as little-endian value
	 * in EEPROM, but as seconds in a (possibly as small as 24-bit)
	 * atomic_t while running.
	 */
	atomic_set(&dd->active_time, 0);
	dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);

done:
	vfree(buf);

bail:;
}
/*
 * qib_create_port_files - create per-port sysfs objects for a qib port
 * @ibdev: the InfiniBand device the port belongs to
 * @port_num: 1-based port number; validated against dd->num_pports
 * @kobj: parent kobject under which the per-port directories are created
 *
 * Creates the "linkcontrol", "sl2vl" and "diag_counters" kobjects and,
 * when congestion control is configured (qib_cc_table_size set and the
 * shadow table allocated), the "CCMgtA" kobject with its two binary
 * attribute files.
 *
 * Returns 0 on success or a negative errno; on failure all objects
 * created so far are torn down.
 *
 * NOTE: kobject_init_and_add() takes a reference on the kobject even
 * when it fails, so each failure path below must fall into the label
 * that kobject_put()s the kobject that just failed (not only the ones
 * before it), or the kobject's name allocation leaks and its release
 * function is never called.
 */
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];

	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		/* put the reference taken by the failed init_and_add */
		goto bail_link;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		/* put sl2vl_kobj too, not just the earlier kobjects */
		goto bail_sl;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);

	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_diagc;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);

	/* congestion control objects are optional */
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;

	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc;
	}

	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc;
	}

	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
			"Skipping Congestion Control table sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_cc_entry_bin;
	}

	qib_devinfo(dd->pcidev,
		"IB%u: Congestion Control Agent enabled for port %d\n",
		dd->unit, port_num);

	return 0;

bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}
/** * qib_update_eeprom_log - copy active-time and error counters to eeprom * @dd: the qlogic_ib device * * Although the time is kept as seconds in the qib_devdata struct, it is * rounded to hours for re-write, as we have only 16 bits in EEPROM. * First-cut code reads whole (expected) struct qib_flash, modifies, * re-writes. Future direction: read/write only what we need, assuming * that the EEPROM had to have been "good enough" for driver init, and * if not, we aren't making it worse. * */ int qib_update_eeprom_log(struct qib_devdata *dd) { void *buf; struct qib_flash *ifp; int len, hi_water; uint32_t new_time, new_hrs; u8 csum; int ret, idx; unsigned long flags; /* first, check if we actually need to do anything. */ ret = 0; for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { if (dd->eep_st_new_errs[idx]) { ret = 1; break; } } new_time = atomic_read(&dd->active_time); if (ret == 0 && new_time < 3600) goto bail; /* * The quick-check above determined that there is something worthy * of logging, so get current contents and do a more detailed idea. * read full flash, not just currently used part, since it may have * been written with a newer definition */ len = sizeof(struct qib_flash); buf = vmalloc(len); ret = 1; if (!buf) { qib_dev_err(dd, "Couldn't allocate memory to read %u " "bytes from eeprom for logging\n", len); goto bail; } /* Grab semaphore and read current EEPROM. If we get an * error, let go, but if not, keep it until we finish write. 
*/ ret = mutex_lock_interruptible(&dd->eep_lock); if (ret) { qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); goto free_bail; } ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); if (ret) { mutex_unlock(&dd->eep_lock); qib_dev_err(dd, "Unable read EEPROM for logging\n"); goto free_bail; } ifp = (struct qib_flash *)buf; csum = flash_csum(ifp, 0); if (csum != ifp->if_csum) { mutex_unlock(&dd->eep_lock); qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", csum, ifp->if_csum); ret = 1; goto free_bail; } hi_water = 0; spin_lock_irqsave(&dd->eep_st_lock, flags); for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { int new_val = dd->eep_st_new_errs[idx]; if (new_val) { /* * If we have seen any errors, add to EEPROM values * We need to saturate at 0xFF (255) and we also * would need to adjust the checksum if we were * trying to minimize EEPROM traffic * Note that we add to actual current count in EEPROM, * in case it was altered while we were running. */ new_val += ifp->if_errcntp[idx]; if (new_val > 0xFF) new_val = 0xFF; if (ifp->if_errcntp[idx] != new_val) { ifp->if_errcntp[idx] = new_val; hi_water = offsetof(struct qib_flash, if_errcntp) + idx; } /* * update our shadow (used to minimize EEPROM * traffic), to match what we are about to write. */ dd->eep_st_errs[idx] = new_val; dd->eep_st_new_errs[idx] = 0; } }