static int auvia_reset_codec(void *addr) { struct auvia_softc *sc; pcireg_t r; int i; /* perform a codec cold reset */ sc = addr; r = pci_conf_read(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK); r &= ~AUVIA_PCICONF_ACNOTRST; /* enable RESET (active low) */ pci_conf_write(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK, r); delay(2); r |= AUVIA_PCICONF_ACNOTRST; /* disable RESET (inactive high) */ pci_conf_write(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK, r); delay(200); for (i = 500000; i != 0 && !(pci_conf_read(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK) & AUVIA_PCICONF_PRIVALID); i--) DELAY(1); if (i == 0) { printf("%s: codec reset timed out\n", device_xname(sc->sc_dev)); return ETIMEDOUT; } return 0; }
/*
 * Probe whether a PCI mapping register is implemented.  I/O and
 * memory decoding are turned off around the probe so the transient
 * all-ones BAR value cannot cause stray decodes.  Returns 1 (and the
 * type bits via *typep) if implemented, 0 otherwise.
 */
int
pci_mapreg_probe(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t *typep)
{
	const pcireg_t decode =
	    PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE;
	pcireg_t address, mask, csr;
	int s;

	s = splhigh();

	/* Quiesce decoding while we size the register. */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (csr & decode)
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG,
		    csr & ~decode);

	/* Save BAR, write all 1s, read back the size mask, restore. */
	address = pci_conf_read(pc, tag, reg);
	pci_conf_write(pc, tag, reg, 0xffffffff);
	mask = pci_conf_read(pc, tag, reg);
	pci_conf_write(pc, tag, reg, address);

	if (csr & decode)
		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	if (mask == 0)		/* unimplemented mapping register */
		return (0);

	if (typep != NULL)
		*typep = _PCI_MAPREG_TYPEBITS(address);
	return (1);
}
/*
 * pmf resume hook: re-enable the YMF7xx after suspend, reinitialize
 * the controller and restore saved register/mixer state.
 */
static bool
yds_resume(device_t dv, const pmf_qual_t *qual)
{
	struct yds_softc *sc = device_private(dv);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;
	pcireg_t reg;

	/* Disable legacy mode */
	mutex_enter(&sc->sc_lock);
	mutex_spin_enter(&sc->sc_intr_lock);
	reg = pci_conf_read(pc, tag, YDS_PCI_LEGACY);
	pci_conf_write(pc, tag, YDS_PCI_LEGACY, reg & YDS_PCI_LEGACY_LAD);

	/* Enable the device. */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	reg |= (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);
	/* Read-back; result unused -- presumably a posting flush. */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);

	if (yds_init(sc)) {
		aprint_error_dev(dv, "reinitialize failed\n");
		/* Drop locks in reverse acquisition order on failure. */
		mutex_spin_exit(&sc->sc_intr_lock);
		mutex_exit(&sc->sc_lock);
		return false;
	}

	/* Restore the DS control register saved at suspend time. */
	pci_conf_write(pc, tag, YDS_PCI_DSCTRL, sc->sc_dsctrl);
	mutex_spin_exit(&sc->sc_intr_lock);
	/* Restore AC'97 codec port settings (needs sc_lock, not spin). */
	sc->sc_codec[0].codec_if->vtbl->restore_ports(sc->sc_codec[0].codec_if);
	mutex_exit(&sc->sc_lock);

	return true;
}
static int xhci_pci_port_route(struct xhci_pci_softc *psc) { struct xhci_softc * const sc = &psc->sc_xhci; pcireg_t val; /* * Check USB3 Port Routing Mask register that indicates the ports * can be changed from OS, and turn on by USB3 Port SS Enable register. */ val = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_XHCI_INTEL_USB3PRM); aprint_debug_dev(sc->sc_dev, "USB3PRM / USB3.0 configurable ports: 0x%08x\n", val); pci_conf_write(psc->sc_pc, psc->sc_tag, PCI_XHCI_INTEL_USB3_PSSEN, val); val = pci_conf_read(psc->sc_pc, psc->sc_tag,PCI_XHCI_INTEL_USB3_PSSEN); aprint_debug_dev(sc->sc_dev, "USB3_PSSEN / Enabled USB3.0 ports under xHCI: 0x%08x\n", val); /* * Check USB2 Port Routing Mask register that indicates the USB2.0 * ports to be controlled by xHCI HC, and switch them to xHCI HC. */ val = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_XHCI_INTEL_USB2PRM); aprint_debug_dev(sc->sc_dev, "XUSB2PRM / USB2.0 ports can switch from EHCI to xHCI:" "0x%08x\n", val); pci_conf_write(psc->sc_pc, psc->sc_tag, PCI_XHCI_INTEL_XUSB2PR, val); val = pci_conf_read(psc->sc_pc, psc->sc_tag, PCI_XHCI_INTEL_XUSB2PR); aprint_debug_dev(sc->sc_dev, "XUSB2PR / USB2.0 ports under xHCI: 0x%08x\n", val); return 0; }
/*
 * Attach the AMD8131 IOAPIC helper: enable the bridge's IOAPIC and
 * take function 0's PCI-X misc register out of "no IOAPIC" mode.
 */
void
aapic_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	int bus, dev, func;
	pcitag_t tag;
	pcireg_t reg;

	printf("\n");

#if NIOAPIC > 0
	/* Nothing to do unless an I/O APIC was actually found. */
	if (nioapics == 0)
		return;
#else
	/* Kernel configured without I/O APIC support: bail out early. */
	return;
#endif

	/* Enable the IOAPIC in the function we attached to. */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMD8131_IOAPIC_CTL);
	reg |= AMD8131_IOAEN;
	pci_conf_write(pa->pa_pc, pa->pa_tag, AMD8131_IOAPIC_CTL, reg);

	/*
	 * The PCI-X misc register lives in function 0 of the same
	 * device, so build a fresh tag for function 0 and clear the
	 * NIOAMODE bit there.
	 */
	pci_decompose_tag(pa->pa_pc, pa->pa_tag, &bus, &dev, &func);
	func = 0;
	tag = pci_make_tag(pa->pa_pc, bus, dev, func);
	reg = pci_conf_read(pa->pa_pc, tag, AMD8131_PCIX_MISC);
	reg &= ~AMD8131_NIOAMODE;
	pci_conf_write(pa->pa_pc, tag, AMD8131_PCIX_MISC, reg);
}
void dc_cardbus_setup(struct dc_cardbus_softc *csc) { cardbus_devfunc_t ct = csc->sc_ct; cardbus_chipset_tag_t cc = ct->ct_cc; pci_chipset_tag_t pc = csc->sc_pc; pcireg_t reg; int r; /* wakeup the card if needed */ reg = pci_conf_read(pc, csc->sc_tag, PCI_CFDA); if (reg | (DC_CFDA_SUSPEND|DC_CFDA_STANDBY)) { pci_conf_write(pc, csc->sc_tag, PCI_CFDA, reg & ~(DC_CFDA_SUSPEND|DC_CFDA_STANDBY)); } if (pci_get_capability(csc->sc_pc, csc->sc_tag, PCI_CAP_PWRMGMT, &r, 0)) { r = pci_conf_read(csc->sc_pc, csc->sc_tag, r + 4) & 3; if (r) { printf("%s: awakening from state D%d\n", csc->sc_dc.sc_dev.dv_xname, r); pci_conf_write(csc->sc_pc, csc->sc_tag, r + 4, 0); } } (*ct->ct_cf->cardbus_ctrl)(cc, csc->sc_actype); (*ct->ct_cf->cardbus_ctrl)(cc, CARDBUS_BM_ENABLE); reg = pci_conf_read(csc->sc_pc, csc->sc_tag, PCI_COMMAND_STATUS_REG); reg |= PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE; pci_conf_write(csc->sc_pc, csc->sc_tag, PCI_COMMAND_STATUS_REG, reg); reg = pci_conf_read(csc->sc_pc, csc->sc_tag, PCI_COMMAND_STATUS_REG); }
/*
 * Generic PCI child suspend hook: capture configuration space,
 * disable decoding/busmastering and put the device into D3.
 */
static bool
pci_child_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct pci_child_power *priv = device_pmf_bus_private(dv);
	pcireg_t ocsr, csr;

	/* Save config space so resume can restore it. */
	pci_conf_capture(priv->p_pc, priv->p_tag, &priv->p_pciconf);

	if (!priv->p_has_pm)
		return true; /* ??? hopefully handled by ACPI */

	if (PCI_CLASS(priv->p_class) == PCI_CLASS_DISPLAY)
		return true; /* XXX */

	/* disable decoding and busmastering, see pcipm1.2 ch. 8.2.1 */
	ocsr = pci_conf_read(priv->p_pc, priv->p_tag,
	    PCI_COMMAND_STATUS_REG);
	csr = ocsr & ~(PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(priv->p_pc, priv->p_tag, PCI_COMMAND_STATUS_REG,
	    csr);

	if (pci_set_powerstate_int(priv->p_pc, priv->p_tag,
	    PCI_PMCSR_STATE_D3, priv->p_pm_offset, priv->p_pm_cap) != 0) {
		/* D3 not supported: undo the command register change. */
		pci_conf_write(priv->p_pc, priv->p_tag,
		    PCI_COMMAND_STATUS_REG, ocsr);
		aprint_error_dev(dv, "unsupported state, continuing.\n");
		return false;
	}

	return true;
}
static int sis_hostbr_match(const struct pci_attach_args *pa) { int i; pcireg_t id, masqid, reg; id = pa->pa_id; if (PCI_VENDOR(id) != PCI_VENDOR_SIS) return 0; if (PCI_PRODUCT(id) == PCI_PRODUCT_SIS_85C503) { reg = pci_conf_read(pa->pa_pc, pa->pa_tag, SIS96x_DETECT); pci_conf_write(pa->pa_pc, pa->pa_tag, SIS96x_DETECT, reg | SIS96x_DETECT_MASQ); masqid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); if (((PCI_PRODUCT(masqid) & 0xfff0) != 0x0960) && (PCI_PRODUCT(masqid) != 0x0018)) { pci_conf_write(pa->pa_pc, pa->pa_tag, SIS96x_DETECT, reg); } else { id = masqid; } } sis_hostbr_type_match = NULL; for (i = 0; i < __arraycount(sis_hostbr_type); i++) { if (PCI_PRODUCT(id) == sis_hostbr_type[i].id && PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) sis_hostbr_type_match = &sis_hostbr_type[i]; } return (sis_hostbr_type_match != NULL); }
void auvia_reset_codec(void *addr) { int i; struct auvia_softc *sc = addr; pcireg_t r; /* perform a codec cold reset */ r = pci_conf_read(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK); r &= ~AUVIA_PCICONF_ACNOTRST; /* enable RESET (active low) */ pci_conf_write(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK, r); delay(2); r |= AUVIA_PCICONF_ACNOTRST; /* disable RESET (inactive high) */ pci_conf_write(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK, r); delay(200); for (i = 500000; i != 0 && !(pci_conf_read(sc->sc_pc, sc->sc_pt, AUVIA_PCICONF_JUNK) & AUVIA_PCICONF_PRIVALID); i--) DELAY(1); if (i == 0) printf("%s: codec reset timed out\n", sc->sc_dev.dv_xname); }
/*
 * Setup proper Local<->PCI mapping
 * PCI memory window: 256M @ PCI0MEMBASE with direct memory translation
 */
void
ibm4xx_setup_pci(void)
{
	pci_chipset_tag_t pc = &genppc_ibm4xx_chipset;
	pcitag_t tag;

	/* Make the PCI configuration registers accessible first. */
	setup_pcicfg_window();

	/* Disable all three memory mappers */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM0MA,
	    0x00000000);	/* disabled */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM1MA,
	    0x00000000);	/* disabled */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM2MA,
	    0x00000000);	/* disabled */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PTM1MS,
	    0x00000000);	/* Can't really disable PTM1. */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PTM2MS,
	    0x00000000);	/* disabled */

	/* Setup memory map #0 */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM0MA,
	    0xF0000001);	/* 256M non-prefetchable, enabled */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM0LA,
	    PCI0_MEM_BASE);
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM0PCILA,
	    PCI0_MEM_BASE);
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PMM0PCIHA, 0);

	/* Configure PCI bridge */
	tag = pci_make_tag(pc, 0, 0, 0);
	/*
	 * NOTE(review): enabling bus mastering / memory space on the
	 * bridge is left commented out below -- presumably done
	 * elsewhere; confirm before removing.
	 */
	// x = pci_conf_read(pc, tag, PCI0_CMD);	/* Read PCI command register */
	// pci_conf_write(pc, tag, PCI0_CMD, x | MA | ME);	/* enable bus mastering and memory space */

	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PTM1MS,
	    0xF0000001);	/* Enable PTM1 */
	bus_space_write_4(pcicfg_iot, pcicfg_ioh, PCIL_PTM1LA, 0);
	pci_conf_write(pc, tag, PCIC_PTM1BAR,
	    0);	/* Set up proper PCI->Local address base.  Always enabled */
	pci_conf_write(pc, tag, PCIC_PTM2BAR, 0);
}
void rtw_cardbus_setup(struct rtw_cardbus_softc *csc) { struct rtw_softc *sc = &csc->sc_rtw; cardbus_devfunc_t ct = csc->sc_ct; cardbus_chipset_tag_t cc = ct->ct_cc; pci_chipset_tag_t pc = csc->sc_pc; pcireg_t reg; int pmreg; if (pci_get_capability(pc, csc->sc_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) { reg = pci_conf_read(pc, csc->sc_tag, pmreg + 4) & 0x03; #if 1 /* XXX Probably not right for CardBus. */ if (reg == 3) { /* * The card has lost all configuration data in * this state, so punt. */ printf("%s: unable to wake up from power state D3\n", sc->sc_dev.dv_xname); return; } #endif if (reg != 0) { printf("%s: waking up from power state D%d\n", sc->sc_dev.dv_xname, reg); pci_conf_write(pc, csc->sc_tag, pmreg + 4, 0); } } /* Program the BAR. */ pci_conf_write(pc, csc->sc_tag, csc->sc_bar_reg, csc->sc_bar_val); /* Make sure the right access type is on the CardBus bridge. */ (*ct->ct_cf->cardbus_ctrl)(cc, csc->sc_cben); (*ct->ct_cf->cardbus_ctrl)(cc, CARDBUS_BM_ENABLE); /* Enable the appropriate bits in the PCI CSR. */ reg = pci_conf_read(pc, csc->sc_tag, PCI_COMMAND_STATUS_REG); reg &= ~(PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE); reg |= csc->sc_csr; pci_conf_write(pc, csc->sc_tag, PCI_COMMAND_STATUS_REG, reg); /* * Make sure the latency timer is set to some reasonable * value. */ reg = pci_conf_read(pc, csc->sc_tag, PCI_BHLC_REG); if (PCI_LATTIMER(reg) < 0x20) { reg &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT); reg |= (0x20 << PCI_LATTIMER_SHIFT); pci_conf_write(pc, csc->sc_tag, PCI_BHLC_REG, reg); } }
int pci_func_configure(struct pci_func *f) { uint32_t bar_width; uint32_t bar; for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END; bar += bar_width) { uint32_t oldv = pci_conf_read(f, bar); bar_width = 4; pci_conf_write(f, bar, 0xffffffff); uint32_t rv = pci_conf_read(f, bar); if (rv == 0) continue; int regnum = PCI_MAPREG_NUM(bar); uint32_t base, size; if (PCI_MAPREG_TYPE(rv) == PCI_MAPREG_TYPE_MEM) { if (PCI_MAPREG_MEM_TYPE(rv) == PCI_MAPREG_MEM_TYPE_64BIT) bar_width = 8; size = PCI_MAPREG_MEM_SIZE(rv); base = PCI_MAPREG_MEM_ADDR(oldv); if (!base) { /* device is not properly configured, allocate mmio address for it */ base = pci_allocate_memory(size); if (!base) return ENOMEM; oldv = base; } #ifdef SHOW_PCI_VERBOSE_INFO printf("pci: allocated mem region %d: %d bytes at 0x%x\n", regnum, size, base); #endif } else { #ifdef CONFIG_ARCH_HAS_IO_SPACE /* TODO handle IO region */ #endif } pci_conf_write(f, bar, oldv); f->reg_base[regnum] = base; f->reg_size[regnum] = size; } f->irq_line = pci_allocate_irqline(); /* FIXME */ f->irq_pin = PCI_INTERRUPT_PIN_C; pci_conf_write(f, PCI_INTERRUPT_REG, PCI_INTERRUPT_LINE(f->irq_line) | PCI_INTERRUPT_PIN(f->irq_pin)); printf("pci: function %02x:%02x.%d (%04x:%04x) configured\n", f->bus->busno, f->dev, f->func, PCI_VENDOR(f->dev_id), PCI_PRODUCT(f->dev_id)); return 0; }
/*
 * Write a SiI3112 BA5 register indirectly through the PCI config
 * space address/data window, blocking block interrupts around the
 * two-write sequence.
 */
static __inline void
ba5_write_4_ind(struct pciide_softc *sc, bus_addr_t reg, uint32_t val)
{
	int s = splbio();

	pci_conf_write(sc->sc_pc, sc->sc_tag, SII3112_BA5_IND_ADDR, reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, SII3112_BA5_IND_DATA, val);
	splx(s);
}
/*
 * Determine the base address and size of the I/O mapping register
 * `reg'.  Returns 0 on success, 1 if the register is not an
 * implemented I/O mapping.  Output pointers are optional.
 */
static int
pci_io_find(pci_chipset_tag_t pc, pcitag_t tag, int reg, pcireg_t type,
    bus_addr_t *basep, bus_size_t *sizep, int *flagsp)
{
	pcireg_t address, mask;
	int s;

	if (reg < PCI_MAPREG_START ||
#if 0
	    /*
	     * Can't do this check; some devices have mapping registers
	     * way out in left field.
	     */
	    reg >= PCI_MAPREG_END ||
#endif
	    (reg & 3))
		panic("pci_io_find: bad request");

	/*
	 * Section 6.2.5.1, `Address Maps', tells us that:
	 *
	 * 1) The builtin software should have already mapped the device in a
	 * reasonable way.
	 *
	 * 2) A device which wants 2^n bytes of memory will hardwire the bottom
	 * n bits of the address to 0. As recommended, we write all 1s and see
	 * what we get back.
	 */
	s = splhigh();
	address = pci_conf_read(pc, tag, reg);
	pci_conf_write(pc, tag, reg, 0xffffffff);
	mask = pci_conf_read(pc, tag, reg);
	pci_conf_write(pc, tag, reg, address);
	splx(s);

	/* Reject registers that decode memory space instead of I/O. */
	if (PCI_MAPREG_TYPE(address) != PCI_MAPREG_TYPE_IO) {
		aprint_debug("pci_io_find: expected type i/o, found mem\n");
		return (1);
	}

	/* A zero size mask means the register is unimplemented. */
	if (PCI_MAPREG_IO_SIZE(mask) == 0) {
		aprint_debug("pci_io_find: void region\n");
		return (1);
	}

	if (basep != 0)
		*basep = PCI_MAPREG_IO_ADDR(address);
	if (sizep != 0)
		*sizep = PCI_MAPREG_IO_SIZE(mask);
	if (flagsp != 0)
		*flagsp = 0;

	return (0);
}
/*
 * Establish an MSI interrupt handler for the device described by
 * `pa' and program its MSI capability registers to target the
 * handler's CPU and vector.  Returns an opaque handle for
 * pci_msi_disestablish(), or NULL on failure.
 */
void *
pci_msi_establish(struct pci_attach_args *pa, int level,
    int (*func)(void *), void *arg)
{
	int co;
	struct intrhand *ih;
	struct msi_hdl *msih;
	struct cpu_info *ci;
	struct intrsource *is;
	pcireg_t reg;

	/* The device must advertise the MSI capability. */
	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSI, &co, 0))
		return NULL;

	ih = intr_establish(-1, &msi_pic, -1, IST_EDGE, level, func, arg, 0);
	if (ih == NULL)
		return NULL;

	msih = malloc(sizeof(*msih), M_DEVBUF, M_WAITOK);
	msih->ih = ih;
	msih->pc = pa->pa_pc;
	msih->tag = pa->pa_tag;
	msih->co = co;

	/* Find the IDT vector the established handler was given. */
	ci = ih->ih_cpu;
	is = ci->ci_isources[ih->ih_slot];

	/*
	 * Program the message address with the target LAPIC ID; the
	 * data register offset depends on whether the function uses
	 * 64-bit message addresses (per its MSI control register).
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, co + PCI_MSI_CTL);
	pci_conf_write(pa->pa_pc, pa->pa_tag, co + PCI_MSI_MADDR64_LO,
	    LAPIC_MSIADDR_BASE |
	    __SHIFTIN(ci->ci_cpuid, LAPIC_MSIADDR_DSTID_MASK));
	if (reg & PCI_MSI_CTL_64BIT_ADDR) {
		pci_conf_write(pa->pa_pc, pa->pa_tag,
		    co + PCI_MSI_MADDR64_HI, 0);
		/* XXX according to the manual, ASSERT is unnecessary if
		 * EDGE */
		pci_conf_write(pa->pa_pc, pa->pa_tag, co + PCI_MSI_MDATA64,
		    __SHIFTIN(is->is_idtvec, LAPIC_MSIDATA_VECTOR_MASK) |
		    LAPIC_MSIDATA_TRGMODE_EDGE |
		    LAPIC_MSIDATA_LEVEL_ASSERT | LAPIC_MSIDATA_DM_FIXED);
	} else {
		/* XXX according to the manual, ASSERT is unnecessary if
		 * EDGE */
		pci_conf_write(pa->pa_pc, pa->pa_tag, co + PCI_MSI_MDATA,
		    __SHIFTIN(is->is_idtvec, LAPIC_MSIDATA_VECTOR_MASK) |
		    LAPIC_MSIDATA_TRGMODE_EDGE |
		    LAPIC_MSIDATA_LEVEL_ASSERT | LAPIC_MSIDATA_DM_FIXED);
	}
	/* Finally, enable MSI delivery for the function. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, co + PCI_MSI_CTL,
	    PCI_MSI_CTL_MSI_ENABLE);
	return msih;
}
/*
 * pmf resume hook: restore the PIIX device-activity registers that
 * were saved into sc_devact[] at suspend time.
 */
static bool
piixpm_resume(device_t dv, const pmf_qual_t *qual)
{
	struct piixpm_softc *sc = device_private(dv);
	const pci_chipset_tag_t pc = sc->sc_pc;
	const pcitag_t tag = sc->sc_pcitag;

	pci_conf_write(pc, tag, PIIX_DEVACTA, sc->sc_devact[0]);
	pci_conf_write(pc, tag, PIIX_DEVACTB, sc->sc_devact[1]);

	return true;
}
/*
 * pmf resume hook: restore the PIIX IDE timing and UDMA registers
 * that were saved into sc_pm_reg[] at suspend time.
 */
static bool
piixide_resume(device_t dv, const pmf_qual_t *qual)
{
	struct pciide_softc *sc = device_private(dv);
	const pci_chipset_tag_t pc = sc->sc_pc;
	const pcitag_t tag = sc->sc_tag;

	pci_conf_write(pc, tag, PIIX_IDETIM, sc->sc_pm_reg[0]);
	pci_conf_write(pc, tag, PIIX_UDMAREG, sc->sc_pm_reg[1]);

	return true;
}
/*
 * Allocate an address region for a PCI mapping register out of the
 * given extent and program it into the device, then verify the
 * device latched it.  Returns 0 on success, 1 on failure.
 */
int
pciaddr_do_resource_allocate(struct shpcic_softc *sc, pci_chipset_tag_t pc,
    pcitag_t tag, int mapreg, struct extent *ex, int type, bus_addr_t *addr,
    bus_size_t size)
{
	bus_addr_t start;
	int error;

	/* Skip BARs that already look configured. */
	if (type == PCI_MAPREG_TYPE_IO) {
		if ((*addr & PCIADDR_PORT_END) != 0)
			return (0);
	} else if (*addr) {
		return (0);	/* no need to allocate */
	}

	/* XXX Don't allocate if device is AGP device to avoid conflict. */
	if (pciaddr_device_is_agp(pc, tag))
		return (0);

	start = (type == PCI_MAPREG_TYPE_MEM ?
	    sc->sc_membus_space.bus_base : sc->sc_iobus_space.bus_base);
	if (start < ex->ex_start || start + size - 1 >= ex->ex_end) {
		PCIBIOS_PRINTV(("No available resources. fixup failed\n"));
		return (1);
	}

	error = extent_alloc_subregion(ex, start, ex->ex_end, size, size,
	    0, 0, EX_FAST|EX_NOWAIT|EX_MALLOCOK, addr);
	if (error) {
		PCIBIOS_PRINTV(("No available resources. fixup failed\n"));
		return (1);
	}

	/* write new address to PCI device configuration header */
	pci_conf_write(pc, tag, mapreg, *addr);

	/* check */
	if (pcibr_flags & PCIBR_VERBOSE) {
		printf("pci_addr_fixup: ");
		pciaddr_print_devid(pc, tag);
	}
	if (pciaddr_ioaddr(pci_conf_read(pc, tag, mapreg)) != *addr) {
		pci_conf_write(pc, tag, mapreg, 0);	/* clear */
		printf("fixup failed. (new address=%#lx)\n", *addr);
		return (1);
	}

	if (pcibr_flags & PCIBR_VERBOSE)
		printf("new address 0x%08lx\n", *addr);

	return (0);
}
/*
 * Allocate an address for an unconfigured PCI mapping register from
 * the appropriate extent in `ctx', program the device and verify
 * the value stuck.  Returns 0 on success, 1 on failure.
 */
static int
pciaddr_do_resource_allocate(pci_chipset_tag_t pc, pcitag_t tag,
    int mapreg, void *ctx, int type, bus_addr_t *addr, bus_size_t size)
{
	struct pciaddr *pciaddrmap = ctx;
	struct extent *ex;
	bus_addr_t start;
	int error;

	if (*addr != 0)		/* no need to allocate */
		return 0;

	/* Pick the extent and preferred start for this space type. */
	if (type == PCI_MAPREG_TYPE_MEM) {
		ex = pciaddrmap->extent_mem;
		start = pciaddrmap->mem_alloc_start;
	} else {
		ex = pciaddrmap->extent_port;
		start = pciaddrmap->port_alloc_start;
	}

	/* XXX Don't allocate if device is AGP device to avoid conflict. */
	if (device_is_agp(pc, tag))
		return 0;

	if (start < ex->ex_start || start + size - 1 >= ex->ex_end) {
		aprint_debug("No available resources. fixup failed\n");
		return 1;
	}

	error = extent_alloc_subregion(ex, start, ex->ex_end, size, size,
	    0, EX_FAST|EX_NOWAIT|EX_MALLOCOK, (u_long *)addr);
	if (error != 0) {
		aprint_debug("No available resources. fixup failed\n");
		return 1;
	}

	/* write new address to PCI device configuration header */
	pci_conf_write(pc, tag, mapreg, *addr);

	/* check */
	aprint_debug("pci_addr_fixup: ");
	pciaddr_print_devid(pc, tag);
	if (pciaddr_ioaddr(pci_conf_read(pc, tag, mapreg)) != *addr) {
		pci_conf_write(pc, tag, mapreg, 0);	/* clear */
		aprint_error("fixup failed. (new address=%#x)\n",
		    (unsigned)*addr);
		return 1;
	}

	aprint_debug("new address 0x%08x\n", (unsigned)*addr);

	return 0;
}
/*
 * bwi(4) bus-op backend: write a PCI configuration register on
 * behalf of the chip-independent code.
 */
static void
bwi_pci_conf_write(void *sc, uint32_t reg, uint32_t val)
{
	struct bwi_pci_softc *psc = sc;

	pci_conf_write(psc->psc_pc, psc->psc_pcitag, reg, val);
}
int amdpm_activate(struct device *self, int act) { struct amdpm_softc *sc = (struct amdpm_softc *)self; int rv = 0; switch (act) { case DVACT_RESUME: if (timeout_initialized(&sc->sc_rnd_ch)) { pcireg_t cfg_reg; /* Restart the AMD PBC768_PMC/8111_PMC RNG */ cfg_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMDPM_CONFREG); pci_conf_write(sc->sc_pc, sc->sc_tag, AMDPM_CONFREG, cfg_reg | AMDPM_RNGEN); } rv = config_activate_children(self, act); break; default: rv = config_activate_children(self, act); break; } return (rv); }
/*
 * Reserve the address range of a BAR whose decoding is currently
 * disabled, so later allocations do not hand it out.  Returns 0 if
 * nothing needed reserving or the reservation succeeded, 1 on
 * conflict (the BAR is cleared in that case).
 */
int
pciaddr_do_resource_reserve_disabled(struct pcibios_softc *sc,
    pci_chipset_tag_t pc, pcitag_t tag, int mapreg, struct extent *ex,
    int type, u_long *addr, bus_size_t size)
{
	pcireg_t val;
	int error;

	if (*addr == 0)
		return (0);

	/* If decoding is already enabled, the region is in use: skip. */
	val = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (type == PCI_MAPREG_TYPE_MEM &&
	    (val & PCI_COMMAND_MEM_ENABLE) == PCI_COMMAND_MEM_ENABLE)
		return (0);
	if (type == PCI_MAPREG_TYPE_IO &&
	    (val & PCI_COMMAND_IO_ENABLE) == PCI_COMMAND_IO_ENABLE)
		return (0);

	/*
	 * Use %lx and cast size to u_long: *addr is u_long and size is
	 * bus_size_t, so the old %x specifiers mismatched the varargs.
	 */
	PCIBIOS_PRINTV(("disabled %s space at addr 0x%lx size 0x%lx\n",
	    type == PCI_MAPREG_TYPE_MEM ? "mem" : "io", *addr,
	    (u_long)size));

	error = extent_alloc_region(ex, *addr, size,
	    EX_NOWAIT | EX_MALLOCOK);
	if (error) {
		PCIBIOS_PRINTV(("Resource conflict.\n"));
		pci_conf_write(pc, tag, mapreg, 0);	/* clear */
		return (1);
	}

	return (0);
}
/*
 * Set the trigger mode for a VIA VP3-routed PCI interrupt, then
 * program it through the PIIX-compatible path and verify it took.
 * Returns 0 on success, 1 on failure.
 */
int
via82c586_set_trigger(pciintr_icu_handle_t v, int irq, int trigger)
{
	struct piix_handle *ph = v;
	int i, pciirq, shift, testtrig;
	pcireg_t reg;

	if (VP3_LEGAL_IRQ(irq) == 0)
		return (1);

	/* Find which PIRQ line routes this irq and adjust its trigger. */
	for (i = 0; i <= 3; i++) {
		via82c586_get_intr(v, i, &pciirq);
		if (pciirq != irq)
			continue;
		reg = pci_conf_read(ph->ph_pc, ph->ph_tag,
		    VP3_CFG_PIRQ_REG);
		shift = vp3_cfg_trigger_shift[i];
		/* XXX we only upgrade the trigger here */
		if (trigger == IST_LEVEL)
			reg &= ~(VP3_CFG_TRIGGER_MASK << shift);
		pci_conf_write(ph->ph_pc, ph->ph_tag, VP3_CFG_PIRQ_REG,
		    reg);
		break;
	}

	/* Program via the PIIX path and read back to confirm. */
	if (piix_set_trigger(v, irq, trigger) != 0 ||
	    via82c586_get_trigger(v, irq, &testtrig) != 0 ||
	    testtrig != trigger)
		return (1);

	return (0);
}
static int agp_via_set_aperture(struct agp_softc *sc, u_int32_t aperture) { struct agp_via_softc *asc = sc->as_chipc; u_int32_t apsize, key; pcireg_t reg; if (asc->regs == via_v2_regs) { /* * Reverse the magic from get_aperture. */ apsize = ((aperture - 1) >> 20) ^ 0xff; /* * Double check for sanity. */ if ((((apsize ^ 0xff) << 20) | ((1 << 20) - 1)) + 1 != aperture) return EINVAL; reg = pci_conf_read(sc->as_pc, sc->as_tag, asc->regs[REG_APSIZE]); reg &= ~0xff; reg |= apsize; pci_conf_write(sc->as_pc, sc->as_tag, asc->regs[REG_APSIZE], reg); } else {
/*
 * Reserve the address range of a BAR whose decoding is currently
 * disabled so later allocations cannot hand it out.  Returns 0 when
 * nothing needed reserving or the reservation succeeded, 1 on
 * conflict (the BAR is then cleared).
 */
int
pciaddr_do_resource_reserve_disabled(struct shpcic_softc *sc,
    pci_chipset_tag_t pc, pcitag_t tag, int mapreg, struct extent *ex,
    int type, bus_addr_t *addr, bus_size_t size)
{
	pcireg_t csr;
	int error;

	/* I/O BARs with no bits in the port range (presumably outside
	 * the managed window) and unset BARs are ignored. */
	if (type == PCI_MAPREG_TYPE_IO && (*addr & PCIADDR_PORT_END) == 0)
		return (0);
	if (*addr == 0)
		return (0);

	/* If decoding is already enabled, the region is in use: skip. */
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (type == PCI_MAPREG_TYPE_MEM &&
	    (csr & PCI_COMMAND_MEM_ENABLE) == PCI_COMMAND_MEM_ENABLE)
		return (0);
	if (type == PCI_MAPREG_TYPE_IO &&
	    (csr & PCI_COMMAND_IO_ENABLE) == PCI_COMMAND_IO_ENABLE)
		return (0);

	error = extent_alloc_region(ex, *addr, size,
	    EX_NOWAIT | EX_MALLOCOK);
	if (error != 0) {
		PCIBIOS_PRINTV(("Resource conflict.\n"));
		pci_conf_write(pc, tag, mapreg, 0);	/* clear */
		return (1);
	}

	return (0);
}
static int yds_init(struct yds_softc *sc) { uint32_t reg; DPRINTF(("yds_init()\n")); /* Download microcode */ if (yds_download_mcode(sc)) { aprint_error_dev(sc->sc_dev, "download microcode failed\n"); return 1; } /* Allocate DMA buffers */ if (yds_allocate_slots(sc)) { aprint_error_dev(sc->sc_dev, "could not allocate slots\n"); return 1; } /* Warm reset */ reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, YDS_PCI_DSCTRL); pci_conf_write(sc->sc_pc, sc->sc_pcitag, YDS_PCI_DSCTRL, reg | YDS_DSCTRL_WRST); delay(50000); return 0; }
void ehci_pci_takecontroller(struct ehci_pci_softc *sc, int silent) { u_int32_t cparams, eec, legsup; int eecp, i; cparams = EREAD4(&sc->sc, EHCI_HCCPARAMS); /* Synchronise with the BIOS if it owns the controller. */ for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; eecp = EHCI_EECP_NEXT(eec)) { eec = pci_conf_read(sc->sc_pc, sc->sc_tag, eecp); if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) continue; legsup = eec; if (legsup & EHCI_LEGSUP_BIOSOWNED) { pci_conf_write(sc->sc_pc, sc->sc_tag, eecp, legsup | EHCI_LEGSUP_OSOWNED); DPRINTF(("%s: waiting for BIOS to give up control\n", sc->sc.sc_bus.bdev.dv_xname)); for (i = 0; i < 5000; i++) { legsup = pci_conf_read(sc->sc_pc, sc->sc_tag, eecp); if ((legsup & EHCI_LEGSUP_BIOSOWNED) == 0) break; DELAY(1000); } if (silent == 0 && (legsup & EHCI_LEGSUP_BIOSOWNED)) printf("%s: timed out waiting for BIOS\n", sc->sc.sc_bus.bdev.dv_xname); } } }
/*
 * Detach the PCI HD Audio controller: tear down the core driver,
 * the interrupt handler and the register mapping, then quiesce the
 * function.
 */
static int
hdaudio_pci_detach(device_t self, int flags)
{
	struct hdaudio_pci_softc *sc = device_private(self);
	pcireg_t csr;

	hdaudio_detach(&sc->sc_hdaudio, flags);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_hdaudio.sc_memvalid == true) {
		bus_space_unmap(sc->sc_hdaudio.sc_memt,
		    sc->sc_hdaudio.sc_memh, sc->sc_hdaudio.sc_memsize);
		sc->sc_hdaudio.sc_memvalid = false;
	}

	/*
	 * Disable busmastering and MMIO access.  Clearing MEM_ENABLE
	 * (rather than BACKTOBACK_ENABLE, which the old code cleared)
	 * is what actually turns off memory-space decoding.
	 */
	csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
	    csr);

	pmf_device_deregister(self);

	return 0;
}
static void sdhc_pci_quirk_ti_hack(struct pci_attach_args *pa) { pci_chipset_tag_t pc = pa->pa_pc; pcitag_t tag; pcireg_t id, reg; /* Look at func - 1 for the flash device */ tag = pci_make_tag(pc, pa->pa_bus, pa->pa_device, pa->pa_function - 1); id = pci_conf_read(pc, tag, PCI_ID_REG); if (PCI_VENDOR(id) != PCI_VENDOR_TI) { return; } switch (PCI_PRODUCT(id)) { case PCI_PRODUCT_TI_PCI72111FM: case PCI_PRODUCT_TI_PCIXX12FM: break; default: return; } /* * Disable MMC/SD on the flash media controller so the * SD host takes over. */ reg = pci_conf_read(pc, tag, SDHC_PCI_GENERAL_CTL); reg |= MMC_SD_DIS; pci_conf_write(pc, tag, SDHC_PCI_GENERAL_CTL, reg); }
/*
 * Read one word of the VPD EEPROM at `addr' through the PCI VPD
 * capability, polling briefly for the address-flag to indicate
 * completion.  Returns 0 on success, EIO on failure.
 */
int
che_read_eeprom(struct cheg_softc *sc, struct pci_attach_args *pa,
    pcireg_t addr, pcireg_t *dp)
{
	pcireg_t rv, base;
	int i = 4;

	if (!pci_get_capability(pa->pa_pc, pa->pa_tag, CHE_PCI_CAP_ID_VPD,
	    &base, NULL)) {
		/*
		 * The old call passed a stray 'addr' argument with no
		 * matching conversion specifier (undefined behavior).
		 */
		printf("%s: VPD EEPROM not found\n", DEVNAME(sc));
		return EIO;
	}

	/* Start the VPD read cycle and poll for the address flag. */
	addr <<= 16;
	pci_conf_write(pa->pa_pc, pa->pa_tag, base, addr);

	while (i--) {
		delay(10);
		rv = pci_conf_read(pa->pa_pc, pa->pa_tag, base);
		if (rv & CHE_PCI_F_VPD_ADDR)
			break;
	}
	if (!(rv & CHE_PCI_F_VPD_ADDR)) {
		printf("%s: reading EEPROM address 0x%x failed\n",
		    DEVNAME(sc), addr);
		return EIO;
	}

	*dp = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    base + CHE_PCI_VPD_DATA);

	return (0);
}