static void
cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");
    sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
        sc->sc_wdcdev.UDMA_cap = 6;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    sc->sc_wdcdev.set_modes = cmd680_setup_channel;

    pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
    pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
    pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
        pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++)
        cmd680_channel_map(pa, sc, channel);
}
static void
jmpata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct jmide_softc *jmidesc = (struct jmide_softc *)sc;
    int channel;
    pcireg_t interface;
    struct pciide_channel *cp;

    if (pciide_chipen(sc, pa) == 0)
        return;
    aprint_verbose("%s: bus-master DMA support present", JM_NAME(jmidesc));
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");
    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = jmpata_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;
    wdc_allocate_regs(&sc->sc_wdcdev);
    /*
     * can't rely on the PCI_CLASS_REG content if the chip was in raid
     * mode. We have to fake interface
     */
    interface = PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        aprint_normal("%s: %s channel is ", JM_NAME(jmidesc),
            PCIIDE_CHANNEL_NAME(channel));
        switch(jmidesc->sc_chan_type[channel]) {
        case TYPE_PATA:
            aprint_normal("PATA");
            break;
        case TYPE_SATA:
            aprint_normal("SATA");
            break;
        case TYPE_NONE:
            aprint_normal("unused");
            break;
        default:
            aprint_normal("impossible");
            panic("jmide: wrong/uninitialised channel type");
        }
        aprint_normal("\n");
        if (jmidesc->sc_chan_type[channel] == TYPE_NONE) {
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
        }
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    bus_size_t cmdsize, ctlsize;
    pcireg_t interface;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    interface = PCI_INTERFACE(pa->pa_class);
    if (interface == 0) {
        artisea_chip_map_dpa (sc, pa);
        return;
    }

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
#ifdef PCIIDE_I31244_DISABLEDMA
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
        PCI_REVISION(pa->pa_class) == 0) {
        aprint_verbose(" but disabled due to rev. 0");
        sc->sc_dma_ok = 0;
    } else
#endif
        pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    /*
     * XXX Configure LEDs to show activity.
     */

    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
    }
    sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
            pciide_pci_intr);
    }
}
static void
artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    bus_size_t cmdsize, ctlsize;
    pcireg_t interface;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
#ifndef PCIIDE_I31244_ENABLEDMA
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
        PCI_REVISION(pa->pa_class) == 0) {
        aprint_normal(" but disabled due to rev. 0");
        sc->sc_dma_ok = 0;
    } else
#endif
        pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");

    /*
     * XXX Configure LEDs to show activity.
     */

    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    sc->sc_wdcdev.PIO_cap = 4;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        sc->sc_wdcdev.DMA_cap = 2;
        sc->sc_wdcdev.UDMA_cap = 6;
    }
    sc->sc_wdcdev.set_modes = sata_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    interface = PCI_INTERFACE(pa->pa_class);

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
            pciide_pci_intr);
    }
}
static void
piixsata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface, cmdsts;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        /* Do all revisions require DMA alignment workaround? */
        sc->sc_wdcdev.dma_init = piix_dma_init;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
    }
    sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    cmdsts = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
    cmdsts &= ~PCI_COMMAND_INTERRUPT_DISABLE;
    pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, cmdsts);

    if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
        PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

    interface = PCI_INTERFACE(pa->pa_class);

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
piccolo_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA32 | ATAC_CAP_DATA16;
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 5;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 3;
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
    }

    sc->sc_wdcdev.sc_atac.atac_set_modes = piccolo_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;
    /*
     * XXX one for now. We'll figure out how to talk to the second channel
     * later, hopefully! Second interface config is via the
     * "alternate PCI Configuration Space" whatever that is!
     */

    interface = PCI_INTERFACE(pa->pa_class);

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
sis_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    if (interface == 0) {
        ATADEBUG_PRINT(("sis_sata_chip_map interface == 0\n"),
            DEBUG_PROBE);
        interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
            PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
    }

    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "Silicon Integrated Systems 180/96X SATA controller "
        "(rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
void
gcsc_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface;
    bus_size_t cmdsize, ctlsize;

    printf(": DMA");
    pciide_mapreg_dma(sc, pa);

    sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }

    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    sc->sc_wdcdev.UDMA_cap = 4;
    sc->sc_wdcdev.set_modes = gcsc_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = 1;

    interface = PCI_INTERFACE(pa->pa_class);

    pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

    cp = &sc->pciide_channels[0];

    if (pciide_chansetup(sc, 0, interface) == 0)
        return;

    pciide_map_compat_intr(pa, cp, 0, interface);
    if (cp->hw_ok == 0)
        return;
    pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
    if (cp->hw_ok == 0) {
        pciide_unmap_compat_intr(pa, cp, 0, interface);
        return;
    }

    gcsc_setup_channel(&cp->wdc_channel);
}
static void
via_sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    int channel;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    if (interface == 0) {
        WDCDEBUG_PRINT(("via_sata_chip_map interface == 0\n"),
            DEBUG_PROBE);
        interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
            PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
    }

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");

    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA |
            WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    sc->sc_wdcdev.UDMA_cap = 6;

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    sc->sc_wdcdev.set_modes = sata_setup_channel;

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
            pciide_pci_intr);
    }
}
static void
gcscide_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    pcireg_t interface;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_set_modes = gcscide_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;

    interface = PCI_INTERFACE(pa->pa_class);

    wdc_allocate_regs(&sc->sc_wdcdev);

    if (pciide_chansetup(sc, 0, interface) == 0)
        return;
    pciide_mapchan(pa, &sc->pciide_channels[0], interface,
        &cmdsize, &ctlsize, pciide_pci_intr);
}
static void
stpc_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
    sc->sc_wdcdev.sc_atac.atac_set_modes = stpc_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
piix_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    u_int32_t idetim;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");
    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        /* Do all revisions require DMA alignment workaround? */
        sc->sc_wdcdev.dma_init = piix_dma_init;
        switch(sc->sc_pp->ide_product) {
        case PCI_PRODUCT_INTEL_82371AB_IDE:
        case PCI_PRODUCT_INTEL_82440MX_IDE:
        case PCI_PRODUCT_INTEL_82801AA_IDE:
        case PCI_PRODUCT_INTEL_82801AB_IDE:
        case PCI_PRODUCT_INTEL_82801BA_IDE:
        case PCI_PRODUCT_INTEL_82801BAM_IDE:
        case PCI_PRODUCT_INTEL_82801CA_IDE_1:
        case PCI_PRODUCT_INTEL_82801CA_IDE_2:
        case PCI_PRODUCT_INTEL_82801DB_IDE:
        case PCI_PRODUCT_INTEL_82801DBM_IDE:
        case PCI_PRODUCT_INTEL_82801EB_IDE:
        case PCI_PRODUCT_INTEL_6300ESB_IDE:
        case PCI_PRODUCT_INTEL_82801FB_IDE:
        case PCI_PRODUCT_INTEL_82801G_IDE:
        case PCI_PRODUCT_INTEL_82801HBM_IDE:
            sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
        }
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    switch(sc->sc_pp->ide_product) {
    case PCI_PRODUCT_INTEL_82801AA_IDE:
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
        break;
    case PCI_PRODUCT_INTEL_82801BA_IDE:
    case PCI_PRODUCT_INTEL_82801BAM_IDE:
    case PCI_PRODUCT_INTEL_82801CA_IDE_1:
    case PCI_PRODUCT_INTEL_82801CA_IDE_2:
    case PCI_PRODUCT_INTEL_82801DB_IDE:
    case PCI_PRODUCT_INTEL_82801DBM_IDE:
    case PCI_PRODUCT_INTEL_82801EB_IDE:
    case PCI_PRODUCT_INTEL_6300ESB_IDE:
    case PCI_PRODUCT_INTEL_82801FB_IDE:
    case PCI_PRODUCT_INTEL_82801G_IDE:
    case PCI_PRODUCT_INTEL_82801HBM_IDE:
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
        break;
    default:
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
    }
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
        sc->sc_wdcdev.sc_atac.atac_set_modes = piix_setup_channel;
    else
        sc->sc_wdcdev.sc_atac.atac_set_modes = piix3_4_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    ATADEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        ATADEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
            ATADEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
            ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    ATADEBUG_PRINT(("\n"), DEBUG_PROBE);

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
        if ((PIIX_IDETIM_READ(idetim, channel) & PIIX_IDETIM_IDE) == 0) {
#if 1
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "%s channel ignored (disabled)\n", cp->name);
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
#else
            pcireg_t interface;

            idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
            pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
            interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
                PCI_CLASS_REG));
            aprint_normal("channel %d idetim=%08x interface=%02x\n",
                channel, idetim, interface);
#endif
        }
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }

    ATADEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        ATADEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
            ATADEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
            ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    ATADEBUG_PRINT(("\n"), DEBUG_PROBE);
}
static int
via_sata_chip_map_common(struct pciide_softc *sc,
    const struct pci_attach_args *cpa)
{
    pcireg_t csr;
    int maptype, ret;
    struct pci_attach_args pac, *pa = &pac;

    pac = *cpa;

    if (pciide_chipen(sc, pa) == 0)
        return 0;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
        PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

    wdc_allocate_regs(&sc->sc_wdcdev);

    maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
        PCI_MAPREG_START + 0x14);
    switch(maptype) {
    case PCI_MAPREG_TYPE_IO:
        ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
            PCI_MAPREG_TYPE_IO, 0, &sc->sc_ba5_st, &sc->sc_ba5_sh,
            NULL, &sc->sc_ba5_ss);
        break;
    case PCI_MAPREG_MEM_TYPE_32BIT:
        /*
         * Enable memory-space access if it isn't already there.
         */
        csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
            PCI_COMMAND_STATUS_REG);
        if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 &&
            (pa->pa_flags & PCI_FLAGS_MEM_OKAY) != 0) {
            pci_conf_write(pa->pa_pc, pa->pa_tag,
                PCI_COMMAND_STATUS_REG,
                csr | PCI_COMMAND_MEM_ENABLE);
        }

        ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
            &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, &sc->sc_ba5_ss);
        break;
    default:
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "couldn't map sata regs, unsupported maptype (0x%x)\n",
            maptype);
        return 0;
    }
    if (ret != 0) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "couldn't map sata regs\n");
        return 0;
    }

    return 1;
}
static void
via_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcireg_t vendor = PCI_VENDOR(pa->pa_id);
    int channel;
    u_int32_t ideconf;
    pcireg_t pcib_id, pcib_class;
    struct pci_attach_args pcib_pa;

    if (pciide_chipen(sc, pa) == 0)
        return;

    switch (vendor) {
    case PCI_VENDOR_VIATECH:
        switch (PCI_PRODUCT(pa->pa_id)) {
        case PCI_PRODUCT_VIATECH_VT6410_RAID:
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "VIA Technologies VT6410 IDE controller\n");
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
            interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
                PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
            break;
        case PCI_PRODUCT_VIATECH_VX900_IDE:
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "VIA Technologies VX900 ATA133 controller\n");
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
            break;
        default:
            /*
             * get a PCI tag for the ISA bridge.
             */
            if (pci_find_device(&pcib_pa, via_pcib_match) == 0)
                goto unknown;
            pcib_id = pcib_pa.pa_id;
            pcib_class = pcib_pa.pa_class;
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "VIA Technologies ");
            switch (PCI_PRODUCT(pcib_id)) {
            case PCI_PRODUCT_VIATECH_VT82C586_ISA:
                aprint_normal("VT82C586 (Apollo VP) ");
                if(PCI_REVISION(pcib_class) >= 0x02) {
                    aprint_normal("ATA33 controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
                } else {
                    aprint_normal("controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
                }
                break;
            case PCI_PRODUCT_VIATECH_VT82C596A:
                aprint_normal("VT82C596A (Apollo Pro) ");
                if (PCI_REVISION(pcib_class) >= 0x12) {
                    aprint_normal("ATA66 controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
                } else {
                    aprint_normal("ATA33 controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
                }
                break;
            case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
                aprint_normal("VT82C686A (Apollo KX133) ");
                if (PCI_REVISION(pcib_class) >= 0x40) {
                    aprint_normal("ATA100 controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
                } else {
                    aprint_normal("ATA66 controller\n");
                    sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
                }
                break;
            case PCI_PRODUCT_VIATECH_VT8231:
                aprint_normal("VT8231 ATA100 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
                break;
            case PCI_PRODUCT_VIATECH_VT8233:
                aprint_normal("VT8233 ATA100 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
                break;
            case PCI_PRODUCT_VIATECH_VT8233A:
                aprint_normal("VT8233A ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            case PCI_PRODUCT_VIATECH_VT8235:
                aprint_normal("VT8235 ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            case PCI_PRODUCT_VIATECH_VT8237:
                aprint_normal("VT8237 ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            case PCI_PRODUCT_VIATECH_VT8237A_ISA:
                aprint_normal("VT8237A ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            case PCI_PRODUCT_VIATECH_CX700:
                aprint_normal("CX700 ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            case PCI_PRODUCT_VIATECH_VT8251:
                aprint_normal("VT8251 ATA133 controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                break;
            default:
unknown:
                aprint_normal("unknown VIA ATA controller\n");
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
            }
            break;
        }
        sc->sc_apo_regbase = APO_VIA_REGBASE;
        break;
    case PCI_VENDOR_AMD:
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_AMD_PBC8111_IDE:
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
            break;
        case PCI_PRODUCT_AMD_CS5536_IDE:
        case PCI_PRODUCT_AMD_PBC766_IDE:
        case PCI_PRODUCT_AMD_PBC768_IDE:
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
            break;
        default:
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
        }
        sc->sc_apo_regbase = APO_AMD_REGBASE;
        break;
    case PCI_VENDOR_NVIDIA:
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
            break;
        case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE4_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE430_ATA133:
        case PCI_PRODUCT_NVIDIA_MCP04_IDE:
        case PCI_PRODUCT_NVIDIA_MCP55_IDE:
        case PCI_PRODUCT_NVIDIA_MCP61_IDE:
        case PCI_PRODUCT_NVIDIA_MCP65_IDE:
        case PCI_PRODUCT_NVIDIA_MCP67_IDE:
        case PCI_PRODUCT_NVIDIA_MCP73_IDE:
        case PCI_PRODUCT_NVIDIA_MCP77_IDE:
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
            break;
        }
        sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
        break;
    default:
        panic("via_chip_map: unknown vendor");
    }

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");
    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 0)
            sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = via_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
        PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

    wdc_allocate_regs(&sc->sc_wdcdev);

    ATADEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
        "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
        DEBUG_PROBE);

    ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;

        if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "%s channel ignored (disabled)\n", cp->name);
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
        }
        via_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    u_int32_t idetim;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");
    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        switch(sc->sc_pp->ide_product) {
        case PCI_PRODUCT_INTEL_82371AB_IDE:
        case PCI_PRODUCT_INTEL_82440MX_IDE:
        case PCI_PRODUCT_INTEL_82801AA_IDE:
        case PCI_PRODUCT_INTEL_82801AB_IDE:
        case PCI_PRODUCT_INTEL_82801BA_IDE:
        case PCI_PRODUCT_INTEL_82801BAM_IDE:
        case PCI_PRODUCT_INTEL_82801CA_IDE_1:
        case PCI_PRODUCT_INTEL_82801CA_IDE_2:
        case PCI_PRODUCT_INTEL_82801DB_IDE:
        case PCI_PRODUCT_INTEL_82801DBM_IDE:
        case PCI_PRODUCT_INTEL_82801EB_IDE:
        case PCI_PRODUCT_INTEL_6300ESB_IDE:
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
        }
    }
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    switch(sc->sc_pp->ide_product) {
    case PCI_PRODUCT_INTEL_82801AA_IDE:
        sc->sc_wdcdev.UDMA_cap = 4;
        break;
    case PCI_PRODUCT_INTEL_82801BA_IDE:
    case PCI_PRODUCT_INTEL_82801BAM_IDE:
    case PCI_PRODUCT_INTEL_82801CA_IDE_1:
    case PCI_PRODUCT_INTEL_82801CA_IDE_2:
    case PCI_PRODUCT_INTEL_82801DB_IDE:
    case PCI_PRODUCT_INTEL_82801DBM_IDE:
    case PCI_PRODUCT_INTEL_82801EB_IDE:
    case PCI_PRODUCT_INTEL_6300ESB_IDE:
        sc->sc_wdcdev.UDMA_cap = 5;
        break;
    default:
        sc->sc_wdcdev.UDMA_cap = 2;
    }
    if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
        sc->sc_wdcdev.set_modes = piix_setup_channel;
    else
        sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        /* PIIX is compat-only */
        if (pciide_chansetup(sc, channel, 0) == 0)
            continue;
        idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
        if ((PIIX_IDETIM_READ(idetim, channel) & PIIX_IDETIM_IDE) == 0) {
#if 1
            aprint_normal("%s: %s channel ignored (disabled)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
            cp->wdc_channel.ch_flags |= WDCF_DISABLED;
            continue;
#else
            pcireg_t interface;

            idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
            pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
            interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
                PCI_CLASS_REG));
            aprint_normal("channel %d idetim=%08x interface=%02x\n",
                channel, idetim, interface);
#endif
        }
        /* PIIX are compat-only pciide devices */
        pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
    }

    WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
        pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
        DEBUG_PROBE);
    if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
        WDCDEBUG_PRINT((", sidetim=0x%x",
            pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
            DEBUG_PROBE);
        if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
            WDCDEBUG_PRINT((", udmareg 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
                DEBUG_PROBE);
        }
        if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
            sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE) {
            WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
                pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
                DEBUG_PROBE);
        }
    }
    WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}
static void
opti_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface;
    u_int8_t init_ctrl;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");

    /*
     * XXXSCW:
     * There seem to be a couple of buggy revisions/implementations
     * of the OPTi pciide chipset. This kludge seems to fix one of
     * the reported problems (PR/11644) but still fails for the
     * other (PR/13151), although the latter may be due to other
     * issues too...
     */
    if (PCI_REVISION(pa->pa_class) <= 0x12) {
        aprint_verbose(" but disabled due to chip rev. <= 0x12");
        sc->sc_dma_ok = 0;
    } else
        pciide_mapreg_dma(sc, pa);

    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA32 | ATAC_CAP_DATA16;
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    }
    sc->sc_wdcdev.sc_atac.atac_set_modes = opti_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
        OPTI_REG_INIT_CONTROL);

    interface = PCI_INTERFACE(pa->pa_class);

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        if (channel == 1 &&
            (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "%s channel ignored (disabled)\n", cp->name);
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
        }
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
cy693_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    /*
     * this chip has 2 PCI IDE functions, one for primary and one for
     * secondary. So we need to call pciide_mapregs_compat() with
     * the real channel
     */
    if (pa->pa_function == 1) {
        sc->sc_cy_compatchan = 0;
    } else if (pa->pa_function == 2) {
        sc->sc_cy_compatchan = 1;
    } else {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "unexpected PCI function %d\n", pa->pa_function);
        return;
    }

    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "bus-master DMA support present\n");
        pciide_mapreg_dma(sc, pa);
    } else {
        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
    if (sc->sc_cy_handle == NULL) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "unable to map hyperCache control registers\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = cy693_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Only one channel for this chip; if we are here it's enabled */
    cp = &sc->pciide_channels[0];
    sc->wdc_chanarray[0] = &cp->ata_channel;
    cp->name = PCIIDE_CHANNEL_NAME(0);
    cp->ata_channel.ch_channel = 0;
    cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
    cp->ata_channel.ch_queue =
        malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
    if (cp->ata_channel.ch_queue == NULL) {
        aprint_error("%s primary channel: "
            "can't allocate memory for command queue",
            device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
        return;
    }
    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "primary channel %s to ",
        (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
        "configured" : "wired");
    if (interface & PCIIDE_INTERFACE_PCI(0)) {
        aprint_normal("native-PCI mode\n");
        pciide_mapregs_native(pa, cp, pciide_pci_intr);
    } else {
        aprint_normal("compatibility mode\n");
        pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan);
        if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
            pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan);
    }
    wdcattach(&cp->ata_channel);
}
static void
serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcitag_t pcib_tag;
    int channel;
    bus_size_t cmdsize, ctlsize;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    switch (sc->sc_pp->ide_product) {
    case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
        break;
    case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
        if (PCI_REVISION(pa->pa_class) < 0x92)
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
        else
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
        break;
    case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
    case PCI_PRODUCT_SERVERWORKS_CSB6_RAID:
    case PCI_PRODUCT_SERVERWORKS_HT1000_IDE:
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
        break;
    }

    sc->sc_wdcdev.sc_atac.atac_set_modes = serverworks_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
        case PCI_PRODUCT_SERVERWORKS_CSB6_RAID:
            pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
                serverworkscsb6_pci_intr);
            break;
        default:
            pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
                serverworks_pci_intr);
        }
    }

    pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
    pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
        (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}
static void
via_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcireg_t vendor = PCI_VENDOR(pa->pa_id);
    int channel;
    u_int32_t ideconf;
    bus_size_t cmdsize, ctlsize;
    pcireg_t pcib_id, pcib_class;
    struct pci_attach_args pcib_pa;

    if (pciide_chipen(sc, pa) == 0)
        return;

    switch (vendor) {
    case PCI_VENDOR_VIATECH:
        /*
         * get a PCI tag for the ISA bridge.
         */
        if (pci_enumerate_bus(
            (struct pci_softc *)sc->sc_wdcdev.sc_dev.dv_parent,
            via_pcib_match, &pcib_pa) == 0)
            goto unknown;
        pcib_id = pcib_pa.pa_id;
        pcib_class = pcib_pa.pa_class;
        aprint_normal("%s: VIA Technologies ",
            sc->sc_wdcdev.sc_dev.dv_xname);
        switch (PCI_PRODUCT(pcib_id)) {
        case PCI_PRODUCT_VIATECH_VT82C586_ISA:
            aprint_normal("VT82C586 (Apollo VP) ");
            if(PCI_REVISION(pcib_class) >= 0x02) {
                aprint_normal("ATA33 controller\n");
                sc->sc_wdcdev.UDMA_cap = 2;
            } else {
                aprint_normal("controller\n");
                sc->sc_wdcdev.UDMA_cap = 0;
            }
            break;
        case PCI_PRODUCT_VIATECH_VT82C596A:
            aprint_normal("VT82C596A (Apollo Pro) ");
            if (PCI_REVISION(pcib_class) >= 0x12) {
                aprint_normal("ATA66 controller\n");
                sc->sc_wdcdev.UDMA_cap = 4;
            } else {
                aprint_normal("ATA33 controller\n");
                sc->sc_wdcdev.UDMA_cap = 2;
            }
            break;
        case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
            aprint_normal("VT82C686A (Apollo KX133) ");
            if (PCI_REVISION(pcib_class) >= 0x40) {
                aprint_normal("ATA100 controller\n");
                sc->sc_wdcdev.UDMA_cap = 5;
            } else {
                aprint_normal("ATA66 controller\n");
                sc->sc_wdcdev.UDMA_cap = 4;
            }
            break;
        case PCI_PRODUCT_VIATECH_VT8231:
            aprint_normal("VT8231 ATA100 controller\n");
            sc->sc_wdcdev.UDMA_cap = 5;
            break;
        case PCI_PRODUCT_VIATECH_VT8233:
            aprint_normal("VT8233 ATA100 controller\n");
            sc->sc_wdcdev.UDMA_cap = 5;
            break;
        case PCI_PRODUCT_VIATECH_VT8233A:
            aprint_normal("VT8233A ATA133 controller\n");
            sc->sc_wdcdev.UDMA_cap = 6;
            break;
        case PCI_PRODUCT_VIATECH_VT8235:
            aprint_normal("VT8235 ATA133 controller\n");
            sc->sc_wdcdev.UDMA_cap = 6;
            break;
        case PCI_PRODUCT_VIATECH_VT8237:
            aprint_normal("VT8237 ATA133 controller\n");
            sc->sc_wdcdev.UDMA_cap = 6;
            break;
        default:
unknown:
            aprint_normal("unknown VIA ATA controller\n");
            sc->sc_wdcdev.UDMA_cap = 0;
        }
        sc->sc_apo_regbase = APO_VIA_REGBASE;
        break;
    case PCI_VENDOR_AMD:
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_AMD_PBC8111_IDE:
            sc->sc_wdcdev.UDMA_cap = 6;
            break;
        case PCI_PRODUCT_AMD_PBC766_IDE:
        case PCI_PRODUCT_AMD_PBC768_IDE:
            sc->sc_wdcdev.UDMA_cap = 5;
            break;
        default:
            sc->sc_wdcdev.UDMA_cap = 4;
        }
        sc->sc_apo_regbase = APO_AMD_REGBASE;
        break;
    case PCI_VENDOR_NVIDIA:
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
            sc->sc_wdcdev.UDMA_cap = 5;
            break;
        case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
        case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
            sc->sc_wdcdev.UDMA_cap = 6;
            break;
        }
        sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
        break;
    default:
        panic("via_chip_map: unknown vendor");
    }

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");
    sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        if (sc->sc_wdcdev.UDMA_cap > 0)
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
    }
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    sc->sc_wdcdev.set_modes = via_setup_channel;
    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    WDCDEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
        "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
        pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
        DEBUG_PROBE);

    ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;

        if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
            aprint_normal("%s: %s channel ignored (disabled)\n",
                sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
            cp->wdc_channel.ch_flags |= WDCF_DISABLED;
            continue;
        }
        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
            pciide_pci_intr);
    }
}
static void
cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    int channel;
    pcireg_t rev = PCI_REVISION(pa->pa_class);

    /*
     * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
     * and base addresses registers can be disabled at
     * hardware level. In this case, the device is wired
     * in compat mode and its first channel is always enabled,
     * but we can't rely on PCI_COMMAND_IO_ENABLE.
     * In fact, it seems that the first channel of the CMD PCI0640
     * can't be disabled.
     */
#ifdef PCIIDE_CMD064x_DISABLE
    if (pciide_chipen(sc, pa) == 0)
        return;
#endif

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");
    sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_CMDTECH_649:
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
            sc->sc_wdcdev.UDMA_cap = 5;
            sc->sc_wdcdev.irqack = cmd646_9_irqack;
            break;
        case PCI_PRODUCT_CMDTECH_648:
            sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
            sc->sc_wdcdev.UDMA_cap = 4;
            sc->sc_wdcdev.irqack = cmd646_9_irqack;
            break;
        case PCI_PRODUCT_CMDTECH_646:
            if (rev >= CMD0646U2_REV) {
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
                sc->sc_wdcdev.UDMA_cap = 2;
            } else if (rev >= CMD0646U_REV) {
                /*
                 * Linux's driver claims that the 646U is broken
                 * with UDMA. Only enable it if we know what we're
                 * doing
                 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
                sc->sc_wdcdev.UDMA_cap = 2;
#endif
                /* explicitly disable UDMA */
                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                    CMD_UDMATIM(0), 0);
                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                    CMD_UDMATIM(1), 0);
            }
            sc->sc_wdcdev.irqack = cmd646_9_irqack;
            break;
        default:
            sc->sc_wdcdev.irqack = pciide_irqack;
        }
    }

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.PIO_cap = 4;
    sc->sc_wdcdev.DMA_cap = 2;
    sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;

    WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
        pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
        pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
        DEBUG_PROBE);
    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++)
        cmd_channel_map(pa, sc, channel);
    /*
     * note - this also makes sure we clear the irq disable and reset
     * bits
     */
    pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
    WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
        pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
        pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
        DEBUG_PROBE);
}
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    pcireg_t cr, interface;
    pcireg_t rev = PCI_REVISION(pa->pa_class);
    struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");
    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        if (rev >= 0x20) {
            sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
            if (rev >= 0xC7)
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
            else if (rev >= 0xC4)
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
            else if (rev >= 0xC2)
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
            else
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
        }
        sc->sc_wdcdev.irqack = pciide_irqack;
        if (rev <= 0xc4) {
            sc->sc_wdcdev.dma_init = acer_dma_init;
            aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "using PIO transfers above 137GB as workaround for "
                "48bit DMA access bug, expect reduced performance\n");
        }
    }

    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
        (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
        ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

    /* Enable "microsoft register bits" R/W. */
    pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
        pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
    pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
        pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
        ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
    pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
        pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
        ~ACER_CHANSTATUSREGS_RO);
    cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
    cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
    {
        /*
         * some BIOSes (port-cats ABLE) enable native mode, but don't
         * setup everything correctly, so allow the forcing of
         * compat mode
         */
        bool force_compat_mode;
        bool property_is_set;
        property_is_set = prop_dictionary_get_bool(
            device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
            "ali1543-ide-force-compat-mode", &force_compat_mode);
        if (property_is_set && force_compat_mode) {
            cr &= ~((PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))
                << PCI_INTERFACE_SHIFT);
        }
    }
    pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
    /* Don't use cr, re-read the real register content instead */
    interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
        PCI_CLASS_REG));

    /* From linux: enable "Cable Detection" */
    if (rev >= 0xC2) {
        pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) |
            ACER_0x4B_CDETECT);
    }

    wdc_allocate_regs(&sc->sc_wdcdev);

    if (rev == 0xC3) {
        /* install reset bug workaround */
        if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
            aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "WARNING: can't find pci-isa bridge\n");
        } else
            sc->sc_wdcdev.reset = acer_do_reset;
    }

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "%s channel ignored (disabled)\n", cp->name);
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
        }
        /* newer controllers seems to lack the ACER_CHIDS. Sigh */
        pciide_mapchan(pa, cp, interface,
            (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
    }
}
static void
sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    bus_size_t cmdsize, ctlsize;
    pcireg_t interface, scs_cmd, cfgctl;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

#define SII3112_RESET_BITS                        \
    (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET |      \
     SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET |      \
     SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)

    /*
     * Reset everything and then unblock all of the interrupts.
     */
    scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD);
    pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
        scs_cmd | SII3112_RESET_BITS);
    delay(50 * 1000);
    pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
        scs_cmd & SCS_CMD_BA5_EN);
    delay(50 * 1000);

    if (scs_cmd & SCS_CMD_BA5_EN) {
        aprint_verbose("%s: SATALink BA5 register space enabled\n",
            sc->sc_wdcdev.sc_dev.dv_xname);
        if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0,
            &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, NULL) != 0)
            aprint_error("%s: unable to map SATALink BA5 "
                "register space\n", sc->sc_wdcdev.sc_dev.dv_xname);
        else
            sc->sc_ba5_en = 1;
    } else {
        aprint_verbose("%s: SATALink BA5 register space disabled\n",
            sc->sc_wdcdev.sc_dev.dv_xname);

        cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL);
        pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL,
            cfgctl | CFGCTL_BA5INDEN);
    }

    aprint_normal("%s: bus-master DMA support present",
        sc->sc_wdcdev.sc_dev.dv_xname);
    pciide_mapreg_dma(sc, pa);
    aprint_normal("\n");

    /*
     * Rev. <= 0x01 of the 3112 have a bug that can cause data
     * corruption if DMA transfers cross an 8K boundary. This is
     * apparently hard to tickle, but we'll go ahead and play it
     * safe.
     */
    if (PCI_REVISION(pa->pa_class) <= 0x01) {
        sc->sc_dma_maxsegsz = 8192;
        sc->sc_dma_boundary = 8192;
    }

    sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
        WDC_CAPABILITY_MODE;
    sc->sc_wdcdev.PIO_cap = 4;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
        sc->sc_wdcdev.irqack = pciide_irqack;
        sc->sc_wdcdev.DMA_cap = 2;
        sc->sc_wdcdev.UDMA_cap = 6;
    }
    sc->sc_wdcdev.set_modes = sii3112_setup_channel;

    /* We can use SControl and SStatus to probe for drives. */
    sc->sc_wdcdev.drv_probe = sii3112_drv_probe;

    sc->sc_wdcdev.channels = sc->wdc_chanarray;
    sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

    /*
     * The 3112 either identifies itself as a RAID storage device
     * or a Misc storage device. Fake up the interface bits for
     * what our driver expects.
     */
    if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
        interface = PCI_INTERFACE(pa->pa_class);
    } else {
        interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
            PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
    }

    for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
            pciide_pci_intr);
    }
}
static void
sis_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);
    pcireg_t rev = PCI_REVISION(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "Silicon Integrated Systems ");
    pci_find_device(NULL, sis_hostbr_match);
    if (sis_hostbr_type_match) {
        if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
            pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
                pciide_pci_read(sc->sc_pc, sc->sc_tag,
                SIS_REG_57) & 0x7f);
            if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
                PCI_ID_REG)) == SIS_PRODUCT_5518) {
                aprint_normal("96X UDMA%d",
                    sis_hostbr_type_match->udma_mode);
                sc->sis_type = SIS_TYPE_133NEW;
                sc->sc_wdcdev.sc_atac.atac_udma_cap =
                    sis_hostbr_type_match->udma_mode;
            } else {
                if (pci_find_device(NULL, sis_south_match)) {
                    sc->sis_type = SIS_TYPE_133OLD;
                    sc->sc_wdcdev.sc_atac.atac_udma_cap =
                        sis_hostbr_type_match->udma_mode;
                } else {
                    sc->sis_type = SIS_TYPE_100NEW;
                    sc->sc_wdcdev.sc_atac.atac_udma_cap =
                        sis_hostbr_type_match->udma_mode;
                }
            }
        } else {
            sc->sis_type = sis_hostbr_type_match->type;
            sc->sc_wdcdev.sc_atac.atac_udma_cap =
                sis_hostbr_type_match->udma_mode;
        }
        aprint_normal("%s", sis_hostbr_type_match->name);
    } else {
        aprint_normal("5597/5598");
        if (rev >= 0xd0) {
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
            sc->sis_type = SIS_TYPE_66;
        } else {
            sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
            sc->sis_type = SIS_TYPE_NOUDMA;
        }
    }
    aprint_normal(" IDE controller (rev. 0x%02x)\n",
        PCI_REVISION(pa->pa_class));

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        if (sc->sis_type >= SIS_TYPE_66)
            sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
    }

    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;
    switch(sc->sis_type) {
    case SIS_TYPE_NOUDMA:
    case SIS_TYPE_66:
    case SIS_TYPE_100OLD:
        sc->sc_wdcdev.sc_atac.atac_set_modes = sis_setup_channel;
        pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
            SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
        break;
    case SIS_TYPE_100NEW:
    case SIS_TYPE_133OLD:
        sc->sc_wdcdev.sc_atac.atac_set_modes = sis_setup_channel;
        pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
        break;
    case SIS_TYPE_133NEW:
        sc->sc_wdcdev.sc_atac.atac_set_modes = sis96x_setup_channel;
        pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
        pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
        break;
    }

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
            (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
            aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                "%s channel ignored (disabled)\n", cp->name);
            cp->ata_channel.ch_flags |= ATACH_DISABLED;
            continue;
        }
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }
}
static void
geodeide_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
        /*
         * XXXJRT What chip revisions actually need the DMA
         * alignment work-around?
         */
        sc->sc_wdcdev.dma_init = geodeide_dma_init;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
    /*
     * The 5530 is utterly swamped by UDMA mode 2, so limit to mode 1
     * so that the chip is able to perform the other functions it has
     * while IDE UDMA is going on.
     */
    if (sc->sc_pp->ide_product == PCI_PRODUCT_CYRIX_CX5530_IDE) {
        sc->sc_wdcdev.sc_atac.atac_udma_cap = 1;
    }
    sc->sc_wdcdev.sc_atac.atac_set_modes = geodeide_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    /*
     * Soekris Engineering Issue #0003:
     *  "The SC1100 built in busmaster IDE controller is pretty
     *   standard, but have two bugs: data transfers need to be
     *   dword aligned and it cannot do an exact 64Kbyte data
     *   transfer."
     */
    if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SC1100_IDE) {
        if (sc->sc_dma_boundary == 0x10000)
            sc->sc_dma_boundary -= PAGE_SIZE;

        if (sc->sc_dma_maxsegsz == 0x10000)
            sc->sc_dma_maxsegsz -= PAGE_SIZE;
    }

    wdc_allocate_regs(&sc->sc_wdcdev);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        /* controller is compat-only */
        if (pciide_chansetup(sc, channel, 0) == 0)
            continue;
        pciide_mapchan(pa, cp, 0, pciide_pci_intr);
    }
}
static void
ite_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    int channel;
    pcireg_t interface;
    pcireg_t cfg, modectl;

    /* fake interface since IT8212 claims to be a RAID device */
    interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
        PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

    cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
    modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
    ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
        device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
        cfg & IT_CFG_MASK, modectl & IT_MODE_MASK), DEBUG_PROBE);

    if (pciide_chipen(sc, pa) == 0)
        return;

    aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
        "bus-master DMA support present");
    pciide_mapreg_dma(sc, pa);
    aprint_verbose("\n");

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

    sc->sc_wdcdev.sc_atac.atac_set_modes = ite_setup_channel;
    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Disable RAID */
    modectl &= ~IT_MODE_RAID1;
    /* Disable CPU firmware mode */
    modectl &= ~IT_MODE_CPU;

    pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl);

    for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
         channel++) {
        cp = &sc->pciide_channels[channel];
        if (pciide_chansetup(sc, channel, interface) == 0)
            continue;
        pciide_mapchan(pa, cp, interface, pciide_pci_intr);
    }

    /* Re-read configuration registers after channels setup */
    cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
    modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
    ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
        device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
        cfg & IT_CFG_MASK, modectl & IT_MODE_MASK), DEBUG_PROBE);
}