Code example #1
static void
jmpata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct jmide_softc *jmidesc = (struct jmide_softc *)sc;
	int channel;
	pcireg_t interface;
	struct pciide_channel *cp;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_verbose("%s: bus-master DMA support present", JM_NAME(jmidesc));
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = jmpata_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	wdc_allocate_regs(&sc->sc_wdcdev);
	/*
	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
	 * mode. We have to fake interface
	 */
	interface = PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		aprint_normal("%s: %s channel is ", JM_NAME(jmidesc),
		    PCIIDE_CHANNEL_NAME(channel));
		switch(jmidesc->sc_chan_type[channel]) {
		case TYPE_PATA:
			aprint_normal("PATA");
			break;
		case TYPE_SATA:
			aprint_normal("SATA");
			break;
		case TYPE_NONE:
			aprint_normal("unused");
			break;
		default:
			aprint_normal("impossible");
			panic("jmide: wrong/uninitialised channel type");
		}
		aprint_normal("\n");
		if (jmidesc->sc_chan_type[channel] == TYPE_NONE) {
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
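
The chip_map routines collected here -- jmpata_chip_map above and most of the examples that follow -- share a common skeleton: bail out via pciide_chipen(), map the bus-master DMA registers, fill in the atac capability fields, allocate the wdc register state, then walk the channels with pciide_chansetup()/pciide_mapchan(). The composite below is only an illustrative sketch of that shared pattern; the function name example_chip_map and the fixed capability values are placeholders, not an actual NetBSD driver, and the real routines add per-chip quirks on top of this.

static void
example_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	/* Do nothing if the chip (or this function of it) is disabled. */
	if (pciide_chipen(sc, pa) == 0)
		return;

	/* Map the bus-master DMA registers; this sets sc->sc_dma_ok. */
	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	/* Advertise capabilities; DMA/UDMA only if the mapping succeeded. */
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6; /* placeholder; chip dependent */
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel; /* chip-dependent hook */
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/* Map and attach each channel that pciide_chansetup() accepts. */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}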
Code example #2
static void
artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	interface = PCI_INTERFACE(pa->pa_class);

	if (interface == 0) {
		artisea_chip_map_dpa (sc, pa);
		return;
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
#ifdef PCIIDE_I31244_DISABLEDMA
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_31244 &&
	    PCI_REVISION(pa->pa_class) == 0) {
		aprint_verbose(" but disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}
Code example #3
File: piixide.c  Project: ryo/netbsd-src
static void
piixsata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface, cmdsts;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Do all revisions require DMA alignment workaround? */
		sc->sc_wdcdev.dma_init = piix_dma_init;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	cmdsts = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);
	cmdsts &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, cmdsts);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	interface = PCI_INTERFACE(pa->pa_class);
	
	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #4
static void
piccolo_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");

	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA32 | ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 5;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 3;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
	}

	sc->sc_wdcdev.sc_atac.atac_set_modes = piccolo_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	/* 
	 * XXX one for now. We'll figure out how to talk to the second channel
	 * later, hopefully! Second interface config is via the
	 * "alternate PCI Configuration Space" whatever that is!
	 */

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #5
static void
sis_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("sis_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "Silicon Integrated Systems 180/96X SATA controller "
	    "(rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #6
static void
stpc_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_set_modes = stpc_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #7
File: gcscide.c  Project: lacombar/netbsd-alc
static void
gcscide_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_set_modes = gcscide_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	if (pciide_chansetup(sc, 0, interface) == 0)
		return;

	pciide_mapchan(pa, &sc->pciide_channels[0], interface,
	    &cmdsize, &ctlsize, pciide_pci_intr);
}
Code example #8
static int
via_sata_chip_map_common(struct pciide_softc *sc,
    const struct pci_attach_args *cpa)
{
	pcireg_t csr;
	int maptype, ret;
	struct pci_attach_args pac, *pa = &pac;

	pac = *cpa;

	if (pciide_chipen(sc, pa) == 0)
		return 0;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
	    PCI_MAPREG_START + 0x14);
	switch(maptype) {
	case PCI_MAPREG_TYPE_IO:
		ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
		    PCI_MAPREG_TYPE_IO, 0, &sc->sc_ba5_st, &sc->sc_ba5_sh,
		    NULL, &sc->sc_ba5_ss);
		break;
	case PCI_MAPREG_MEM_TYPE_32BIT:
		/*
		 * Enable memory-space access if it isn't already there.
		 */
		csr = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    PCI_COMMAND_STATUS_REG);
		if ((csr & PCI_COMMAND_MEM_ENABLE) == 0 &&
		    (pa->pa_flags & PCI_FLAGS_MEM_OKAY) != 0) {

			pci_conf_write(pa->pa_pc, pa->pa_tag,
			    PCI_COMMAND_STATUS_REG,
			    csr | PCI_COMMAND_MEM_ENABLE);
		}

		ret = pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
		    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
		    0, &sc->sc_ba5_st, &sc->sc_ba5_sh,
		    NULL, &sc->sc_ba5_ss);
		break;
	default:
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map sata regs, unsupported maptype (0x%x)\n",
		    maptype);
		return 0;
	}
	if (ret != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map sata regs\n");
		return 0;
	}
	return 1;
}
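
via_sata_chip_map_common() only performs the shared setup and returns 1 once the SATA registers behind BAR 5 are mapped; a chip-specific caller is then expected to finish the per-channel work. The fragment below is a hypothetical sketch of such a caller, assuming the channel loop used by the other chip_map routines in this collection; it is not the actual via_sata_chip_map* code, and example_via_sata_chip_map is a placeholder name.

static void
example_via_sata_chip_map(struct pciide_softc *sc,
    const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	/* Shared part: DMA mapping, capabilities, BAR 5 SATA registers. */
	if (via_sata_chip_map_common(sc, pa) == 0)
		return;

	/* Per-channel setup, as in the other chip_map routines. */
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}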
Code example #9
static void
via_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t vendor = PCI_VENDOR(pa->pa_id);
	int channel;
	u_int32_t ideconf;
	pcireg_t pcib_id, pcib_class;
	struct pci_attach_args pcib_pa;

	if (pciide_chipen(sc, pa) == 0)
		return;

	switch (vendor) {
	case PCI_VENDOR_VIATECH:
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_VIATECH_VT6410_RAID:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VT6410 IDE controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
			    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
			break;
		case PCI_PRODUCT_VIATECH_VX900_IDE:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VX900 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		default:
			/*
			 * get a PCI tag for the ISA bridge.
			 */
			if (pci_find_device(&pcib_pa, via_pcib_match) == 0)
				goto unknown;
			pcib_id = pcib_pa.pa_id;
			pcib_class = pcib_pa.pa_class;
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies ");
			switch (PCI_PRODUCT(pcib_id)) {
			case PCI_PRODUCT_VIATECH_VT82C586_ISA:
				aprint_normal("VT82C586 (Apollo VP) ");
				if(PCI_REVISION(pcib_class) >= 0x02) {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				} else {
					aprint_normal("controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C596A:
				aprint_normal("VT82C596A (Apollo Pro) ");
				if (PCI_REVISION(pcib_class) >= 0x12) {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				} else {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
				aprint_normal("VT82C686A (Apollo KX133) ");
				if (PCI_REVISION(pcib_class) >= 0x40) {
					aprint_normal("ATA100 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				} else {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT8231:
				aprint_normal("VT8231 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233:
				aprint_normal("VT8233 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233A:
				aprint_normal("VT8233A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8235:
				aprint_normal("VT8235 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237:
				aprint_normal("VT8237 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237A_ISA:
				aprint_normal("VT8237A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_CX700:
				aprint_normal("CX700 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8251:
				aprint_normal("VT8251 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			default:
		unknown:
				aprint_normal("unknown VIA ATA controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
			}
			break;
		}
		sc->sc_apo_regbase = APO_VIA_REGBASE;
		break;
	case PCI_VENDOR_AMD:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_AMD_PBC8111_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_AMD_CS5536_IDE:
		case PCI_PRODUCT_AMD_PBC766_IDE:
		case PCI_PRODUCT_AMD_PBC768_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		default:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		}
		sc->sc_apo_regbase = APO_AMD_REGBASE;
		break;
	case PCI_VENDOR_NVIDIA:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE4_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE430_ATA133:
		case PCI_PRODUCT_NVIDIA_MCP04_IDE:
		case PCI_PRODUCT_NVIDIA_MCP55_IDE:
		case PCI_PRODUCT_NVIDIA_MCP61_IDE:
		case PCI_PRODUCT_NVIDIA_MCP65_IDE:
		case PCI_PRODUCT_NVIDIA_MCP67_IDE:
		case PCI_PRODUCT_NVIDIA_MCP73_IDE:
		case PCI_PRODUCT_NVIDIA_MCP77_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		}
		sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
		break;
	default:
		panic("via_chip_map: unknown vendor");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 0)
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = via_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);

	ATADEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
	    DEBUG_PROBE);

	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		via_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #10
static void
via_sata_chip_map_new(struct pciide_softc *sc,
    const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	int channel;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int i;

	if (pciide_chipen(sc, pa) == 0)
		return;

	sc->sc_apo_regbase = APO_VIA_VT6421_REGBASE;

	if (pci_mapreg_map(pa, PCI_BAR(5), PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, &sc->sc_ba5_ss) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map SATA regs\n");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	via_vt6421_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 3;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	if (pci_intr_map(pa, &intrhandle) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map native-PCI interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
	    intrhandle, IPL_BIO, pciide_pci_intr, sc);
	if (sc->sc_pci_ih == NULL) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't establish native-PCI interrupt");
		if (intrstr != NULL)
		    aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "using %s for native-PCI interrupt\n",
	    intrstr ? intrstr : "unknown interrupt");

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (via_vt6421_chansetup(sc, channel) == 0)
			continue;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);

		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}

		if (pci_mapreg_map(pa, PCI_BAR(wdc_cp->ch_channel),
		    PCI_MAPREG_TYPE_IO, 0, &wdr->cmd_iot, &wdr->cmd_baseioh,
		    NULL, &wdr->cmd_ios) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map %s channel regs\n", cp->name);
		}
		wdr->ctl_iot = wdr->cmd_iot;
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(
				    sc->sc_wdcdev.sc_atac.atac_dev,
				    "couldn't subregion %s "
				    "channel cmd regs\n", cp->name);
				return;
			}
		}
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    WDC_NREG + 2, 1,  &wdr->ctl_ioh) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d ctl regs\n", channel);
			return;
		}
		wdc_init_shadow_regs(wdc_cp);
		wdr->data32iot = wdr->cmd_iot;
		wdr->data32ioh = wdr->cmd_iohs[wd_data];
		wdcattach(wdc_cp);
	}
}
Code example #11
static void
opti_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");

	/*
	 * XXXSCW:
	 * There seem to be a couple of buggy revisions/implementations
	 * of the OPTi pciide chipset. This kludge seems to fix one of
	 * the reported problems (PR/11644) but still fails for the
	 * other (PR/13151), although the latter may be due to other
	 * issues too...
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x12) {
		aprint_verbose(" but disabled due to chip rev. <= 0x12");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);

	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA32 | ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = opti_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #12
static void
serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		else
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
	case PCI_PRODUCT_SERVERWORKS_CSB6_RAID:
	case PCI_PRODUCT_SERVERWORKS_HT1000_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
		break;
	}

	sc->sc_wdcdev.sc_atac.atac_set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
		case PCI_PRODUCT_SERVERWORKS_CSB6_RAID:
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    serverworkscsb6_pci_intr);
			break;
		default:
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    serverworks_pci_intr);
		}
	}

	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}
Code example #13
static void
ite_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface;
	pcireg_t cfg, modectl;

	/* fake interface since IT8212 claims to be a RAID device */
	interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
	    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_set_modes = ite_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/* Disable RAID */
	modectl &= ~IT_MODE_RAID1;
	/* Disable CPU firmware mode */
	modectl &= ~IT_MODE_CPU;

	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
	/* Re-read configuration registers after channels setup */
	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);
}
Code example #14
File: piixide.c  Project: ryo/netbsd-src
static void
piix_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Do all revisions require DMA alignment workaround? */
		sc->sc_wdcdev.dma_init = piix_dma_init;
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		case PCI_PRODUCT_INTEL_82801DB_IDE:
		case PCI_PRODUCT_INTEL_82801DBM_IDE:
		case PCI_PRODUCT_INTEL_82801EB_IDE:
		case PCI_PRODUCT_INTEL_6300ESB_IDE:
		case PCI_PRODUCT_INTEL_82801FB_IDE:
		case PCI_PRODUCT_INTEL_82801G_IDE:
		case PCI_PRODUCT_INTEL_82801HBM_IDE:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
		}
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
	case PCI_PRODUCT_INTEL_82801DB_IDE:
	case PCI_PRODUCT_INTEL_82801DBM_IDE:
	case PCI_PRODUCT_INTEL_82801EB_IDE:
	case PCI_PRODUCT_INTEL_6300ESB_IDE:
	case PCI_PRODUCT_INTEL_82801FB_IDE:
	case PCI_PRODUCT_INTEL_82801G_IDE:
	case PCI_PRODUCT_INTEL_82801HBM_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
		break;
	default:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.sc_atac.atac_set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.sc_atac.atac_set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	ATADEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		ATADEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
			ATADEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
			ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	ATADEBUG_PRINT(("\n"), DEBUG_PROBE);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
#if 1
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
#else
			pcireg_t interface;

			idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
			interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
			    sc->sc_tag, PCI_CLASS_REG));
			aprint_normal("channel %d idetim=%08x interface=%02x\n",
			    channel, idetim, interface);
#endif
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}

	ATADEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		ATADEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
			ATADEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
			ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	ATADEBUG_PRINT(("\n"), DEBUG_PROBE);
}
Code example #15
File: wdc_buddha.c  Project: lacombar/netbsd-alc
void
wdc_buddha_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_buddha_softc *sc;
	struct zbus_args *zap;
	int nchannels;
	int ch;

	sc = device_private(self);
	sc->sc_wdcdev.sc_atac.atac_dev = self;
	zap = aux;

	sc->ba = zap->va;

	sc->sc_iot.base = (bus_addr_t)sc->ba;
	sc->sc_iot.absm = &amiga_bus_stride_4swap;

	nchannels = 2;
	if (zap->prodid == 42) {
		aprint_normal(": Catweasel Z2\n");
		nchannels = 3;
	} else if (zap->serno == 0) 
		aprint_normal(": Buddha\n");
	else
		aprint_normal(": Buddha Flash\n");

	/* XXX pio mode setting not implemented yet. */
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = nchannels;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (ch = 0; ch < nchannels; ch++) {
		struct ata_channel *cp;
		struct wdc_regs *wdr;
		int i;

		cp = &sc->channels[ch];
		sc->wdc_chanarray[ch] = cp;

		cp->ch_channel = ch;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue =
		    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
		if (cp->ch_queue == NULL) {
			aprint_error_dev(self,
			    "can't allocate memory for command queue\n");
			return;
		}
		cp->ch_ndrive = 2;

		/*
		 * XXX According to the Buddha docs, we should use a method
		 * array that adds 0x40 to the address for byte accesses, to
		 * get the slow timing for command accesses, and the 0x00 
		 * offset for the word (fast) accesses. This will be
		 * reconsidered when implementing setting the timing.
		 *
		 * XXX We also could consider to abuse the 32bit capability, or
		 * 32bit accesses to the words (which will read in two words)
		 * for better performance.
		 *		-is
		 */
		wdr = CHAN_TO_WDC_REGS(cp);

		wdr->cmd_iot = &sc->sc_iot;
		if (bus_space_map(wdr->cmd_iot, 0x210+ch*0x80, 8, 0,
		    &wdr->cmd_baseioh)) {
			aprint_error_dev(self, "couldn't map cmd registers\n");
			return;
		}

		wdr->ctl_iot = &sc->sc_iot;
		if (bus_space_map(wdr->ctl_iot, 0x250+ch*0x80, 2, 0,
		    &wdr->ctl_ioh)) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 8);
			aprint_error_dev(self, "couldn't map ctl registers\n");
			return;
		}

		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
			    i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(self,
				    "couldn't subregion cmd regs\n");
				return;
			}
		}

		wdc_init_shadow_regs(cp);
		wdcattach(cp);
	}

	sc->sc_isr.isr_intr = wdc_buddha_intr;
	sc->sc_isr.isr_arg = sc;
	sc->sc_isr.isr_ipl = 2;
	add_isr (&sc->sc_isr);
	sc->ba[0xfc0] = 0;	/* enable interrupts */
}
Code example #16
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		if (rev >= 0x20) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			 "using PIO transfers above 137GB as workaround for "
			 "48bit DMA access bug, expect reduced performance\n");
		}
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	
	{
		/*
		 * some BIOSes (port-cats ABLE) enable native mode, but don't
		 * setup everything correctly, so allow the forcing of
		 * compat mode
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
				device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
				"ali1543-ide-force-compat-mode",
				&force_compat_mode);
		if (property_is_set && force_compat_mode) {
			cr &= ~((PCIIDE_INTERFACE_PCI(0)
				| PCIIDE_INTERFACE_PCI(1))
				<< PCI_INTERFACE_SHIFT);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface,
		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}
Code example #17
static void
sis_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "Silicon Integrated Systems ");
	pci_find_device(NULL, sis_hostbr_match);
	if (sis_hostbr_type_match) {
		if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
			    pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    SIS_REG_57) & 0x7f);
			if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_ID_REG)) == SIS_PRODUCT_5518) {
				aprint_normal("96X UDMA%d",
				    sis_hostbr_type_match->udma_mode);
				sc->sis_type = SIS_TYPE_133NEW;
				sc->sc_wdcdev.sc_atac.atac_udma_cap =
			    	    sis_hostbr_type_match->udma_mode;
			} else {
				if (pci_find_device(NULL, sis_south_match)) {
					sc->sis_type = SIS_TYPE_133OLD;
					sc->sc_wdcdev.sc_atac.atac_udma_cap =
				    	    sis_hostbr_type_match->udma_mode;
				} else {
					sc->sis_type = SIS_TYPE_100NEW;
					sc->sc_wdcdev.sc_atac.atac_udma_cap =
					    sis_hostbr_type_match->udma_mode;
				}
			}
		} else {
			sc->sis_type = sis_hostbr_type_match->type;
			sc->sc_wdcdev.sc_atac.atac_udma_cap =
		    	    sis_hostbr_type_match->udma_mode;
		}
		aprint_normal("%s", sis_hostbr_type_match->name);
	} else {
		aprint_normal("5597/5598");
		if (rev >= 0xd0) {
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			sc->sis_type = SIS_TYPE_66;
		} else {
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
			sc->sis_type = SIS_TYPE_NOUDMA;
		}
	}
	aprint_normal(" IDE controller (rev. 0x%02x)\n",
	    PCI_REVISION(pa->pa_class));
	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sis_type >= SIS_TYPE_66)
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	switch(sc->sis_type) {
	case SIS_TYPE_NOUDMA:
	case SIS_TYPE_66:
	case SIS_TYPE_100OLD:
		sc->sc_wdcdev.sc_atac.atac_set_modes = sis_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
		    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
		break;
	case SIS_TYPE_100NEW:
	case SIS_TYPE_133OLD:
		sc->sc_wdcdev.sc_atac.atac_set_modes = sis_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
		break;
	case SIS_TYPE_133NEW:
		sc->sc_wdcdev.sc_atac.atac_set_modes = sis96x_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
		break;
	}

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Code example #18
File: pciide_pnpbios.c  Project: lacombar/netbsd-alc
void
pciide_pnpbios_attach(device_t parent, device_t self, void *aux)
{
	struct pciide_softc *sc = device_private(self);
	struct pnpbiosdev_attach_args *aa = aux;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	bus_space_tag_t compat_iot;
	bus_space_handle_t cmd_baseioh, ctl_ioh;
	int i, drive, size;
	uint8_t idedma_ctl;

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	aprint_naive(": disk controller\n");
	aprint_normal("\n");
	pnpbios_print_devres(self, aa);

	aprint_normal_dev(self, "Toshiba Extended IDE Controller\n");

	if (pnpbios_io_map(aa->pbt, aa->resc, 2, &sc->sc_dma_iot,
	    &sc->sc_dma_ioh) != 0) {
		aprint_error_dev(self, "unable to map DMA registers\n");
		return;
	}
	if (pnpbios_io_map(aa->pbt, aa->resc, 0, &compat_iot,
	    &cmd_baseioh) != 0) {
		aprint_error_dev(self, "unable to map command registers\n");
		return;
	}
	if (pnpbios_io_map(aa->pbt, aa->resc, 1, &compat_iot,
	    &ctl_ioh) != 0) {
		aprint_error_dev(self, "unable to map control register\n");
		return;
	}

	sc->sc_dmat = &pci_bus_dma_tag;

	cp = &sc->pciide_channels[0];
	sc->wdc_chanarray[0] = &cp->ata_channel;
	cp->ata_channel.ch_channel = 0;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	cp->ata_channel.ch_queue = malloc(sizeof(struct ata_queue),
					  M_DEVBUF, M_NOWAIT);
	cp->ata_channel.ch_ndrive = 2;
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error_dev(self, "unable to allocate memory for command "
		    "queue\n");
		return;
	}

	sc->sc_dma_ok = 1;
	for (i = 0; i < IDEDMA_NREGS; i++) {
		size = 4;
		if (size > (IDEDMA_SCH_OFFSET - i))
			size = IDEDMA_SCH_OFFSET - i;
		if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
		    i, size, &cp->dma_iohs[i]) != 0) {
			aprint_error_dev(self, "can't subregion offset %d "
			    "size %lu", i, (u_long)size);
			return;
		}
	}
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = pciide_dma_init;
	sc->sc_wdcdev.dma_start = pciide_dma_start;
	sc->sc_wdcdev.dma_finish = pciide_dma_finish;
	sc->sc_wdcdev.irqack = pciide_irqack;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;		/* XXX */
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;	/* XXX */

	wdc_allocate_regs(&sc->sc_wdcdev);

	wdc_cp = &cp->ata_channel;
	wdr = CHAN_TO_WDC_REGS(wdc_cp);
	wdr->cmd_iot = compat_iot;
	wdr->cmd_baseioh = cmd_baseioh;

	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			    aprint_error_dev(self, "unable to subregion "
				"control register\n");
			    return;
		}
	}
	wdc_init_shadow_regs(wdc_cp);

	wdr->ctl_iot = wdr->data32iot = compat_iot;
	wdr->ctl_ioh = wdr->data32ioh = ctl_ioh;

	cp->compat = 1;

	cp->ih = pnpbios_intr_establish(aa->pbt, aa->resc, 0, IPL_BIO,
					pciide_compat_intr, cp);

	wdcattach(wdc_cp);

	idedma_ctl = 0;
	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		/*
		 * we have not probed the drives yet,
		 * allocate resources for all of them.
		 */
		if (pciide_dma_table_setup(sc, 0, drive) != 0) {
			/* Abort DMA setup */
			aprint_error(
			    "%s:%d:%d: can't allocate DMA maps, "
			    "using PIO transfers\n",
			    device_xname(self), 0, drive);
			sc->sc_dma_ok = 0;
			sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
			sc->sc_wdcdev.irqack = NULL;
			idedma_ctl = 0;
			break;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot,
		    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
	}
}
Code example #19
File: cypide.c  Project: ryo/netbsd-src
static void
cy693_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    /*
     * this chip has 2 PCI IDE functions, one for primary and one for
     * secondary. So we need to call pciide_mapregs_compat() with
     * the real channel
     */
    if (pa->pa_function == 1) {
        sc->sc_cy_compatchan = 0;
    } else if (pa->pa_function == 2) {
        sc->sc_cy_compatchan = 1;
    } else {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unexpected PCI function %d\n", pa->pa_function);
        return;
    }
    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                           "bus-master DMA support present\n");
        pciide_mapreg_dma(sc, pa);
    } else {
        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                          "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
    if (sc->sc_cy_handle == NULL) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unable to map hyperCache control registers\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = cy693_setup_channel;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Only one channel for this chip; if we are here it's enabled */
    cp = &sc->pciide_channels[0];
    sc->wdc_chanarray[0] = &cp->ata_channel;
    cp->name = PCIIDE_CHANNEL_NAME(0);
    cp->ata_channel.ch_channel = 0;
    cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
    cp->ata_channel.ch_queue =
        malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
    if (cp->ata_channel.ch_queue == NULL) {
        aprint_error("%s primary channel: "
                     "can't allocate memory for command queue",
                     device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
        return;
    }
    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                      "primary channel %s to ",
                      (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
                      "configured" : "wired");
    if (interface & PCIIDE_INTERFACE_PCI(0)) {
        aprint_normal("native-PCI mode\n");
        pciide_mapregs_native(pa, cp, pciide_pci_intr);
    } else {
        aprint_normal("compatibility mode\n");
        pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan);
        if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
            pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan);
    }
    wdcattach(&cp->ata_channel);
}
Code example #20
static void
geodeide_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/*
		 * XXXJRT What chip revisions actually need the DMA
		 * alignment work-around?
		 */
		sc->sc_wdcdev.dma_init = geodeide_dma_init;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
	/*
	 * The 5530 is utterly swamped by UDMA mode 2, so limit to mode 1
	 * so that the chip is able to perform the other functions it has
	 * while IDE UDMA is going on.
	 */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_CYRIX_CX5530_IDE) {
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 1;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = geodeide_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	/*
	 * Soekris Engineering Issue #0003:
	 * 	"The SC1100 built in busmaster IDE controller is pretty
	 *	 standard, but have two bugs: data transfers need to be
	 *	 dword aligned and it cannot do an exact 64Kbyte data
	 *	 transfer."
	 */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SC1100_IDE) {
		if (sc->sc_dma_boundary == 0x10000)
			sc->sc_dma_boundary -= PAGE_SIZE;

		if (sc->sc_dma_maxsegsz == 0x10000)
			sc->sc_dma_maxsegsz -= PAGE_SIZE;
	}

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		/* controller is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		pciide_mapchan(pa, cp, 0, pciide_pci_intr);
	}
}
Code example #21
static void
artisea_chip_map_dpa(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	interface = PCI_INTERFACE(pa->pa_class);

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "interface wired in DPA mode\n");

	if (pci_mapreg_map(pa, ARTISEA_PCI_DPA_BASE, PCI_MAPREG_MEM_TYPE_64BIT,
	    0, &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, NULL) != 0)
		return;

	artisea_mapreg_dma(sc, pa);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_WIDEREGS;

	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = ARTISEA_NUM_CHAN;
	sc->sc_wdcdev.sc_atac.atac_probe = wdc_sataprobe;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/*
	 * Perform a quick check to ensure that the device isn't configured
	 * in Spread-spectrum clocking mode.  This feature is buggy and has
	 * been removed from the latest documentation.
	 *
	 * Note that although this bit is in the Channel regs, it's the same
	 * for all channels, so we check it just once here.
	 */
	if ((bus_space_read_4 (sc->sc_ba5_st, sc->sc_ba5_sh,
	    ARTISEA_DPA_PORT_BASE(0) + ARTISEA_SUPERSET_DPA_OFF +
	    ARTISEA_SUPDPFR) & SUPDPFR_SSCEN) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "Spread-spectrum clocking not supported by device\n");
		return;
	}

	/* Clear the LED0-only bit.  */
	pci_conf_write (pa->pa_pc, pa->pa_tag, ARTISEA_PCI_SUECSR0,
	    pci_conf_read (pa->pa_pc, pa->pa_tag, ARTISEA_PCI_SUECSR0) &
	    ~SUECSR0_LED0_ONLY);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (artisea_chansetup(sc, channel, interface) == 0)
			continue;
		/* XXX We can probably do interrupts more efficiently.  */
		artisea_mapregs(pa, cp, &cmdsize, &ctlsize, pciide_pci_intr);
	}
}