static void
via_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa,
    int satareg_shift)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	pcireg_t interface;
	int channel;

	interface = PCI_INTERFACE(pa->pa_class);

	if (via_sata_chip_map_common(sc, pa) == 0)
		return;

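	/*
	 * A zero programming-interface byte tells us nothing useful;
	 * assume bus-master DMA with both channels in native-PCI mode.
	 */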
	if (interface == 0) {
		ATADEBUG_PRINT(("via_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	sc->sc_wdcdev.sc_atac.atac_probe = wdc_sataprobe;
	sc->sc_wdcdev.wdc_maxdrives = 1;
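	/*
	 * Each channel's SATA status/error/control registers live at
	 * (channel << satareg_shift) within the BA5 window, at offsets
	 * 0x0, 0x4 and 0x8 respectively.
	 */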
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);
		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
static void
jmpata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct jmide_softc *jmidesc = (struct jmide_softc *)sc;
	int channel;
	pcireg_t interface;
	struct pciide_channel *cp;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_verbose("%s: bus-master DMA support present", JM_NAME(jmidesc));
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = jmpata_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	wdc_allocate_regs(&sc->sc_wdcdev);
	/*
	 * We can't rely on the PCI_CLASS_REG content if the chip was in
	 * RAID mode; we have to fake the interface.
	 */
	interface = PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	    channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		aprint_normal("%s: %s channel is ", JM_NAME(jmidesc),
		    PCIIDE_CHANNEL_NAME(channel));
		switch (jmidesc->sc_chan_type[channel]) {
		case TYPE_PATA:
			aprint_normal("PATA");
			break;
		case TYPE_SATA:
			aprint_normal("SATA");
			break;
		case TYPE_NONE:
			aprint_normal("unused");
			break;
		default:
			aprint_normal("impossible");
			panic("jmide: wrong/uninitialised channel type");
		}
		aprint_normal("\n");
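		/* Unused channels are flagged disabled and skipped. */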
		if (jmidesc->sc_chan_type[channel] == TYPE_NONE) {
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
static void
sis_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("sis_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "Silicon Integrated Systems 180/96X SATA controller "
	    "(rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
static void
cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	bus_size_t cmdsize, ctlsize;
	int interface, i, reg;
	static const u_int8_t init_val[] =
	    {             0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
	      0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		interface |= PCIIDE_INTERFACE_PCI(0) |
		    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.ch_channel = channel;
	cp->wdc_channel.ch_wdc = &sc->sc_wdcdev;

	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return;
	}

	/*
	 * XXX Undocumented init sequence; the values are presumably
	 * per-channel timing defaults.
	 */
	reg = 0xa2 + channel * 16;
	for (i = 0; i < sizeof(init_val); i++)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

	aprint_normal("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
}
static void
via_mapchan(const struct pci_attach_args *pa, struct pciide_channel *cp,
    pcireg_t interface, int (*pci_intr)(void *))
{
	struct ata_channel *wdc_cp;
	struct pciide_softc *sc;
	prop_bool_t compat_nat_enable;

	wdc_cp = &cp->ata_channel;
	sc = CHAN_TO_PCIIDE(&cp->ata_channel);
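	/*
	 * A device property can request compat-style (IRQ 14/15) interrupt
	 * routing even when the channel is in native-PCI mode.
	 */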
	compat_nat_enable = prop_dictionary_get(
	    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
	      "use-compat-native-irq");

	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) {
		/* native mode with irq 14/15 requested? */
		if (compat_nat_enable != NULL &&
		    prop_bool_true(compat_nat_enable))
			via_mapregs_compat_native(pa, cp);
		else
			pciide_mapregs_native(pa, cp, pci_intr);
	} else {
		pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel);
		if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
			pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel);
	}
	wdcattach(wdc_cp);
}
static void
via_sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface == 0) {
		WDCDEBUG_PRINT(("via_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | WDC_CAPABILITY_DMA |
		    WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = 6;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.set_modes = sata_setup_channel;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}
static void
via_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t vendor = PCI_VENDOR(pa->pa_id);
	int channel;
	u_int32_t ideconf;
	pcireg_t pcib_id, pcib_class;
	struct pci_attach_args pcib_pa;

	if (pciide_chipen(sc, pa) == 0)
		return;

	switch (vendor) {
	case PCI_VENDOR_VIATECH:
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_VIATECH_VT6410_RAID:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VT6410 IDE controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
			    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
			break;
		case PCI_PRODUCT_VIATECH_VX900_IDE:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VX900 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		default:
			/*
			 * get a PCI tag for the ISA bridge.
			 */
			if (pci_find_device(&pcib_pa, via_pcib_match) == 0)
				goto unknown;
			pcib_id = pcib_pa.pa_id;
			pcib_class = pcib_pa.pa_class;
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies ");
			switch (PCI_PRODUCT(pcib_id)) {
			case PCI_PRODUCT_VIATECH_VT82C586_ISA:
				aprint_normal("VT82C586 (Apollo VP) ");
				if (PCI_REVISION(pcib_class) >= 0x02) {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				} else {
					aprint_normal("controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C596A:
				aprint_normal("VT82C596A (Apollo Pro) ");
				if (PCI_REVISION(pcib_class) >= 0x12) {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				} else {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
				aprint_normal("VT82C686A (Apollo KX133) ");
				if (PCI_REVISION(pcib_class) >= 0x40) {
					aprint_normal("ATA100 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				} else {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT8231:
				aprint_normal("VT8231 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233:
				aprint_normal("VT8233 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233A:
				aprint_normal("VT8233A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8235:
				aprint_normal("VT8235 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237:
				aprint_normal("VT8237 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237A_ISA:
				aprint_normal("VT8237A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_CX700:
				aprint_normal("CX700 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8251:
				aprint_normal("VT8251 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			default:
		unknown:
				aprint_normal("unknown VIA ATA controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
			}
			break;
		}
		sc->sc_apo_regbase = APO_VIA_REGBASE;
		break;
	case PCI_VENDOR_AMD:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_AMD_PBC8111_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_AMD_CS5536_IDE:
		case PCI_PRODUCT_AMD_PBC766_IDE:
		case PCI_PRODUCT_AMD_PBC768_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		default:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		}
		sc->sc_apo_regbase = APO_AMD_REGBASE;
		break;
	case PCI_VENDOR_NVIDIA:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE4_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE430_ATA133:
		case PCI_PRODUCT_NVIDIA_MCP04_IDE:
		case PCI_PRODUCT_NVIDIA_MCP55_IDE:
		case PCI_PRODUCT_NVIDIA_MCP61_IDE:
		case PCI_PRODUCT_NVIDIA_MCP65_IDE:
		case PCI_PRODUCT_NVIDIA_MCP67_IDE:
		case PCI_PRODUCT_NVIDIA_MCP73_IDE:
		case PCI_PRODUCT_NVIDIA_MCP77_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		}
		sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
		break;
	default:
		panic("via_chip_map: unknown vendor");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 0)
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = via_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);

	ATADEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
	    DEBUG_PROBE);

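	/*
	 * APO_IDECONF holds the per-channel enable bits; channels left
	 * disabled there are skipped below.
	 */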
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		via_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
static void
cy693_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    /*
     * This chip has two PCI IDE functions, one for the primary and one
     * for the secondary channel, so we need to call
     * pciide_mapregs_compat() with the real channel number.
     */
    if (pa->pa_function == 1) {
        sc->sc_cy_compatchan = 0;
    } else if (pa->pa_function == 2) {
        sc->sc_cy_compatchan = 1;
    } else {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unexpected PCI function %d\n", pa->pa_function);
        return;
    }
    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                           "bus-master DMA support present\n");
        pciide_mapreg_dma(sc, pa);
    } else {
        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                          "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;
    }

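    /*
     * Map the cy82c693 hyperCache control registers; DMA is disabled
     * if they cannot be mapped.
     */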
    sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
    if (sc->sc_cy_handle == NULL) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unable to map hyperCache control registers\n");
        sc->sc_dma_ok = 0;
    }

    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = cy693_setup_channel;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Only one channel for this chip; if we are here it's enabled */
    cp = &sc->pciide_channels[0];
    sc->wdc_chanarray[0] = &cp->ata_channel;
    cp->name = PCIIDE_CHANNEL_NAME(0);
    cp->ata_channel.ch_channel = 0;
    cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
    cp->ata_channel.ch_queue =
        malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
    if (cp->ata_channel.ch_queue == NULL) {
        aprint_error("%s primary channel: "
                     "can't allocate memory for command queue\n",
                     device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
        return;
    }
    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                      "primary channel %s to ",
                      (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
                      "configured" : "wired");
    if (interface & PCIIDE_INTERFACE_PCI(0)) {
        aprint_normal("native-PCI mode\n");
        pciide_mapregs_native(pa, cp, pciide_pci_intr);
    } else {
        aprint_normal("compatibility mode\n");
        pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan);
        if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
            pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan);
    }
    wdcattach(&cp->ata_channel);
}
static void
cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	bus_size_t cmdsize, ctlsize;
	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
	int interface, one_channel;

	/*
	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface.
	 */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
		    CMD_CONF_DSA1)
			interface |= PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.ch_channel = channel;
	cp->wdc_channel.ch_wdc = &sc->sc_wdcdev;

	/*
	 * Older CMD64X controllers don't have independent channels.
	 */
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_CMDTECH_649:
		one_channel = 0;
		break;
	default:
		one_channel = 1;
		break;
	}

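	/*
	 * Channels that are not independent share the primary channel's
	 * command queue.
	 */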
	if (channel > 0 && one_channel) {
		cp->wdc_channel.ch_queue =
		    sc->pciide_channels[0].wdc_channel.ch_queue;
	} else {
		cp->wdc_channel.ch_queue =
		    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	}
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return;
	}

	aprint_normal("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	/*
	 * With a CMD PCI64x, if we get here the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device.
	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
		aprint_normal("%s: %s channel ignored (disabled)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->wdc_channel.ch_flags |= WDCF_DISABLED;
		return;
	}

	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
}
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		if (rev >= 0x20) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			 "using PIO transfers above 137GB as workaround for "
			 "48bit DMA access bug, expect reduced performance\n");
		}
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

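	/*
	 * Enable DMA in the CD-ROM control register and make sure the
	 * FIFO is not disabled.
	 */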
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	
	{
		/*
		 * Some BIOSes (port-cats ABLE) enable native mode but don't
		 * set up everything correctly, so allow forcing compat mode.
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
				device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
				"ali1543-ide-force-compat-mode",
				&force_compat_mode);
		if (property_is_set && force_compat_mode) {
			cr &= ~((PCIIDE_INTERFACE_PCI(0)
				| PCIIDE_INTERFACE_PCI(1))
				<< PCI_INTERFACE_SHIFT);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* Newer controllers seem to lack the ACER_CHIDS.  Sigh. */
		pciide_mapchan(pa, cp, interface,
		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}
static void
sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, scs_cmd, cfgctl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

#define	SII3112_RESET_BITS						\
	(SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET |			\
	 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET |			\
	 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)

	/*
	 * Reset everything and then unblock all of the interrupts.
	 */
	scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD);
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
		       scs_cmd | SII3112_RESET_BITS);
	delay(50 * 1000);
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
		       scs_cmd & SCS_CMD_BA5_EN);
	delay(50 * 1000);

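	/*
	 * If the BA5 register space is enabled, map it; otherwise enable
	 * indirect access to it through PCI configuration space.
	 */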
	if (scs_cmd & SCS_CMD_BA5_EN) {
		aprint_verbose("%s: SATALink BA5 register space enabled\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
				   PCI_MAPREG_TYPE_MEM|
				   PCI_MAPREG_MEM_TYPE_32BIT, 0,
				   &sc->sc_ba5_st, &sc->sc_ba5_sh,
				   NULL, NULL) != 0)
			aprint_error("%s: unable to map SATALink BA5 "
			    "register space\n", sc->sc_wdcdev.sc_dev.dv_xname);
		else
			sc->sc_ba5_en = 1;
	} else {
		aprint_verbose("%s: SATALink BA5 register space disabled\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);

		cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag,
				       SII3112_PCI_CFGCTL);
		pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL,
			       cfgctl | CFGCTL_BA5INDEN);
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * Rev. <= 0x01 of the 3112 have a bug that can cause data
	 * corruption if DMA transfers cross an 8K boundary.  This is
	 * apparently hard to tickle, but we'll go ahead and play it
	 * safe.
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x01) {
		sc->sc_dma_maxsegsz = 8192;
		sc->sc_dma_boundary = 8192;
	}

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sii3112_setup_channel;

	/* We can use SControl and SStatus to probe for drives. */
	sc->sc_wdcdev.drv_probe = sii3112_drv_probe;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* 
	 * The 3112 either identifies itself as a RAID storage device
	 * or a Misc storage device.  Fake up the interface bits for
	 * what our driver expects.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}
static void
ite_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface;
	pcireg_t cfg, modectl;

	/* fake interface since IT8212 claims to be a RAID device */
	interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
	    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_set_modes = ite_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/* Disable RAID */
	modectl &= ~IT_MODE_RAID1;
	/* Disable CPU firmware mode */
	modectl &= ~IT_MODE_CPU;

	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
	/* Re-read configuration registers after channels setup */
	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);
}