Esempio n. 1
0
/*
 * Attach the IDE function of a UPC (universal peripheral controller).
 * Sets up a single-channel wdc instance, allocates its command queue,
 * attaches the channel via the MI wdcattach() and finally hooks the
 * interrupt.
 */
static void
wdc_upc_attach(struct device *parent, struct device *self, void *aux)
{
	struct wdc_upc_softc *sc = (struct wdc_upc_softc *)self;
	struct upc_attach_args *ua = aux;

	/* Controller capabilities: 16-bit data transfers, PIO only. */
	sc->sc_wdc.cap = WDC_CAPABILITY_DATA16;
	sc->sc_wdc.PIO_cap = 1; /* XXX ??? */
	sc->sc_wdc.DMA_cap = 0;
	sc->sc_wdc.UDMA_cap = 0;
	sc->sc_wdc.nchannels = 1;
	/* Single channel: the channel list points at one element. */
	sc->sc_chanptr = &sc->sc_channel;
	sc->sc_wdc.channels = &sc->sc_chanptr;
	/* Command/control handles were already mapped by the parent bus. */
	sc->sc_channel.cmd_iot = ua->ua_iot;
	sc->sc_channel.cmd_ioh = ua->ua_ioh;
	sc->sc_channel.ctl_iot = ua->ua_iot;
	sc->sc_channel.ctl_ioh = ua->ua_ioh2;
	sc->sc_channel.channel = 0;
	sc->sc_channel.wdc = &sc->sc_wdc;
	sc->sc_channel.ch_queue = malloc(sizeof(struct channel_queue),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_channel.ch_queue == NULL) {
		printf("%s: can't allocate memory for command queue\n",
		sc->sc_wdc.sc_dev.dv_xname);
		return;
	}
	printf("\n");
	wdcattach(&sc->sc_channel);

	/* Interrupt is hooked only once the channel is fully set up. */
	upc_intr_establish(ua->ua_irqhandle, IPL_BIO, wdcintr,
			   &sc->sc_channel);
}
Esempio n. 2
0
/*
 * Attach the wdc interface behind the SPD controller: configure a
 * single two-drive channel, set up the bus space accessors, hook the
 * HDD interrupt, enable the interface and hand the channel to the MI
 * wdc layer.
 */
void
wdc_spd_attach(device_t parent, device_t self, void *aux)
{
	struct spd_attach_args *spa = aux;
	struct wdc_spd_softc *sc = device_private(self);
	struct wdc_softc *wdc = &sc->sc_wdcdev;
	struct ata_channel *ch = &sc->sc_channel;

	aprint_normal(": %s\n", spa->spa_product_name);

	/* Hook up the device and its register block. */
	wdc->sc_atac.atac_dev = self;
	wdc->regs = &sc->sc_wdc_regs;

	/* Capabilities: DMA, UDMA, 16-bit data; PIO mode 0. */
	wdc->sc_atac.atac_cap =
	    ATAC_CAP_DMA | ATAC_CAP_UDMA | ATAC_CAP_DATA16;
	wdc->sc_atac.atac_pio_cap = 0;

	/* Exactly one channel with up to two drives. */
	sc->sc_chanlist[0] = ch;
	wdc->sc_atac.atac_channels = sc->sc_chanlist;
	wdc->sc_atac.atac_nchannels = 1;
	ch->ch_channel = 0;
	ch->ch_atac = &wdc->sc_atac;
	ch->ch_queue = &sc->sc_chqueue;
	ch->ch_ndrive = 2;

	__wdc_spd_bus_space(ch);

	spd_intr_establish(SPD_HDD, wdcintr, ch);

	__wdc_spd_enable();

	wdcattach(ch);
}
Esempio n. 3
0
void
wdc_isa_attach(struct device *parent, struct device *self, void *aux)
{
	struct wdc_isa_softc *sc = (void *)self;
	struct isa_attach_args *ia = aux;

	printf("\n");

	sc->wdc_channel.cmd_iot = ia->ia_iot;
	sc->wdc_channel.ctl_iot = ia->ia_iot;
	sc->sc_ic = ia->ia_ic;
	sc->sc_isa = parent;

	if (bus_space_map(sc->wdc_channel.cmd_iot, ia->ia_iobase,
	    WDC_ISA_REG_NPORTS, 0, &sc->wdc_channel.cmd_ioh) ||
	    bus_space_map(sc->wdc_channel.ctl_iot,
	      ia->ia_iobase + WDC_ISA_AUXREG_OFFSET, WDC_ISA_AUXREG_NPORTS,
	      0, &sc->wdc_channel.ctl_ioh)) {
		printf("%s: couldn't map registers\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
	}
	sc->wdc_channel.data32iot = sc->wdc_channel.cmd_iot;
	sc->wdc_channel.data32ioh = sc->wdc_channel.cmd_ioh;

	sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE,
	    IPL_BIO, wdcintr, &sc->wdc_channel, sc->sc_wdcdev.sc_dev.dv_xname);

	if (ia->ia_drq != DRQUNK) {
#if NISADMA > 0
		sc->sc_drq = ia->ia_drq;

		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		sc->sc_wdcdev.dma_arg = sc;
		sc->sc_wdcdev.dma_init = wdc_isa_dma_init;
		sc->sc_wdcdev.dma_start = wdc_isa_dma_start;
		sc->sc_wdcdev.dma_finish = wdc_isa_dma_finish;
		wdc_isa_dma_setup(sc);
#else	/* NISADMA > 0 */
		printf("%s: ignoring drq, isa dma not supported",
		    sc->sc_wdcdev.sc_dev.dv_xname);
#endif	/* NISADMA > 0 */
	}
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_PREATA;
	if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_32)
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA32;
	sc->sc_wdcdev.PIO_cap = 0;
	sc->wdc_chanptr = &sc->wdc_channel;
	sc->sc_wdcdev.channels = &sc->wdc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	sc->wdc_channel.channel = 0;
	sc->wdc_channel.wdc = &sc->sc_wdcdev;
	sc->wdc_channel.ch_queue = wdc_alloc_queue();
	if (sc->wdc_channel.ch_queue == NULL) {
		printf("%s: cannot allocate channel queue",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
	wdcattach(&sc->wdc_channel);
	wdc_print_current_modes(&sc->wdc_channel);
}
/*
 * Map the registers of one VIA channel and attach it.  The channel
 * runs either in compatibility mode, in native mode, or — when the
 * "use-compat-native-irq" device property is set — in native mode
 * using the legacy compat interrupts.
 */
static void
via_mapchan(const struct pci_attach_args *pa,	struct pciide_channel *cp,
    pcireg_t interface, int (*pci_intr)(void *))
{
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(wdc_cp);
	prop_bool_t compat_nat_enable;

	compat_nat_enable = prop_dictionary_get(
	    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
	      "use-compat-native-irq");

	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->ch_channel)) == 0) {
		/* compatibility mode */
		pciide_mapregs_compat(pa, cp, wdc_cp->ch_channel);
		if ((wdc_cp->ch_flags & ATACH_DISABLED) == 0)
			pciide_map_compat_intr(pa, cp, wdc_cp->ch_channel);
	} else if (compat_nat_enable != NULL &&
	    prop_bool_true(compat_nat_enable)) {
		/* native mode, but with irq 14/15 requested by property */
		via_mapregs_compat_native(pa, cp);
	} else {
		/* plain native mode */
		pciide_mapregs_native(pa, cp, pci_intr);
	}
	wdcattach(wdc_cp);
}
Esempio n. 5
0
/*
 * Attach an IDE controller discovered via the PNP BIOS: map the
 * command and control register blocks, carve out the individual
 * command registers, hook the interrupt and attach the single
 * channel.
 *
 * Fix: abort the attach when the register mapping fails — the code
 * previously printed the error and then went on to subregion the
 * unmapped command-block handle.
 */
static void
wdc_pnpbus_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_pnpbus_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct pnpbus_dev_attach_args *pna = aux;
	int cmd_iobase, cmd_len, aux_iobase, aux_len, i;

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	wdr->cmd_iot = pna->pna_iot;
	wdr->ctl_iot = pna->pna_iot;
	/* cmd_len sizes the register subregion loop below. */
	pnpbus_getioport(&pna->pna_res, 0, &cmd_iobase, &cmd_len);
	pnpbus_getioport(&pna->pna_res, 1, &aux_iobase, &aux_len);

	if (pnpbus_io_map(&pna->pna_res, 0, &wdr->cmd_iot, &wdr->cmd_baseioh) ||
	    pnpbus_io_map(&pna->pna_res, 1, &wdr->ctl_iot, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		/* Registers are unusable; don't touch the handles. */
		return;
	}

	/* Data register (i == 0) is 4 bytes wide, the others 1 byte. */
	for (i = 0; i < cmd_len; i++) {
		if (bus_space_subregion(wdr->cmd_iot,
		      wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
		      &wdr->cmd_iohs[i]) != 0) {
			aprint_error(": couldn't subregion registers\n");
			return;
		}
	}

	/* 32-bit transfers go through the data register mapping. */
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];

	/* Pre-ATA, 16-bit transfers; 32-bit if enabled in config flags. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_PREATA;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
	    WDC_OPTIONS_32)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA32;

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	/* One channel, two drives. */
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;
	sc->sc_channel.ch_ndrive = 2;
	wdc_init_shadow_regs(&sc->sc_channel);

	sc->sc_ih = pnpbus_intr_establish(0, IPL_BIO, IST_PNP,
	    wdcintr, &sc->sc_channel, &pna->pna_res);

	aprint_normal("\n");
	wdcattach(&sc->sc_channel);
}
Esempio n. 6
0
/*
 * Attach a wdc controller on the on-board I/O bus: map the command
 * and control register blocks, carve out the individual command
 * registers, hook the interrupt and attach the single channel.
 */
static void
wdc_obio_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_obio_softc *sc = device_private(self);
	struct obio_attach_args *oa = aux;
	struct wdc_regs *wdr;
	int i;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	/* Command and control blocks share the same I/O tag. */
	wdr->cmd_iot = oa->oa_iot;
	wdr->ctl_iot = oa->oa_iot;
	if (bus_space_map(wdr->cmd_iot, oa->oa_io[0].or_addr,
	    WDC_OBIO_REG_SIZE, 0, &wdr->cmd_baseioh)
	 || bus_space_map(wdr->ctl_iot,
	    oa->oa_io[0].or_addr + WDC_OBIO_AUXREG_OFFSET,
	    WDC_OBIO_AUXREG_SIZE, 0, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	/*
	 * Registers are spaced two bytes apart (offset i * 2); the data
	 * register (i == 0) is 2 bytes wide, the others 1 byte.
	 */
	for (i = 0; i < WDC_OBIO_REG_NPORTS; i++) {
		if (bus_space_subregion(wdr->cmd_iot,
		      wdr->cmd_baseioh, i * 2, (i == 0) ? 2 : 1,
		      &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(self,
			    "couldn't subregion registers\n");
			return;
		}
	}

	sc->sc_ih = obio_intr_establish(oa->oa_irq[0].or_irq, IPL_BIO, wdcintr,
	    &sc->sc_channel);

	/* Pre-ATA device, 16-bit data transfers, PIO mode 0 only. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_PREATA;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	/* One channel with up to two drives. */
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;
	sc->sc_channel.ch_ndrive = 2;

	wdc_init_shadow_regs(&sc->sc_channel);

	wdcattach(&sc->sc_channel);
}
Esempio n. 7
0
/*
 * Map one channel of the SiI SATALink controller.  All channel
 * register files live in the shared "ba5" window; the channel's
 * task-file and control registers are carved out of it with
 * subregions.  On any failure the channel is flagged disabled
 * instead of being attached.
 */
static void
sii3114_mapchan(struct pciide_channel *cp)
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.ch_wdc;
	struct wdc_channel *wdc_cp = &cp->wdc_channel;
	int i;

	cp->compat = 0;
	cp->ih = sc->sc_pci_ih;	/* all channels share the PCI interrupt */

	wdc_cp->cmd_iot = sc->sc_ba5_st;
	if (bus_space_subregion(sc->sc_ba5_st, sc->sc_ba5_sh,
			satalink_ba5_regmap[wdc_cp->ch_channel].ba5_IDE_TF0,
			9, &wdc_cp->cmd_baseioh) != 0) {
		aprint_error("%s: couldn't subregion %s cmd base\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		goto bad;
	}

	wdc_cp->ctl_iot = sc->sc_ba5_st;
	if (bus_space_subregion(sc->sc_ba5_st, sc->sc_ba5_sh,
			satalink_ba5_regmap[wdc_cp->ch_channel].ba5_IDE_TF8,
			1, &cp->ctl_baseioh) != 0) {
		aprint_error("%s: couldn't subregion %s ctl base\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		goto bad;
	}
	wdc_cp->ctl_ioh = cp->ctl_baseioh;

	/* Data register (i == 0) is 4 bytes wide, the others 1 byte. */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdc_cp->cmd_iot, wdc_cp->cmd_baseioh,
					i, i == 0 ? 4 : 1,
					&wdc_cp->cmd_iohs[i]) != 0) {
			aprint_error("%s: couldn't subregion %s channel "
				     "cmd regs\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			goto bad;
		}
	}
	/* 32-bit data transfers go through the data register mapping. */
	wdc_cp->data32iot = wdc_cp->cmd_iot;
	wdc_cp->data32ioh = wdc_cp->cmd_iohs[0];
	wdcattach(wdc_cp);
	return;

 bad:
	cp->wdc_channel.ch_flags |= WDCF_DISABLED;
}
Esempio n. 8
0
/*
 * Attach a wdc controller on obio: map the command/control register
 * blocks, establish the interrupt, allocate the command queue and
 * attach the single channel.  The interrupt is torn down again if
 * the queue allocation fails.
 */
void
wdc_obio_attach(struct device *parent, struct device *self, void *aux)
{
	struct wdc_obio_softc *sc = (void *)self;
	struct obio_attach_args *oa = aux;
	struct channel_softc *chp = &sc->sc_channel;

	printf("\n");

	chp->cmd_iot = chp->ctl_iot = oa->oa_iot;
	chp->_vtbl = &wdc_obio_vtbl;	/* bus-specific register access ops */

	if (bus_space_map(chp->cmd_iot, oa->oa_io[0].or_addr,
	    WDC_OBIO_REG_SIZE, 0, &chp->cmd_ioh)
	 || bus_space_map(chp->ctl_iot,
	    oa->oa_io[0].or_addr + WDC_OBIO_AUXREG_OFFSET,
	    WDC_OBIO_AUXREG_SIZE, 0, &chp->ctl_ioh)) {
		printf(": couldn't map registers\n");
		return;
	}

	sc->sc_ih = obio_intr_establish(oa->oa_irq[0].or_irq, IPL_BIO, wdcintr,
	    chp, self->dv_xname);

	/* Pre-ATA device, 16-bit transfers, PIO mode 0; one channel. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_PREATA;
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_chanptr = chp;
	sc->sc_wdcdev.channels = &sc->sc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	chp->channel = 0;
	chp->wdc = &sc->sc_wdcdev;

	chp->ch_queue = wdc_alloc_queue();
	if (chp->ch_queue == NULL) {
		printf("%s: cannot allocate channel queue\n",
		    self->dv_xname);
		/* Undo the interrupt hookup before giving up. */
		obio_intr_disestablish(sc->sc_ih);
		return;
	}

	wdcattach(chp);
	wdc_print_current_modes(chp);
}
Esempio n. 9
0
/*
 * Attach the IDE function of a UPC (universal peripheral controller):
 * set up the single channel, subregion the command registers, hook
 * the interrupt and call the MI wdcattach().
 */
static void
wdc_upc_attach(device_t parent, device_t self, void *aux)
{
    struct wdc_upc_softc *sc = device_private(self);
    struct wdc_regs *wdr;
    struct upc_attach_args *ua = aux;
    int i;

    sc->sc_wdc.sc_atac.atac_dev = self;
    sc->sc_wdc.regs = wdr = &sc->sc_wdc_regs;

    /* Capabilities: 16-bit data transfers; PIO mode 1. */
    sc->sc_wdc.sc_atac.atac_cap = ATAC_CAP_DATA16;
    sc->sc_wdc.sc_atac.atac_pio_cap = 1; /* XXX ??? */
    sc->sc_wdc.sc_atac.atac_nchannels = 1;
    sc->sc_chanlist[0] = &sc->sc_channel;
    sc->sc_wdc.sc_atac.atac_channels = sc->sc_chanlist;
    /* Command/control handles were already mapped by the parent bus. */
    wdr->cmd_iot = ua->ua_iot;
    wdr->cmd_baseioh = ua->ua_ioh;
    wdr->ctl_iot = ua->ua_iot;
    wdr->ctl_ioh = ua->ua_ioh2;
    sc->sc_channel.ch_channel = 0;
    sc->sc_channel.ch_atac = &sc->sc_wdc.sc_atac;
    sc->sc_channel.ch_queue = &sc->sc_chqueue;
    sc->sc_channel.ch_ndrive = 2;
    /* Data register (i == 0) is 4 bytes wide, the others 1 byte. */
    for (i = 0; i < WDC_NREG; i++) {
        if (bus_space_subregion(ua->ua_iot, ua->ua_ioh, i,
                                i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
            aprint_error_dev(sc->sc_wdc.sc_atac.atac_dev,
                             "can't subregion I/O space\n");
            return;
        }
    }
    wdc_init_shadow_regs(&sc->sc_channel);

    upc_intr_establish(ua->ua_irqhandle, IPL_BIO, wdcintr,
                       &sc->sc_channel);

    aprint_normal("\n");
    aprint_naive("\n");

    wdcattach(&sc->sc_channel);
}
/*
 * Attach the Gayle IDE interface of an Amiga: map the register window
 * through Zorro II space, carve out the command and control registers,
 * install the interrupt service routine and attach the single channel.
 *
 * Fix: the control-register subregion failure used to return silently,
 * leaking the 0x40-byte command mapping and printing no diagnostic;
 * it now unmaps and reports like the failure path above it.
 */
void
wdc_amiga_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_amiga_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	int i;

	aprint_normal("\n");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	gayle_init();

	/* The IDE register base differs on A4000-class machines. */
	if (is_a4000()) {
		sc->cmd_iot.base = (bus_addr_t) ztwomap(GAYLE_IDE_BASE_A4000 + 2);
	} else {
		sc->cmd_iot.base = (bus_addr_t) ztwomap(GAYLE_IDE_BASE + 2);
	}

	/* Both blocks use the stride-4, byte-swapped access methods. */
	sc->cmd_iot.absm = sc->ctl_iot.absm = &amiga_bus_stride_4swap;
	wdr->cmd_iot = &sc->cmd_iot;
	wdr->ctl_iot = &sc->ctl_iot;

	if (bus_space_map(wdr->cmd_iot, 0, 0x40, 0,
			  &wdr->cmd_baseioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	/* Data register (i == 0) is 4 bytes wide, the others 1 byte. */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot,
		    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
		    &wdr->cmd_iohs[i]) != 0) {

			bus_space_unmap(wdr->cmd_iot,
			    wdr->cmd_baseioh, 0x40);
			aprint_error_dev(self, "couldn't map registers\n");
			return;
		}
	}

	if (bus_space_subregion(wdr->cmd_iot,
	    wdr->cmd_baseioh, 0x406, 1, &wdr->ctl_ioh)) {
		/* Release the command-block mapping before bailing out. */
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 0x40);
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	/* Capabilities: 16-bit data, PIO mode 0; one channel, two drives. */
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;

	wdc_init_shadow_regs(&sc->sc_channel);

	sc->sc_isr.isr_intr = wdc_amiga_intr;
	sc->sc_isr.isr_arg = sc;
	sc->sc_isr.isr_ipl = 2;
	add_isr (&sc->sc_isr);

	/* Non-A4000 machines need the Gayle IDE interrupt unmasked. */
	if (!is_a4000())
		gayle_intr_enable_set(GAYLE_INT_IDE);

	wdcattach(&sc->sc_channel);
}
Esempio n. 11
0
/*
 * Map and attach the SATA channels of a VIA VT6421.  Per-channel SATA
 * status/error/control registers live in BAR5 (64 bytes per channel);
 * each channel's ATA task file lives in its own I/O BAR.  All channels
 * share one native-PCI interrupt.
 */
static void
via_sata_chip_map_new(struct pciide_softc *sc,
    const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	int channel;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int i;

	if (pciide_chipen(sc, pa) == 0)
		return;

	sc->sc_apo_regbase = APO_VIA_VT6421_REGBASE;

	/*
	 * NOTE(review): attach continues even when the BAR5 mapping
	 * fails — confirm the per-channel subregion calls below fail
	 * safely in that case.
	 */
	if (pci_mapreg_map(pa, PCI_BAR(5), PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, &sc->sc_ba5_ss) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map SATA regs\n");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	via_vt6421_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	/* PIO 4 always; DMA 2 / UDMA 6 only if bus-master DMA mapped. */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
	}
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;

	/* Three channels, two drives each. */
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 3;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/* One shared interrupt for all channels. */
	if (pci_intr_map(pa, &intrhandle) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map native-PCI interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
	    intrhandle, IPL_BIO, pciide_pci_intr, sc);
	if (sc->sc_pci_ih == NULL) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't establish native-PCI interrupt");
		if (intrstr != NULL)
		    aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "using %s for native-PCI interrupt\n",
	    intrstr ? intrstr : "unknown interrupt");

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (via_vt6421_chansetup(sc, channel) == 0)
			continue;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);

		/*
		 * SATA status/error/control live in BAR5 at offset
		 * channel * 0x40, one dword each at 0x0/0x4/0x8.
		 */
		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << 6) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}

		/* Task-file registers come from the channel's own BAR. */
		if (pci_mapreg_map(pa, PCI_BAR(wdc_cp->ch_channel),
		    PCI_MAPREG_TYPE_IO, 0, &wdr->cmd_iot, &wdr->cmd_baseioh,
		    NULL, &wdr->cmd_ios) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map %s channel regs\n", cp->name);
		}
		wdr->ctl_iot = wdr->cmd_iot;
		/* Data register (i == 0) is 4 bytes wide, the rest 1. */
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(
				    sc->sc_wdcdev.sc_atac.atac_dev,
				    "couldn't subregion %s "
				    "channel cmd regs\n", cp->name);
				return;
			}
		}
		/* Control register sits 2 bytes past the command block. */
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    WDC_NREG + 2, 1,  &wdr->ctl_ioh) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d ctl regs\n", channel);
			return;
		}
		wdc_init_shadow_regs(wdc_cp);
		wdr->data32iot = wdr->cmd_iot;
		wdr->data32ioh = wdr->cmd_iohs[wd_data];
		wdcattach(wdc_cp);
	}
}
Esempio n. 12
0
/*
 * Attach a wdc controller on the macppc on-board bus.  Maps the
 * command registers, optionally sets up DBDMA-based DMA (when the
 * firmware node carries enough "reg" entries), hooks the interrupt,
 * and attaches the single channel.  ata-4/ata-6 nodes additionally
 * get UDMA and per-cell timing routines.
 *
 * Fixes: the DMA map created at the top is now destroyed on every
 * subsequent error return (it was leaked before), and the command
 * queue error message is newline-terminated.
 */
void
wdc_obio_attach(struct device *parent, struct device *self, void *aux)
{
	struct wdc_obio_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct channel_softc *chp = &sc->wdc_channel;
	int intr, error;
	bus_addr_t cmdbase;

	/* Nodes with >= 16 "reg" cells are assumed DMA capable. */
	sc->sc_use_dma = 0;
	if (ca->ca_nreg >= 16)
		sc->sc_use_dma = 1;	/* Enable dma */

	sc->sc_dmat = ca->ca_dmat;
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    WDC_DMALIST_MAX * DBDMA_COUNT_MAX, WDC_DMALIST_MAX,
	    DBDMA_COUNT_MAX, NBPG, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf(": cannot create dma map, error = %d\n", error);
		return;
	}

	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		return;
	}

	if (sc->sc_use_dma)
		printf(": DMA");

	printf("\n");

	chp->cmd_iot = chp->ctl_iot = ca->ca_iot;
	chp->_vtbl = &wdc_obio_vtbl;	/* bus-specific register access ops */

	cmdbase = ca->ca_reg[0];
	sc->sc_cmdsize = ca->ca_reg[1];

	if (bus_space_map(chp->cmd_iot, cmdbase, sc->sc_cmdsize, 0,
	    &chp->cmd_ioh) || bus_space_subregion(chp->cmd_iot, chp->cmd_ioh,
	    /* WDC_AUXREG_OFFSET<<4 */ 0x160, 1, &chp->ctl_ioh)) {
		printf("%s: couldn't map registers\n",
			sc->sc_wdcdev.sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		return;
	}
	chp->data32iot = chp->cmd_iot;
	chp->data32ioh = chp->cmd_ioh;

	sc->sc_ih = mac_intr_establish(parent, intr, IST_LEVEL, IPL_BIO,
	    wdcintr, chp, sc->sc_wdcdev.sc_dev.dv_xname);

	sc->sc_wdcdev.set_modes = wdc_obio_adjust_timing;
	if (sc->sc_use_dma) {
		/* One extra descriptor for the DBDMA stop command. */
		sc->sc_dbdma = dbdma_alloc(sc->sc_dmat, WDC_DMALIST_MAX + 1);
		sc->sc_dmacmd = sc->sc_dbdma->d_addr;

		sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
		    sc->sc_dmasize = ca->ca_reg[3]);

		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		sc->sc_wdcdev.DMA_cap = 2;
		/* Newer cells support UDMA and per-mode timing tuning. */
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA |
			    WDC_CAPABILITY_MODE;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.set_modes = wdc_obio_ata4_adjust_timing;
		}
		if (strcmp(ca->ca_name, "ata-6") == 0) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA |
			    WDC_CAPABILITY_MODE;
			sc->sc_wdcdev.UDMA_cap = 5;
			sc->sc_wdcdev.set_modes = wdc_obio_ata6_adjust_timing;
		}
	}
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->wdc_chanptr = chp;
	sc->sc_wdcdev.channels = &sc->wdc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->channel = 0;
	chp->wdc = &sc->sc_wdcdev;

	chp->ch_queue = malloc(sizeof(struct channel_queue), M_DEVBUF,
	    M_NOWAIT);
	if (chp->ch_queue == NULL) {
		printf("%s: can't allocate memory for command queue\n",
		sc->sc_wdcdev.sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		return;
	}

	wdcattach(chp);
	sc->sc_wdcdev.set_modes(chp);
	wdc_print_current_modes(chp);
}
Esempio n. 13
0
/*
 * Map and attach the single channel of a Cypress CY82C693 IDE
 * controller.  The chip exposes two PCI functions, one per channel;
 * the function number determines which compat channel this instance
 * drives.
 *
 * Fix: newline-terminate the command-queue allocation error message
 * (all sibling diagnostics end in '\n').
 */
static void
cy693_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
    struct pciide_channel *cp;
    pcireg_t interface = PCI_INTERFACE(pa->pa_class);

    if (pciide_chipen(sc, pa) == 0)
        return;

    /*
     * this chip has 2 PCI IDE functions, one for primary and one for
     * secondary. So we need to call pciide_mapregs_compat() with
     * the real channel
     */
    if (pa->pa_function == 1) {
        sc->sc_cy_compatchan = 0;
    } else if (pa->pa_function == 2) {
        sc->sc_cy_compatchan = 1;
    } else {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unexpected PCI function %d\n", pa->pa_function);
        return;
    }
    if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                           "bus-master DMA support present\n");
        pciide_mapreg_dma(sc, pa);
    } else {
        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                          "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;
    }

    /* The hyperCache control registers are needed for DMA setup. */
    sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
    if (sc->sc_cy_handle == NULL) {
        aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                         "unable to map hyperCache control registers\n");
        sc->sc_dma_ok = 0;
    }

    /* Capabilities: 16/32-bit data, PIO 4; DMA 2 when available. */
    sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
    if (sc->sc_dma_ok) {
        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
        sc->sc_wdcdev.irqack = pciide_irqack;
    }
    sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
    sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
    sc->sc_wdcdev.sc_atac.atac_set_modes = cy693_setup_channel;

    sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
    sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
    sc->sc_wdcdev.wdc_maxdrives = 2;

    wdc_allocate_regs(&sc->sc_wdcdev);

    /* Only one channel for this chip; if we are here it's enabled */
    cp = &sc->pciide_channels[0];
    sc->wdc_chanarray[0] = &cp->ata_channel;
    cp->name = PCIIDE_CHANNEL_NAME(0);
    cp->ata_channel.ch_channel = 0;
    cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
    cp->ata_channel.ch_queue =
        malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
    if (cp->ata_channel.ch_queue == NULL) {
        aprint_error("%s primary channel: "
                     "can't allocate memory for command queue\n",
                     device_xname(sc->sc_wdcdev.sc_atac.atac_dev));
        return;
    }
    aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                      "primary channel %s to ",
                      (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
                      "configured" : "wired");
    if (interface & PCIIDE_INTERFACE_PCI(0)) {
        aprint_normal("native-PCI mode\n");
        pciide_mapregs_native(pa, cp, pciide_pci_intr);
    } else {
        aprint_normal("compatibility mode\n");
        pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan);
        if ((cp->ata_channel.ch_flags & ATACH_DISABLED) == 0)
            pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan);
    }
    wdcattach(&cp->ata_channel);
}
/*
 * Map the registers of one Artisea DPA channel.  The whole channel
 * register file lives in the shared ba5 window; the command registers
 * are scattered, so they are located through artisea_dpa_cmd_map[].
 * The native-PCI interrupt is established on the first call and then
 * shared by all channels.  On failure the channel is marked disabled.
 *
 * NOTE(review): cmdsizep and ctlsizep are accepted but never used here.
 */
static void
artisea_mapregs(struct pci_attach_args *pa, struct pciide_channel *cp,
    bus_size_t *cmdsizep, bus_size_t *ctlsizep,
    int (*pci_intr)(void *))
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(&cp->ata_channel);
	struct ata_channel *wdc_cp = &cp->ata_channel;
	struct wdc_regs *wdr = CHAN_TO_WDC_REGS(wdc_cp);
	const char *intrstr;
	pci_intr_handle_t intrhandle;
	int i;

	cp->compat = 0;

	/* First channel through here hooks the shared interrupt. */
	if (sc->sc_pci_ih == NULL) {
		if (pci_intr_map(pa, &intrhandle) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map native-PCI interrupt\n");
			goto bad;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
		    intrhandle, IPL_BIO, pci_intr, sc);
		if (sc->sc_pci_ih != NULL) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "using %s for native-PCI interrupt\n",
			    intrstr ? intrstr : "unknown interrupt");
		} else {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't establish native-PCI interrupt");
			if (intrstr != NULL)
				aprint_normal(" at %s", intrstr);
			aprint_normal("\n");
			goto bad;
		}
	}
	cp->ih = sc->sc_pci_ih;
	/* The channel's 0x200-byte register file inside the ba5 window. */
	wdr->cmd_iot = sc->sc_ba5_st;
	if (bus_space_subregion (sc->sc_ba5_st, sc->sc_ba5_sh,
	    ARTISEA_DPA_PORT_BASE(wdc_cp->ch_channel), 0x200,
	    &wdr->cmd_baseioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel cmd regs\n", cp->name);
		goto bad;
	}

	wdr->ctl_iot = sc->sc_ba5_st;
	if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
	    ARTISEA_SUPDDCTLR, 1, &cp->ctl_baseioh) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map %s channel ctl regs\n", cp->name);
		goto bad;
	}
	wdr->ctl_ioh = cp->ctl_baseioh;

	/* Command registers are non-contiguous; use the layout table. */
	for (i = 0; i < WDC_NREG + 2; i++) {

		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    artisea_dpa_cmd_map[i].offset, artisea_dpa_cmd_map[i].size,
		    &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't subregion %s channel cmd regs\n",
			    cp->name);
			goto bad;
		}
	}
	/* 32-bit transfers go through the data register mapping. */
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];

	/* SATA status/error/control registers, also in the cmd block. */
	wdr->sata_iot = wdr->cmd_iot;
	wdr->sata_baseioh = wdr->cmd_baseioh;

	if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
	    ARTISEA_SUPERSET_DPA_OFF + ARTISEA_SUPDSSSR, 1,
	    &wdr->sata_status) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map channel %d sata_status regs\n",
		    wdc_cp->ch_channel);
		goto bad;
	}
	if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
	    ARTISEA_SUPERSET_DPA_OFF + ARTISEA_SUPDSSER, 1,
	    &wdr->sata_error) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map channel %d sata_error regs\n",
		    wdc_cp->ch_channel);
		goto bad;
	}
	if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
	    ARTISEA_SUPERSET_DPA_OFF + ARTISEA_SUPDSSCR, 1,
	    &wdr->sata_control) != 0) {
		aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "couldn't map channel %d sata_control regs\n",
		    wdc_cp->ch_channel);
		goto bad;
	}

	wdcattach(wdc_cp);
	return;

bad:
	wdc_cp->ch_flags |= ATACH_DISABLED;
	return;
}
Esempio n. 15
0
/*
 * Attach a RapIDE podule: map its control registers, verify the card
 * issue, then set up and attach both IDE channels.  Each channel gets
 * its own register mappings and interrupt handler; a channel that
 * fails to map is skipped rather than aborting the whole attach.
 *
 * Fix: on a command-register subregion failure, the old code did
 * `continue` inside the inner register loop after unmapping the
 * channel's registers, so the remaining iterations (and the final
 * wdcattach) kept using the now-unmapped handle.  The loop now breaks
 * out and the whole channel is skipped.
 */
void
rapide_attach(device_t parent, device_t self, void *aux)
{
	struct rapide_softc *sc = device_private(self);
	struct podule_attach_args *pa = (void *)aux;
	bus_space_tag_t iot;
	bus_space_handle_t ctlioh;
	u_int iobase;
	int channel, i;
	struct rapide_channel *rcp;
	struct ata_channel *cp;
	struct wdc_regs *wdr;
	irqhandler_t *ihp;

	/* Note the podule number and validate */
	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	set_easi_cycle_type(sc->sc_podule_number, EASI_CYCLE_TYPE_C);

	/*
	 * Duplicate the podule bus space tag and provide alternative
	 * bus_space_read_multi_4() and bus_space_write_multi_4()
	 * functions.
	 */
	rapide_bs_tag = *pa->pa_iot;
	rapide_bs_tag.bs_rm_4 = rapide_bs_rm_4;
	rapide_bs_tag.bs_wm_4 = rapide_bs_wm_4;
	sc->sc_ctliot = iot = &rapide_bs_tag;

	if (bus_space_map(iot, pa->pa_podule->easi_base +
	    CONTROL_REGISTERS_OFFSET, CONTROL_REGISTER_SPACE, 0, &ctlioh))
		panic("%s: Cannot map control registers", device_xname(self));

	sc->sc_ctlioh = ctlioh;
	sc->sc_version = bus_space_read_1(iot, ctlioh, VERSION_REGISTER_OFFSET) & VERSION_REGISTER_MASK;
/*	bus_space_unmap(iot, ctl_ioh, CONTROL_REGISTER_SPACE);*/

	aprint_normal(": Issue %d\n", sc->sc_version + 1);
	/* Only Issue 2 cards are driven further. */
	if (sc->sc_version != VERSION_2_ID)
		return;

	if (shutdownhook_establish(rapide_shutdown, (void *)sc) == NULL)
		panic("%s: Cannot install shutdown handler", device_xname(self));

	/* Set the interrupt info for this podule */
	sc->sc_podule->irq_addr = pa->pa_podule->easi_base
	    + CONTROL_REGISTERS_OFFSET + IRQ_REQUEST_REGISTER_BYTE_OFFSET;
	sc->sc_podule->irq_mask = IRQ_MASK;

	iobase = pa->pa_podule->easi_base;

	/* Fill in wdc and channel infos */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 2;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	for (channel = 0 ; channel < 2; channel++) {
		rcp = &sc->rapide_channels[channel];
		sc->sc_chanarray[channel] = &rcp->rc_channel;
		cp = &rcp->rc_channel;
		wdr = &sc->sc_wdc_regs[channel];

		cp->ch_channel = channel;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue = &rcp->rc_chqueue;
		wdr->cmd_iot = iot;
		wdr->ctl_iot = iot;
		wdr->data32iot = iot;

		if (bus_space_map(iot, iobase + rapide_info[channel].registers,
		    DRIVE_REGISTERS_SPACE, 0, &wdr->cmd_baseioh))
			continue;
		/* Data register (i == 0) is 4 bytes wide, the rest 1. */
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
				i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
				bus_space_unmap(iot, wdr->cmd_baseioh,
				    DRIVE_REGISTERS_SPACE);
				break;
			}
		}
		if (i < WDC_NREG)
			continue;	/* registers unmapped; skip channel */
		wdc_init_shadow_regs(cp);
		if (bus_space_map(iot, iobase +
		    rapide_info[channel].aux_register, 4, 0, &wdr->ctl_ioh)) {
			bus_space_unmap(iot, wdr->cmd_baseioh,
			   DRIVE_REGISTERS_SPACE);
			continue;
		}
		if (bus_space_map(iot, iobase +
		    rapide_info[channel].data_register, 4, 0, &wdr->data32ioh)) {
			bus_space_unmap(iot, wdr->cmd_baseioh,
			   DRIVE_REGISTERS_SPACE);
			bus_space_unmap(iot, wdr->ctl_ioh, 4);
			continue;
		}
		/* Disable interrupts and clear any pending interrupts */
		rcp->rc_irqmask = rapide_info[channel].irq_mask;
		sc->sc_intr_enable_mask &= ~rcp->rc_irqmask;
		bus_space_write_1(iot, sc->sc_ctlioh, IRQ_MASK_REGISTER_OFFSET,
		    sc->sc_intr_enable_mask);
		/* XXX - Issue 1 cards will need to clear any pending interrupts */
		ihp = &rcp->rc_ih;
		ihp->ih_func = rapide_intr;
		ihp->ih_arg = rcp;
		ihp->ih_level = IPL_BIO;
		ihp->ih_name = "rapide";
		ihp->ih_maskaddr = pa->pa_podule->irq_addr;
		ihp->ih_maskbits = rcp->rc_irqmask;
		if (irq_claim(sc->sc_podule->interrupt, ihp))
			panic("%s: Cannot claim interrupt %d",
			    device_xname(self), sc->sc_podule->interrupt);
		/* clear any pending interrupts and enable interrupts */
		sc->sc_intr_enable_mask |= rcp->rc_irqmask;
		bus_space_write_1(iot, sc->sc_ctlioh,
		    IRQ_MASK_REGISTER_OFFSET, sc->sc_intr_enable_mask);
		/* XXX - Issue 1 cards will need to clear any pending interrupts */
		wdcattach(cp);
	}
}
Esempio n. 16
0
/*
 * wdc_isapnp_attach:
 *	Attach an ISA Plug-and-Play IDE controller.  Validates the PnP
 *	resource set, maps the command/control I/O regions, describes
 *	the single ATA channel to the MI wdc layer and attaches it.
 */
static void
wdc_isapnp_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_isapnp_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct isapnp_attach_args *ipa = aux;
	int i;

	/*
	 * Expect exactly two I/O ranges (the 8-byte command block and
	 * the 2-byte control register), one IRQ, no memory ranges and
	 * at most one DRQ; anything else is an unknown configuration.
	 */
	if (ipa->ipa_nio != 2 ||
	    ipa->ipa_nmem != 0 ||
	    ipa->ipa_nmem32 != 0 ||
	    ipa->ipa_nirq != 1 ||
	    ipa->ipa_ndrq > 1) {
		aprint_error(": unexpected configuration\n");
		return;
	}

	if (isapnp_config(ipa->ipa_iot, ipa->ipa_memt, ipa)) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_normal(": %s %s\n", ipa->ipa_devident, ipa->ipa_devclass);

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->wdc_regs;
	wdr->cmd_iot = ipa->ipa_iot;
	wdr->ctl_iot = ipa->ipa_iot;
	/*
	 * An IDE controller can feed us the regions in any order.
	 * Use the 8-byte region as the command block and the other
	 * (2 byte) region as the control register.
	 */
	if (ipa->ipa_io[0].length == 8) {
		wdr->cmd_baseioh = ipa->ipa_io[0].h;
		wdr->ctl_ioh = ipa->ipa_io[1].h;
	} else {
		wdr->cmd_baseioh = ipa->ipa_io[1].h;
		wdr->ctl_ioh = ipa->ipa_io[0].h;
	}

	/* Carve the individual task-file registers out of the command block
	 * (offset 0 is the 4-byte data register, the rest are 1 byte). */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot,
		    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
		    &wdr->cmd_iohs[i]) != 0) {
			aprint_error(": couldn't subregion registers\n");
			return;
		}
	}
	/* 32-bit data transfers reuse the data register handle. */
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];

	/*
	 * NOTE(review): the interrupt is established before the
	 * ata_channel fields below are initialized; presumably no
	 * interrupt is delivered until wdcattach() starts driving the
	 * device -- TODO confirm for shared-IRQ configurations.
	 */
	sc->sc_ic = ipa->ipa_ic;
	sc->sc_ih = isa_intr_establish(ipa->ipa_ic, ipa->ipa_irq[0].num,
	    ipa->ipa_irq[0].type, IPL_BIO, wdcintr, &sc->ata_channel);

#ifdef notyet
	if (ipa->ipa_ndrq > 0) {
		sc->sc_drq = ipa->ipa_drq[0].num;

		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.dma_start = &wdc_isapnp_dma_start;
		sc->sc_wdcdev.dma_finish = &wdc_isapnp_dma_finish;
		wdc_isapnp_dma_setup(sc);
	}
#endif
	/* Describe the single channel to the MI ATA layer. */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->wdc_chanlist[0] = &sc->ata_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->ata_channel.ch_channel = 0;
	sc->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->ata_channel.ch_queue = &sc->wdc_chqueue;
	sc->ata_channel.ch_ndrive = 2;

	wdc_init_shadow_regs(&sc->ata_channel);

	wdcattach(&sc->ata_channel);
}
Esempio n. 17
0
/*
 * njata32_attach:
 *	Common attach routine for the Workbit NinjaATA-32 controller.
 *	Allocates and maps the scatter/gather DMA page, creates per-device
 *	transfer DMA maps, describes the single ATA channel to the MI wdc
 *	layer, maps the ATA registers and attaches the channel.  On failure
 *	the DMA resources acquired so far are released via the fail*
 *	labels (goto-based cleanup, unwinding in reverse order).
 */
void
njata32_attach(struct njata32_softc *sc)
{
	bus_addr_t dmaaddr;
	int i, devno, error;
	struct wdc_regs *wdr;

	/*
	 * allocate DMA resource
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), PAGE_SIZE, 0,
	    &sc->sc_sgt_seg, 1, &sc->sc_sgt_nsegs, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_sgt_seg,
	    sc->sc_sgt_nsegs, sizeof(struct njata32_dma_page),
	    (void **)&sc->sc_sgtpg,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail1;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), 1,
	    sizeof(struct njata32_dma_page), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_sgt)) != 0) {
		aprint_error("%s: unable to create sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail2;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_sgt,
	    sc->sc_sgtpg, sizeof(struct njata32_dma_page),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail3;
	}

	dmaaddr = sc->sc_dmamap_sgt->dm_segs[0].ds_addr;

	/* Per-device scatter/gather tables and transfer maps. */
	for (devno = 0; devno < NJATA32_NUM_DEV; devno++) {
		sc->sc_dev[devno].d_sgt = sc->sc_sgtpg->dp_sg[devno];
		sc->sc_dev[devno].d_sgt_dma = dmaaddr +
		    offsetof(struct njata32_dma_page, dp_sg[devno]);

		error = bus_dmamap_create(sc->sc_dmat,
		    NJATA32_MAX_XFER,		/* max total map size */
		    NJATA32_NUM_SG,		/* max number of segments */
		    NJATA32_SGT_MAXSEGLEN,	/* max size of a segment */
		    0,				/* boundary */
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_dev[devno].d_dmamap_xfer);
		if (error) {
			aprint_error("%s: failed to create DMA map "
			    "(error = %d)\n", NJATA32NAME(sc), error);
			goto fail4;
		}
	}

	/* device properties */
	sc->sc_wdcdev.sc_atac.atac_cap =
	    ATAC_CAP_DATA16 | ATAC_CAP_DATA32 | ATAC_CAP_PIOBM;
	sc->sc_wdcdev.irqack = njata32_irqack;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = NJATA32_NCHAN;	/* 1 */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = NJATA32_MODE_MAX_PIO;
#if 0	/* ATA DMA is currently unused */
	sc->sc_wdcdev.sc_atac.atac_dma_cap = NJATA32_MODE_MAX_DMA;
#endif
	sc->sc_wdcdev.sc_atac.atac_set_modes = njata32_setup_channel;

	/* DMA control functions */
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = njata32_dma_init;
	sc->sc_wdcdev.piobm_start = njata32_piobm_start;
	sc->sc_wdcdev.dma_finish = njata32_dma_finish;
	sc->sc_wdcdev.piobm_done = njata32_piobm_done;

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_EXTRA_RESETS;

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	/* only one channel */
	sc->sc_wdc_chanarray[0] = &sc->sc_ch[0].ch_ata_channel;
	sc->sc_ch[0].ch_ata_channel.ch_channel = 0;
	sc->sc_ch[0].ch_ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_ch[0].ch_ata_channel.ch_queue = &sc->sc_wdc_chqueue;
	sc->sc_ch[0].ch_ata_channel.ch_ndrive = 2; /* max number of drives */

	/* map ATA registers (data register is 4 bytes wide, others 1) */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
		    NJATA32_OFFSET_WDCREGS + i,
		    i == wd_data ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error("%s: couldn't subregion cmd regs\n",
			    NJATA32NAME(sc));
			goto fail4;
		}
	}
	wdc_init_shadow_regs(&sc->sc_ch[0].ch_ata_channel);
	wdr->data32iot = NJATA32_REGT(sc);
	wdr->data32ioh = wdr->cmd_iohs[wd_data];

	/* map ATA ctl reg */
	wdr->ctl_iot = NJATA32_REGT(sc);
	if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
	    NJATA32_REG_WD_ALTSTATUS, 1, &wdr->ctl_ioh) != 0) {
		aprint_error("%s: couldn't subregion ctl regs\n",
		    NJATA32NAME(sc));
		goto fail4;
	}

	sc->sc_flags |= NJATA32_CMDPG_MAPPED;

	/* use flags value as busmaster wait */
	if ((sc->sc_atawait =
	    (uint8_t)device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags))
		aprint_normal("%s: ATA wait = %#x\n",
		    NJATA32NAME(sc), sc->sc_atawait);

	njata32_init(sc, cold);

	wdcattach(&sc->sc_ch[0].ch_ata_channel);

	return;

	/*
	 * cleanup
	 */
	/*
	 * fail4 relies on devno: entered from inside the loop, the current
	 * map was not created, so pre-decrement unwinds only the earlier
	 * ones; entered after the loop, devno == NJATA32_NUM_DEV and all
	 * maps are destroyed.
	 */
fail4:	while (--devno >= 0) {
		bus_dmamap_destroy(sc->sc_dmat,
		    sc->sc_dev[devno].d_dmamap_xfer);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_sgt);
fail3:	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_sgt);
fail2:	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_sgtpg,
	    sizeof(struct njata32_dma_page));
fail1:	bus_dmamem_free(sc->sc_dmat, &sc->sc_sgt_seg, sc->sc_sgt_nsegs);
}
Esempio n. 18
0
/*
 * simide_attach:
 *	Attach a Simtec IDE podule.  Maps the card's control registers,
 *	reports the card self-test status, enables the IDE interface and
 *	then sets up and attaches both IDE channels.  A channel whose
 *	registers cannot be mapped is skipped (with everything it mapped
 *	so far released) without aborting the other channel.
 */
void
simide_attach(device_t parent, device_t self, void *aux)
{
	struct simide_softc *sc = device_private(self);
	struct podule_attach_args *pa = (void *)aux;
	int status;
	u_int iobase;
	int channel, i;
	struct simide_channel *scp;
	struct ata_channel *cp;
	struct wdc_regs *wdr;
	irqhandler_t *ihp;

	/* Note the podule number and validate */
	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	/*
	 * Ok we need our own bus tag as the register spacing
	 * is not the default.
	 *
	 * For the podulebus the bus tag cookie is the shift
	 * to apply to registers
	 * So duplicate the bus space tag and change the
	 * cookie.
	 *
	 * Also while we are at it replace the default
	 * read/write multiple short functions with
	 * optimised versions
	 */

	sc->sc_tag = *pa->pa_iot;
	sc->sc_tag.bs_cookie = (void *) DRIVE_REGISTER_SPACING_SHIFT;
	sc->sc_tag.bs_rm_2 = simide_bs_rm_2;
	sc->sc_tag.bs_wm_2 = simide_bs_wm_2;
	sc->sc_ctliot = pa->pa_iot;

	/* Obtain bus space handles for all the control registers */
	if (bus_space_map(sc->sc_ctliot, pa->pa_podule->mod_base +
	    CONTROL_REGISTERS_POFFSET, CONTROL_REGISTER_SPACE, 0,
	    &sc->sc_ctlioh))
		panic("%s: Cannot map control registers", device_xname(self));

	/* Install a clean up handler to make sure IRQ's are disabled */
	if (shutdownhook_establish(simide_shutdown, (void *)sc) == NULL)
		panic("%s: Cannot install shutdown handler",
		    device_xname(self));

	/* Set the interrupt info for this podule */
	sc->sc_podule->irq_addr = pa->pa_podule->mod_base
	    + CONTROL_REGISTERS_POFFSET + (CONTROL_REGISTER_OFFSET << 2);
	sc->sc_podule->irq_mask = STATUS_IRQ;

	sc->sc_ctl_reg = 0;

	status = bus_space_read_1(sc->sc_ctliot, sc->sc_ctlioh,
	    STATUS_REGISTER_OFFSET);

	aprint_normal(":");
	/* If any of the bits in STATUS_FAULT are zero then we have a fault. */
	if ((status & STATUS_FAULT) != STATUS_FAULT)
		aprint_normal(" card/cable fault (%02x) -", status);

	if (!(status & STATUS_RESET))
		aprint_normal(" (reset)");
	if (!(status & STATUS_ADDR_TEST))
		aprint_normal(" (addr)");
	if (!(status & STATUS_CS_TEST))
		aprint_normal(" (cs)");
	if (!(status & STATUS_RW_TEST))
		aprint_normal(" (rw)");

	aprint_normal("\n");

	/* Perhaps we should just abort at this point. */
/*	if ((status & STATUS_FAULT) != STATUS_FAULT)
		return;*/

	/*
	 * Enable IDE, Obey IORDY and disabled slow mode
	 */
	sc->sc_ctl_reg |= CONTROL_IDE_ENABLE | CONTROL_IORDY
			| CONTROL_SLOW_MODE_OFF;
	bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
	    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);

	/* Fill in wdc and channel infos */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 2;
	for (channel = 0 ; channel < 2; channel++) {
		scp = &sc->simide_channels[channel];
		sc->sc_chanarray[channel] = &scp->sc_channel;
		cp = &scp->sc_channel;
		wdr = &sc->sc_wdc_regs[channel];

		cp->ch_channel = channel;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue = &scp->sc_chqueue;
		cp->ch_ndrive = 2;
		wdr->cmd_iot = wdr->ctl_iot = &sc->sc_tag;
		iobase = pa->pa_podule->mod_base;
		if (bus_space_map(wdr->cmd_iot, iobase +
		    simide_info[channel].drive_registers,
		    DRIVE_REGISTERS_SPACE, 0, &wdr->cmd_baseioh))
			continue;
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
				i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
				bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
				    DRIVE_REGISTERS_SPACE);
				break;
			}
		}
		/*
		 * A plain `continue' inside the loop above would only
		 * resume the inner loop on an already-unmapped handle;
		 * check here whether subregioning completed and skip
		 * the whole channel if it did not.
		 */
		if (i < WDC_NREG)
			continue;
		wdc_init_shadow_regs(cp);
		if (bus_space_map(wdr->ctl_iot, iobase +
		    simide_info[channel].aux_register, 4, 0, &wdr->ctl_ioh)) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    DRIVE_REGISTERS_SPACE);
			continue;
		}
		/* Disable interrupts and clear any pending interrupts */
		scp->sc_irqmask = simide_info[channel].irq_mask;
		sc->sc_ctl_reg &= ~scp->sc_irqmask;
		bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
		    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);
		ihp = &scp->sc_ih;
		ihp->ih_func = simide_intr;
		ihp->ih_arg = scp;
		ihp->ih_level = IPL_BIO;
		ihp->ih_name = "simide";
		ihp->ih_maskaddr = pa->pa_podule->irq_addr;
		ihp->ih_maskbits = scp->sc_irqmask;
		if (irq_claim(sc->sc_podule->interrupt, ihp))
			panic("%s: Cannot claim interrupt %d",
			    device_xname(self), sc->sc_podule->interrupt);
		/* clear any pending interrupts and enable interrupts */
		sc->sc_ctl_reg |= scp->sc_irqmask;
		bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
		    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);
		wdcattach(cp);
	}
}
Esempio n. 19
0
/*
 * pciide_pnpbios_attach:
 *	Attach a Toshiba Extended IDE controller discovered via the
 *	PnP BIOS.  Maps the DMA and compat command/control regions,
 *	sets up the single channel (with busmaster DMA when the per-
 *	drive DMA tables can be allocated) and attaches it.
 */
void
pciide_pnpbios_attach(device_t parent, device_t self, void *aux)
{
	struct pciide_softc *sc = device_private(self);
	struct pnpbiosdev_attach_args *aa = aux;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	bus_space_tag_t compat_iot;
	bus_space_handle_t cmd_baseioh, ctl_ioh;
	int i, drive, size;
	uint8_t idedma_ctl;

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	aprint_naive(": disk controller\n");
	aprint_normal("\n");
	pnpbios_print_devres(self, aa);

	aprint_normal_dev(self, "Toshiba Extended IDE Controller\n");

	/* Resource 2: busmaster DMA registers. */
	if (pnpbios_io_map(aa->pbt, aa->resc, 2, &sc->sc_dma_iot,
	    &sc->sc_dma_ioh) != 0) {
		aprint_error_dev(self, "unable to map DMA registers\n");
		return;
	}
	/* Resources 0 and 1: compat command block and control register. */
	if (pnpbios_io_map(aa->pbt, aa->resc, 0, &compat_iot,
	    &cmd_baseioh) != 0) {
		aprint_error_dev(self, "unable to map command registers\n");
		return;
	}
	if (pnpbios_io_map(aa->pbt, aa->resc, 1, &compat_iot,
	    &ctl_ioh) != 0) {
		aprint_error_dev(self, "unable to map control register\n");
		return;
	}

	sc->sc_dmat = &pci_bus_dma_tag;

	cp = &sc->pciide_channels[0];
	sc->wdc_chanarray[0] = &cp->ata_channel;
	cp->ata_channel.ch_channel = 0;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	cp->ata_channel.ch_queue = malloc(sizeof(struct ata_queue),
					  M_DEVBUF, M_NOWAIT);
	cp->ata_channel.ch_ndrive = 2;
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error_dev(self, "unable to allocate memory for command "
		    "queue\n");
		return;
	}

	sc->sc_dma_ok = 1;
	for (i = 0; i < IDEDMA_NREGS; i++) {
		size = 4;
		if (size > (IDEDMA_SCH_OFFSET - i))
			size = IDEDMA_SCH_OFFSET - i;
		if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
		    i, size, &cp->dma_iohs[i]) != 0) {
			aprint_error_dev(self, "can't subregion offset %d "
			    "size %lu", i, (u_long)size);
			/* don't leak the command queue on this error path */
			free(cp->ata_channel.ch_queue, M_DEVBUF);
			return;
		}
	}
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = pciide_dma_init;
	sc->sc_wdcdev.dma_start = pciide_dma_start;
	sc->sc_wdcdev.dma_finish = pciide_dma_finish;
	sc->sc_wdcdev.irqack = pciide_irqack;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;		/* XXX */
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;	/* XXX */

	wdc_allocate_regs(&sc->sc_wdcdev);

	wdc_cp = &cp->ata_channel;
	wdr = CHAN_TO_WDC_REGS(wdc_cp);
	wdr->cmd_iot = compat_iot;
	wdr->cmd_baseioh = cmd_baseioh;

	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			    aprint_error_dev(self, "unable to subregion "
				"control register\n");
			    /* don't leak the command queue on this path */
			    free(cp->ata_channel.ch_queue, M_DEVBUF);
			    return;
		}
	}
	wdc_init_shadow_regs(wdc_cp);

	wdr->ctl_iot = wdr->data32iot = compat_iot;
	wdr->ctl_ioh = wdr->data32ioh = ctl_ioh;

	cp->compat = 1;

	/* NOTE(review): return value not checked -- TODO confirm whether
	 * pnpbios_intr_establish can fail here. */
	cp->ih = pnpbios_intr_establish(aa->pbt, aa->resc, 0, IPL_BIO,
					pciide_compat_intr, cp);

	wdcattach(wdc_cp);

	idedma_ctl = 0;
	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		/*
		 * we have not probed the drives yet,
		 * allocate resources for all of them.
		 */
		if (pciide_dma_table_setup(sc, 0, drive) != 0) {
			/* Abort DMA setup */
			aprint_error(
			    "%s:%d:%d: can't allocate DMA maps, "
			    "using PIO transfers\n",
			    device_xname(self), 0, drive);
			sc->sc_dma_ok = 0;
			sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
			sc->sc_wdcdev.irqack = NULL;
			idedma_ctl = 0;
			break;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot,
		    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
	}
}
Esempio n. 20
0
/*
 * wdc_buddha_attach:
 *	Attach a Buddha / Buddha Flash / Catweasel Z2 IDE controller on
 *	the Amiga Zorro bus.  Sets up a private stride-4 byte-swapped
 *	bus space tag, maps and attaches each channel (2, or 3 on the
 *	Catweasel), installs the interrupt service routine and finally
 *	enables board interrupts.
 */
void
wdc_buddha_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_buddha_softc *sc;
	struct zbus_args *zap;
	int nchannels;
	int ch;

	sc = device_private(self);
	sc->sc_wdcdev.sc_atac.atac_dev = self;
	zap = aux;

	sc->ba = zap->va;

	sc->sc_iot.base = (bus_addr_t)sc->ba;
	sc->sc_iot.absm = &amiga_bus_stride_4swap;

	nchannels = 2;
	if (zap->prodid == 42) {
		aprint_normal(": Catweasel Z2\n");
		nchannels = 3;
	} else if (zap->serno == 0)
		aprint_normal(": Buddha\n");
	else
		aprint_normal(": Buddha Flash\n");

	/* XXX pio mode setting not implemented yet. */
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = nchannels;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (ch = 0; ch < nchannels; ch++) {
		struct ata_channel *cp;
		struct wdc_regs *wdr;
		int i;

		cp = &sc->channels[ch];
		sc->wdc_chanarray[ch] = cp;

		cp->ch_channel = ch;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue =
		    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
		if (cp->ch_queue == NULL) {
			aprint_error_dev(self,
			    "can't allocate memory for command queue\n");
			return;
		}
		cp->ch_ndrive = 2;

		/*
		 * XXX According to the Buddha docs, we should use a method
		 * array that adds 0x40 to the address for byte accesses, to
		 * get the slow timing for command accesses, and the 0x00
		 * offset for the word (fast) accesses. This will be
		 * reconsidered when implementing setting the timing.
		 *
		 * XXX We also could consider to abuse the 32bit capability, or
		 * 32bit accesses to the words (which will read in two words)
		 * for better performance.
		 *		-is
		 */
		wdr = CHAN_TO_WDC_REGS(cp);

		wdr->cmd_iot = &sc->sc_iot;
		if (bus_space_map(wdr->cmd_iot, 0x210+ch*0x80, 8, 0,
		    &wdr->cmd_baseioh)) {
			aprint_error_dev(self, "couldn't map cmd registers\n");
			free(cp->ch_queue, M_DEVBUF);
			return;
		}

		wdr->ctl_iot = &sc->sc_iot;
		if (bus_space_map(wdr->ctl_iot, 0x250+ch*0x80, 2, 0,
		    &wdr->ctl_ioh)) {
			aprint_error_dev(self, "couldn't map ctl registers\n");
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 8);
			free(cp->ch_queue, M_DEVBUF);
			return;
		}

		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
			    i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(self,
				    "couldn't subregion cmd regs\n");
				/* release everything this channel mapped */
				bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh, 2);
				bus_space_unmap(wdr->cmd_iot,
				    wdr->cmd_baseioh, 8);
				free(cp->ch_queue, M_DEVBUF);
				return;
			}
		}

		wdc_init_shadow_regs(cp);
		wdcattach(cp);
	}

	sc->sc_isr.isr_intr = wdc_buddha_intr;
	sc->sc_isr.isr_arg = sc;
	sc->sc_isr.isr_ipl = 2;
	add_isr(&sc->sc_isr);
	sc->ba[0xfc0] = 0;	/* enable interrupts */
}
Esempio n. 21
0
static void atp_attach(struct device * parent, struct device * self, void *aux)
{
	atp_sata_t *sc = (atp_sata_t * )self;
        struct pci_attach_args *pa = aux;
        pci_chipset_tag_t pc = pa->pa_pc;
        bus_space_tag_t iot = pa->pa_iot;
        bus_addr_t iobase;
        bus_size_t iosize;
        bus_addr_t ideaddr;
	atp_sata_info_t info;
	u32	iog_base;
	u32	ioaddr;
	int i;

        if (pci_io_find(pc, pa->pa_tag, 0x14, &iobase, &iosize))
        {
               printf(": can't find i/o space\n");
               return;
        }

        if (bus_space_map(iot, iobase, iosize, 0,  &sc->reg_base))
        {
               printf(": can't map i/o space\n");
               return;
        }

	ioaddr = sc->reg_base; 
        if (pci_io_find(pc, pa->pa_tag, 0x10, &iobase, &iosize))
        {
               printf(": can't find i/o space\n");
               return;
        }

        if (bus_space_map(iot, iobase, iosize, 0,  &ideaddr))
        {
               printf(": can't map i/o space\n");
               return;
        }

	iog_base = ioaddr + 0x100;
	outb(iog_base + 0x0004,0x01);//send COMRESET
	while(inb(iog_base + 0x0004) & 0x01)
	{
	      myudelay(10);
	}
	outb(iog_base + 0x0004,0x0);

	for(i = 0;i <= 1;i++)
	{
		info.sata_reg_base = ioaddr + i * 0x80;
		info.flags = i;
		info.aa_link.aa_type=0xff; //just for not match ide
		config_found(self,(void *)&info,NULL);
	}
 
	sc->sc_wdcdev.PIO_cap = 0;
	sc->sc_wdcdev.DMA_cap = 0;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 1;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA32;//WDC_CAPABILITY_DATA16;
	sc->wdc_chanarray[0] = &sc->wdc_channel;
	sc->wdc_channel.channel = 0;
	sc->wdc_channel.wdc = &sc->sc_wdcdev;
	sc->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (sc->wdc_channel.ch_queue == NULL) {
		printf("%s: "
			"cannot allocate memory for command queue",
		sc->sc_wdcdev.sc_dev.dv_xname);
		 return ;
	}
	 sc->wdc_channel.cmd_iot=iot;
	 sc->wdc_channel.ctl_iot=iot;
	 sc->wdc_channel.cmd_ioh= ideaddr+0x80;
	 sc->wdc_channel.ctl_ioh= ideaddr+0x8e;
			sc->wdc_channel.data32iot = sc->wdc_channel.cmd_iot;
			sc->wdc_channel.data32ioh = sc->wdc_channel.cmd_ioh;

	 wdcattach(sc->wdc_chanarray[0]);
}
/*
 * wdc_obio_attach:
 *	Attach a macppc on-board (obio) IDE interface.  Determines the
 *	interrupt from the OFW properties, maps the widely-spaced
 *	(stride-16) task file registers, optionally sets up DBDMA
 *	transfers, and attaches the single channel.
 */
void
wdc_obio_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_obio_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct confargs *ca = aux;
	struct ata_channel *chp = &sc->sc_channel;
	int intr, i, type = IST_EDGE;
	int use_dma = 0;
	char path[80];

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	/* DMA is only attempted when enabled via config flags. */
	if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
	    WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Don't work yet. */
	}

	/* Decode the interrupt line and trigger type from OFW properties. */
	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		aprint_normal(" irq %d", intr);
		if (ca->ca_nintr > 8) {
			type = ca->ca_intr[1] ? IST_LEVEL : IST_EDGE;
		}
		aprint_normal(", %s triggered", (type == IST_EDGE) ? "edge" : "level");
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		aprint_normal(" irq property not found; using %d", intr);
	} else {
		aprint_error(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		aprint_normal(": DMA transfer");

	aprint_normal("\n");

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	wdr->cmd_iot = wdr->ctl_iot = ca->ca_tag;

	/* Registers are spaced 16 bytes apart, hence the << 4 offsets. */
	if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[0],
	    WDC_REG_NPORTS << 4, 0, &wdr->cmd_baseioh) ||
	    bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
			WDC_AUXREG_OFFSET << 4, 1, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i << 4,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    WDC_REG_NPORTS << 4);
			aprint_error_dev(self,
			    "couldn't subregion registers\n");
			return;
		}
	}
#if 0
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_ioh;
#endif

	/*
	 * NOTE(review): the interrupt is established before chp's fields
	 * are initialized below; presumably safe because the device
	 * cannot interrupt until wdcattach() -- TODO confirm.
	 */
	sc->sc_ih = intr_establish(intr, type, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		/* NOTE(review): dbdma_alloc result is not checked here. */
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		/*
		 * XXX
		 * we don't use ca->ca_reg[3] for size here because at least
		 * on the PB3400c it says 0x200 for both IDE channels ( the
		 * one on the mainboard and the other on the mediabay ) but
		 * their start addresses are only 0x100 apart. Since those
		 * DMA registers are always 0x100 or less we don't really 
		 * have to care though
		 */
		if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[2],
		    0x100, BUS_SPACE_MAP_LINEAR, &sc->sc_dmaregh)) {

			aprint_error_dev(self,
			    "unable to map DMA registers (%08x)\n",
			    ca->ca_reg[2]);
			/* should unmap stuff here */
			return;
		}
		sc->sc_dmareg = bus_space_vaddr(wdr->cmd_iot, sc->sc_dmaregh);

		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		/* "ata-4" nodes additionally support UDMA with own timing. */
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.sc_atac.atac_set_modes = 
			    ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.sc_atac.atac_pio_cap = 3;
			sc->sc_wdcdev.sc_atac.atac_dma_cap = 1;
		}
#endif
	} else {
		/* all non-DMA controllers can use adjust_timing */
		sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
	}

	/* Describe the single channel to the MI ATA layer. */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_chanptr = chp;
	sc->sc_wdcdev.sc_atac.atac_channels = &sc->sc_chanptr;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->ch_channel = 0;
	chp->ch_atac = &sc->sc_wdcdev.sc_atac;
	chp->ch_queue = &sc->sc_chqueue;

	wdc_init_shadow_regs(chp);

#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
}