/*
 * wdc_obio_probe:
 *
 *	Autoconfiguration match function.  Temporarily maps the command
 *	and auxiliary (control) register windows on obio, runs the MI
 *	wdcprobe(), and unmaps everything again.  On success, the attach
 *	args are normalized to exactly one I/O range and one IRQ.
 */
static int
wdc_obio_probe(device_t parent, cfdata_t cfp, void *aux)
{
	struct obio_attach_args *oa = aux;
	struct ata_channel ch;
	struct wdc_softc wdc;
	struct wdc_regs wdr;
	int result = 0;
	int i;

	/* Need at least one known I/O base and one known IRQ to try. */
	if (oa->oa_nio < 1)
		return (0);
	if (oa->oa_nirq < 1)
		return (0);
	if (oa->oa_io[0].or_addr == IOBASEUNK)
		return (0);
	if (oa->oa_irq[0].or_irq == IRQUNK)
		return (0);

	/* Build a throw-away softc/channel pair just for wdcprobe(). */
	memset(&wdc, 0, sizeof(wdc));
	memset(&ch, 0, sizeof(ch));
	ch.ch_atac = &wdc.sc_atac;
	wdc.regs = &wdr;

	wdr.cmd_iot = oa->oa_iot;
	if (bus_space_map(wdr.cmd_iot, oa->oa_io[0].or_addr,
	    WDC_OBIO_REG_SIZE, 0, &wdr.cmd_baseioh)) {
		goto out;
	}

	/*
	 * Registers are spaced two bytes apart; the data register
	 * (index 0) is 16 bits wide, the others 8 bits.
	 */
	for (i = 0; i < WDC_OBIO_REG_NPORTS; i++) {
		if (bus_space_subregion(wdr.cmd_iot, wdr.cmd_baseioh,
		    i * 2, (i == 0) ? 2 : 1, &wdr.cmd_iohs[i])) {
			goto outunmap;
		}
	}
	wdc_init_shadow_regs(&ch);

	wdr.ctl_iot = oa->oa_iot;
	if (bus_space_map(wdr.ctl_iot,
	    oa->oa_io[0].or_addr + WDC_OBIO_AUXREG_OFFSET,
	    WDC_OBIO_AUXREG_SIZE, 0, &wdr.ctl_ioh)) {
		goto outunmap;
	}

	result = wdcprobe(&ch);
	if (result) {
		/* Controller found: report the resources we will use. */
		oa->oa_nio = 1;
		oa->oa_io[0].or_size = WDC_OBIO_REG_SIZE;
		oa->oa_nirq = 1;
		oa->oa_niomem = 0;
	}

	/* Unwind the temporary mappings in reverse order. */
	bus_space_unmap(wdr.ctl_iot, wdr.ctl_ioh, WDC_OBIO_AUXREG_SIZE);
outunmap:
	bus_space_unmap(wdr.cmd_iot, wdr.cmd_baseioh, WDC_OBIO_REG_SIZE);
out:
	return (result);
}
/*
 * wdc_pnpbus_attach:
 *
 *	Attach a wdc controller found on pnpbus: map the command and
 *	control register ranges, subregion the command registers, set
 *	up the single channel and hook up the interrupt.
 */
static void
wdc_pnpbus_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_pnpbus_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct pnpbus_dev_attach_args *pna = aux;
	int cmd_iobase, cmd_len, aux_iobase, aux_len, i;

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;
	wdr->cmd_iot = pna->pna_iot;
	wdr->ctl_iot = pna->pna_iot;
	pnpbus_getioport(&pna->pna_res, 0, &cmd_iobase, &cmd_len);
	pnpbus_getioport(&pna->pna_res, 1, &aux_iobase, &aux_len);

	if (pnpbus_io_map(&pna->pna_res, 0, &wdr->cmd_iot,
	    &wdr->cmd_baseioh) ||
	    pnpbus_io_map(&pna->pna_res, 1, &wdr->ctl_iot, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		/*
		 * Bail out here: the old code fell through and kept
		 * using unmapped register handles.
		 */
		return;
	}

	/*
	 * Subregion the command registers.  Clamp at WDC_NREG so a
	 * firmware-reported I/O range larger than the register file
	 * cannot overrun wdr->cmd_iohs[].
	 */
	for (i = 0; i < cmd_len && i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error(": couldn't subregion registers\n");
			return;
		}
	}

	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];

	/* Controller capabilities; 32-bit data transfers are optional. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_PREATA;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
	    WDC_OPTIONS_32)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;

	/* Single channel, two drives. */
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;
	sc->sc_channel.ch_ndrive = 2;
	wdc_init_shadow_regs(&sc->sc_channel);

	sc->sc_ih = pnpbus_intr_establish(0, IPL_BIO, IST_PNP, wdcintr,
	    &sc->sc_channel, &pna->pna_res);

	aprint_normal("\n");
	wdcattach(&sc->sc_channel);
}
/*
 * wdc_obio_attach:
 *
 *	Attach the wdc controller on obio: map command and aux (control)
 *	registers, subregion the command register file, establish the
 *	interrupt and attach the MI wdc layer.
 */
static void
wdc_obio_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_obio_softc *sc = device_private(self);
	struct obio_attach_args *oa = aux;
	struct wdc_regs *wdr;
	int i;

	aprint_naive("\n");
	aprint_normal("\n");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;
	wdr->cmd_iot = oa->oa_iot;
	wdr->ctl_iot = oa->oa_iot;

	/*
	 * Map the two windows separately (the old combined "||" test
	 * could not tell which mapping succeeded, so nothing could be
	 * unmapped on failure).
	 */
	if (bus_space_map(wdr->cmd_iot, oa->oa_io[0].or_addr,
	    WDC_OBIO_REG_SIZE, 0, &wdr->cmd_baseioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}
	if (bus_space_map(wdr->ctl_iot,
	    oa->oa_io[0].or_addr + WDC_OBIO_AUXREG_OFFSET,
	    WDC_OBIO_AUXREG_SIZE, 0, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
		    WDC_OBIO_REG_SIZE);
		return;
	}

	/* Registers spaced 2 bytes apart; data register is 16 bits. */
	for (i = 0; i < WDC_OBIO_REG_NPORTS; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i * 2, (i == 0) ? 2 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(self,
			    "couldn't subregion registers\n");
			/* Don't leak the mappings on failure. */
			bus_space_unmap(wdr->ctl_iot, wdr->ctl_ioh,
			    WDC_OBIO_AUXREG_SIZE);
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    WDC_OBIO_REG_SIZE);
			return;
		}
	}

	sc->sc_ih = obio_intr_establish(oa->oa_irq[0].or_irq, IPL_BIO,
	    wdcintr, &sc->sc_channel);

	/* Controller capabilities and the single channel. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_PREATA;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;
	sc->sc_channel.ch_ndrive = 2;

	wdc_init_shadow_regs(&sc->sc_channel);
	wdcattach(&sc->sc_channel);
}
/*
 * Describe the SPD HDD interface register layout to the MI wdc code.
 * All registers sit in the _wdc_spd_space tag at SPD_HDD_IO_BASE.
 */
void
__wdc_spd_bus_space(struct ata_channel *ch)
{
	struct wdc_regs *regs = CHAN_TO_WDC_REGS(ch);
	int reg;

	/* wdc register is 16 bit wide. */
	regs->cmd_iot = &_wdc_spd_space;
	for (reg = 0; reg < 8; reg++)
		regs->cmd_iohs[reg] = SPD_HDD_IO_BASE + reg * 2;
	wdc_init_shadow_regs(ch);

	/* Auxiliary (control) register. */
	regs->ctl_iot = &_wdc_spd_space;
	regs->ctl_ioh = SPD_HDD_IO_BASE + WDC_SPD_HDD_AUXREG_OFFSET;

	/* 32-bit data window aliases the command space base. */
	regs->data32iot = regs->cmd_iot;
	regs->data32ioh = SPD_HDD_IO_BASE;
}
/*
 * wdc_upc_attach:
 *
 *	Attach the IDE interface of a CirrusLogic "universal peripheral
 *	controller".  The parent has already mapped both register
 *	windows (ua_ioh = command block, ua_ioh2 = control block); we
 *	only subregion the command registers and wire up the channel.
 */
static void
wdc_upc_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_upc_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct upc_attach_args *ua = aux;
	int i;

	sc->sc_wdc.sc_atac.atac_dev = self;
	sc->sc_wdc.regs = wdr = &sc->sc_wdc_regs;
	sc->sc_wdc.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdc.sc_atac.atac_pio_cap = 1; /* XXX ??? */
	sc->sc_wdc.sc_atac.atac_nchannels = 1;
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdc.sc_atac.atac_channels = sc->sc_chanlist;

	/* Register windows come pre-mapped from the parent. */
	wdr->cmd_iot = ua->ua_iot;
	wdr->cmd_baseioh = ua->ua_ioh;
	wdr->ctl_iot = ua->ua_iot;
	wdr->ctl_ioh = ua->ua_ioh2;

	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdc.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;
	sc->sc_channel.ch_ndrive = 2;

	/* Byte-spaced registers; data register (index 0) is 4 bytes. */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(ua->ua_iot, ua->ua_ioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(sc->sc_wdc.sc_atac.atac_dev,
			    "can't subregion I/O space\n");
			return;
		}
	}
	wdc_init_shadow_regs(&sc->sc_channel);

	upc_intr_establish(ua->ua_irqhandle, IPL_BIO, wdcintr,
	    &sc->sc_channel);

	aprint_normal("\n");
	aprint_naive("\n");

	wdcattach(&sc->sc_channel);
}
/*
 * rapide_attach:
 *
 *	Attach a RapIDE podule: map the control registers, verify the
 *	card issue, then map and wire up both IDE channels.  A failure
 *	while setting up one channel skips just that channel.
 */
void
rapide_attach(device_t parent, device_t self, void *aux)
{
	struct rapide_softc *sc = device_private(self);
	struct podule_attach_args *pa = (void *)aux;
	bus_space_tag_t iot;
	bus_space_handle_t ctlioh;
	u_int iobase;
	int channel, i, regfail;
	struct rapide_channel *rcp;
	struct ata_channel *cp;
	struct wdc_regs *wdr;
	irqhandler_t *ihp;

	/* Note the podule number and validate */
	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	set_easi_cycle_type(sc->sc_podule_number, EASI_CYCLE_TYPE_C);

	/*
	 * Duplicate the podule bus space tag and provide alternative
	 * bus_space_read_multi_4() and bus_space_write_multi_4()
	 * functions.
	 */
	rapide_bs_tag = *pa->pa_iot;
	rapide_bs_tag.bs_rm_4 = rapide_bs_rm_4;
	rapide_bs_tag.bs_wm_4 = rapide_bs_wm_4;
	sc->sc_ctliot = iot = &rapide_bs_tag;

	if (bus_space_map(iot, pa->pa_podule->easi_base +
	    CONTROL_REGISTERS_OFFSET, CONTROL_REGISTER_SPACE, 0, &ctlioh))
		panic("%s: Cannot map control registers",
		    device_xname(self));
	sc->sc_ctlioh = ctlioh;

	sc->sc_version = bus_space_read_1(iot, ctlioh,
	    VERSION_REGISTER_OFFSET) & VERSION_REGISTER_MASK;
/*	bus_space_unmap(iot, ctl_ioh, CONTROL_REGISTER_SPACE);*/

	aprint_normal(": Issue %d\n", sc->sc_version + 1);
	/* Only issue 2 cards are supported. */
	if (sc->sc_version != VERSION_2_ID)
		return;

	if (shutdownhook_establish(rapide_shutdown, (void *)sc) == NULL)
		panic("%s: Cannot install shutdown handler",
		    device_xname(self));

	/* Set the interrupt info for this podule */
	sc->sc_podule->irq_addr = pa->pa_podule->easi_base +
	    CONTROL_REGISTERS_OFFSET + IRQ_REQUEST_REGISTER_BYTE_OFFSET;
	sc->sc_podule->irq_mask = IRQ_MASK;

	iobase = pa->pa_podule->easi_base;

	/* Fill in wdc and channel infos */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 2;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	for (channel = 0 ; channel < 2; channel++) {
		rcp = &sc->rapide_channels[channel];
		sc->sc_chanarray[channel] = &rcp->rc_channel;
		cp = &rcp->rc_channel;
		wdr = &sc->sc_wdc_regs[channel];

		cp->ch_channel = channel;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue = &rcp->rc_chqueue;
		wdr->cmd_iot = iot;
		wdr->ctl_iot = iot;
		wdr->data32iot = iot;

		if (bus_space_map(iot, iobase +
		    rapide_info[channel].registers,
		    DRIVE_REGISTERS_SPACE, 0, &wdr->cmd_baseioh))
			continue;

		/*
		 * Subregion the command registers.  On failure, unmap
		 * and skip the whole channel.  (The old code issued
		 * "continue" from the inner loop, which went on to the
		 * next register with an already-unmapped base handle.)
		 */
		regfail = 0;
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				bus_space_unmap(iot, wdr->cmd_baseioh,
				    DRIVE_REGISTERS_SPACE);
				regfail = 1;
				break;
			}
		}
		if (regfail)
			continue;
		wdc_init_shadow_regs(cp);

		if (bus_space_map(iot, iobase +
		    rapide_info[channel].aux_register, 4, 0,
		    &wdr->ctl_ioh)) {
			bus_space_unmap(iot, wdr->cmd_baseioh,
			    DRIVE_REGISTERS_SPACE);
			continue;
		}
		if (bus_space_map(iot, iobase +
		    rapide_info[channel].data_register, 4, 0,
		    &wdr->data32ioh)) {
			bus_space_unmap(iot, wdr->cmd_baseioh,
			    DRIVE_REGISTERS_SPACE);
			bus_space_unmap(iot, wdr->ctl_ioh, 4);
			continue;
		}

		/* Disable interrupts and clear any pending interrupts */
		rcp->rc_irqmask = rapide_info[channel].irq_mask;
		sc->sc_intr_enable_mask &= ~rcp->rc_irqmask;
		bus_space_write_1(iot, sc->sc_ctlioh,
		    IRQ_MASK_REGISTER_OFFSET, sc->sc_intr_enable_mask);

		/* XXX - Issue 1 cards will need to clear any pending
		   interrupts */
		ihp = &rcp->rc_ih;
		ihp->ih_func = rapide_intr;
		ihp->ih_arg = rcp;
		ihp->ih_level = IPL_BIO;
		ihp->ih_name = "rapide";
		ihp->ih_maskaddr = pa->pa_podule->irq_addr;
		ihp->ih_maskbits = rcp->rc_irqmask;
		if (irq_claim(sc->sc_podule->interrupt, ihp))
			panic("%s: Cannot claim interrupt %d",
			    device_xname(self), sc->sc_podule->interrupt);

		/* clear any pending interrupts and enable interrupts */
		sc->sc_intr_enable_mask |= rcp->rc_irqmask;
		bus_space_write_1(iot, sc->sc_ctlioh,
		    IRQ_MASK_REGISTER_OFFSET, sc->sc_intr_enable_mask);

		/* XXX - Issue 1 cards will need to clear any pending
		   interrupts */
		wdcattach(cp);
	}
}
/*
 * pciide_pnpbios_attach:
 *
 *	Attach a Toshiba Extended IDE Controller found via PnP BIOS:
 *	the controller looks like a compat-mapped pciide with a
 *	separate bus-master DMA register window (resource index 2).
 *	Only one channel is set up.
 */
void
pciide_pnpbios_attach(device_t parent, device_t self, void *aux)
{
	struct pciide_softc *sc = device_private(self);
	struct pnpbiosdev_attach_args *aa = aux;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	bus_space_tag_t compat_iot;
	bus_space_handle_t cmd_baseioh, ctl_ioh;
	int i, drive, size;
	uint8_t idedma_ctl;

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	aprint_naive(": disk controller\n");
	aprint_normal("\n");
	pnpbios_print_devres(self, aa);
	aprint_normal_dev(self, "Toshiba Extended IDE Controller\n");

	/* Resource 2: bus-master DMA; 0: command block; 1: control. */
	if (pnpbios_io_map(aa->pbt, aa->resc, 2, &sc->sc_dma_iot,
	    &sc->sc_dma_ioh) != 0) {
		aprint_error_dev(self, "unable to map DMA registers\n");
		return;
	}
	if (pnpbios_io_map(aa->pbt, aa->resc, 0, &compat_iot,
	    &cmd_baseioh) != 0) {
		aprint_error_dev(self, "unable to map command registers\n");
		return;
	}
	if (pnpbios_io_map(aa->pbt, aa->resc, 1, &compat_iot,
	    &ctl_ioh) != 0) {
		aprint_error_dev(self, "unable to map control register\n");
		return;
	}

	sc->sc_dmat = &pci_bus_dma_tag;

	cp = &sc->pciide_channels[0];
	sc->wdc_chanarray[0] = &cp->ata_channel;
	cp->ata_channel.ch_channel = 0;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	cp->ata_channel.ch_queue = malloc(sizeof(struct ata_queue),
	    M_DEVBUF, M_NOWAIT);
	cp->ata_channel.ch_ndrive = 2;
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error_dev(self,
		    "unable to allocate memory for command "
		    "queue\n");
		return;
	}

	sc->sc_dma_ok = 1;
	/*
	 * Subregion the IDEDMA registers; a register may not cross
	 * the IDEDMA_SCH_OFFSET boundary, hence the size clamp.
	 */
	for (i = 0; i < IDEDMA_NREGS; i++) {
		size = 4;
		if (size > (IDEDMA_SCH_OFFSET - i))
			size = IDEDMA_SCH_OFFSET - i;
		if (bus_space_subregion(sc->sc_dma_iot, sc->sc_dma_ioh,
		    i, size, &cp->dma_iohs[i]) != 0) {
			aprint_error_dev(self, "can't subregion offset %d "
			    "size %lu", i, (u_long)size);
			return;
		}
	}
	sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
	sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;

	/* Hook up the MI wdc DMA entry points. */
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = pciide_dma_init;
	sc->sc_wdcdev.dma_start = pciide_dma_start;
	sc->sc_wdcdev.dma_finish = pciide_dma_finish;
	sc->sc_wdcdev.irqack = pciide_irqack;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;	/* XXX */
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;	/* XXX */

	wdc_allocate_regs(&sc->sc_wdcdev);

	wdc_cp = &cp->ata_channel;
	wdr = CHAN_TO_WDC_REGS(wdc_cp);
	wdr->cmd_iot = compat_iot;
	wdr->cmd_baseioh = cmd_baseioh;

	/* Byte-spaced command registers; data register is 4 bytes. */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error_dev(self, "unable to subregion "
			    "control register\n");
			return;
		}
	}
	wdc_init_shadow_regs(wdc_cp);
	wdr->ctl_iot = wdr->data32iot = compat_iot;
	wdr->ctl_ioh = wdr->data32ioh = ctl_ioh;
	cp->compat = 1;

	cp->ih = pnpbios_intr_establish(aa->pbt, aa->resc, 0, IPL_BIO,
	    pciide_compat_intr, cp);

	wdcattach(wdc_cp);

	idedma_ctl = 0;
	for (drive = 0; drive < cp->ata_channel.ch_ndrive; drive++) {
		/*
		 * we have not probed the drives yet,
		 * allocate ressources for all of them.
		 */
		if (pciide_dma_table_setup(sc, 0, drive) != 0) {
			/* Abort DMA setup */
			aprint_error(
			    "%s:%d:%d: can't allocate DMA maps, "
			    "using PIO transfers\n",
			    device_xname(self), 0, drive);
			sc->sc_dma_ok = 0;
			sc->sc_wdcdev.sc_atac.atac_cap &= ~ATAC_CAP_DMA;
			sc->sc_wdcdev.irqack = NULL;
			idedma_ctl = 0;
			break;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL],
		    0, idedma_ctl);
	}
}
static void via_sata_chip_map_new(struct pciide_softc *sc, const struct pci_attach_args *pa) { struct pciide_channel *cp; struct ata_channel *wdc_cp; struct wdc_regs *wdr; int channel; pci_intr_handle_t intrhandle; const char *intrstr; int i; if (pciide_chipen(sc, pa) == 0) return; sc->sc_apo_regbase = APO_VIA_VT6421_REGBASE; if (pci_mapreg_map(pa, PCI_BAR(5), PCI_MAPREG_TYPE_IO, 0, &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, &sc->sc_ba5_ss) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map SATA regs\n"); } aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev, "bus-master DMA support present"); via_vt6421_mapreg_dma(sc, pa); aprint_verbose("\n"); sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32; sc->sc_wdcdev.sc_atac.atac_pio_cap = 4; if (sc->sc_dma_ok) { sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA; sc->sc_wdcdev.irqack = pciide_irqack; sc->sc_wdcdev.sc_atac.atac_dma_cap = 2; sc->sc_wdcdev.sc_atac.atac_udma_cap = 6; } sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel; sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray; sc->sc_wdcdev.sc_atac.atac_nchannels = 3; sc->sc_wdcdev.wdc_maxdrives = 2; wdc_allocate_regs(&sc->sc_wdcdev); if (pci_intr_map(pa, &intrhandle) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map native-PCI interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, intrhandle); sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, pciide_pci_intr, sc); if (sc->sc_pci_ih == NULL) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't establish native-PCI interrupt"); if (intrstr != NULL) aprint_error(" at %s", intrstr); aprint_error("\n"); return; } aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev, "using %s for native-PCI interrupt\n", intrstr ? 
intrstr : "unknown interrupt"); for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; channel++) { cp = &sc->pciide_channels[channel]; if (via_vt6421_chansetup(sc, channel) == 0) continue; wdc_cp = &cp->ata_channel; wdr = CHAN_TO_WDC_REGS(wdc_cp); wdr->sata_iot = sc->sc_ba5_st; wdr->sata_baseioh = sc->sc_ba5_sh; if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh, (wdc_cp->ch_channel << 6) + 0x0, 4, &wdr->sata_status) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map channel %d sata_status regs\n", wdc_cp->ch_channel); continue; } if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh, (wdc_cp->ch_channel << 6) + 0x4, 4, &wdr->sata_error) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map channel %d sata_error regs\n", wdc_cp->ch_channel); continue; } if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh, (wdc_cp->ch_channel << 6) + 0x8, 4, &wdr->sata_control) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map channel %d sata_control regs\n", wdc_cp->ch_channel); continue; } if (pci_mapreg_map(pa, PCI_BAR(wdc_cp->ch_channel), PCI_MAPREG_TYPE_IO, 0, &wdr->cmd_iot, &wdr->cmd_baseioh, NULL, &wdr->cmd_ios) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map %s channel regs\n", cp->name); } wdr->ctl_iot = wdr->cmd_iot; for (i = 0; i < WDC_NREG; i++) { if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) { aprint_error_dev( sc->sc_wdcdev.sc_atac.atac_dev, "couldn't subregion %s " "channel cmd regs\n", cp->name); return; } } if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, WDC_NREG + 2, 1, &wdr->ctl_ioh) != 0) { aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev, "couldn't map channel %d ctl regs\n", channel); return; } wdc_init_shadow_regs(wdc_cp); wdr->data32iot = wdr->cmd_iot; wdr->data32ioh = wdr->cmd_iohs[wd_data]; wdcattach(wdc_cp); } }
/*
 * wdc_buddha_attach:
 *
 *	Attach a Buddha / Buddha Flash / Catweasel Z2 IDE board on the
 *	Amiga Zorro bus.  The Catweasel (prodid 42) has three channels,
 *	the Buddha variants two.  Registers are accessed through the
 *	stride-4 byte-swapped Amiga bus space method array.
 */
void
wdc_buddha_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_buddha_softc *sc;
	struct zbus_args *zap;
	int nchannels;
	int ch;

	sc = device_private(self);
	sc->sc_wdcdev.sc_atac.atac_dev = self;
	zap = aux;

	sc->ba = zap->va;

	/* Hand-rolled bus space tag over the board's mapped VA. */
	sc->sc_iot.base = (bus_addr_t)sc->ba;
	sc->sc_iot.absm = &amiga_bus_stride_4swap;

	nchannels = 2;
	if (zap->prodid == 42) {
		aprint_normal(": Catweasel Z2\n");
		nchannels = 3;
	} else if (zap->serno == 0)
		aprint_normal(": Buddha\n");
	else
		aprint_normal(": Buddha Flash\n");

	/* XXX pio mode setting not implemented yet. */
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = nchannels;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (ch = 0; ch < nchannels; ch++) {
		struct ata_channel *cp;
		struct wdc_regs *wdr;
		int i;

		cp = &sc->channels[ch];
		sc->wdc_chanarray[ch] = cp;
		cp->ch_channel = ch;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue = malloc(sizeof(struct ata_queue),
		    M_DEVBUF, M_NOWAIT);
		if (cp->ch_queue == NULL) {
			aprint_error_dev(self,
			    "can't allocate memory for command queue\n");
			return;
		}
		cp->ch_ndrive = 2;

		/*
		 * XXX According to the Buddha docs, we should use a method
		 * array that adds 0x40 to the address for byte accesses, to
		 * get the slow timing for command accesses, and the 0x00
		 * offset for the word (fast) accesses.  This will be
		 * reconsidered when implementing setting the timing.
		 *
		 * XXX We also could consider to abuse the 32bit capability, or
		 * 32bit accesses to the words (which will read in two words)
		 * for better performance.
		 *			-is
		 */
		wdr = CHAN_TO_WDC_REGS(cp);

		/* Channels are 0x80 apart; cmd at 0x210, ctl at 0x250. */
		wdr->cmd_iot = &sc->sc_iot;
		if (bus_space_map(wdr->cmd_iot, 0x210+ch*0x80, 8, 0,
		    &wdr->cmd_baseioh)) {
			aprint_error_dev(self,
			    "couldn't map cmd registers\n");
			return;
		}

		wdr->ctl_iot = &sc->sc_iot;
		if (bus_space_map(wdr->ctl_iot, 0x250+ch*0x80, 2, 0,
		    &wdr->ctl_ioh)) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 8);
			aprint_error_dev(self,
			    "couldn't map ctl registers\n");
			return;
		}

		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				aprint_error_dev(self,
				    "couldn't subregion cmd regs\n");
				return;
			}
		}

		wdc_init_shadow_regs(cp);
		wdcattach(cp);
	}

	/* One interrupt for all channels; handler fans out. */
	sc->sc_isr.isr_intr = wdc_buddha_intr;
	sc->sc_isr.isr_arg = sc;
	sc->sc_isr.isr_ipl = 2;
	add_isr (&sc->sc_isr);
	sc->ba[0xfc0] = 0;	/* enable interrupts */
}
/*
 * wdc_amiga_attach:
 *
 *	Attach the on-board Gayle IDE interface of the A1200/A4000.
 *	Registers are accessed via the stride-4 byte-swapped bus space
 *	method array; the A4000 uses a different base address.
 */
void
wdc_amiga_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_amiga_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	int i;

	aprint_normal("\n");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	gayle_init();

	if (is_a4000()) {
		sc->cmd_iot.base =
		    (bus_addr_t)ztwomap(GAYLE_IDE_BASE_A4000 + 2);
	} else {
		sc->cmd_iot.base = (bus_addr_t)ztwomap(GAYLE_IDE_BASE + 2);
	}
	sc->cmd_iot.absm = sc->ctl_iot.absm = &amiga_bus_stride_4swap;
	wdr->cmd_iot = &sc->cmd_iot;
	wdr->ctl_iot = &sc->ctl_iot;

	if (bus_space_map(wdr->cmd_iot, 0, 0x40, 0, &wdr->cmd_baseioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 0x40);
			aprint_error_dev(self, "couldn't map registers\n");
			return;
		}
	}

	/*
	 * Aux/control register.  NOTE(review): offset 0x406 is relative
	 * to cmd_baseioh through the stride-4 method array — presumably
	 * intentional for Gayle; confirm against hardware docs.
	 * Fix: report the error and unmap instead of silently leaking.
	 */
	if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, 0x406, 1,
	    &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map control register\n");
		bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh, 0x40);
		return;
	}

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_chanlist[0] = &sc->sc_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	sc->sc_channel.ch_channel = 0;
	sc->sc_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_channel.ch_queue = &sc->sc_chqueue;

	wdc_init_shadow_regs(&sc->sc_channel);

	sc->sc_isr.isr_intr = wdc_amiga_intr;
	sc->sc_isr.isr_arg = sc;
	sc->sc_isr.isr_ipl = 2;
	add_isr (&sc->sc_isr);

	if (!is_a4000())
		gayle_intr_enable_set(GAYLE_INT_IDE);

	wdcattach(&sc->sc_channel);
}
/*
 * njata32_attach:
 *
 *	Common attach for the Workbit NinjaATA-32 busmastering PIO IDE
 *	controller.  Allocates and maps the scatter/gather DMA page,
 *	creates per-device transfer maps, describes the single channel
 *	to the MI wdc layer, and attaches it.  On any DMA setup failure
 *	the goto chain unwinds all previously acquired resources.
 */
void
njata32_attach(struct njata32_softc *sc)
{
	bus_addr_t dmaaddr;
	int i, devno, error;
	struct wdc_regs *wdr;

	/*
	 * allocate DMA resource
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), PAGE_SIZE, 0,
	    &sc->sc_sgt_seg, 1, &sc->sc_sgt_nsegs, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to allocate sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		return;
	}
	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_sgt_seg,
	    sc->sc_sgt_nsegs, sizeof(struct njata32_dma_page),
	    (void **)&sc->sc_sgtpg,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error("%s: unable to map sgt page, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail1;
	}
	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct njata32_dma_page), 1,
	    sizeof(struct njata32_dma_page), 0, BUS_DMA_NOWAIT,
	    &sc->sc_dmamap_sgt)) != 0) {
		aprint_error("%s: unable to create sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail2;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap_sgt,
	    sc->sc_sgtpg, sizeof(struct njata32_dma_page),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error("%s: unable to load sgt DMA map, error = %d\n",
		    NJATA32NAME(sc), error);
		goto fail3;
	}

	dmaaddr = sc->sc_dmamap_sgt->dm_segs[0].ds_addr;

	/* Per-device S/G table addresses and transfer DMA maps. */
	for (devno = 0; devno < NJATA32_NUM_DEV; devno++) {
		sc->sc_dev[devno].d_sgt = sc->sc_sgtpg->dp_sg[devno];
		sc->sc_dev[devno].d_sgt_dma = dmaaddr +
		    offsetof(struct njata32_dma_page, dp_sg[devno]);

		error = bus_dmamap_create(sc->sc_dmat,
		    NJATA32_MAX_XFER,		/* max total map size */
		    NJATA32_NUM_SG,		/* max number of segments */
		    NJATA32_SGT_MAXSEGLEN,	/* max size of a segment */
		    0,				/* boundary */
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_dev[devno].d_dmamap_xfer);
		if (error) {
			aprint_error("%s: failed to create DMA map "
			    "(error = %d)\n", NJATA32NAME(sc), error);
			goto fail4;
		}
	}

	/* device properties */
	sc->sc_wdcdev.sc_atac.atac_cap =
	    ATAC_CAP_DATA16 | ATAC_CAP_DATA32 | ATAC_CAP_PIOBM;
	sc->sc_wdcdev.irqack = njata32_irqack;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = NJATA32_NCHAN;	/* 1 */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = NJATA32_MODE_MAX_PIO;
#if 0	/* ATA DMA is currently unused */
	sc->sc_wdcdev.sc_atac.atac_dma_cap = NJATA32_MODE_MAX_DMA;
#endif
	sc->sc_wdcdev.sc_atac.atac_set_modes = njata32_setup_channel;

	/* DMA control functions */
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = njata32_dma_init;
	sc->sc_wdcdev.piobm_start = njata32_piobm_start;
	sc->sc_wdcdev.dma_finish = njata32_dma_finish;
	sc->sc_wdcdev.piobm_done = njata32_piobm_done;

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_EXTRA_RESETS;

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;

	/* only one channel */
	sc->sc_wdc_chanarray[0] = &sc->sc_ch[0].ch_ata_channel;
	sc->sc_ch[0].ch_ata_channel.ch_channel = 0;
	sc->sc_ch[0].ch_ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->sc_ch[0].ch_ata_channel.ch_queue = &sc->sc_wdc_chqueue;
	sc->sc_ch[0].ch_ata_channel.ch_ndrive = 2; /* max number of drives */

	/* map ATA registers */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
		    NJATA32_OFFSET_WDCREGS + i,
		    i == wd_data ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error("%s: couldn't subregion cmd regs\n",
			    NJATA32NAME(sc));
			goto fail4;
		}
	}
	wdc_init_shadow_regs(&sc->sc_ch[0].ch_ata_channel);
	wdr->data32iot = NJATA32_REGT(sc);
	wdr->data32ioh = wdr->cmd_iohs[wd_data];

	/* map ATA ctl reg */
	wdr->ctl_iot = NJATA32_REGT(sc);
	if (bus_space_subregion(NJATA32_REGT(sc), NJATA32_REGH(sc),
	    NJATA32_REG_WD_ALTSTATUS, 1, &wdr->ctl_ioh) != 0) {
		aprint_error("%s: couldn't subregion ctl regs\n",
		    NJATA32NAME(sc));
		goto fail4;
	}

	sc->sc_flags |= NJATA32_CMDPG_MAPPED;

	/* use flags value as busmaster wait */
	if ((sc->sc_atawait =
	    (uint8_t)device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags))
		aprint_normal("%s: ATA wait = %#x\n",
		    NJATA32NAME(sc), sc->sc_atawait);

	njata32_init(sc, cold);

	wdcattach(&sc->sc_ch[0].ch_ata_channel);

	return;

	/*
	 * cleanup
	 */
fail4:	while (--devno >= 0) {
		bus_dmamap_destroy(sc->sc_dmat,
		    sc->sc_dev[devno].d_dmamap_xfer);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap_sgt);
fail3:	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap_sgt);
fail2:	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_sgtpg,
	    sizeof(struct njata32_dma_page));
fail1:	bus_dmamem_free(sc->sc_dmat, &sc->sc_sgt_seg, sc->sc_sgt_nsegs);
}
/*
 * simide_attach:
 *
 *	Attach a Simtec IDE podule: map and sanity-check the control
 *	registers, then map and wire up both IDE channels.  A failure
 *	while setting up one channel skips just that channel.
 */
void
simide_attach(device_t parent, device_t self, void *aux)
{
	struct simide_softc *sc = device_private(self);
	struct podule_attach_args *pa = (void *)aux;
	int status;
	u_int iobase;
	int channel, i, regfail;
	struct simide_channel *scp;
	struct ata_channel *cp;
	struct wdc_regs *wdr;
	irqhandler_t *ihp;

	/* Note the podule number and validate */
	if (pa->pa_podule_number == -1)
		panic("Podule has disappeared !");

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_podule_number = pa->pa_podule_number;
	sc->sc_podule = pa->pa_podule;
	podules[sc->sc_podule_number].attached = 1;

	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	/*
	 * Ok we need our own bus tag as the register spacing
	 * is not the default.
	 *
	 * For the podulebus the bus tag cookie is the shift
	 * to apply to registers
	 * So duplicate the bus space tag and change the
	 * cookie.
	 *
	 * Also while we are at it replace the default
	 * read/write mulitple short functions with
	 * optimised versions
	 */
	sc->sc_tag = *pa->pa_iot;
	sc->sc_tag.bs_cookie = (void *) DRIVE_REGISTER_SPACING_SHIFT;
	sc->sc_tag.bs_rm_2 = simide_bs_rm_2;
	sc->sc_tag.bs_wm_2 = simide_bs_wm_2;
	sc->sc_ctliot = pa->pa_iot;

	/* Obtain bus space handles for all the control registers */
	if (bus_space_map(sc->sc_ctliot, pa->pa_podule->mod_base +
	    CONTROL_REGISTERS_POFFSET, CONTROL_REGISTER_SPACE, 0,
	    &sc->sc_ctlioh))
		panic("%s: Cannot map control registers",
		    device_xname(self));

	/* Install a clean up handler to make sure IRQ's are disabled */
	if (shutdownhook_establish(simide_shutdown, (void *)sc) == NULL)
		panic("%s: Cannot install shutdown handler",
		    device_xname(self));

	/* Set the interrupt info for this podule */
	sc->sc_podule->irq_addr = pa->pa_podule->mod_base +
	    CONTROL_REGISTERS_POFFSET + (CONTROL_REGISTER_OFFSET << 2);
	sc->sc_podule->irq_mask = STATUS_IRQ;

	sc->sc_ctl_reg = 0;

	status = bus_space_read_1(sc->sc_ctliot, sc->sc_ctlioh,
	    STATUS_REGISTER_OFFSET);

	aprint_normal(":");

	/*
	 * If any of the bits in STATUS_FAULT are zero then we have
	 * a fault.
	 */
	if ((status & STATUS_FAULT) != STATUS_FAULT)
		aprint_normal(" card/cable fault (%02x) -", status);

	if (!(status & STATUS_RESET))
		aprint_normal(" (reset)");
	if (!(status & STATUS_ADDR_TEST))
		aprint_normal(" (addr)");
	if (!(status & STATUS_CS_TEST))
		aprint_normal(" (cs)");
	if (!(status & STATUS_RW_TEST))
		aprint_normal(" (rw)");

	aprint_normal("\n");

	/* Perhaps we should just abort at this point. */
/*	if ((status & STATUS_FAULT) != STATUS_FAULT)
		return;*/

	/*
	 * Enable IDE, Obey IORDY and disabled slow mode
	 */
	sc->sc_ctl_reg |= CONTROL_IDE_ENABLE | CONTROL_IORDY
			| CONTROL_SLOW_MODE_OFF;
	bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
	    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);

	/* Fill in wdc and channel infos */
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 2;

	for (channel = 0 ; channel < 2; channel++) {
		scp = &sc->simide_channels[channel];
		sc->sc_chanarray[channel] = &scp->sc_channel;
		cp = &scp->sc_channel;
		wdr = &sc->sc_wdc_regs[channel];

		cp->ch_channel = channel;
		cp->ch_atac = &sc->sc_wdcdev.sc_atac;
		cp->ch_queue = &scp->sc_chqueue;
		cp->ch_ndrive = 2;
		wdr->cmd_iot = wdr->ctl_iot = &sc->sc_tag;
		iobase = pa->pa_podule->mod_base;

		if (bus_space_map(wdr->cmd_iot, iobase +
		    simide_info[channel].drive_registers,
		    DRIVE_REGISTERS_SPACE, 0, &wdr->cmd_baseioh))
			continue;

		/*
		 * Subregion the command registers.  On failure, unmap
		 * and skip the whole channel.  (The old code issued
		 * "continue" from the inner loop, which went on to the
		 * next register with an already-unmapped base handle.)
		 */
		regfail = 0;
		for (i = 0; i < WDC_NREG; i++) {
			if (bus_space_subregion(wdr->cmd_iot,
			    wdr->cmd_baseioh, i, i == 0 ? 4 : 1,
			    &wdr->cmd_iohs[i]) != 0) {
				bus_space_unmap(wdr->cmd_iot,
				    wdr->cmd_baseioh,
				    DRIVE_REGISTERS_SPACE);
				regfail = 1;
				break;
			}
		}
		if (regfail)
			continue;
		wdc_init_shadow_regs(cp);

		if (bus_space_map(wdr->ctl_iot, iobase +
		    simide_info[channel].aux_register, 4, 0,
		    &wdr->ctl_ioh)) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    DRIVE_REGISTERS_SPACE);
			continue;
		}

		/* Disable interrupts and clear any pending interrupts */
		scp->sc_irqmask = simide_info[channel].irq_mask;
		sc->sc_ctl_reg &= ~scp->sc_irqmask;
		bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
		    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);

		ihp = &scp->sc_ih;
		ihp->ih_func = simide_intr;
		ihp->ih_arg = scp;
		ihp->ih_level = IPL_BIO;
		ihp->ih_name = "simide";
		ihp->ih_maskaddr = pa->pa_podule->irq_addr;
		ihp->ih_maskbits = scp->sc_irqmask;

		if (irq_claim(sc->sc_podule->interrupt, ihp))
			panic("%s: Cannot claim interrupt %d",
			    device_xname(self), sc->sc_podule->interrupt);

		/* clear any pending interrupts and enable interrupts */
		sc->sc_ctl_reg |= scp->sc_irqmask;
		bus_space_write_1(sc->sc_ctliot, sc->sc_ctlioh,
		    CONTROL_REGISTER_OFFSET, sc->sc_ctl_reg);

		wdcattach(cp);
	}
}
/*
 * wdc_isapnp_attach:
 *
 *	Attach an ISA PnP IDE controller.  Expects exactly two I/O
 *	ranges (command block and aux/control) and one IRQ; the PnP
 *	firmware may report the two ranges in either order, so they
 *	are sorted by length below.
 */
static void
wdc_isapnp_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_isapnp_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct isapnp_attach_args *ipa = aux;
	int i;

	if (ipa->ipa_nio != 2 ||
	    ipa->ipa_nmem != 0 ||
	    ipa->ipa_nmem32 != 0 ||
	    ipa->ipa_nirq != 1 ||
	    ipa->ipa_ndrq > 1) {
		aprint_error(": unexpected configuration\n");
		return;
	}

	if (isapnp_config(ipa->ipa_iot, ipa->ipa_memt, ipa)) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_normal(": %s %s\n", ipa->ipa_devident, ipa->ipa_devclass);

	sc->sc_wdcdev.sc_atac.atac_dev = self;
	sc->sc_wdcdev.regs = wdr = &sc->wdc_regs;
	wdr->cmd_iot = ipa->ipa_iot;
	wdr->ctl_iot = ipa->ipa_iot;
	/*
	 * An IDE controller can feed us the regions in any order. Pass
	 * them along with the 8-byte region in sc_ad.ioh, and the other
	 * (2 byte) region in auxioh.
	 */
	if (ipa->ipa_io[0].length == 8) {
		wdr->cmd_baseioh = ipa->ipa_io[0].h;
		wdr->ctl_ioh = ipa->ipa_io[1].h;
	} else {
		wdr->cmd_baseioh = ipa->ipa_io[1].h;
		wdr->ctl_ioh = ipa->ipa_io[0].h;
	}

	/* Byte-spaced command registers; data register is 4 bytes. */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh, i,
		    i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			aprint_error(": couldn't subregion registers\n");
			return;
		}
	}

	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_iohs[0];

	sc->sc_ic = ipa->ipa_ic;
	sc->sc_ih = isa_intr_establish(ipa->ipa_ic, ipa->ipa_irq[0].num,
	    ipa->ipa_irq[0].type, IPL_BIO, wdcintr, &sc->ata_channel);

#ifdef notyet
	if (ipa->ipa_ndrq > 0) {
		sc->sc_drq = ipa->ipa_drq[0].num;

		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.dma_start = &wdc_isapnp_dma_start;
		sc->sc_wdcdev.dma_finish = &wdc_isapnp_dma_finish;
		wdc_isapnp_dma_setup(sc);
	}
#endif
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 0;
	sc->wdc_chanlist[0] = &sc->ata_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanlist;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->ata_channel.ch_channel = 0;
	sc->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;
	sc->ata_channel.ch_queue = &sc->wdc_chqueue;
	sc->ata_channel.ch_ndrive = 2;

	wdc_init_shadow_regs(&sc->ata_channel);

	wdcattach(&sc->ata_channel);
}
/*
 * Attach an on-board (OBIO/macio) IDE controller: decode the interrupt
 * properties from OpenFirmware, map the widely-spaced register block,
 * optionally set up DBDMA, and hand the channel to the MI wdc layer.
 */
void
wdc_obio_attach(device_t parent, device_t self, void *aux)
{
	struct wdc_obio_softc *sc = device_private(self);
	struct wdc_regs *wdr;
	struct confargs *ca = aux;
	struct ata_channel *chp = &sc->sc_channel;
	int intr, i, type = IST_EDGE;
	int use_dma = 0;
	char path[80];

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	/* DMA is opt-in via a config flag and only on suitable nodes. */
	if (device_cfdata(sc->sc_wdcdev.sc_atac.atac_dev)->cf_flags &
	    WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Don't work yet. */
	}

	/*
	 * Decode the interrupt property.  With >8 bytes of interrupt
	 * data the second cell selects level vs. edge trigger; with no
	 * property at all fall back to the default PIO IRQ.
	 */
	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		aprint_normal(" irq %d", intr);
		if (ca->ca_nintr > 8) {
			type = ca->ca_intr[1] ? IST_LEVEL : IST_EDGE;
		}
		aprint_normal(", %s triggered", (type == IST_EDGE) ?
		    "edge" : "level");
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		aprint_normal(" irq property not found; using %d", intr);
	} else {
		aprint_error(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		aprint_normal(": DMA transfer");

	aprint_normal("\n");

	sc->sc_wdcdev.regs = wdr = &sc->sc_wdc_regs;
	wdr->cmd_iot = wdr->ctl_iot = ca->ca_tag;

	/*
	 * Registers are spaced 16 bytes apart on this hardware, hence
	 * the << 4 on every offset and on the mapped size.
	 */
	if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[0],
	    WDC_REG_NPORTS << 4, 0, &wdr->cmd_baseioh) ||
	    bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
	    WDC_AUXREG_OFFSET << 4, 1, &wdr->ctl_ioh)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	/*
	 * Per-register handles; register 0 (the data port) is 4 bytes
	 * wide, the rest are 1 byte each.
	 */
	for (i = 0; i < WDC_NREG; i++) {
		if (bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
		    i << 4, i == 0 ? 4 : 1, &wdr->cmd_iohs[i]) != 0) {
			bus_space_unmap(wdr->cmd_iot, wdr->cmd_baseioh,
			    WDC_REG_NPORTS << 4);
			aprint_error_dev(self,
			    "couldn't subregion registers\n");
			return;
		}
	}
#if 0
	wdr->data32iot = wdr->cmd_iot;
	wdr->data32ioh = wdr->cmd_ioh;
#endif

	sc->sc_ih = intr_establish(intr, type, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		/* Allocate room for 20 DBDMA descriptors. */
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		/*
		 * XXX
		 * we don't use ca->ca_reg[3] for size here because at least
		 * on the PB3400c it says 0x200 for both IDE channels ( the
		 * one on the mainboard and the other on the mediabay ) but
		 * their start addresses are only 0x100 apart. Since those
		 * DMA registers are always 0x100 or less we don't really
		 * have to care though
		 */
		if (bus_space_map(wdr->cmd_iot, ca->ca_baseaddr + ca->ca_reg[2],
		    0x100, BUS_SPACE_MAP_LINEAR, &sc->sc_dmaregh)) {

			aprint_error_dev(self,
			    "unable to map DMA registers (%08x)\n",
			    ca->ca_reg[2]);
			/* should unmap stuff here */
			return;
		}
		sc->sc_dmareg = bus_space_vaddr(wdr->cmd_iot, sc->sc_dmaregh);

		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
		/* "ata-4" nodes additionally support UDMA up to mode 4. */
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.sc_atac.atac_set_modes =
			    ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.sc_atac.atac_pio_cap = 3;
			sc->sc_wdcdev.sc_atac.atac_dma_cap = 1;
		}
#endif
	} else {
		/* all non-DMA controllers can use adjust_timing */
		sc->sc_wdcdev.sc_atac.atac_set_modes = adjust_timing;
	}

	/* One channel, two drives, PIO up to mode 4. */
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
	sc->sc_chanptr = chp;
	sc->sc_wdcdev.sc_atac.atac_channels = &sc->sc_chanptr;
	sc->sc_wdcdev.sc_atac.atac_nchannels = 1;
	sc->sc_wdcdev.wdc_maxdrives = 2;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->ch_channel = 0;
	chp->ch_atac = &sc->sc_wdcdev.sc_atac;
	chp->ch_queue = &sc->sc_chqueue;
	wdc_init_shadow_regs(chp);

#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		/* Set bit 3 in the ohare feature register to enable
		 * this channel; raw register poke, see XXX above. */
		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
}