/*
 * Attach one ATA channel hanging off the cbus controller.  The channel
 * borrows the parent's I/O resources and only fills in its register
 * offset vector before handing off to the generic ATA attach path.
 */
static int
ata_cbuschannel_attach(device_t dev)
{
    struct ata_cbus_controller *parent =
	device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int reg;

    /* Only attach once. */
    if (ch->attached)
	return (0);
    ch->attached = 1;

    ch->unit = (intptr_t)device_get_ivars(dev);

    /* Task-file registers live in the parent's I/O range, stride 2. */
    for (reg = ATA_DATA; reg <= ATA_COMMAND; reg++) {
	ch->r_io[reg].res = parent->io;
	ch->r_io[reg].offset = reg << 1;
    }
    ch->r_io[ATA_CONTROL].res = parent->ctlio;
    ch->r_io[ATA_CONTROL].offset = 0;
    ch->r_io[ATA_IDX_ADDR].res = parent->io;
    ata_default_registers(dev);

    /* This bus only does 16 bit I/O. */
    ch->flags |= ATA_USE_16BIT;
    ata_generic_hw(dev);
    return ata_attach(dev);
}
/*
 * Attach a PC Card ATA channel.  Allocates the command-block I/O range,
 * locates (or allocates) the control register, and probes/attaches the
 * channel via the generic ATA code.
 */
static int
ata_pccard_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct resource *io, *ctlio;
    int i, rid, err;
    uint16_t funce;

    /* Only attach once. */
    if (ch->attached)
	return (0);
    ch->attached = 1;

    /* allocate the io range to get start and length */
    rid = ATA_IOADDR_RID;
    if (!(io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
				  ATA_IOSIZE, RF_ACTIVE)))
	return (ENXIO);

    /* setup the resource vectors */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = io;
	ch->r_io[i].offset = i;
    }
    ch->r_io[ATA_IDX_ADDR].res = io;

    /*
     * if we got more than the default ATA_IOSIZE ports, this is a device
     * where ctlio is located at offset 14 into "normal" io space.
     */
    if (rman_get_size(io) > ATA_IOSIZE) {
	ch->r_io[ATA_CONTROL].res = io;
	ch->r_io[ATA_CONTROL].offset = 14;
    }
    else {
	/* Control register lives in its own resource range. */
	rid = ATA_CTLADDR_RID;
	if (!(ctlio = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
					 ATA_CTLIOSIZE, RF_ACTIVE))) {
	    /* Undo the command-block allocation and clear the vectors. */
	    bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io);
	    for (i = ATA_DATA; i < ATA_MAX_RES; i++)
		ch->r_io[i].res = NULL;
	    return (ENXIO);
	}
	ch->r_io[ATA_CONTROL].res = ctlio;
	ch->r_io[ATA_CONTROL].offset = 0;
    }
    ata_default_registers(dev);

    /* initialize softc for this channel */
    ch->unit = 0;
    ch->flags |= ATA_USE_16BIT;
    funce = 0;		/* Default to sane setting of FUNCE */
    pccard_get_funce_disk(dev, &funce);
    /* Cards that don't advertise twin-drive capability get no slave. */
    if (!(funce & PFD_I_D))
	ch->flags |= ATA_NO_SLAVE;
    ata_generic_hw(dev);
    err = ata_probe(dev);
    /*
     * NOTE(review): on probe failure the io/ctlio resources allocated
     * above are not released here — presumably the detach path cleans
     * them up; confirm against the driver's detach routine.
     */
    if (err)
	return (err);
    return (ata_attach(dev));
}
/*
 * Attach an AT91 CompactFlash ATA channel.  All registers share the
 * parent's single memory resource; only the offsets differ.
 */
static int
at91_channel_attach(device_t dev)
{
    struct at91_cfata_softc *sc = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int i;

    /* Every register vector points into the parent's memory window. */
    for (i = 0; i < ATA_MAX_RES; i++)
	ch->r_io[i].res = sc->mem_res;

    /*
     * CF+ Specification.
     * 6.1.3 Memory Mapped Addressing.
     */
    ch->r_io[ATA_DATA].offset = 0x00;
    ch->r_io[ATA_FEATURE].offset = 0x01;
    ch->r_io[ATA_COUNT].offset = 0x02;
    ch->r_io[ATA_SECTOR].offset = 0x03;
    ch->r_io[ATA_CYL_LSB].offset = 0x04;
    ch->r_io[ATA_CYL_MSB].offset = 0x05;
    ch->r_io[ATA_DRIVE].offset = 0x06;
    ch->r_io[ATA_COMMAND].offset = 0x07;
    /* Read-side aliases share offsets with their write-side registers. */
    ch->r_io[ATA_ERROR].offset = 0x01;
    ch->r_io[ATA_IREASON].offset = 0x02;
    ch->r_io[ATA_STATUS].offset = 0x07;
    ch->r_io[ATA_ALTSTAT].offset = 0x0e;
    ch->r_io[ATA_CONTROL].offset = 0x0e;
    /* Should point at the base of registers. */
    ch->r_io[ATA_IDX_ADDR].offset = 0x0;
    ata_generic_hw(dev);
    return (ata_attach(dev));
}
/*
 * Attach the Avila (IXP4xx) ATA channel.  Registers are accessed through
 * the softc's synthetic bus-space handles rather than a real resource.
 */
static int
avila_channel_attach(device_t dev)
{
    struct ata_avila_softc *sc = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int i;

    /* Default all vectors to the main register window. */
    for (i = 0; i < ATA_MAX_RES; i++)
	ch->r_io[i].res = &sc->sc_ata;
    ch->r_io[ATA_DATA].offset = ATA_DATA;
    ch->r_io[ATA_FEATURE].offset = ATA_FEATURE;
    ch->r_io[ATA_COUNT].offset = ATA_COUNT;
    ch->r_io[ATA_SECTOR].offset = ATA_SECTOR;
    ch->r_io[ATA_CYL_LSB].offset = ATA_CYL_LSB;
    ch->r_io[ATA_CYL_MSB].offset = ATA_CYL_MSB;
    ch->r_io[ATA_DRIVE].offset = ATA_DRIVE;
    ch->r_io[ATA_COMMAND].offset = ATA_COMMAND;
    /* Error register shares the feature register's offset on reads. */
    ch->r_io[ATA_ERROR].offset = ATA_FEATURE;
    /* NB: should be used only for ATAPI devices */
    ch->r_io[ATA_IREASON].offset = ATA_COUNT;
    ch->r_io[ATA_STATUS].offset = ATA_COMMAND;

    /* NB: the control and alt status registers are special */
    ch->r_io[ATA_ALTSTAT].res = &sc->sc_alt_ata;
    ch->r_io[ATA_ALTSTAT].offset = AVILA_IDE_CTRL;
    ch->r_io[ATA_CONTROL].res = &sc->sc_alt_ata;
    ch->r_io[ATA_CONTROL].offset = AVILA_IDE_CTRL;

    /* NB: by convention this points at the base of registers */
    ch->r_io[ATA_IDX_ADDR].offset = 0;

    ata_generic_hw(dev);
    return ata_attach(dev);
}
/*
 * Attach the ZBbus (SiByte/SWARM) ATA channel.  The register layout is
 * tunable: hints "regshift" and "regoffset" let a board override the
 * default SWARM address shift (5) and base offset (0x1F0).
 *
 * Fix: the address-of operators in the resource_int_value() calls had
 * been garbled into the HTML entity "&reg;" ("&regshift" / "&regoffset"
 * rendered as "(R)shift" / "(R)offset"), which does not compile.
 */
static int
ata_zbbus_attach(device_t dev)
{
    int i, rid, regshift, regoffset;
    struct ata_channel *ch;
    struct resource *io;

    ch = device_get_softc(dev);

    /* Only attach once. */
    if (ch->attached)
	return (0);
    ch->attached = 1;

    rid = 0;
    io = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE);
    if (io == NULL)
	return (ENXIO);

    /*
     * SWARM needs an address shift of 5 when accessing ATA registers.
     *
     * For e.g. an access to register 4 actually needs an address
     * of (4 << 5) to be output on the generic bus.
     */
    regshift = 5;
    resource_int_value(device_get_name(dev), device_get_unit(dev),
	"regshift", &regshift);
    if (regshift && bootverbose)
	device_printf(dev, "using a register shift of %d\n", regshift);

    regoffset = 0x1F0;
    resource_int_value(device_get_name(dev), device_get_unit(dev),
	"regoffset", &regoffset);
    if (regoffset && bootverbose) {
	device_printf(dev, "using a register offset of 0x%0x\n", regoffset);
    }

    /* setup the ata register addresses */
    for (i = ATA_DATA; i <= ATA_COMMAND; ++i) {
	ch->r_io[i].res = io;
	ch->r_io[i].offset = (regoffset + i) << regshift;
    }
    ch->r_io[ATA_CONTROL].res = io;
    ch->r_io[ATA_CONTROL].offset = (regoffset + ATA_CTLOFFSET) << regshift;
    ch->r_io[ATA_IDX_ADDR].res = io;	/* XXX what is this used for */
    ata_default_registers(dev);

    /* initialize softc for this channel */
    ch->unit = 0;
    ch->flags |= ATA_USE_16BIT;
    ata_generic_hw(dev);
    return (ata_attach(dev));
}
/*
 * Attach a legacy ISA ATA channel.  Allocates the command-block port
 * range, derives/allocates the control-block range, fills the register
 * vector and hands off to the generic ATA attach path.
 */
static int
ata_isa_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct resource *io = NULL, *ctlio = NULL;
    u_long tmp;
    int reg, rid;

    /* Only attach once. */
    if (ch->attached)
	return ENXIO - ENXIO;	/* 0 */
    ch->attached = 1;

    /* allocate the io port range */
    rid = ATA_IOADDR_RID;
    io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
	ATA_IOSIZE, RF_ACTIVE);
    if (io == NULL)
	return ENXIO;

    /* set the altport range if the config didn't supply one */
    if (bus_get_resource(dev, SYS_RES_IOPORT, ATA_CTLADDR_RID, &tmp, &tmp))
	bus_set_resource(dev, SYS_RES_IOPORT, ATA_CTLADDR_RID,
	    rman_get_start(io) + ATA_CTLOFFSET, ATA_CTLIOSIZE);

    /* allocate the altport range */
    rid = ATA_CTLADDR_RID;
    ctlio = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
	ATA_CTLIOSIZE, RF_ACTIVE);
    if (ctlio == NULL) {
	bus_release_resource(dev, SYS_RES_IOPORT, ATA_IOADDR_RID, io);
	return ENXIO;
    }

    /* setup the resource vectors */
    for (reg = ATA_DATA; reg <= ATA_COMMAND; reg++) {
	ch->r_io[reg].res = io;
	ch->r_io[reg].offset = reg;
    }
    ch->r_io[ATA_CONTROL].res = ctlio;
    ch->r_io[ATA_CONTROL].offset = 0;
    ch->r_io[ATA_IDX_ADDR].res = io;
    ata_default_registers(dev);

    /* initialize softc for this channel */
    ch->unit = 0;
    ch->flags |= ATA_USE_16BIT;
    ata_generic_hw(dev);
    return ata_attach(dev);
}
/*
 * Attach the MacIO ATA cell: set up the DBDMA engine, latch the initial
 * timing configuration and register the controller-specific transaction
 * hook before calling the common attach path.
 */
static int
ata_macio_attach(device_t dev)
{
    struct ata_macio_softc *sc = device_get_softc(dev);
    uint32_t timingreg;
#if USE_DBDMA_IRQ
    int dbdma_irq_rid = 1;
    struct resource *dbdma_irq;
    void *cookie;
#endif

    /* Init DMA engine */
    sc->sc_ch.dbdma_rid = 1;
    sc->sc_ch.dbdma_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	&sc->sc_ch.dbdma_rid, RF_ACTIVE);
    /* NOTE(review): dbdma_regs is not NULL-checked before dmainit. */
    ata_dbdma_dmainit(dev);

    /*
     * Configure initial timings: split the saved timing register into
     * UDMA/WDMA/PIO fields whose bit layout depends on the cell revision.
     */
    timingreg = bus_read_4(sc->sc_mem, ATA_MACIO_TIMINGREG);
    if (sc->rev == 4) {
	sc->udmaconf[0] = sc->udmaconf[1] = timingreg & 0x1ff00000;
	sc->wdmaconf[0] = sc->wdmaconf[1] = timingreg & 0x001ffc00;
	sc->pioconf[0] = sc->pioconf[1] = timingreg & 0x000003ff;
    } else {
	sc->udmaconf[0] = sc->udmaconf[1] = 0;
	sc->wdmaconf[0] = sc->wdmaconf[1] = timingreg & 0xfffff800;
	sc->pioconf[0] = sc->pioconf[1] = timingreg & 0x000007ff;
    }

#if USE_DBDMA_IRQ
    /* Bind to DBDMA interrupt as well */
    if ((dbdma_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	&dbdma_irq_rid, RF_SHAREABLE | RF_ACTIVE)) != NULL) {
	bus_setup_intr(dev, dbdma_irq, ATA_INTR_FLAGS, NULL,
	    (driver_intr_t *)ata_interrupt, sc,&cookie);
    }
#endif

    /* Set begin_transaction */
    sc->sc_ch.sc_ch.hw.begin_transaction = ata_macio_begin_transaction;

    return ata_attach(dev);
}
/*
 * Attach the Kauai (PCI) ATA cell: enable bus mastering, point the DBDMA
 * engine into the shared memory BAR, latch initial PIO timings and set
 * the controller-specific transaction hook.
 */
static int
ata_kauai_attach(device_t dev)
{
    struct ata_kauai_softc *sc = device_get_softc(dev);
#if USE_DBDMA_IRQ
    int dbdma_irq_rid = 1;
    struct resource *dbdma_irq;
    void *cookie;
#endif

    pci_enable_busmaster(dev);

    /* Init DMA engine: DBDMA registers live inside the main BAR. */
    sc->sc_ch.dbdma_rid = 1;
    sc->sc_ch.dbdma_regs = sc->sc_memr;
    sc->sc_ch.dbdma_offset = ATA_KAUAI_DBDMAOFFSET;
    ata_dbdma_dmainit(dev);

#if USE_DBDMA_IRQ
    /* Bind to DBDMA interrupt as well */
    if ((dbdma_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	&dbdma_irq_rid, RF_SHAREABLE | RF_ACTIVE)) != NULL) {
	bus_setup_intr(dev, dbdma_irq, ATA_INTR_FLAGS, NULL,
	    (driver_intr_t *)ata_kauai_dma_interrupt, sc,&cookie);
    }
#endif

    /* Set up initial mode: keep the firmware's PIO config, no (U)DMA yet. */
    sc->pioconf[0] = sc->pioconf[1] =
	bus_read_4(sc->sc_memr, PIO_CONFIG_REG) & 0x0f000fff;
    sc->udmaconf[0] = sc->udmaconf[1] = 0;
    sc->wdmaconf[0] = sc->wdmaconf[1] = 0;

    /* Magic FCR value from Apple */
    bus_write_4(sc->sc_memr, 0, 0x00000007);

    /* Set begin_transaction */
    sc->sc_ch.sc_ch.hw.begin_transaction = ata_kauai_begin_transaction;

    return ata_attach(dev);
}
/*
 * Attach a PCI ATA channel: run the controller's DMA init hook, allocate
 * per-channel DMA state, then let the controller claim its resources.
 * On allocation failure the DMA state is torn down again.
 */
static int
ata_pcichannel_attach(device_t dev)
{
    struct ata_pci_controller *ctlr =
	device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int rc;

    if (ctlr->dmainit)
	ctlr->dmainit(dev);
    if (ch->dma)
	ch->dma->alloc(dev);

    rc = ctlr->allocate(dev);
    if (rc != 0) {
	/* Undo the DMA allocation before bailing out. */
	if (ch->dma)
	    ch->dma->free(dev);
	return rc;
    }

    return ata_attach(dev);
}
/*
 * Attach a PCI ATA channel (newer framework): record identity, pick up
 * the per-channel power-management hint, and run the controller's
 * channel-attach hook before the generic attach.
 */
static int
ata_pcichannel_attach(device_t dev)
{
    struct ata_pci_controller *ctlr =
	device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    int rc;

    /* Only attach once. */
    if (ch->attached)
	return (0);
    ch->attached = 1;

    ch->dev = dev;
    ch->unit = (intptr_t)device_get_ivars(dev);
    /* Optional "pm_level" hint tunes channel power management. */
    resource_int_value(device_get_name(dev), device_get_unit(dev),
	"pm_level", &ch->pm_level);

    rc = ctlr->ch_attach(dev);
    if (rc != 0)
	return rc;

    return ata_attach(dev);
}
/*
 * We add all the devices which we know about.
 * The generic attach routine will attach them if they are alive.
 *
 * Fix: the result of bus_alloc_resource_any() was never checked; on
 * failure every register vector ended up pointing at NULL and the
 * attach continued anyway.  Bail out with ENXIO instead.
 */
static int
xlr_pcmcia_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int i;
    int rid = 0;
    struct resource *mem_res;

    mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
    if (mem_res == NULL)
	return (ENXIO);

    /* All registers live in the one memory window. */
    for (i = 0; i < ATA_MAX_RES; i++)
	ch->r_io[i].res = mem_res;

    /*
     * CF+ Specification.
     */
    ch->r_io[ATA_DATA].offset = XLR_PCMCIA_DATA_REG;
    ch->r_io[ATA_FEATURE].offset = XLR_PCMCIA_ERROR_REG;
    ch->r_io[ATA_COUNT].offset = XLR_PCMCIA_SECT_CNT_REG;
    ch->r_io[ATA_SECTOR].offset = XLR_PCMCIA_SECT_NUM_REG;
    ch->r_io[ATA_CYL_LSB].offset = XLR_PCMCIA_CYLINDER_LOW_REG;
    ch->r_io[ATA_CYL_MSB].offset = XLR_PCMCIA_CYLINDER_HIGH_REG;
    ch->r_io[ATA_DRIVE].offset = XLR_PCMCIA_SECT_DRIVE_HEAD_REG;
    ch->r_io[ATA_COMMAND].offset = XLR_PCMCIA_CMD_STATUS_REG;
    /* Read-side aliases of the write-side registers. */
    ch->r_io[ATA_ERROR].offset = XLR_PCMCIA_ERROR_REG;
    ch->r_io[ATA_IREASON].offset = XLR_PCMCIA_SECT_CNT_REG;
    ch->r_io[ATA_STATUS].offset = XLR_PCMCIA_CMD_STATUS_REG;
    ch->r_io[ATA_ALTSTAT].offset = XLR_PCMCIA_ALT_STATUS_REG;
    ch->r_io[ATA_CONTROL].offset = XLR_PCMCIA_CONTROL_REG;
    /* Should point at the base of registers. */
    ch->r_io[ATA_IDX_ADDR].offset = XLR_PCMCIA_DATA_REG;
    ata_generic_hw(dev);

    return (ata_attach(dev));
}
/*
 * Character-device ioctl entry point for the ATA control device.
 * Dispatches attach/detach/reinit, mode get/set, parameter queries,
 * enclosure status, RAID management and raw ATAPI pass-through.
 *
 * Fix: in the ATAPICMD case a failing copyin() returned without freeing
 * the bounce buffer, leaking kernel memory on every bad user pointer.
 */
static int
ataioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct proc *p)
{
    struct ata_cmd *iocmd = (struct ata_cmd *)addr;
    struct ata_channel *ch;
    device_t device = devclass_get_device(ata_devclass, iocmd->channel);
    int error;

    if (cmd != IOCATA)
	return ENOTTY;
    /* -1 means "all"/"any"; anything beyond SLAVE is out of range. */
    if (iocmd->channel < -1 || iocmd->device < -1 || iocmd->device > SLAVE)
	return ENXIO;

    switch (iocmd->cmd) {
    case ATAATTACH:
	/* should enable channel HW on controller that can SOS XXX */
	error = ata_probe(device);
	if (!error)
	    error = ata_attach(device);
	return error;

    case ATADETACH:
	error = ata_detach(device);
	/* should disable channel HW on controller that can SOS XXX */
	return error;

    case ATAREINIT:
	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	ATA_SLEEPLOCK_CH(ch, ATA_ACTIVE);
	/* ata_reinit() unlocks on success; unlock ourselves on failure. */
	if ((error = ata_reinit(ch)))
	    ATA_UNLOCK_CH(ch);
	return error;

    case ATAGMODE:
	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	/*
	 * NOTE(review): master presence is gated on .driver but slave on
	 * .param — looks inconsistent; confirm which field is the real
	 * "device present" indicator.
	 */
	if ((iocmd->device == MASTER || iocmd->device == -1) &&
	    ch->device[MASTER].driver)
	    iocmd->u.mode.mode[MASTER] = ch->device[MASTER].mode;
	else
	    iocmd->u.mode.mode[MASTER] = -1;

	if ((iocmd->device == SLAVE || iocmd->device == -1) &&
	    ch->device[SLAVE].param)
	    iocmd->u.mode.mode[SLAVE] = ch->device[SLAVE].mode;
	else
	    iocmd->u.mode.mode[SLAVE] = -1;
	return 0;

    case ATASMODE:
	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	if ((iocmd->device == MASTER || iocmd->device == -1) &&
	    iocmd->u.mode.mode[MASTER] >= 0 && ch->device[MASTER].param) {
	    ata_change_mode(&ch->device[MASTER],iocmd->u.mode.mode[MASTER]);
	    iocmd->u.mode.mode[MASTER] = ch->device[MASTER].mode;
	}
	else
	    iocmd->u.mode.mode[MASTER] = -1;

	if ((iocmd->device == SLAVE || iocmd->device == -1) &&
	    iocmd->u.mode.mode[SLAVE] >= 0 && ch->device[SLAVE].param) {
	    ata_change_mode(&ch->device[SLAVE], iocmd->u.mode.mode[SLAVE]);
	    iocmd->u.mode.mode[SLAVE] = ch->device[SLAVE].mode;
	}
	else
	    iocmd->u.mode.mode[SLAVE] = -1;
	return 0;

    case ATAGPARM:
	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	iocmd->u.param.type[MASTER] =
	    ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER);
	iocmd->u.param.type[SLAVE] =
	    ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE);
	if (ch->device[MASTER].name)
	    strcpy(iocmd->u.param.name[MASTER], ch->device[MASTER].name);
	if (ch->device[SLAVE].name)
	    strcpy(iocmd->u.param.name[SLAVE], ch->device[SLAVE].name);
	if (ch->device[MASTER].param)
	    bcopy(ch->device[MASTER].param, &iocmd->u.param.params[MASTER],
		  sizeof(struct ata_params));
	if (ch->device[SLAVE].param)
	    bcopy(ch->device[SLAVE].param, &iocmd->u.param.params[SLAVE],
		  sizeof(struct ata_params));
	return 0;

    case ATAENCSTAT: {
	struct ata_device *atadev;

	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	if (iocmd->device == SLAVE)
	    atadev = &ch->device[SLAVE];
	else
	    atadev = &ch->device[MASTER];
	return ata_enclosure_status(atadev,
				    &iocmd->u.enclosure.fan,
				    &iocmd->u.enclosure.temp,
				    &iocmd->u.enclosure.v05,
				    &iocmd->u.enclosure.v12);
    }

#if NATADISK > 0
    case ATARAIDREBUILD:
	return ata_raid_rebuild(iocmd->channel);

    case ATARAIDCREATE:
	return ata_raid_create(&iocmd->u.raid_setup);

    case ATARAIDDELETE:
	return ata_raid_delete(iocmd->channel);

    case ATARAIDSTATUS:
	return ata_raid_status(iocmd->channel, &iocmd->u.raid_status);
#endif

#if DEV_ATAPIALL
    case ATAPICMD: {
	struct ata_device *atadev;
	caddr_t buf;

	if (!device || !(ch = device_get_softc(device)))
	    return ENXIO;
	if (!(atadev = &ch->device[iocmd->device]) ||
	    !(ch->devices & (iocmd->device == MASTER ?
			     ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE)))
	    return ENODEV;

	if (!(buf = malloc(iocmd->u.atapi.count, M_ATA, M_NOWAIT)))
	    return ENOMEM;

	if (iocmd->u.atapi.flags & ATAPI_CMD_WRITE) {
	    error = copyin(iocmd->u.atapi.data, buf, iocmd->u.atapi.count);
	    if (error) {
		/* Fix: buf was leaked here before. */
		free(buf, M_ATA);
		return error;
	    }
	}
	/*
	 * NOTE(review): the READ check below uses "==" while the WRITE
	 * check above uses "&"; combined flag values would not get
	 * ATPR_F_READ — confirm intent before changing.
	 */
	error = atapi_queue_cmd(atadev, iocmd->u.atapi.ccb, buf,
				iocmd->u.atapi.count,
				(iocmd->u.atapi.flags == ATAPI_CMD_READ ?
				 ATPR_F_READ : 0) | ATPR_F_QUIET,
				iocmd->u.atapi.timeout, NULL, NULL);
	if (error) {
	    /* Hand the error and sense data back; report ioctl success. */
	    iocmd->u.atapi.error = error;
	    bcopy(&atadev->result, iocmd->u.atapi.sense_data,
		  sizeof(struct atapi_reqsense));
	    error = 0;
	}
	else if (iocmd->u.atapi.flags & ATAPI_CMD_READ)
	    error = copyout(buf, iocmd->u.atapi.data, iocmd->u.atapi.count);

	free(buf, M_ATA);
	return error;
    }
#endif
    default:
	break;
    }
    return ENOTTY;
}
/*
 * Attach one Marvell SATA channel: wire the legacy shadow registers and
 * SATA interface registers into the controller's memory resource, set up
 * the EDMA request/response queues and unmask the channel's interrupts.
 *
 * Fix: in the SATA_EDMA_CFG write, "A | cond ? B : 0" parses as
 * "(A | cond) ? B : 0" because '|' binds tighter than '?:', so the
 * HQCACHE bit was never written and QL128 was set unconditionally.
 * The conditional is now parenthesized.
 */
static int
sata_channel_attach(device_t dev)
{
    struct sata_softc *sc;
    struct ata_channel *ch;
    uint64_t work;
    int error, i;

    sc = device_get_softc(device_get_parent(dev));
    ch = device_get_softc(dev);

    if (ch->attached)
	return (0);

    ch->dev = dev;
    ch->unit = device_get_unit(dev);
    ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE | ATA_SATA;

    /* Set legacy ATA resources. */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = sc->sc_mem_res;
	ch->r_io[i].offset = SATA_SHADOWR_BASE(ch->unit) + (i << 2);
    }

    ch->r_io[ATA_CONTROL].res = sc->sc_mem_res;
    ch->r_io[ATA_CONTROL].offset = SATA_SHADOWR_CONTROL(ch->unit);

    ch->r_io[ATA_IDX_ADDR].res = sc->sc_mem_res;
    ata_default_registers(dev);

    /* Set SATA resources. */
    ch->r_io[ATA_SSTATUS].res = sc->sc_mem_res;
    ch->r_io[ATA_SSTATUS].offset = SATA_SATA_SSTATUS(ch->unit);
    ch->r_io[ATA_SERROR].res = sc->sc_mem_res;
    ch->r_io[ATA_SERROR].offset = SATA_SATA_SERROR(ch->unit);
    ch->r_io[ATA_SCONTROL].res = sc->sc_mem_res;
    ch->r_io[ATA_SCONTROL].offset = SATA_SATA_SCONTROL(ch->unit);
    ata_generic_hw(dev);

    ch->hw.begin_transaction = sata_channel_begin_transaction;
    ch->hw.end_transaction = sata_channel_end_transaction;
    ch->hw.status = sata_channel_status;

    /* Set DMA resources */
    ata_dmainit(dev);
    ch->dma.setprd = sata_channel_dmasetprd;

    /* Clear work area */
    KASSERT(sc->sc_edma_qlen * (sizeof(struct sata_crqb) +
	sizeof(struct sata_crpb)) <= ch->dma.max_iosize,
	("insufficient DMA memory for request/response queues.\n"));
    bzero(ch->dma.work, sc->sc_edma_qlen *
	(sizeof(struct sata_crqb) + sizeof(struct sata_crpb)));
    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* Turn off EDMA engine */
    error = sata_edma_ctrl(dev, 0);
    if (error) {
	ata_dmafini(dev);
	return (error);
    }

    /*
     * Initialize EDMA engine:
     *	- Native Command Queuing off,
     *	- Non-Queued operation,
     *	- Host Queue Cache enabled.
     */
    SATA_OUTL(sc, SATA_EDMA_CFG(ch->unit), SATA_EDMA_CFG_HQCACHE |
	((sc->sc_version == 1) ? SATA_EDMA_CFG_QL128 : 0));

    /* Set request queue pointers */
    work = ch->dma.work_bus;
    SATA_OUTL(sc, SATA_EDMA_REQBAHR(ch->unit), work >> 32);
    SATA_OUTL(sc, SATA_EDMA_REQIPR(ch->unit), work & 0xFFFFFFFF);
    SATA_OUTL(sc, SATA_EDMA_REQOPR(ch->unit), work & 0xFFFFFFFF);

    /* Set response queue pointers */
    work += sc->sc_edma_qlen * sizeof(struct sata_crqb);
    SATA_OUTL(sc, SATA_EDMA_RESBAHR(ch->unit), work >> 32);
    SATA_OUTL(sc, SATA_EDMA_RESIPR(ch->unit), work & 0xFFFFFFFF);
    SATA_OUTL(sc, SATA_EDMA_RESOPR(ch->unit), work & 0xFFFFFFFF);

    /* Clear any outstanding interrupts */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
    SATA_OUTL(sc, SATA_SATA_FISICR(ch->unit), 0);
    SATA_OUTL(sc, SATA_EDMA_IECR(ch->unit), 0);
    SATA_OUTL(sc, SATA_ICR,
	~(SATA_ICR_DEV(ch->unit) | SATA_ICR_DMADONE(ch->unit)));

    /* Umask channel interrupts */
    SATA_OUTL(sc, SATA_EDMA_IEMR(ch->unit), 0xFFFFFFFF);
    SATA_OUTL(sc, SATA_MIMR, SATA_INL(sc, SATA_MIMR) |
	SATA_MICR_DONE(ch->unit) | SATA_MICR_DMADONE(ch->unit) |
	SATA_MICR_ERR(ch->unit));

    ch->attached = 1;

    return (ata_attach(dev));
}
/*
 * device related interfaces
 */
/*
 * Control-device ioctl handler: channel count query, reinit/attach/
 * detach of a channel by unit number, and per-channel device listing.
 * Unknown commands fall through to the RAID ioctl hook if registered.
 */
static int
ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int32_t flag,
	  struct thread *td)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
    int *value = (int *)data;
    int i, nchildren, error = ENOTTY;

    switch (cmd) {
    case IOCATAGMAXCHANNEL:
	/* In case we have channel 0..n this will return n+1. */
	*value = devclass_get_maxunit(ata_devclass);
	error = 0;
	break;

    case IOCATAREINIT:
	if (*value >= devclass_get_maxunit(ata_devclass) ||
	    !(device = devclass_get_device(ata_devclass, *value)))
	    return ENXIO;
	error = ata_reinit(device);
	break;

    case IOCATAATTACH:
	if (*value >= devclass_get_maxunit(ata_devclass) ||
	    !(device = devclass_get_device(ata_devclass, *value)))
	    return ENXIO;
	/* XXX SOS should enable channel HW on controller */
	error = ata_attach(device);
	break;

    case IOCATADETACH:
	if (*value >= devclass_get_maxunit(ata_devclass) ||
	    !(device = devclass_get_device(ata_devclass, *value)))
	    return ENXIO;
	error = ata_detach(device);
	/* XXX SOS should disable channel HW on controller */
	break;

    case IOCATADEVICES:
	if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
	    !(device = devclass_get_device(ata_devclass, devices->channel)))
	    return ENXIO;
	/* Start from cleared name/param slots for both positions. */
	bzero(devices->name[0], 32);
	bzero(&devices->params[0], sizeof(struct ata_params));
	bzero(devices->name[1], 32);
	bzero(&devices->params[1], sizeof(struct ata_params));
	if (!device_get_children(device, &children, &nchildren)) {
	    for (i = 0; i < nchildren; i++) {
		if (children[i] && device_is_attached(children[i])) {
		    struct ata_device *atadev =
			device_get_softc(children[i]);

		    /*
		     * NOTE(review): strncpy with size 32 may leave the
		     * name unterminated if it is >= 32 chars; the bzero
		     * above only helps for shorter names — confirm the
		     * name length bound.
		     */
		    if (atadev->unit == ATA_MASTER) {
			strncpy(devices->name[0],
				device_get_nameunit(children[i]), 32);
			bcopy(&atadev->param, &devices->params[0],
			      sizeof(struct ata_params));
		    }
		    if (atadev->unit == ATA_SLAVE) {
			strncpy(devices->name[1],
				device_get_nameunit(children[i]), 32);
			bcopy(&atadev->param, &devices->params[1],
			      sizeof(struct ata_params));
		    }
		}
	    }
	    free(children, M_TEMP);
	    error = 0;
	}
	else
	    error = ENODEV;
	break;

    default:
	/* Fall through to the RAID subsystem's ioctl hook, if any. */
	if (ata_raid_ioctl_func)
	    error = ata_raid_ioctl_func(cmd, data);
    }
    return error;
}
/*
 * Attach the MacIO ATA cell (full variant): allocate the register window,
 * build the register vector, set up the DBDMA engine, latch the initial
 * timing configuration and register the controller transaction hook.
 */
static int
ata_macio_attach(device_t dev)
{
    struct ata_macio_softc *sc = device_get_softc(dev);
    uint32_t timingreg;
    struct ata_channel *ch;
    int rid, i;

    /*
     * Allocate resources
     */
    rid = 0;
    ch = &sc->sc_ch.sc_ch;
    sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	RF_ACTIVE);
    if (sc->sc_mem == NULL) {
	device_printf(dev, "could not allocate memory\n");
	return (ENXIO);
    }

    /*
     * Set up the resource vectors
     */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = sc->sc_mem;
	ch->r_io[i].offset = i * ATA_MACIO_REGGAP;
    }
    ch->r_io[ATA_CONTROL].res = sc->sc_mem;
    ch->r_io[ATA_CONTROL].offset = ATA_MACIO_ALTOFFSET;
    ata_default_registers(dev);

    ch->unit = 0;
    ch->flags |= ATA_USE_16BIT | ATA_NO_ATAPI_DMA;
    ata_generic_hw(dev);

#if USE_DBDMA_IRQ
    int dbdma_irq_rid = 1;
    struct resource *dbdma_irq;
    void *cookie;
#endif

    /* Init DMA engine */
    sc->sc_ch.dbdma_rid = 1;
    sc->sc_ch.dbdma_regs = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	&sc->sc_ch.dbdma_rid, RF_ACTIVE);
    /* NOTE(review): dbdma_regs is not NULL-checked before dmainit. */
    ata_dbdma_dmainit(dev);

    /*
     * Configure initial timings: split the saved timing register into
     * UDMA/WDMA/PIO fields whose bit layout depends on the cell revision.
     */
    timingreg = bus_read_4(sc->sc_mem, ATA_MACIO_TIMINGREG);
    if (sc->rev == 4) {
	sc->udmaconf[0] = sc->udmaconf[1] = timingreg & 0x1ff00000;
	sc->wdmaconf[0] = sc->wdmaconf[1] = timingreg & 0x001ffc00;
	sc->pioconf[0] = sc->pioconf[1] = timingreg & 0x000003ff;
    } else {
	sc->udmaconf[0] = sc->udmaconf[1] = 0;
	sc->wdmaconf[0] = sc->wdmaconf[1] = timingreg & 0xfffff800;
	sc->pioconf[0] = sc->pioconf[1] = timingreg & 0x000007ff;
    }

#if USE_DBDMA_IRQ
    /* Bind to DBDMA interrupt as well */
    if ((dbdma_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	&dbdma_irq_rid, RF_SHAREABLE | RF_ACTIVE)) != NULL) {
	bus_setup_intr(dev, dbdma_irq, ATA_INTR_FLAGS, NULL,
	    (driver_intr_t *)ata_interrupt, sc,&cookie);
    }
#endif

    /* Set begin_transaction */
    sc->sc_ch.sc_ch.hw.begin_transaction = ata_macio_begin_transaction;

    return ata_attach(dev);
}