static int
mvs_probe(device_t dev)
{
	char buf[64];
	int i;
	uint32_t devid, revid;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "mrvl,sata"))
		return (ENXIO);

	soc_id(&devid, &revid);
	for (i = 0; mvs_ids[i].id != 0; i++) {
		if (mvs_ids[i].id == devid &&
		    mvs_ids[i].rev <= revid) {
			snprintf(buf, sizeof(buf), "%s SATA controller",
			    mvs_ids[i].name);
			device_set_desc_copy(dev, buf);
			return (BUS_PROBE_VENDOR);
		}
	}
	return (ENXIO);
}
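The probe above walks an mvs_ids[] match table until it finds an entry whose device ID equals the one reported by soc_id() and whose minimum revision is satisfied. Below is a minimal sketch of what such a table could look like; the field names mirror the mvs_ids[i].* accesses in mvs_probe()/mvs_attach(), but the exact layout and the entries are assumptions, not the driver's actual definition (the MV_DEV_* and MVS_Q_* constants come from the driver headers).

#include <stdint.h>

/* Hypothetical match-table layout; fields follow the mvs_ids[i].* uses above. */
struct mvs_id_entry {
	uint32_t	id;	/* SoC device ID from soc_id() */
	uint8_t		rev;	/* minimum supported silicon revision */
	const char	*name;	/* printed in the probe description */
	int		ports;	/* SATA channels, consumed by mvs_attach() */
	int		quirks;	/* MVS_Q_* generation/feature flags */
};

static const struct mvs_id_entry example_ids[] = {
	{ MV_DEV_88F5182, 0x00, "88F5182", 2, MVS_Q_GENIIE },	/* example entry */
	{ MV_DEV_88F6281, 0x00, "88F6281", 2, MVS_Q_GENIIE },	/* example entry */
	{ 0,              0x00, NULL,      0, 0 }		/* id == 0 terminates */
};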
static int
sata_probe(device_t dev)
{
	struct sata_softc *sc;
	uint32_t d, r;

	if (!ofw_bus_is_compatible(dev, "mrvl,sata"))
		return (ENXIO);

	soc_id(&d, &r);
	sc = device_get_softc(dev);

	switch (d) {
	case MV_DEV_88F5182:
		sc->sc_version = 1;
		sc->sc_edma_qlen = 128;
		break;
	case MV_DEV_88F6281:
	case MV_DEV_MV78100:
	case MV_DEV_MV78100_Z0:
		sc->sc_version = 2;
		sc->sc_edma_qlen = 32;
		break;
	default:
		device_printf(dev, "unsupported SoC (ID: 0x%08X)!\n", d);
		return (ENXIO);
	}

	sc->sc_edma_reqis_mask = (sc->sc_edma_qlen - 1) << SATA_EDMA_REQIS_OFS;
	sc->sc_edma_resos_mask = (sc->sc_edma_qlen - 1) << SATA_EDMA_RESOS_OFS;

	device_set_desc(dev, "Marvell Integrated SATA Controller");
	return (0);
}
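For a concrete feel of the mask arithmetic at the end of sata_probe(), the worked example below plugs in the Gen-II queue depth. The shift value is assumed purely for illustration; the real SATA_EDMA_REQIS_OFS/SATA_EDMA_RESOS_OFS constants live in the driver headers.

/*
 * Worked example (illustrative shift value only): with a 32-entry EDMA
 * queue, the in-progress index occupies a 5-bit field, so the mask is
 * (32 - 1) shifted to the field's offset.
 */
#define EXAMPLE_REQIS_OFS	5	/* assumed offset, for illustration */
#define EXAMPLE_QLEN		32	/* Gen-II queue depth */
#define EXAMPLE_REQIS_MASK	((EXAMPLE_QLEN - 1) << EXAMPLE_REQIS_OFS)
					/* == 0x3e0 */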
uint32_t
get_tclk(void)
{
	uint32_t dev, rev;

	/*
	 * On Kirkwood TCLK is not configurable and depends on silicon
	 * revision:
	 * - A0 and A1 have TCLK hardcoded to 200 MHz.
	 * - Z0 and others have TCLK hardcoded to 166 MHz.
	 */
	soc_id(&dev, &rev);
	if (dev == MV_DEV_88F6281 && (rev == 2 || rev == 3))
		return (TCLK_200MHZ);

	if (dev == MV_DEV_88F6282)
		return (TCLK_200MHZ);

	return (TCLK_166MHZ);
}
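A typical consumer of get_tclk() is a peripheral driver that derives its clocking from TCLK. The sketch below shows one hypothetical use, a 16550-style UART divisor computed from the returned frequency; the helper name and the 16x oversampling factor are assumptions for illustration only, and it assumes the TCLK_* constants are plain Hz values.

/* Hypothetical helper: derive a UART divisor latch value from TCLK. */
static uint32_t
example_uart_divisor(uint32_t baudrate)
{

	/* Assumes 16x oversampling and TCLK_200MHZ/TCLK_166MHZ in Hz. */
	return (get_tclk() / (16 * baudrate));
}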
static int
mv_ic_attach(device_t dev)
{
	struct mv_ic_softc *sc;
	uint32_t dev_id, rev_id;
	int error;

	sc = (struct mv_ic_softc *)device_get_softc(dev);

	if (mv_ic_sc != NULL)
		return (ENXIO);
	mv_ic_sc = sc;

	soc_id(&dev_id, &rev_id);

	sc->ic_high_regs = 0;
	sc->ic_error_regs = 0;

	if (dev_id == MV_DEV_88F6281 ||
	    dev_id == MV_DEV_88F6282 ||
	    dev_id == MV_DEV_MV78100 ||
	    dev_id == MV_DEV_MV78100_Z0)
		sc->ic_high_regs = 1;

	if (dev_id == MV_DEV_MV78100 || dev_id == MV_DEV_MV78100_Z0)
		sc->ic_error_regs = 1;

	error = bus_alloc_resources(dev, mv_ic_spec, sc->ic_res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->ic_bst = rman_get_bustag(sc->ic_res[0]);
	sc->ic_bsh = rman_get_bushandle(sc->ic_res[0]);

	/* Mask all interrupts */
	arm_mask_irq_all();

	return (0);
}
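bus_alloc_resources() above is driven by a resource specification array. A sketch of what mv_ic_spec might contain is shown below; the entry values are assumptions, only the struct resource_spec convention itself ({ type, rid, flags } entries terminated by a -1 type) is standard newbus usage.

/*
 * Hypothetical resource spec: one memory window for the interrupt
 * controller registers, terminated as bus_alloc_resources() expects.
 */
static struct resource_spec example_ic_spec[] = {
	{ SYS_RES_MEMORY, 0, RF_ACTIVE },	/* register window -> ic_res[0] */
	{ -1, 0 }
};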
static int
mv_gpio_attach(device_t dev)
{
	int error, i;
	struct mv_gpio_softc *sc;
	uint32_t dev_id, rev_id;

	sc = (struct mv_gpio_softc *)device_get_softc(dev);
	if (mv_gpio_softc != NULL)
		return (ENXIO);
	mv_gpio_softc = sc;

	/* Get chip id and revision */
	soc_id(&dev_id, &rev_id);

	if (dev_id == MV_DEV_88F5182 ||
	    dev_id == MV_DEV_88F5281 ||
	    dev_id == MV_DEV_MV78100 ||
	    dev_id == MV_DEV_MV78100_Z0) {
		sc->pin_num = 32;
		sc->irq_num = 4;
		sc->use_high = 0;

	} else if (dev_id == MV_DEV_88F6281) {
		sc->pin_num = 50;
		sc->irq_num = 7;
		sc->use_high = 1;

	} else {
		device_printf(dev, "unknown chip id=0x%x\n", dev_id);
		return (ENXIO);
	}

	error = bus_alloc_resources(dev, mv_gpio_res, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Disable and clear all interrupts */
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_EDGE_MASK, 0);
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_LEV_MASK, 0);
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_CAUSE, 0);

	if (sc->use_high) {
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_EDGE_MASK, 0);
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_LEV_MASK, 0);
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_CAUSE, 0);
	}

	for (i = 0; i < sc->irq_num; i++) {
		if (bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_MISC | INTR_FAST,
		    (driver_filter_t *)mv_gpio_intr, NULL,
		    sc, &sc->ih_cookie[i]) != 0) {
			bus_release_resources(dev, mv_gpio_res, sc->res);
			device_printf(dev, "could not set up intr %d\n", i);
			return (ENXIO);
		}
	}

	/* Setup GPIO lines */
	for (i = 0; mv_gpio_config[i].gc_gpio >= 0; i++) {
		mv_gpio_configure(mv_gpio_config[i].gc_gpio,
		    mv_gpio_config[i].gc_flags, ~0u);

		if (mv_gpio_config[i].gc_output < 0)
			mv_gpio_out_en(mv_gpio_config[i].gc_gpio, 0);
		else
			mv_gpio_out(mv_gpio_config[i].gc_gpio,
			    mv_gpio_config[i].gc_output, 1);
	}

	return (0);
}
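The final loop walks a board-specific mv_gpio_config[] table until gc_gpio goes negative, configuring each listed pin and either driving it as an output with an initial level or leaving it as an input. A sketch of a matching table shape, with made-up names and entries, could look like this:

/* Hypothetical per-board GPIO table; fields mirror the gc_* accesses above. */
struct example_gpio_config {
	int		gc_gpio;	/* pin number; < 0 terminates the table */
	uint32_t	gc_flags;	/* polarity/edge/level flags for the pin */
	int		gc_output;	/* initial output level; < 0 keeps the pin an input */
};

static const struct example_gpio_config example_gpio_config[] = {
	{ 10, 0, 1 },		/* drive pin 10 high at attach */
	{ 12, 0, -1 },		/* leave pin 12 as an input */
	{ -1, 0, -1 }		/* terminator */
};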
static int
mv_gpio_attach(device_t dev)
{
	int error, i;
	struct mv_gpio_softc *sc;
	uint32_t dev_id, rev_id;

	sc = (struct mv_gpio_softc *)device_get_softc(dev);
	if (sc == NULL)
		return (ENXIO);
	mv_gpio_softc = sc;

	/* Get chip id and revision */
	soc_id(&dev_id, &rev_id);

	if (dev_id == MV_DEV_88F5182 ||
	    dev_id == MV_DEV_88F5281 ||
	    dev_id == MV_DEV_MV78100 ||
	    dev_id == MV_DEV_MV78100_Z0) {
		sc->pin_num = 32;
		sc->irq_num = 4;

	} else if (dev_id == MV_DEV_88F6281 ||
	    dev_id == MV_DEV_88F6282) {
		sc->pin_num = 50;
		sc->irq_num = 7;

	} else {
		device_printf(dev, "unknown chip id=0x%x\n", dev_id);
		return (ENXIO);
	}

	error = bus_alloc_resources(dev, mv_gpio_res, sc->res);
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Disable and clear all interrupts */
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_EDGE_MASK, 0);
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_LEV_MASK, 0);
	bus_space_write_4(sc->bst, sc->bsh, GPIO_INT_CAUSE, 0);

	if (sc->pin_num > GPIO_PINS_PER_REG) {
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_EDGE_MASK, 0);
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_LEV_MASK, 0);
		bus_space_write_4(sc->bst, sc->bsh, GPIO_HI_INT_CAUSE, 0);
	}

	for (i = 0; i < sc->irq_num; i++) {
		if (bus_setup_intr(dev, sc->res[1 + i],
		    INTR_TYPE_MISC, mv_gpio_intr, NULL,
		    sc, &sc->ih_cookie[i]) != 0) {
			bus_release_resources(dev, mv_gpio_res, sc->res);
			device_printf(dev, "could not set up intr %d\n", i);
			return (ENXIO);
		}
	}

	return (platform_gpio_init());
}
static int
mvs_attach(device_t dev)
{
	struct mvs_controller *ctlr = device_get_softc(dev);
	device_t child;
	int error, unit, i;
	uint32_t devid, revid;

	soc_id(&devid, &revid);
	ctlr->dev = dev;
	i = 0;
	while (mvs_ids[i].id != 0 &&
	    (mvs_ids[i].id != devid || mvs_ids[i].rev > revid))
		i++;
	ctlr->channels = mvs_ids[i].ports;
	ctlr->quirks = mvs_ids[i].quirks;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "ccc", &ctlr->ccc);
	ctlr->cccc = 8;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "cccc", &ctlr->cccc);
	if (ctlr->ccc == 0 || ctlr->cccc == 0) {
		ctlr->ccc = 0;
		ctlr->cccc = 0;
	}
	if (ctlr->ccc > 100000)
		ctlr->ccc = 100000;
	device_printf(dev,
	    "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n",
	    ((ctlr->quirks & MVS_Q_GENI) ? "I" :
	     ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")),
	    ctlr->channels,
	    ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"),
	    ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"),
	    ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : ""));
	mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF);
	/* We should have a memory BAR(0). */
	ctlr->r_rid = 0;
	if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ctlr->r_rid, RF_ACTIVE)))
		return (ENXIO);
	if (ATA_INL(ctlr->r_mem, PORT_BASE(0) + SATA_PHYCFG_OFS) != 0)
		ctlr->quirks |= MVS_Q_SOC65;
	/* Setup our own memory management for channels. */
	ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
	ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
	ctlr->sc_iomem.rm_type = RMAN_ARRAY;
	ctlr->sc_iomem.rm_descr = "I/O memory addresses";
	if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctlr->r_rid, ctlr->r_mem);
		return (error);
	}
	if ((error = rman_manage_region(&ctlr->sc_iomem,
	    rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctlr->r_rid, ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return (error);
	}
	mvs_ctlr_setup(dev);
	/* Setup interrupts. */
	if (mvs_setup_interrupt(dev)) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    ctlr->r_rid, ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return (ENXIO);
	}
	/* Attach all channels on this controller */
	for (unit = 0; unit < ctlr->channels; unit++) {
		child = device_add_child(dev, "mvsch", -1);
		if (child == NULL)
			device_printf(dev, "failed to add channel device\n");
		else
			device_set_ivars(child, (void *)(intptr_t)unit);
	}
	bus_generic_attach(dev);
	return (0);
}
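Each channel child is created with its unit number stored as its ivars pointer, and the channel driver can recover it later with device_get_ivars(). A hedged sketch of that consumer side follows; the function name and the use of BUS_PROBE_DEFAULT are assumptions for illustration, not the driver's actual channel code.

/* Hypothetical channel-side counterpart to the device_set_ivars() call above. */
static int
example_mvsch_probe(device_t dev)
{
	int unit;

	/* The parent encoded the channel index directly in the ivars pointer. */
	unit = (int)(intptr_t)device_get_ivars(dev);
	device_printf(dev, "SATA channel %d\n", unit);

	return (BUS_PROBE_DEFAULT);
}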