Example #1
static void
thunder_pem_release_all(device_t dev)
{
	struct thunder_pem_softc *sc;

	sc = device_get_softc(dev);

	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);

	if (sc->reg != NULL)
		bus_free_resource(dev, SYS_RES_MEMORY, sc->reg);
}
Example #2
int
puc_bfe_detach(device_t dev)
{
	struct puc_bar *bar;
	struct puc_port *port;
	struct puc_softc *sc;
	int error, idx;

	sc = device_get_softc(dev);

	/* Detach our children. */
	error = 0;
	for (idx = 0; idx < sc->sc_nports; idx++) {
		port = &sc->sc_port[idx];
		if (port->p_dev == NULL)
			continue;
		if (device_detach(port->p_dev) == 0) {
			device_delete_child(dev, port->p_dev);
			if (port->p_rres != NULL)
				rman_release_resource(port->p_rres);
			if (port->p_ires != NULL)
				rman_release_resource(port->p_ires);
		} else
			error = ENXIO;
	}
	if (error)
		return (error);

	if (sc->sc_serdevs != 0UL)
		bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie);
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires);

	for (idx = 0; idx < PUC_PCI_BARS; idx++) {
		bar = &sc->sc_bar[idx];
		if (bar->b_res != NULL)
			bus_release_resource(sc->sc_dev, bar->b_type,
			    bar->b_rid, bar->b_res);
	}

	rman_fini(&sc->sc_irq);
	free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC);
	rman_fini(&sc->sc_iomem);
	free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC);
	rman_fini(&sc->sc_ioport);
	free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC);
	free(sc->sc_port, M_PUC);
	return (0);
}
Example #3
static int
mvs_detach(device_t dev)
{
	struct mvs_controller *ctlr = device_get_softc(dev);
	device_t *children;
	int nchildren, i;

	/* Detach & delete all children */
	if (!device_get_children(dev, &children, &nchildren)) {
		for (i = 0; i < nchildren; i++)
			device_delete_child(dev, children[i]);
		free(children, M_TEMP);
	}
	/* Free interrupt. */
	if (ctlr->irq.r_irq) {
		bus_teardown_intr(dev, ctlr->irq.r_irq,
		    ctlr->irq.handle);
		bus_release_resource(dev, SYS_RES_IRQ,
		    ctlr->irq.r_irq_rid, ctlr->irq.r_irq);
	}
	/* Free memory. */
	rman_fini(&ctlr->sc_iomem);
	if (ctlr->r_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
	mtx_destroy(&ctlr->mtx);
	return (0);
}
Example #4
/* Free memory management state */
static void
chipc_free_rman(struct chipc_softc *sc)
{
	struct chipc_region *cr, *cr_next;

	STAILQ_FOREACH_SAFE(cr, &sc->mem_regions, cr_link, cr_next)
		chipc_free_region(sc, cr);

	rman_fini(&sc->mem_rman);
}
Example #5
int
thunder_pcie_attach(device_t dev)
{
	int rid;
	struct thunder_pcie_softc *sc;
	int error;
	int tuple;
	uint64_t base, size;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Identify pcib domain */
	if (thunder_pcie_identify_pcib(dev))
		return (ENXIO);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		return (error);
	}

	for (tuple = 0; tuple < RANGES_TUPLES_MAX; tuple++) {
		base = sc->ranges[tuple].phys_base;
		size = sc->ranges[tuple].size;
		if ((base == 0) || (size == 0))
			continue; /* empty range element */

		error = rman_manage_region(&sc->mem_rman, base, base + size - 1);
		if (error) {
			device_printf(dev, "rman_manage_region() failed. error = %d\n", error);
			rman_fini(&sc->mem_rman);
			return (error);
		}
	}
	device_add_child(dev, "pci", -1);

	return (bus_generic_attach(dev));
}
Example #6
/*
 * Since this is not a self-enumerating bus, and since we always add
 * children in attach, we have to always delete children here.
 */
static int
gpiobus_detach(device_t dev)
{
	struct gpiobus_softc *sc;
	struct gpiobus_ivar *devi;
	device_t *devlist;
	int i, err, ndevs;

	sc = GPIOBUS_SOFTC(dev);
	KASSERT(mtx_initialized(&sc->sc_mtx),
	    ("gpiobus mutex not initialized"));
	GPIOBUS_LOCK_DESTROY(sc);

	if ((err = bus_generic_detach(dev)) != 0)
		return (err);

	if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
		return (err);
	for (i = 0; i < ndevs; i++) {
		devi = GPIOBUS_IVAR(devlist[i]);
		gpiobus_free_ivars(devi);
		resource_list_free(&devi->rl);
		free(devi, M_DEVBUF);
		device_delete_child(dev, devlist[i]);
	}
	free(devlist, M_TEMP);
	rman_fini(&sc->sc_intr_rman);
	if (sc->sc_pins) {
		for (i = 0; i < sc->sc_npins; i++) {
			if (sc->sc_pins[i].name != NULL)
				free(sc->sc_pins[i].name, M_DEVBUF);
			sc->sc_pins[i].name = NULL;
		}
		free(sc->sc_pins, M_DEVBUF);
		sc->sc_pins = NULL;
	}

	return (0);
}
Example #7
static int
mvs_detach(device_t dev)
{
	struct mvs_controller *ctlr = device_get_softc(dev);

	/* Detach & delete all children */
	device_delete_children(dev);

	/* Free interrupt. */
	if (ctlr->irq.r_irq) {
		bus_teardown_intr(dev, ctlr->irq.r_irq,
		    ctlr->irq.handle);
		bus_release_resource(dev, SYS_RES_IRQ,
		    ctlr->irq.r_irq_rid, ctlr->irq.r_irq);
	}
	/* Free memory. */
	rman_fini(&ctlr->sc_iomem);
	if (ctlr->r_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
	mtx_destroy(&ctlr->mtx);
	return (0);
}
Example #8
/*
 * Destroy an sgmap created with sgmap_map_create().
 */
void
sgmap_map_destroy(struct sgmap *sgmap)
{
	rman_fini(&sgmap->rm);
	free(sgmap, M_SGMAP);
}
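
All of the examples in this list follow the same resource-manager lifecycle: rman_init(), one or more rman_manage_region() calls, reservations carved out of the managed region, and a final rman_fini() in the detach path or on the attach error path. The sketch below condenses that pairing into a hypothetical foo_softc; the structure, function names, and region bounds are invented for illustration, and on older trees rman_res_t is spelled u_long.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

/* Hypothetical softc used only by this sketch. */
struct foo_softc {
	struct rman	mem_rman;
};

/* Attach side: initialize the rman and hand it one region to manage. */
static int
foo_rman_setup(struct foo_softc *sc, rman_res_t base, rman_res_t size)
{
	int error;

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "foo memory window";
	error = rman_init(&sc->mem_rman);
	if (error != 0)
		return (error);

	error = rman_manage_region(&sc->mem_rman, base, base + size - 1);
	if (error != 0) {
		/* Undo rman_init() when the region cannot be added. */
		rman_fini(&sc->mem_rman);
		return (error);
	}
	return (0);
}

/* Detach side: finalize the rman once all reservations are released. */
static void
foo_rman_teardown(struct foo_softc *sc)
{
	rman_fini(&sc->mem_rman);
}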
Example #9
int
puc_bfe_attach(device_t dev)
{
	char buffer[64];
	struct puc_bar *bar;
	struct puc_port *port;
	struct puc_softc *sc;
	struct rman *rm;
	intptr_t res;
	bus_addr_t ofs, start;
	bus_size_t size;
	bus_space_handle_t bsh;
	bus_space_tag_t bst;
	int error, idx;

	sc = device_get_softc(dev);

	for (idx = 0; idx < PUC_PCI_BARS; idx++)
		sc->sc_bar[idx].b_rid = -1;

	do {
		sc->sc_ioport.rm_type = RMAN_ARRAY;
		error = rman_init(&sc->sc_ioport);
		if (!error) {
			sc->sc_iomem.rm_type = RMAN_ARRAY;
			error = rman_init(&sc->sc_iomem);
			if (!error) {
				sc->sc_irq.rm_type = RMAN_ARRAY;
				error = rman_init(&sc->sc_irq);
				if (!error)
					break;
				rman_fini(&sc->sc_iomem);
			}
			rman_fini(&sc->sc_ioport);
		}
		return (error);
	} while (0);

	snprintf(buffer, sizeof(buffer), "%s I/O port mapping",
	    device_get_nameunit(dev));
	sc->sc_ioport.rm_descr = strdup(buffer, M_PUC);
	snprintf(buffer, sizeof(buffer), "%s I/O memory mapping",
	    device_get_nameunit(dev));
	sc->sc_iomem.rm_descr = strdup(buffer, M_PUC);
	snprintf(buffer, sizeof(buffer), "%s port numbers",
	    device_get_nameunit(dev));
	sc->sc_irq.rm_descr = strdup(buffer, M_PUC);

	error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res);
	KASSERT(error == 0, ("%s %d", __func__, __LINE__));
	sc->sc_nports = (int)res;
	sc->sc_port = malloc(sc->sc_nports * sizeof(struct puc_port),
	    M_PUC, M_WAITOK|M_ZERO);

	error = rman_manage_region(&sc->sc_irq, 1, sc->sc_nports);
	if (error)
		goto fail;

	error = puc_config(sc, PUC_CFG_SETUP, 0, &res);
	if (error)
		goto fail;

	for (idx = 0; idx < sc->sc_nports; idx++) {
		port = &sc->sc_port[idx];
		port->p_nr = idx + 1;
		error = puc_config(sc, PUC_CFG_GET_TYPE, idx, &res);
		if (error)
			goto fail;
		port->p_type = res;
		error = puc_config(sc, PUC_CFG_GET_RID, idx, &res);
		if (error)
			goto fail;
		bar = puc_get_bar(sc, res);
		if (bar == NULL) {
			error = ENXIO;
			goto fail;
		}
		port->p_bar = bar;
		start = rman_get_start(bar->b_res);
		error = puc_config(sc, PUC_CFG_GET_OFS, idx, &res);
		if (error)
			goto fail;
		ofs = res;
		error = puc_config(sc, PUC_CFG_GET_LEN, idx, &res);
		if (error)
			goto fail;
		size = res;
		rm = (bar->b_type == SYS_RES_IOPORT)
		    ? &sc->sc_ioport: &sc->sc_iomem;
		port->p_rres = rman_reserve_resource(rm, start + ofs,
		    start + ofs + size - 1, size, 0, NULL);
		if (port->p_rres != NULL) {
			bsh = rman_get_bushandle(bar->b_res);
			bst = rman_get_bustag(bar->b_res);
			bus_space_subregion(bst, bsh, ofs, size, &bsh);
			rman_set_bushandle(port->p_rres, bsh);
			rman_set_bustag(port->p_rres, bst);
		}
		port->p_ires = rman_reserve_resource(&sc->sc_irq, port->p_nr,
		    port->p_nr, 1, 0, NULL);
		if (port->p_ires == NULL) {
			error = ENXIO;
			goto fail;
		}
		error = puc_config(sc, PUC_CFG_GET_CLOCK, idx, &res);
		if (error)
			goto fail;
		port->p_rclk = res;

		port->p_dev = device_add_child(dev, NULL, -1);
		if (port->p_dev != NULL)
			device_set_ivars(port->p_dev, (void *)port);
	}

	error = puc_config(sc, PUC_CFG_GET_ILR, 0, &res);
	if (error)
		goto fail;
	sc->sc_ilr = res;
	if (bootverbose && sc->sc_ilr != 0)
		device_printf(dev, "using interrupt latch register\n");

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (sc->sc_ires != NULL) {
		error = bus_setup_intr(dev, sc->sc_ires,
		    INTR_TYPE_TTY, puc_intr, NULL, sc, &sc->sc_icookie);
		if (error)
			error = bus_setup_intr(dev, sc->sc_ires,
			    INTR_TYPE_TTY | INTR_MPSAFE, NULL,
			    (driver_intr_t *)puc_intr, sc, &sc->sc_icookie);
		else
			sc->sc_fastintr = 1;

		if (error) {
			device_printf(dev, "could not activate interrupt\n");
			bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid,
			    sc->sc_ires);
			sc->sc_ires = NULL;
		}
	}
	if (sc->sc_ires == NULL) {
		/* XXX no interrupt resource. Force polled mode. */
		sc->sc_polled = 1;
	}

	/* Probe and attach our children. */
	for (idx = 0; idx < sc->sc_nports; idx++) {
		port = &sc->sc_port[idx];
		if (port->p_dev == NULL)
			continue;
		error = device_probe_and_attach(port->p_dev);
		if (error) {
			device_delete_child(dev, port->p_dev);
			port->p_dev = NULL;
		}
	}

	/*
	 * If there are no serdev devices, then our interrupt handler
	 * will do nothing. Tear it down.
	 */
	if (sc->sc_serdevs == 0UL)
		bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie);

	return (0);

fail:
	for (idx = 0; idx < sc->sc_nports; idx++) {
		port = &sc->sc_port[idx];
		if (port->p_dev != NULL)
			device_delete_child(dev, port->p_dev);
		if (port->p_rres != NULL)
			rman_release_resource(port->p_rres);
		if (port->p_ires != NULL)
			rman_release_resource(port->p_ires);
	}
	for (idx = 0; idx < PUC_PCI_BARS; idx++) {
		bar = &sc->sc_bar[idx];
		if (bar->b_res != NULL)
			bus_release_resource(sc->sc_dev, bar->b_type,
			    bar->b_rid, bar->b_res);
	}
	rman_fini(&sc->sc_irq);
	free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC);
	rman_fini(&sc->sc_iomem);
	free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC);
	rman_fini(&sc->sc_ioport);
	free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC);
	free(sc->sc_port, M_PUC);
	return (error);
}
Example #10
static int
lbc_attach(device_t dev)
{
	struct lbc_softc *sc;
	struct lbc_devinfo *di;
	struct rman *rm;
	u_long offset, start, size;
	device_t cdev;
	phandle_t node, child;
	pcell_t *ranges, *rangesptr;
	int tuple_size, tuples;
	int par_addr_cells;
	int bank, error, i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_mrid = 0;
	sc->sc_mres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_mrid,
	    RF_ACTIVE);
	if (sc->sc_mres == NULL)
		return (ENXIO);

	sc->sc_bst = rman_get_bustag(sc->sc_mres);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mres);

	for (bank = 0; bank < LBC_DEV_MAX; bank++) {
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_BR(bank), 0);
		bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_OR(bank), 0);
	}

	/*
	 * Initialize configuration register:
	 * - enable Local Bus
	 * - set data buffer control signal function
	 * - disable parity byte select
	 * - set ECC parity type
	 * - set bus monitor timing and timer prescale
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LBCR, 0);

	/*
	 * Initialize clock ratio register:
	 * - disable PLL bypass mode
	 * - configure LCLK delay cycles for the assertion of LALE
	 * - set system clock divider
	 */
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LCRR, 0x00030008);

	bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEDR, 0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTESR, ~0);
	bus_space_write_4(sc->sc_bst, sc->sc_bsh, LBC85XX_LTEIR, 0x64080001);

	sc->sc_irid = 0;
	sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_ires != NULL) {
		error = bus_setup_intr(dev, sc->sc_ires,
		    INTR_TYPE_MISC | INTR_MPSAFE, NULL, lbc_intr, sc,
		    &sc->sc_icookie);
		if (error) {
			device_printf(dev, "could not activate interrupt\n");
			bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid,
			    sc->sc_ires);
			sc->sc_ires = NULL;
		}
	}

	sc->sc_ltesr = ~0;

	rangesptr = NULL;

	rm = &sc->sc_rman;
	rm->rm_type = RMAN_ARRAY;
	rm->rm_descr = "Local Bus Space";
	rm->rm_start = 0UL;
	rm->rm_end = ~0UL;
	error = rman_init(rm);
	if (error)
		goto fail;

	error = rman_manage_region(rm, rm->rm_start, rm->rm_end);
	if (error) {
		rman_fini(rm);
		goto fail;
	}

	/*
	 * Process 'ranges' property.
	 */
	node = ofw_bus_get_node(dev);
	if ((fdt_addrsize_cells(node, &sc->sc_addr_cells,
	    &sc->sc_size_cells)) != 0) {
		error = ENXIO;
		goto fail;
	}

	par_addr_cells = fdt_parent_addr_cells(node);
	if (par_addr_cells > 2) {
		device_printf(dev, "unsupported parent #addr-cells\n");
		error = ERANGE;
		goto fail;
	}
	tuple_size = sizeof(pcell_t) * (sc->sc_addr_cells + par_addr_cells +
	    sc->sc_size_cells);

	tuples = OF_getprop_alloc(node, "ranges", tuple_size,
	    (void **)&ranges);
	if (tuples < 0) {
		device_printf(dev, "could not retrieve 'ranges' property\n");
		error = ENXIO;
		goto fail;
	}
	rangesptr = ranges;

	debugf("par addr_cells = %d, addr_cells = %d, size_cells = %d, "
	    "tuple_size = %d, tuples = %d\n", par_addr_cells,
	    sc->sc_addr_cells, sc->sc_size_cells, tuple_size, tuples);

	start = 0;
	size = 0;
	for (i = 0; i < tuples; i++) {

		/* The first cell is the bank (chip select) number. */
		bank = fdt_data_get((void *)ranges, 1);
		if (bank < 0 || bank > LBC_DEV_MAX) {
			device_printf(dev, "bank out of range: %d\n", bank);
			error = ERANGE;
			goto fail;
		}
		ranges += 1;

		/*
		 * Remaining cells of the child address define offset into
		 * this CS.
		 */
		offset = fdt_data_get((void *)ranges, sc->sc_addr_cells - 1);
		ranges += sc->sc_addr_cells - 1;

		/* Parent bus start address of this bank. */
		start = fdt_data_get((void *)ranges, par_addr_cells);
		ranges += par_addr_cells;

		size = fdt_data_get((void *)ranges, sc->sc_size_cells);
		ranges += sc->sc_size_cells;
		debugf("bank = %d, start = %lx, size = %lx\n", bank,
		    start, size);

		sc->sc_banks[bank].addr = start + offset;
		sc->sc_banks[bank].size = size;

		/*
		 * Attributes for the bank.
		 *
		 * XXX Note there are no DT bindings defined for them at the
		 * moment, so we need to provide some defaults.
		 */
		sc->sc_banks[bank].width = 16;
		sc->sc_banks[bank].msel = LBCRES_MSEL_GPCM;
		sc->sc_banks[bank].decc = LBCRES_DECC_DISABLED;
		sc->sc_banks[bank].atom = LBCRES_ATOM_DISABLED;
		sc->sc_banks[bank].wp = 0;
	}

	/*
	 * Initialize mem-mappings for the LBC banks (i.e. chip selects).
	 */
	error = lbc_banks_map(sc);
	if (error)
		goto fail;

	/*
	 * Walk the localbus and add direct subordinates as our children.
	 */
	for (child = OF_child(node); child != 0; child = OF_peer(child)) {

		di = malloc(sizeof(*di), M_LBC, M_WAITOK | M_ZERO);

		if (ofw_bus_gen_setup_devinfo(&di->di_ofw, child) != 0) {
			free(di, M_LBC);
			device_printf(dev, "could not set up devinfo\n");
			continue;
		}

		resource_list_init(&di->di_res);

		if (fdt_lbc_reg_decode(child, sc, di)) {
			device_printf(dev, "could not process 'reg' "
			    "property\n");
			ofw_bus_gen_destroy_devinfo(&di->di_ofw);
			free(di, M_LBC);
			continue;
		}

		fdt_lbc_fixup(child, sc, di);

		/* Add newbus device for this FDT node */
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "could not add child: %s\n",
			    di->di_ofw.obd_name);
			resource_list_free(&di->di_res);
			ofw_bus_gen_destroy_devinfo(&di->di_ofw);
			free(di, M_LBC);
			continue;
		}
		debugf("added child name='%s', node=%p\n", di->di_ofw.obd_name,
		    (void *)child);
		device_set_ivars(cdev, di);
	}

	/*
	 * Enable the LBC.
	 */
	lbc_banks_enable(sc);

	free(rangesptr, M_OFWPROP);
	return (bus_generic_attach(dev));

fail:
	free(rangesptr, M_OFWPROP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mrid, sc->sc_mres);
	return (error);
}
Example #11
static int
thunder_pem_attach(device_t dev)
{
	devclass_t pci_class;
	device_t parent;
	struct thunder_pem_softc *sc;
	int error;
	int rid;
	int tuple;
	uint64_t base, size;
	struct rman *rman;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Allocate memory for resource */
	pci_class = devclass_find("pci");
	parent = device_get_parent(dev);
	if (device_get_devclass(parent) == pci_class)
		rid = PCIR_BAR(0);
	else
		rid = RID_PEM_SPACE;

	sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE);
	if (sc->reg == NULL) {
		device_printf(dev, "Failed to allocate resource\n");
		return (ENXIO);
	}
	sc->reg_bst = rman_get_bustag(sc->reg);
	sc->reg_bsh = rman_get_bushandle(sc->reg);

	/* Map SLI, do it only once */
	if (!sli0_s2m_regx_base) {
		bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC,
		    SLIX_S2M_REGX_ACC_SIZE, 0, &sli0_s2m_regx_base);
	}
	if (!sli1_s2m_regx_base) {
		bus_space_map(sc->reg_bst, SLIX_S2M_REGX_ACC +
		    SLIX_S2M_REGX_ACC_SPACING, SLIX_S2M_REGX_ACC_SIZE, 0,
		    &sli1_s2m_regx_base);
	}

	if ((sli0_s2m_regx_base == 0) || (sli1_s2m_regx_base == 0)) {
		device_printf(dev,
		    "bus_space_map failed to map slix_s2m_regx_base\n");
		goto fail;
	}

	/* Identify PEM */
	if (thunder_pem_identify(dev) != 0)
		goto fail;

	/* Initialize rman and allocate regions */
	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PEM PCIe Memory";
	error = rman_init(&sc->mem_rman);
	if (error != 0) {
		device_printf(dev, "memory rman_init() failed. error = %d\n",
		    error);
		goto fail;
	}
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PEM PCIe IO";
	error = rman_init(&sc->io_rman);
	if (error != 0) {
		device_printf(dev, "IO rman_init() failed. error = %d\n",
		    error);
		goto fail_mem;
	}

	/*
	 * We ignore the values that may have been provided in FDT
	 * and configure ranges according to the below formula
	 * for all types of devices. This is because some DTBs provided
	 * by EFI do not have proper ranges property or don't have them
	 * at all.
	 */
	/* Fill memory window */
	sc->ranges[0].pci_base = PCI_MEMORY_BASE;
	sc->ranges[0].size = PCI_MEMORY_SIZE;
	sc->ranges[0].phys_base = sc->sli_window_base + SLI_PCI_OFFSET +
	    sc->ranges[0].pci_base;
	sc->ranges[0].flags = SYS_RES_MEMORY;

	/* Fill IO window */
	sc->ranges[1].pci_base = PCI_IO_BASE;
	sc->ranges[1].size = PCI_IO_SIZE;
	sc->ranges[1].phys_base = sc->sli_window_base + SLI_PCI_OFFSET +
	    sc->ranges[1].pci_base;
	sc->ranges[1].flags = SYS_RES_IOPORT;

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		if (size == 0)
			continue; /* empty range element */

		rman = thunder_pem_rman(sc, sc->ranges[tuple].flags);
		if (rman != NULL)
			error = rman_manage_region(rman, base,
			    base + size - 1);
		else
			error = EINVAL;
		if (error) {
			device_printf(dev,
			    "rman_manage_region() failed. error = %d\n", error);
			rman_fini(&sc->mem_rman);
			return (error);
		}
		if (bootverbose) {
			device_printf(dev,
			    "\tPCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Flags:0x%jx\n",
			    sc->ranges[tuple].pci_base,
			    sc->ranges[tuple].phys_base,
			    sc->ranges[tuple].size,
			    sc->ranges[tuple].flags);
		}
	}

	if (thunder_pem_init(sc)) {
		device_printf(dev, "Failure during PEM init\n");
		goto fail_io;
	}

	device_add_child(dev, "pci", -1);

	return (bus_generic_attach(dev));

fail_io:
	rman_fini(&sc->io_rman);
fail_mem:
	rman_fini(&sc->mem_rman);
fail:
	bus_free_resource(dev, SYS_RES_MEMORY, sc->reg);
	return (ENXIO);
}
Example #12
static int
mvs_attach(device_t dev)
{
	struct mvs_controller *ctlr = device_get_softc(dev);
	device_t child;
	int	error, unit, i;
	uint32_t devid, revid;

	soc_id(&devid, &revid);
	ctlr->dev = dev;
	i = 0;
	while (mvs_ids[i].id != 0 &&
	    (mvs_ids[i].id != devid ||
	     mvs_ids[i].rev > revid))
		i++;
	ctlr->channels = mvs_ids[i].ports;
	ctlr->quirks = mvs_ids[i].quirks;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "ccc", &ctlr->ccc);
	ctlr->cccc = 8;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "cccc", &ctlr->cccc);
	if (ctlr->ccc == 0 || ctlr->cccc == 0) {
		ctlr->ccc = 0;
		ctlr->cccc = 0;
	}
	if (ctlr->ccc > 100000)
		ctlr->ccc = 100000;
	device_printf(dev,
	    "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n",
	    ((ctlr->quirks & MVS_Q_GENI) ? "I" :
	     ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")),
	    ctlr->channels,
	    ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"),
	    ((ctlr->quirks & MVS_Q_GENI) ?
	    "not supported" : "supported"),
	    ((ctlr->quirks & MVS_Q_GENIIE) ?
	    " with FBS" : ""));
	mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF);
	/* We should have a memory BAR(0). */
	ctlr->r_rid = 0;
	if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &ctlr->r_rid, RF_ACTIVE)))
		return ENXIO;
	if (ATA_INL(ctlr->r_mem, PORT_BASE(0) + SATA_PHYCFG_OFS) != 0)
		ctlr->quirks |= MVS_Q_SOC65;
	/* Setup our own memory management for channels. */
	ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
	ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
	ctlr->sc_iomem.rm_type = RMAN_ARRAY;
	ctlr->sc_iomem.rm_descr = "I/O memory addresses";
	if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
		return (error);
	}
	if ((error = rman_manage_region(&ctlr->sc_iomem,
	    rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return (error);
	}
	mvs_ctlr_setup(dev);
	/* Setup interrupts. */
	if (mvs_setup_interrupt(dev)) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return ENXIO;
	}
	/* Attach all channels on this controller */
	for (unit = 0; unit < ctlr->channels; unit++) {
		child = device_add_child(dev, "mvsch", -1);
		if (child == NULL)
			device_printf(dev, "failed to add channel device\n");
		else
			device_set_ivars(child, (void *)(intptr_t)unit);
	}
	bus_generic_attach(dev);
	return 0;
}
Example #13
int
ofw_pci_init(device_t dev)
{
	struct ofw_pci_softc *sc;
	phandle_t node;
	u_int32_t busrange[2];
	struct ofw_pci_range *rp;
	int error;
	struct ofw_pci_cell_info *cell_info;

	node = ofw_bus_get_node(dev);
	sc = device_get_softc(dev);
	sc->sc_initialized = 1;
	sc->sc_range = NULL;

	cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	sc->sc_cell_info = cell_info;

	if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8)
		busrange[0] = 0;

	sc->sc_dev = dev;
	sc->sc_node = node;
	sc->sc_bus = busrange[0];

	if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) {
		phandle_t c;
		int n, i;

		sc->sc_nrange = 0;
		for (c = OF_child(node); c != 0; c = OF_peer(c)) {
			n = ofw_pci_nranges(c, cell_info);
			if (n > 0)
				sc->sc_nrange += n;
		}
		if (sc->sc_nrange == 0) {
			error = ENXIO;
			goto out;
		}
		sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]),
		    M_DEVBUF, M_WAITOK);
		i = 0;
		for (c = OF_child(node); c != 0; c = OF_peer(c)) {
			n = ofw_pci_fill_ranges(c, &sc->sc_range[i]);
			if (n > 0)
				i += n;
		}
		KASSERT(i == sc->sc_nrange, ("range count mismatch"));
	} else {
		sc->sc_nrange = ofw_pci_nranges(node, cell_info);
		if (sc->sc_nrange <= 0) {
			device_printf(dev, "could not getranges\n");
			error = ENXIO;
			goto out;
		}
		sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]),
		    M_DEVBUF, M_WAITOK);
		ofw_pci_fill_ranges(node, sc->sc_range);
	}

	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "PCI I/O Ports";
	error = rman_init(&sc->sc_io_rman);
	if (error != 0) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto out;
	}

	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "PCI Memory";
	error = rman_init(&sc->sc_mem_rman);
	if (error != 0) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto out;
	}

	for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange &&
	    rp->pci_hi != 0; rp++) {
		error = 0;

		switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) {
		case OFW_PCI_PHYS_HI_SPACE_CONFIG:
			break;
		case OFW_PCI_PHYS_HI_SPACE_IO:
			error = rman_manage_region(&sc->sc_io_rman, rp->pci,
			    rp->pci + rp->size - 1);
			break;
		case OFW_PCI_PHYS_HI_SPACE_MEM32:
		case OFW_PCI_PHYS_HI_SPACE_MEM64:
			error = rman_manage_region(&sc->sc_mem_rman, rp->pci,
			    rp->pci + rp->size - 1);
			break;
		}

		if (error != 0) {
			device_printf(dev,
			    "rman_manage_region(%x, %#jx, %#jx) failed. "
			    "error = %d\n", rp->pci_hi &
			    OFW_PCI_PHYS_HI_SPACEMASK, rp->pci,
			    rp->pci + rp->size - 1, error);
			goto out;
		}
	}

	ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t));
	return (0);

out:
	free(cell_info, M_DEVBUF);
	free(sc->sc_range, M_DEVBUF);
	rman_fini(&sc->sc_io_rman);
	rman_fini(&sc->sc_mem_rman);

	return (error);
}
Example #14
static int
ebus_attach(device_t dev)
{
	struct ebus_softc *sc;
	struct ebus_devinfo *edi;
	struct ebus_rinfo *eri;
	struct resource *res;
	device_t cdev;
	phandle_t node;
	int i, rnum, rid;

	sc = device_get_softc(dev);

	node = ofw_bus_get_node(dev);
	sc->sc_nrange = OF_getprop_alloc(node, "ranges",
	    sizeof(*sc->sc_range), (void **)&sc->sc_range);
	if (sc->sc_nrange == -1) {
		printf("ebus_attach: could not get ranges property\n");
		return (ENXIO);
	}

	sc->sc_rinfo = malloc(sizeof(*sc->sc_rinfo) * sc->sc_nrange, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* For every range, there must be a matching resource. */
	for (rnum = 0; rnum < sc->sc_nrange; rnum++) {
		eri = &sc->sc_rinfo[rnum];
		eri->eri_rtype = ofw_isa_range_restype(&sc->sc_range[rnum]);
		rid = PCIR_BAR(rnum);
		res = bus_alloc_resource_any(dev, eri->eri_rtype, &rid,
		    RF_ACTIVE);
		if (res == NULL) {
			printf("ebus_attach: failed to allocate range "
			    "resource!\n");
			goto fail;
		}
		eri->eri_res = res;
		eri->eri_rman.rm_type = RMAN_ARRAY;
		eri->eri_rman.rm_descr = "EBus range";
		if (rman_init(&eri->eri_rman) != 0) {
			printf("ebus_attach: failed to initialize rman!");
			goto fail;
		}
		if (rman_manage_region(&eri->eri_rman, rman_get_start(res),
		    rman_get_end(res)) != 0) {
			printf("ebus_attach: failed to register region!");
			rman_fini(&eri->eri_rman);
			goto fail;
		}
	}

	ofw_bus_setup_iinfo(node, &sc->sc_iinfo, sizeof(ofw_isa_intr_t));

	/*
	 * Now attach our children.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node)) {
		if ((edi = ebus_setup_dinfo(dev, sc, node)) == NULL)
			continue;
		if ((cdev = device_add_child(dev, NULL, -1)) == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    edi->edi_obdinfo.obd_name);
			ebus_destroy_dinfo(edi);
			continue;
		}
		device_set_ivars(cdev, edi);
	}
	return (bus_generic_attach(dev));

fail:
	for (i = rnum; i >= 0; i--) {
		eri = &sc->sc_rinfo[i];
		if (i < rnum)
			rman_fini(&eri->eri_rman);
		if (eri->eri_res != 0) {
			bus_release_resource(dev, eri->eri_rtype,
			    PCIR_BAR(rnum), eri->eri_res);
		}
	}
	free(sc->sc_rinfo, M_DEVBUF);
	free(sc->sc_range, M_OFWPROP);
	return (ENXIO);
}
Example #15
static int
ebus_pci_attach(device_t dev)
{
	struct ebus_softc *sc;
	struct ebus_rinfo *eri;
	struct resource *res;
	phandle_t node;
	int i, rnum, rid;

	sc = device_get_softc(dev);
	sc->sc_flags |= EBUS_PCI;

	pci_write_config(dev, PCIR_COMMAND,
	    pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_SERRESPEN |
	    PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN, 2);
	pci_write_config(dev, PCIR_CACHELNSZ, 16 /* 64 bytes */, 1);
	pci_write_config(dev, PCIR_LATTIMER, 64 /* 64 PCI cycles */, 1);

	node = ofw_bus_get_node(dev);
	sc->sc_nrange = OF_getprop_alloc(node, "ranges",
	    sizeof(struct isa_ranges), &sc->sc_range);
	if (sc->sc_nrange == -1) {
		printf("%s: could not get ranges property\n", __func__);
		return (ENXIO);
	}

	sc->sc_rinfo = malloc(sizeof(*sc->sc_rinfo) * sc->sc_nrange, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* For every range, there must be a matching resource. */
	for (rnum = 0; rnum < sc->sc_nrange; rnum++) {
		eri = &sc->sc_rinfo[rnum];
		eri->eri_rtype = ofw_isa_range_restype(
		    &((struct isa_ranges *)sc->sc_range)[rnum]);
		rid = PCIR_BAR(rnum);
		res = bus_alloc_resource_any(dev, eri->eri_rtype, &rid,
		    RF_ACTIVE);
		if (res == NULL) {
			printf("%s: failed to allocate range resource!\n",
			    __func__);
			goto fail;
		}
		eri->eri_res = res;
		eri->eri_rman.rm_type = RMAN_ARRAY;
		eri->eri_rman.rm_descr = "EBus range";
		if (rman_init_from_resource(&eri->eri_rman, res) != 0) {
			printf("%s: failed to initialize rman!", __func__);
			goto fail;
		}
	}
	return (ebus_attach(dev, sc, node));

 fail:
	for (i = rnum; i >= 0; i--) {
		eri = &sc->sc_rinfo[i];
		if (i < rnum)
			rman_fini(&eri->eri_rman);
		if (eri->eri_res != 0) {
			bus_release_resource(dev, eri->eri_rtype,
			    PCIR_BAR(rnum), eri->eri_res);
		}
	}
	free(sc->sc_rinfo, M_DEVBUF);
	free(sc->sc_range, M_OFWPROP);
	return (ENXIO);
}
Example #16
static int
pci_iov_delete(struct cdev *cdev)
{
	device_t bus, dev, vf, *devlist;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	if (iov->iov_flags & IOV_BUSY) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}

	if (iov->iov_num_vfs == 0) {
		mtx_unlock(&Giant);
		return (ECHILD);
	}

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		goto out;

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			   "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	error = 0;
out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
Example #17
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
Example #18
static int
lbc_attach(device_t dev)
{
	struct lbc_softc *sc;
	struct rman *rm;
	const struct lbc_resource *lbcres;
	int error;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_bst = rman_get_bustag(sc->sc_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res);

	rm = &sc->sc_rman;
	rm->rm_type = RMAN_ARRAY;
	rm->rm_descr = "MPC85XX Local Bus Space";
	rm->rm_start = 0UL;
	rm->rm_end = ~0UL;
	error = rman_init(rm);
	if (error)
		goto fail;

	error = rman_manage_region(rm, rm->rm_start, rm->rm_end);
	if (error) {
		rman_fini(rm);
		goto fail;
	}

	/*
	 * Initialize configuration register:
	 * - enable Local Bus
	 * - set data buffer control signal function
	 * - disable parity byte select
	 * - set ECC parity type
	 * - set bus monitor timing and timer prescale
	 */
	lbc_write_reg(sc, LBC85XX_LBCR, 0x00000000);

	/*
	 * Initialize clock ratio register:
	 * - disable PLL bypass mode
	 * - configure LCLK delay cycles for the assertion of LALE
	 * - set system clock divider
	 */
	lbc_write_reg(sc, LBC85XX_LCRR, 0x00030008);

	lbcres = mpc85xx_lbc_resources;

	for (; lbcres->lbr_devtype; lbcres++)
		if (!lbc_mk_child(dev, lbcres)) {
			error = ENXIO;
			goto fail;
		}

	return (bus_generic_attach(dev));

fail:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
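
For symmetry, a detach routine matching the attach above would unwind the same state in reverse; the sketch below is hypothetical (it is not the driver's actual detach routine) and assumes the children hold no outstanding reservations against sc_rman.

/*
 * Hypothetical detach counterpart to the attach above; mirrors the cleanup
 * ordering used in the other examples (children, rman, then bus resource).
 */
static int
lbc_detach_sketch(device_t dev)
{
	struct lbc_softc *sc;
	int error;

	sc = device_get_softc(dev);

	/* Detach and delete all children added in attach. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);
	device_delete_children(dev);

	/* Finalize the local bus space rman initialized in attach. */
	rman_fini(&sc->sc_rman);

	/* Release the register window mapped in attach. */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);

	return (0);
}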