Example #1
static int
nvme_attach(device_t dev)
{
	struct nvme_controller	*ctrlr = DEVICE2SOFTC(dev);
	int			status;

	status = nvme_ctrlr_construct(ctrlr, dev);

	if (status != 0)
		return (status);

	/*
	 * Reset controller twice to ensure we do a transition from cc.en==1
	 *  to cc.en==0.  This is because we don't really know what status
	 *  the controller was left in when boot handed off to OS.
	 */
	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0)
		return (status);

	status = nvme_ctrlr_hw_reset(ctrlr);
	if (status != 0)
		return (status);

	nvme_sysctl_initialize_ctrlr(ctrlr);

	ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
	ctrlr->config_hook.ich_arg = ctrlr;

	config_intrhook_establish(&ctrlr->config_hook);

	return (0);
}
Example #2
static int
bcm2835_audio_attach(device_t dev)
{
	struct bcm2835_audio_info *sc;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->dev = dev;
	sc->bufsz = VCHIQ_AUDIO_BUFFER_SIZE;

	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "bcm2835_audio softc");

	mtx_init(&sc->vchi_lock, "bcm2835_audio", "vchi_lock", MTX_DEF);
	mtx_init(&sc->msg_avail_lock, "msg_avail_mtx", "msg_avail_mtx", MTX_DEF);
	cv_init(&sc->msg_avail_cv, "msg_avail_cv");
	mtx_init(&sc->data_lock, "data_mtx", "data_mtx", MTX_DEF);
	cv_init(&sc->data_cv, "data_cv");
	sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;

	/*
	 * We need interrupts enabled for VCHI to work properly,
	 * so delay initialization until they are available.
	 */
	sc->intr_hook.ich_func = bcm2835_audio_delayed_init;
	sc->intr_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->intr_hook) != 0)
		goto no;

	return 0;

no:
	return ENXIO;
}
Example #3
/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
    static struct cdev *atacdev;

    switch (what) {
    case MOD_LOAD:
	/* register controlling device */
	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");

	if (cold) {
	    /* register boot attach to be run when interrupts are enabled */
	    if (!(ata_delayed_attach = (struct intr_config_hook *)
				       malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO))) {
		printf("ata: malloc of delayed attach hook failed\n");
		return EIO;
	    }
	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
		printf("ata: config_intrhook_establish failed\n");
		free(ata_delayed_attach, M_TEMP);
	    }
	}
	return 0;

    case MOD_UNLOAD:
	/* deregister controlling device */
	destroy_dev(atacdev);
	return 0;

    default:
	return EOPNOTSUPP;
    }
}
Example #4
static int
twl_attach(device_t dev)
{
	struct twl_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	TWL_LOCK_INIT(sc);

	/* We have to wait until interrupts are enabled. I2C read and write
	 * only works if the interrupts are available.
	 */
	sc->sc_scan_hook.ich_func = twl_scan;
	sc->sc_scan_hook.ich_arg = dev;

	if (config_intrhook_establish(&sc->sc_scan_hook) != 0)
		return (ENOMEM);

	/* FIXME: should be in DTS file */
	if ((sc->sc_vreg = device_add_child(dev, "twl_vreg", -1)) == NULL)
		device_printf(dev, "could not allocate twl_vreg instance\n");
	if ((sc->sc_clks = device_add_child(dev, "twl_clks", -1)) == NULL)
		device_printf(dev, "could not allocate twl_clks instance\n");

	return (bus_generic_attach(dev));
}
Example #5
static int
xenbus_attach(device_t dev)
{
	struct xenbus_softc *sc = device_get_softc(dev);

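	/*
	 * Defer the rest of the attach until interrupts are enabled;
	 * the hook runs xenbus_attach_deferred.
	 */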
	sc->xs_attachcb.ich_func = xenbus_attach_deferred;
	sc->xs_attachcb.ich_arg = dev;
	config_intrhook_establish(&sc->xs_attachcb);

	return (0);
}
Example #6
static int
bcm_fb_attach(device_t dev)
{
	struct bcmsc_softc *sc = device_get_softc(dev);
	int dma_size = sizeof(struct bcm_fb_config);
	int err;

	sc->dev = dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    dma_size, 1,		/* maxsize, nsegments */
	    dma_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dma_tag);
	if (err) {
		device_printf(dev, "cannot create DMA tag\n");
		goto fail;
	}

	err = bus_dmamem_alloc(sc->dma_tag, (void **)&sc->fb_config, 0,
	    &sc->dma_map);
	if (err) {
		device_printf(dev, "cannot allocate framebuffer\n");
		goto fail;
	}

	err = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->fb_config,
	    dma_size, bcm_fb_dmamap_cb, &sc->fb_config_phys, BUS_DMA_NOWAIT);

	if (err) {
		device_printf(dev, "cannot load DMA map\n");
		goto fail;
	}

	/*
	 * We have to wait until interrupts are enabled.
	 * Mailbox relies on it to get data from VideoCore
	 */
	sc->init_hook.ich_func = bcm_fb_init;
	sc->init_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->init_hook) != 0) {
		device_printf(dev, "failed to establish intrhook\n");
		return (ENOMEM);
	}

	return (0);

fail:
	return (ENXIO);
}
Example #7
static int
imx6_anatop_attach(device_t dev)
{
	struct imx6_anatop_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Allocate bus_space resources. */
	if (bus_alloc_resources(dev, imx6_anatop_spec, sc->res)) {
		device_printf(dev, "Cannot allocate resources\n");
		err = ENXIO;
		goto out;
	}

	sc->intr_setup_hook.ich_func = intr_setup;
	sc->intr_setup_hook.ich_arg = sc;
	config_intrhook_establish(&sc->intr_setup_hook);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(sc->dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
	    OID_AUTO, "cpu_voltage", CTLFLAG_RD,
	    &sc->cpu_curmv, 0, "Current CPU voltage in millivolts");

	imx6_anatop_sc = sc;

	/*
	 * Other code seen on the net sets this SELFBIASOFF flag around the same
	 * time the temperature sensor is set up, although it's unclear how the
	 * two are related (if at all).
	 */
	imx6_anatop_write_4(IMX6_ANALOG_PMU_MISC0_SET, 
	    IMX6_ANALOG_PMU_MISC0_SELFBIASOFF);

	/*
	 * Some day, when we're ready to deal with the actual anatop regulators
	 * that are described in fdt data as children of this "bus", this would
	 * be the place to invoke a simplebus helper routine to instantiate the
	 * children from the fdt data.
	 */

	err = 0;

out:

	if (err != 0) {
		bus_release_resources(dev, imx6_anatop_spec, sc->res);
	}

	return (err);
}
Example #8
static int
am335x_pmic_attach(device_t dev)
{
	struct am335x_pmic_softc *sc;

	sc = device_get_softc(dev);

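	/*
	 * Defer PMIC setup until interrupts are enabled; the hook
	 * runs am335x_pmic_start.
	 */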
	sc->enum_hook.ich_func = am335x_pmic_start;
	sc->enum_hook.ich_arg = dev;

	if (config_intrhook_establish(&sc->enum_hook) != 0)
		return (ENOMEM);

	return (0);
}
Example #9
static int
mmc_attach(device_t dev)
{
	struct mmc_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	MMC_LOCK_INIT(sc);

	/* We'll probe and attach our children later, but before / mount */
	sc->config_intrhook.ich_func = mmc_delayed_attach;
	sc->config_intrhook.ich_arg = sc;
	if (config_intrhook_establish(&sc->config_intrhook) != 0)
		device_printf(dev, "config_intrhook_establish failed\n");
	return (0);
}
Example #10
static int
adb_bus_attach(device_t dev)
{
	struct adb_softc *sc = device_get_softc(dev);
	sc->enum_hook.ich_func = adb_bus_enumerate;
	sc->enum_hook.ich_arg = dev;

	/*
	 * We should wait until interrupts are enabled to try to probe
	 * the bus. Enumerating the ADB involves receiving packets,
	 * which works best with interrupts enabled.
	 */
	
	if (config_intrhook_establish(&sc->enum_hook) != 0)
		return (ENOMEM);

	return (0);
}
Example #11
static int
ds1307_attach(device_t dev)
{
	struct ds1307_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_addr = iicbus_get_addr(dev);
	sc->sc_year0 = 1900;
	sc->enum_hook.ich_func = ds1307_start;
	sc->enum_hook.ich_arg = dev;

	/*
	 * We have to wait until interrupts are enabled.  Usually I2C read
	 * and write only works when the interrupts are available.
	 */
	if (config_intrhook_establish(&sc->enum_hook) != 0)
		return (ENOMEM);

	return (0);
}
Example #12
static int
pcf8563_attach(device_t dev)
{
	struct pcf8563_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_addr = iicbus_get_addr(dev);
	if (sc->sc_addr == 0)
		sc->sc_addr = PCF8563_ADDR;
	sc->sc_year0 = 1900;
	sc->enum_hook.ich_func = pcf8563_start;
	sc->enum_hook.ich_arg = dev;

	/*
	 * We have to wait until interrupts are enabled.  Sometimes I2C read
	 * and write only works when the interrupts are available.
	 */
	if (config_intrhook_establish(&sc->enum_hook) != 0)
		return (ENOMEM);

	return (0);
}
Example #13
static int
am335x_pmic_attach(device_t dev)
{
	struct am335x_pmic_softc *sc;
	int rid;

	sc = device_get_softc(dev);

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		/* return (ENXIO); */
	}

	sc->enum_hook.ich_func = am335x_pmic_start;
	sc->enum_hook.ich_arg = dev;

	if (config_intrhook_establish(&sc->enum_hook) != 0)
		return (ENOMEM);

	return (0);
}
Example #14
/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			to initialize ctlr, and to attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static int
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	u_int32_t		command;
	int			res_id;
	int			error;

	twa_dbg_dprint_enter(3, sc);

	/* Initialize the softc structure. */
	sc->twa_bus_dev = dev;

	sysctl_ctx_init(&sc->twa_sysctl_ctx);
	sc->twa_sysctl_tree = SYSCTL_ADD_NODE(&sc->twa_sysctl_ctx,
				SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
				device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->twa_sysctl_tree == NULL) {
		twa_printf(sc, "Cannot add sysctl tree node.\n");
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->twa_sysctl_ctx, SYSCTL_CHILDREN(sc->twa_sysctl_tree),
				OID_AUTO, "driver_version", CTLFLAG_RD,
				TWA_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Make sure we are going to be able to talk to this board. */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		twa_printf(sc, "Register window not available.\n");
		return(ENXIO);
	}
	
	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/* Allocate the PCI register window. */
	res_id = TWA_IO_CONFIG_REG;
	if ((sc->twa_io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &res_id,
					0, ~0, 1, RF_ACTIVE)) == NULL) {
		twa_printf(sc, "can't allocate register window.\n");
		twa_free(sc);
		return(ENXIO);
	}
	sc->twa_bus_tag = rman_get_bustag(sc->twa_io_res);
	sc->twa_bus_handle = rman_get_bushandle(sc->twa_io_res);

	/* Allocate and connect our interrupt. */
	res_id = 0;
	if ((sc->twa_irq_res = bus_alloc_resource(sc->twa_bus_dev, SYS_RES_IRQ,
					&res_id, 0, ~0, 1,
					RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		twa_printf(sc, "Can't allocate interrupt.\n");
		twa_free(sc);
		return(ENXIO);
	}
	if (bus_setup_intr(sc->twa_bus_dev, sc->twa_irq_res, INTR_TYPE_CAM,
				twa_pci_intr, sc, &sc->twa_intr_handle)) {
		twa_printf(sc, "Can't set up interrupt.\n");
		twa_free(sc);
		return(ENXIO);
	}

	/* Initialize the driver for this controller. */
	if ((error = twa_setup(sc))) {
		twa_free(sc);
		return(error);
	}

	/* Print some information about the controller and configuration. */
	twa_describe_controller(sc);

	/* Create the control device. */
	sc->twa_ctrl_dev = make_dev(&twa_cdevsw, device_get_unit(sc->twa_bus_dev),
					UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
					"twa%d", device_get_unit(sc->twa_bus_dev));
	sc->twa_ctrl_dev->si_drv1 = sc;

	/*
	 * Schedule ourselves to bring the controller up once interrupts are
	 * available.  This isn't strictly necessary, since we disable
	 * interrupts while probing the controller, but it is more in keeping
	 * with common practice for other disk devices.
	 */
	sc->twa_ich.ich_func = twa_intrhook;
	sc->twa_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->twa_ich) != 0) {
		twa_printf(sc, "Can't establish configuration hook.\n");
		twa_free(sc);
		return(ENXIO);
	}

	if ((error = twa_cam_setup(sc))) {
		twa_free(sc);
		return(error);
	}
	return(0);
}
Example #15
static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;
	phandle_t node;

	sc->sc_dev = dev;
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	                                        RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * Test for "arm,io-coherent" property and disable sync operation if
	 * platform is I/O coherent. Outer sync operations are not needed
	 * on coherent platform and may be harmful in certain situations.
	 */
	node = ofw_bus_get_node(dev);
	if (OF_hasprop(node, "arm,io-coherent"))
		sc->sc_io_coherent = true;

	/*
	 * If L2 cache is already enabled then something has violated the rules,
	 * because caches are supposed to be off at kernel entry.  The cache
	 * must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual.  So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF, M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return(ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the l2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}
Example #16
int
ida_init(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	device_t child;
	int error, i, unit;

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(
		/* parent	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
		/* nsegments	*/ 1,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ NULL,
		/* lockarg	*/ NULL,
		&ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(
		/* parent 	*/ ida->parent_dmat,
		/* alignment	*/ 1,
		/* boundary	*/ 0,
		/* lowaddr	*/ BUS_SPACE_MAXADDR,
		/* highaddr	*/ BUS_SPACE_MAXADDR,
		/* filter	*/ NULL,
		/* filterarg	*/ NULL,
		/* maxsize	*/ MAXBSIZE,
		/* nsegments	*/ IDA_NSEG,
		/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
		/* flags	*/ 0,
		/* lockfunc	*/ busdma_lock_mutex,
		/* lockarg	*/ &Giant,
		&ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	error = ida_alloc_qcbs(ida);
	if (error)
		return (error);

	mtx_lock(&ida->lock);
	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		mtx_unlock(&ida->lock);
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return (error);
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			mtx_unlock(&ida->lock);
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return (error);
		}
	}
	
	ida->cmd.int_enable(ida, 1);
	ida->flags |= IDA_ATTACHED;
	mtx_unlock(&ida->lock);

	for (i = 0; i < cinfo.num_drvs; i++) {
		child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
		if (child != NULL)
			device_set_ivars(child, (void *)(intptr_t)i);
	}

	ida->ich.ich_func = ida_startup;
	ida->ich.ich_arg = ida;
	if (config_intrhook_establish(&ida->ich) != 0) {
		device_delete_children(ida->dev);
		device_printf(ida->dev, "Cannot establish configuration hook\n");
		return (ENXIO);
	}

	unit = device_get_unit(ida->dev);
	ida->ida_dev_t = make_dev(&ida_cdevsw, unit,
				 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
				 "ida%d", unit);
	ida->ida_dev_t->si_drv1 = ida;

	return (0);
}
Example #17
static int
atiixp_pci_attach(device_t dev)
{
	struct atiixp_info *sc;
	int i;

	sc = kmalloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "sound softc");
	sc->dev = dev;
	/*
	 * Default DMA segments per playback / recording channel
	 */
	sc->dma_segs = ATI_IXP_DMA_CHSEGS;

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_enable_busmaster(dev);

	sc->regid = PCIR_BAR(0);
	sc->regtype = SYS_RES_MEMORY;
	sc->reg = bus_alloc_resource_any(dev, sc->regtype, &sc->regid,
								RF_ACTIVE);

	if (!sc->reg) {
		device_printf(dev, "unable to allocate register space\n");
		goto bad;
	}

	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, ATI_IXP_DEFAULT_BUFSZ, 65536);

	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
						RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq || 
			snd_setup_intr(dev, sc->irq, INTR_MPSAFE,
						atiixp_intr, sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	/*
	 * Let the user choose the best DMA segments.
	 */
	if (resource_int_value(device_get_name(dev),
			device_get_unit(dev), "dma_segs",
			&i) == 0) {
		if (i < ATI_IXP_DMA_CHSEGS_MIN)
			i = ATI_IXP_DMA_CHSEGS_MIN;
		if (i > ATI_IXP_DMA_CHSEGS_MAX)
			i = ATI_IXP_DMA_CHSEGS_MAX;
		sc->dma_segs = i;
	}

	/*
	 * Round the value down to the nearest power of two.
	 */
	i = 0;
	while (sc->dma_segs >> i)
		i++;
	sc->dma_segs = 1 << (i - 1);
	if (sc->dma_segs < ATI_IXP_DMA_CHSEGS_MIN)
		sc->dma_segs = ATI_IXP_DMA_CHSEGS_MIN;
	else if (sc->dma_segs > ATI_IXP_DMA_CHSEGS_MAX)
		sc->dma_segs = ATI_IXP_DMA_CHSEGS_MAX;

	/*
	 * DMA tag for scatter-gather buffers and link pointers
	 */
	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
		/*flags*/0,
		&sc->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/sc->dma_segs * ATI_IXP_NCHANS *
						sizeof(struct atiixp_dma_op),
		/*nsegments*/1, /*maxsegz*/0x3ffff,
		/*flags*/0,
		&sc->sgd_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (bus_dmamem_alloc(sc->sgd_dmat, (void **)&sc->sgd_table, 
				BUS_DMA_NOWAIT, &sc->sgd_dmamap) != 0)
		goto bad;

	if (bus_dmamap_load(sc->sgd_dmat, sc->sgd_dmamap, sc->sgd_table, 
				sc->dma_segs * ATI_IXP_NCHANS *
						sizeof(struct atiixp_dma_op),
				atiixp_dma_cb, sc, 0))
		goto bad;


	atiixp_chip_pre_init(sc);

	sc->delayed_attach.ich_func = atiixp_chip_post_init;
	sc->delayed_attach.ich_arg = sc;
	sc->delayed_attach.ich_desc = "snd_atiixp";
	if (cold == 0 ||
			config_intrhook_establish(&sc->delayed_attach) != 0) {
		sc->delayed_attach.ich_func = NULL;
		atiixp_chip_post_init(sc);
	}

	return 0;

bad:
	atiixp_release_resource(sc);
	return ENXIO;
}
Example #18
/********************************************************************************
 * Allocate resources, initialise the controller.
 */
static int
twe_attach(device_t dev)
{
    struct twe_softc	*sc;
    int			rid, error;
    u_int32_t		command;

    debug_called(4);

    /*
     * Initialise the softc structure.
     */
    sc = device_get_softc(dev);
    sc->twe_dev = dev;

    sysctl_ctx_init(&sc->sysctl_ctx);
    sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	device_get_nameunit(dev), CTLFLAG_RD, 0, "");
    if (sc->sysctl_tree == NULL) {
	twe_printf(sc, "cannot add sysctl tree node\n");
	return (ENXIO);
    }
    SYSCTL_ADD_STRING(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	OID_AUTO, "driver_version", CTLFLAG_RD, TWE_DRIVER_VERSION_STRING, 0,
	"TWE driver version");

    /*
     * Make sure we are going to be able to talk to this board.
     */
    command = pci_read_config(dev, PCIR_COMMAND, 2);
    if ((command & PCIM_CMD_PORTEN) == 0) {
	twe_printf(sc, "register window not available\n");
	return(ENXIO);
    }
    /*
     * Force the busmaster enable bit on, in case the BIOS forgot.
     */
    command |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, command, 2);

    /*
     * Allocate the PCI register window.
     */
    rid = TWE_IO_CONFIG_REG;
    if ((sc->twe_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, 
        RF_ACTIVE)) == NULL) {
	twe_printf(sc, "can't allocate register window\n");
	twe_free(sc);
	return(ENXIO);
    }
    sc->twe_btag = rman_get_bustag(sc->twe_io);
    sc->twe_bhandle = rman_get_bushandle(sc->twe_io);

    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    if (bus_dma_tag_create(NULL, 				/* parent */
			   1, 0, 				/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT, 		/* lowaddr */
			   BUS_SPACE_MAXADDR, 			/* highaddr */
			   NULL, NULL, 				/* filter, filterarg */
			   MAXBSIZE, TWE_MAX_SGL_LENGTH,	/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
			   0,					/* flags */
			   NULL,				/* lockfunc */
			   NULL,				/* lockarg */
			   &sc->twe_parent_dmat)) {
	twe_printf(sc, "can't allocate parent DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }

    /* 
     * Allocate and connect our interrupt.
     */
    rid = 0;
    if ((sc->twe_irq = bus_alloc_resource_any(sc->twe_dev, SYS_RES_IRQ,
        &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
	twe_printf(sc, "can't allocate interrupt\n");
	twe_free(sc);
	return(ENXIO);
    }
    if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_TYPE_BIO | INTR_ENTROPY,  
		       NULL, twe_pci_intr, sc, &sc->twe_intr)) {
	twe_printf(sc, "can't set up interrupt\n");
	twe_free(sc);
	return(ENXIO);
    }

    /*
     * Create DMA tag for mapping commands into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   sizeof(TWE_Command) *
			   TWE_Q_LENGTH, 1,		/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockarg */
			   &sc->twe_cmd_dmat)) {
	twe_printf(sc, "can't allocate data buffer DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }
    /*
     * Allocate memory and make it available for DMA.
     */
    if (bus_dmamem_alloc(sc->twe_cmd_dmat, (void **)&sc->twe_cmd,
			 BUS_DMA_NOWAIT, &sc->twe_cmdmap)) {
	twe_printf(sc, "can't allocate command memory\n");
	return(ENOMEM);
    }
    bus_dmamap_load(sc->twe_cmd_dmat, sc->twe_cmdmap, sc->twe_cmd,
		    sizeof(TWE_Command) * TWE_Q_LENGTH,
		    twe_setup_request_dmamap, sc, 0);
    bzero(sc->twe_cmd, sizeof(TWE_Command) * TWE_Q_LENGTH);

    /*
     * Create DMA tag for mapping objects into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   BUS_DMA_ALLOCNOW,		/* flags */
			   busdma_lock_mutex,		/* lockfunc */
			   &Giant,			/* lockarg */
			   &sc->twe_buffer_dmat)) {
	twe_printf(sc, "can't allocate data buffer DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }

    /*
     * Create DMA tag for mapping objects into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   MAXBSIZE, 1,			/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,				/* flags */
			   NULL,			/* lockfunc */
			   NULL,			/* lockarg */
			   &sc->twe_immediate_dmat)) {
	twe_printf(sc, "can't allocate data buffer DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }
    /*
     * Allocate memory for requests which cannot sleep or support continuation.
     */
     if (bus_dmamem_alloc(sc->twe_immediate_dmat, (void **)&sc->twe_immediate,
			  BUS_DMA_NOWAIT, &sc->twe_immediate_map)) {
	twe_printf(sc, "can't allocate memory for immediate requests\n");
	return(ENOMEM);
     }

    /*
     * Initialise the controller and driver core.
     */
    if ((error = twe_setup(sc))) {
	twe_free(sc);
	return(error);
    }

    /*
     * Print some information about the controller and configuration.
     */
    twe_describe_controller(sc);

    /*
     * Create the control device.
     */
    sc->twe_dev_t = make_dev(&twe_cdevsw, device_get_unit(sc->twe_dev), UID_ROOT, GID_OPERATOR,
			     S_IRUSR | S_IWUSR, "twe%d", device_get_unit(sc->twe_dev));
    sc->twe_dev_t->si_drv1 = sc;
    /*
     * Schedule ourselves to bring the controller up once interrupts are available.
     * This isn't strictly necessary, since we disable interrupts while probing the
     * controller, but it is more in keeping with common practice for other disk 
     * devices.
     */
    sc->twe_ich.ich_func = twe_intrhook;
    sc->twe_ich.ich_arg = sc;
    if (config_intrhook_establish(&sc->twe_ich) != 0) {
	twe_printf(sc, "can't establish configuration hook\n");
	twe_free(sc);
	return(ENXIO);
    }

    return(0);
}
Example #19
static int
i2s_attach(device_t self)
{
	struct i2s_softc 	*sc;
	struct resource 	*dbdma_irq;
	void			*dbdma_ih;
	int 			 rid, oirq, err;
	phandle_t 		 port;
	
	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->aoa.sc_dev = self;
	sc->node = ofw_bus_get_node(self);

	port = of_find_firstchild_byname(sc->node, "i2s-a");
	if (port == -1)
		return (ENXIO);
	sc->soundnode = of_find_firstchild_byname(port, "sound");
	if (sc->soundnode == -1)
		return (ENXIO);
 
	mtx_init(&sc->port_mtx, "port_mtx", NULL, MTX_DEF);

	/* Map the controller register space. */
	rid = 0;
	sc->reg = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->reg == NULL)
		return ENXIO;

	/* Map the DBDMA channel register space. */
	rid = 1;
	sc->aoa.sc_odma = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, 
	    RF_ACTIVE);
	if (sc->aoa.sc_odma == NULL)
		return ENXIO;

	/* Establish the DBDMA channel edge-triggered interrupt. */
	rid = 1;
	dbdma_irq = bus_alloc_resource_any(self, SYS_RES_IRQ, 
	    &rid, RF_SHAREABLE | RF_ACTIVE);
	if (dbdma_irq == NULL)
		return (ENXIO);

	/* Now initialize the controller. */
	err = i2s_setup(sc, 44100, 16, 64);
	if (err != 0)
		return (err);

	snd_setup_intr(self, dbdma_irq, INTR_MPSAFE, aoa_interrupt,
	    sc, &dbdma_ih);

	oirq = rman_get_start(dbdma_irq);
	err = powerpc_config_intr(oirq, INTR_TRIGGER_EDGE, INTR_POLARITY_LOW);
	if (err != 0)
		return (err);

	/*
	 * Register a hook for delayed attach in order to allow
	 * the I2C controller to attach.
	 */
	if ((i2s_delayed_attach = malloc(sizeof(struct intr_config_hook), 
	    M_TEMP, M_WAITOK | M_ZERO)) == NULL)
		return (ENOMEM);

	i2s_delayed_attach->ich_func = i2s_postattach;
	i2s_delayed_attach->ich_arg = sc;

	if (config_intrhook_establish(i2s_delayed_attach) != 0)
		return (ENOMEM);

	return (aoa_attach(sc));
}
Example #20
/*
 *				SMBUS API FUNCTIONS
 *
 * Called from ig4iic_pci_attach/detach()
 */
int
ig4iic_attach(ig4iic_softc_t *sc)
{
	int error;
	uint32_t v;

	v = reg_read(sc, IG4_REG_COMP_TYPE);
	v = reg_read(sc, IG4_REG_COMP_PARAM1);
	v = reg_read(sc, IG4_REG_GENERAL);
	if ((v & IG4_GENERAL_SWMODE) == 0) {
		v |= IG4_GENERAL_SWMODE;
		reg_write(sc, IG4_REG_GENERAL, v);
		v = reg_read(sc, IG4_REG_GENERAL);
	}

	v = reg_read(sc, IG4_REG_SW_LTR_VALUE);
	v = reg_read(sc, IG4_REG_AUTO_LTR_VALUE);

	v = reg_read(sc, IG4_REG_COMP_VER);
	if (v != IG4_COMP_VER) {
		error = ENXIO;
		goto done;
	}
	v = reg_read(sc, IG4_REG_SS_SCL_HCNT);
	v = reg_read(sc, IG4_REG_SS_SCL_LCNT);
	v = reg_read(sc, IG4_REG_FS_SCL_HCNT);
	v = reg_read(sc, IG4_REG_FS_SCL_LCNT);
	v = reg_read(sc, IG4_REG_SDA_HOLD);

	v = reg_read(sc, IG4_REG_SS_SCL_HCNT);
	reg_write(sc, IG4_REG_FS_SCL_HCNT, v);
	v = reg_read(sc, IG4_REG_SS_SCL_LCNT);
	reg_write(sc, IG4_REG_FS_SCL_LCNT, v);

	/*
	 * Program based on a 25000 Hz clock.  This is a bit of a
	 * hack (obviously).  The defaults are 400 and 470 for standard
	 * and 60 and 130 for fast.  The defaults for standard fail
	 * utterly (presumably cause an abort) because the clock time
	 * is ~18.8ms by default.  This brings it down to ~4ms (for now).
	 */
	reg_write(sc, IG4_REG_SS_SCL_HCNT, 100);
	reg_write(sc, IG4_REG_SS_SCL_LCNT, 125);
	reg_write(sc, IG4_REG_FS_SCL_HCNT, 100);
	reg_write(sc, IG4_REG_FS_SCL_LCNT, 125);

	/*
	 * Use a threshold of 1 so we get interrupted on each character,
	 * allowing us to use mtx_sleep() in our poll code.  Not perfect
	 * but this is better than using DELAY() for receiving data.
	 *
	 * See ig4_var.h for details on interrupt handler synchronization.
	 */
	reg_write(sc, IG4_REG_RX_TL, 1);

	reg_write(sc, IG4_REG_CTL,
		  IG4_CTL_MASTER |
		  IG4_CTL_SLAVE_DISABLE |
		  IG4_CTL_RESTARTEN |
		  IG4_CTL_SPEED_STD);

	sc->smb = device_add_child(sc->dev, "smbus", -1);
	if (sc->smb == NULL) {
		device_printf(sc->dev, "smbus driver not found\n");
		error = ENXIO;
		goto done;
	}

#if 0
	/*
	 * Don't do this, it blows up the PCI config
	 */
	reg_write(sc, IG4_REG_RESETS, IG4_RESETS_ASSERT);
	reg_write(sc, IG4_REG_RESETS, IG4_RESETS_DEASSERT);
#endif

	/*
	 * Interrupt on STOP detect or receive character ready
	 */
	reg_write(sc, IG4_REG_INTR_MASK, IG4_INTR_STOP_DET |
					 IG4_INTR_RX_FULL);
	mtx_lock(&sc->io_lock);
	if (set_controller(sc, 0))
		device_printf(sc->dev, "controller error during attach-1\n");
	if (set_controller(sc, IG4_I2C_ENABLE))
		device_printf(sc->dev, "controller error during attach-2\n");
	mtx_unlock(&sc->io_lock);
	error = bus_setup_intr(sc->dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE,
			       NULL, ig4iic_intr, sc, &sc->intr_handle);
	if (error) {
		device_printf(sc->dev,
			      "Unable to setup irq: error %d\n", error);
	}

	sc->enum_hook.ich_func = ig4iic_start;
	sc->enum_hook.ich_arg = sc->dev;

	/* We have to wait until interrupts are enabled. I2C read and write
	 * only works if the interrupts are available.
	 */
	if (config_intrhook_establish(&sc->enum_hook) != 0)
		error = ENOMEM;
	else
		error = 0;

done:
	return (error);
}
Example #21
static int
bcm2835_cpufreq_attach(device_t dev)
{
	struct bcm2835_cpufreq_softc *sc;
	struct sysctl_oid *oid;

	/* set self dev */
	sc = device_get_softc(dev);
	sc->dev = dev;

	/* initial values */
	sc->arm_max_freq = -1;
	sc->arm_min_freq = -1;
	sc->core_max_freq = -1;
	sc->core_min_freq = -1;
	sc->sdram_max_freq = -1;
	sc->sdram_min_freq = -1;
	sc->max_voltage_core = 0;
	sc->min_voltage_core = 0;

	/* setup sysctl at first device */
	if (device_get_unit(dev) == 0) {
		sysctl_ctx_init(&bcm2835_sysctl_ctx);
		/* create node for hw.cpufreq */
		oid = SYSCTL_ADD_NODE(&bcm2835_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "cpufreq",
		    CTLFLAG_RD, NULL, "");

		/* Frequency (Hz) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "arm_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_arm_freq, "IU",
		    "ARM frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "core_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_core_freq, "IU",
		    "Core frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "sdram_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_sdram_freq, "IU",
		    "SDRAM frequency (Hz)");

		/* Turbo state */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "turbo", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_turbo, "IU",
		    "Disables dynamic clocking");

		/* Voltage (offset from 1.2V in units of 0.025V) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_core", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_voltage_core, "I",
		    "ARM/GPU core voltage "
		    "(offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram", CTLTYPE_INT | CTLFLAG_WR, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram, "I",
		    "SDRAM voltage (offset from 1.2V in units of 0.025V)");

		/* Voltage individual SDRAM */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_c", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_c, "I",
		    "SDRAM controller voltage "
		    "(offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_i", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_i, "I",
		    "SDRAM I/O voltage (offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_p", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_p, "I",
		    "SDRAM phy voltage (offset from 1.2V in units of 0.025V)");

		/* Temperature */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0,
		    sysctl_bcm2835_cpufreq_temperature, "I",
		    "SoC temperature (thousandths of a degree C)");
	}

	/* ARM->VC lock */
	sema_init(&vc_sema, 1, "vcsema");

	/* register callback for using mbox when interrupts are enabled */
	sc->init_hook.ich_func = bcm2835_cpufreq_init;
	sc->init_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->init_hook) != 0) {
		device_printf(dev, "config_intrhook_establish failed\n");
		return (ENOMEM);
	}

	/* this device is controlled by cpufreq(4) */
	cpufreq_register(dev);

	return (0);
}
Example #22
/********************************************************************************
 * Allocate resources, initialise the controller.
 */
static int
twe_attach(device_t dev)
{
    struct twe_softc	*sc;
    int			rid, error;
    u_int32_t		command;

    debug_called(4);

    /*
     * Initialise the softc structure.
     */
    sc = device_get_softc(dev);
    sc->twe_dev = dev;

    /*
     * Make sure we are going to be able to talk to this board.
     */
    command = pci_read_config(dev, PCIR_COMMAND, 2);
    if ((command & PCIM_CMD_PORTEN) == 0) {
	twe_printf(sc, "register window not available\n");
	return(ENXIO);
    }
    /*
     * Force the busmaster enable bit on, in case the BIOS forgot.
     */
    command |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, command, 2);

    /*
     * Allocate the PCI register window.
     */
    rid = TWE_IO_CONFIG_REG;
    if ((sc->twe_io = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE)) == NULL) {
	twe_printf(sc, "can't allocate register window\n");
	twe_free(sc);
	return(ENXIO);
    }
    sc->twe_btag = rman_get_bustag(sc->twe_io);
    sc->twe_bhandle = rman_get_bushandle(sc->twe_io);

    /*
     * Allocate the parent bus DMA tag appropriate for PCI.
     */
    if (bus_dma_tag_create(NULL, 				/* parent */
			   1, 0, 				/* alignment, boundary */
			   BUS_SPACE_MAXADDR_32BIT, 		/* lowaddr */
			   BUS_SPACE_MAXADDR, 			/* highaddr */
			   NULL, NULL, 				/* filter, filterarg */
			   MAXBSIZE, TWE_MAX_SGL_LENGTH,	/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
			   BUS_DMA_ALLOCNOW,			/* flags */
			   &sc->twe_parent_dmat)) {
	twe_printf(sc, "can't allocate parent DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }

    /* 
     * Allocate and connect our interrupt.
     */
    rid = 0;
    if ((sc->twe_irq = bus_alloc_resource(sc->twe_dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
	twe_printf(sc, "can't allocate interrupt\n");
	twe_free(sc);
	return(ENXIO);
    }
    if (bus_setup_intr(sc->twe_dev, sc->twe_irq, INTR_TYPE_BIO | INTR_ENTROPY,  twe_pci_intr, sc, &sc->twe_intr)) {
	twe_printf(sc, "can't set up interrupt\n");
	twe_free(sc);
	return(ENXIO);
    }

    /*
     * Create DMA tag for mapping objects into controller-addressable space.
     */
    if (bus_dma_tag_create(sc->twe_parent_dmat, 	/* parent */
			   1, 0, 			/* alignment, boundary */
			   BUS_SPACE_MAXADDR,		/* lowaddr */
			   BUS_SPACE_MAXADDR, 		/* highaddr */
			   NULL, NULL, 			/* filter, filterarg */
			   MAXBSIZE, TWE_MAX_SGL_LENGTH,/* maxsize, nsegments */
			   BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			   0,				/* flags */
			   &sc->twe_buffer_dmat)) {
	twe_printf(sc, "can't allocate data buffer DMA tag\n");
	twe_free(sc);
	return(ENOMEM);
    }

    /*
     * Initialise the controller and driver core.
     */
    if ((error = twe_setup(sc)))
	return(error);

    /*
     * Print some information about the controller and configuration.
     */
    twe_describe_controller(sc);

    /*
     * Create the control device.
     */
    sc->twe_dev_t = make_dev(&twe_cdevsw, device_get_unit(sc->twe_dev), UID_ROOT, GID_OPERATOR,
			     S_IRUSR | S_IWUSR, "twe%d", device_get_unit(sc->twe_dev));
    sc->twe_dev_t->si_drv1 = sc;
    /*
     * Schedule ourselves to bring the controller up once interrupts are available.
     * This isn't strictly necessary, since we disable interrupts while probing the
     * controller, but it is more in keeping with common practice for other disk 
     * devices.
     */
    sc->twe_ich.ich_func = twe_intrhook;
    sc->twe_ich.ich_arg = sc;
    if (config_intrhook_establish(&sc->twe_ich) != 0) {
	twe_printf(sc, "can't establish configuration hook\n");
	twe_free(sc);
	return(ENXIO);
    }

    return(0);
}
Example #23
int
isci_initialize(struct isci_softc *isci)
{
    int error;
    uint32_t status = 0;
    uint32_t library_object_size;
    uint32_t verbosity_mask;
    uint32_t scic_log_object_mask;
    uint32_t scif_log_object_mask;
    uint8_t *header_buffer;

    library_object_size = scif_library_get_object_size(SCI_MAX_CONTROLLERS);

    isci->sci_library_memory =
        malloc(library_object_size, M_ISCI, M_NOWAIT | M_ZERO );

    isci->sci_library_handle = scif_library_construct(
                                   isci->sci_library_memory, SCI_MAX_CONTROLLERS);

    sci_object_set_association( isci->sci_library_handle, (void *)isci);

    verbosity_mask = (1<<SCI_LOG_VERBOSITY_ERROR) |
                     (1<<SCI_LOG_VERBOSITY_WARNING) | (1<<SCI_LOG_VERBOSITY_INFO) |
                     (1<<SCI_LOG_VERBOSITY_TRACE);

    scic_log_object_mask = 0xFFFFFFFF;
    scic_log_object_mask &= ~SCIC_LOG_OBJECT_COMPLETION_QUEUE;
    scic_log_object_mask &= ~SCIC_LOG_OBJECT_SSP_IO_REQUEST;
    scic_log_object_mask &= ~SCIC_LOG_OBJECT_STP_IO_REQUEST;
    scic_log_object_mask &= ~SCIC_LOG_OBJECT_SMP_IO_REQUEST;
    scic_log_object_mask &= ~SCIC_LOG_OBJECT_CONTROLLER;

    scif_log_object_mask = 0xFFFFFFFF;
    scif_log_object_mask &= ~SCIF_LOG_OBJECT_CONTROLLER;
    scif_log_object_mask &= ~SCIF_LOG_OBJECT_IO_REQUEST;

    TUNABLE_INT_FETCH("hw.isci.debug_level", &g_isci_debug_level);

    sci_logger_enable(sci_object_get_logger(isci->sci_library_handle),
                      scif_log_object_mask, verbosity_mask);

    sci_logger_enable(sci_object_get_logger(
                          scif_library_get_scic_handle(isci->sci_library_handle)),
                      scic_log_object_mask, verbosity_mask);

    header_buffer = (uint8_t *)&isci->pci_common_header;
    for (uint8_t i = 0; i < sizeof(isci->pci_common_header); i++)
        header_buffer[i] = pci_read_config(isci->device, i, 1);

    scic_library_set_pci_info(
        scif_library_get_scic_handle(isci->sci_library_handle),
        &isci->pci_common_header);

    isci->oem_parameters_found = FALSE;

    isci_get_oem_parameters(isci);

    /* trigger interrupt if 32 completions occur before timeout expires */
    isci->coalesce_number = 32;

    /* trigger interrupt if 2 microseconds elapse after a completion occurs,
	 *  regardless of whether "coalesce_number" completions have occurred
     */
    isci->coalesce_timeout = 2;

    isci->controller_count = scic_library_get_pci_device_controller_count(
                                 scif_library_get_scic_handle(isci->sci_library_handle));

    for (int index = 0; index < isci->controller_count; index++) {
        struct ISCI_CONTROLLER *controller = &isci->controllers[index];
        SCI_CONTROLLER_HANDLE_T scif_controller_handle;

        controller->index = index;
        isci_controller_construct(controller, isci);

        scif_controller_handle = controller->scif_controller_handle;

        status = isci_controller_initialize(controller);

        if(status != SCI_SUCCESS) {
            isci_log_message(0, "ISCI",
                             "isci_controller_initialize FAILED: %x\n",
                             status);
            return (status);
        }

        error = isci_controller_allocate_memory(controller);

        if (error != 0)
            return (error);

        scif_controller_set_interrupt_coalescence(
            scif_controller_handle, isci->coalesce_number,
            isci->coalesce_timeout);
    }

    /* FreeBSD provides us a hook to ensure we get a chance to start
     *  our controllers and complete initial domain discovery before
     *  it searches for the boot device.  Once we're done, we'll
     *  disestablish the hook, signaling the kernel that it can proceed
     *  with the boot process.
     */
    isci->config_hook.ich_func = &isci_controller_start;
    isci->config_hook.ich_arg = &isci->controllers[0];

    if (config_intrhook_establish(&isci->config_hook) != 0)
        isci_log_message(0, "ISCI",
                         "config_intrhook_establish failed!\n");

    return (status);
}
Example #24
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI | ATH10K_DBG_MAC |
	    ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT | ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK,
	    MTX_DEF);

	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);

	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);

	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);

	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);

	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);

	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET, "%s pipe taskq",
	    device_get_nameunit(dev));
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__,
	    pci_msi_count(dev),
	    pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effectively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n", __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n", __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */

	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__,
		    ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__,
		    ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__, ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid, ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		err = ENXIO;
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:

	ath10k_htt_rx_free_desc(ar, &ar->htt);

	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);

	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	ath10k_core_destroy(ar);
bad0:
	return (err);
}
Example #25
static int
bcm2835_cpufreq_attach(device_t dev)
{
	struct bcm2835_cpufreq_softc *sc;
	struct sysctl_oid *oid;
	int err;

	/* set self dev */
	sc = device_get_softc(dev);
	sc->dev = dev;

	/* initial values */
	sc->arm_max_freq = -1;
	sc->arm_min_freq = -1;
	sc->core_max_freq = -1;
	sc->core_min_freq = -1;
	sc->sdram_max_freq = -1;
	sc->sdram_min_freq = -1;
	sc->max_voltage_core = 0;
	sc->min_voltage_core = 0;

	/* create VC mbox buffer */
	sc->dma_size = PAGE_SIZE;
	err = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    sc->dma_size, 1,		/* maxsize, nsegments */
	    sc->dma_size, 0,		/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dma_tag);
	if (err) {
		device_printf(dev, "can't create DMA tag\n");
		return (ENXIO);
	}

	err = bus_dmamem_alloc(sc->dma_tag, (void **)&sc->dma_buf, 0,
	    &sc->dma_map);
	if (err) {
		bus_dma_tag_destroy(sc->dma_tag);
		device_printf(dev, "can't allocate dmamem\n");
		return (ENXIO);
	}

	err = bus_dmamap_load(sc->dma_tag, sc->dma_map, sc->dma_buf,
	    sc->dma_size, bcm2835_cpufreq_cb, &sc->dma_phys, 0);
	if (err) {
		bus_dmamem_free(sc->dma_tag, sc->dma_buf, sc->dma_map);
		bus_dma_tag_destroy(sc->dma_tag);
		device_printf(dev, "can't load DMA map\n");
		return (ENXIO);
	}
	/* OK, ready to use VC buffer */

	/* setup sysctl at first device */
	if (device_get_unit(dev) == 0) {
		sysctl_ctx_init(&bcm2835_sysctl_ctx);
		/* create node for hw.cpufreq */
		oid = SYSCTL_ADD_NODE(&bcm2835_sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, "cpufreq",
		    CTLFLAG_RD, NULL, "");

		/* Frequency (Hz) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "arm_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_arm_freq, "IU",
		    "ARM frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "core_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_core_freq, "IU",
		    "Core frequency (Hz)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "sdram_freq", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_sdram_freq, "IU",
		    "SDRAM frequency (Hz)");

		/* Turbo state */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "turbo", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_turbo, "IU",
		    "Disables dynamic clocking");

		/* Voltage (offset from 1.2V in units of 0.025V) */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_core", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		    sysctl_bcm2835_cpufreq_voltage_core, "I",
		    "ARM/GPU core voltage "
		    "(offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram", CTLTYPE_INT | CTLFLAG_WR, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram, "I",
		    "SDRAM voltage (offset from 1.2V in units of 0.025V)");

		/* Voltage individual SDRAM */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_c", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_c, "I",
		    "SDRAM controller voltage "
		    "(offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_i", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_i, "I",
		    "SDRAM I/O voltage (offset from 1.2V in units of 0.025V)");
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "voltage_sdram_p", CTLTYPE_INT | CTLFLAG_RW, sc,
		    0, sysctl_bcm2835_cpufreq_voltage_sdram_p, "I",
		    "SDRAM phy voltage (offset from 1.2V in units of 0.025V)");

		/* Temperature */
		SYSCTL_ADD_PROC(&bcm2835_sysctl_ctx, SYSCTL_CHILDREN(oid),
		    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD, sc, 0,
		    sysctl_bcm2835_cpufreq_temperature, "I",
		    "SoC temperature (thousandths of a degree C)");
	}

	/* ARM->VC lock */
	sema_init(&vc_sema, 1, "vcsema");

	/* register callback for using mbox when interrupts are enabled */
	sc->init_hook.ich_func = bcm2835_cpufreq_init;
	sc->init_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->init_hook) != 0) {
		bus_dmamap_unload(sc->dma_tag, sc->dma_map);
		bus_dmamem_free(sc->dma_tag, sc->dma_buf, sc->dma_map);
		bus_dma_tag_destroy(sc->dma_tag);
		device_printf(dev, "config_intrhook_establish failed\n");
		return (ENOMEM);
	}

	/* this device is controlled by cpufreq(4) */
	cpufreq_register(dev);

	return (0);
}