Example #1
static int
usie_driver_loaded(struct module *mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		/* register autoinstall handler */
		usie_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
		    usie_autoinst, NULL, EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(usb_dev_configured, usie_etag);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
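The shape above is the standard module-event idiom: the tag returned by EVENTHANDLER_REGISTER() is kept in a file-scope variable so the handler can be torn down with EVENTHANDLER_DEREGISTER() on MOD_UNLOAD. Below is a minimal sketch of how such a handler is typically wired into the module system; the module name is made up for this sketch, and the real driver hooks usie_driver_loaded up through its own DRIVER_MODULE()/DECLARE_MODULE() invocation.

/*
 * Hypothetical wiring: hand the event function to the module system so
 * it is called with MOD_LOAD / MOD_UNLOAD at the right times.
 */
static moduledata_t example_mod = {
	"example",		/* module name (made up for this sketch) */
	usie_driver_loaded,	/* module event handler */
	NULL			/* extra data */
};
DECLARE_MODULE(example, example_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);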
Example #2
int
atkbd_attach_unit(device_t dev, keyboard_t **kbd, int irq, int flags)
{
	keyboard_switch_t *sw;
	int args[2];
	int error;
	int unit;

	sw = kbd_get_switch(ATKBD_DRIVER_NAME);
	if (sw == NULL)
		return ENXIO;

	/* reset, initialize and enable the device */
	unit = device_get_unit(dev);
	args[0] = device_get_unit(device_get_parent(dev));
	args[1] = irq;
	*kbd = NULL;
	error = (*sw->probe)(unit, args, flags);
	if (error)
		return error;
	error = (*sw->init)(unit, kbd, args, flags);
	if (error)
		return error;
	(*sw->enable)(*kbd);

#ifdef KBD_INSTALL_CDEV
	/* attach a virtual keyboard cdev */
	error = kbd_attach(*kbd);
	if (error)
		return error;
#endif

	/*
	 * This is a kludge to compensate for lost keyboard interrupts.
	 * Similar code used to be in syscons. See below. XXX
	 */
	atkbd_timeout(*kbd);

	if (bootverbose)
		(*sw->diag)(*kbd, bootverbose);

	EVENTHANDLER_REGISTER(shutdown_final, atkbd_shutdown_final, *kbd,
	    SHUTDOWN_PRI_DEFAULT);

	return 0;
}
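Here the tag returned by EVENTHANDLER_REGISTER() is discarded, so the shutdown hook stays installed for the lifetime of the kernel. shutdown_final handlers run very late in the shutdown path and receive the private argument plus the howto flags; a hypothetical callback matching the registration above (not the real atkbd_shutdown_final()) could look like this:

/*
 * Sketch of a shutdown_final callback, assuming the usual
 * (void *arg, int howto) shutdown-handler signature.
 */
static void
example_shutdown_final(void *arg, int howto)
{
	keyboard_t *kbd = arg;

	if (kbd != NULL) {
		/* quiesce the keyboard before the machine goes down */
	}
	/* 'howto' carries RB_HALT/RB_POWEROFF-style flags if needed */
}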
Example #3
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate("Mbuf", MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MAXBUCKET);
	zone_clust = uma_zcreate("MbufClust", MCLBYTES, mb_ctor_clust,
	    mb_dtor_clust, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		uma_zone_set_max(zone_clust, nmbclusters);
	zone_pack = uma_zsecond_create("Packet", mb_ctor_pack, mb_dtor_pack,
	    mb_init_pack, mb_fini_pack, zone_mbuf);

	/* uma_prealloc() goes here */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
Example #4
/*
 * The function called at load/unload.
 */
static int
fsyscall_modevent(struct module *_, int cmd, void *__)
{
	int error = 0;

	switch (cmd) {
	case MOD_LOAD :
		fsyscall_exit_tag = EVENTHANDLER_REGISTER(process_exit, process_exit, NULL, EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD :
		EVENTHANDLER_DEREGISTER(process_exit, fsyscall_exit_tag);
		break;
	default :
		error = EOPNOTSUPP;
		break;
	}
	return (error);
}
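The registration above is slightly confusing to read because the callback is also named process_exit; the first macro argument is the event name and the second is the function being hooked, so the two do not collide. process_exit handlers receive the private argument plus the exiting process; a minimal hypothetical callback would be:

/*
 * Sketch of a process_exit callback, assuming the usual
 * (void *arg, struct proc *p) exit-list signature.
 */
static void
example_process_exit(void *arg, struct proc *p)
{
	/* tear down any per-process state this module keeps for p */
}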
Example #5
static int
pmc_isa_attach(device_t dev)
{
	struct pmc_isa_softc *sc = device_get_softc(dev);
	int error;

	error = pmc_isa_alloc_resources(dev);
	if (error) {
		device_printf(dev, "resource allocation failed\n");
		return error;
	}

	/* Power the system off using PMC */
	sc->evt = EVENTHANDLER_REGISTER(shutdown_final, pmc_poweroff, sc,
					SHUTDOWN_PRI_LAST);
	sc->flags = device_get_flags(dev);
	return 0;
}
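Because the tag is stored in the softc, the driver can remove the hook again when it goes away. A plausible detach counterpart (a sketch only; the function name is made up and the real driver may do more cleanup) is:

/*
 * Sketch of the matching detach path: drop the shutdown hook and
 * release whatever pmc_isa_alloc_resources() acquired.
 */
static int
example_pmc_isa_detach(device_t dev)
{
	struct pmc_isa_softc *sc = device_get_softc(dev);

	if (sc->evt != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_final, sc->evt);
	/* free bus resources allocated in attach here */
	return (0);
}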
Example #6
static int
ti_wdt_attach(device_t dev)
{
	struct ti_wdt_softc *sc;
	int rid;

	sc = device_get_softc(dev);
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}
	sc->sc_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource\n");
		ti_wdt_detach(dev);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE | INTR_TYPE_MISC,
		NULL, ti_wdt_intr, sc,  &sc->sc_intr) != 0) {
		device_printf(dev,
		    "unable to setup the interrupt handler\n");
		ti_wdt_detach(dev);
		return (ENXIO);
	}
	/* Reset, enable interrupts and stop the watchdog. */
	ti_wdt_reg_write(sc, TI_WDT_WDSC,
	    ti_wdt_reg_read(sc, TI_WDT_WDSC) | TI_WDSC_SR);
	while (ti_wdt_reg_read(sc, TI_WDT_WDSC) & TI_WDSC_SR)
		DELAY(10);
	ti_wdt_reg_write(sc, TI_WDT_WIRQENSET, TI_IRQ_EN_OVF | TI_IRQ_EN_DLY);
	ti_wdt_disable(sc);
	if (bootverbose)
		device_printf(dev, "revision: 0x%x\n",
		    ti_wdt_reg_read(sc, TI_WDT_WIDR));
	sc->sc_ev_tag = EVENTHANDLER_REGISTER(watchdog_list, ti_wdt_event, sc,
	    0);

	return (0);
}
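The handler registered on watchdog_list is what watchdog(9) invokes when the timeout is (re)armed. Such callbacks receive the private argument, a cmd value whose low bits encode the timeout as a power-of-two number of nanoseconds, and an error pointer that is cleared on success. A simplified, hypothetical handler shape (not the real ti_wdt_event()) is sketched below:

/*
 * Sketch of a watchdog_list callback, assuming the watchdog(9)
 * (void *arg, u_int cmd, int *error) convention.
 */
static void
example_wdt_event(void *arg, u_int cmd, int *error)
{
	struct ti_wdt_softc *sc = arg;
	u_int timeout = cmd & WD_INTERVAL;	/* log2 of nanoseconds */

	if (timeout == 0) {
		/* a zero interval means "disable the watchdog" */
		ti_wdt_disable(sc);
		return;
	}
	/* convert 'timeout' into a hardware reload value, arm the
	 * timer, then report success */
	*error = 0;
}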
Example #7
static int
dcons_crom_attach(device_t dev)
{
	struct dcons_crom_softc *sc;
	int error;

	if (dcons_conf->buf == NULL)
		return (ENXIO);
        sc = (struct dcons_crom_softc *) device_get_softc(dev);
	sc->fd.fc = device_get_ivars(dev);
	sc->fd.dev = dev;
	sc->fd.post_explore = NULL;
	sc->fd.post_busreset = (void *) dcons_crom_post_busreset;

	/* map dcons buffer */
	error = bus_dma_tag_create(
		/*parent*/ sc->fd.fc->dmat,
		/*alignment*/ sizeof(u_int32_t),
		/*boundary*/ 0,
		/*lowaddr*/ BUS_SPACE_MAXADDR,
		/*highaddr*/ BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/ dcons_conf->size,
		/*nsegments*/ 1,
		/*maxsegsz*/ BUS_SPACE_MAXSIZE_32BIT,
		/*flags*/ BUS_DMA_ALLOCNOW,
		/*lockfunc*/busdma_lock_mutex,
		/*lockarg*/&Giant,
		&sc->dma_tag);
	if (error != 0)
		return (error);
	error = bus_dmamap_create(sc->dma_tag, BUS_DMA_COHERENT, &sc->dma_map);
	if (error != 0)
		return (error);
	error = bus_dmamap_load(sc->dma_tag, sc->dma_map,
	    (void *)dcons_conf->buf, dcons_conf->size,
	    dmamap_cb, sc, 0);
	if (error != 0)
		return (error);
	sc->ehand = EVENTHANDLER_REGISTER(dcons_poll, dcons_crom_poll,
			 (void *)sc, 0);
	return (0);
}
Example #8
static int
at91_st_attach(device_t dev)
{
	int err;

	timer_softc = device_get_softc(dev);
	err = at91_st_activate(dev);
	if (err)
		return err;

	timer_softc->sc_wet = EVENTHANDLER_REGISTER(watchdog_list,
	  at91_st_watchdog, dev, 0);

	device_printf(dev,
	  "watchdog registered, timeout interval max. 64 sec\n");

	at91_st_initclocks(dev, timer_softc);
	return (0);
}
Example #9
static int VBoxDrvFreeBSDLoad(void)
{
    g_cUsers = 0;

    /*
     * Initialize the runtime.
     */
    int rc = RTR0Init(0);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxDrvFreeBSDLoad:\n"));

        /*
         * Initialize the device extension.
         */
        rc = supdrvInitDevExt(&g_VBoxDrvFreeBSDDevExt, sizeof(SUPDRVSESSION));
        if (RT_SUCCESS(rc))
        {
            /*
             * Configure device cloning.
             */
            clone_setup(&g_pVBoxDrvFreeBSDClones);
            g_VBoxDrvFreeBSDEHTag = EVENTHANDLER_REGISTER(dev_clone, VBoxDrvFreeBSDClone, 0, 1000);
            if (g_VBoxDrvFreeBSDEHTag)
            {
                Log(("VBoxDrvFreeBSDLoad: returns successfully\n"));
                return VINF_SUCCESS;
            }

            printf("vboxdrv: EVENTHANDLER_REGISTER(dev_clone,,,) failed\n");
            clone_cleanup(&g_pVBoxDrvFreeBSDClones);
            rc = VERR_ALREADY_LOADED;
            supdrvDeleteDevExt(&g_VBoxDrvFreeBSDDevExt);
        }
        else
            printf("vboxdrv: supdrvInitDevExt failed, rc=%d\n", rc);
        RTR0Term();
    }
    else
        printf("vboxdrv: RTR0Init failed, rc=%d\n", rc);
    return rc;
}
Example #10
static int
uhso_driver_loaded(struct module *mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		/* register our autoinstall handler */
		uhso_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
		    uhso_test_autoinst, NULL, EVENTHANDLER_PRI_ANY);
		/* create our unit allocator for inet devs */
		uhso_ifnet_unit = new_unrhdr(0, INT_MAX, NULL);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(usb_dev_configured, uhso_etag);
		delete_unrhdr(uhso_ifnet_unit);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}
Example #11
void
udp_init(void)
{

	/*
	 * For now default to 2-tuple UDP hashing - until the fragment
	 * reassembly code can also update the flowid.
	 *
	 * Once we can calculate the flowid that way and re-establish
	 * a 4-tuple, flip this to 4-tuple.
	 */
	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
	    "udp_inpcb", udp_inpcb_init, IPI_HASHFIELDS_2TUPLE);
	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_udpcb_zone, maxsockets);
	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}
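The maxsockets_change hook keeps the zone limit in step with the kern.ipc.maxsockets tunable after boot. A sketch of what the registered callback presumably does (the real udp_zone_change() lives next to udp_init()):

/*
 * Sketch of a maxsockets_change callback: re-apply the current global
 * socket limit to the udpcb zone.
 */
static void
example_udp_zone_change(void *tag)
{

	uma_zone_set_max(V_udpcb_zone, maxsockets);
}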
Example #12
static void *
acpi_alloc_wakeup_handler(void *wakepages[ACPI_WAKEPAGES])
{
	int		i;

	memset(wakepages, 0, ACPI_WAKEPAGES * sizeof(*wakepages));

	/*
	 * Specify the region for our wakeup code.  We want it in the low 1 MB
	 * region, excluding real mode IVT (0-0x3ff), BDA (0x400-0x4ff), EBDA
	 * (less than 128KB, below 0xa0000, must be excluded by SMAP and DSDT),
	 * and ROM area (0xa0000 and above).  The temporary page tables must be
	 * page-aligned.
	 */
	for (i = 0; i < ACPI_WAKEPAGES; i++) {
		wakepages[i] = contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT,
		    0x500, 0xa0000, PAGE_SIZE, 0ul);
		if (wakepages[i] == NULL) {
			printf("%s: can't alloc wake memory\n", __func__);
			goto freepages;
		}
	}
	if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL,
	    EVENTHANDLER_PRI_LAST) == NULL) {
		printf("%s: can't register event handler\n", __func__);
		goto freepages;
	}
	susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK);
	for (i = 0; i < mp_ncpus; i++) {
		susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK);
		susppcbs[i]->sp_fpususpend = alloc_fpusave(M_WAITOK);
	}

	return (wakepages);

freepages:
	for (i = 0; i < ACPI_WAKEPAGES; i++)
		if (wakepages[i] != NULL)
			contigfree(wakepages[i], PAGE_SIZE, M_DEVBUF);
	return (NULL);
}
Example #13
static int
fuse_loader(struct module *m, int what, void *arg)
{
	static eventhandler_tag  eh_tag = NULL;
	int err = 0;

	switch (what) {
	case MOD_LOAD:                /* kldload */
		fuse_pbuf_freecnt = nswbuf / 2 + 1;
		clone_setup(&fuseclones);
		mtx_init(&fuse_mtx, "fuse_mtx", NULL, MTX_DEF);
		eh_tag = EVENTHANDLER_REGISTER(dev_clone, fuse_device_clone, 0,
		                               1000);
		if (eh_tag == NULL) {
			clone_cleanup(&fuseclones);
			mtx_destroy(&fuse_mtx);
			return (ENOMEM);
		}

		fuse_ipc_init();

		/* vfs_modevent ignores its first arg */
		if ((err = vfs_modevent(NULL, what, &fuse_vfsconf)))
			fuse_bringdown(eh_tag);
		else
			printf("fuse-freebsd: version %s, FUSE ABI %d.%d\n",
			    FUSE_FREEBSD_VERSION,
			    FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);

		break;
	case MOD_UNLOAD:
		if ((err = vfs_modevent(NULL, what, &fuse_vfsconf)))
			return (err);
		fuse_bringdown(eh_tag);
		break;
	default:
		return (EINVAL);
	}

	return (err);
}
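dev_clone handlers run when something looks up a /dev name that does not exist yet; the handler may create or find a cdev and hand it back through the last argument. A rough, hypothetical callback shape (not the real fuse_device_clone()) follows; on reasonably recent FreeBSD the handler also receives the requesting credentials:

/*
 * Sketch of a dev_clone callback: claim names we recognize, leave
 * everything else alone.
 */
static void
example_dev_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	if (*dev != NULL)
		return;		/* another handler already claimed it */
	if (strcmp(name, "fuse") != 0)
		return;		/* not a name this module manages */
	/* create (or look up) the device node and return it, e.g.:
	 * *dev = make_dev(...); dev_ref(*dev); */
}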
Example #14
/* Called from attach */
int
sms1xxx_demux_init(struct sms1xxx_softc *sc)
{
    /* Mutexes */
    mtx_init(&sc->dvr.lock, "DVB Dvr lock", NULL, MTX_DEF);
    mtx_init(&sc->filterlock, "DVB Filter lock", NULL, MTX_DEF);

    /* DVR */
    sc->dvr.size = DVRBUFSIZE;
    sc->dvr.buf = malloc(sc->dvr.size, M_USBDEV, M_WAITOK);
    if(sc->dvr.buf == NULL) {
        ERR("could not allocate dvr buf\n");
        mtx_destroy(&sc->filterlock);
        mtx_destroy(&sc->dvr.lock);
        return (ENOMEM);
    }
    sms1xxx_demux_pesbuf_reset(sc, 0, "init");
    sc->dvr.state = 0;

    /* Filters */
    int i;
    for(i = 0; i < MAX_FILTERS; ++i) {
        sms1xxx_demux_filter_reset(&sc->filter[i]);
    }

    /* Devices */
    clone_setup(&sc->demux_clones);
    sc->clonetag = EVENTHANDLER_REGISTER(dev_clone, sms1xxx_demux_clone,
                         sc, 1000);
    sc->dvr.dev = make_dev(&sms1xxx_dvr_cdevsw,
        device_get_unit(sc->sc_dev),
        UID_ROOT, GID_WHEEL, 0666,
        "dvb/adapter%d/dvr0",
        device_get_unit(sc->sc_dev));
    if (sc->dvr.dev != NULL)
        sc->dvr.dev->si_drv1 = sc;

    TRACE(TRACE_MODULE,"created dvr0 device, addr=%p\n",sc->dvr.dev);
    return (0);
}
Example #15
static int
psci_v0_2_init(device_t dev)
{
    struct psci_softc *sc = device_get_softc(dev);
    int version;

    /* PSCI v0.2 specifies explicit function IDs. */
    sc->psci_fnids[PSCI_FN_VERSION]		    = PSCI_FNID_VERSION;
    sc->psci_fnids[PSCI_FN_CPU_SUSPEND]	    = PSCI_FNID_CPU_SUSPEND;
    sc->psci_fnids[PSCI_FN_CPU_OFF]		    = PSCI_FNID_CPU_OFF;
    sc->psci_fnids[PSCI_FN_CPU_ON]		    = PSCI_FNID_CPU_ON;
    sc->psci_fnids[PSCI_FN_AFFINITY_INFO]	    = PSCI_FNID_AFFINITY_INFO;
    sc->psci_fnids[PSCI_FN_MIGRATE]		    = PSCI_FNID_MIGRATE;
    sc->psci_fnids[PSCI_FN_MIGRATE_INFO_TYPE]   = PSCI_FNID_MIGRATE_INFO_TYPE;
    sc->psci_fnids[PSCI_FN_MIGRATE_INFO_UP_CPU] = PSCI_FNID_MIGRATE_INFO_UP_CPU;
    sc->psci_fnids[PSCI_FN_SYSTEM_OFF]	    = PSCI_FNID_SYSTEM_OFF;
    sc->psci_fnids[PSCI_FN_SYSTEM_RESET]	    = PSCI_FNID_SYSTEM_RESET;

    version = psci_get_version(sc);

    if (version == PSCI_RETVAL_NOT_SUPPORTED)
        return (1);

    if ((PSCI_VER_MAJOR(version) != 0) && (PSCI_VER_MINOR(version) != 2)) {
        device_printf(dev, "PSCI version number mismatched with DT\n");
        return (1);
    }

    if (bootverbose)
        device_printf(dev, "PSCI version 0.2 available\n");

    /*
     * We only register this for v0.2 since v0.1 doesn't support
     * system_reset.
     */
    EVENTHANDLER_REGISTER(shutdown_final, psci_shutdown, sc,
                          SHUTDOWN_PRI_LAST);

    return (0);
}
Example #16
static void
ald_daemon(void)
{
    int needwakeup;
    struct alq *alq;

    ald_thread = FIRST_THREAD_IN_PROC(ald_proc);

    alq_eventhandler_tag = EVENTHANDLER_REGISTER(shutdown_pre_sync,
                           ald_shutdown, NULL, SHUTDOWN_PRI_FIRST);

    ALD_LOCK();

    for (;;) {
        while ((alq = BSD_LIST_FIRST(&ald_active)) == NULL &&
                !ald_shutingdown)
            mtx_sleep(&ald_active, &ald_mtx, PWAIT, "aldslp", 0);

        /* Don't shutdown until all active ALQs are flushed. */
        if (ald_shutingdown && alq == NULL) {
            ALD_UNLOCK();
            break;
        }

        ALQ_LOCK(alq);
        ald_deactivate(alq);
        ALD_UNLOCK();
        needwakeup = alq_doio(alq);
        ALQ_UNLOCK(alq);
        if (needwakeup)
            wakeup_one(alq);
        ALD_LOCK();
    }

    kproc_exit(0);
}
Example #17
static int
nsmb_dev_load(module_t mod, int cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	    case MOD_LOAD:
		error = smb_sm_init();
		if (error)
			break;
		error = smb_iod_init();
		if (error) {
			smb_sm_done();
			break;
		}
		clone_setup(&nsmb_clones);
		nsmb_dev_tag = EVENTHANDLER_REGISTER(dev_clone, nsmb_dev_clone, 0, 1000);
		printf("netsmb_dev: loaded\n");
		break;
	    case MOD_UNLOAD:
		smb_iod_done();
		error = smb_sm_done();
		if (error)
			break;
		EVENTHANDLER_DEREGISTER(dev_clone, nsmb_dev_tag);
		drain_dev_clone_events();
		clone_cleanup(&nsmb_clones);
		destroy_dev_drain(&nsmb_cdevsw);
		printf("netsmb_dev: unloaded\n");
		break;
	    default:
		error = EINVAL;
		break;
	}
	return error;
}
Example #18
static int
imx_wdog_attach(device_t dev)
{
	struct imx_wdog_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	if (bus_alloc_resources(dev, imx_wdog_spec, sc->sc_res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "imx_wdt", MTX_DEF);

	sc->sc_dev = dev;
	sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);

	/* TODO: handle interrupt */

	EVENTHANDLER_REGISTER(watchdog_list, imx_watchdog, sc, 0);
	return (0);
}
Example #19
static void *
acpi_alloc_wakeup_handler(void)
{
	void		*wakeaddr;
	int		i;

	/*
	 * Specify the region for our wakeup code.  We want it in the low 1 MB
	 * region, excluding real mode IVT (0-0x3ff), BDA (0x400-0x4ff), EBDA
	 * (less than 128KB, below 0xa0000, must be excluded by SMAP and DSDT),
	 * and ROM area (0xa0000 and above).  The temporary page tables must be
	 * page-aligned.
	 */
	wakeaddr = contigmalloc((ACPI_PAGETABLES + 1) * PAGE_SIZE, M_DEVBUF,
	    M_WAITOK, 0x500, 0xa0000, PAGE_SIZE, 0ul);
	if (wakeaddr == NULL) {
		printf("%s: can't alloc wake memory\n", __func__);
		return (NULL);
	}
	if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL,
	    EVENTHANDLER_PRI_LAST) == NULL) {
		printf("%s: can't register event handler\n", __func__);
		contigfree(wakeaddr, (ACPI_PAGETABLES + 1) * PAGE_SIZE,
		    M_DEVBUF);
		return (NULL);
	}
	susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK);
	for (i = 0; i < mp_ncpus; i++) {
		susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK);
#ifdef __amd64__
		susppcbs[i]->pcb_fpususpend = alloc_fpusave(M_WAITOK);
#endif
	}

	return (wakeaddr);
}
Example #20
static int
pst_attach(device_t dev)
{
    struct pst_softc *psc = device_get_softc(dev);
    struct i2o_get_param_reply *reply;
    struct i2o_device_identity *ident;
    int lun = device_get_unit(dev);
    int8_t name [32];

    if (!(reply = iop_get_util_params(psc->iop, psc->lct->local_tid,
				      I2O_PARAMS_OPERATION_FIELD_GET,
				      I2O_BSA_DEVICE_INFO_GROUP_NO)))
	return ENODEV;

    if (!(psc->info = (struct i2o_bsa_device *)
	    malloc(sizeof(struct i2o_bsa_device), M_PSTRAID, M_NOWAIT))) {
	contigfree(reply, PAGE_SIZE, M_PSTIOP);
	return ENOMEM;
    }
    bcopy(reply->result, psc->info, sizeof(struct i2o_bsa_device));
    contigfree(reply, PAGE_SIZE, M_PSTIOP);

    if (!(reply = iop_get_util_params(psc->iop, psc->lct->local_tid,
				      I2O_PARAMS_OPERATION_FIELD_GET,
				      I2O_UTIL_DEVICE_IDENTITY_GROUP_NO)))
	return ENODEV;
    ident = (struct i2o_device_identity *)reply->result;
#ifdef PSTDEBUG	   
    printf("pst: vendor=<%.16s> product=<%.16s>\n",
	   ident->vendor, ident->product);
    printf("pst: description=<%.16s> revision=<%.8s>\n",
	   ident->description, ident->revision);
    printf("pst: capacity=%lld blocksize=%d\n",
	   psc->info->capacity, psc->info->block_size);
#endif
    bpack(ident->vendor, ident->vendor, 16);
    bpack(ident->product, ident->product, 16);
    sprintf(name, "%s %s", ident->vendor, ident->product);
    contigfree(reply, PAGE_SIZE, M_PSTIOP);

    bioq_init(&psc->queue);

    psc->disk = disk_alloc();
    psc->disk->d_name = "pst";
    psc->disk->d_strategy = pststrategy;
    psc->disk->d_maxsize = 64 * 1024; /*I2O_SGL_MAX_SEGS * PAGE_SIZE;*/
    psc->disk->d_drv1 = psc;
    psc->disk->d_unit = lun;

    psc->disk->d_sectorsize = psc->info->block_size;
    psc->disk->d_mediasize = psc->info->capacity;
    psc->disk->d_fwsectors = 63;
    psc->disk->d_fwheads = 255;

    disk_create(psc->disk, DISK_VERSION);

    printf("pst%d: %lluMB <%.40s> [%lld/%d/%d] on %.16s\n", lun,
	   (unsigned long long)psc->info->capacity / (1024 * 1024),
	   name, psc->info->capacity/(512*255*63), 255, 63,
	   device_get_nameunit(psc->iop->dev));

    EVENTHANDLER_REGISTER(shutdown_post_sync, pst_shutdown,
			  dev, SHUTDOWN_PRI_FIRST);
    return 0;
}
Example #21
/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too. Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust, mb_dtor_clust,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);

	zone_ext_refcnt = uma_zcreate(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int),
	    NULL, NULL,
	    NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	/* uma_prealloc() goes here... */

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
#ifndef __rtems__
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
#endif /* __rtems__ */

	/*
	 * [Re]set counters and local statistics knobs.
	 * XXX Some of these should go and be replaced, but UMA stat
	 * gathering needs to be revised.
	 */
	mbstat.m_mbufs = 0;
	mbstat.m_mclusts = 0;
	mbstat.m_drain = 0;
	mbstat.m_msize = MSIZE;
	mbstat.m_mclbytes = MCLBYTES;
	mbstat.m_minclsize = MINCLSIZE;
	mbstat.m_mlen = MLEN;
	mbstat.m_mhlen = MHLEN;
	mbstat.m_numtypes = MT_NTYPES;

	mbstat.m_mcfail = mbstat.m_mpfail = 0;
	mbstat.sf_iocnt = 0;
	mbstat.sf_allocwait = mbstat.sf_allocfail = 0;
}
Example #22
static int vgdrvFreeBSDAttach(device_t pDevice)
{
    int rc;
    int iResId;
    struct VBoxGuestDeviceState *pState;

    cUsers = 0;

    /*
     * Initialize IPRT R0 driver, which internally calls OS-specific r0 init.
     */
    rc = RTR0Init(0);
    if (RT_FAILURE(rc))
    {
        LogFunc(("RTR0Init failed.\n"));
        return ENXIO;
    }

    pState = device_get_softc(pDevice);

    /*
     * Allocate I/O port resource.
     */
    iResId                 = PCIR_BAR(0);
    pState->pIOPortRes     = bus_alloc_resource_any(pDevice, SYS_RES_IOPORT, &iResId, RF_ACTIVE);
    pState->uIOPortBase    = rman_get_start(pState->pIOPortRes);
    pState->iIOPortResId   = iResId;
    if (pState->uIOPortBase)
    {
        /*
         * Map the MMIO region.
         */
        iResId                   = PCIR_BAR(1);
        pState->pVMMDevMemRes    = bus_alloc_resource_any(pDevice, SYS_RES_MEMORY, &iResId, RF_ACTIVE);
        pState->VMMDevMemHandle  = rman_get_bushandle(pState->pVMMDevMemRes);
        pState->VMMDevMemSize    = rman_get_size(pState->pVMMDevMemRes);

        pState->pMMIOBase        = rman_get_virtual(pState->pVMMDevMemRes);
        pState->iVMMDevMemResId  = iResId;
        if (pState->pMMIOBase)
        {
            /*
             * Call the common device extension initializer.
             */
            rc = VGDrvCommonInitDevExt(&g_DevExt, pState->uIOPortBase,
                                       pState->pMMIOBase, pState->VMMDevMemSize,
#if ARCH_BITS == 64
                                       VBOXOSTYPE_FreeBSD_x64,
#else
                                       VBOXOSTYPE_FreeBSD,
#endif
                                       VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Add IRQ of VMMDev.
                 */
                rc = vgdrvFreeBSDAddIRQ(pDevice, pState);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Read host configuration.
                     */
                    VGDrvCommonProcessOptionsFromHost(&g_DevExt);

                    /*
                     * Configure device cloning.
                     */
                    clone_setup(&g_pvgdrvFreeBSDClones);
                    g_vgdrvFreeBSDEHTag = EVENTHANDLER_REGISTER(dev_clone, vgdrvFreeBSDClone, 0, 1000);
                    if (g_vgdrvFreeBSDEHTag)
                    {
                        printf(DEVICE_NAME ": loaded successfully\n");
                        return 0;
                    }

                    printf(DEVICE_NAME ": EVENTHANDLER_REGISTER(dev_clone,,,) failed\n");
                    clone_cleanup(&g_pvgdrvFreeBSDClones);
                    vgdrvFreeBSDRemoveIRQ(pDevice, pState);
                }
                else
                    printf((DEVICE_NAME ": vgdrvFreeBSDAddIRQ failed.\n"));
                VGDrvCommonDeleteDevExt(&g_DevExt);
            }
            else
                printf((DEVICE_NAME ": VGDrvCommonInitDevExt failed.\n"));
        }
        else
            printf((DEVICE_NAME ": MMIO region setup failed.\n"));
    }
    else
        printf((DEVICE_NAME ": IOport setup failed.\n"));

    RTR0Term();
    return ENXIO;
}
Example #23
static int
mpt_pci_attach(device_t dev)
{
	struct mpt_softc *mpt;
	int		  iqd;
	uint32_t	  data, cmd;
	int		  mpt_io_bar, mpt_mem_bar;

	mpt  = (struct mpt_softc*)device_get_softc(dev);

	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC909_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
	case MPI_MANUFACTPAGE_DEVICEID_FC919_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X:
	case MPI_MANUFACTPAGE_DEVICEID_FC929X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X:
	case MPI_MANUFACTPAGE_DEVICEID_FC919X_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		mpt->is_fc = 1;
		break;
	case MPI_MANUFACTPAGE_DEVID_SAS1078:
	case MPI_MANUFACTPAGE_DEVID_SAS1078DE_FB:
		mpt->is_1078 = 1;
		/* FALLTHROUGH */
	case MPI_MANUFACTPAGE_DEVID_SAS1064:
	case MPI_MANUFACTPAGE_DEVID_SAS1064A:
	case MPI_MANUFACTPAGE_DEVID_SAS1064E:
	case MPI_MANUFACTPAGE_DEVID_SAS1066:
	case MPI_MANUFACTPAGE_DEVID_SAS1066E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068:
	case MPI_MANUFACTPAGE_DEVID_SAS1068A_FB:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E:
	case MPI_MANUFACTPAGE_DEVID_SAS1068E_FB:
		mpt->is_sas = 1;
		break;
	default:
		mpt->is_spi = 1;
		break;
	}
	mpt->dev = dev;
	mpt->unit = device_get_unit(dev);
	mpt->raid_resync_rate = MPT_RAID_RESYNC_RATE_DEFAULT;
	mpt->raid_mwce_setting = MPT_RAID_MWCE_DEFAULT;
	mpt->raid_queue_depth = MPT_RAID_QUEUE_DEPTH_DEFAULT;
	mpt->verbose = MPT_PRT_NONE;
	mpt->role = MPT_ROLE_NONE;
	mpt->mpt_ini_id = MPT_INI_ID_NONE;
#ifdef __sparc64__
	if (mpt->is_spi)
		mpt->mpt_ini_id = OF_getscsinitid(dev);
#endif
	mpt_set_options(mpt);
	if (mpt->verbose == MPT_PRT_NONE) {
		mpt->verbose = MPT_PRT_WARN;
		/* Print INFO level (if any) if bootverbose is set */
		mpt->verbose += (bootverbose != 0)? 1 : 0;
	}
	/* Make sure memory access decoders are enabled */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_MEMEN) == 0) {
		device_printf(dev, "Memory accesses disabled");
		return (ENXIO);
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd |=
	    PCIM_CMD_SERRESPEN | PCIM_CMD_PERRESPEN |
	    PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_BIOS, 4);
	data &= ~PCIM_BIOS_ENABLE;
	pci_write_config(dev, PCIR_BIOS, data, 4);

	/*
	 * Is this part a dual?
	 * If so, link with our partner (if it is around yet).
	 */
	switch (pci_get_device(dev)) {
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC929_LAN_FB:
	case MPI_MANUFACTPAGE_DEVICEID_FC949E:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
	case MPI_MANUFACTPAGE_DEVID_53C1030:
	case MPI_MANUFACTPAGE_DEVID_53C1030ZC:
		mpt_link_peer(mpt);
		break;
	default:
		break;
	}

	/*
	 * Figure out which are the I/O and MEM Bars
	 */
	data = pci_read_config(dev, PCIR_BAR(0), 4);
	if (PCI_BAR_IO(data)) {
		/* BAR0 is IO, BAR1 is memory */
		mpt_io_bar = 0;
		mpt_mem_bar = 1;
	} else {
		/* BAR0 is memory, BAR1 is IO */
		mpt_mem_bar = 0;
		mpt_io_bar = 1;
	}

	/*
	 * Set up register access.  PIO mode is required for
	 * certain reset operations (but must be disabled for
	 * some cards otherwise).
	 */
	mpt_io_bar = PCIR_BAR(mpt_io_bar);
	mpt->pci_pio_reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
	    &mpt_io_bar, RF_ACTIVE);
	if (mpt->pci_pio_reg == NULL) {
		if (bootverbose) {
			device_printf(dev,
			    "unable to map registers in PIO mode\n");
		}
	} else {
		mpt->pci_pio_st = rman_get_bustag(mpt->pci_pio_reg);
		mpt->pci_pio_sh = rman_get_bushandle(mpt->pci_pio_reg);
	}

	mpt_mem_bar = PCIR_BAR(mpt_mem_bar);
	mpt->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &mpt_mem_bar, RF_ACTIVE);
	if (mpt->pci_reg == NULL) {
		if (bootverbose || mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev,
			    "Unable to memory map registers.\n");
		}
		if (mpt->is_sas || mpt->pci_pio_reg == NULL) {
			device_printf(dev, "Giving Up.\n");
			goto bad;
		}
		if (bootverbose) {
			device_printf(dev, "Falling back to PIO mode.\n");
		}
		mpt->pci_st = mpt->pci_pio_st;
		mpt->pci_sh = mpt->pci_pio_sh;
	} else {
		mpt->pci_st = rman_get_bustag(mpt->pci_reg);
		mpt->pci_sh = rman_get_bushandle(mpt->pci_reg);
	}

	/* Get a handle to the interrupt */
	iqd = 0;
	if (mpt->msi_enable) {
		/*
		 * First try to alloc an MSI-X message.  If that
		 * fails, then try to alloc an MSI message instead.
		 */
		if (pci_msix_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msix(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
		if (iqd == 0 && pci_msi_count(dev) == 1) {
			mpt->pci_msi_count = 1;
			if (pci_alloc_msi(dev, &mpt->pci_msi_count) == 0) {
				iqd = 1;
			} else {
				mpt->pci_msi_count = 0;
			}
		}
	}
	mpt->pci_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd,
	    RF_ACTIVE | (mpt->pci_msi_count ? 0 : RF_SHAREABLE));
	if (mpt->pci_irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	MPT_LOCK_SETUP(mpt);

	/* Disable interrupts at the part */
	mpt_disable_ints(mpt);

	/* Register the interrupt handler */
	if (mpt_setup_intr(dev, mpt->pci_irq, MPT_IFLAGS, NULL, mpt_pci_intr,
	    mpt, &mpt->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/* Allocate dma memory */
	if (mpt_dma_mem_alloc(mpt)) {
		mpt_prt(mpt, "Could not allocate DMA memory\n");
		goto bad;
	}

#if 0
	/*
	 * Save the PCI config register values
 	 *
	 * Hard resets are known to screw up the BAR for diagnostic
	 * memory accesses (Mem1).
	 *
	 * Using Mem1 is known to make the chip stop responding to 
	 * configuration space transfers, so we need to save it now
	 */

	mpt_read_config_regs(mpt);
#endif

	/*
	 * Disable PIO until we need it
	 */
	if (mpt->is_sas) {
		pci_disable_io(dev, SYS_RES_IOPORT);
	}

	/* Initialize the hardware */
	if (mpt->disabled == 0) {
		if (mpt_attach(mpt) != 0) {
			goto bad;
		}
	} else {
		mpt_prt(mpt, "device disabled at user request\n");
		goto bad;
	}

	mpt->eh = EVENTHANDLER_REGISTER(shutdown_post_sync, mpt_pci_shutdown,
	    dev, SHUTDOWN_PRI_DEFAULT);

	if (mpt->eh == NULL) {
		mpt_prt(mpt, "shutdown event registration failed\n");
		(void) mpt_detach(mpt);
		goto bad;
	}
	return (0);

bad:
	mpt_dma_mem_free(mpt);
	mpt_free_bus_resources(mpt);
	mpt_unlink_peer(mpt);

	MPT_LOCK_DESTROY(mpt);

	/*
	 * but return zero to preserve unit numbering
	 */
	return (0);
}
Example #24
/*
 * Initialize FreeBSD Network buffer allocation.
 */
static void
mbuf_init(void *dummy)
{

	/*
	 * Configure UMA zones for Mbufs, Clusters, and Packets.
	 */
	zone_mbuf = uma_zcreate(MBUF_MEM_NAME, MSIZE,
	    mb_ctor_mbuf, mb_dtor_mbuf,
#ifdef INVARIANTS
	    trash_init, trash_fini,
#else
	    NULL, NULL,
#endif
	    MSIZE - 1, UMA_ZONE_MAXBUCKET);
	if (nmbufs > 0)
		nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
	uma_zone_set_warning(zone_mbuf, "kern.ipc.nmbufs limit reached");
	uma_zone_set_maxaction(zone_mbuf, mb_reclaim);

	zone_clust = uma_zcreate(MBUF_CLUSTER_MEM_NAME, MCLBYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbclusters > 0)
		nmbclusters = uma_zone_set_max(zone_clust, nmbclusters);
	uma_zone_set_warning(zone_clust, "kern.ipc.nmbclusters limit reached");
	uma_zone_set_maxaction(zone_clust, mb_reclaim);

	zone_pack = uma_zsecond_create(MBUF_PACKET_MEM_NAME, mb_ctor_pack,
	    mb_dtor_pack, mb_zinit_pack, mb_zfini_pack, zone_mbuf);

	/* Make jumbo frame zone too. Page size, 9k and 16k. */
	zone_jumbop = uma_zcreate(MBUF_JUMBOP_MEM_NAME, MJUMPAGESIZE,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	if (nmbjumbop > 0)
		nmbjumbop = uma_zone_set_max(zone_jumbop, nmbjumbop);
	uma_zone_set_warning(zone_jumbop, "kern.ipc.nmbjumbop limit reached");
	uma_zone_set_maxaction(zone_jumbop, mb_reclaim);

	zone_jumbo9 = uma_zcreate(MBUF_JUMBO9_MEM_NAME, MJUM9BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo9, mbuf_jumbo_alloc);
	if (nmbjumbo9 > 0)
		nmbjumbo9 = uma_zone_set_max(zone_jumbo9, nmbjumbo9);
	uma_zone_set_warning(zone_jumbo9, "kern.ipc.nmbjumbo9 limit reached");
	uma_zone_set_maxaction(zone_jumbo9, mb_reclaim);

	zone_jumbo16 = uma_zcreate(MBUF_JUMBO16_MEM_NAME, MJUM16BYTES,
	    mb_ctor_clust,
#ifdef INVARIANTS
	    trash_dtor, trash_init, trash_fini,
#else
	    NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, 0);
	uma_zone_set_allocf(zone_jumbo16, mbuf_jumbo_alloc);
	if (nmbjumbo16 > 0)
		nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
	uma_zone_set_warning(zone_jumbo16, "kern.ipc.nmbjumbo16 limit reached");
	uma_zone_set_maxaction(zone_jumbo16, mb_reclaim);

	/*
	 * Hook event handler for low-memory situation, used to
	 * drain protocols and push data back to the caches (UMA
	 * later pushes it back to VM).
	 */
	EVENTHANDLER_REGISTER(vm_lowmem, mb_reclaim, NULL,
	    EVENTHANDLER_PRI_FIRST);
}
Example #25
static int
cuda_attach(device_t dev)
{
	struct cuda_softc *sc;

	volatile int i;
	uint8_t reg;
	phandle_t node,child;
	
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	
	sc->sc_memrid = 0;
	sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
	    &sc->sc_memrid, RF_ACTIVE);

	if (sc->sc_memr == NULL) {
		device_printf(dev, "Could not alloc mem resource!\n");
		return (ENXIO);
	}

	sc->sc_irqrid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqrid,
            	RF_ACTIVE);
        if (sc->sc_irq == NULL) {
                device_printf(dev, "could not allocate interrupt\n");
                return (ENXIO);
        }

	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC | INTR_MPSAFE 
	    | INTR_ENTROPY, NULL, cuda_intr, dev, &sc->sc_ih) != 0) {
                device_printf(dev, "could not setup interrupt\n");
                bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqrid,
                    sc->sc_irq);
                return (ENXIO);
        }

	mtx_init(&sc->sc_mutex,"cuda",NULL,MTX_DEF | MTX_RECURSE);

	sc->sc_sent = 0;
	sc->sc_received = 0;
	sc->sc_waiting = 0;
	sc->sc_polling = 0;
	sc->sc_state = CUDA_NOTREADY;
	sc->sc_autopoll = 0;
	sc->sc_rtc = -1;

	STAILQ_INIT(&sc->sc_inq);
	STAILQ_INIT(&sc->sc_outq);
	STAILQ_INIT(&sc->sc_freeq);

	for (i = 0; i < CUDA_MAXPACKETS; i++)
		STAILQ_INSERT_TAIL(&sc->sc_freeq, &sc->sc_pkts[i], pkt_q);

	/* Init CUDA */

	reg = cuda_read_reg(sc, vDirB);
	reg |= 0x30;	/* register B bits 4 and 5: outputs */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vDirB);
	reg &= 0xf7;	/* register B bit 3: input */
	cuda_write_reg(sc, vDirB, reg);

	reg = cuda_read_reg(sc, vACR);
	reg &= ~vSR_OUT;	/* make sure SR is set to IN */
	cuda_write_reg(sc, vACR, reg);

	cuda_write_reg(sc, vACR, (cuda_read_reg(sc, vACR) | 0x0c) & ~0x10);

	sc->sc_state = CUDA_IDLE;	/* used by all types of hardware */

	cuda_write_reg(sc, vIER, 0x84); /* make sure VIA interrupts are on */

	cuda_idle(sc);	/* reset ADB */

	/* Reset CUDA */

	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x04); /* no interrupts while clearing */
	cuda_idle(sc);	/* reset state to idle */
	DELAY(150);
	cuda_tip(sc);	/* signal start of frame */
	DELAY(150);
	cuda_toggle_ack(sc);
	DELAY(150);
	cuda_clear_tip(sc);
	DELAY(150);
	cuda_idle(sc);	/* back to idle state */
	i = cuda_read_reg(sc, vSR);	/* clear interrupt */
	cuda_write_reg(sc, vIER, 0x84);	/* ints ok now */

	/* Initialize child buses (ADB) */
	node = ofw_bus_get_node(dev);

	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		char name[32];

		memset(name, 0, sizeof(name));
		OF_getprop(child, "name", name, sizeof(name));

		if (bootverbose)
			device_printf(dev, "CUDA child <%s>\n",name);

		if (strncmp(name, "adb", 4) == 0) {
			sc->adb_bus = device_add_child(dev,"adb",-1);
		}
	}

	clock_register(dev, 1000);
	EVENTHANDLER_REGISTER(shutdown_final, cuda_shutdown, sc,
	    SHUTDOWN_PRI_LAST);

	return (bus_generic_attach(dev));
}
Example #26
/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct thread *td = curthread;
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;

	/*
	 * syncer0 runs till system shutdown; per-filesystem syncers are
	 * terminated on filesystem unmount
	 */
	if (ctx == &syncer_ctx0) 
		EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
				      SHUTDOWN_PRI_LAST);
	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno += 1;
		if (ctx->syncer_delayno == syncer_maxdelay)
			ctx->syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
				VOP_FSYNC(vp, MNT_LAZY, 0);
				vput(vp);
				vnodes_synced++;
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we were
			 * unable to completely flush it, so move it to a
			 * later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves it to a later slot so we will never see it
			 * here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp || sc_flags & SC_FLAG_BIOOPS_ALL)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (ctx == &syncer_ctx0 && rushjob > 0) {
			atomic_subtract_int(&rushjob, 1);
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}
Example #27
static int
acpi_tz_attach(device_t dev)
{
    struct acpi_tz_softc	*sc;
    struct acpi_softc		*acpi_sc;
    int				error;
    char			oidname[8];

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->tz_dev = dev;
    sc->tz_handle = acpi_get_handle(dev);
    sc->tz_requested = TZ_ACTIVE_NONE;
    sc->tz_active = TZ_ACTIVE_UNKNOWN;
    sc->tz_thflags = TZ_THFLAG_NONE;
    sc->tz_cooling_proc = NULL;
    sc->tz_cooling_proc_running = FALSE;
    sc->tz_cooling_active = FALSE;
    sc->tz_cooling_updated = FALSE;
    sc->tz_cooling_enabled = FALSE;

    /*
     * Parse the current state of the thermal zone and build control
     * structures.  We don't need to worry about interference with the
     * control thread since we haven't fully attached this device yet.
     */
    if ((error = acpi_tz_establish(sc)) != 0)
	return (error);

    /*
     * Register for any Notify events sent to this zone.
     */
    AcpiInstallNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
			     acpi_tz_notify_handler, sc);

    /*
     * Create our sysctl nodes.
     *
     * XXX we need a mechanism for adding nodes under ACPI.
     */
    if (device_get_unit(dev) == 0) {
	acpi_sc = acpi_device_get_parent_softc(dev);
	sysctl_ctx_init(&acpi_tz_sysctl_ctx);
	acpi_tz_sysctl_tree = SYSCTL_ADD_NODE(&acpi_tz_sysctl_ctx,
			      SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
			      OID_AUTO, "thermal", CTLFLAG_RD, 0, "");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		       OID_AUTO, "min_runtime", CTLFLAG_RW,
		       &acpi_tz_min_runtime, 0,
		       "minimum cooling run time in sec");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		       OID_AUTO, "polling_rate", CTLFLAG_RW,
		       &acpi_tz_polling_rate, 0, "monitor polling interval in seconds");
	SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		       SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO,
		       "user_override", CTLFLAG_RW, &acpi_tz_override, 0,
		       "allow override of thermal settings");
    }
    sysctl_ctx_init(&sc->tz_sysctl_ctx);
    sprintf(oidname, "tz%d", device_get_unit(dev));
    sc->tz_sysctl_tree = SYSCTL_ADD_NODE(&sc->tz_sysctl_ctx,
					 SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
					 OID_AUTO, oidname, CTLFLAG_RD, 0, "");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD,
		    &sc->tz_temperature, 0, sysctl_handle_int,
		    "IK", "current thermal zone temperature");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "active", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, acpi_tz_active_sysctl, "I", "cooling is active");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "passive_cooling", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, acpi_tz_cooling_sysctl, "I",
		    "enable passive (speed reduction) cooling");

    SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		   OID_AUTO, "thermal_flags", CTLFLAG_RD,
		   &sc->tz_thflags, 0, "thermal zone flags");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_PSV", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.psv),
		    acpi_tz_temp_sysctl, "IK", "passive cooling temp setpoint");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_HOT", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.hot),
		    acpi_tz_temp_sysctl, "IK",
		    "too hot temp setpoint (suspend now)");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_CRT", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.crt),
		    acpi_tz_temp_sysctl, "IK",
		    "critical temp setpoint (shutdown now)");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_ACx", CTLTYPE_INT | CTLFLAG_RD,
		    &sc->tz_zone.ac, sizeof(sc->tz_zone.ac),
		    sysctl_handle_opaque, "IK", "");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TC1", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tc1),
		    acpi_tz_passive_sysctl, "I",
		    "thermal constant 1 for passive cooling");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TC2", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tc2),
		    acpi_tz_passive_sysctl, "I",
		    "thermal constant 2 for passive cooling");
    SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
		    OID_AUTO, "_TSP", CTLTYPE_INT | CTLFLAG_RW,
		    sc, offsetof(struct acpi_tz_softc, tz_zone.tsp),
		    acpi_tz_passive_sysctl, "I",
		    "thermal sampling period for passive cooling");

    /*
     * Create thread to service all of the thermal zones.  Register
     * our power profile event handler.
     */
    sc->tz_event = EVENTHANDLER_REGISTER(power_profile_change,
	acpi_tz_power_profile, sc, 0);
    if (acpi_tz_proc == NULL) {
	error = kproc_create(acpi_tz_thread, NULL, &acpi_tz_proc,
	    RFHIGHPID, 0, "acpi_thermal");
	if (error != 0) {
	    device_printf(sc->tz_dev, "could not create thread - %d", error);
	    goto out;
	}
    }

    /*
     * Create a thread to handle passive cooling for 1st zone which
     * has _PSV, _TSP, _TC1 and _TC2.  Users can enable it for other
     * zones manually for now.
     *
     * XXX We enable only one zone to avoid multiple zones conflict
     * with each other since cpufreq currently sets all CPUs to the
     * given frequency whereas it's possible for different thermal
     * zones to specify independent settings for multiple CPUs.
     */
    if (acpi_tz_cooling_unit < 0 && acpi_tz_cooling_is_available(sc))
	sc->tz_cooling_enabled = TRUE;
    if (sc->tz_cooling_enabled) {
	error = acpi_tz_cooling_thread_start(sc);
	if (error != 0) {
	    sc->tz_cooling_enabled = FALSE;
	    goto out;
	}
	acpi_tz_cooling_unit = device_get_unit(dev);
    }

    /*
     * Flag the event handler for a manual invocation by our timeout.
     * We defer it like this so that the rest of the subsystem has time
     * to come up.  Don't bother evaluating/printing the temperature at
     * this point; on many systems it'll be bogus until the EC is running.
     */
    sc->tz_flags |= TZ_FLAG_GETPROFILE;

out:
    if (error != 0) {
	EVENTHANDLER_DEREGISTER(power_profile_change, sc->tz_event);
	AcpiRemoveNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
	    acpi_tz_notify_handler);
	sysctl_ctx_free(&sc->tz_sysctl_ctx);
    }
    return_VALUE (error);
}
Example #28
static int
linux_elf_modevent(module_t mod, int type, void *data)
{
	Elf32_Brandinfo **brandinfo;
	int error;
	struct linux_ioctl_handler **lihp;
	struct linux_device_handler **ldhp;

	error = 0;

	switch(type) {
	case MOD_LOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_insert_brand_entry(*brandinfo) < 0)
				error = EINVAL;
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_register_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_register_handler(*ldhp);
			mtx_init(&emul_lock, "emuldata lock", NULL, MTX_DEF);
			sx_init(&emul_shared_lock, "emuldata->shared lock");
			LIST_INIT(&futex_list);
			mtx_init(&futex_mtx, "ftllk", NULL, MTX_DEF);
			linux_exit_tag = EVENTHANDLER_REGISTER(process_exit,
			    linux_proc_exit, NULL, 1000);
			linux_exec_tag = EVENTHANDLER_REGISTER(process_exec,
			    linux_proc_exec, NULL, 1000);
			linux_szplatform = roundup(strlen(linux_platform) + 1,
			    sizeof(char *));
			linux_osd_jail_register();
			stclohz = (stathz ? stathz : hz);
			if (bootverbose)
				printf("Linux ELF exec handler installed\n");
		} else
			printf("cannot insert Linux ELF brand handler\n");
		break;
	case MOD_UNLOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_brand_inuse(*brandinfo))
				error = EBUSY;
		if (error == 0) {
			for (brandinfo = &linux_brandlist[0];
			     *brandinfo != NULL; ++brandinfo)
				if (elf32_remove_brand_entry(*brandinfo) < 0)
					error = EINVAL;
		}
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_unregister_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_unregister_handler(*ldhp);
			mtx_destroy(&emul_lock);
			sx_destroy(&emul_shared_lock);
			mtx_destroy(&futex_mtx);
			EVENTHANDLER_DEREGISTER(process_exit, linux_exit_tag);
			EVENTHANDLER_DEREGISTER(process_exec, linux_exec_tag);
			linux_osd_jail_deregister();
			if (bootverbose)
				printf("Linux ELF exec handler removed\n");
		} else
			printf("Could not deinstall ELF interpreter entry\n");
		break;
	default:
		return EOPNOTSUPP;
	}
	return error;
}
Example #29
static int
ixl_attach(device_t dev)
{
	struct ixl_pf	*pf;
	struct i40e_hw	*hw;
	struct ixl_vsi  *vsi;
	enum i40e_status_code status;
	int             error = 0;

	INIT_DEBUGOUT("ixl_attach: begin");

	/* Allocate, clear, and link in our primary soft structure */
	pf = device_get_softc(dev);
	pf->dev = pf->osdep.dev = dev;
	hw = &pf->hw;

	/*
	** Note this assumes we have a single embedded VSI,
	** this could be enhanced later to allocate multiple
	*/
	vsi = &pf->vsi;
	vsi->dev = pf->dev;
	vsi->back = pf;

	/* Save tunable values */
	error = ixl_save_pf_tunables(pf);
	if (error)
		return (error);

	/* Core Lock Init*/
	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));

	/* Set up the timer callout */
	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);

	/* Do PCI setup - map BAR0, etc */
	if (ixl_allocate_pci_resources(pf)) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		error = ENXIO;
		goto err_out;
	}

	/* Establish a clean starting point */
	i40e_clear_hw(hw);
	status = i40e_pf_reset(hw);
	if (status) {
		device_printf(dev, "PF reset failure %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Initialize the shared code */
	status = i40e_init_shared_code(hw);
	if (status) {
		device_printf(dev, "Unable to initialize shared code, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}

	/* Set up the admin queue */
	hw->aq.num_arq_entries = IXL_AQ_LEN;
	hw->aq.num_asq_entries = IXL_AQ_LEN;
	hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
	hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;

	status = i40e_init_adminq(hw);
	if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
		    i40e_stat_str(hw, status));
		error = EIO;
		goto err_out;
	}
	ixl_print_nvm_version(pf);

	if (status == I40E_ERR_FIRMWARE_API_VERSION) {
		device_printf(dev, "The driver for the device stopped "
		    "because the NVM image is newer than expected.\n");
		device_printf(dev, "You must install the most recent version of "
		    "the network driver.\n");
		error = EIO;
		goto err_out;
	}

        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) {
		device_printf(dev, "The driver for the device detected "
		    "a newer version of the NVM image than expected.\n");
		device_printf(dev, "Please install the most recent version "
		    "of the network driver.\n");
	} else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) {
		device_printf(dev, "The driver for the device detected "
		    "an older version of the NVM image than expected.\n");
		device_printf(dev, "Please update the NVM image.\n");
	}

	/* Clear PXE mode */
	i40e_clear_pxe_mode(hw);

	/* Get capabilities from the device */
	error = ixl_get_hw_capabilities(pf);
	if (error) {
		device_printf(dev, "HW capabilities failure!\n");
		goto err_get_cap;
	}

	/*
	 * Allocate interrupts and figure out number of queues to use
	 * for PF interface
	 */
	pf->msix = ixl_init_msix(pf);

	/* Set up host memory cache */
	status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (status) {
		device_printf(dev, "init_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_get_cap;
	}

	status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (status) {
		device_printf(dev, "configure_lan_hmc failed: %s\n",
		    i40e_stat_str(hw, status));
		goto err_mac_hmc;
	}

	/* Init queue allocation manager */
	error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
	if (error) {
		device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	/* reserve a contiguous allocation for the PF's VSI */
	error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
	if (error) {
		device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
		    error);
		goto err_mac_hmc;
	}
	device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
	    pf->qtag.num_allocated, pf->qtag.num_active);

	/* Disable LLDP from the firmware for certain NVM versions */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		i40e_aq_stop_lldp(hw, TRUE, NULL);
		pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
	}

	/* Get MAC addresses from hardware */
	i40e_get_mac_addr(hw, hw->mac.addr);
	error = i40e_validate_mac_addr(hw->mac.addr);
	if (error) {
		device_printf(dev, "validate_mac_addr failed: %d\n", error);
		goto err_mac_hmc;
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);

	/* Query device FW LLDP status */
	ixl_get_fw_lldp_status(pf);
	/* Tell FW to apply DCB config on link up */
	if ((hw->mac.type != I40E_MAC_X722)
	    && ((pf->hw.aq.api_maj_ver > 1)
	    || (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver >= 7)))
		i40e_aq_set_dcb_parameters(hw, true, NULL);

	/* Initialize mac filter list for VSI */
	SLIST_INIT(&vsi->ftl);

	/* Set up SW VSI and allocate queue memory and rings */
	if (ixl_setup_stations(pf)) { 
		device_printf(dev, "setup stations failed!\n");
		error = ENOMEM;
		goto err_mac_hmc;
	}

	/* Setup OS network interface / ifnet */
	if (ixl_setup_interface(dev, vsi)) {
		device_printf(dev, "interface setup failed!\n");
		error = EIO;
		goto err_late;
	}

	/* Determine link state */
	if (ixl_attach_get_link_status(pf)) {
		error = EINVAL;
		goto err_late;
	}

	error = ixl_switch_config(pf);
	if (error) {
		device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
		     error);
		goto err_late;
	}

	/* Limit PHY interrupts to link, autoneg, and modules failure */
	status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
	    NULL);
        if (status) {
		device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
		    " aq_err %s\n", i40e_stat_str(hw, status),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		goto err_late;
	}

	/* Get the bus configuration and set the shared code's config */
	ixl_get_bus_info(pf);

	/*
	 * In MSI-X mode, initialize the Admin Queue interrupt,
	 * so userland tools can communicate with the adapter regardless of
	 * the ifnet interface's status.
	 */
	if (pf->msix > 1) {
		error = ixl_setup_adminq_msix(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_msix() error: %d\n",
			    error);
			goto err_late;
		}
		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}
		ixl_configure_intr0_msix(pf);
		ixl_enable_intr0(hw);

		error = ixl_setup_queue_msix(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
			    error);
		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	} else {
		error = ixl_setup_legacy(pf);

		error = ixl_setup_adminq_tq(pf);
		if (error) {
			device_printf(dev, "ixl_setup_adminq_tq() error: %d\n",
			    error);
			goto err_late;
		}

		error = ixl_setup_queue_tqs(vsi);
		if (error)
			device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
			    error);
	}

	if (error) {
		device_printf(dev, "interrupt setup error: %d\n", error);
	}

	/* Set initial advertised speed sysctl value */
	ixl_set_initial_advertised_speeds(pf);

	/* Initialize statistics & add sysctls */
	ixl_add_device_sysctls(pf);

	ixl_pf_reset_stats(pf);
	ixl_update_stats_counters(pf);
	ixl_add_hw_stats(pf);

	/* Register for VLAN events */
	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);

#ifdef PCI_IOV
	ixl_initialize_sriov(pf);
#endif

#ifdef DEV_NETMAP
	if (vsi->num_rx_desc == vsi->num_tx_desc) {
		vsi->queues[0].num_desc = vsi->num_rx_desc;
		ixl_netmap_attach(vsi);
	} else
		device_printf(dev,
		    "Netmap is not supported when RX and TX descriptor ring sizes differ\n");

#endif /* DEV_NETMAP */

#ifdef IXL_IW
	if (hw->func_caps.iwarp && ixl_enable_iwarp) {
		pf->iw_enabled = (pf->iw_msix > 0) ? true : false;
		if (pf->iw_enabled) {
			error = ixl_iw_pf_attach(pf);
			if (error) {
				device_printf(dev,
				    "interfacing to iwarp driver failed: %d\n",
				    error);
				goto err_late;
			} else
				device_printf(dev, "iWARP ready\n");
		} else
			device_printf(dev,
			    "iwarp disabled on this device (no msix vectors)\n");
	} else {
		pf->iw_enabled = false;
		device_printf(dev, "The device is not iWARP enabled\n");
	}
#endif

	INIT_DEBUGOUT("ixl_attach: end");
	return (0);

err_late:
	if (vsi->ifp != NULL) {
		ether_ifdetach(vsi->ifp);
		if_free(vsi->ifp);
	}
err_mac_hmc:
	i40e_shutdown_lan_hmc(hw);
err_get_cap:
	i40e_shutdown_adminq(hw);
err_out:
	ixl_free_pci_resources(pf);
	ixl_free_vsi(vsi);
	IXL_PF_LOCK_DESTROY(pf);
	return (error);
}
Example #30
static void
dtrace_load(void *dummy)
{
	dtrace_provider_id_t id;

	/* Hook into the trap handler. */
	dtrace_trap_func = dtrace_trap;

	/* Hang our hook for thread switches. */
	dtrace_vtime_switch_func = dtrace_vtime_switch;

	/* Hang our hook for exceptions. */
	dtrace_invop_init();

	/*
	 * XXX This is a short term hack to avoid having to comment
	 * out lots and lots of lock/unlock calls.
	 */
	mutex_init(&mod_lock,"XXX mod_lock hack", MUTEX_DEFAULT, NULL);

	/*
	 * Initialise the mutexes without 'witness' because the dtrace
	 * code is mostly written to wait for memory. To have the
	 * witness code change a malloc() from M_WAITOK to M_NOWAIT
	 * because a lock is held would surely create a panic in a
	 * low memory situation. And that low memory situation might be
	 * the very problem we are trying to trace.
	 */
	mutex_init(&dtrace_lock,"dtrace probe state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_provider_lock,"dtrace provider state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_meta_lock,"dtrace meta-provider state", MUTEX_DEFAULT, NULL);
	mutex_init(&dtrace_errlock,"dtrace error lock", MUTEX_DEFAULT, NULL);

	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);
	mutex_enter(&cpu_lock);

	ASSERT(MUTEX_HELD(&cpu_lock));

	dtrace_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);

	dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
	    sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
	    NULL, NULL, NULL, NULL, NULL, 0);

	ASSERT(MUTEX_HELD(&cpu_lock));
	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));

	dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
	    offsetof(dtrace_probe_t, dtpr_nextfunc),
	    offsetof(dtrace_probe_t, dtpr_prevfunc));

	dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
	    offsetof(dtrace_probe_t, dtpr_nextname),
	    offsetof(dtrace_probe_t, dtpr_prevname));

	if (dtrace_retain_max < 1) {
		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
		    "setting to 1", dtrace_retain_max);
		dtrace_retain_max = 1;
	}

	/*
	 * Now discover our toxic ranges.
	 */
	dtrace_toxic_ranges(dtrace_toxrange_add);

	/*
	 * Before we register ourselves as a provider to our own framework,
	 * we would like to assert that dtrace_provider is NULL -- but that's
	 * not true if we were loaded as a dependency of a DTrace provider.
	 * Once we've registered, we can assert that dtrace_provider is our
	 * pseudo provider.
	 */
	(void) dtrace_register("dtrace", &dtrace_provider_attr,
	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);

	ASSERT(dtrace_provider != NULL);
	ASSERT((dtrace_provider_id_t)dtrace_provider == id);

	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "END", 0, NULL);
	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
	    dtrace_provider, NULL, NULL, "ERROR", 1, NULL);

	mutex_exit(&cpu_lock);

	/*
	 * If DTrace helper tracing is enabled, we need to allocate the
	 * trace buffer and initialize the values.
	 */
	if (dtrace_helptrace_enabled) {
		ASSERT(dtrace_helptrace_buffer == NULL);
		dtrace_helptrace_buffer =
		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
		dtrace_helptrace_next = 0;
	}

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	mutex_enter(&cpu_lock);

	/* Setup the boot CPU */
	(void) dtrace_cpu_setup(CPU_CONFIG, 0);

	mutex_exit(&cpu_lock);

#if __FreeBSD_version < 800039
	/* Enable device cloning. */
	clone_setup(&dtrace_clones);

	/* Setup device cloning events. */
	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dtrace_clone, 0, 1000);
#else
	dtrace_dev = make_dev(&dtrace_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
	    "dtrace/dtrace");
	helper_dev = make_dev(&helper_cdevsw, 0, UID_ROOT, GID_WHEEL, 0660,
	    "dtrace/helper");
#endif

	return;
}