Example #1
int
ex_attach(device_t dev)
{
    struct ex_softc *	sc = device_get_softc(dev);
    struct ifnet *		ifp;
    struct ifmedia *	ifm;
    int			error;
    uint16_t		temp;

    ifp = sc->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        return (ENOSPC);
    }
    /* work out which set of irq <-> internal tables to use */
    if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) {
        sc->irq2ee = plus_irq2eemap;
        sc->ee2irq = plus_ee2irqmap;
    } else {
        sc->irq2ee = irq2eemap;
        sc->ee2irq = ee2irqmap;
    }

    sc->mem_size = CARD_RAM_SIZE;	/* XXX This should be read from the card itself. */

    /*
     * Initialize the ifnet structure.
     */
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
    ifp->if_start = ex_start;
    ifp->if_ioctl = ex_ioctl;
    ifp->if_init = ex_init;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);

    ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts);
    mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
             MTX_DEF);
    callout_init_mtx(&sc->timer, &sc->lock, 0);

    temp = ex_eeprom_read(sc, EE_W5);
    if (temp & EE_W5_PORT_TPE)
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
    if (temp & EE_W5_PORT_BNC)
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
    if (temp & EE_W5_PORT_AUI)
        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);

    ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
    ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
    ifmedia_set(&sc->ifmedia, ex_get_media(sc));

    ifm = &sc->ifmedia;
    ifm->ifm_media = ifm->ifm_cur->ifm_media;
    ex_ifmedia_upd(ifp);

    /*
     * Attach the interface.
     */
    ether_ifattach(ifp, sc->enaddr);

    error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
                           NULL, ex_intr, (void *)sc, &sc->ih);
    if (error) {
        device_printf(dev, "bus_setup_intr() failed!\n");
        ether_ifdetach(ifp);
        mtx_destroy(&sc->lock);
        return (error);
    }

    return(0);
}
Example #2
static int
cbb_pci_attach(device_t brdev)
{
	static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */
	struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev);
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	int rid;
	device_t parent;
	uint32_t pribus;

	parent = device_get_parent(brdev);
	mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF);
	sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL);
	sc->dev = brdev;
	sc->cbdev = NULL;
	sc->exca[0].pccarddev = NULL;
	sc->domain = pci_get_domain(brdev);
	sc->secbus = pci_read_config(brdev, PCIR_SECBUS_2, 1);
	sc->subbus = pci_read_config(brdev, PCIR_SUBBUS_2, 1);
	sc->pribus = pcib_get_bus(parent);
	SLIST_INIT(&sc->rl);
	cbb_powerstate_d0(brdev);

	rid = CBBR_SOCKBASE;
	sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->base_res) {
		device_printf(brdev, "Could not map register memory\n");
		mtx_destroy(&sc->mtx);
		return (ENOMEM);
	} else {
		DEVPRINTF((brdev, "Found memory at %08lx\n",
		    rman_get_start(sc->base_res)));
	}

	sc->bst = rman_get_bustag(sc->base_res);
	sc->bsh = rman_get_bushandle(sc->base_res);
	exca_init(&sc->exca[0], brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET);
	sc->exca[0].flags |= EXCA_HAS_MEMREG_WIN;
	sc->exca[0].chipset = EXCA_CARDBUS;
	sc->chipinit = cbb_chipinit;
	sc->chipinit(sc);

	/*Sysctls*/
	sctx = device_get_sysctl_ctx(brdev);
	soid = device_get_sysctl_tree(brdev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
	    CTLFLAG_RD, &sc->domain, 0, "Domain number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
	    CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
	    CTLFLAG_RD, &sc->secbus, 0, "Secondary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
	    CTLFLAG_RD, &sc->subbus, 0, "Subordinate bus number");
#if 0
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory",
	    CTLFLAG_RD, &sc->subbus, 0, "Memory window open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem",
	    CTLFLAG_RD, &sc->subbus, 0, "Prefetch memroy window open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1",
	    CTLFLAG_RD, &sc->subbus, 0, "io range 1 open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2",
	    CTLFLAG_RD, &sc->subbus, 0, "io range 2 open");
#endif

	/*
	 * This is a gross hack.  We should be scanning the entire pci
	 * tree, assigning bus numbers in a way such that we (1) can
	 * reserve 1 extra bus just in case and (2) all sub busses
	 * are in an appropriate range.
	 */
	DEVPRINTF((brdev, "Secondary bus is %d\n", sc->secbus));
	pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1);
	if (sc->secbus == 0 || sc->pribus != pribus) {
		if (curr_bus_number <= sc->pribus)
			curr_bus_number = sc->pribus + 1;
		if (pribus != sc->pribus) {
			DEVPRINTF((brdev, "Setting primary bus to %d\n",
			    sc->pribus));
			pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1);
		}
		sc->secbus = curr_bus_number++;
		sc->subbus = curr_bus_number++;
		DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n",
		    sc->secbus, sc->subbus));
		pci_write_config(brdev, PCIR_SECBUS_2, sc->secbus, 1);
		pci_write_config(brdev, PCIR_SUBBUS_2, sc->subbus, 1);
	}

	/* attach children */
	sc->cbdev = device_add_child(brdev, "cardbus", -1);
	if (sc->cbdev == NULL)
		DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n"));
	else if (device_probe_and_attach(sc->cbdev) != 0)
		DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n"));

	sc->exca[0].pccarddev = device_add_child(brdev, "pccard", -1);
	if (sc->exca[0].pccarddev == NULL)
		DEVPRINTF((brdev, "WARNING: cannot add pccard bus.\n"));
	else if (device_probe_and_attach(sc->exca[0].pccarddev) != 0)
		DEVPRINTF((brdev, "WARNING: cannot attach pccard bus.\n"));

	/* Map and establish the interrupt. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(brdev, "Unable to map IRQ...\n");
		goto err;
	}

	if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE,
	    cbb_pci_filt, NULL, sc, &sc->intrhand)) {
		device_printf(brdev, "couldn't establish interrupt\n");
		goto err;
	}

	/* reset 16-bit pcmcia bus */
	exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET);

	/* turn off power */
	cbb_power(brdev, CARD_OFF);

	/* CSC Interrupt: Card detect interrupt on */
	cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD);

	/* reset interrupt */
	cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT));

	if (bootverbose)
		cbb_print_config(brdev);

	/* Start the thread */
	if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0,
	    "%s event thread", device_get_nameunit(brdev))) {
		device_printf(brdev, "unable to create event thread.\n");
		panic("cbb_create_event_thread");
	}
	sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev));
	return (0);
err:
	if (sc->irq_res)
		bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->base_res) {
		bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE,
		    sc->base_res);
	}
	mtx_destroy(&sc->mtx);
	return (ENOMEM);
}
Example #3
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
nfs_nget(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfs_vncmp ncmp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp->fh_bytes, fhsize, FNV1_32_INIT);
	ncmp.fhsize = fhsize;
	ncmp.fh = fhp;

	error = vfs_hash_get(mntp, hash, flags,
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	np = uma_zalloc(nfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("nfs", mntp, &nfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	vp->v_bufobj.bo_ops = &buf_ops_nfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/* 
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert happened
	 * to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NFSnode lock", NULL, MTX_DEF);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = malloc(fhsize, M_NFSBIGFH, M_WAITOK);
	} else
		np->n_fhp = &np->n_fh;
	bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
	np->n_fhsize = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		if (np->n_fhsize > NFS_SMALLFH) {
			free((caddr_t)np->n_fhp, M_NFSBIGFH);
		}
		mtx_destroy(&np->n_mtx);
		uma_zfree(nfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, flags, 
	    td, &nvp, nfs_vncmpf, &ncmp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
Example #4
void usb_free_recv_priv (_adapter *padapter, u16 ini_in_buf_sz)
{
	int i;
	struct recv_buf *precvbuf;
	struct recv_priv	*precvpriv = &padapter->recvpriv;

	precvbuf = (struct recv_buf *)precvpriv->precv_buf;

	for(i=0; i < NR_RECVBUFF ; i++)
	{
		rtw_os_recvbuf_resource_free(padapter, precvbuf);
		precvbuf++;
	}

	if(precvpriv->pallocated_recv_buf)
		rtw_mfree(precvpriv->pallocated_recv_buf, NR_RECVBUFF *sizeof(struct recv_buf) + 4);

#ifdef CONFIG_USB_INTERRUPT_IN_PIPE
#ifdef PLATFORM_LINUX
	if(precvpriv->int_in_urb)
	{
		usb_free_urb(precvpriv->int_in_urb);
	}
#endif
	if(precvpriv->int_in_buf)
		rtw_mfree(precvpriv->int_in_buf, ini_in_buf_sz);
#endif /* CONFIG_USB_INTERRUPT_IN_PIPE */

#ifdef PLATFORM_LINUX

	if (skb_queue_len(&precvpriv->rx_skb_queue)) {
		DBG_8192C(KERN_WARNING "rx_skb_queue not empty\n");
	}

	rtw_skb_queue_purge(&precvpriv->rx_skb_queue);

	if (skb_queue_len(&precvpriv->free_recv_skb_queue)) {
		DBG_8192C(KERN_WARNING "free_recv_skb_queue not empty, %d\n", skb_queue_len(&precvpriv->free_recv_skb_queue));
	}

#if !defined(CONFIG_USE_USB_BUFFER_ALLOC_RX)
	#if defined(CONFIG_PREALLOC_RECV_SKB) && defined(CONFIG_PREALLOC_RX_SKB_BUFFER)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&precvpriv->free_recv_skb_queue)) != NULL)
		{
			if (rtw_free_skb_premem(skb) != 0)
				rtw_skb_free(skb);
		}
	}
	#else
	rtw_skb_queue_purge(&precvpriv->free_recv_skb_queue);
	#endif /* defined(CONFIG_PREALLOC_RX_SKB_BUFFER) && defined(CONFIG_PREALLOC_RECV_SKB) */
#endif /* !defined(CONFIG_USE_USB_BUFFER_ALLOC_RX) */

#endif /* PLATFORM_LINUX */

#ifdef PLATFORM_FREEBSD
	struct sk_buff  *pskb;
	while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue)))
	{
		rtw_skb_free(pskb);
	}

	#if !defined(CONFIG_USE_USB_BUFFER_ALLOC_RX)
	rtw_skb_queue_purge(&precvpriv->free_recv_skb_queue);
	#endif

#ifdef CONFIG_RX_INDICATE_QUEUE
	struct mbuf *m;
	for (;;) {
		IF_DEQUEUE(&precvpriv->rx_indicate_queue, m);
		if (m == NULL)
			break;
		m_freem(m);
	}
	mtx_destroy(&precvpriv->rx_indicate_queue.ifq_mtx);
#endif /* CONFIG_RX_INDICATE_QUEUE */

#endif /* PLATFORM_FREEBSD */
}
Example #5
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	uint8_t buf[sizeof(struct dqblk64)];
	off_t base, recsize;
	struct dquot *dq, *dq1;
	struct dqhash *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int dqvplocked, error;

#ifdef DEBUG_VFS_LOCKS
	if (vp != NULLVP)
		ASSERT_VOP_ELOCKED(vp, "dqget");
#endif

	if (vp != NULLVP && *dqp != NODQUOT) {
		return (0);
	}

	/*
	 * XXX: Disallow negative id values to prevent the
	 * creation of 100GB+ quota data files.
	 */
	if ((int)id < 0)
		return (EINVAL);

	UFS_LOCK(ump);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		*dqp = NODQUOT;
		UFS_UNLOCK(ump);
		return (EINVAL);
	}
	vref(dqvp);
	UFS_UNLOCK(ump);
	error = 0;
	dqvplocked = 0;

	/*
	 * Check the cache first.
	 */
	dqh = DQHASH(dqvp, id);
	DQH_LOCK();
	dq = dqhashfind(dqh, id, dqvp);
	if (dq != NULL) {
		DQH_UNLOCK();
hfound:		DQI_LOCK(dq);
		DQI_WAIT(dq, PINOD+1, "dqget");
		DQI_UNLOCK(dq);
		if (dq->dq_ump == NULL) {
			dqrele(vp, dq);
			dq = NODQUOT;
			error = EIO;
		}
		*dqp = dq;
		if (dqvplocked)
			vput(dqvp);
		else
			vrele(dqvp);
		return (error);
	}

	/*
	 * Quota vnode lock is before DQ_LOCK. Acquire dqvp lock there
	 * since new dq will appear on the hash chain DQ_LOCKed.
	 */
	if (vp != dqvp) {
		DQH_UNLOCK();
		vn_lock(dqvp, LK_SHARED | LK_RETRY);
		dqvplocked = 1;
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for quota vnode lock.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			DQH_UNLOCK();
			goto hfound;
		}
	}

	/*
	 * Not in cache, allocate a new one or take it from the
	 * free list.
	 */
	if (TAILQ_FIRST(&dqfreelist) == NODQUOT &&
	    numdquot < MAXQUOTAS * desiredvnodes)
		desireddquot += DQUOTINC;
	if (numdquot < desireddquot) {
		numdquot++;
		DQH_UNLOCK();
		dq1 = malloc(sizeof *dq1, M_DQUOT, M_WAITOK | M_ZERO);
		mtx_init(&dq1->dq_lock, "dqlock", NULL, MTX_DEF);
		DQH_LOCK();
		/*
		 * Recheck the cache after sleep for memory.
		 */
		dq = dqhashfind(dqh, id, dqvp);
		if (dq != NULL) {
			numdquot--;
			DQH_UNLOCK();
			mtx_destroy(&dq1->dq_lock);
			free(dq1, M_DQUOT);
			goto hfound;
		}
		dq = dq1;
	} else {
		if ((dq = TAILQ_FIRST(&dqfreelist)) == NULL) {
			DQH_UNLOCK();
			tablefull("dquot");
			*dqp = NODQUOT;
			if (dqvplocked)
				vput(dqvp);
			else
				vrele(dqvp);
			return (EUSERS);
		}
		if (dq->dq_cnt || (dq->dq_flags & DQ_MOD))
			panic("dqget: free dquot isn't %p", dq);
		TAILQ_REMOVE(&dqfreelist, dq, dq_freelist);
		if (dq->dq_ump != NULL)
			LIST_REMOVE(dq, dq_hash);
	}

	/*
	 * Dq is put into hash already locked to prevent parallel
	 * usage while it is being read from file.
	 */
	dq->dq_flags = DQ_LOCK;
	dq->dq_id = id;
	dq->dq_type = type;
	dq->dq_ump = ump;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	DQREF(dq);
	DQH_UNLOCK();

	/*
	 * Read the requested quota record from the quota file, performing
	 * any necessary conversions.
	 */
	if (ump->um_qflags[type] & QTF_64BIT) {
		recsize = sizeof(struct dqblk64);
		base = sizeof(struct dqhdr64);
	} else {
		recsize = sizeof(struct dqblk32);
		base = 0;
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = buf;
	aiov.iov_len = recsize;
	auio.uio_resid = recsize;
	auio.uio_offset = base + id * recsize;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_td = (struct thread *)0;

	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == recsize && error == 0) {
		bzero(&dq->dq_dqb, sizeof(dq->dq_dqb));
	} else {
		if (ump->um_qflags[type] & QTF_64BIT)
			dqb64_dq((struct dqblk64 *)buf, dq);
		else
			dqb32_dq((struct dqblk32 *)buf, dq);
	}
	if (dqvplocked)
		vput(dqvp);
	else
		vrele(dqvp);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		DQH_LOCK();
		dq->dq_ump = NULL;
		LIST_REMOVE(dq, dq_hash);
		DQH_UNLOCK();
		DQI_LOCK(dq);
		if (dq->dq_flags & DQ_WANT)
			wakeup(dq);
		dq->dq_flags = 0;
		DQI_UNLOCK(dq);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	DQI_LOCK(dq);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0) {
			dq->dq_btime = time_second + ump->um_btime[type];
			if (dq->dq_bsoftlimit &&
			    dq->dq_curblocks >= dq->dq_bsoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
		if (dq->dq_itime == 0) {
			dq->dq_itime = time_second + ump->um_itime[type];
			if (dq->dq_isoftlimit &&
			    dq->dq_curinodes >= dq->dq_isoftlimit)
				dq->dq_flags |= DQ_MOD;
		}
	}
	DQI_WAKEUP(dq);
	DQI_UNLOCK(dq);
	*dqp = dq;
	return (0);
}
Example #6
static int
tws_attach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    u_int32_t cmd, bar;
    int error=0,i;

    /* no tracing yet */
    /* Look up our softc and initialize its fields. */
    sc->tws_dev = dev;
    sc->device_id = pci_get_device(dev);
    sc->subvendor_id = pci_get_subvendor(dev);
    sc->subdevice_id = pci_get_subdevice(dev);

    /* Initialize mutexes */
    mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
    mtx_init( &sc->sim_lock,  "tws_sim_lock", NULL, MTX_DEF);
    mtx_init( &sc->gen_lock,  "tws_gen_lock", NULL, MTX_DEF);
    mtx_init( &sc->io_lock,  "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);

    if ( tws_init_trace_q(sc) == FAILURE )
        printf("trace init failure\n");
    /* send init event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_START);
    mtx_unlock(&sc->gen_lock);


#if _BYTE_ORDER == _BIG_ENDIAN
    TWS_TRACE(sc, "BIG endian", 0, 0);
#endif
    /* sysctl context setup */
    sysctl_ctx_init(&sc->tws_clist);
    sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
                                   SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
                                   device_get_nameunit(dev), 
                                   CTLFLAG_RD, 0, "");
    if ( sc->tws_oidp == NULL ) {
        tws_log(sc, SYSCTL_TREE_NODE_ADD);
        goto attach_fail_1;
    }
    SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
                      OID_AUTO, "driver_version", CTLFLAG_RD,
                      TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
    if ( (cmd & PCIM_CMD_PORTEN) == 0) {
        tws_log(sc, PCI_COMMAND_READ);
        goto attach_fail_1;
    }
    /* Force the busmaster enable bit on. */
    cmd |= PCIM_CMD_BUSMASTEREN;
    pci_write_config(dev, PCIR_COMMAND, cmd, 2);

    bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
    TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
    bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
    bar = bar & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);
 
    /* MFA base address is the BAR2 register, used for
     * push mode. Firmware will eventually move to
     * pull mode, at which point this needs to change.
     */
#ifndef TWS_PULL_MODE_ENABLE
    sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
    sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
    TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

    /* allocate MMIO register space */ 
    sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
    if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
                                &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_1;
    }
    sc->bus_tag = rman_get_bustag(sc->reg_res);
    sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
    /* Allocate bus space for inbound mfa */ 
    sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
    if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
                          &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE))
                                == NULL) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_2;
    }
    sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
    sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

    /* Allocate and register our interrupt. */
    sc->intr_type = TWS_INTx; /* default */

    if ( tws_enable_msi )
        sc->intr_type = TWS_MSI;
    if ( tws_setup_irq(sc) == FAILURE ) {
        tws_log(sc, ALLOC_MEMORY_RES);
        goto attach_fail_3;
    }

    /*
     * Create a /dev entry for this device.  The kernel will assign us
     * a major number automatically.  We use the unit number of this
     * device as the minor number and name the character device
     * "tws<unit>".
     */
    sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
        UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u", 
        device_get_unit(dev));
    sc->tws_cdev->si_drv1 = sc;

    if ( tws_init(sc) == FAILURE ) {
        tws_log(sc, TWS_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ( tws_init_ctlr(sc) == FAILURE ) {
        tws_log(sc, TWS_CTLR_INIT_FAILURE);
        goto attach_fail_4;
    }
    if ((error = tws_cam_attach(sc))) {
        tws_log(sc, TWS_CAM_ATTACH);
        goto attach_fail_4;
    }
    /* send init complete event */
    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_INIT_COMPLETE);
    mtx_unlock(&sc->gen_lock);
        
    TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
    return(0);

attach_fail_4:
    tws_teardown_intr(sc);
    destroy_dev(sc->tws_cdev);
attach_fail_3:
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                 SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus irq res", 0, 0);
        }
    }
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2: 
#endif
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
    }
attach_fail_1:
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    sysctl_ctx_free(&sc->tws_clist);
    return (ENXIO);
}
Example #7
static int
ahci_em_attach(device_t dev)
{
	device_t parent = device_get_parent(dev);
	struct ahci_controller *ctlr = device_get_softc(parent);
	struct ahci_enclosure *enc = device_get_softc(dev);
	struct cam_devq *devq;
	int i, c, rid, error;
	char buf[32];

	enc->dev = dev;
	enc->quirks = ctlr->quirks;
	enc->channels = ctlr->channels;
	enc->ichannels = ctlr->ichannels;
	mtx_init(&enc->mtx, "AHCI enclosure lock", NULL, MTX_DEF);
	rid = 0;
	if (!(enc->r_memc = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE)))
		return (ENXIO);
	enc->capsem = ATA_INL(enc->r_memc, 0);
	rid = 1;
	if (!(enc->r_memt = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE))) {
		error = ENXIO;
		goto err0;
	}
	if ((enc->capsem & (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) {
		rid = 2;
		if (!(enc->r_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE))) {
			error = ENXIO;
			goto err0;
		}
	} else
		enc->r_memr = NULL;
	mtx_lock(&enc->mtx);
	ahci_em_reset(dev);
	rid = ATA_IRQ_RID;
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate SIM queue\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	enc->sim = cam_sim_alloc(ahciemaction, ahciempoll, "ahciem", enc,
	    device_get_unit(dev), &enc->mtx,
	    1, 0, devq);
	if (enc->sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "Unable to allocate SIM\n");
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(enc->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&enc->path, /*periph*/NULL, cam_sim_path(enc->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "Unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&enc->mtx);
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s%s%s\n",
		    (enc->capsem & AHCI_EM_PM) ? " PM":"",
		    (enc->capsem & AHCI_EM_ALHD) ? " ALHD":"",
		    (enc->capsem & AHCI_EM_XMT) ? " XMT":"",
		    (enc->capsem & AHCI_EM_SMB) ? " SMB":"",
		    (enc->capsem & AHCI_EM_SGPIO) ? " SGPIO":"",
		    (enc->capsem & AHCI_EM_SES2) ? " SES-2":"",
		    (enc->capsem & AHCI_EM_SAFTE) ? " SAF-TE":"",
		    (enc->capsem & AHCI_EM_LED) ? " LED":"");
	}
	if ((enc->capsem & AHCI_EM_LED)) {
		for (c = 0; c < enc->channels; c++) {
			if ((enc->ichannels & (1 << c)) == 0)
				continue;
			for (i = 0; i < AHCI_NUM_LEDS; i++) {
				enc->leds[c * AHCI_NUM_LEDS + i].dev = dev;
				enc->leds[c * AHCI_NUM_LEDS + i].num =
				    c * AHCI_NUM_LEDS + i;
			}
			if ((enc->capsem & AHCI_EM_ALHD) == 0) {
				snprintf(buf, sizeof(buf), "%s.%d.act",
				    device_get_nameunit(parent), c);
				enc->leds[c * AHCI_NUM_LEDS + 0].led =
				    led_create(ahci_em_led,
				    &enc->leds[c * AHCI_NUM_LEDS + 0], buf);
			}
			snprintf(buf, sizeof(buf), "%s.%d.locate",
			    device_get_nameunit(parent), c);
			enc->leds[c * AHCI_NUM_LEDS + 1].led =
			    led_create(ahci_em_led,
			    &enc->leds[c * AHCI_NUM_LEDS + 1], buf);
			snprintf(buf, sizeof(buf), "%s.%d.fault",
			    device_get_nameunit(parent), c);
			enc->leds[c * AHCI_NUM_LEDS + 2].led =
			    led_create(ahci_em_led,
			    &enc->leds[c * AHCI_NUM_LEDS + 2], buf);
		}
	}
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(enc->sim));
err2:
	cam_sim_free(enc->sim, /*free_devq*/TRUE);
err1:
	mtx_unlock(&enc->mtx);
	if (enc->r_memr)
		bus_release_resource(dev, SYS_RES_MEMORY, 2, enc->r_memr);
err0:
	if (enc->r_memt)
		bus_release_resource(dev, SYS_RES_MEMORY, 1, enc->r_memt);
	bus_release_resource(dev, SYS_RES_MEMORY, 0, enc->r_memc);
	mtx_destroy(&enc->mtx);
	return (error);
}
Example #8
struct pipe_context *
dd_context_create(struct dd_screen *dscreen, struct pipe_context *pipe)
{
   struct dd_context *dctx;

   if (!pipe)
      return NULL;

   dctx = CALLOC_STRUCT(dd_context);
   if (!dctx)
      goto fail;

   dctx->pipe = pipe;
   dctx->base.priv = pipe->priv; /* expose wrapped priv data */
   dctx->base.screen = &dscreen->base;
   dctx->base.stream_uploader = pipe->stream_uploader;
   dctx->base.const_uploader = pipe->const_uploader;

   dctx->base.destroy = dd_context_destroy;

   CTX_INIT(render_condition);
   CTX_INIT(create_query);
   CTX_INIT(create_batch_query);
   CTX_INIT(destroy_query);
   CTX_INIT(begin_query);
   CTX_INIT(end_query);
   CTX_INIT(get_query_result);
   CTX_INIT(set_active_query_state);
   CTX_INIT(create_blend_state);
   CTX_INIT(bind_blend_state);
   CTX_INIT(delete_blend_state);
   CTX_INIT(create_sampler_state);
   CTX_INIT(bind_sampler_states);
   CTX_INIT(delete_sampler_state);
   CTX_INIT(create_rasterizer_state);
   CTX_INIT(bind_rasterizer_state);
   CTX_INIT(delete_rasterizer_state);
   CTX_INIT(create_depth_stencil_alpha_state);
   CTX_INIT(bind_depth_stencil_alpha_state);
   CTX_INIT(delete_depth_stencil_alpha_state);
   CTX_INIT(create_fs_state);
   CTX_INIT(bind_fs_state);
   CTX_INIT(delete_fs_state);
   CTX_INIT(create_vs_state);
   CTX_INIT(bind_vs_state);
   CTX_INIT(delete_vs_state);
   CTX_INIT(create_gs_state);
   CTX_INIT(bind_gs_state);
   CTX_INIT(delete_gs_state);
   CTX_INIT(create_tcs_state);
   CTX_INIT(bind_tcs_state);
   CTX_INIT(delete_tcs_state);
   CTX_INIT(create_tes_state);
   CTX_INIT(bind_tes_state);
   CTX_INIT(delete_tes_state);
   CTX_INIT(create_compute_state);
   CTX_INIT(bind_compute_state);
   CTX_INIT(delete_compute_state);
   CTX_INIT(create_vertex_elements_state);
   CTX_INIT(bind_vertex_elements_state);
   CTX_INIT(delete_vertex_elements_state);
   CTX_INIT(set_blend_color);
   CTX_INIT(set_stencil_ref);
   CTX_INIT(set_sample_mask);
   CTX_INIT(set_min_samples);
   CTX_INIT(set_clip_state);
   CTX_INIT(set_constant_buffer);
   CTX_INIT(set_framebuffer_state);
   CTX_INIT(set_polygon_stipple);
   CTX_INIT(set_scissor_states);
   CTX_INIT(set_viewport_states);
   CTX_INIT(set_sampler_views);
   CTX_INIT(set_tess_state);
   CTX_INIT(set_shader_buffers);
   CTX_INIT(set_shader_images);
   CTX_INIT(set_vertex_buffers);
   CTX_INIT(create_stream_output_target);
   CTX_INIT(stream_output_target_destroy);
   CTX_INIT(set_stream_output_targets);
   CTX_INIT(create_sampler_view);
   CTX_INIT(sampler_view_destroy);
   CTX_INIT(create_surface);
   CTX_INIT(surface_destroy);
   CTX_INIT(transfer_map);
   CTX_INIT(transfer_flush_region);
   CTX_INIT(transfer_unmap);
   CTX_INIT(buffer_subdata);
   CTX_INIT(texture_subdata);
   CTX_INIT(texture_barrier);
   CTX_INIT(memory_barrier);
   CTX_INIT(resource_commit);
   /* create_video_codec */
   /* create_video_buffer */
   /* set_compute_resources */
   /* set_global_binding */
   CTX_INIT(get_sample_position);
   CTX_INIT(invalidate_resource);
   CTX_INIT(get_device_reset_status);
   CTX_INIT(set_device_reset_callback);
   CTX_INIT(dump_debug_state);
   CTX_INIT(emit_string_marker);
   CTX_INIT(create_texture_handle);
   CTX_INIT(delete_texture_handle);
   CTX_INIT(make_texture_handle_resident);
   CTX_INIT(create_image_handle);
   CTX_INIT(delete_image_handle);
   CTX_INIT(make_image_handle_resident);

   dd_init_draw_functions(dctx);

   u_log_context_init(&dctx->log);
   if (pipe->set_log_context)
      pipe->set_log_context(pipe, &dctx->log);

   dctx->draw_state.sample_mask = ~0;

   if (dscreen->mode == DD_DETECT_HANGS_PIPELINED) {
      dctx->fence = pipe_buffer_create(dscreen->screen, PIPE_BIND_CUSTOM,
                                            PIPE_USAGE_STAGING, 4);
      if (!dctx->fence)
         goto fail;

      dctx->mapped_fence = pipe_buffer_map(pipe, dctx->fence,
                                           PIPE_TRANSFER_READ_WRITE |
                                           PIPE_TRANSFER_PERSISTENT |
                                           PIPE_TRANSFER_COHERENT,
                                           &dctx->fence_transfer);
      if (!dctx->mapped_fence)
         goto fail;

      *dctx->mapped_fence = 0;

      (void) mtx_init(&dctx->mutex, mtx_plain);
      dctx->thread = u_thread_create(dd_thread_pipelined_hang_detect, dctx);
      if (!dctx->thread) {
         mtx_destroy(&dctx->mutex);
         goto fail;
      }
   }

   return &dctx->base;

fail:
   if (dctx) {
      if (dctx->mapped_fence)
         pipe_transfer_unmap(pipe, dctx->fence_transfer);
      pipe_resource_reference(&dctx->fence, NULL);
      FREE(dctx);
   }
   pipe->destroy(pipe);
   return NULL;
}
Example #9
struct nandsim_chip *
nandsim_chip_init(struct nandsim_softc* sc, uint8_t chip_num,
    struct sim_chip *sim_chip)
{
	struct nandsim_chip *chip;
	struct onfi_params *chip_param;
	char swapfile[20];
	uint32_t size;
	int error;

	chip = malloc(sizeof(*chip), M_NANDSIM, M_WAITOK | M_ZERO);
	if (!chip)
		return (NULL);

	mtx_init(&chip->ns_lock, "nandsim lock", NULL, MTX_DEF);
	callout_init(&chip->ns_callout, 1);
	STAILQ_INIT(&chip->nandsim_events);

	chip->chip_num = chip_num;
	chip->ctrl_num = sim_chip->ctrl_num;
	chip->sc = sc;

	if (!sim_chip->is_wp)
		nandchip_set_status(chip, NAND_STATUS_WP);

	chip_param = &chip->params;

	chip->id.dev_id = sim_chip->device_id;
	chip->id.man_id = sim_chip->manufact_id;

	chip->error_ratio = sim_chip->error_ratio;
	chip->wear_level = sim_chip->wear_level;
	chip->prog_delay = sim_chip->prog_time;
	chip->erase_delay = sim_chip->erase_time;
	chip->read_delay = sim_chip->read_time;

	chip_param->t_prog = sim_chip->prog_time;
	chip_param->t_bers = sim_chip->erase_time;
	chip_param->t_r = sim_chip->read_time;
	bcopy("onfi", &chip_param->signature, 4);

	chip_param->manufacturer_id = sim_chip->manufact_id;
	strncpy(chip_param->manufacturer_name, sim_chip->manufacturer, 12);
	chip_param->manufacturer_name[11] = 0;
	strncpy(chip_param->device_model, sim_chip->device_model, 20);
	chip_param->device_model[19] = 0;

	chip_param->bytes_per_page = sim_chip->page_size;
	chip_param->spare_bytes_per_page = sim_chip->oob_size;
	chip_param->pages_per_block = sim_chip->pgs_per_blk;
	chip_param->blocks_per_lun = sim_chip->blks_per_lun;
	chip_param->luns = sim_chip->luns;

	init_chip_geom(&chip->cg, chip_param->luns, chip_param->blocks_per_lun,
	    chip_param->pages_per_block, chip_param->bytes_per_page,
	    chip_param->spare_bytes_per_page);

	chip_param->address_cycles = sim_chip->row_addr_cycles |
	    (sim_chip->col_addr_cycles << 4);
	chip_param->features = sim_chip->features;
	if (sim_chip->width == 16)
		chip_param->features |= ONFI_FEAT_16BIT;

	size = chip_param->blocks_per_lun * chip_param->luns;

	error = nandsim_blk_state_init(chip, size, sim_chip->wear_level);
	if (error) {
		mtx_destroy(&chip->ns_lock);
		free(chip, M_NANDSIM);
		return (NULL);
	}

	error = nandsim_bbm_init(chip, size, sim_chip->bad_block_map);
	if (error) {
		mtx_destroy(&chip->ns_lock);
		nandsim_blk_state_destroy(chip);
		free(chip, M_NANDSIM);
		return (NULL);
	}

	nandsim_start_handler(chip, poweron_evh);

	nand_debug(NDBG_SIM,"Create thread for chip%d [%8p]", chip->chip_num,
	    chip);
	/* Create chip thread */
	error = kproc_kthread_add(nandsim_loop, chip, &nandsim_proc,
	    &chip->nandsim_td, RFSTOPPED | RFHIGHPID,
	    0, "nandsim", "chip");
	if (error) {
		mtx_destroy(&chip->ns_lock);
		nandsim_blk_state_destroy(chip);
		free(chip, M_NANDSIM);
		return (NULL);
	}

	thread_lock(chip->nandsim_td);
	sched_class(chip->nandsim_td, PRI_REALTIME);
	sched_add(chip->nandsim_td, SRQ_BORING);
	thread_unlock(chip->nandsim_td);

	size = (chip_param->bytes_per_page +
	    chip_param->spare_bytes_per_page) *
	    chip_param->pages_per_block;

	sprintf(swapfile, "chip%d%d.swp", chip->ctrl_num, chip->chip_num);
	chip->swap = nandsim_swap_init(swapfile, chip_param->blocks_per_lun *
	    chip_param->luns, size);
	if (!chip->swap)
		nandsim_chip_destroy(chip);

	/* Wait for new thread to enter main loop */
	tsleep(chip->nandsim_td, PWAIT, "ns_chip", 1 * hz);

	return (chip);
}
Example #10
static int
pcf_isa_attach(device_t dev)
{
	struct pcf_softc *sc;
	int rv = ENXIO;

	sc = DEVTOSOFTC(dev);
	mtx_init(&sc->pcf_lock, device_get_nameunit(dev), "pcf", MTX_DEF);

	/* IO port is mandatory */
	sc->res_ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
						&sc->rid_ioport, RF_ACTIVE);
	if (sc->res_ioport == 0) {
		device_printf(dev, "cannot reserve I/O port range\n");
		goto error;
	}

	sc->pcf_flags = device_get_flags(dev);

	if (!(sc->pcf_flags & IIC_POLLED)) {
		sc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->rid_irq,
						     RF_ACTIVE);
		if (sc->res_irq == 0) {
			device_printf(dev, "can't reserve irq, polled mode.\n");
			sc->pcf_flags |= IIC_POLLED;
		}
	}

	/* reset the chip */
	pcf_rst_card(dev, IIC_FASTEST, PCF_DEFAULT_ADDR, NULL);

	if (sc->res_irq) {
		rv = bus_setup_intr(dev, sc->res_irq,
				    INTR_TYPE_NET /* | INTR_ENTROPY */,
				    NULL, pcf_intr, sc, &sc->intr_cookie);
		if (rv) {
			device_printf(dev, "could not setup IRQ\n");
			goto error;
		}
	}

	if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL)
		device_printf(dev, "could not allocate iicbus instance\n");

	/* probe and attach the iicbus */
	bus_generic_attach(dev);

	return (0);

error:
	if (sc->res_irq != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->rid_irq,
				     sc->res_irq);
	}
	if (sc->res_ioport != 0) {
		bus_release_resource(dev, SYS_RES_IOPORT, sc->rid_ioport,
				     sc->res_ioport);
	}
	mtx_destroy(&sc->pcf_lock);
	return (rv);
}
Example #11
void stack10_Destructor(Stack10 *s){
	mtx_destroy(&s->mtx_);
	cnd_destroy(&s->cnd_push_);
	cnd_destroy(&s->cnd_pop_);
}
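For context, the destructor above only makes sense if the same objects were set up first. The following is a minimal sketch of a matching constructor using the C11 <threads.h> API; the function name and error handling are assumptions, and only the synchronization members visible in the example (mtx_, cnd_push_, cnd_pop_) are initialized.

#include <threads.h>

/* Hypothetical counterpart to stack10_Destructor: create the mutex and
 * condition variables that the destructor later tears down. */
int stack10_Constructor(Stack10 *s)
{
	if (mtx_init(&s->mtx_, mtx_plain) != thrd_success)
		return thrd_error;
	if (cnd_init(&s->cnd_push_) != thrd_success) {
		mtx_destroy(&s->mtx_);
		return thrd_error;
	}
	if (cnd_init(&s->cnd_pop_) != thrd_success) {
		cnd_destroy(&s->cnd_push_);
		mtx_destroy(&s->mtx_);
		return thrd_error;
	}
	return thrd_success;
}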
Example #12
static int
jz4780_mmc_attach(device_t dev)
{
	struct jz4780_mmc_softc *sc;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *tree;
	device_t child;
	ssize_t len;
	pcell_t prop;
	phandle_t node;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_req = NULL;
	if (bus_alloc_resources(dev, jz4780_mmc_res_spec, sc->sc_res) != 0) {
		device_printf(dev, "cannot allocate device resources\n");
		return (ENXIO);
	}
	sc->sc_bst = rman_get_bustag(sc->sc_res[JZ_MSC_MEMRES]);
	sc->sc_bsh = rman_get_bushandle(sc->sc_res[JZ_MSC_MEMRES]);
	if (bus_setup_intr(dev, sc->sc_res[JZ_MSC_IRQRES],
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, jz4780_mmc_intr, sc,
	    &sc->sc_intrhand)) {
		bus_release_resources(dev, jz4780_mmc_res_spec, sc->sc_res);
		device_printf(dev, "cannot setup interrupt handler\n");
		return (ENXIO);
	}
	sc->sc_timeout = 10;
	ctx = device_get_sysctl_ctx(dev);
	tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW,
	    &sc->sc_timeout, 0, "Request timeout in seconds");
	mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), "jz4780_mmc",
	    MTX_DEF);
	callout_init_mtx(&sc->sc_timeoutc, &sc->sc_mtx, 0);

	/* Reset controller. */
	if (jz4780_mmc_reset(sc) != 0) {
		device_printf(dev, "cannot reset the controller\n");
		goto fail;
	}
	if (jz4780_mmc_pio_mode == 0 && jz4780_mmc_setup_dma(sc) != 0) {
		device_printf(sc->sc_dev, "Couldn't setup DMA!\n");
		jz4780_mmc_pio_mode = 1;
	}
	if (bootverbose)
		device_printf(sc->sc_dev, "DMA status: %s\n",
		    jz4780_mmc_pio_mode ? "disabled" : "enabled");

	node = ofw_bus_get_node(dev);
	/* Determine max operating frequency */
	sc->sc_host.f_max = 24000000;
	len = OF_getencprop(node, "max-frequency", &prop, sizeof(prop));
	if (len / sizeof(prop) == 1)
		sc->sc_host.f_max = prop;
	sc->sc_host.f_min = sc->sc_host.f_max / 128;

	sc->sc_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
	sc->sc_host.caps = MMC_CAP_HSPEED;
	sc->sc_host.mode = mode_sd;
	/*
	 * Check for bus-width property, default to both 4 and 8 bit
	 * if no bus width is specified.
	 */
	len = OF_getencprop(node, "bus-width", &prop, sizeof(prop));
	if (len / sizeof(prop) != 1)
		sc->sc_host.caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
	else if (prop == 8)
		sc->sc_host.caps |= MMC_CAP_8_BIT_DATA;
	else if (prop == 4)
		sc->sc_host.caps |= MMC_CAP_4_BIT_DATA;
	/* Activate the module clock. */
	if (jz4780_mmc_enable_clock(sc) != 0) {
		device_printf(dev, "cannot activate mmc clock\n");
		goto fail;
	}

	child = device_add_child(dev, "mmc", -1);
	if (child == NULL) {
		device_printf(dev, "attaching MMC bus failed!\n");
		goto fail;
	}
	if (device_probe_and_attach(child) != 0) {
		device_printf(dev, "attaching MMC child failed!\n");
		device_delete_child(dev, child);
		goto fail;
	}

	return (0);

fail:
	callout_drain(&sc->sc_timeoutc);
	mtx_destroy(&sc->sc_mtx);
	bus_teardown_intr(dev, sc->sc_res[JZ_MSC_IRQRES], sc->sc_intrhand);
	bus_release_resources(dev, jz4780_mmc_res_spec, sc->sc_res);
	if (sc->sc_clk != NULL)
		clk_release(sc->sc_clk);
	return (ENXIO);
}
Example #13
/*
 * ONLY USED FOR THE ROOT DIRECTORY. nfscl_nget() does the rest. If this
 * function is going to be used to get Regular Files, code must be added
 * to fill in the "struct nfsv4node".
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
int
ncl_nget(struct mount *mntp, u_int8_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct thread *td = curthread;	/* XXX */
	struct nfsnode *np;
	struct vnode *vp;
	struct vnode *nvp;
	int error;
	u_int hash;
	struct nfsmount *nmp;
	struct nfsfh *nfhp;

	nmp = VFSTONFS(mntp);
	*npp = NULL;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	MALLOC(nfhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;
	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	FREE(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}

	/*
	 * Allocate before getnewvnode since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if zalloc should block.
	 */
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode("newnfs", mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("ncl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/* 
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	/* 
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct 
	 */
	if ((fhsize == nmp->nm_fhsize) &&
	    !bcmp(fhp, nmp->nm_fh, fhsize)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}
	
	MALLOC(np->n_fhp, struct nfsfh *, sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, np->n_fhp->nfh_fh, fhsize);
	np->n_fhp->nfh_len = fhsize;
	error = insmntque(vp, mntp);
	if (error != 0) {
		*npp = NULL;
		FREE((caddr_t)np->n_fhp, M_NFSFH);
		mtx_destroy(&np->n_mtx);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags, 
	    td, &nvp, newnfs_vncmpf, np->n_fhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
Example #14
void
random_deinit(void)
{
	mtx_destroy(&random_reseed_mtx);
}
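The one-liner above is the teardown half of the usual FreeBSD(9) mutex lifecycle: every mtx_init() performed during setup is matched by exactly one mtx_destroy() once no other thread can still acquire the lock. A minimal sketch of that pairing follows, using hypothetical names (random_softc, random_init_sketch); it is not taken from the driver the example comes from.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct random_softc {
	struct mtx	sc_mtx;
	/* ... state protected by sc_mtx ... */
};

static void
random_init_sketch(struct random_softc *sc)
{
	/* The name string shows up in debugging/witness output. */
	mtx_init(&sc->sc_mtx, "random reseed", NULL, MTX_DEF);
}

static void
random_reseed_sketch(struct random_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	/* ... touch the state guarded by sc_mtx ... */
	mtx_unlock(&sc->sc_mtx);
}

static void
random_deinit_sketch(struct random_softc *sc)
{
	/* Only valid once no other thread can still use the mutex. */
	mtx_destroy(&sc->sc_mtx);
}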
Example #15
static int
pcf_ebus_attach(device_t dev)
{
	struct pcf_softc *sc;
	int rv = ENXIO;
	phandle_t node;
	uint64_t own_addr;

	sc = DEVTOSOFTC(dev);
	mtx_init(&sc->pcf_lock, device_get_nameunit(dev), "pcf", MTX_DEF);

	/* get OFW node of the pcf */
	if ((node = ofw_bus_get_node(dev)) == -1) {
		device_printf(dev, "cannot get OFW node\n");
		goto error;
	}

	/* IO port is mandatory */
	sc->res_ioport = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->rid_ioport, RF_ACTIVE);
	if (sc->res_ioport == 0) {
		device_printf(dev, "cannot reserve I/O port range\n");
		goto error;
	}

	sc->pcf_flags = device_get_flags(dev);

	/*
	 * XXX use poll-mode property?
	 */
	if (!(sc->pcf_flags & IIC_POLLED)) {
		sc->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &sc->rid_irq, RF_ACTIVE);
		if (sc->res_irq == 0) {
			device_printf(dev, "can't reserve irq, polled mode.\n");
			sc->pcf_flags |= IIC_POLLED;
		}
	}

	/*
	 * XXX on AXmp there's probably a second IRQ which is the fan fail
	 *     interrupt generated by the PCF8574 at 0x78.
	 */

	/* get address of the pcf */
	if (OF_getprop(node, "own-address", &own_addr, sizeof(own_addr)) ==
	    -1) {
		device_printf(dev, "cannot get own address\n");
		goto error;
	}
	if (bootverbose)
		device_printf(dev, "PCF8584 address: 0x%08llx\n", (unsigned
		    long long)own_addr);

	/* reset the chip */
	pcf_rst_card(dev, IIC_FASTEST, own_addr, NULL);

	if (sc->res_irq) {
		rv = bus_setup_intr(dev, sc->res_irq,
		    INTR_TYPE_NET /* | INTR_ENTROPY */, NULL, pcf_intr, sc,
		    &sc->intr_cookie);
		if (rv) {
			device_printf(dev, "could not setup IRQ\n");
			goto error;
		}
	}

	if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL)
		device_printf(dev, "could not allocate iicbus instance\n");

	/* probe and attach the iicbus */
	bus_generic_attach(dev);

	return (0);

error:
	if (sc->res_irq != 0) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->rid_irq,
		    sc->res_irq);
	}
	if (sc->res_ioport != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->rid_ioport,
		    sc->res_ioport);
	}
	mtx_destroy(&sc->pcf_lock);
	return (rv);
}
Example #16
/**
 * @brief Cleanup the ring buffer.
 */
void hv_ring_buffer_cleanup(hv_vmbus_ring_buffer_info* ring_info)
{
    mtx_destroy(&ring_info->ring_lock);
}
Example #17
/*
 * Common code for mount and mountroot.
 */
static int
ext2_mountfs(struct vnode *devvp, struct mount *mp)
{
	struct ext2mount *ump;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *es;
	struct cdev *dev = devvp->v_rdev;
	struct g_consumer *cp;
	struct bufobj *bo;
	struct csum *sump;
	int error;
	int ronly;
	int i;
	u_long size;
	int32_t *lp;
	int32_t e2fs_maxcontig;

	ronly = vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0);
	/* XXX: use VOP_ACCESS to check FS perms */
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ext2fs", ronly ? 0 : 1);
	g_topology_unlock();
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	/* XXX: should we check for some sectorsize or 512 instead? */
	if (((SBSIZE % cp->provider->sectorsize) != 0) ||
	    (SBSIZE < cp->provider->sectorsize)) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
		return (EINVAL);
	}

	bo = &devvp->v_bufobj;
	bo->bo_private = cp;
	bo->bo_ops = g_vfs_bufops;
	if (devvp->v_rdev->si_iosize_max != 0)
		mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
	if (mp->mnt_iosize_max > MAXPHYS)
		mp->mnt_iosize_max = MAXPHYS;

	bp = NULL;
	ump = NULL;
	if ((error = bread(devvp, SBLOCK, SBSIZE, NOCRED, &bp)) != 0)
		goto out;
	es = (struct ext2fs *)bp->b_data;
	if (ext2_check_sb_compat(es, dev, ronly) != 0) {
		error = EINVAL;		/* XXX needs translation */
		goto out;
	}
	if ((es->e2fs_state & E2FS_ISCLEAN) == 0 ||
	    (es->e2fs_state & E2FS_ERRORS)) {
		if (ronly || (mp->mnt_flag & MNT_FORCE)) {
			printf(
"WARNING: Filesystem was not properly dismounted\n");
		} else {
			printf(
"WARNING: R/W mount denied.  Filesystem is not clean - run fsck\n");
			error = EPERM;
			goto out;
		}
	}
	ump = malloc(sizeof(*ump), M_EXT2MNT, M_WAITOK | M_ZERO);

	/*
	 * I don't know whether this is the right strategy. Note that
	 * we dynamically allocate both an m_ext2fs and an ext2fs
	 * while Linux keeps the super block in a locked buffer.
	 */
	ump->um_e2fs = malloc(sizeof(struct m_ext2fs),
	    M_EXT2MNT, M_WAITOK | M_ZERO);
	ump->um_e2fs->e2fs = malloc(sizeof(struct ext2fs),
	    M_EXT2MNT, M_WAITOK);
	mtx_init(EXT2_MTX(ump), "EXT2FS", "EXT2FS Lock", MTX_DEF);
	bcopy(es, ump->um_e2fs->e2fs, (u_int)sizeof(struct ext2fs));
	if ((error = compute_sb_data(devvp, ump->um_e2fs->e2fs, ump->um_e2fs)))
		goto out;

	/*
	 * Calculate the maximum contiguous blocks and size of cluster summary
	 * array.  In FFS this is done by newfs; however, the superblock
	 * in ext2fs doesn't have these variables, so we can calculate
	 * them here.
	 */
	e2fs_maxcontig = MAX(1, MAXPHYS / ump->um_e2fs->e2fs_bsize);
	ump->um_e2fs->e2fs_contigsumsize = MIN(e2fs_maxcontig, EXT2_MAXCONTIG);
	if (ump->um_e2fs->e2fs_contigsumsize > 0) {
		size = ump->um_e2fs->e2fs_gcount * sizeof(int32_t);
		ump->um_e2fs->e2fs_maxcluster = malloc(size, M_EXT2MNT, M_WAITOK);
		size = ump->um_e2fs->e2fs_gcount * sizeof(struct csum);
		ump->um_e2fs->e2fs_clustersum = malloc(size, M_EXT2MNT, M_WAITOK);
		lp = ump->um_e2fs->e2fs_maxcluster;
		sump = ump->um_e2fs->e2fs_clustersum;
		for (i = 0; i < ump->um_e2fs->e2fs_gcount; i++, sump++) {
			*lp++ = ump->um_e2fs->e2fs_contigsumsize;
			sump->cs_init = 0;
			sump->cs_sum = malloc((ump->um_e2fs->e2fs_contigsumsize + 1) *
			    sizeof(int32_t), M_EXT2MNT, M_WAITOK | M_ZERO);
		}
	}

	brelse(bp);
	bp = NULL;
	fs = ump->um_e2fs;
	fs->e2fs_ronly = ronly;	/* ronly is set according to mnt_flags */

	/*
	 * If the fs is not mounted read-only, make sure the super block is
	 * always written back on a sync().
	 */
	fs->e2fs_wasvalid = fs->e2fs->e2fs_state & E2FS_ISCLEAN ? 1 : 0;
	if (ronly == 0) {
		fs->e2fs_fmod = 1;	/* mark it modified */
		fs->e2fs->e2fs_state &= ~E2FS_ISCLEAN;	/* set fs invalid */
	}
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
	mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
	mp->mnt_maxsymlinklen = EXT2_MAXSYMLINKLEN;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_cp = cp;

	/*
	 * Setting these parameters allows us to use
	 * ufs_bmap w/o changes!
	 */
	ump->um_nindir = EXT2_ADDR_PER_BLOCK(fs);
	ump->um_bptrtodb = fs->e2fs->e2fs_log_bsize + 1;
	ump->um_seqinc = EXT2_FRAGS_PER_BLOCK(fs);
	if (ronly == 0)
		ext2_sbupdate(ump, MNT_WAIT);
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
	return (0);
out:
	if (bp)
		brelse(bp);
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump) {
		mtx_destroy(EXT2_MTX(ump));
		free(ump->um_e2fs->e2fs_gd, M_EXT2MNT);
		free(ump->um_e2fs->e2fs_contigdirs, M_EXT2MNT);
		free(ump->um_e2fs->e2fs, M_EXT2MNT);
		free(ump->um_e2fs, M_EXT2MNT);
		free(ump, M_EXT2MNT);
		mp->mnt_data = NULL;
	}
	return (error);
}
Example #18
static int
tsec_fdt_attach(device_t dev)
{
	struct tsec_softc *sc;
	phandle_t phy;
	int error = 0;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->node = ofw_bus_get_node(dev);

	/* Get phy address from fdt */
	if (OF_getencprop(sc->node, "phy-handle", &phy, sizeof(phy)) <= 0) {
		device_printf(dev, "PHY not found in device tree");
		return (ENXIO);
	}

	phy = OF_node_from_xref(phy);
	OF_decode_addr(OF_parent(phy), 0, &sc->phy_bst, &sc->phy_bsh);
	OF_getencprop(phy, "reg", &sc->phyaddr, sizeof(sc->phyaddr));

	/* Init timer */
	callout_init(&sc->tsec_callout, 1);

	/* Init locks */
	mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "TSEC TX lock",
	    MTX_DEF);
	mtx_init(&sc->receive_lock, device_get_nameunit(dev), "TSEC RX lock",
	    MTX_DEF);
	mtx_init(&sc->ic_lock, device_get_nameunit(dev), "TSEC IC lock",
	    MTX_DEF);

	/* Allocate IO memory for TSEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);
	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate IO memory range!\n");
		goto fail1;
	}
	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* TSEC attach */
	if (tsec_attach(sc) != 0) {
		device_printf(dev, "could not be configured\n");
		goto fail2;
	}

	/* Set up interrupts (TX/RX/ERR) */
	sc->sc_transmit_irid = TSEC_RID_TXIRQ;
	error = tsec_setup_intr(sc, &sc->sc_transmit_ires,
	    &sc->sc_transmit_ihand, &sc->sc_transmit_irid,
	    tsec_transmit_intr, "TX");
	if (error)
		goto fail2;

	sc->sc_receive_irid = TSEC_RID_RXIRQ;
	error = tsec_setup_intr(sc, &sc->sc_receive_ires,
	    &sc->sc_receive_ihand, &sc->sc_receive_irid,
	    tsec_receive_intr, "RX");
	if (error)
		goto fail3;

	sc->sc_error_irid = TSEC_RID_ERRIRQ;
	error = tsec_setup_intr(sc, &sc->sc_error_ires,
	    &sc->sc_error_ihand, &sc->sc_error_irid,
	    tsec_error_intr, "ERR");
	if (error)
		goto fail4;

	return (0);

fail4:
	tsec_release_intr(sc, sc->sc_receive_ires, sc->sc_receive_ihand,
	    sc->sc_receive_irid, "RX");
fail3:
	tsec_release_intr(sc, sc->sc_transmit_ires, sc->sc_transmit_ihand,
	    sc->sc_transmit_irid, "TX");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->receive_lock);
	mtx_destroy(&sc->transmit_lock);
	return (ENXIO);
}
Example #19
static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* needs to disable interrupt before detaching from cam */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource", 
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}
Example #20
/*
 * Common code for mount and mountroot
 */
static int
mountnfs(struct nfs_args *argp, struct mount *mp, struct sockaddr *nam,
    char *hst, struct vnode **vpp, struct ucred *cred, int nametimeo,
    int negnametimeo)
{
	struct nfsmount *nmp;
	struct nfsnode *np;
	int error;
	struct vattr attrs;

	if (mp->mnt_flag & MNT_UPDATE) {
		nmp = VFSTONFS(mp);
		printf("%s: MNT_UPDATE is no longer handled here\n", __func__);
		free(nam, M_SONAME);
		return (0);
	} else {
		nmp = uma_zalloc(nfsmount_zone, M_WAITOK);
		bzero((caddr_t)nmp, sizeof (struct nfsmount));
		TAILQ_INIT(&nmp->nm_bufq);
		mp->mnt_data = nmp;
		nmp->nm_getinfo = nfs_getnlminfo;
		nmp->nm_vinvalbuf = nfs_vinvalbuf;
	}
	vfs_getnewfsid(mp);
	nmp->nm_mountp = mp;
	mtx_init(&nmp->nm_mtx, "NFSmount lock", NULL, MTX_DEF);			

	/*
	 * V2 can only handle 32 bit filesizes.  A 4GB-1 limit may be too
	 * high, depending on whether we end up with negative offsets in
	 * the client or server somewhere.  2GB-1 may be safer.
	 *
	 * For V3, nfs_fsinfo will adjust this as necessary.  Assume maximum
	 * that we can handle until we find out otherwise.
	 */
	if ((argp->flags & NFSMNT_NFSV3) == 0)
		nmp->nm_maxfilesize = 0xffffffffLL;
	else
		nmp->nm_maxfilesize = OFF_MAX;

	nmp->nm_timeo = NFS_TIMEO;
	nmp->nm_retry = NFS_RETRANS;
	if ((argp->flags & NFSMNT_NFSV3) && argp->sotype == SOCK_STREAM) {
		nmp->nm_wsize = nmp->nm_rsize = NFS_MAXDATA;
	} else {
		nmp->nm_wsize = NFS_WSIZE;
		nmp->nm_rsize = NFS_RSIZE;
	}
	nmp->nm_wcommitsize = hibufspace / (desiredvnodes / 1000);
	nmp->nm_readdirsize = NFS_READDIRSIZE;
	nmp->nm_numgrps = NFS_MAXGRPS;
	nmp->nm_readahead = NFS_DEFRAHEAD;
	nmp->nm_deadthresh = NFS_MAXDEADTHRESH;
	nmp->nm_nametimeo = nametimeo;
	nmp->nm_negnametimeo = negnametimeo;
	nmp->nm_tprintf_delay = nfs_tprintf_delay;
	if (nmp->nm_tprintf_delay < 0)
		nmp->nm_tprintf_delay = 0;
	nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay;
	if (nmp->nm_tprintf_initial_delay < 0)
		nmp->nm_tprintf_initial_delay = 0;
	nmp->nm_fhsize = argp->fhsize;
	bcopy((caddr_t)argp->fh, (caddr_t)nmp->nm_fh, argp->fhsize);
	bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
	nmp->nm_nam = nam;
	/* Set up the sockets and per-host congestion */
	nmp->nm_sotype = argp->sotype;
	nmp->nm_soproto = argp->proto;
	nmp->nm_rpcops = &nfs_rpcops;

	nfs_decode_args(mp, nmp, argp, hst);

	/*
	 * For Connection based sockets (TCP,...) defer the connect until
	 * the first request, in case the server is not responding.
	 */
	if (nmp->nm_sotype == SOCK_DGRAM &&
		(error = nfs_connect(nmp)))
		goto bad;

	/*
	 * This is silly, but it has to be set so that vinifod() works.
	 * We do not want to do an nfs_statfs() here since we can get
	 * stuck on a dead server and we are holding a lock on the mount
	 * point.
	 */
	mtx_lock(&nmp->nm_mtx);
	mp->mnt_stat.f_iosize = nfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);
	/*
	 * A reference count is needed on the nfsnode representing the
	 * remote root.  If this object is not persistent, then backward
	 * traversals of the mount point (i.e. "..") will not work if
	 * the nfsnode gets flushed out of the cache. Ufs does not have
	 * this problem, because one can identify root inodes by their
	 * number == ROOTINO (2).
	 */
	error = nfs_nget(mp, (nfsfh_t *)nmp->nm_fh, nmp->nm_fhsize, &np, LK_EXCLUSIVE);
	if (error)
		goto bad;
	*vpp = NFSTOV(np);

	/*
	 * Get file attributes and transfer parameters for the
	 * mountpoint.  This has the side effect of filling in
	 * (*vpp)->v_type with the correct value.
	 */
	if (argp->flags & NFSMNT_NFSV3)
		nfs_fsinfo(nmp, *vpp, curthread->td_ucred, curthread);
	else
		VOP_GETATTR(*vpp, &attrs, curthread->td_ucred);

	/*
	 * Lose the lock but keep the ref.
	 */
	VOP_UNLOCK(*vpp, 0);

	return (0);
bad:
	nfs_disconnect(nmp);
	mtx_destroy(&nmp->nm_mtx);
	uma_zfree(nfsmount_zone, nmp);
	free(nam, M_SONAME);
	return (error);
}
Example #21
int
iris_bo_busy(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy = { .handle = bo->gem_handle };

   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

int
iris_bo_madvise(struct iris_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv = {
      .handle = bo->gem_handle,
      .madv = state,
      .retained = 1,
   };

   drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
iris_bo_cache_purge_bucket(struct iris_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
      if (iris_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

static struct iris_bo *
bo_calloc(void)
{
   struct iris_bo *bo = calloc(1, sizeof(*bo));
   if (bo) {
      bo->hash = _mesa_hash_pointer(bo);
   }
   return bo;
}

static struct iris_bo *
bo_alloc_internal(struct iris_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  enum iris_memory_zone memzone,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride)
{
   struct iris_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool zeroed = false;

   if (flags & BO_ALLOC_ZEROED)
      zeroed = true;

   if ((flags & BO_ALLOC_COHERENT) && !bufmgr->has_llc) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
      bucket = NULL;
      goto skip_cache;
   }

   /* Round the allocated size up to a power of two number of pages. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = MAX2(ALIGN(size, page_size), page_size);
   } else {
      bo_size = bucket->size;
   }

   mtx_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      /* If the last BO in the cache is idle, then reuse it.  Otherwise,
       * allocate a fresh buffer to avoid stalling.
       */
      bo = LIST_ENTRY(struct iris_bo, bucket->head.next, head);
      if (!iris_bo_busy(bo)) {
         alloc_from_cache = true;
         list_del(&bo->head);
      }

      if (alloc_from_cache) {
         if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            iris_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }

         if (zeroed) {
            void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
            if (!map) {
               bo_free(bo);
               goto retry;
            }
            memset(map, 0, bo_size);
         }
      }
   }

   if (alloc_from_cache) {
      /* If the cached BO isn't in the right memory zone, free the old
       * memory and assign it a new address.
       */
      if (memzone != iris_memzone_for_address(bo->gtt_offset)) {
         vma_free(bufmgr, bo->gtt_offset, bo->size);
         bo->gtt_offset = 0ull;
      }
   } else {
skip_cache:
      bo = bo_calloc();
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      struct drm_i915_gem_create create = { .size = bo_size };

      /* All new BOs we get from the kernel are zeroed, so we don't need to
       * worry about that here.
       */
      ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;

      bo->bufmgr = bufmgr;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;

      /* Calling set_domain() will allocate pages for the BO outside of the
       * struct mutex lock in the kernel, which is more efficient than waiting
       * to create them during the first execbuf that uses the BO.
       */
      struct drm_i915_gem_set_domain sd = {
         .handle = bo->gem_handle,
         .read_domains = I915_GEM_DOMAIN_CPU,
         .write_domain = 0,
      };

      if (drm_ioctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0)
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = bucket && bufmgr->bo_reuse;
   bo->cache_coherent = bufmgr->has_llc;
   bo->index = -1;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;

   /* By default, capture all driver-internal buffers like shader kernels,
    * surface states, dynamic states, border colors, and so on.
    */
   if (memzone < IRIS_MEMZONE_OTHER)
      bo->kflags |= EXEC_OBJECT_CAPTURE;

   if (bo->gtt_offset == 0ull) {
      bo->gtt_offset = vma_alloc(bufmgr, memzone, bo->size, 1);

      if (bo->gtt_offset == 0ull)
         goto err_free;
   }

   mtx_unlock(&bufmgr->lock);

   if ((flags & BO_ALLOC_COHERENT) && !bo->cache_coherent) {
      struct drm_i915_gem_caching arg = {
         .handle = bo->gem_handle,
         .caching = 1,
      };
      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg) == 0) {
         bo->cache_coherent = true;
         bo->reusable = false;
      }
   }

   DBG("bo_create: buf %d (%s) (%s memzone) %llub\n", bo->gem_handle,
       bo->name, memzone_name(memzone), (unsigned long long) size);

   return bo;

err_free:
   bo_free(bo);
err:
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

struct iris_bo *
iris_bo_alloc(struct iris_bufmgr *bufmgr,
              const char *name,
              uint64_t size,
              enum iris_memory_zone memzone)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            0, I915_TILING_NONE, 0);
}

struct iris_bo *
iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr, const char *name,
                    uint64_t size, enum iris_memory_zone memzone,
                    uint32_t tiling_mode, uint32_t pitch, unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, memzone,
                            flags, tiling_mode, pitch);
}
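
A minimal usage sketch for the two allocation wrappers above, assuming an already-initialized struct iris_bufmgr (the bufmgr pointer, buffer name, and size below are illustrative). It relies only on iris_bo_alloc(), IRIS_MEMZONE_OTHER, and iris_bo_unreference(), all of which appear elsewhere in this example:

/* Illustrative sketch only: allocate a scratch buffer and drop the
 * reference so the BO can return to the bufmgr's reuse cache.
 */
static void
example_scratch_alloc(struct iris_bufmgr *bufmgr)
{
   struct iris_bo *bo =
      iris_bo_alloc(bufmgr, "example scratch", 64 * 1024, IRIS_MEMZONE_OTHER);
   if (bo == NULL)
      return;        /* allocation can fail */

   /* ... fill the buffer, record bo->gtt_offset somewhere, etc. ... */

   iris_bo_unreference(bo);   /* may cache the BO instead of freeing it */
}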

struct iris_bo *
iris_bo_create_userptr(struct iris_bufmgr *bufmgr, const char *name,
                       void *ptr, size_t size,
                       enum iris_memory_zone memzone)
{
   struct iris_bo *bo;

   bo = bo_calloc();
   if (!bo)
      return NULL;

   struct drm_i915_gem_userptr arg = {
      .user_ptr = (uintptr_t)ptr,
      .user_size = size,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
      goto err_free;
   bo->gem_handle = arg.handle;

   /* Check the buffer for validity before we try and use it in a batch */
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = I915_GEM_DOMAIN_CPU,
   };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
      goto err_close;

   bo->name = name;
   bo->size = size;
   bo->map_cpu = ptr;

   bo->bufmgr = bufmgr;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, memzone, size, 1);
   if (bo->gtt_offset == 0ull)
      goto err_close;

   p_atomic_set(&bo->refcount, 1);
   bo->userptr = true;
   bo->cache_coherent = true;
   bo->index = -1;
   bo->idle = true;

   return bo;

err_close:
   drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &bo->gem_handle);
err_free:
   free(bo);
   return NULL;
}

/**
 * Returns a iris_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct iris_bo *
iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                             const char *name, unsigned int handle)
{
   struct iris_bo *bo;

   /* At the moment most applications only have a few named bo.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named. And since X returns the
    * alternating names for the front/back buffer a linear search
    * provides a sufficiently fast match.
    */
   mtx_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   struct drm_gem_open open_arg = { .name = handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->gtt_offset = 0;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
bo_free(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->map_cpu && !bo->userptr) {
      VG_NOACCESS(bo->map_cpu, bo->size);
      munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG_NOACCESS(bo->map_wc, bo->size);
      munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      VG_NOACCESS(bo->map_gtt, bo->size);
      munmap(bo->map_gtt, bo->size);
   }

   if (bo->external) {
      struct hash_entry *entry;

      if (bo->global_name) {
         entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
         _mesa_hash_table_remove(bufmgr->name_table, entry);
      }

      entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
      _mesa_hash_table_remove(bufmgr->handle_table, entry);
   }

   /* Close this object */
   struct drm_gem_close close = { .handle = bo->gem_handle };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }

   vma_free(bo->bufmgr, bo->gtt_offset, bo->size);

   free(bo);
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct iris_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

static void
bo_unreference_final(struct iris_bo *bo, time_t time)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   bucket = NULL;
   if (bo->reusable)
      bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bucket && iris_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;
      bo->name = NULL;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
iris_bo_unreference(struct iris_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct iris_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      mtx_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      mtx_unlock(&bufmgr->lock);
   }
}

static void
bo_wait_with_stall_warning(struct pipe_debug_callback *dbg,
                           struct iris_bo *bo,
                           const char *action)
{
   bool busy = dbg && !bo->idle;
   double elapsed = unlikely(busy) ? -get_time() : 0.0;

   iris_bo_wait_rendering(bo);

   if (unlikely(busy)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */ {
         perf_debug(dbg, "%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
      }
   }
}

static void
print_flags(unsigned flags)
{
   if (flags & MAP_READ)
      DBG("READ ");
   if (flags & MAP_WRITE)
      DBG("WRITE ");
   if (flags & MAP_ASYNC)
      DBG("ASYNC ");
   if (flags & MAP_PERSISTENT)
      DBG("PERSISTENT ");
   if (flags & MAP_COHERENT)
      DBG("COHERENT ");
   if (flags & MAP_RAW)
      DBG("RAW ");
   DBG("\n");
}

static void *
iris_bo_map_cpu(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* We disallow CPU maps for writing to non-coherent buffers, as the
    * CPU map can become invalidated when a batch is flushed out, which
    * can happen at unpredictable times.  You should use WC maps instead.
    */
   assert(bo->cache_coherent || !(flags & MAP_WRITE));

   if (!bo->map_cpu) {
      DBG("iris_bo_map_cpu: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }
      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_cpu, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_cpu);

   DBG("iris_bo_map_cpu: %d (%s) -> %p, ", bo->gem_handle, bo->name,
       bo->map_cpu);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "CPU mapping");
   }

   if (!bo->cache_coherent && !bo->bufmgr->has_llc) {
      /* If we're reusing an existing CPU mapping, the CPU caches may
       * contain stale data from the last time we read from that mapping.
       * (With the BO cache, it might even be data from a previous buffer!)
       * Even if it's a brand new mapping, the kernel may have zeroed the
       * buffer via CPU writes.
       *
       * We need to invalidate those cachelines so that we see the latest
       * contents, and so long as we only read from the CPU mmap we do not
       * need to write those cachelines back afterwards.
       *
       * On LLC, the empirical evidence suggests that writes from the GPU
       * that bypass the LLC (i.e. for scanout) do *invalidate* the CPU
       * cachelines. (Other reads, such as the display engine, bypass the
       * LLC entirely requiring us to keep dirty pixels for the scanout
       * out of any cache.)
       */
      gen_invalidate_range(bo->map_cpu, bo->size);
   }

   return bo->map_cpu;
}

static void *
iris_bo_map_wc(struct pipe_debug_callback *dbg,
               struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->map_wc) {
      DBG("iris_bo_map_wc: %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap mmap_arg = {
         .handle = bo->gem_handle,
         .size = bo->size,
         .flags = I915_MMAP_WC,
      };
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      void *map = (void *) (uintptr_t) mmap_arg.addr_ptr;
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_wc, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_wc);

   DBG("iris_bo_map_wc: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->map_wc);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "WC mapping");
   }

   return bo->map_wc;
}

/**
 * Perform an uncached mapping via the GTT.
 *
 * Write access through the GTT is not quite fully coherent. On low power
 * systems especially, like modern Atoms, we can observe reads from RAM before
 * the write via GTT has landed. A write memory barrier that flushes the Write
 * Combining Buffer (i.e. sfence/mfence) is not sufficient to order the later
 * read after the write as the GTT write suffers a small delay through the GTT
 * indirection. The kernel uses an uncached mmio read to ensure the GTT write
 * is ordered with reads (either by the GPU, WB or WC) and unconditionally
 * flushes prior to execbuf submission. However, if we are not informing the
 * kernel about our GTT writes, it will not flush before earlier access, such
 * as when using the cmdparser. Similarly, we need to be careful if we should
 * ever issue a CPU read immediately following a GTT write.
 *
 * Telling the kernel about write access also has one more important
 * side-effect. Upon receiving notification about the write, it cancels any
 * scanout buffering for FBC/PSR and friends. Later FBC/PSR is then flushed by
 * either SW_FINISH or DIRTYFB. The presumption is that we never write to the
 * actual scanout via a mmaping, only to a backbuffer and so all the FBC/PSR
 * tracking is handled on the buffer exchange instead.
 */
static void *
iris_bo_map_gtt(struct pipe_debug_callback *dbg,
                struct iris_bo *bo, unsigned flags)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      DBG("bo_map_gtt: mmap %d (%s)\n", bo->gem_handle, bo->name);

      struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = bo->gem_handle };

      /* Get the fake offset back... */
      int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* and mmap it. */
      void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (map == MAP_FAILED) {
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         return NULL;
      }

      /* We don't need to use VALGRIND_MALLOCLIKE_BLOCK because Valgrind will
       * already intercept this mmap call. However, for consistency between
       * all the mmap paths, we mark the pointer as defined now and mark it
       * as inaccessible afterwards.
       */
      VG_DEFINED(map, bo->size);

      if (p_atomic_cmpxchg(&bo->map_gtt, NULL, map)) {
         VG_NOACCESS(map, bo->size);
         munmap(map, bo->size);
      }
   }
   assert(bo->map_gtt);

   DBG("bo_map_gtt: %d (%s) -> %p, ", bo->gem_handle, bo->name, bo->map_gtt);
   print_flags(flags);

   if (!(flags & MAP_ASYNC)) {
      bo_wait_with_stall_warning(dbg, bo, "GTT mapping");
   }

   return bo->map_gtt;
}

static bool
can_map_cpu(struct iris_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   /* Even if the buffer itself is not cache-coherent (such as a scanout), on
    * an LLC platform reads always are coherent (as they are performed via the
    * central system agent). It is just the writes that we need to take special
    * care to ensure that land in main memory and not stick in the CPU cache.
    */
   if (!(flags & MAP_WRITE) && bo->bufmgr->has_llc)
      return true;

   /* If PERSISTENT or COHERENT are set, the mmapping needs to remain valid
    * across batch flushes where the kernel will change cache domains of the
    * bo, invalidating continued access to the CPU mmap on non-LLC device.
    *
    * Similarly, ASYNC typically means that the buffer will be accessed via
    * both the CPU and the GPU simultaneously.  Batches may be executed that
    * use the BO even while it is mapped.  While OpenGL technically disallows
    * most drawing while non-persistent mappings are active, we may still use
    * the GPU for blits or other operations, causing batches to happen at
    * inconvenient times.
    *
    * If RAW is set, we expect the caller to be able to handle a WC buffer
    * more efficiently than the involuntary clflushes.
    */
   if (flags & (MAP_PERSISTENT | MAP_COHERENT | MAP_ASYNC | MAP_RAW))
      return false;

   return !(flags & MAP_WRITE);
}

void *
iris_bo_map(struct pipe_debug_callback *dbg,
            struct iris_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return iris_bo_map_gtt(dbg, bo, flags);

   void *map;

   if (can_map_cpu(bo, flags))
      map = iris_bo_map_cpu(dbg, bo, flags);
   else
      map = iris_bo_map_wc(dbg, bo, flags);

   /* Allow the attempt to fail by falling back to the GTT where necessary.
    *
    * Not every buffer can be mmaped directly using the CPU (or WC), for
    * example buffers that wrap stolen memory or are imported from other
    * devices. For those, we have little choice but to use a GTT mmapping.
    * However, if we use a slow GTT mmapping for reads where we expected fast
    * access, that order of magnitude difference in throughput will be clearly
    * expressed by angry users.
    *
    * We skip MAP_RAW because we want to avoid map_gtt's fence detiling.
    */
   if (!map && !(flags & MAP_RAW)) {
      perf_debug(dbg, "Fallback GTT mapping for %s with access flags %x\n",
                 bo->name, flags);
      map = iris_bo_map_gtt(dbg, bo, flags);
   }

   return map;
}
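
A hedged sketch of a caller-side upload through iris_bo_map(). The helper name is illustrative, string.h is assumed for memcpy(), and passing NULL for the debug callback mirrors the call made in bo_alloc_internal() above:

/* Illustrative sketch only: write data through whichever mapping path
 * iris_bo_map() selects (CPU, WC, or GTT).  The mapping is cached on the
 * BO and torn down later in bo_free(), so there is no unmap call here.
 */
static bool
example_upload(struct iris_bo *bo, const void *data, size_t len)
{
   void *map = iris_bo_map(NULL, bo, MAP_WRITE);
   if (map == NULL)
      return false;

   memcpy(map, data, len);   /* len must not exceed bo->size */
   return true;
}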

/** Waits for all GPU rendering with the object to have completed. */
void
iris_bo_wait_rendering(struct iris_bo *bo)
{
   /* We require a kernel recent enough for WAIT_IOCTL support.
    * See intel_init_bufmgr()
    */
   iris_bo_wait(bo, -1);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to iris_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is the internal locking semantics are different (this variant does
 * not hold the lock for the duration of the wait). This makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is re-issued via another thread, or a flinked
 * handle. Userspace must make sure this race does not occur if such precision
 * is important.
 *
 * Note that some kernels have broken the infinite wait for negative values
 * promise; upgrade to the latest stable kernel if this is the case.
 */
int
iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   /* If we know it's idle, don't bother with the kernel round trip */
   if (bo->idle && !bo->external)
      return 0;

   struct drm_i915_gem_wait wait = {
      .bo_handle = bo->gem_handle,
      .timeout_ns = timeout_ns,
   };
   int ret = drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return -errno;

   bo->idle = true;

   return ret;
}
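
A small sketch of the two common ways to call iris_bo_wait() per the comment above: a zero timeout to poll, and -1 to block indefinitely (the helper names are illustrative):

/* Illustrative sketch only. */
static bool
example_is_idle(struct iris_bo *bo)
{
   return iris_bo_wait(bo, 0) == 0;   /* -ETIME would mean "still busy" */
}

static void
example_wait_forever(struct iris_bo *bo)
{
   (void) iris_bo_wait(bo, -1);       /* equivalent to iris_bo_wait_rendering() */
}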

void
iris_bufmgr_destroy(struct iris_bufmgr *bufmgr)
{
   mtx_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct iris_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   for (int z = 0; z < IRIS_MEMZONE_COUNT; z++) {
      if (z != IRIS_MEMZONE_BINDER)
         util_vma_heap_finish(&bufmgr->vma_allocator[z]);
   }

   free(bufmgr);
}

static int
bo_set_tiling_internal(struct iris_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open code
       * drm_ioctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

struct iris_bo *
iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd)
{
   uint32_t handle;
   struct iris_bo *bo;

   mtx_lock(&bufmgr->lock);
   int ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("import_dmabuf: failed to obtain handle from fd: %s\n",
          strerror(errno));
      mtx_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us. Just as
    * for named buffers, we must not create two bo's pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      iris_bo_reference(bo);
      goto out;
   }

   bo = bo_calloc();
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we fall back to the
    * provided (estimated or guess size). */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;
   bo->external = true;
   bo->kflags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS | EXEC_OBJECT_PINNED;
   bo->gtt_offset = vma_alloc(bufmgr, IRIS_MEMZONE_OTHER, bo->size, 1);

   struct drm_i915_gem_get_tiling get_tiling = { .handle = bo->gem_handle };
   if (drm_ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   mtx_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   mtx_unlock(&bufmgr->lock);
   return NULL;
}

static void
iris_bo_make_external_locked(struct iris_bo *bo)
{
   if (!bo->external) {
      _mesa_hash_table_insert(bo->bufmgr->handle_table, &bo->gem_handle, bo);
      bo->external = true;
   }
}

static void
iris_bo_make_external(struct iris_bo *bo)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (bo->external)
      return;

   mtx_lock(&bufmgr->lock);
   iris_bo_make_external_locked(bo);
   mtx_unlock(&bufmgr->lock);
}

int
iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   iris_bo_make_external(bo);

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

uint32_t
iris_bo_export_gem_handle(struct iris_bo *bo)
{
   iris_bo_make_external(bo);

   return bo->gem_handle;
}

int
iris_bo_flink(struct iris_bo *bo, uint32_t *name)
{
   struct iris_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink = { .handle = bo->gem_handle };

      if (drm_ioctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      mtx_lock(&bufmgr->lock);
      if (!bo->global_name) {
         iris_bo_make_external_locked(bo);
         bo->global_name = flink.name;
         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      mtx_unlock(&bufmgr->lock);

      bo->reusable = false;
   }

   *name = bo->global_name;
   return 0;
}
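
A hedged sketch of a dma-buf round trip built only from the export/import helpers shown in this example; the function name is illustrative and close() assumes unistd.h:

/* Illustrative sketch only: export a BO as a dma-buf fd and import it
 * back.  In real use the fd would be handed to another process or API;
 * re-importing into the same bufmgr just returns a new reference to the
 * same BO, so the fd can be closed immediately afterwards.
 */
static struct iris_bo *
example_dmabuf_roundtrip(struct iris_bufmgr *bufmgr, struct iris_bo *bo)
{
   int fd;

   if (iris_bo_export_dmabuf(bo, &fd) != 0)
      return NULL;

   struct iris_bo *imported = iris_bo_import_dmabuf(bufmgr, fd);
   close(fd);
   return imported;
}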

static void
add_bucket(struct iris_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;

   assert(bucket_for_size(bufmgr, size) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size - 2048) == &bufmgr->cache_bucket[i]);
   assert(bucket_for_size(bufmgr, size + 1) != &bufmgr->cache_bucket[i]);
}

static void
init_cache_buckets(struct iris_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, PAGE_SIZE);
   add_bucket(bufmgr, PAGE_SIZE * 2);
   add_bucket(bufmgr, PAGE_SIZE * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * PAGE_SIZE; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}
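
To make the bucket scheme concrete, here is a naive lookup that satisfies the asserts in add_bucket() by scanning for the smallest cached size that can hold a request. It is an illustration only, not the driver's actual bucket_for_size():

/* Illustrative sketch only: buckets are added in increasing size order,
 * so the first bucket whose size covers the request is the best fit.
 * Requests larger than the biggest bucket are not cached (NULL).
 */
static struct bo_cache_bucket *
example_bucket_for_size(struct iris_bufmgr *bufmgr, uint64_t size)
{
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      if (bufmgr->cache_bucket[i].size >= size)
         return &bufmgr->cache_bucket[i];
   }
   return NULL;
}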
Example #22
int
smc_detach(device_t dev)
{
	int			type;
	struct smc_softc	*sc;

	sc = device_get_softc(dev);
	SMC_LOCK(sc);
	smc_stop(sc);
	SMC_UNLOCK(sc);

	if (sc->smc_ifp != NULL) {
		ether_ifdetach(sc->smc_ifp);
	}
	
	callout_drain(&sc->smc_watchdog);
	callout_drain(&sc->smc_mii_tick_ch);
	
#ifdef DEVICE_POLLING
	if (sc->smc_ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->smc_ifp);
#endif

	if (sc->smc_ih != NULL)
		bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);

	if (sc->smc_tq != NULL) {
		taskqueue_drain(sc->smc_tq, &sc->smc_intr);
		taskqueue_drain(sc->smc_tq, &sc->smc_rx);
		taskqueue_drain(sc->smc_tq, &sc->smc_tx);
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}

	if (sc->smc_ifp != NULL) {
		if_free(sc->smc_ifp);
	}

	if (sc->smc_miibus != NULL) {
		device_delete_child(sc->smc_dev, sc->smc_miibus);
		bus_generic_detach(sc->smc_dev);
	}

	if (sc->smc_reg != NULL) {
		type = SYS_RES_IOPORT;
		if (sc->smc_usemem)
			type = SYS_RES_MEMORY;

		bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
		    sc->smc_reg);
	}

	if (sc->smc_irq != NULL)
		bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
		   sc->smc_irq);

	if (mtx_initialized(&sc->smc_mtx))
		mtx_destroy(&sc->smc_mtx);

	return (0);
}
Example #23
/*
 * Function name:	tw_osli_free_resources
 * Description:		Performs clean-up at the time of going down.
 *
 * Input:		sc	-- ptr to OSL internal ctlr context
 * Output:		None
 * Return value:	None
 */
static TW_VOID
tw_osli_free_resources(struct twa_softc *sc)
{
	struct tw_osli_req_context	*req;
	TW_INT32			error = 0;

	tw_osli_dbg_dprintf(3, sc, "entered");

	/* Detach from CAM */
	tw_osli_cam_detach(sc);

	if (sc->req_ctx_buf)
		while ((req = tw_osli_req_q_remove_head(sc, TW_OSLI_FREE_Q)) !=
			NULL) {
			mtx_destroy(req->ioctl_wake_timeout_lock);

			if ((error = bus_dmamap_destroy(sc->dma_tag,
					req->dma_map)))
				tw_osli_dbg_dprintf(1, sc,
					"dmamap_destroy(dma) returned %d",
					error);
		}

	if ((sc->ioctl_tag) && (sc->ioctl_map))
		if ((error = bus_dmamap_destroy(sc->ioctl_tag, sc->ioctl_map)))
			tw_osli_dbg_dprintf(1, sc,
				"dmamap_destroy(ioctl) returned %d", error);

	/* Free all memory allocated so far. */
	if (sc->req_ctx_buf)
		free(sc->req_ctx_buf, TW_OSLI_MALLOC_CLASS);

	if (sc->non_dma_mem)
		free(sc->non_dma_mem, TW_OSLI_MALLOC_CLASS);

	if (sc->dma_mem) {
		bus_dmamap_unload(sc->cmd_tag, sc->cmd_map);
		bus_dmamem_free(sc->cmd_tag, sc->dma_mem,
			sc->cmd_map);
	}
	if (sc->cmd_tag)
		if ((error = bus_dma_tag_destroy(sc->cmd_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(cmd) returned %d", error);

	if (sc->dma_tag)
		if ((error = bus_dma_tag_destroy(sc->dma_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(dma) returned %d", error);

	if (sc->ioctl_tag)
		if ((error = bus_dma_tag_destroy(sc->ioctl_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(ioctl) returned %d", error);

	if (sc->parent_tag)
		if ((error = bus_dma_tag_destroy(sc->parent_tag)))
			tw_osli_dbg_dprintf(1, sc,
				"dma_tag_destroy(parent) returned %d", error);


	/* Disconnect the interrupt handler. */
	if ((error = twa_teardown_intr(sc)))
			tw_osli_dbg_dprintf(1, sc,
				"teardown_intr returned %d", error);

	if (sc->irq_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_IRQ, sc->irq_res_id, sc->irq_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(irq) returned %d", error);


	/* Release the register window mapping. */
	if (sc->reg_res != NULL)
		if ((error = bus_release_resource(sc->bus_dev,
				SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res)))
			tw_osli_dbg_dprintf(1, sc,
				"release_resource(io) returned %d", error);


	/* Destroy the control device. */
	if (sc->ctrl_dev != (struct cdev *)NULL)
		destroy_dev(sc->ctrl_dev);

	if ((error = sysctl_ctx_free(&sc->sysctl_ctxt)))
		tw_osli_dbg_dprintf(1, sc,
			"sysctl_ctx_free returned %d", error);

}
Example #24
int
hme_pci_attach(device_t dev)
{
	struct hme_pci_softc *hsc;
	struct hme_softc *sc;
	bus_space_tag_t	memt;
	bus_space_handle_t memh;
	int i, error = 0;
#if !(defined(__powerpc__) || defined(__sparc64__))
	device_t *children, ebus_dev;
	struct resource *ebus_rres;
	int j, slot;
#endif

	pci_enable_busmaster(dev);
	/*
	 * Some Sun HMEs do have their intpin register bogusly set to 0,
	 * although it should be 1.  Correct that.
	 */
	if (pci_get_intpin(dev) == 0)
		pci_set_intpin(dev, 1);

	hsc = device_get_softc(dev);
	sc = &hsc->hsc_hme;
	sc->sc_dev = dev;
	sc->sc_flags |= HME_PCI;
	mtx_init(&sc->sc_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map five register banks:
	 *
	 *	bank 0: HME SEB registers:	+0x0000
	 *	bank 1: HME ETX registers:	+0x2000
	 *	bank 2: HME ERX registers:	+0x4000
	 *	bank 3: HME MAC registers:	+0x6000
	 *	bank 4: HME MIF registers:	+0x7000
	 *
	 */
	i = PCIR_BAR(0);
	hsc->hsc_sres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_sres == NULL) {
		device_printf(dev, "could not map device registers\n");
		error = ENXIO;
		goto fail_mtx;
	}
	i = 0;
	hsc->hsc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &i, RF_SHAREABLE | RF_ACTIVE);
	if (hsc->hsc_ires == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		error = ENXIO;
		goto fail_sres;
	}
	memt = rman_get_bustag(hsc->hsc_sres);
	memh = rman_get_bushandle(hsc->hsc_sres);
	sc->sc_sebt = sc->sc_etxt = sc->sc_erxt = sc->sc_mact = sc->sc_mift =
	    memt;
	bus_space_subregion(memt, memh, 0x0000, 0x1000, &sc->sc_sebh);
	bus_space_subregion(memt, memh, 0x2000, 0x1000, &sc->sc_etxh);
	bus_space_subregion(memt, memh, 0x4000, 0x1000, &sc->sc_erxh);
	bus_space_subregion(memt, memh, 0x6000, 0x1000, &sc->sc_mach);
	bus_space_subregion(memt, memh, 0x7000, 0x1000, &sc->sc_mifh);

#if defined(__powerpc__) || defined(__sparc64__)
	OF_getetheraddr(dev, sc->sc_enaddr);
#else
	/*
	 * Dig out VPD (vital product data) and read NA (network address).
	 *
	 * The PCI HME is a PCIO chip, which is composed of two functions:
	 *	function 0: PCI-EBus2 bridge, and
	 *	function 1: HappyMeal Ethernet controller.
	 *
	 * The VPD of HME resides in the Boot PROM (PCI FCode) attached
	 * to the EBus bridge and can't be accessed via the PCI capability
	 * pointer.
	 * ``Writing FCode 3.x Programs'' (newer ones, dated 1997 and later)
	 * chapter 2 describes the data structure.
	 *
	 * We don't have a MI EBus driver since no EBus device exists
	 * (besides the FCode PROM) on add-on HME boards.  The ``no driver
	 * attached'' message for function 0 therefore is what is expected.
	 */

#define	PCI_ROMHDR_SIZE			0x1c
#define	PCI_ROMHDR_SIG			0x00
#define	PCI_ROMHDR_SIG_MAGIC		0xaa55		/* little endian */
#define	PCI_ROMHDR_PTR_DATA		0x18
#define	PCI_ROM_SIZE			0x18
#define	PCI_ROM_SIG			0x00
#define	PCI_ROM_SIG_MAGIC		0x52494350	/* "PCIR", endian */
							/* reversed */
#define	PCI_ROM_VENDOR			0x04
#define	PCI_ROM_DEVICE			0x06
#define	PCI_ROM_PTR_VPD			0x08
#define	PCI_VPDRES_BYTE0		0x00
#define	PCI_VPDRES_ISLARGE(x)		((x) & 0x80)
#define	PCI_VPDRES_LARGE_NAME(x)	((x) & 0x7f)
#define	PCI_VPDRES_TYPE_VPD		0x10		/* large */
#define	PCI_VPDRES_LARGE_LEN_LSB	0x01
#define	PCI_VPDRES_LARGE_LEN_MSB	0x02
#define	PCI_VPDRES_LARGE_DATA		0x03
#define	PCI_VPD_SIZE			0x03
#define	PCI_VPD_KEY0			0x00
#define	PCI_VPD_KEY1			0x01
#define	PCI_VPD_LEN			0x02
#define	PCI_VPD_DATA			0x03

#define	HME_ROM_READ_N(n, offs)	bus_space_read_ ## n (memt, memh, (offs))
#define	HME_ROM_READ_1(offs)	HME_ROM_READ_N(1, (offs))
#define	HME_ROM_READ_2(offs)	HME_ROM_READ_N(2, (offs))
#define	HME_ROM_READ_4(offs)	HME_ROM_READ_N(4, (offs))

	/* Search accompanying EBus bridge. */
	slot = pci_get_slot(dev);
	if (device_get_children(device_get_parent(dev), &children, &i) != 0) {
		device_printf(dev, "could not get children\n");
		error = ENXIO;
		goto fail_sres;
	}
	ebus_dev = NULL;
	for (j = 0; j < i; j++) {
		if (pci_get_class(children[j]) == PCIC_BRIDGE &&
		    pci_get_vendor(children[j]) == PCI_VENDOR_SUN &&
		    pci_get_device(children[j]) == PCI_PRODUCT_SUN_EBUS &&
		    pci_get_slot(children[j]) == slot) {
			ebus_dev = children[j];
			break;
		}
	}
	if (ebus_dev == NULL) {
		device_printf(dev, "could not find EBus bridge\n");
		error = ENXIO;
		goto fail_children;
	}

	/* Map EBus bridge PROM registers. */
	i = PCIR_BAR(0);
	if ((ebus_rres = bus_alloc_resource_any(ebus_dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE)) == NULL) {
		device_printf(dev, "could not map PROM registers\n");
		error = ENXIO;
		goto fail_children;
	}
	memt = rman_get_bustag(ebus_rres);
	memh = rman_get_bushandle(ebus_rres);

	/* Read PCI Expansion ROM header. */
	if (HME_ROM_READ_2(PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC ||
	    (i = HME_ROM_READ_2(PCI_ROMHDR_PTR_DATA)) < PCI_ROMHDR_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM header\n");
		error = ENXIO;
		goto fail_rres;
	}

	/* Read PCI Expansion ROM data. */
	if (HME_ROM_READ_4(i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC ||
	    HME_ROM_READ_2(i + PCI_ROM_VENDOR) != pci_get_vendor(dev) ||
	    HME_ROM_READ_2(i + PCI_ROM_DEVICE) != pci_get_device(dev) ||
	    (j = HME_ROM_READ_2(i + PCI_ROM_PTR_VPD)) < i + PCI_ROM_SIZE) {
		device_printf(dev, "unexpected PCI Expansion ROM data\n");
		error = ENXIO;
		goto fail_rres;
	}

	/*
	 * Read PCI VPD.
	 * SUNW,hme cards have a single large resource VPD-R tag
	 * containing one NA.  SUNW,qfe cards have four large resource
	 * VPD-R tags containing one NA each (all four HME chips share
	 * the same PROM).
	 * The VPD used on both cards is not in PCI 2.2 standard format
	 * however.  The length in the resource header is in big endian
	 * and the end tag is non-standard (0x79) and followed by an
	 * all-zero "checksum" byte.  Sun calls this a "Fresh Choice
	 * Ethernet" VPD...
	 */
	/* Look at the end tag to determine whether this is a VPD with 4 NAs. */
	if (HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE +
	    ETHER_ADDR_LEN) != 0x79 &&
	    HME_ROM_READ_1(j + 4 * (PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE +
	    ETHER_ADDR_LEN)) == 0x79)
		/* Use the Nth NA for the Nth HME on this SUNW,qfe. */
		j += slot * (PCI_VPDRES_LARGE_DATA + PCI_VPD_SIZE +
		    ETHER_ADDR_LEN);
	if (PCI_VPDRES_ISLARGE(HME_ROM_READ_1(j + PCI_VPDRES_BYTE0)) == 0 ||
	    PCI_VPDRES_LARGE_NAME(HME_ROM_READ_1(j + PCI_VPDRES_BYTE0)) !=
	    PCI_VPDRES_TYPE_VPD ||
	    (HME_ROM_READ_1(j + PCI_VPDRES_LARGE_LEN_LSB) << 8 |
	    HME_ROM_READ_1(j + PCI_VPDRES_LARGE_LEN_MSB)) !=
	    PCI_VPD_SIZE + ETHER_ADDR_LEN ||
	    HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_KEY0) !=
	    0x4e /* N */ ||
	    HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_KEY1) !=
	    0x41 /* A */ ||
	    HME_ROM_READ_1(j + PCI_VPDRES_LARGE_DATA + PCI_VPD_LEN) !=
	    ETHER_ADDR_LEN) {
		device_printf(dev, "unexpected PCI VPD\n");
		error = ENXIO;
		goto fail_rres;
	}
	bus_space_read_region_1(memt, memh, j + PCI_VPDRES_LARGE_DATA +
	    PCI_VPD_DATA, sc->sc_enaddr, ETHER_ADDR_LEN);

fail_rres:
	bus_release_resource(ebus_dev, SYS_RES_MEMORY,
	    rman_get_rid(ebus_rres), ebus_rres);
fail_children:
	free(children, M_TEMP);
	if (error != 0)
		goto fail_sres;
#endif

	sc->sc_burst = 64;	/* XXX */

	/*
	 * call the main configure
	 */
	if ((error = hme_config(sc)) != 0) {
		device_printf(dev, "could not be configured\n");
		goto fail_ires;
	}

	if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, hme_intr, sc, &hsc->hsc_ih)) != 0) {
		device_printf(dev, "couldn't establish interrupt\n");
		hme_detach(sc);
		goto fail_ires;
	}
	return (0);

fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ,
	    rman_get_rid(hsc->hsc_ires), hsc->hsc_ires);
fail_sres:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_sres), hsc->hsc_sres);
fail_mtx:
	mtx_destroy(&sc->sc_lock);
	return (error);
}
Example #25
int
cfcs_init(void)
{
	struct cfcs_softc *softc;
	struct ccb_setasync csa;
	struct ctl_port *port;
#ifdef NEEDTOPORT
	char wwnn[8];
#endif
	int retval;

	softc = &cfcs_softc;
	retval = 0;
	bzero(softc, sizeof(*softc));
	mtx_init(&softc->lock, "ctl2cam", NULL, MTX_DEF);
	port = &softc->port;

	port->frontend = &cfcs_frontend;
	port->port_type = CTL_PORT_INTERNAL;
	/* XXX KDM what should the real number be here? */
	port->num_requested_ctl_io = 4096;
	snprintf(softc->port_name, sizeof(softc->port_name), "camsim");
	port->port_name = softc->port_name;
	port->port_online = cfcs_online;
	port->port_offline = cfcs_offline;
	port->onoff_arg = softc;
	port->lun_enable = cfcs_lun_enable;
	port->lun_disable = cfcs_lun_disable;
	port->targ_lun_arg = softc;
	port->fe_datamove = cfcs_datamove;
	port->fe_done = cfcs_done;

	/* XXX KDM what should we report here? */
	/* XXX These should probably be fetched from CTL. */
	port->max_targets = 1;
	port->max_target_id = 15;

	retval = ctl_port_register(port);
	if (retval != 0) {
		printf("%s: ctl_port_register() failed with error %d!\n",
		       __func__, retval);
		mtx_destroy(&softc->lock);
		return (retval);
	}

	/*
	 * Get the WWNN out of the database, and create a WWPN as well.
	 */
#ifdef NEEDTOPORT
	ddb_GetWWNN((char *)wwnn);
	softc->wwnn = be64dec(wwnn);
	softc->wwpn = softc->wwnn + (softc->port.targ_port & 0xff);
#endif

	/*
	 * If the CTL frontend didn't tell us what our WWNN/WWPN is, go
	 * ahead and set something random.
	 */
	if (port->wwnn == 0) {
		uint64_t random_bits;

		arc4rand(&random_bits, sizeof(random_bits), 0);
		softc->wwnn = (random_bits & 0x0000000fffffff00ULL) |
			/* Company ID */ 0x5000000000000000ULL |
			/* NL-Port */    0x0300;
		softc->wwpn = softc->wwnn + port->targ_port + 1;
		ctl_port_set_wwns(port, true, softc->wwnn, true, softc->wwpn);
	} else {
		softc->wwnn = port->wwnn;
		softc->wwpn = port->wwpn;
	}

	mtx_lock(&softc->lock);
	softc->devq = cam_simq_alloc(port->num_requested_ctl_io);
	if (softc->devq == NULL) {
		printf("%s: error allocating devq\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	softc->sim = cam_sim_alloc(cfcs_action, cfcs_poll, softc->port_name,
				   softc, /*unit*/ 0, &softc->lock, 1,
				   port->num_requested_ctl_io, softc->devq);
	if (softc->sim == NULL) {
		printf("%s: error allocating SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_bus_register(softc->sim, NULL, 0) != CAM_SUCCESS) {
		printf("%s: error registering SIM\n", __func__);
		retval = ENOMEM;
		goto bailout;
	}

	if (xpt_create_path(&softc->path, /*periph*/NULL,
			    cam_sim_path(softc->sim),
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		printf("%s: error creating path\n", __func__);
		xpt_bus_deregister(cam_sim_path(softc->sim));
		retval = EINVAL;
		goto bailout;
	}

	xpt_setup_ccb(&csa.ccb_h, softc->path, CAM_PRIORITY_NONE);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_LOST_DEVICE;
	csa.callback = cfcs_async;
	csa.callback_arg = softc->sim;
	xpt_action((union ccb *)&csa);

	mtx_unlock(&softc->lock);

	return (retval);

bailout:
	if (softc->sim)
		cam_sim_free(softc->sim, /*free_devq*/ TRUE);
	else if (softc->devq)
		cam_simq_free(softc->devq);
	mtx_unlock(&softc->lock);
	mtx_destroy(&softc->lock);

	return (retval);
}
Example #26
int
ic_init(isc_session_t *sp)
{
     struct cam_sim	*sim;
     struct cam_devq	*devq;

     debug_called(8);

     if((devq = cam_simq_alloc(256)) == NULL)
	  return ENOMEM;

#if __FreeBSD_version >= 700000
     mtx_init(&sp->cam_mtx, "isc-cam", NULL, MTX_DEF);
#else
     sp->cam_mtx = Giant;
#endif
     sim = cam_sim_alloc(ic_action,
			 ic_poll,
			 "iscsi",
			 sp,
			 sp->sid,	// unit
#if __FreeBSD_version >= 700000
			 &sp->cam_mtx,
#endif
			 1,		// max_dev_transactions
			 0,		// max_tagged_dev_transactions
			 devq);
     if(sim == NULL) {
	  cam_simq_free(devq);
#if __FreeBSD_version >= 700000
	  mtx_destroy(&sp->cam_mtx);
#endif
	  return ENXIO;
     }

     CAM_LOCK(sp);
     if(xpt_bus_register(sim,
#if __FreeBSD_version >= 700000
			 NULL,
#endif
			 0/*bus_number*/) != CAM_SUCCESS) {

	  cam_sim_free(sim, /*free_devq*/TRUE);
	  CAM_UNLOCK(sp);
#if __FreeBSD_version >= 700000
	  mtx_destroy(&sp->cam_mtx);
#endif
	  return ENXIO;
     }
     sp->cam_sim = sim;
     if(xpt_create_path(&sp->cam_path, NULL, cam_sim_path(sp->cam_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
	  xpt_bus_deregister(cam_sim_path(sp->cam_sim));
	  cam_sim_free(sim, /*free_devq*/TRUE);
	  CAM_UNLOCK(sp);
#if __FreeBSD_version >= 700000
	  mtx_destroy(&sp->cam_mtx);
#endif
	  return ENXIO;
     }
     CAM_UNLOCK(sp);

     sdebug(1, "cam subsystem initialized");

     ic_scan(sp);

     return 0;
}
Example #27
void mbq_safe_destroy(struct mbq *q)
{
    mtx_destroy(&q->lock);
}
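
Example #27 is the smallest mtx_destroy() call site in this set, so it is a convenient place for a minimal sketch of the FreeBSD mutex(9) lifecycle the other examples follow; the softc layout and names below are hypothetical:

/* Illustrative sketch only: initialize the mutex once at attach time,
 * take it around shared state, and destroy it last on detach.
 */
struct example_softc {
	struct mtx	ex_mtx;
	int		ex_state;
};

static void
example_attach_lock(struct example_softc *sc)
{
	mtx_init(&sc->ex_mtx, "example", NULL, MTX_DEF);
}

static void
example_set_state(struct example_softc *sc, int state)
{
	mtx_lock(&sc->ex_mtx);
	sc->ex_state = state;
	mtx_unlock(&sc->ex_mtx);
}

static void
example_detach_lock(struct example_softc *sc)
{
	if (mtx_initialized(&sc->ex_mtx))
		mtx_destroy(&sc->ex_mtx);
}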
Example #28
static int
hme_sbus_attach(device_t dev)
{
	struct hme_sbus_softc *hsc;
	struct hme_softc *sc;
	u_long start, count;
	uint32_t burst;
	int i, error = 0;

	hsc = device_get_softc(dev);
	sc = &hsc->hsc_hme;
	mtx_init(&sc->sc_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map five register banks:
	 *
	 *	bank 0: HME SEB registers
	 *	bank 1: HME ETX registers
	 *	bank 2: HME ERX registers
	 *	bank 3: HME MAC registers
	 *	bank 4: HME MIF registers
	 *
	 */
	i = 0;
	hsc->hsc_seb_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_seb_res == NULL) {
		device_printf(dev, "cannot map SEB registers\n");
		error = ENXIO;
		goto fail_mtx_res;
	}
	sc->sc_sebt = rman_get_bustag(hsc->hsc_seb_res);
	sc->sc_sebh = rman_get_bushandle(hsc->hsc_seb_res);

	i = 1;
	hsc->hsc_etx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_etx_res == NULL) {
		device_printf(dev, "cannot map ETX registers\n");
		error = ENXIO;
		goto fail_seb_res;
	}
	sc->sc_etxt = rman_get_bustag(hsc->hsc_etx_res);
	sc->sc_etxh = rman_get_bushandle(hsc->hsc_etx_res);

	i = 2;
	hsc->hsc_erx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_erx_res == NULL) {
		device_printf(dev, "cannot map ERX registers\n");
		error = ENXIO;
		goto fail_etx_res;
	}
	sc->sc_erxt = rman_get_bustag(hsc->hsc_erx_res);
	sc->sc_erxh = rman_get_bushandle(hsc->hsc_erx_res);

	i = 3;
	hsc->hsc_mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_mac_res == NULL) {
		device_printf(dev, "cannot map MAC registers\n");
		error = ENXIO;
		goto fail_erx_res;
	}
	sc->sc_mact = rman_get_bustag(hsc->hsc_mac_res);
	sc->sc_mach = rman_get_bushandle(hsc->hsc_mac_res);

	/*
	 * At least on some HMEs, the MIF registers seem to be inside the MAC
	 * range, so try to kludge around it.
	 */
	i = 4;
	hsc->hsc_mif_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &i, RF_ACTIVE);
	if (hsc->hsc_mif_res == NULL) {
		if (bus_get_resource(dev, SYS_RES_MEMORY, i,
		    &start, &count) != 0) {
			device_printf(dev, "cannot get MIF registers\n");
			error = ENXIO;
			goto fail_mac_res;
		}
		if (start < rman_get_start(hsc->hsc_mac_res) ||
		    start + count - 1 > rman_get_end(hsc->hsc_mac_res)) {
			device_printf(dev, "cannot move MIF registers to MAC "
			    "bank\n");
			error = ENXIO;
			goto fail_mac_res;
		}
		sc->sc_mift = sc->sc_mact;
		bus_space_subregion(sc->sc_mact, sc->sc_mach,
		    start - rman_get_start(hsc->hsc_mac_res), count,
		    &sc->sc_mifh);
	} else {
		sc->sc_mift = rman_get_bustag(hsc->hsc_mif_res);
		sc->sc_mifh = rman_get_bushandle(hsc->hsc_mif_res);
	}

	i = 0;
	hsc->hsc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &i, RF_SHAREABLE | RF_ACTIVE);
	if (hsc->hsc_ires == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		error = ENXIO;
		goto fail_mif_res;
	}

	OF_getetheraddr(dev, sc->sc_enaddr);

	burst = sbus_get_burstsz(dev);
	/* Translate into plain numerical format */
	if ((burst & SBUS_BURST_64))
		sc->sc_burst = 64;
	else if ((burst & SBUS_BURST_32))
		sc->sc_burst = 32;
	else if ((burst & SBUS_BURST_16))
		sc->sc_burst = 16;
	else
		 sc->sc_burst = 0;

	sc->sc_dev = dev;
	sc->sc_flags = 0;

	if ((error = hme_config(sc)) != 0) {
		device_printf(dev, "could not be configured\n");
		goto fail_ires;
	}

	if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET |
	    INTR_MPSAFE, NULL, hme_intr, sc, &hsc->hsc_ih)) != 0) {
		device_printf(dev, "couldn't establish interrupt\n");
		hme_detach(sc);
		goto fail_ires;
	}
	return (0);

fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ,
	    rman_get_rid(hsc->hsc_ires), hsc->hsc_ires);
fail_mif_res:
	if (hsc->hsc_mif_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(hsc->hsc_mif_res), hsc->hsc_mif_res);
	}
fail_mac_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_mac_res), hsc->hsc_mac_res);
fail_erx_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_erx_res), hsc->hsc_erx_res);
fail_etx_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_etx_res), hsc->hsc_etx_res);
fail_seb_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_seb_res), hsc->hsc_seb_res);
fail_mtx_res:
	mtx_destroy(&sc->sc_lock);
	return (error);
}
Example #29
static int
linux_elf_modevent(module_t mod, int type, void *data)
{
	Elf32_Brandinfo **brandinfo;
	int error;
	struct linux_ioctl_handler **lihp;
	struct linux_device_handler **ldhp;

	error = 0;

	switch(type) {
	case MOD_LOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_insert_brand_entry(*brandinfo) < 0)
				error = EINVAL;
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_register_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_register_handler(*ldhp);
			mtx_init(&emul_lock, "emuldata lock", NULL, MTX_DEF);
			sx_init(&emul_shared_lock, "emuldata->shared lock");
			LIST_INIT(&futex_list);
			mtx_init(&futex_mtx, "ftllk", NULL, MTX_DEF);
			linux_exit_tag = EVENTHANDLER_REGISTER(process_exit, linux_proc_exit,
			      NULL, 1000);
			linux_exec_tag = EVENTHANDLER_REGISTER(process_exec, linux_proc_exec,
			      NULL, 1000);
			linux_get_machine(&linux_platform);
			linux_szplatform = roundup(strlen(linux_platform) + 1,
			    sizeof(char *));
			linux_osd_jail_register();
			stclohz = (stathz ? stathz : hz);
			if (bootverbose)
				printf("Linux ELF exec handler installed\n");
		} else
			printf("cannot insert Linux ELF brand handler\n");
		break;
	case MOD_UNLOAD:
		for (brandinfo = &linux_brandlist[0]; *brandinfo != NULL;
		     ++brandinfo)
			if (elf32_brand_inuse(*brandinfo))
				error = EBUSY;
		if (error == 0) {
			for (brandinfo = &linux_brandlist[0];
			     *brandinfo != NULL; ++brandinfo)
				if (elf32_remove_brand_entry(*brandinfo) < 0)
					error = EINVAL;
		}
		if (error == 0) {
			SET_FOREACH(lihp, linux_ioctl_handler_set)
				linux_ioctl_unregister_handler(*lihp);
			SET_FOREACH(ldhp, linux_device_handler_set)
				linux_device_unregister_handler(*ldhp);
			mtx_destroy(&emul_lock);
			sx_destroy(&emul_shared_lock);
			mtx_destroy(&futex_mtx);
			EVENTHANDLER_DEREGISTER(process_exit, linux_exit_tag);
			EVENTHANDLER_DEREGISTER(process_exec, linux_exec_tag);
			linux_osd_jail_deregister();
			if (bootverbose)
				printf("Linux ELF exec handler removed\n");
		} else
			printf("Could not deinstall ELF interpreter entry\n");
		break;
	default:
		return EOPNOTSUPP;
	}
	return error;
}
Example #30
static int
rk30_gpio_attach(device_t dev)
{
	struct rk30_gpio_softc *sc = device_get_softc(dev);
	int i, rid;
	phandle_t gpio;
	unsigned long start;

	if (rk30_gpio_sc)
		return (ENXIO);
	sc->sc_dev = dev;
	mtx_init(&sc->sc_mtx, "rk30 gpio", "gpio", MTX_DEF);

	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->sc_mem_res) {
		device_printf(dev, "cannot allocate memory window\n");
		goto fail;
	}
	sc->sc_bst = rman_get_bustag(sc->sc_mem_res);
	sc->sc_bsh = rman_get_bushandle(sc->sc_mem_res);
	/* Check the unit we are attaching by our base address. */
	sc->sc_bank = -1;
	start = rman_get_start(sc->sc_mem_res);
	for (i = 0; i < nitems(rk30_gpio_base_addr); i++) {
		if (rk30_gpio_base_addr[i] == start) {
			sc->sc_bank = i;
			break;
		}
	}
	if (sc->sc_bank == -1) {
		device_printf(dev,
		    "unsupported device unit (only GPIO0..3 are supported)\n");
		goto fail;
	}

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->sc_irq_res) {
		device_printf(dev, "cannot allocate interrupt\n");
		goto fail;
	}

	/* Find our node. */
	gpio = ofw_bus_get_node(sc->sc_dev);

	if (!OF_hasprop(gpio, "gpio-controller"))
		/* Node is not a GPIO controller. */
		goto fail;

	/* Initialize the software controlled pins. */
	for (i = 0; i < RK30_GPIO_PINS; i++) {
		snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
		    "pin %d", i);
		sc->sc_gpio_pins[i].gp_pin = i;
		sc->sc_gpio_pins[i].gp_caps = RK30_GPIO_DEFAULT_CAPS;
		sc->sc_gpio_pins[i].gp_flags = rk30_gpio_get_function(sc, i);
	}
	sc->sc_gpio_npins = i;
	rk30_gpio_sc = sc;
	rk30_gpio_init();
	sc->sc_busdev = gpiobus_attach_bus(dev);
	if (sc->sc_busdev == NULL)
		goto fail;

	return (0);

fail:
	if (sc->sc_irq_res)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res);
	if (sc->sc_mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res);
	mtx_destroy(&sc->sc_mtx);

	return (ENXIO);
}