Example #1
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_draw_t *draw = data;
	struct bsd_drm_drawable_info *info;

	info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info),
	    DRM_MEM_DRAWABLE);
	if (info == NULL)
		return ENOMEM;

#ifdef __FreeBSD__
	info->handle = alloc_unr(dev->drw_unrhdr);
#else
	/* XXXJDM */
	info->handle = ++dev->drw_no;
#endif
	DRM_SPINLOCK(&dev->drw_lock);
	RB_INSERT(drawable_tree, &dev->drw_head, info);
	draw->handle = info->handle;
	DRM_SPINUNLOCK(&dev->drw_lock);

	DRM_DEBUG("%d\n", draw->handle);

	return 0;
}
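Example #1 and most of the snippets below assume a unit number header (here dev->drw_unrhdr) that was created elsewhere. For reference, a minimal sketch of the allocator's full lifecycle; the [1, INT_MAX] range and the wrapper function are illustrative, not taken from any particular driver:

static void
unrhdr_lifecycle_sketch(void)
{
	struct unrhdr *uh;
	int handle;

	uh = new_unrhdr(1, INT_MAX, NULL);	/* NULL mutex: caller handles locking */
	handle = alloc_unr(uh);			/* returns -1 when the range is exhausted */
	if (handle != -1)
		free_unr(uh, handle);		/* hand the number back */
	delete_unrhdr(uh);			/* legal only once every unit has been freed */
}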
Example #2
static int
uhso_attach_ifnet(struct uhso_softc *sc, struct usb_interface *iface, int type)
{
	struct ifnet *ifp;
	usb_error_t uerr;
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	unsigned int devunit;

	uerr = usbd_transfer_setup(sc->sc_udev,
	    &iface->idesc->bInterfaceNumber, sc->sc_if_xfer,
	    uhso_ifnet_config, UHSO_IFNET_MAX, sc, &sc->sc_mtx);
	if (uerr) {
		UHSO_DPRINTF(0, "usbd_transfer_setup failed: %s\n",
		    usbd_errstr(uerr));
		return (-1);
	}

	sc->sc_ifp = ifp = if_alloc(IFT_OTHER);
	if (sc->sc_ifp == NULL) {
		device_printf(sc->sc_dev, "if_alloc() failed\n");
		return (-1);
	}

	callout_init_mtx(&sc->sc_c, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_c, 1, uhso_if_rxflush, sc);
	mtx_unlock(&sc->sc_mtx);

	/*
	 * We create our own unit numbers for ifnet devices because the
	 * USB interface unit numbers can be at arbitrary positions yielding
	 * odd looking device names.
	 */
	devunit = alloc_unr(uhso_ifnet_unit);

	if_initname(ifp, device_get_name(sc->sc_dev), devunit);
	ifp->if_mtu = UHSO_MAX_MTU;
	ifp->if_ioctl = uhso_if_ioctl;
	ifp->if_init = uhso_if_init;
	ifp->if_start = uhso_if_start;
	ifp->if_output = uhso_if_output;
	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_NOARP;
	ifp->if_softc = sc;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	bpfattach(ifp, DLT_RAW, 0);

	sctx = device_get_sysctl_ctx(sc->sc_dev);
	soid = device_get_sysctl_tree(sc->sc_dev);
	/* Unlocked read... */
	SYSCTL_ADD_STRING(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "netif",
	    CTLFLAG_RD, ifp->if_xname, 0, "Attached network interface");

	return (0);
}
Example #3
/*
 * Allocate a file number
 */
void
pfs_fileno_alloc(struct pfs_node *pn)
{

	if (pn->pn_parent)
		PFS_TRACE(("%s/%s", pn->pn_parent->pn_name, pn->pn_name));
	else
		PFS_TRACE(("%s", pn->pn_name));
	pfs_assert_not_owned(pn);

	switch (pn->pn_type) {
	case pfstype_root:
		/* root must always be 2 */
		pn->pn_fileno = 2;
		break;
	case pfstype_dir:
	case pfstype_file:
	case pfstype_symlink:
	case pfstype_procdir:
		pn->pn_fileno = alloc_unr(pn->pn_info->pi_unrhdr);
		break;
	case pfstype_this:
		KASSERT(pn->pn_parent != NULL,
		    ("%s(): pfstype_this node has no parent", __func__));
		pn->pn_fileno = pn->pn_parent->pn_fileno;
		break;
	case pfstype_parent:
		KASSERT(pn->pn_parent != NULL,
		    ("%s(): pfstype_parent node has no parent", __func__));
		if (pn->pn_parent->pn_type == pfstype_root) {
			pn->pn_fileno = pn->pn_parent->pn_fileno;
			break;
		}
		KASSERT(pn->pn_parent->pn_parent != NULL,
		    ("%s(): pfstype_parent node has no grandparent", __func__));
		pn->pn_fileno = pn->pn_parent->pn_parent->pn_fileno;
		break;
	case pfstype_none:
		KASSERT(0,
		    ("%s(): pfstype_none node", __func__));
		break;
	}

#if 0
	printf("%s(): %s: ", __func__, pn->pn_info->pi_name);
	if (pn->pn_parent) {
		if (pn->pn_parent->pn_parent) {
			printf("%s/", pn->pn_parent->pn_parent->pn_name);
		}
		printf("%s/", pn->pn_parent->pn_name);
	}
	printf("%s -> %d\n", pn->pn_name, pn->pn_fileno);
#endif
}
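Only the node types that drew their fileno from pi_unrhdr in the switch above need to hand it back. A condensed sketch of the release side, reconstructed from that switch rather than copied verbatim from the tree's pfs_fileno_free():

void
pfs_fileno_free(struct pfs_node *pn)
{

	pfs_assert_not_owned(pn);

	switch (pn->pn_type) {
	case pfstype_dir:
	case pfstype_file:
	case pfstype_symlink:
	case pfstype_procdir:
		/* these are the only types allocated from pi_unrhdr */
		free_unr(pn->pn_info->pi_unrhdr, pn->pn_fileno);
		break;
	default:
		/* root is fixed at 2; "." and ".." borrow a parent's number */
		break;
	}
}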
Example #4
#ifndef PTS_EXTERNAL
static
#endif /* !PTS_EXTERNAL */
int
pts_alloc(int fflags, struct thread *td, struct file *fp)
{
	int unit, ok, error;
	struct tty *tp;
	struct pts_softc *psc;
	struct proc *p = td->td_proc;
	struct ucred *cred = td->td_ucred;

	/* Resource limiting. */
	PROC_LOCK(p);
	error = racct_add(p, RACCT_NPTS, 1);
	if (error != 0) {
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	ok = chgptscnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPTS));
	if (!ok) {
		racct_sub(p, RACCT_NPTS, 1);
		PROC_UNLOCK(p);
		return (EAGAIN);
	}
	PROC_UNLOCK(p);

	/* Try to allocate a new pts unit number. */
	unit = alloc_unr(pts_pool);
	if (unit < 0) {
		racct_sub(p, RACCT_NPTS, 1);
		chgptscnt(cred->cr_ruidinfo, -1, 0);
		return (EAGAIN);
	}

	/* Allocate TTY and softc. */
	psc = malloc(sizeof(struct pts_softc), M_PTS, M_WAITOK|M_ZERO);
	cv_init(&psc->pts_inwait, "ptsin");
	cv_init(&psc->pts_outwait, "ptsout");

	psc->pts_unit = unit;
	psc->pts_cred = crhold(cred);

	tp = tty_alloc(&pts_class, psc);
	knlist_init_mtx(&psc->pts_inpoll.si_note, tp->t_mtx);
	knlist_init_mtx(&psc->pts_outpoll.si_note, tp->t_mtx);

	/* Expose the slave device as well. */
	tty_makedev(tp, td->td_ucred, "pts/%u", psc->pts_unit);

	finit(fp, fflags, DTYPE_PTS, tp, &ptsdev_ops);

	return (0);
}
Example #5
/*
 * Mark a unit number (the X in cuaUX) as in use.
 *
 * Note that devices using a different naming scheme (see ucom_tty_name()
 * callback) still use this unit allocation.
 */
static int
ucom_unit_alloc(void)
{
	int unit;

	/* sanity checks */
	if (ucom_unrhdr == NULL) {
		DPRINTF("ucom_unrhdr is NULL\n");
		return (-1);
	}
	unit = alloc_unr(ucom_unrhdr);
	DPRINTF("unit %d is allocated\n", unit);
	return (unit);
}
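A matching release path is symmetric: give the unit back with free_unr() after the same sanity check. A sketch of that counterpart (the function name and debug messages are illustrative):

static void
ucom_unit_free(int unit)
{
	/* sanity checks */
	if (ucom_unrhdr == NULL || unit < 0) {
		DPRINTF("cannot free unit %d\n", unit);
		return;
	}
	free_unr(ucom_unrhdr, unit);
	DPRINTF("unit %d is freed\n", unit);
}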
Example #6
static void
vpid_alloc(uint16_t *vpid, int num)
{
	int i, x;

	if (num <= 0 || num > VM_MAXCPU)
		panic("invalid number of vpids requested: %d", num);

	/*
	 * If the "enable vpid" execution control is not enabled then the
	 * VPID is required to be 0 for all vcpus.
	 */
	if ((procbased_ctls2 & PROCBASED2_ENABLE_VPID) == 0) {
		for (i = 0; i < num; i++)
			vpid[i] = 0;
		return;
	}

	/*
	 * Allocate a unique VPID for each vcpu from the unit number allocator.
	 */
	for (i = 0; i < num; i++) {
		x = alloc_unr(vpid_unr);
		if (x == -1)
			break;
		else
			vpid[i] = x;
	}

	if (i < num) {
		atomic_add_int(&vpid_alloc_failed, 1);

		/*
		 * If the unit number allocator does not have enough unique
		 * VPIDs then we need to allocate from the [1,VM_MAXCPU] range.
		 *
		 * These VPIDs are not unique across VMs but this does not
		 * affect correctness because the combined mappings are also
		 * tagged with the EP4TA which is unique for each VM.
		 *
		 * It is still sub-optimal because the invvpid will invalidate
		 * combined mappings for a particular VPID across all EP4TAs.
		 */
		while (i-- > 0)
			vpid_free(vpid[i]);

		for (i = 0; i < num; i++)
			vpid[i] = i + 1;
	}
}
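vpid_free(), used in the rollback loop above, must not return the fallback values [1, VM_MAXCPU] to the allocator, since those never came from it. A sketch of that counterpart, reconstructed from the allocation logic above:

static void
vpid_free(int vpid)
{
	if (vpid < 0 || vpid > 0xffff)
		panic("vpid_free: invalid vpid %d", vpid);

	/*
	 * VPIDs [0, VM_MAXCPU] are either the "VPID disabled" case or the
	 * fallback range and were not handed out by the unit number allocator.
	 */
	if (vpid > VM_MAXCPU)
		free_unr(vpid_unr, vpid);
}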
Example #7
/*
 * Mount the filesystem
 */
static int
devfs_mount(struct mount *mp)
{
	int error;
	struct devfs_mount *fmp;
	struct vnode *rvp;

	if (devfs_unr == NULL)
		devfs_unr = new_unrhdr(0, INT_MAX, NULL);

	error = 0;

	if (mp->mnt_flag & (MNT_UPDATE | MNT_ROOTFS))
		return (EOPNOTSUPP);

	fmp = malloc(sizeof *fmp, M_DEVFS, M_WAITOK | M_ZERO);
	fmp->dm_idx = alloc_unr(devfs_unr);
	sx_init(&fmp->dm_lock, "devfsmount");
	fmp->dm_holdcnt = 1;

	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
	    MNTK_EXTENDED_SHARED;
#ifdef MAC
	mp->mnt_flag |= MNT_MULTILABEL;
#endif
	MNT_IUNLOCK(mp);
	fmp->dm_mount = mp;
	mp->mnt_data = (void *) fmp;
	vfs_getnewfsid(mp);

	fmp->dm_rootdir = devfs_vmkdir(fmp, NULL, 0, NULL, DEVFS_ROOTINO);

	error = devfs_root(mp, LK_EXCLUSIVE, &rvp);
	if (error) {
		sx_destroy(&fmp->dm_lock);
		free_unr(devfs_unr, fmp->dm_idx);
		free(fmp, M_DEVFS);
		return (error);
	}

	VOP_UNLOCK(rvp, 0);

	vfs_mountedfrom(mp, "devfs");

	return (0);
}
Example #8
/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}
Example #9
int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_draw *draw = data;
	struct bsd_drm_drawable_info *info;

	info = malloc(sizeof(struct bsd_drm_drawable_info), DRM_MEM_DRAWABLE,
	    M_NOWAIT | M_ZERO);
	if (info == NULL)
		return ENOMEM;

	info->handle = alloc_unr(dev->drw_unrhdr);
	DRM_SPINLOCK(&dev->drw_lock);
	RB_INSERT(drawable_tree, &dev->drw_head, info);
	draw->handle = info->handle;
	DRM_SPINUNLOCK(&dev->drw_lock);

	DRM_DEBUG("%d\n", draw->handle);

	return 0;
}
Example #10
static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}
Example #11
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_gem_mm *mm = dev->mm_private;
    int ret;

    if (obj->on_map)
        return 0;

    obj->map_list.key = alloc_unr(mm->idxunr);
    ret = drm_ht_insert_item(&mm->offset_hash, &obj->map_list);
    if (ret) {
        DRM_ERROR("failed to add to map hash\n");
        free_unr(mm->idxunr, obj->map_list.key);
        return ret;
    }
    obj->on_map = true;

    return 0;
}
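The error path above already performs the undo; the full inverse operation is the same two calls plus clearing on_map. A sketch reconstructed from the fields used above (not necessarily byte-for-byte the tree's drm_gem_free_mmap_offset()):

void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_gem_mm *mm = obj->dev->mm_private;

    if (!obj->on_map)
        return;

    drm_ht_remove_item(&mm->offset_hash, &obj->map_list);
    free_unr(mm->idxunr, obj->map_list.key);
    obj->on_map = false;
}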
Example #12
int
drm_gem_name_create(struct drm_gem_names *names, void *p, uint32_t *name)
{
	struct drm_gem_name *np;

	if (*name != 0) {
		return (EALREADY);
	}

	np = malloc(sizeof(struct drm_gem_name), M_GEM_NAMES, M_WAITOK);
	mtx_lock(&names->lock);
	np->name = alloc_unr(names->unr);
	if (np->name == -1) {
		mtx_unlock(&names->lock);
		free(np, M_GEM_NAMES);
		return (ENOMEM);
	}
	*name = np->name;
	np->ptr = p;
	LIST_INSERT_HEAD(gem_name_hash_index(names, np->name), np, link);
	mtx_unlock(&names->lock);
	return (0);
}
Example #13
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto error;
	}

	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	if (ue->ue_methods->ue_ioctl != NULL)
		ifp->if_ioctl = ue->ue_methods->ue_ioctl;
	else
		ifp->if_ioctl = uether_ioctl;
	ifp->if_start = ue_start;
	ifp->if_init = ue_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);
	ue->ue_ifp = ifp;

	if (ue->ue_methods->ue_mii_upd != NULL && 
	    ue->ue_methods->ue_mii_sts != NULL) {
		mtx_lock(&Giant);	/* device_xxx() depends on this */
		error = mii_phy_probe(ue->ue_dev, &ue->ue_miibus,
		    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts);
		mtx_unlock(&Giant);
		if (error) {
			device_printf(ue->ue_dev, "MII without any PHY\n");
			goto error;
		}
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

error:
	free_unr(ueunit, ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}
Example #14
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp;
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	UE_UNLOCK(ue);

	ue->ue_unit = alloc_unr(ueunit);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);

	error = 0;
	CURVNET_SET_QUIET(vnet0);
	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(ue->ue_dev, "could not allocate ifnet\n");
		goto fail;
	}

	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		ue->ue_ifp = ifp;
		error = ue->ue_methods->ue_attach_post_sub(ue);
	} else {
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		if (ue->ue_methods->ue_ioctl != NULL)
			ifp->if_ioctl = ue->ue_methods->ue_ioctl;
		else
			ifp->if_ioctl = uether_ioctl;
		ifp->if_start = ue_start;
		ifp->if_init = ue_init;
		IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
		ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
		IFQ_SET_READY(&ifp->if_snd);
		ue->ue_ifp = ifp;

		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			/* device_xxx() depends on this */
			mtx_lock(&Giant);
			error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts,
			    BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
			mtx_unlock(&Giant);
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n", device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr);
	/* Tell upper layer we support VLAN oversized frames. */
	if (ifp->if_capabilities & IFCAP_VLAN_MTU)
		ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	CURVNET_RESTORE();

	snprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	UE_LOCK(ue);
	return;

fail:
	CURVNET_RESTORE();
	free_unr(ueunit, ue->ue_unit);
	if (ue->ue_ifp != NULL) {
		if_free(ue->ue_ifp);
		ue->ue_ifp = NULL;
	}
	UE_LOCK(ue);
	return;
}