static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}
static void
dev_free_devlocked(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(cdev);
	KASSERT((cdp->cdp_flags & CDP_UNREF_DTR) == 0,
	    ("destroy_dev() was not called after delist_dev(%p)", cdev));
	TAILQ_INSERT_HEAD(&cdevp_free_list, cdp, cdp_list);
}
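/*
 * A minimal sketch of the teardown ordering the KASSERT above enforces:
 * delist_dev() hides the /dev node and marks the cdev CDP_UNREF_DTR, and
 * destroy_dev() must still follow before the cdev may reach the free list.
 * The "example_softc"/"sc" names below are hypothetical driver state, not
 * part of the code above.
 */
static void
example_detach(struct example_softc *sc)
{
	delist_dev(sc->sc_cdev);	/* remove the node, set CDP_UNREF_DTR */
	/* ... quiesce remaining I/O on the device ... */
	destroy_dev(sc->sc_cdev);	/* required before the cdev is freed */
}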
/*
 * Read the cdev structure in the kernel in order to work out the
 * associated dev_t.
 */
dev_t
dev2udev(struct cdev *dev)
{
	struct cdev_priv priv;

	if (KVM_READ(cdev2priv(dev), &priv, sizeof priv)) {
		return ((dev_t)priv.cdp_inode);
	} else {
		dprintf(stderr, "can't convert cdev *%p to a dev_t\n", dev);
		return (-1);
	}
}
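/*
 * dev2udev() above runs in userland against a kernel image or core, so
 * KVM_READ is assumed to be a length-checking wrapper around kvm_read(3)
 * in the style of fstat(1); a sketch of that assumption, with "kd" a
 * hypothetical, already-opened kvm handle:
 */
#include <kvm.h>

static kvm_t *kd;	/* from kvm_openfiles(3), hypothetical global */

#define	KVM_READ(kaddr, paddr, len) \
	(kvm_read(kd, (u_long)(kaddr), (char *)(paddr), (len)) == \
	    (ssize_t)(len))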
/*
 * Take a thread reference on the cdevsw so the device cannot be destroyed
 * while the caller is inside a cdevsw method; returns NULL if destruction
 * has already been scheduled (CDP_SCHED_DTR).
 */
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = cdev2priv(dev);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			dev->si_threadcount++;
		else
			csw = NULL;
	}
	dev_unlock();
	return (csw);
}
/*
 * Resolve the cdevsw from a devfs vnode and take a thread reference.
 * Eternal devices (VV_ETERNALDEV/SI_ETERNAL) can never be destroyed, so
 * they are returned without bumping si_threadcount (*ref = 0).
 */
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;
	struct cdev *dev;

	mtx_assert(&devmtx, MA_NOTOWNED);
	if ((vp->v_vflag & VV_ETERNALDEV) != 0) {
		dev = vp->v_rdev;
		if (dev == NULL)
			return (NULL);
		KASSERT((dev->si_flags & SI_ETERNAL) != 0,
		    ("Not eternal cdev"));
		*ref = 0;
		csw = dev->si_devsw;
		KASSERT(csw != NULL, ("Eternal cdev is destroyed"));
		*devp = dev;
		return (csw);
	}

	csw = NULL;
	dev_lock();
	dev = vp->v_rdev;
	if (dev == NULL) {
		dev_unlock();
		return (NULL);
	}
	cdp = cdev2priv(dev);
	if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
		csw = dev->si_devsw;
		if (csw != NULL)
			dev->si_threadcount++;
	}
	dev_unlock();
	if (csw != NULL) {
		*devp = dev;
		*ref = 1;
	}
	return (csw);
}
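/*
 * A hedged usage sketch for devvn_refthread(): the caller holds the
 * returned reference across the cdevsw call and pairs it with
 * dev_relthread().  "example_vop_read" is an invented name; the shape
 * follows how the devfs vnode operations use this helper.
 */
static int
example_vop_read(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct cdevsw *csw;
	struct cdev *dev;
	int error, ref;

	csw = devvn_refthread(vp, &dev, &ref);
	if (csw == NULL)
		return (ENXIO);		/* device went away */
	error = csw->d_read(dev, uio, ioflag);
	dev_relthread(dev, ref);	/* drops si_threadcount if ref != 0 */
	return (error);
}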
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		cdp = cdev2priv(*devp);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) {
			csw = (*devp)->si_devsw;
			if (csw != NULL)
				(*devp)->si_threadcount++;
		}
	}
	dev_unlock();
	return (csw);
}
/*
 * Take a thread reference on the cdevsw.  Eternal devices are returned
 * without locking or reference counting (*ref = 0); all others bump
 * si_threadcount atomically unless destruction is already scheduled.
 */
struct cdevsw *
dev_refthread(struct cdev *dev, int *ref)
{
	struct cdevsw *csw;
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_NOTOWNED);
	if ((dev->si_flags & SI_ETERNAL) != 0) {
		*ref = 0;
		return (dev->si_devsw);
	}
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL) {
		cdp = cdev2priv(dev);
		if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0)
			atomic_add_long(&dev->si_threadcount, 1);
		else
			csw = NULL;
	}
	dev_unlock();
	*ref = 1;
	return (csw);
}
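/*
 * And the matching sketch for the two-argument dev_refthread(): an
 * eternal cdev comes back with *ref == 0 and costs no atomics, while a
 * normal cdev holds si_threadcount until dev_relthread().  The
 * "example_ioctl_path" name is illustrative only.
 */
static int
example_ioctl_path(struct cdev *dev, u_long cmd, caddr_t data,
    int fflag, struct thread *td)
{
	struct cdevsw *csw;
	int error, ref;

	csw = dev_refthread(dev, &ref);
	if (csw == NULL)
		return (ENXIO);		/* destruction already scheduled */
	error = csw->d_ioctl(dev, cmd, data, fflag, td);
	dev_relthread(dev, ref);
	return (error);
}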