Example #1
0
/*
 * Attach a kqueue filter to the device backing this file pointer.
 *
 * Resolves the vnode and its character device from fp, holds a
 * temporary device reference across the dev_dkqfilter() call, and
 * returns the filter's status.  Returns EBADF if the vnode is missing,
 * bad, or no longer has an associated device.
 */
static int
devfs_fo_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	cdev_t dev;
	int result;

	if (vp == NULL || vp->v_type == VBAD)
		return (EBADF);
	dev = vp->v_rdev;
	if (dev == NULL)
		return (EBADF);

	/* Hold the device so it cannot be destroyed mid-call */
	reference_dev(dev);
	result = dev_dkqfilter(dev, kn, fp);
	release_dev(dev);

	return (result);
}
Example #2
0
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * fp    - file pointer backing a devfs vnode
 * uio   - I/O request; asserted to belong to the current thread
 * cred  - caller credentials (passed through; unused locally)
 * flags - per-call O_F* override flags (offset/blocking/buffering)
 *
 * Returns 0 on success or an errno (EBADF if the vnode or device
 * is gone).
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	/* Zero-length reads complete trivially with no device access */
	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* Hold the device across the read so it cannot be destroyed */
	reference_dev(dev);

	/* Without O_FOFFSET, the file's seek position drives the read */
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	/*
	 * Translate per-call O_F* overrides (which take precedence) and
	 * the file's persistent f_flag settings into IO_* flags.
	 */
	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	/* Mix in the sequential-access heuristic for read-ahead hints */
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	/* Update access time on the devfs node, if one is attached */
	if (node)
		nanotime(&node->atime);
	/* Propagate the advanced offset back unless O_FOFFSET was used */
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
Example #3
0
/*
 * Attach routine for the ISA VGA adapter.
 *
 * Claims the legacy VGA I/O-port and memory ranges, attaches the video
 * adapter unit, and (when FB_INSTALL_CDEV is configured) creates the
 * frame buffer control device.
 *
 * Returns 0 on success or the errno from vga_attach_unit()/fb_attach().
 */
static int
isavga_attach(device_t dev)
{
	vga_softc_t *sc;
	int unit;
	int rid;
	int error;

	unit = device_get_unit(dev);
	sc = device_get_softc(dev);

	/*
	 * The resources are shareable and may already be held by the
	 * console; the return values are deliberately not checked.
	 */
	rid = 0;
	bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
			   0, ~0, 0, RF_ACTIVE | RF_SHAREABLE);
	rid = 0;
	bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
			   0, ~0, 0, RF_ACTIVE | RF_SHAREABLE);

	error = vga_attach_unit(unit, sc, device_get_flags(dev));
	if (error)
		return error;

#ifdef FB_INSTALL_CDEV
	/* attach a virtual frame buffer device */
	sc->devt = make_dev(&isavga_ops, VGA_MKMINOR(unit), 0, 0, 02660,
			    "vga%x", VGA_MKMINOR(unit));
	reference_dev(sc->devt);
	error = fb_attach(sc->devt, sc->adp);
	if (error) {
		/*
		 * Tear the cdev (and the reference taken above) back down
		 * so a failed attach does not leak the device node.
		 */
		destroy_dev(sc->devt);
		sc->devt = NULL;
		return error;
	}
#endif /* FB_INSTALL_CDEV */

	if (bootverbose)
		(*vidsw[sc->adp->va_index]->diag)(sc->adp, bootverbose);

#if 0 /* experimental */
	device_add_child(dev, "fb", -1);
	bus_generic_attach(dev);
#endif

	return 0;
}
Example #4
0
/*
 * Module event handling.
 *
 * Creates the "ata" control device when the module loads and tears it
 * down (along with the ata ops vector) when the module unloads.  Any
 * other event is rejected with EOPNOTSUPP.
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
    /* static because we need the reference at destruction time */
    static cdev_t atacdev;

    if (what == MOD_LOAD) {
	/* register controlling device */
	atacdev = make_dev(&ata_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");
	reference_dev(atacdev);
	return (0);
    }

    if (what == MOD_UNLOAD) {
	/* deregister controlling device */
	destroy_dev(atacdev);
	dev_ops_remove_all(&ata_ops);
	return (0);
    }

    return (EOPNOTSUPP);
}
Example #5
0
static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Ndev) && node->d_dev)  {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}
Example #6
0
/*
 * Device-optimized file table ioctl routine.
 *
 * Resolves the backing device from the file's vnode, holds a device
 * reference across the operation, and answers a few generic ioctls
 * (FIODTYPE, FIODNAME) locally before dispatching the rest to the
 * driver via dev_dioctl().  A successful TIOCSCTTY additionally
 * rebinds the calling process's session to this vnode as its
 * controlling tty.
 *
 * Returns 0 on success or an errno (EBADF if the device was revoked).
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
#if 0
	struct devfs_node *node;
#endif
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t	dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	/* Hold the device so it cannot be destroyed mid-ioctl */
	reference_dev(dev);

#if 0
	node = DEVFS_NODE(vp);
#endif

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		/* Report the device's type bits without calling the driver */
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		/* Copy the device name out, including the NUL terminator */
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif
	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		/* A controlling tty only makes sense in a process context */
		if (p == NULL) {
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}
Example #7
0
/*
 * Device-optimized file table vnode write routine.
 *
 * Mirrors devfs_fo_read(): resolves the backing device, holds a
 * reference across dev_dwrite(), translates O_F* override flags and
 * the file's persistent f_flag into IO_* flags, and maintains the
 * file offset and the node's atime/mtime afterwards.
 *
 * Returns 0 on success or an errno (EBADF if the vnode or device
 * is gone).
 */
static int
devfs_fo_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	/* Throttle against dirty-buffer buildup before a regular-file write */
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	/*
	 * NOTE(review): vp is re-fetched here; presumably because
	 * bwillwrite() can block — confirm whether f_data can change
	 * across it before removing this.
	 */
	vp = (struct vnode *)fp->f_data;

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* Hold the device across the write so it cannot be destroyed */
	reference_dev(dev);

	/* Without O_FOFFSET, the file's seek position drives the write */
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	/* Append mode applies to regular files only */
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	/*
	 * Per-call O_F* overrides take precedence over the file's
	 * persistent f_flag settings for blocking/buffering/sync.
	 */
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	/* A synchronously-mounted filesystem forces synchronous writes */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	/* Update access and modification times on the devfs node */
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	/* Propagate the advanced offset back unless O_FOFFSET was used */
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
Example #8
0
/*
 * VOP_CLOSE implementation for devfs special (device) vnodes.
 *
 * Decides whether this close should really reach the driver: the
 * device is closed via dev_dclose() when the vnode is being forcibly
 * reclaimed, the driver tracks closes (D_TRACKCLOSE), or this is the
 * last open of the vnode.  Also performs the half-close of a
 * controlling terminal and hides pty nodes again on their last close.
 *
 * Returns 0 or the errno from dev_dclose().
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	/* Last close of the session's controlling tty: drop the binding */
	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
					LK_RETRY |
					LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}