Exemplo n.º 1
0
/*
 * Device-optimized file table kqfilter routine.
 *
 * Routes the knote registration straight to the device via
 * dev_dkqfilter(), holding a device reference across the call.
 * Returns 0 on success or EBADF if the vnode is missing, bad,
 * or no longer has an associated device (revoked).
 */
static int
devfs_fo_kqfilter(struct file *fp, struct knote *kn)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	cdev_t dev;
	int error;

	if (vp == NULL || vp->v_type == VBAD)
		return (EBADF);
	if ((dev = vp->v_rdev) == NULL)
		return (EBADF);

	/* Pin the device so it cannot be destroyed during the call. */
	reference_dev(dev);
	error = dev_dkqfilter(dev, kn, fp);
	release_dev(dev);

	return (error);
}
Exemplo n.º 2
0
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_read(struct file *fp, struct uio *uio,
		 struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	cdev_t dev;
	int ioflag = 0;
	int error;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	/* Zero-length reads complete trivially. */
	if (uio->uio_resid == 0)
		return 0;

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	dev = vp->v_rdev;
	if (dev == NULL)
		return EBADF;

	/* Hold the device across the I/O so it cannot be destroyed. */
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	/*
	 * Per-call O_FBLOCKING/O_FNONBLOCKING override the file's
	 * default FNONBLOCK mode; same pattern for buffered vs direct.
	 */
	if ((flags & O_FBLOCKING) == 0) {
		if ((flags & O_FNONBLOCKING) || (fp->f_flag & FNONBLOCK))
			ioflag |= IO_NDELAY;
	}
	if ((flags & O_FBUFFERED) == 0) {
		if ((flags & O_FUNBUFFERED) || (fp->f_flag & O_DIRECT))
			ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node)
		nanotime(&node->atime);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
Exemplo n.º 3
0
/*
 * Note that releasing a pty master also releases the child, so
 * we have to make the redirection checks after that and on both
 * sides of a pty.
 */
static void tty_release(struct inode * inode, struct file * filp)
{
	int minor_nr;

	/* The pseudo-device must have been stamped with TTY_MAJOR. */
	if (MAJOR(filp->f_rdev) != TTY_MAJOR) {
		printk("tty_release: tty pseudo-major != TTY_MAJOR\n");
		return;
	}
	minor_nr = MINOR(filp->f_rdev);
	if (!minor_nr) {
		printk("tty_release: bad f_rdev\n");
		return;
	}
	release_dev(minor_nr, filp);
}
Exemplo n.º 4
0
/*
 * tty_open and tty_release keep up the tty count that contains the
 * number of opens done on a tty. We cannot use the inode-count, as
 * different inodes might point to the same tty.
 *
 * Open-counting is needed for pty masters, as well as for keeping
 * track of serial lines: DTR is dropped when the last close happens.
 * (This is not done solely through tty->count, now.  - Ted 1/27/92)
 *
 * The termios state of a pty is reset on first open so that
 * settings don't persist across reuse.
 */
/*
 * Open a tty device.  Resolves the /dev/tty and /dev/tty0 aliases to a
 * concrete minor, initializes the device via init_dev() (which bumps
 * the tty open count), calls the line-specific open routine, and
 * optionally makes the tty the caller's controlling terminal.
 *
 * Returns 0 on success or a negative errno.
 *
 * BUGFIX: the TTY_EXCLUSIVE reject path previously returned -EBUSY
 * without undoing init_dev(), leaking the open count (so the last
 * close would never drop DTR / free the tty).  It now calls
 * release_dev() like every other post-init_dev failure path.
 */
static int tty_open(struct inode * inode, struct file * filp)
{
	struct tty_struct *tty;
	int major, minor;
	int noctty, retval;

retry_open:
	minor = MINOR(inode->i_rdev);
	major = MAJOR(inode->i_rdev);
	noctty = filp->f_flags & O_NOCTTY;
	if (major == TTYAUX_MAJOR) {
		/* /dev/tty: redirect to the caller's controlling tty. */
		if (!minor) {
			major = TTY_MAJOR;
			minor = current->tty;
		}
		/* noctty = 1; */
	} else if (major == TTY_MAJOR) {
		/* /dev/tty0: the currently active virtual console. */
		if (!minor) {
			minor = fg_console + 1;
			noctty = 1;
		}
	} else {
		printk("Bad major #%d in tty_open\n", MAJOR(inode->i_rdev));
		return -ENODEV;
	}
	if (minor <= 0)
		return -ENXIO;
	/* A pty master must never become a controlling terminal. */
	if (IS_A_PTY_MASTER(minor))
		noctty = 1;
	filp->f_rdev = (major << 8) | minor;
	retval = init_dev(minor);
	if (retval)
		return retval;
	tty = tty_table[minor];
#ifdef TTY_DEBUG_HANGUP
	printk("opening tty%d...", tty->line);
#endif
	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !suser()) {
		/*
		 * init_dev() already bumped the open count; drop it
		 * again or the tty leaks (see tty_release()).
		 */
		release_dev(minor, filp);
		return -EBUSY;
	}

#if 0
	/* clean up the packet stuff. */
	/*
	 *  Why is this not done in init_dev?  Right here, if another 
	 * process opens up a tty in packet mode, all the packet 
	 * variables get cleared.  Come to think of it, is anything 
	 * using the packet mode at all???  - Ted, 1/27/93
	 *
	 * Not to worry, a pty master can only be opened once.
	 * And rlogind and telnetd both use packet mode.  -- jrs
	 *
	 * Not needed.  These are cleared in initialize_tty_struct. -- jlc
	 */
	tty->ctrl_status = 0;
	tty->packet = 0;
#endif

	if (tty->open) {
		retval = tty->open(tty, filp);
	} else {
		retval = -ENODEV;
	}
	if (retval) {
#ifdef TTY_DEBUG_HANGUP
		printk("error %d in opening tty%d...", retval, tty->line);
#endif

		release_dev(minor, filp);
		if (retval != -ERESTARTSYS)
			return retval;
		/* Interrupted by a signal: give up instead of retrying. */
		if (current->signal & ~current->blocked)
			return retval;
		schedule();
		goto retry_open;
	}
	/*
	 * A session leader with no controlling tty acquires this one,
	 * unless O_NOCTTY (or one of the noctty cases above) applies.
	 */
	if (!noctty &&
	    current->leader &&
	    current->tty<0 &&
	    tty->session==0) {
		current->tty = minor;
		tty->session = current->session;
		tty->pgrp = current->pgrp;
	}
	filp->f_rdev = MKDEV(TTY_MAJOR,minor); /* Set it to something normal */
	return 0;
}
Exemplo n.º 5
0
/*
 * VOP_GETATTR for devfs: fill in *ap->a_vap from the devfs node
 * backing the vnode.
 *
 * Most attributes come straight from the devfs_node (mode, uid, gid,
 * timestamps).  Softlinks report the link length as their size, and
 * disk-type devices report the media size obtained via DIOCGPART so
 * that lseek() works on them.
 *
 * Returns 0 on success (failures of the DIOCGPART probe itself are
 * deliberately swallowed and reported as size 0).
 */
static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	/* Refresh the node's attributes from the backing device. */
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino ;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	/* Device nodes expose the device's user-visible minor number. */
	if ((node->node_type == Ndev) && node->d_dev)  {
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			/* Probe failed or bogus geometry: report size 0. */
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}
Exemplo n.º 6
0
/*
 * Device-optimized file table ioctl routine.
 *
 * Handles FIODTYPE and FIODNAME locally; all other commands are passed
 * through to the device via dev_dioctl().  A successful TIOCSCTTY
 * additionally re-points the session's controlling-tty vnode at this
 * vnode.
 *
 * Returns 0 on success or an errno (EBADF if the device was revoked).
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
devfs_fo_ioctl(struct file *fp, u_long com, caddr_t data,
		  struct ucred *ucred, struct sysmsg *msg)
{
#if 0
	struct devfs_node *node;
#endif
	struct vnode *vp;
	struct vnode *ovp;
	cdev_t	dev;
	int error;
	struct fiodname_args *name_args;
	size_t namlen;
	const char *name;

	vp = ((struct vnode *)fp->f_data);

	if ((dev = vp->v_rdev) == NULL)
		return EBADF;		/* device was revoked */

	/* Pin the device; dropped at 'out' on every exit path below. */
	reference_dev(dev);

#if 0
	node = DEVFS_NODE(vp);
#endif

	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "devfs_fo_ioctl() called! for dev %s\n",
		    dev->si_name);

	if (com == FIODTYPE) {
		/* Report the device's type bits without calling the driver. */
		*(int *)data = dev_dflags(dev) & D_TYPEMASK;
		error = 0;
		goto out;
	} else if (com == FIODNAME) {
		/* Copy the device name (including NUL) out to the caller. */
		name_args = (struct fiodname_args *)data;
		name = dev->si_name;
		namlen = strlen(name) + 1;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl, got: FIODNAME for %s\n", name);

		if (namlen <= name_args->len)
			error = copyout(dev->si_name, name_args->name, namlen);
		else
			error = EINVAL;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "ioctl stuff: error: %d\n", error);
		goto out;
	}

	error = dev_dioctl(dev, com, data, fp->f_flag, ucred, msg, fp);

#if 0
	if (node) {
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}
#endif
	if (com == TIOCSCTTY) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: got TIOCSCTTY on %s\n",
			    dev->si_name);
	}
	if (error == 0 && com == TIOCSCTTY) {
		struct proc *p = curthread->td_proc;
		struct session *sess;

		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_fo_ioctl: dealing with TIOCSCTTY on %s\n",
			    dev->si_name);
		if (p == NULL) {
			/* No process context: cannot assign a ctty. */
			error = ENOTTY;
			goto out;
		}
		sess = p->p_session;

		/*
		 * Do nothing if reassigning same control tty
		 */
		if (sess->s_ttyvp == vp) {
			error = 0;
			goto out;
		}

		/*
		 * Get rid of reference to old control tty
		 */
		ovp = sess->s_ttyvp;
		vref(vp);
		sess->s_ttyvp = vp;
		if (ovp)
			vrele(ovp);
	}

out:
	release_dev(dev);
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_fo_ioctl() finished! \n");
	return (error);
}
Exemplo n.º 7
0
/*
 * Device-optimized file table vnode write routine.
 *
 * Bypasses the VOP table and talks directly to the device via
 * dev_dwrite(), mirroring devfs_fo_read() on the read side.  Builds
 * the ioflag word from the per-call flags, the file's f_flag, and the
 * mount's sync policy, then updates the node timestamps and the file
 * offset on completion.
 *
 * Returns 0 on success or an errno (EBADF if the vnode is bad or the
 * device has been revoked).
 */
static int
devfs_fo_write(struct file *fp, struct uio *uio,
		  struct ucred *cred, int flags)
{
	struct devfs_node *node;
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not p %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD)
		return EBADF;

	node = DEVFS_NODE(vp);

	/* Let the buffer cache drain before a regular-file write. */
	if (vp->v_type == VREG)
		bwillwrite(uio->uio_resid);

	/*
	 * NOTE: a redundant reload of vp from fp->f_data was removed
	 * here; vp was assigned above and is not modified in between.
	 */
	if ((dev = vp->v_rdev) == NULL)
		return EBADF;

	/* Hold the device across the I/O so it cannot be destroyed. */
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	   ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	/* Per-call flags override the file's default blocking mode. */
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	/* Same precedence pattern for buffered vs direct I/O... */
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	/* ...and for async vs synchronous writes. */
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	/* A synchronous mount forces synchronous writes. */
	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag, fp);

	release_dev(dev);
	if (node) {
		/* A write updates both access and modification times. */
		nanotime(&node->atime);
		nanotime(&node->mtime);
	}

	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;

	return (error);
}
Exemplo n.º 8
0
/*
 * VOP_CLOSE for a devfs special file.
 *
 * Decides whether this close should be propagated to the driver via
 * dev_dclose() (forced close, D_TRACKCLOSE devices, or the last open
 * of the vnode), handles controlling-terminal half-close, and finally
 * lets vop_stdclose() track the vnode's open count.
 */
static int
devfs_spec_close(struct vop_close_args *ap)
{
	struct devfs_node *node;
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	cdev_t dev = vp->v_rdev;
	int error = 0;
	int needrelock;

	/*
	 * We do special tests on the opencount so unfortunately we need
	 * an exclusive lock.
	 */
	vn_lock(vp, LK_UPGRADE | LK_RETRY);

	if (dev)
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called on %s! \n",
			    dev->si_name);
	else
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "devfs_spec_close() called, null vode!\n");

	/*
	 * A couple of hacks for devices and tty devices.  The
	 * vnode ref count cannot be used to figure out the
	 * last close, but we can use v_opencount now that
	 * revoke works properly.
	 *
	 * Detect the last close on a controlling terminal and clear
	 * the session (half-close).
	 */
	if (dev)
		reference_dev(dev);

	if (p && vp->v_opencount <= 1 && vp == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(vp);
	}

	/*
	 * Vnodes can be opened and closed multiple times.  Do not really
	 * close the device unless (1) it is being closed forcibly,
	 * (2) the device wants to track closes, or (3) this is the last
	 * vnode doing its last close on the device.
	 *
	 * XXX the VXLOCK (force close) case can leave vnodes referencing
	 * a closed device.  This might not occur now that our revoke is
	 * fixed.
	 */
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -1- \n");
	if (dev && ((vp->v_flag & VRECLAIMED) ||
	    (dev_dflags(dev) & D_TRACKCLOSE) ||
	    (vp->v_opencount == 1))) {
		/*
		 * Ugly pty magic, to make pty devices disappear again once
		 * they are closed.
		 */
		node = DEVFS_NODE(ap->a_vp);
		if (node && (node->flags & DEVFS_PTY))
			node->flags |= DEVFS_INVISIBLE;

		/*
		 * Unlock around dev_dclose(), unless the vnode is
		 * undergoing a vgone/reclaim (during umount).
		 */
		needrelock = 0;
		if ((vp->v_flag & VRECLAIMED) == 0 && vn_islocked(vp)) {
			needrelock = 1;
			vn_unlock(vp);
		}

		/*
		 * WARNING!  If the device destroys itself the devfs node
		 *	     can disappear here.
		 *
		 * WARNING!  vn_lock() will fail if the vp is in a VRECLAIM,
		 *	     which can occur during umount.
		 */
		error = dev_dclose(dev, ap->a_fflag, S_IFCHR, ap->a_fp);
		/* node is now stale */

		if (needrelock) {
			if (vn_lock(vp, LK_EXCLUSIVE |
					LK_RETRY |
					LK_FAILRECLAIM) != 0) {
				panic("devfs_spec_close: vnode %p "
				      "unexpectedly could not be relocked",
				      vp);
			}
		}
	} else {
		/* Not the last close and no tracking required: no-op. */
		error = 0;
	}
	devfs_debug(DEVFS_DEBUG_DEBUG, "devfs_spec_close() -2- \n");

	/*
	 * Track the actual opens and closes on the vnode.  The last close
	 * disassociates the rdev.  If the rdev is already disassociated or
	 * the opencount is already 0, the vnode might have been revoked
	 * and no further opencount tracking occurs.
	 */
	if (dev)
		release_dev(dev);
	if (vp->v_opencount > 0)
		vop_stdclose(ap);
	return(error);

}