Example #1
int
fdesc_allocvp(fdntype ftype, int ix, struct mount *mp, struct vnode **vpp)
{
	struct fdhashhead *fc;
	struct fdescnode *fd;
	int error = 0;

	fc = FD_NHASH(ix);
loop:
	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			if (vget(fd->fd_vnode, LK_EXCLUSIVE|LK_SLEEPFAIL))
				goto loop;
			*vpp = fd->fd_vnode;
			return (error);
		}
	}

	/*
	 * Otherwise, lock the array while we call getnewvnode(),
	 * since that can block.
	 */
	if (fdcache_lock & FDL_LOCKED) {
		fdcache_lock |= FDL_WANT;
		tsleep((caddr_t) &fdcache_lock, 0, "fdalvp", 0);
		goto loop;
	}
	fdcache_lock |= FDL_LOCKED;

	/*
	 * Do the kmalloc() before getnewvnode() since doing so afterward
	 * might cause a bogus v_data pointer to get dereferenced
	 * elsewhere if kmalloc() should block.
	 */
	fd = kmalloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode(VT_FDESC, mp, vpp, 0, 0);
	if (error) {
		kfree(fd, M_TEMP);
		goto out;
	}
	(*vpp)->v_data = fd;
	fd->fd_vnode = *vpp;
	fd->fd_type = ftype;
	fd->fd_fd = -1;
	fd->fd_ix = ix;
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	vx_unlock(*vpp);

out:
	fdcache_lock &= ~FDL_LOCKED;

	if (fdcache_lock & FDL_WANT) {
		fdcache_lock &= ~FDL_WANT;
		wakeup((caddr_t) &fdcache_lock);
	}

	return (error);
}
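
Example #1 is the DragonFly BSD flavor of this function. It serializes vnode creation with a flag word and a tsleep()/wakeup() handshake rather than a mutex. The declarations it leans on are not shown above; in the 4.4BSD-derived sources they look roughly like the following sketch (the names FDL_WANT, FDL_LOCKED, fdcache_lock, and FD_NHASH match the traditional code; exact definitions may differ by release):

/*
 * Sketch of the support declarations Example #1 assumes, modeled on
 * the 4.4BSD fdescfs code this function descends from.
 */
#define FDL_WANT	0x01		/* a thread is sleeping on the lock */
#define FDL_LOCKED	0x02		/* the lock is held */
static int fdcache_lock;		/* flag word guarding vnode creation */

LIST_HEAD(fdhashhead, fdescnode);
static struct fdhashhead *fdhashtbl;	/* hash chains over fdescnodes */
static u_long fdhash;			/* hash table size minus one */
#define FD_NHASH(ix)	(&fdhashtbl[(ix) & fdhash])	/* chain for index ix */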
Example #2
/*
 * Remove an entry from the hash if it exists.
 */
static void
fdesc_remove_entry(struct fdescnode *fd)
{
	struct fdhashhead *fc;
	struct fdescnode *fd2;

	fc = FD_NHASH(fd->fd_ix);
	mtx_lock(&fdesc_hashmtx);
	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd == fd2) {
			LIST_REMOVE(fd, fd_hash);
			break;
		}
	}
	mtx_unlock(&fdesc_hashmtx);
}
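
Example #2, from FreeBSD, removes a node from the same hash under a real mutex. The existence check before LIST_REMOVE() matters: fdesc_allocvp() can lose the insertion race and destroy a vnode whose fdescnode was never linked into the hash, so reclaim must tolerate an absent entry. For context, the caller looks roughly like this (a sketch modeled on FreeBSD's fdesc_reclaim(); VTOFDESC() is the usual v_data accessor):

/*
 * Sketch of the reclaim VOP that calls fdesc_remove_entry(), modeled
 * on FreeBSD's fdesc_reclaim(). Details vary by release.
 */
static int
fdesc_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct fdescnode *fd = VTOFDESC(vp);

	fdesc_remove_entry(fd);	/* unhook from the hash, if present */
	free(fd, M_TEMP);	/* release the node itself */
	vp->v_data = NULL;
	return (0);
}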
Example #3
int
fdesc_allocvp(fdntype ftype, unsigned fd_fd, int ix, struct mount *mp,
    struct vnode **vpp)
{
	struct fdescmount *fmp;
	struct fdhashhead *fc;
	struct fdescnode *fd, *fd2;
	struct vnode *vp, *vp2;
	struct thread *td;
	int error = 0;

	td = curthread;
	fc = FD_NHASH(ix);
loop:
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is in progress, bail out. The flags are
	 * protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		return (-1);
	}

	LIST_FOREACH(fd, fc, fd_hash) {
		if (fd->fd_ix == ix && fd->fd_vnode->v_mount == mp) {
			/* Get a reference to the vnode in case it is being freed. */
			vp = fd->fd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&fdesc_hashmtx);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
				goto loop;
			*vpp = vp;
			return (0);
		}
	}
	mtx_unlock(&fdesc_hashmtx);

	fd = malloc(sizeof(struct fdescnode), M_TEMP, M_WAITOK);

	error = getnewvnode("fdescfs", mp, &fdesc_vnodeops, &vp);
	if (error) {
		free(fd, M_TEMP);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vp->v_data = fd;
	fd->fd_vnode = vp;
	fd->fd_type = ftype;
	fd->fd_fd = fd_fd;
	fd->fd_ix = ix;
	error = insmntque1(vp, mp, fdesc_insmntque_dtr, NULL);
	if (error != 0) {
		*vpp = NULLVP;
		return (error);
	}

	/* Make sure that someone didn't beat us to inserting the vnode. */
	mtx_lock(&fdesc_hashmtx);
	/*
	 * If a forced unmount is in progress, bail out. The flags are
	 * protected by the hashmtx.
	 */
	fmp = (struct fdescmount *)mp->mnt_data;
	if (fmp == NULL || fmp->flags & FMNT_UNMOUNTF) {
		mtx_unlock(&fdesc_hashmtx);
		vgone(vp);
		vput(vp);
		*vpp = NULLVP;
		return (-1);
	}

	LIST_FOREACH(fd2, fc, fd_hash) {
		if (fd2->fd_ix == ix && fd2->fd_vnode->v_mount == mp) {
			/* Get a reference to the vnode in case it is being freed. */
			vp2 = fd2->fd_vnode;
			VI_LOCK(vp2);
			mtx_unlock(&fdesc_hashmtx);
			error = vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK, td);
			/* Someone beat us: drop our new vnode and let it be reclaimed. */
			vgone(vp);
			vput(vp);
			/* If we didn't get it, return no vnode. */
			if (error)
				vp2 = NULLVP;
			*vpp = vp2;
			return (error);
		}
	}

	/* If we came here, we can insert it safely. */
	LIST_INSERT_HEAD(fc, fd, fd_hash);
	mtx_unlock(&fdesc_hashmtx);
	*vpp = vp;
	return (0);
}
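
Example #3 is the mutex-based lookup-or-create pattern: probe the hash, drop the mutex across the blocking allocations (malloc() and getnewvnode()), then re-take it and re-probe before inserting, since another thread may have created the same vnode in the meantime. A typical call site, sketched from FreeBSD's fdesc_lookup() (the helper name below is hypothetical; Fdesc, FD_DESC, and NULLVP come from the fdescfs headers):

/*
 * Hypothetical helper sketching a fdesc_allocvp() call site, modeled
 * on FreeBSD's fdesc_lookup(). dvp is the fdescfs directory vnode and
 * fd the descriptor number being looked up.
 */
static int
fdesc_get_desc_vnode(struct vnode *dvp, unsigned fd, struct vnode **vpp)
{
	int error;

	error = fdesc_allocvp(Fdesc, fd, FD_DESC + fd, dvp->v_mount, vpp);
	if (error) {
		/* Includes the -1 returned on a racing forced unmount. */
		*vpp = NULLVP;
		return (error);
	}
	/* On success, *vpp is returned locked and referenced. */
	return (0);
}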