Example #1
/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ffs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *mp = vp->v_mount;
	struct ufsmount *ump = ip->i_ump;
	void *data;
	int error;

	fstrans_start(mp, FSTRANS_LAZY);
	/*
	 * The inode must be freed and updated before being removed
	 * from its hash chain.  Other threads trying to gain a hold
	 * on the inode will be stalled because it is locked (VI_XLOCK).
	 */
	error = UFS_WAPBL_BEGIN(mp);
	if (error) {
		fstrans_done(mp);
		return error;
	}
	if (ip->i_nlink <= 0 && ip->i_omode != 0 &&
	    (vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
		ffs_vfree(vp, ip->i_number, ip->i_omode);
	UFS_WAPBL_END(mp);
	if ((error = ufs_reclaim(vp)) != 0) {
		fstrans_done(mp);
		return (error);
	}
	if (ip->i_din.ffs1_din != NULL) {
		if (ump->um_fstype == UFS1)
			pool_cache_put(ffs_dinode1_cache, ip->i_din.ffs1_din);
		else
			pool_cache_put(ffs_dinode2_cache, ip->i_din.ffs2_din);
	}
	/*
	 * To interlock with ffs_sync().
	 */
	genfs_node_destroy(vp);
	mutex_enter(vp->v_interlock);
	data = vp->v_data;
	vp->v_data = NULL;
	mutex_exit(vp->v_interlock);

	/*
	 * XXX MFS ends up here, too, to free an inode.  Should we create
	 * XXX a separate pool for MFS inodes?
	 */
	pool_cache_put(ffs_inode_cache, data);
	fstrans_done(mp);
	return (0);
}
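Every example on this page pairs with the pool_cache(9) interface. For orientation, here is a minimal, hypothetical sketch of the lifecycle the callers below all share; "struct example" and "example_cache" are made-up names, while the pool_cache_* calls are the real NetBSD interfaces:

#include <sys/pool.h>

struct example {
	int	ex_field;
};

static pool_cache_t example_cache;

/*
 * Create the cache once, typically at subsystem initialization.
 */
void
example_init(void)
{
	example_cache = pool_cache_init(sizeof(struct example), 0, 0, 0,
	    "example", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * The get/put pairing that the examples on this page follow.
 */
void
example_use(void)
{
	struct example *ex;

	ex = pool_cache_get(example_cache, PR_WAITOK);	/* may sleep */
	ex->ex_field = 1;			/* ... use the object ... */
	pool_cache_put(example_cache, ex);	/* return it to the cache */
}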
Example #2
/*
 * Release a reference to a dquot.
 */
static void
dqrele(struct vnode *vp, struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	mutex_enter(&dq->dq_interlock);
	for (;;) {
		mutex_enter(&dqlock);
		if (dq->dq_cnt > 1) {
			dq->dq_cnt--;
			mutex_exit(&dqlock);
			mutex_exit(&dq->dq_interlock);
			return;
		}
		if ((dq->dq_flags & DQ_MOD) == 0)
			break;
		mutex_exit(&dqlock);
		(void) dqsync(vp, dq);
	}
	KASSERT(dq->dq_cnt == 1 && (dq->dq_flags & DQ_MOD) == 0);
	LIST_REMOVE(dq, dq_hash);
	mutex_exit(&dqlock);
	mutex_exit(&dq->dq_interlock);
	mutex_destroy(&dq->dq_interlock);
	pool_cache_put(dquot_cache, dq);
}
Example #3
/*
 * Release a reference to a dquot.
 */
void
dqrele(struct vnode *vp, struct dquot *dq)
{

	if (dq == NODQUOT)
		return;
	mutex_enter(&dq->dq_interlock);
	for (;;) {
		mutex_enter(&dqlock);
		if (dq->dq_cnt > 1) {
			dq->dq_cnt--;
			mutex_exit(&dqlock);
			mutex_exit(&dq->dq_interlock);
			return;
		}
		if ((dq->dq_flags & DQ_MOD) == 0)
			break;
		mutex_exit(&dqlock);
#ifdef QUOTA
		if (dq->dq_ump->um_flags & UFS_QUOTA)
			(void) dq1sync(vp, dq);
#endif
#ifdef QUOTA2
		if (dq->dq_ump->um_flags & UFS_QUOTA2)
			(void) dq2sync(vp, dq);
#endif
	}
	KASSERT(dq->dq_cnt == 1 && (dq->dq_flags & DQ_MOD) == 0);
	LIST_REMOVE(dq, dq_hash);
	mutex_exit(&dqlock);
	mutex_exit(&dq->dq_interlock);
	mutex_destroy(&dq->dq_interlock);
	pool_cache_put(dquot_cache, dq);
}
Example #4
/*
 * Free any hash table associated with inode 'ip'.
 */
void
ufsdirhash_free(struct inode *ip)
{
	struct dirhash *dh;
	int i, mem;

	if ((dh = ip->i_dirhash) == NULL)
		return;

	if (dh->dh_onlist) {
		DIRHASHLIST_LOCK();
		if (dh->dh_onlist)
			TAILQ_REMOVE(&ufsdirhash_list, dh, dh_list);
		DIRHASHLIST_UNLOCK();
	}

	/* The dirhash pointed to by 'dh' is exclusively ours now. */
	mem = sizeof(*dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < dh->dh_narrays; i++)
			DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
		mem += dh->dh_hashsz;
		mem += dh->dh_narrays * DH_NBLKOFF * sizeof(**dh->dh_hash);
		mem += dh->dh_nblk * sizeof(*dh->dh_blkfree);
	}
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ufsdirhash_cache, dh);
	ip->i_dirhash = NULL;

	atomic_add_int(&ufs_dirhashmem, -mem);
}
Example #5
/*
 * amap_alloc1: internal function that allocates an amap, but does not
 *	init the overlay.
 *
 * => lock on returned amap is init'd
 */
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
{
	struct vm_amap *amap;
	int totalslots;
	km_flag_t kmflags;

	amap = pool_cache_get(&uvm_amap_cache,
	    ((waitf & UVM_FLAG_NOWAIT) != 0) ? PR_NOWAIT : PR_WAITOK);
	if (amap == NULL)
		return(NULL);

	kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
	totalslots = amap_roundup_slots(slots + padslots);
	mutex_init(&amap->am_l, MUTEX_DEFAULT, IPL_NONE);
	amap->am_ref = 1;
	amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
	amap->am_ppref = NULL;
#endif
	amap->am_maxslot = totalslots;
	amap->am_nslot = slots;
	amap->am_nused = 0;

	amap->am_slots = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_slots == NULL)
		goto fail1;

	amap->am_bckptr = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_bckptr == NULL)
		goto fail2;

	amap->am_anon = kmem_alloc(totalslots * sizeof(struct vm_anon *),
	    kmflags);
	if (amap->am_anon == NULL)
		goto fail3;

	return(amap);

fail3:
	kmem_free(amap->am_bckptr, totalslots * sizeof(int));
fail2:
	kmem_free(amap->am_slots, totalslots * sizeof(int));
fail1:
	mutex_destroy(&amap->am_l);
	pool_cache_put(&uvm_amap_cache, amap);

	/*
	 * XXX hack to tell the pagedaemon how many pages we need,
	 * since we can need more than it would normally free.
	 */
	if ((waitf & UVM_FLAG_NOWAIT) != 0) {
		extern u_int uvm_extrapages;
		atomic_add_int(&uvm_extrapages,
		    ((sizeof(int) * 2 + sizeof(struct vm_anon *)) *
		    totalslots) >> PAGE_SHIFT);
	}
	return (NULL);
}
Example #6
void
cpu_lwp_free2(struct lwp *l)
{
    struct fpstate64 *fs;

    if ((fs = l->l_md.md_fpstate) != NULL)
        pool_cache_put(fpstate_cache, fs);
}
Example #7
/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}
Example #8
/* ARGSUSED */
void
sunos32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
	/* stack:  XXX */
{
	struct trapframe64 *tf = l->l_md.md_tf;
	struct fpstate64 *fs;
	int64_t tstate;
	struct proc *p = l->l_proc;

	/* Don't allow misaligned code by default */
	p->p_md.md_flags &= ~MDP_FIXALIGN;

	/* Mark this as a 32-bit emulation */
	mutex_enter(p->p_lock);
	p->p_flag |= PK_32;
	mutex_exit(p->p_lock);

	/* Setup the ev_out32 hook */
#if NFIRM_EVENTS > 0
	if (ev_out32_hook == NULL)
		ev_out32_hook = ev_out32;
#endif

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%tstate: (retain icc and xcc and cwp bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
	tstate = ((PSTATE_USER32)<<TSTATE_PSTATE_SHIFT) 
		| (tf->tf_tstate & TSTATE_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		if (l == fplwp) {
			savefpstate(fs);
			fplwp = NULL;
		}
		pool_cache_put(fpstate_cache, fs);
		l->l_md.md_fpstate = NULL;
	}
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = (u_int)p->p_psstrp;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;

	stack -= sizeof(struct rwindow32);
	tf->tf_out[6] = stack;
	tf->tf_out[7] = 0;
}
Example #9
/*
 * Release the mbuf; we keep a reference count on the tx buffer so
 * that we don't release it before it is freed.
 */
static void
btsco_extfree(struct mbuf *m, void *addr, size_t size,
    void *arg)
{
	struct btsco_softc *sc = arg;

	if (m != NULL)
		pool_cache_put(mb_cache, m);

	sc->sc_tx_refcnt--;
}
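For context, a callback with this signature is installed when a driver lends its own transmit buffer to an mbuf as external storage. A hedged sketch of the sending side, assuming sc->sc_tx_buf and sc->sc_mtu name the buffer and its size (MGETHDR and MEXTADD are the mbuf(9) interfaces; the field names are assumptions):

	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		sc->sc_tx_refcnt++;	/* dropped again in btsco_extfree() */
		MEXTADD(m, sc->sc_tx_buf, sc->sc_mtu, M_DEVBUF,
		    btsco_extfree, sc);
	}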
Example #10
static int
rnd_close(struct file *fp)
{
	struct rnd_ctx *const ctx = fp->f_data;

	if (ctx->rc_cprng != NULL)
		cprng_strong_destroy(ctx->rc_cprng);
	fp->f_data = NULL;
	pool_cache_put(rnd_ctx_cache, ctx);

	return 0;
}
Example #11
static void
radix_tree_free_node(struct radix_tree_node *n)
{

	KASSERT(radix_tree_node_clean_p(n));
#if defined(_KERNEL)
	pool_cache_put(radix_tree_node_cache, n);
#elif defined(_STANDALONE)
	dealloc(n, sizeof(*n));
#else
	free(n);
#endif
}
Example #12
void
soput(struct socket *so)
{

	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));
	seldestroy(&so->so_rcv.sb_sel);
	seldestroy(&so->so_snd.sb_sel);
	mutex_obj_free(so->so_lock);
	cv_destroy(&so->so_cv);
	cv_destroy(&so->so_rcv.sb_cv);
	cv_destroy(&so->so_snd.sb_cv);
	pool_cache_put(socket_cache, so);
}
Example #13
/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERTMSG(mo->mo_magic == MUTEX_OBJ_MAGIC,
	    "%s: lock %p: mo->mo_magic (%#x) != MUTEX_OBJ_MAGIC (%#x)",
	     __func__, mo, mo->mo_magic, MUTEX_OBJ_MAGIC);
	KASSERTMSG(mo->mo_refcnt > 0,
	    "%s: lock %p: mo->mo_refcnt (%#x) == 0",
	     __func__, mo, mo->mo_refcnt);

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}
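A hedged usage sketch pairing this with its allocation and hold counterparts (mutex_obj_alloc and mutex_obj_hold are the real NetBSD interfaces; the reference counts in the comments follow the logic above):

	kmutex_t *lock;

	lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);	/* refcnt 1 */
	mutex_obj_hold(lock);					/* refcnt 2 */
	/* ... two users now share one lock object ... */
	(void)mutex_obj_free(lock);	/* 2 -> 1, returns false */
	(void)mutex_obj_free(lock);	/* 1 -> 0, destroys lock, returns true */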
Example #14
/*
 * Free an unused, unreferenced vnode.
 */
void
vnfree(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 0);

	if ((vp->v_iflag & VI_MARKER) == 0) {
		rw_destroy(&vp->v_lock);
		mutex_enter(&vnode_free_list_lock);
		numvnodes--;
		mutex_exit(&vnode_free_list_lock);
	}

	/*
	 * Note: the vnode interlock will either be freed, or a reference
	 * to it dropped (if VI_LOCKSHARE was in use).
	 */
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vnode_cache, vp);
}
Example #15
/*
 * amap_free: free an amap
 *
 * => the amap must be unlocked
 * => the amap should have a zero reference count and be empty
 */
void
amap_free(struct vm_amap *amap)
{
	int slots;

	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);

	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
	KASSERT(!mutex_owned(&amap->am_l));
	slots = amap->am_maxslot;
	kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
	kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
	kmem_free(amap->am_anon, slots * sizeof(*amap->am_anon));
#ifdef UVM_AMAP_PPREF
	if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
		kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
#endif
	mutex_destroy(&amap->am_l);
	pool_cache_put(&uvm_amap_cache, amap);
	UVMHIST_LOG(maphist,"<- done, freed amap = 0x%x", amap, 0, 0, 0);
}
Example #16
/*
 * Allocate a new inode.
 */
int
ufs_makeinode(int mode, struct vnode *dvp, const struct ufs_lookup_results *ulr,
	struct vnode **vpp, struct componentname *cnp)
{
	struct inode	*ip, *pdir;
	struct direct	*newdir;
	struct vnode	*tvp;
	int		error;

	UFS_WAPBL_JUNLOCK_ASSERT(dvp->v_mount);

	pdir = VTOI(dvp);

	if ((mode & IFMT) == 0)
		mode |= IFREG;

	if ((error = UFS_VALLOC(dvp, mode, cnp->cn_cred, vpp)) != 0) {
		return (error);
	}
	tvp = *vpp;
	ip = VTOI(tvp);
	ip->i_gid = pdir->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	error = UFS_WAPBL_BEGIN1(dvp->v_mount, dvp);
	if (error) {
		/*
		 * Note, we can't VOP_VFREE(tvp) here like we should
		 * because we can't write to the disk.  Instead, we leave
		 * the vnode dangling from the journal.
		 */
		vput(tvp);
		return (error);
	}
#if defined(QUOTA) || defined(QUOTA2)
	if ((error = chkiq(ip, 1, cnp->cn_cred, 0))) {
		UFS_VFREE(tvp, ip->i_number, mode);
		UFS_WAPBL_END1(dvp->v_mount, dvp);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = mode;
	DIP_ASSIGN(ip, mode, mode);
	tvp->v_type = IFTOVT(mode);	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 1;
	DIP_ASSIGN(ip, nlink, 1);

	/* Authorize setting SGID if needed. */
	if (ip->i_mode & ISGID) {
		error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
		    tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid,
		    ip->i_gid, mode));
		if (error) {
			ip->i_mode &= ~ISGID;
			DIP_ASSIGN(ip, mode, ip->i_mode);
		}
	}

	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Make sure inode goes to disk before directory entry.
	 */
	if ((error = UFS_UPDATE(tvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;
	newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
	ufs_makedirentry(ip, cnp, newdir);
	error = ufs_direnter(dvp, ulr, tvp, newdir, cnp, NULL);
	pool_cache_put(ufs_direct_cache, newdir);
	if (error)
		goto bad;
	*vpp = tvp;
	return (0);

 bad:
	/*
	 * Write error occurred trying to update the inode
	 * or the directory so must deallocate the inode.
	 */
	ip->i_nlink = 0;
	DIP_ASSIGN(ip, nlink, 0);
	ip->i_flag |= IN_CHANGE;
	UFS_WAPBL_UPDATE(tvp, NULL, NULL, 0);
	tvp->v_type = VNON;		/* explodes later if VBLK */
	UFS_WAPBL_END1(dvp->v_mount, dvp);
	vput(tvp);
	return (error);
}
Example #17
static int
rnd_write(struct file *fp, off_t *offp, struct uio *uio,
	   kauth_cred_t cred, int flags)
{
	u_int8_t *bf;
	int n, ret = 0, estimate_ok = 0, estimate = 0, added = 0;

	ret = kauth_authorize_device(cred,
	    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
	if (ret) {
		return (ret);
	}
	estimate_ok = !kauth_authorize_device(cred,
	    KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);

	DPRINTF(RND_DEBUG_WRITE,
	    ("Random: Write of %zu requested\n", uio->uio_resid));

	if (uio->uio_resid == 0)
		return (0);
	ret = 0;
	bf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);
	while (uio->uio_resid > 0) {
		/*
		 * Don't flood the pool.
		 */
		if (added > RND_POOLWORDS * sizeof(int)) {
#ifdef RND_VERBOSE
			printf("rnd: added %d already, adding no more.\n",
			       added);
#endif
			break;
		}
		n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

		ret = uiomove((void *)bf, n, uio);
		if (ret != 0)
			break;

		if (estimate_ok) {
			/*
			 * Don't cause samples to be discarded by taking
			 * the pool's entropy estimate to the max.
			 */
			if (added > RND_POOLWORDS / 2)
				estimate = 0;
			else
				estimate = n * NBBY / 2;
#ifdef RND_VERBOSE
			printf("rnd: adding on write, %d bytes, estimate %d\n",
			       n, estimate);
#endif
		} else {
#ifdef RND_VERBOSE
			printf("rnd: kauth says no entropy.\n");
#endif
		}

		/*
		 * Mix in the bytes.
		 */
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, bf, n, estimate);
		mutex_spin_exit(&rndpool_mtx);

		added += n;
		DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
	}
	pool_cache_put(rnd_temp_buffer_cache, bf);
	return (ret);
}
Example #18
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (EINVAL);
	}
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp, 0);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
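The shape of dqget() above is the classic unlock-allocate-recheck pattern: the hash chain is searched under dqlock, the lock is dropped for the blocking pool_cache_get(), and the search is repeated before insertion because another thread may have installed the same dquot in the meantime. A condensed, hypothetical sketch, where lookup() and insert() stand in for the LIST_FOREACH/LIST_INSERT_HEAD code:

	struct dquot *dq, *ndq;

	mutex_enter(&dqlock);
	if ((dq = lookup(dqh, id)) != NULL) {	/* fast path: cached */
		dqref(dq);
		mutex_exit(&dqlock);
		return dq;
	}
	mutex_exit(&dqlock);			/* cannot sleep under dqlock */
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);	/* may sleep */
	mutex_enter(&dqlock);
	if ((dq = lookup(dqh, id)) != NULL) {	/* lost the race */
		dqref(dq);
		mutex_exit(&dqlock);
		pool_cache_put(dquot_cache, ndq);	/* discard ours */
		return dq;
	}
	insert(dqh, ndq);			/* we won: install ours */
	mutex_exit(&dqlock);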
Example #19
/*
 * ulfs_gro_rename: Actually perform the rename operation.
 */
static int
ulfs_gro_rename(struct mount *mp, kauth_cred_t cred,
    struct vnode *fdvp, struct componentname *fcnp,
    void *fde, struct vnode *fvp,
    struct vnode *tdvp, struct componentname *tcnp,
    void *tde, struct vnode *tvp)
{
	struct ulfs_lookup_results *fulr = fde;
	struct ulfs_lookup_results *tulr = tde;
	bool directory_p, reparent_p;
	struct lfs_direct *newdir;
	int error;

	KASSERT(mp != NULL);
	KASSERT(fdvp != NULL);
	KASSERT(fcnp != NULL);
	KASSERT(fulr != NULL);
	KASSERT(fvp != NULL);
	KASSERT(tdvp != NULL);
	KASSERT(tcnp != NULL);
	KASSERT(tulr != NULL);
	KASSERT(fulr != tulr);
	KASSERT(fdvp != fvp);
	KASSERT(fdvp != tvp);
	KASSERT(tdvp != fvp);
	KASSERT(tdvp != tvp);
	KASSERT(fvp != tvp);
	KASSERT(fdvp->v_mount == mp);
	KASSERT(fvp->v_mount == mp);
	KASSERT(tdvp->v_mount == mp);
	KASSERT((tvp == NULL) || (tvp->v_mount == mp));
	KASSERT(VOP_ISLOCKED(fdvp) == LK_EXCLUSIVE);
	KASSERT(VOP_ISLOCKED(fvp) == LK_EXCLUSIVE);
	KASSERT(VOP_ISLOCKED(tdvp) == LK_EXCLUSIVE);
	KASSERT((tvp == NULL) || (VOP_ISLOCKED(tvp) == LK_EXCLUSIVE));

	/*
	 * We shall need to temporarily bump the link count, so make
	 * sure there is room to do so.
	 */
	if ((nlink_t)VTOI(fvp)->i_nlink >= LINK_MAX)
		return EMLINK;

	directory_p = (fvp->v_type == VDIR);
	KASSERT(directory_p == ((VTOI(fvp)->i_mode & LFS_IFMT) == LFS_IFDIR));
	KASSERT((tvp == NULL) || (directory_p == (tvp->v_type == VDIR)));
	KASSERT((tvp == NULL) || (directory_p ==
		((VTOI(tvp)->i_mode & LFS_IFMT) == LFS_IFDIR)));

	reparent_p = (fdvp != tdvp);
	KASSERT(reparent_p == (VTOI(fdvp)->i_number != VTOI(tdvp)->i_number));

	/*
	 * Commence hacking of the data on disk.
	 */

	error = 0;

	/*
	 * 1) Bump link count while we're moving stuff
	 *    around.  If we crash somewhere before
	 *    completing our work, the link count
	 *    may be wrong, but correctable.
	 */

	KASSERT((nlink_t)VTOI(fvp)->i_nlink < LINK_MAX);
	VTOI(fvp)->i_nlink++;
	DIP_ASSIGN(VTOI(fvp), nlink, VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_flag |= IN_CHANGE;
	error = lfs_update(fvp, NULL, NULL, UPDATE_DIROP);
	if (error)
		goto whymustithurtsomuch;

	/*
	 * 2) If target doesn't exist, link the target
	 *    to the source and unlink the source.
	 *    Otherwise, rewrite the target directory
	 *    entry to reference the source inode and
	 *    expunge the original entry's existence.
	 */

	if (tvp == NULL) {
		/*
		 * Account for ".." in new directory.
		 * When source and destination have the same
		 * parent we don't fool with the link count.
		 */
		if (directory_p && reparent_p) {
			if ((nlink_t)VTOI(tdvp)->i_nlink >= LINK_MAX) {
				error = EMLINK;
				goto whymustithurtsomuch;
			}
			KASSERT((nlink_t)VTOI(tdvp)->i_nlink < LINK_MAX);
			VTOI(tdvp)->i_nlink++;
			DIP_ASSIGN(VTOI(tdvp), nlink, VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_flag |= IN_CHANGE;
			error = lfs_update(tdvp, NULL, NULL, UPDATE_DIROP);
			if (error) {
				/*
				 * Link count update didn't take --
				 * back out the in-memory link count.
				 */
				KASSERT(0 < VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_nlink--;
				DIP_ASSIGN(VTOI(tdvp), nlink,
				    VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_flag |= IN_CHANGE;
				goto whymustithurtsomuch;
			}
		}

		newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK);
		ulfs_makedirentry(VTOI(fvp), tcnp, newdir);
		error = ulfs_direnter(tdvp, tulr, NULL, newdir, tcnp, NULL);
		pool_cache_put(ulfs_direct_cache, newdir);
		if (error) {
			if (directory_p && reparent_p) {
				/*
				 * Directory update didn't take, but
				 * the link count update did -- back
				 * out the in-memory link count and the
				 * on-disk link count.
				 */
				KASSERT(0 < VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_nlink--;
				DIP_ASSIGN(VTOI(tdvp), nlink,
				    VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_flag |= IN_CHANGE;
				(void)lfs_update(tdvp, NULL, NULL,
				    UPDATE_WAIT | UPDATE_DIROP);
			}
			goto whymustithurtsomuch;
		}
	} else {
		if (directory_p)
			/* XXX WTF?  Why purge here?  Why not purge others?  */
			cache_purge(tdvp);

		/*
		 * Make the target directory's entry for tcnp point at
		 * the source node.
		 *
		 * XXX ulfs_dirrewrite decrements tvp's link count, but
		 * doesn't touch the link count of the new inode.  Go
		 * figure.
		 */
		error = ulfs_dirrewrite(VTOI(tdvp), tulr->ulr_offset,
		    VTOI(tvp), VTOI(fvp)->i_number, LFS_IFTODT(VTOI(fvp)->i_mode),
		    ((directory_p && reparent_p) ? reparent_p : directory_p),
		    IN_CHANGE | IN_UPDATE);
		if (error)
			goto whymustithurtsomuch;

		/*
		 * If the source and target are directories, and the
		 * target is in the same directory as the source,
		 * decrement the link count of the common parent
		 * directory, since we are removing the target from
		 * that directory.
		 */
		if (directory_p && !reparent_p) {
			KASSERT(fdvp == tdvp);
			/* XXX check, don't kassert */
			KASSERT(0 < VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_nlink--;
			DIP_ASSIGN(VTOI(tdvp), nlink, VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_flag |= IN_CHANGE;
		}

		if (directory_p) {
			/*
			 * XXX I don't understand the following comment
			 * from ulfs_rename -- in particular, the part
			 * about `there may be other hard links'.
			 *
			 * Truncate inode. The only stuff left in the directory
			 * is "." and "..". The "." reference is inconsequential
			 * since we are quashing it. We have removed the "."
			 * reference and the reference in the parent directory,
			 * but there may be other hard links.
			 *
			 * XXX The ulfs_dirempty call earlier does
			 * not guarantee anything about nlink.
			 */
			if (VTOI(tvp)->i_nlink != 1)
				ulfs_dirbad(VTOI(tvp), (doff_t)0,
				    "hard-linked directory");
			VTOI(tvp)->i_nlink = 0;
			DIP_ASSIGN(VTOI(tvp), nlink, 0);
			error = lfs_truncate(tvp, (off_t)0, IO_SYNC, cred);
			if (error)
				goto whymustithurtsomuch;
		}
	}

	/*
	 * If the source is a directory with a new parent, the link
	 * count of the old parent directory must be decremented and
	 * ".." set to point to the new parent.
	 *
	 * XXX ulfs_dirrewrite updates the link count of fdvp, but not
	 * the link count of fvp or the link count of tdvp.  Go figure.
	 */
	if (directory_p && reparent_p) {
		error = ulfs_dirrewrite(VTOI(fvp), mastertemplate.dot_reclen,
		    VTOI(fdvp), VTOI(tdvp)->i_number, LFS_DT_DIR, 0, IN_CHANGE);
#if 0		/* XXX This branch was not in ulfs_rename! */
		if (error)
			goto whymustithurtsomuch;
#endif

		/* XXX WTF?  Why purge here?  Why not purge others?  */
		cache_purge(fdvp);
	}

	/*
	 * 3) Unlink the source.
	 */

	/*
	 * ulfs_direnter may compact the directory in the process of
	 * inserting a new entry.  That may invalidate fulr, which we
	 * need in order to remove the old entry.  In that case, we
	 * need to recalculate what fulr should be.
	 */
	if (!reparent_p && (tvp == NULL) &&
	    ulfs_rename_ulr_overlap_p(fulr, tulr)) {
		error = ulfs_rename_recalculate_fulr(fdvp, fulr, tulr, fcnp);
#if 0				/* XXX */
		if (error)	/* XXX Try to back out changes?  */
			goto whymustithurtsomuch;
#endif
	}

	/*
	 * XXX 0 means !isrmdir.  But can't this be an rmdir?
	 * XXX Well, turns out that argument to ulfs_dirremove is ignored...
	 * XXX And it turns out ulfs_dirremove updates the link count of fvp.
	 * XXX But it doesn't update the link count of fdvp.  Go figure.
	 * XXX fdvp's link count is updated in ulfs_dirrewrite instead.
	 * XXX Actually, sometimes it doesn't update fvp's link count.
	 * XXX I hate the world.
	 */
	error = ulfs_dirremove(fdvp, fulr, VTOI(fvp), fcnp->cn_flags, 0);
	if (error)
#if 0				/* XXX */
		goto whymustithurtsomuch;
#endif
		goto arghmybrainhurts;

	/*
	 * XXX Perhaps this should go at the top, in case the file
	 * system is modified but incompletely so because of an
	 * intermediate error.
	 */
	genfs_rename_knote(fdvp, fvp, tdvp, tvp,
	    ((tvp != NULL) && (VTOI(tvp)->i_nlink == 0)));
#if 0				/* XXX */
	genfs_rename_cache_purge(fdvp, fvp, tdvp, tvp);
#endif
	goto arghmybrainhurts;

whymustithurtsomuch:
	KASSERT(0 < VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_nlink--;
	DIP_ASSIGN(VTOI(fvp), nlink, VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_flag |= IN_CHANGE;

arghmybrainhurts:
/*ihateyou:*/
	return error;
}
Example #20
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	int error = 0; /* XXX gcc */

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	if ((ump->um_flags & (UFS_QUOTA|UFS_QUOTA2)) == 0) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (ENODEV);
	}
	dqvp = ump->um_quotas[type];
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA) {
		if (dqvp == NULLVP || (ump->umq1_qflags[type] & QTF_CLOSING)) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2) {
		if (dqvp == NULLVP) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA)
		error = dq1get(dqvp, id, ump, type, dq);
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2)
		error = dq2get(dqvp, id, ump, type, dq);
#endif
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
Example #21
/*
 * Allocate a new inode.
 */
int
ulfs_makeinode(int mode, struct vnode *dvp, const struct ulfs_lookup_results *ulr,
	struct vnode **vpp, struct componentname *cnp)
{
	struct inode	*ip, *pdir;
	struct lfs_direct	*newdir;
	struct vnode	*tvp;
	int		error;

	pdir = VTOI(dvp);

	if ((mode & LFS_IFMT) == 0)
		mode |= LFS_IFREG;

	if ((error = lfs_valloc(dvp, mode, cnp->cn_cred, vpp)) != 0) {
		return (error);
	}
	tvp = *vpp;
	ip = VTOI(tvp);
	ip->i_gid = pdir->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) {
		lfs_vfree(tvp, ip->i_number, mode);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = mode;
	DIP_ASSIGN(ip, mode, mode);
	tvp->v_type = IFTOVT(mode);	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 1;
	DIP_ASSIGN(ip, nlink, 1);

	/* Authorize setting SGID if needed. */
	if (ip->i_mode & ISGID) {
		error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
		    tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid,
		    ip->i_gid, mode));
		if (error) {
			ip->i_mode &= ~ISGID;
			DIP_ASSIGN(ip, mode, ip->i_mode);
		}
	}

	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Make sure inode goes to disk before directory entry.
	 */
	if ((error = lfs_update(tvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;
	newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK);
	ulfs_makedirentry(ip, cnp, newdir);
	error = ulfs_direnter(dvp, ulr, tvp, newdir, cnp, NULL);
	pool_cache_put(ulfs_direct_cache, newdir);
	if (error)
		goto bad;
	*vpp = tvp;
	return (0);

 bad:
	/*
	 * Write error occurred trying to update the inode
	 * or the directory so must deallocate the inode.
	 */
	ip->i_nlink = 0;
	DIP_ASSIGN(ip, nlink, 0);
	ip->i_flag |= IN_CHANGE;
	/* If IN_ADIROP, account for it */
	lfs_unmark_vnode(tvp);
	tvp->v_type = VNON;		/* explodes later if VBLK */
	vput(tvp);
	return (error);
}
Example #22
int
ufs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode		*a_dvp;
		struct vnode		**a_vpp;
		struct componentname	*a_cnp;
		struct vattr		*a_vap;
	} */ *ap = v;
	struct vnode		*dvp = ap->a_dvp, *tvp;
	struct vattr		*vap = ap->a_vap;
	struct componentname	*cnp = ap->a_cnp;
	struct inode		*ip, *dp = VTOI(dvp);
	struct buf		*bp;
	struct dirtemplate	dirtemplate;
	struct direct		*newdir;
	int			error, dmode;
	struct ufsmount		*ump = dp->i_ump;
	int			dirblksiz = ump->um_dirblksiz;
	struct ufs_lookup_results *ulr;

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	UFS_CHECK_CRAPCOUNTER(dp);

	if ((nlink_t)dp->i_nlink >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & ACCESSPERMS;
	dmode |= IFDIR;
	/*
	 * Must simulate part of ufs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = UFS_VALLOC(dvp, dmode, cnp->cn_cred, ap->a_vpp)) != 0)
		goto out;

	tvp = *ap->a_vpp;
	ip = VTOI(tvp);

	error = UFS_WAPBL_BEGIN(ap->a_dvp->v_mount);
	if (error) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		vput(tvp);
		goto out;
	}
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	ip->i_gid = dp->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
#if defined(QUOTA) || defined(QUOTA2)
	if ((error = chkiq(ip, 1, cnp->cn_cred, 0))) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		UFS_WAPBL_END(dvp->v_mount);
		fstrans_done(dvp->v_mount);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = dmode;
	DIP_ASSIGN(ip, mode, dmode);
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 2;
	DIP_ASSIGN(ip, nlink, 2);
	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Bump link count in parent directory to reflect work done below.
	 * Should be done before reference is created so cleanup is
	 * possible if we crash.
	 */
	dp->i_nlink++;
	DIP_ASSIGN(dp, nlink, dp->i_nlink);
	dp->i_flag |= IN_CHANGE;
	if ((error = UFS_UPDATE(dvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;

	/*
	 * Initialize directory with "." and ".." from static template.
	 */
	dirtemplate = mastertemplate;
	dirtemplate.dotdot_reclen = dirblksiz - dirtemplate.dot_reclen;
	dirtemplate.dot_ino = ufs_rw32(ip->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_ino = ufs_rw32(dp->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dot_reclen = ufs_rw16(dirtemplate.dot_reclen,
	    UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_reclen = ufs_rw16(dirtemplate.dotdot_reclen,
	    UFS_MPNEEDSWAP(ump));
	if (ump->um_maxsymlinklen <= 0) {
#if BYTE_ORDER == LITTLE_ENDIAN
		if (UFS_MPNEEDSWAP(ump) == 0)
#else
		if (UFS_MPNEEDSWAP(ump) != 0)
#endif
		{
			dirtemplate.dot_type = dirtemplate.dot_namlen;
			dirtemplate.dotdot_type = dirtemplate.dotdot_namlen;
			dirtemplate.dot_namlen = dirtemplate.dotdot_namlen = 0;
		} else
			dirtemplate.dot_type = dirtemplate.dotdot_type = 0;
	}
	if ((error = UFS_BALLOC(tvp, (off_t)0, dirblksiz, cnp->cn_cred,
	    B_CLRBUF, &bp)) != 0)
		goto bad;
	ip->i_size = dirblksiz;
	DIP_ASSIGN(ip, size, dirblksiz);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	uvm_vnp_setsize(tvp, ip->i_size);
	memcpy((void *)bp->b_data, (void *)&dirtemplate, sizeof dirtemplate);

	/*
	 * Directory set up, now install its entry in the parent directory.
	 * We must write out the buffer containing the new directory body
	 * before entering the new name in the parent.
	 */
	if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0)
		goto bad;
	if ((error = UFS_UPDATE(tvp, NULL, NULL, UPDATE_DIROP)) != 0) {
		goto bad;
	}
	newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
	ufs_makedirentry(ip, cnp, newdir);
	error = ufs_direnter(dvp, ulr, tvp, newdir, cnp, bp);
	pool_cache_put(ufs_direct_cache, newdir);
 bad:
	if (error == 0) {
		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
		VOP_UNLOCK(tvp);
		UFS_WAPBL_END(dvp->v_mount);
	} else {
		dp->i_nlink--;
		DIP_ASSIGN(dp, nlink, dp->i_nlink);
		dp->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(dvp, NULL, NULL, UPDATE_DIROP);
		/*
		 * No need to do an explicit UFS_TRUNCATE here, vrele will
		 * do this for us because we set the link count to 0.
		 */
		ip->i_nlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(tvp, NULL, NULL, UPDATE_DIROP);
		UFS_WAPBL_END(dvp->v_mount);
		vput(tvp);
	}
 out:
	fstrans_done(dvp->v_mount);
	return (error);
}
Example #23
/*
 * whiteout vnode call
 */
int
ufs_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode		*a_dvp;
		struct componentname	*a_cnp;
		int			a_flags;
	} */ *ap = v;
	struct vnode		*dvp = ap->a_dvp;
	struct componentname	*cnp = ap->a_cnp;
	struct direct		*newdir;
	int			error;
	struct ufsmount		*ump = VFSTOUFS(dvp->v_mount);
	struct ufs_lookup_results *ulr;

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	error = 0;
	switch (ap->a_flags) {
	case LOOKUP:
		/* 4.4 format directories support whiteout operations */
		if (ump->um_maxsymlinklen > 0)
			return (0);
		return (EOPNOTSUPP);

	case CREATE:
		/* create a new directory whiteout */
		fstrans_start(dvp->v_mount, FSTRANS_SHARED);
		error = UFS_WAPBL_BEGIN(dvp->v_mount);
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (ump->um_maxsymlinklen <= 0)
			panic("ufs_whiteout: old format filesystem");
#endif

		newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
		newdir->d_ino = UFS_WINO;
		newdir->d_namlen = cnp->cn_namelen;
		memcpy(newdir->d_name, cnp->cn_nameptr,
		    (size_t)cnp->cn_namelen);
		newdir->d_name[cnp->cn_namelen] = '\0';
		newdir->d_type = DT_WHT;
		error = ufs_direnter(dvp, ulr, NULL, newdir, cnp, NULL);
		pool_cache_put(ufs_direct_cache, newdir);
		break;

	case DELETE:
		/* remove an existing directory whiteout */
		fstrans_start(dvp->v_mount, FSTRANS_SHARED);
		error = UFS_WAPBL_BEGIN(dvp->v_mount);
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (ump->um_maxsymlinklen <= 0)
			panic("ufs_whiteout: old format filesystem");
#endif

		cnp->cn_flags &= ~DOWHITEOUT;
		error = ufs_dirremove(dvp, ulr, NULL, cnp->cn_flags, 0);
		break;
	default:
		panic("ufs_whiteout: unknown op");
		/* NOTREACHED */
	}
	UFS_WAPBL_END(dvp->v_mount);
	fstrans_done(dvp->v_mount);
	return (error);
}
Example #24
/*
 * ufs_link: create hard link.
 */
int
ufs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip;
	struct direct *newdir;
	int error;
	struct ufs_lookup_results *ulr;

	KASSERT(dvp != vp);
	KASSERT(vp->v_type != VDIR);
	KASSERT(dvp->v_mount == vp->v_mount);

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error) {
		VOP_ABORTOP(dvp, cnp);
		goto out2;
	}
	ip = VTOI(vp);
	if ((nlink_t)ip->i_nlink >= LINK_MAX) {
		VOP_ABORTOP(dvp, cnp);
		error = EMLINK;
		goto out1;
	}
	if (ip->i_flags & (IMMUTABLE | APPEND)) {
		VOP_ABORTOP(dvp, cnp);
		error = EPERM;
		goto out1;
	}
	error = UFS_WAPBL_BEGIN(vp->v_mount);
	if (error) {
		VOP_ABORTOP(dvp, cnp);
		goto out1;
	}
	ip->i_nlink++;
	DIP_ASSIGN(ip, nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	error = UFS_UPDATE(vp, NULL, NULL, UPDATE_DIROP);
	if (!error) {
		newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
		ufs_makedirentry(ip, cnp, newdir);
		error = ufs_direnter(dvp, ulr, vp, newdir, cnp, NULL);
		pool_cache_put(ufs_direct_cache, newdir);
	}
	if (error) {
		ip->i_nlink--;
		DIP_ASSIGN(ip, nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(vp, NULL, NULL, UPDATE_DIROP);
	}
	UFS_WAPBL_END(vp->v_mount);
 out1:
	VOP_UNLOCK(vp);
 out2:
	VN_KNOTE(vp, NOTE_LINK);
	VN_KNOTE(dvp, NOTE_WRITE);
	vput(dvp);
	fstrans_done(dvp->v_mount);
	return (error);
}
Example #25
static int
rnd_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	int error = 0;

	DPRINTF(RND_DEBUG_READ,
	    ("Random: Read of %zu requested, flags 0x%08x\n",
		uio->uio_resid, flags));

	if (uio->uio_resid == 0)
		return 0;

	struct rnd_ctx *const ctx = fp->f_data;
	uint8_t *const buf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);

	/*
	 * Choose a CPRNG to use -- either the per-open CPRNG, if this
	 * is /dev/random or a long read, or the per-CPU one otherwise.
	 *
	 * XXX NIST_BLOCK_KEYLEN_BYTES is a detail of the cprng(9)
	 * implementation and as such should not be mentioned here.
	 */
	struct cprng_strong *const cprng =
	    ((ctx->rc_hard || (uio->uio_resid > NIST_BLOCK_KEYLEN_BYTES))?
		rnd_ctx_cprng(ctx) : rnd_percpu_cprng());

	/*
	 * Generate the data in RND_TEMP_BUFFER_SIZE chunks.
	 */
	while (uio->uio_resid > 0) {
		const size_t n_req = MIN(uio->uio_resid, RND_TEMP_BUFFER_SIZE);

		CTASSERT(RND_TEMP_BUFFER_SIZE <= CPRNG_MAX_LEN);
		const size_t n_read = cprng_strong(cprng, buf, n_req,
		    ((ctx->rc_hard && ISSET(fp->f_flag, FNONBLOCK))?
			FNONBLOCK : 0));

		/*
		 * Equality will hold unless this is /dev/random, in
		 * which case we get only as many bytes as are left
		 * from the CPRNG's `information-theoretic strength'
		 * since the last rekey.
		 */
		KASSERT(n_read <= n_req);
		KASSERT(ctx->rc_hard || (n_read == n_req));

		error = uiomove(buf, n_read, uio);
		if (error)
			goto out;

		/*
		 * Do at most one iteration for /dev/random and return
		 * a short read without hanging further.  Hanging
		 * breaks applications worse than short reads.  Reads
		 * can't be zero unless nonblocking from /dev/random;
		 * in that case, ask caller to retry, instead of just
		 * returning zero bytes, which means EOF.
		 */
		KASSERT((0 < n_read) || (ctx->rc_hard &&
			ISSET(fp->f_flag, FNONBLOCK)));
		if (ctx->rc_hard) {
			if (n_read == 0)
				error = EAGAIN;
			goto out;
		}
	}

out:	pool_cache_put(rnd_temp_buffer_cache, buf);
	return error;
}
Example #26
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
	const int needswap = UFS_MPNEEDSWAP(ip->i_ump);
	int dirblksiz = ip->i_ump->um_dirblksiz;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_nlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	KASSERT(ip->i_size >= dirblksiz);
	nslots = ip->i_size / UFS_DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(ip->i_size, dirblksiz);
	nblocks = (dirblocks * 3 + 1) / 2;

	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);

	while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) >
	    ufs_dirhashmaxmem) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);
		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		else
			DIRHASHLIST_UNLOCK();
	}

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = pool_cache_get(ufsdirhash_cache, PR_NOWAIT);
	if (dh == NULL) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		return (-1);
	}
	memset(dh, 0, sizeof(*dh));
	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
	DIRHASH_LOCK(dh);
	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < ip->i_size) {
		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp, 0);
			if (ufs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		if (ep->d_reclen == 0 || ep->d_reclen >
		    dirblksiz - (pos & (dirblksiz - 1))) {
			/* Corrupted directory. */
			brelse(bp, 0);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add). */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ufsdirhash_adjfree(dh, pos, -UFS_DIRSIZ(0, ep, needswap),
			    dirblksiz);
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp, 0);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASH_UNLOCK(dh);
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	DIRHASH_UNLOCK(dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
	}
	if (dh->dh_blkfree != NULL)
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ufsdirhash_cache, dh);
	ip->i_dirhash = NULL;
	atomic_add_int(&ufs_dirhashmem, -memreqd);
	return (-1);
}
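The memory-budget loop near the top of ufsdirhash_build() is worth calling out: atomic_add_int_nv() charges the bytes optimistically, and the charge is backed out whenever the global cap would be exceeded. A condensed, hypothetical sketch of that reserve/back-out idiom, with try_to_free_some() standing in for ufsdirhash_recycle():

	/* Optimistically charge memreqd bytes against the global budget. */
	while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) >
	    ufs_dirhashmaxmem) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);	/* back out */
		if (!try_to_free_some())
			return (-1);		/* still over budget: give up */
	}
	/* ... and on any later failure path, release the reservation: */
	atomic_add_int(&ufs_dirhashmem, -memreqd);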