/*
 * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
 * marker vnode.
 */
vnode_t *
vnalloc(struct mount *mp)
{
	vnode_t *vp;

	vp = pool_cache_get(vnode_cache, PR_WAITOK);
	KASSERT(vp != NULL);

	memset(vp, 0, sizeof(*vp));
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/*
	 * Done by memset() above.
	 *	LIST_INIT(&vp->v_nclist);
	 *	LIST_INIT(&vp->v_dnclist);
	 */

	if (mp != NULL) {
		vp->v_mount = mp;
		vp->v_type = VBAD;
		vp->v_iflag = VI_MARKER;
	} else {
		rw_init(&vp->v_lock);
	}

	return vp;
}
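All of the examples in this listing obtain their objects from a pool_cache(9). The sketch below is a minimal illustration of that lifecycle; struct foo, foo_cache and the foo_* functions are hypothetical names, not taken from any of the examples.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>

struct foo {
	int	f_state;
};

static pool_cache_t foo_cache;

void
foo_subsystem_init(void)
{

	/*
	 * One cache per object type; IPL_NONE because this sketch never
	 * allocates from interrupt context.
	 */
	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foopl", NULL, IPL_NONE, NULL, NULL, NULL);
}

struct foo *
foo_alloc(void)
{
	struct foo *f;

	/* PR_WAITOK may sleep; PR_NOWAIT returns NULL instead of sleeping. */
	f = pool_cache_get(foo_cache, PR_WAITOK);
	memset(f, 0, sizeof(*f));
	return f;
}

void
foo_free(struct foo *f)
{

	pool_cache_put(foo_cache, f);
}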
Example No. 2
int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __diagused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}
Example No. 3
struct socket *
soget(bool waitok)
{
	struct socket *so;

	so = pool_cache_get(socket_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
	if (__predict_false(so == NULL))
		return (NULL);
	memset(so, 0, sizeof(*so));
	TAILQ_INIT(&so->so_q0);
	TAILQ_INIT(&so->so_q);
#if 0
	cv_init(&so->so_cv, "socket");
	cv_init(&so->so_rcv.sb_cv, "netio");
	cv_init(&so->so_snd.sb_cv, "netio");
#endif
#if 0 /* VADIM */
	selinit(&so->so_rcv.sb_sel);
	selinit(&so->so_snd.sb_sel);
#else
	so->so_rcv.sb_flags |= SB_NOTIFY;
	so->so_snd.sb_flags |= SB_NOTIFY;
#endif
	so->so_rcv.sb_so = so;
	so->so_snd.sb_so = so;
	return so;
}
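The release path for these sockets is not part of this listing. A minimal sketch of what it might look like, assuming only the invariants visible above (empty accept queues, storage owned by socket_cache), is:

void
soput(struct socket *so)
{

	KASSERT(TAILQ_EMPTY(&so->so_q0));
	KASSERT(TAILQ_EMPTY(&so->so_q));
	pool_cache_put(socket_cache, so);
}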
Example No. 4
/*
 * amap_alloc1: internal function that allocates an amap, but does not
 *	init the overlay.
 *
 * => lock on returned amap is init'd
 */
static inline struct vm_amap *
amap_alloc1(int slots, int padslots, int waitf)
{
	struct vm_amap *amap;
	int totalslots;
	km_flag_t kmflags;

	amap = pool_cache_get(&uvm_amap_cache,
	    ((waitf & UVM_FLAG_NOWAIT) != 0) ? PR_NOWAIT : PR_WAITOK);
	if (amap == NULL)
		return(NULL);

	kmflags = ((waitf & UVM_FLAG_NOWAIT) != 0) ? KM_NOSLEEP : KM_SLEEP;
	totalslots = amap_roundup_slots(slots + padslots);
	mutex_init(&amap->am_l, MUTEX_DEFAULT, IPL_NONE);
	amap->am_ref = 1;
	amap->am_flags = 0;
#ifdef UVM_AMAP_PPREF
	amap->am_ppref = NULL;
#endif
	amap->am_maxslot = totalslots;
	amap->am_nslot = slots;
	amap->am_nused = 0;

	amap->am_slots = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_slots == NULL)
		goto fail1;

	amap->am_bckptr = kmem_alloc(totalslots * sizeof(int), kmflags);
	if (amap->am_bckptr == NULL)
		goto fail2;

	amap->am_anon = kmem_alloc(totalslots * sizeof(struct vm_anon *),
	    kmflags);
	if (amap->am_anon == NULL)
		goto fail3;

	return(amap);

fail3:
	kmem_free(amap->am_bckptr, totalslots * sizeof(int));
fail2:
	kmem_free(amap->am_slots, totalslots * sizeof(int));
fail1:
	mutex_destroy(&amap->am_l);
	pool_cache_put(&uvm_amap_cache, amap);

	/*
	 * XXX hack to tell the pagedaemon how many pages we need,
	 * since we can need more than it would normally free.
	 */
	if ((waitf & UVM_FLAG_NOWAIT) != 0) {
		extern u_int uvm_extrapages;
		atomic_add_int(&uvm_extrapages,
		    ((sizeof(int) * 2 + sizeof(struct vm_anon *)) *
		    totalslots) >> PAGE_SHIFT);
	}
	return (NULL);
}
Example No. 5
/*
 * mutex_obj_alloc:
 *
 *	Allocate a single lock object.
 */
kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}
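Only the allocation side is shown here. A hedged sketch of a matching release path follows; the name mutex_obj_free, the atomic_dec_uint_nv() call and the boolean return convention are assumptions patterned on the allocator above rather than code from this listing.

bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	/* Drop one reference; only the last holder tears the lock down. */
	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}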
Example No. 6
/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}
Example No. 7
int
rndopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	bool hard;
	struct file *fp;
	int fd;
	int error;

	switch (minor(dev)) {
	case RND_DEV_URANDOM:
		hard = false;
		break;

	case RND_DEV_RANDOM:
		hard = true;
		break;

	default:
		return ENXIO;
	}

	error = fd_allocfile(&fp, &fd);
	if (error)
		return error;

	/*
	 * Allocate a context, but don't create a CPRNG yet -- do that
	 * lazily because it consumes entropy from the system entropy
	 * pool, which (currently) has the effect of depleting it and
	 * causing readers from /dev/random to block.  If this is
	 * /dev/urandom and the process is about to send only short
	 * reads to it, then we will be using a per-CPU CPRNG anyway.
	 */
	struct rnd_ctx *const ctx = pool_cache_get(rnd_ctx_cache, PR_WAITOK);
	ctx->rc_cprng = NULL;
	ctx->rc_hard = hard;

	error = fd_clone(fp, fd, flags, &rnd_fileops, ctx);
	KASSERT(error == EMOVEFD);

	return error;
}
Example No. 8
static struct radix_tree_node *
radix_tree_alloc_node(void)
{
	struct radix_tree_node *n;

#if defined(_KERNEL)
	n = pool_cache_get(radix_tree_node_cache, PR_NOWAIT);
#else /* defined(_KERNEL) */
#if defined(_STANDALONE)
	n = alloc(sizeof(*n));
#else /* defined(_STANDALONE) */
	n = malloc(sizeof(*n));
#endif /* defined(_STANDALONE) */
	if (n != NULL) {
		radix_tree_node_init(n);
	}
#endif /* defined(_KERNEL) */
	KASSERT(n == NULL || radix_tree_node_clean_p(n));
	return n;
}
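For symmetry, a sketch of the corresponding release path is shown below; the name radix_tree_free_node and the _STANDALONE dealloc() call are assumptions chosen only to mirror the #ifdef structure of the allocator above.

static void
radix_tree_free_node(struct radix_tree_node *n)
{

	KASSERT(radix_tree_node_clean_p(n));
#if defined(_KERNEL)
	pool_cache_put(radix_tree_node_cache, n);
#elif defined(_STANDALONE)
	dealloc(n, sizeof(*n));
#else
	free(n);
#endif
}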
Example No. 9
/*
 * Allocate a new inode.
 */
int
ufs_makeinode(int mode, struct vnode *dvp, const struct ufs_lookup_results *ulr,
	struct vnode **vpp, struct componentname *cnp)
{
	struct inode	*ip, *pdir;
	struct direct	*newdir;
	struct vnode	*tvp;
	int		error;

	UFS_WAPBL_JUNLOCK_ASSERT(dvp->v_mount);

	pdir = VTOI(dvp);

	if ((mode & IFMT) == 0)
		mode |= IFREG;

	if ((error = UFS_VALLOC(dvp, mode, cnp->cn_cred, vpp)) != 0) {
		return (error);
	}
	tvp = *vpp;
	ip = VTOI(tvp);
	ip->i_gid = pdir->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	error = UFS_WAPBL_BEGIN1(dvp->v_mount, dvp);
	if (error) {
		/*
		 * Note, we can't VOP_VFREE(tvp) here like we should
		 * because we can't write to the disk.  Instead, we leave
		 * the vnode dangling from the journal.
		 */
		vput(tvp);
		return (error);
	}
#if defined(QUOTA) || defined(QUOTA2)
	if ((error = chkiq(ip, 1, cnp->cn_cred, 0))) {
		UFS_VFREE(tvp, ip->i_number, mode);
		UFS_WAPBL_END1(dvp->v_mount, dvp);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = mode;
	DIP_ASSIGN(ip, mode, mode);
	tvp->v_type = IFTOVT(mode);	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 1;
	DIP_ASSIGN(ip, nlink, 1);

	/* Authorize setting SGID if needed. */
	if (ip->i_mode & ISGID) {
		error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
		    tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid,
		    ip->i_gid, mode));
		if (error) {
			ip->i_mode &= ~ISGID;
			DIP_ASSIGN(ip, mode, ip->i_mode);
		}
	}

	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Make sure inode goes to disk before directory entry.
	 */
	if ((error = UFS_UPDATE(tvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;
	newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
	ufs_makedirentry(ip, cnp, newdir);
	error = ufs_direnter(dvp, ulr, tvp, newdir, cnp, NULL);
	pool_cache_put(ufs_direct_cache, newdir);
	if (error)
		goto bad;
	*vpp = tvp;
	return (0);

 bad:
	/*
	 * Write error occurred trying to update the inode
	 * or the directory so must deallocate the inode.
	 */
	ip->i_nlink = 0;
	DIP_ASSIGN(ip, nlink, 0);
	ip->i_flag |= IN_CHANGE;
	UFS_WAPBL_UPDATE(tvp, NULL, NULL, 0);
	tvp->v_type = VNON;		/* explodes later if VBLK */
	UFS_WAPBL_END1(dvp->v_mount, dvp);
	vput(tvp);
	return (error);
}
Example No. 10
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (EINVAL);
	}
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp, 0);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
Example No. 11
static int
rnd_read(struct file *fp, off_t *offp, struct uio *uio, kauth_cred_t cred,
    int flags)
{
	int error = 0;

	DPRINTF(RND_DEBUG_READ,
	    ("Random: Read of %zu requested, flags 0x%08x\n",
		uio->uio_resid, flags));

	if (uio->uio_resid == 0)
		return 0;

	struct rnd_ctx *const ctx = fp->f_data;
	uint8_t *const buf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);

	/*
	 * Choose a CPRNG to use -- either the per-open CPRNG, if this
	 * is /dev/random or a long read, or the per-CPU one otherwise.
	 *
	 * XXX NIST_BLOCK_KEYLEN_BYTES is a detail of the cprng(9)
	 * implementation and as such should not be mentioned here.
	 */
	struct cprng_strong *const cprng =
	    ((ctx->rc_hard || (uio->uio_resid > NIST_BLOCK_KEYLEN_BYTES))?
		rnd_ctx_cprng(ctx) : rnd_percpu_cprng());

	/*
	 * Generate the data in RND_TEMP_BUFFER_SIZE chunks.
	 */
	while (uio->uio_resid > 0) {
		const size_t n_req = MIN(uio->uio_resid, RND_TEMP_BUFFER_SIZE);

		CTASSERT(RND_TEMP_BUFFER_SIZE <= CPRNG_MAX_LEN);
		const size_t n_read = cprng_strong(cprng, buf, n_req,
		    ((ctx->rc_hard && ISSET(fp->f_flag, FNONBLOCK))?
			FNONBLOCK : 0));

		/*
		 * Equality will hold unless this is /dev/random, in
		 * which case we get only as many bytes as are left
		 * from the CPRNG's `information-theoretic strength'
		 * since the last rekey.
		 */
		KASSERT(n_read <= n_req);
		KASSERT(ctx->rc_hard || (n_read == n_req));

		error = uiomove(buf, n_read, uio);
		if (error)
			goto out;

		/*
		 * Do at most one iteration for /dev/random and return
		 * a short read without hanging further.  Hanging
		 * breaks applications worse than short reads.  Reads
		 * can't be zero unless nonblocking from /dev/random;
		 * in that case, ask caller to retry, instead of just
		 * returning zero bytes, which means EOF.
		 */
		KASSERT((0 < n_read) || (ctx->rc_hard &&
			ISSET(fp->f_flag, FNONBLOCK)));
		if (ctx->rc_hard) {
			if (n_read == 0)
				error = EAGAIN;
			goto out;
		}
	}

out:	pool_cache_put(rnd_temp_buffer_cache, buf);
	return error;
}
Example No. 12
/*
 * Obtain a dquot structure for the specified identifier and quota file
 * reading the information from the file if necessary.
 */
int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	int error = 0; /* XXX gcc */

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	if ((ump->um_flags & (UFS_QUOTA|UFS_QUOTA2)) == 0) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (ENODEV);
	}
	dqvp = ump->um_quotas[type];
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA) {
		if (dqvp == NULLVP || (ump->umq1_qflags[type] & QTF_CLOSING)) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2) {
		if (dqvp == NULLVP) {
			mutex_exit(&dqlock);
			*dqp = NODQUOT;
			return (ENODEV);
		}
	}
#endif
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
#ifdef QUOTA
	if (ump->um_flags & UFS_QUOTA)
		error = dq1get(dqvp, id, ump, type, dq);
#endif
#ifdef QUOTA2
	if (ump->um_flags & UFS_QUOTA2)
		error = dq2get(dqvp, id, ump, type, dq);
#endif
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
Example No. 13
static int
rnd_write(struct file *fp, off_t *offp, struct uio *uio,
	   kauth_cred_t cred, int flags)
{
	u_int8_t *bf;
	int n, ret = 0, estimate_ok = 0, estimate = 0, added = 0;

	ret = kauth_authorize_device(cred,
	    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
	if (ret) {
		return (ret);
	}
	estimate_ok = !kauth_authorize_device(cred,
	    KAUTH_DEVICE_RND_ADDDATA_ESTIMATE, NULL, NULL, NULL, NULL);

	DPRINTF(RND_DEBUG_WRITE,
	    ("Random: Write of %zu requested\n", uio->uio_resid));

	if (uio->uio_resid == 0)
		return (0);
	ret = 0;
	bf = pool_cache_get(rnd_temp_buffer_cache, PR_WAITOK);
	while (uio->uio_resid > 0) {
		/*
		 * Don't flood the pool.
		 */
		if (added > RND_POOLWORDS * sizeof(int)) {
#ifdef RND_VERBOSE
			printf("rnd: added %d already, adding no more.\n",
			       added);
#endif
			break;
		}
		n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

		ret = uiomove((void *)bf, n, uio);
		if (ret != 0)
			break;

		if (estimate_ok) {
			/*
			 * Don't cause samples to be discarded by taking
			 * the pool's entropy estimate to the max.
			 */
			if (added > RND_POOLWORDS / 2)
				estimate = 0;
			else
				estimate = n * NBBY / 2;
#ifdef RND_VERBOSE
			printf("rnd: adding on write, %d bytes, estimate %d\n",
			       n, estimate);
#endif
		} else {
#ifdef RND_VERBOSE
			printf("rnd: kauth says no entropy.\n");
#endif
		}

		/*
		 * Mix in the bytes.
		 */
		mutex_spin_enter(&rndpool_mtx);
		rndpool_add_data(&rnd_pool, bf, n, estimate);
		mutex_spin_exit(&rndpool_mtx);

		added += n;
		DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
	}
	pool_cache_put(rnd_temp_buffer_cache, bf);
	return (ret);
}
Example No. 14
int
ufs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode		*a_dvp;
		struct vnode		**a_vpp;
		struct componentname	*a_cnp;
		struct vattr		*a_vap;
	} */ *ap = v;
	struct vnode		*dvp = ap->a_dvp, *tvp;
	struct vattr		*vap = ap->a_vap;
	struct componentname	*cnp = ap->a_cnp;
	struct inode		*ip, *dp = VTOI(dvp);
	struct buf		*bp;
	struct dirtemplate	dirtemplate;
	struct direct		*newdir;
	int			error, dmode;
	struct ufsmount		*ump = dp->i_ump;
	int			dirblksiz = ump->um_dirblksiz;
	struct ufs_lookup_results *ulr;

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	UFS_CHECK_CRAPCOUNTER(dp);

	if ((nlink_t)dp->i_nlink >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & ACCESSPERMS;
	dmode |= IFDIR;
	/*
	 * Must simulate part of ufs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = UFS_VALLOC(dvp, dmode, cnp->cn_cred, ap->a_vpp)) != 0)
		goto out;

	tvp = *ap->a_vpp;
	ip = VTOI(tvp);

	error = UFS_WAPBL_BEGIN(ap->a_dvp->v_mount);
	if (error) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		vput(tvp);
		goto out;
	}
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	ip->i_gid = dp->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
#if defined(QUOTA) || defined(QUOTA2)
	if ((error = chkiq(ip, 1, cnp->cn_cred, 0))) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		UFS_WAPBL_END(dvp->v_mount);
		fstrans_done(dvp->v_mount);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = dmode;
	DIP_ASSIGN(ip, mode, dmode);
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 2;
	DIP_ASSIGN(ip, nlink, 2);
	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Bump link count in parent directory to reflect work done below.
	 * Should be done before reference is created so cleanup is
	 * possible if we crash.
	 */
	dp->i_nlink++;
	DIP_ASSIGN(dp, nlink, dp->i_nlink);
	dp->i_flag |= IN_CHANGE;
	if ((error = UFS_UPDATE(dvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;

	/*
	 * Initialize directory with "." and ".." from static template.
	 */
	dirtemplate = mastertemplate;
	dirtemplate.dotdot_reclen = dirblksiz - dirtemplate.dot_reclen;
	dirtemplate.dot_ino = ufs_rw32(ip->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_ino = ufs_rw32(dp->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dot_reclen = ufs_rw16(dirtemplate.dot_reclen,
	    UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_reclen = ufs_rw16(dirtemplate.dotdot_reclen,
	    UFS_MPNEEDSWAP(ump));
	if (ump->um_maxsymlinklen <= 0) {
#if BYTE_ORDER == LITTLE_ENDIAN
		if (UFS_MPNEEDSWAP(ump) == 0)
#else
		if (UFS_MPNEEDSWAP(ump) != 0)
#endif
		{
			dirtemplate.dot_type = dirtemplate.dot_namlen;
			dirtemplate.dotdot_type = dirtemplate.dotdot_namlen;
			dirtemplate.dot_namlen = dirtemplate.dotdot_namlen = 0;
		} else
			dirtemplate.dot_type = dirtemplate.dotdot_type = 0;
	}
	if ((error = UFS_BALLOC(tvp, (off_t)0, dirblksiz, cnp->cn_cred,
	    B_CLRBUF, &bp)) != 0)
		goto bad;
	ip->i_size = dirblksiz;
	DIP_ASSIGN(ip, size, dirblksiz);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	uvm_vnp_setsize(tvp, ip->i_size);
	memcpy((void *)bp->b_data, (void *)&dirtemplate, sizeof dirtemplate);

	/*
	 * Directory set up, now install its entry in the parent directory.
	 * We must write out the buffer containing the new directory body
	 * before entering the new name in the parent.
	 */
	if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0)
		goto bad;
	if ((error = UFS_UPDATE(tvp, NULL, NULL, UPDATE_DIROP)) != 0) {
		goto bad;
	}
	newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
	ufs_makedirentry(ip, cnp, newdir);
	error = ufs_direnter(dvp, ulr, tvp, newdir, cnp, bp);
	pool_cache_put(ufs_direct_cache, newdir);
 bad:
	if (error == 0) {
		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
		VOP_UNLOCK(tvp);
		UFS_WAPBL_END(dvp->v_mount);
	} else {
		dp->i_nlink--;
		DIP_ASSIGN(dp, nlink, dp->i_nlink);
		dp->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(dvp, NULL, NULL, UPDATE_DIROP);
		/*
		 * No need to do an explicit UFS_TRUNCATE here, vrele will
		 * do this for us because we set the link count to 0.
		 */
		ip->i_nlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(tvp, NULL, NULL, UPDATE_DIROP);
		UFS_WAPBL_END(dvp->v_mount);
		vput(tvp);
	}
 out:
	fstrans_done(dvp->v_mount);
	return (error);
}
Example No. 15
int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_tf;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		error = cpu_mcontext32_validate(l, mcp);
		if (error)
			return error;

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG32_PSR]);
		tf->tf_pc        = (uint64_t)gr[_REG32_PC];
		tf->tf_npc       = (uint64_t)gr[_REG32_nPC];
		tf->tf_y         = (uint64_t)gr[_REG32_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG32_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG32_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG32_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG32_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG32_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG32_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = (uint64_t)gr[_REG32_G7]; */
		tf->tf_out[0]    = (uint64_t)gr[_REG32_O0];
		tf->tf_out[1]    = (uint64_t)gr[_REG32_O1];
		tf->tf_out[2]    = (uint64_t)gr[_REG32_O2];
		tf->tf_out[3]    = (uint64_t)gr[_REG32_O3];
		tf->tf_out[4]    = (uint64_t)gr[_REG32_O4];
		tf->tf_out[5]    = (uint64_t)gr[_REG32_O5];
		tf->tf_out[6]    = (uint64_t)gr[_REG32_O6];
		tf->tf_out[7]    = (uint64_t)gr[_REG32_O7];
		/* %asi restored above; %fprs not yet supported. */

		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG32_G7]);

		/* XXX mcp->__gwins */
	}

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
#ifdef notyet
		struct fpstate64 *fsp;
		const __fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, &fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
		fsp->fs_fsr = mcp->__fpregs.__fpu_fsr;
		fsp->fs_qsize = 0;

#if 0
		/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
#endif
#endif
	}
#ifdef _UC_SETSTACK
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);
#endif
	return (0);
}
Example No. 16
/*
 * Attempt to build up a hash table for the directory contents in
 * inode 'ip'. Returns 0 on success, or -1 if the operation failed.
 */
int
ufsdirhash_build(struct inode *ip)
{
	struct dirhash *dh;
	struct buf *bp = NULL;
	struct direct *ep;
	struct vnode *vp;
	doff_t bmask, pos;
	int dirblocks, i, j, memreqd, nblocks, narrays, nslots, slot;
	const int needswap = UFS_MPNEEDSWAP(ip->i_ump);
	int dirblksiz = ip->i_ump->um_dirblksiz;

	/* Check if we can/should use dirhash. */
	if (ip->i_dirhash == NULL) {
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) || OFSFMT(ip))
			return (-1);
	} else {
		/* Hash exists, but sysctls could have changed. */
		if (ip->i_size < (ufs_dirhashminblks * dirblksiz) ||
		    ufs_dirhashmem > ufs_dirhashmaxmem) {
			ufsdirhash_free(ip);
			return (-1);
		}
		/* Check if hash exists and is intact (note: unlocked read). */
		if (ip->i_dirhash->dh_hash != NULL)
			return (0);
		/* Free the old, recycled hash and build a new one. */
		ufsdirhash_free(ip);
	}

	/* Don't hash removed directories. */
	if (ip->i_nlink == 0)
		return (-1);

	vp = ip->i_vnode;
	/* Allocate 50% more entries than this dir size could ever need. */
	KASSERT(ip->i_size >= dirblksiz);
	nslots = ip->i_size / UFS_DIRECTSIZ(1);
	nslots = (nslots * 3 + 1) / 2;
	narrays = howmany(nslots, DH_NBLKOFF);
	nslots = narrays * DH_NBLKOFF;
	dirblocks = howmany(ip->i_size, dirblksiz);
	nblocks = (dirblocks * 3 + 1) / 2;

	memreqd = sizeof(*dh) + narrays * sizeof(*dh->dh_hash) +
	    narrays * DH_NBLKOFF * sizeof(**dh->dh_hash) +
	    nblocks * sizeof(*dh->dh_blkfree);

	while (atomic_add_int_nv(&ufs_dirhashmem, memreqd) >
	    ufs_dirhashmaxmem) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		if (memreqd > ufs_dirhashmaxmem / 2)
			return (-1);
		/* Try to free some space. */
		if (ufsdirhash_recycle(memreqd) != 0)
			return (-1);
		else
			DIRHASHLIST_UNLOCK();
	}

	/*
	 * Use non-blocking mallocs so that we will revert to a linear
	 * lookup on failure rather than potentially blocking forever.
	 */
	dh = pool_cache_get(ufsdirhash_cache, PR_NOWAIT);
	if (dh == NULL) {
		atomic_add_int(&ufs_dirhashmem, -memreqd);
		return (-1);
	}
	memset(dh, 0, sizeof(*dh));
	mutex_init(&dh->dh_lock, MUTEX_DEFAULT, IPL_NONE);
	DIRHASH_LOCK(dh);
	dh->dh_hashsz = narrays * sizeof(dh->dh_hash[0]);
	dh->dh_hash = kmem_zalloc(dh->dh_hashsz, KM_NOSLEEP);
	dh->dh_blkfreesz = nblocks * sizeof(dh->dh_blkfree[0]);
	dh->dh_blkfree = kmem_zalloc(dh->dh_blkfreesz, KM_NOSLEEP);
	if (dh->dh_hash == NULL || dh->dh_blkfree == NULL)
		goto fail;
	for (i = 0; i < narrays; i++) {
		if ((dh->dh_hash[i] = DIRHASH_BLKALLOC()) == NULL)
			goto fail;
		for (j = 0; j < DH_NBLKOFF; j++)
			dh->dh_hash[i][j] = DIRHASH_EMPTY;
	}

	/* Initialise the hash table and block statistics. */
	dh->dh_narrays = narrays;
	dh->dh_hlen = nslots;
	dh->dh_nblk = nblocks;
	dh->dh_dirblks = dirblocks;
	for (i = 0; i < dirblocks; i++)
		dh->dh_blkfree[i] = dirblksiz / DIRALIGN;
	for (i = 0; i < DH_NFSTATS; i++)
		dh->dh_firstfree[i] = -1;
	dh->dh_firstfree[DH_NFSTATS] = 0;
	dh->dh_seqopt = 0;
	dh->dh_seqoff = 0;
	dh->dh_score = DH_SCOREINIT;
	ip->i_dirhash = dh;

	bmask = VFSTOUFS(vp->v_mount)->um_mountp->mnt_stat.f_iosize - 1;
	pos = 0;
	while (pos < ip->i_size) {
		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		/* If necessary, get the next directory block. */
		if ((pos & bmask) == 0) {
			if (bp != NULL)
				brelse(bp, 0);
			if (ufs_blkatoff(vp, (off_t)pos, NULL, &bp, false) != 0)
				goto fail;
		}

		/* Add this entry to the hash. */
		ep = (struct direct *)((char *)bp->b_data + (pos & bmask));
		if (ep->d_reclen == 0 || ep->d_reclen >
		    dirblksiz - (pos & (dirblksiz - 1))) {
			/* Corrupted directory. */
			brelse(bp, 0);
			goto fail;
		}
		if (ep->d_ino != 0) {
			/* Add the entry (simplified ufsdirhash_add). */
			slot = ufsdirhash_hash(dh, ep->d_name, ep->d_namlen);
			while (DH_ENTRY(dh, slot) != DIRHASH_EMPTY)
				slot = WRAPINCR(slot, dh->dh_hlen);
			dh->dh_hused++;
			DH_ENTRY(dh, slot) = pos;
			ufsdirhash_adjfree(dh, pos, -UFS_DIRSIZ(0, ep, needswap),
			    dirblksiz);
		}
		pos += ep->d_reclen;
	}

	if (bp != NULL)
		brelse(bp, 0);
	DIRHASHLIST_LOCK();
	TAILQ_INSERT_TAIL(&ufsdirhash_list, dh, dh_list);
	dh->dh_onlist = 1;
	DIRHASH_UNLOCK(dh);
	DIRHASHLIST_UNLOCK();
	return (0);

fail:
	DIRHASH_UNLOCK(dh);
	if (dh->dh_hash != NULL) {
		for (i = 0; i < narrays; i++)
			if (dh->dh_hash[i] != NULL)
				DIRHASH_BLKFREE(dh->dh_hash[i]);
		kmem_free(dh->dh_hash, dh->dh_hashsz);
	}
	if (dh->dh_blkfree != NULL)
		kmem_free(dh->dh_blkfree, dh->dh_blkfreesz);
	mutex_destroy(&dh->dh_lock);
	pool_cache_put(ufsdirhash_cache, dh);
	ip->i_dirhash = NULL;
	atomic_add_int(&ufs_dirhashmem, -memreqd);
	return (-1);
}
Example No. 17
/*
 * Allocate a new inode.
 */
int
ulfs_makeinode(int mode, struct vnode *dvp, const struct ulfs_lookup_results *ulr,
	struct vnode **vpp, struct componentname *cnp)
{
	struct inode	*ip, *pdir;
	struct lfs_direct	*newdir;
	struct vnode	*tvp;
	int		error;

	pdir = VTOI(dvp);

	if ((mode & LFS_IFMT) == 0)
		mode |= LFS_IFREG;

	if ((error = lfs_valloc(dvp, mode, cnp->cn_cred, vpp)) != 0) {
		return (error);
	}
	tvp = *vpp;
	ip = VTOI(tvp);
	ip->i_gid = pdir->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) {
		lfs_vfree(tvp, ip->i_number, mode);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = mode;
	DIP_ASSIGN(ip, mode, mode);
	tvp->v_type = IFTOVT(mode);	/* Rest init'd in getnewvnode(). */
	ip->i_nlink = 1;
	DIP_ASSIGN(ip, nlink, 1);

	/* Authorize setting SGID if needed. */
	if (ip->i_mode & ISGID) {
		error = kauth_authorize_vnode(cnp->cn_cred, KAUTH_VNODE_WRITE_SECURITY,
		    tvp, NULL, genfs_can_chmod(tvp->v_type, cnp->cn_cred, ip->i_uid,
		    ip->i_gid, mode));
		if (error) {
			ip->i_mode &= ~ISGID;
			DIP_ASSIGN(ip, mode, ip->i_mode);
		}
	}

	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Make sure inode goes to disk before directory entry.
	 */
	if ((error = lfs_update(tvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;
	newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK);
	ulfs_makedirentry(ip, cnp, newdir);
	error = ulfs_direnter(dvp, ulr, tvp, newdir, cnp, NULL);
	pool_cache_put(ulfs_direct_cache, newdir);
	if (error)
		goto bad;
	*vpp = tvp;
	return (0);

 bad:
	/*
	 * Write error occurred trying to update the inode
	 * or the directory so must deallocate the inode.
	 */
	ip->i_nlink = 0;
	DIP_ASSIGN(ip, nlink, 0);
	ip->i_flag |= IN_CHANGE;
	/* If IN_ADIROP, account for it */
	lfs_unmark_vnode(tvp);
	tvp->v_type = VNON;		/* explodes later if VBLK */
	vput(tvp);
	return (error);
}
Example No. 18
/*
 * ulfs_gro_rename: Actually perform the rename operation.
 */
static int
ulfs_gro_rename(struct mount *mp, kauth_cred_t cred,
    struct vnode *fdvp, struct componentname *fcnp,
    void *fde, struct vnode *fvp,
    struct vnode *tdvp, struct componentname *tcnp,
    void *tde, struct vnode *tvp)
{
	struct ulfs_lookup_results *fulr = fde;
	struct ulfs_lookup_results *tulr = tde;
	bool directory_p, reparent_p;
	struct lfs_direct *newdir;
	int error;

	KASSERT(mp != NULL);
	KASSERT(fdvp != NULL);
	KASSERT(fcnp != NULL);
	KASSERT(fulr != NULL);
	KASSERT(fvp != NULL);
	KASSERT(tdvp != NULL);
	KASSERT(tcnp != NULL);
	KASSERT(tulr != NULL);
	KASSERT(fulr != tulr);
	KASSERT(fdvp != fvp);
	KASSERT(fdvp != tvp);
	KASSERT(tdvp != fvp);
	KASSERT(tdvp != tvp);
	KASSERT(fvp != tvp);
	KASSERT(fdvp->v_mount == mp);
	KASSERT(fvp->v_mount == mp);
	KASSERT(tdvp->v_mount == mp);
	KASSERT((tvp == NULL) || (tvp->v_mount == mp));
	KASSERT(VOP_ISLOCKED(fdvp) == LK_EXCLUSIVE);
	KASSERT(VOP_ISLOCKED(fvp) == LK_EXCLUSIVE);
	KASSERT(VOP_ISLOCKED(tdvp) == LK_EXCLUSIVE);
	KASSERT((tvp == NULL) || (VOP_ISLOCKED(tvp) == LK_EXCLUSIVE));

	/*
	 * We shall need to temporarily bump the link count, so make
	 * sure there is room to do so.
	 */
	if ((nlink_t)VTOI(fvp)->i_nlink >= LINK_MAX)
		return EMLINK;

	directory_p = (fvp->v_type == VDIR);
	KASSERT(directory_p == ((VTOI(fvp)->i_mode & LFS_IFMT) == LFS_IFDIR));
	KASSERT((tvp == NULL) || (directory_p == (tvp->v_type == VDIR)));
	KASSERT((tvp == NULL) || (directory_p ==
		((VTOI(tvp)->i_mode & LFS_IFMT) == LFS_IFDIR)));

	reparent_p = (fdvp != tdvp);
	KASSERT(reparent_p == (VTOI(fdvp)->i_number != VTOI(tdvp)->i_number));

	/*
	 * Commence hacking of the data on disk.
	 */

	error = 0;

	/*
	 * 1) Bump link count while we're moving stuff
	 *    around.  If we crash somewhere before
	 *    completing our work, the link count
	 *    may be wrong, but correctable.
	 */

	KASSERT((nlink_t)VTOI(fvp)->i_nlink < LINK_MAX);
	VTOI(fvp)->i_nlink++;
	DIP_ASSIGN(VTOI(fvp), nlink, VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_flag |= IN_CHANGE;
	error = lfs_update(fvp, NULL, NULL, UPDATE_DIROP);
	if (error)
		goto whymustithurtsomuch;

	/*
	 * 2) If target doesn't exist, link the target
	 *    to the source and unlink the source.
	 *    Otherwise, rewrite the target directory
	 *    entry to reference the source inode and
	 *    expunge the original entry's existence.
	 */

	if (tvp == NULL) {
		/*
		 * Account for ".." in new directory.
		 * When source and destination have the same
		 * parent we don't fool with the link count.
		 */
		if (directory_p && reparent_p) {
			if ((nlink_t)VTOI(tdvp)->i_nlink >= LINK_MAX) {
				error = EMLINK;
				goto whymustithurtsomuch;
			}
			KASSERT((nlink_t)VTOI(tdvp)->i_nlink < LINK_MAX);
			VTOI(tdvp)->i_nlink++;
			DIP_ASSIGN(VTOI(tdvp), nlink, VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_flag |= IN_CHANGE;
			error = lfs_update(tdvp, NULL, NULL, UPDATE_DIROP);
			if (error) {
				/*
				 * Link count update didn't take --
				 * back out the in-memory link count.
				 */
				KASSERT(0 < VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_nlink--;
				DIP_ASSIGN(VTOI(tdvp), nlink,
				    VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_flag |= IN_CHANGE;
				goto whymustithurtsomuch;
			}
		}

		newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK);
		ulfs_makedirentry(VTOI(fvp), tcnp, newdir);
		error = ulfs_direnter(tdvp, tulr, NULL, newdir, tcnp, NULL);
		pool_cache_put(ulfs_direct_cache, newdir);
		if (error) {
			if (directory_p && reparent_p) {
				/*
				 * Directory update didn't take, but
				 * the link count update did -- back
				 * out the in-memory link count and the
				 * on-disk link count.
				 */
				KASSERT(0 < VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_nlink--;
				DIP_ASSIGN(VTOI(tdvp), nlink,
				    VTOI(tdvp)->i_nlink);
				VTOI(tdvp)->i_flag |= IN_CHANGE;
				(void)lfs_update(tdvp, NULL, NULL,
				    UPDATE_WAIT | UPDATE_DIROP);
			}
			goto whymustithurtsomuch;
		}
	} else {
		if (directory_p)
			/* XXX WTF?  Why purge here?  Why not purge others?  */
			cache_purge(tdvp);

		/*
		 * Make the target directory's entry for tcnp point at
		 * the source node.
		 *
		 * XXX ulfs_dirrewrite decrements tvp's link count, but
		 * doesn't touch the link count of the new inode.  Go
		 * figure.
		 */
		error = ulfs_dirrewrite(VTOI(tdvp), tulr->ulr_offset,
		    VTOI(tvp), VTOI(fvp)->i_number, LFS_IFTODT(VTOI(fvp)->i_mode),
		    ((directory_p && reparent_p) ? reparent_p : directory_p),
		    IN_CHANGE | IN_UPDATE);
		if (error)
			goto whymustithurtsomuch;

		/*
		 * If the source and target are directories, and the
		 * target is in the same directory as the source,
		 * decrement the link count of the common parent
		 * directory, since we are removing the target from
		 * that directory.
		 */
		if (directory_p && !reparent_p) {
			KASSERT(fdvp == tdvp);
			/* XXX check, don't kassert */
			KASSERT(0 < VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_nlink--;
			DIP_ASSIGN(VTOI(tdvp), nlink, VTOI(tdvp)->i_nlink);
			VTOI(tdvp)->i_flag |= IN_CHANGE;
		}

		if (directory_p) {
			/*
			 * XXX I don't understand the following comment
			 * from ulfs_rename -- in particular, the part
			 * about `there may be other hard links'.
			 *
			 * Truncate inode. The only stuff left in the directory
			 * is "." and "..". The "." reference is inconsequential
			 * since we are quashing it. We have removed the "."
			 * reference and the reference in the parent directory,
			 * but there may be other hard links.
			 *
			 * XXX The ulfs_dirempty call earlier does
			 * not guarantee anything about nlink.
			 */
			if (VTOI(tvp)->i_nlink != 1)
				ulfs_dirbad(VTOI(tvp), (doff_t)0,
				    "hard-linked directory");
			VTOI(tvp)->i_nlink = 0;
			DIP_ASSIGN(VTOI(tvp), nlink, 0);
			error = lfs_truncate(tvp, (off_t)0, IO_SYNC, cred);
			if (error)
				goto whymustithurtsomuch;
		}
	}

	/*
	 * If the source is a directory with a new parent, the link
	 * count of the old parent directory must be decremented and
	 * ".." set to point to the new parent.
	 *
	 * XXX ulfs_dirrewrite updates the link count of fdvp, but not
	 * the link count of fvp or the link count of tdvp.  Go figure.
	 */
	if (directory_p && reparent_p) {
		error = ulfs_dirrewrite(VTOI(fvp), mastertemplate.dot_reclen,
		    VTOI(fdvp), VTOI(tdvp)->i_number, LFS_DT_DIR, 0, IN_CHANGE);
#if 0		/* XXX This branch was not in ulfs_rename! */
		if (error)
			goto whymustithurtsomuch;
#endif

		/* XXX WTF?  Why purge here?  Why not purge others?  */
		cache_purge(fdvp);
	}

	/*
	 * 3) Unlink the source.
	 */

	/*
	 * ulfs_direnter may compact the directory in the process of
	 * inserting a new entry.  That may invalidate fulr, which we
	 * need in order to remove the old entry.  In that case, we
	 * need to recalculate what fulr should be.
	 */
	if (!reparent_p && (tvp == NULL) &&
	    ulfs_rename_ulr_overlap_p(fulr, tulr)) {
		error = ulfs_rename_recalculate_fulr(fdvp, fulr, tulr, fcnp);
#if 0				/* XXX */
		if (error)	/* XXX Try to back out changes?  */
			goto whymustithurtsomuch;
#endif
	}

	/*
	 * XXX 0 means !isrmdir.  But can't this be an rmdir?
	 * XXX Well, turns out that argument to ulfs_dirremove is ignored...
	 * XXX And it turns out ulfs_dirremove updates the link count of fvp.
	 * XXX But it doesn't update the link count of fdvp.  Go figure.
	 * XXX fdvp's link count is updated in ulfs_dirrewrite instead.
	 * XXX Actually, sometimes it doesn't update fvp's link count.
	 * XXX I hate the world.
	 */
	error = ulfs_dirremove(fdvp, fulr, VTOI(fvp), fcnp->cn_flags, 0);
	if (error)
#if 0				/* XXX */
		goto whymustithurtsomuch;
#endif
		goto arghmybrainhurts;

	/*
	 * XXX Perhaps this should go at the top, in case the file
	 * system is modified but incompletely so because of an
	 * intermediate error.
	 */
	genfs_rename_knote(fdvp, fvp, tdvp, tvp,
	    ((tvp != NULL) && (VTOI(tvp)->i_nlink == 0)));
#if 0				/* XXX */
	genfs_rename_cache_purge(fdvp, fvp, tdvp, tvp);
#endif
	goto arghmybrainhurts;

whymustithurtsomuch:
	KASSERT(0 < VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_nlink--;
	DIP_ASSIGN(VTOI(fvp), nlink, VTOI(fvp)->i_nlink);
	VTOI(fvp)->i_flag |= IN_CHANGE;

arghmybrainhurts:
/*ihateyou:*/
	return error;
}
Example No. 19
/*
 * ufs_link: create hard link.
 */
int
ufs_link(void *v)
{
	struct vop_link_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip;
	struct direct *newdir;
	int error;
	struct ufs_lookup_results *ulr;

	KASSERT(dvp != vp);
	KASSERT(vp->v_type != VDIR);
	KASSERT(dvp->v_mount == vp->v_mount);

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error) {
		VOP_ABORTOP(dvp, cnp);
		goto out2;
	}
	ip = VTOI(vp);
	if ((nlink_t)ip->i_nlink >= LINK_MAX) {
		VOP_ABORTOP(dvp, cnp);
		error = EMLINK;
		goto out1;
	}
	if (ip->i_flags & (IMMUTABLE | APPEND)) {
		VOP_ABORTOP(dvp, cnp);
		error = EPERM;
		goto out1;
	}
	error = UFS_WAPBL_BEGIN(vp->v_mount);
	if (error) {
		VOP_ABORTOP(dvp, cnp);
		goto out1;
	}
	ip->i_nlink++;
	DIP_ASSIGN(ip, nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	error = UFS_UPDATE(vp, NULL, NULL, UPDATE_DIROP);
	if (!error) {
		newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
		ufs_makedirentry(ip, cnp, newdir);
		error = ufs_direnter(dvp, ulr, vp, newdir, cnp, NULL);
		pool_cache_put(ufs_direct_cache, newdir);
	}
	if (error) {
		ip->i_nlink--;
		DIP_ASSIGN(ip, nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(vp, NULL, NULL, UPDATE_DIROP);
	}
	UFS_WAPBL_END(vp->v_mount);
 out1:
	VOP_UNLOCK(vp);
 out2:
	VN_KNOTE(vp, NOTE_LINK);
	VN_KNOTE(dvp, NOTE_WRITE);
	vput(dvp);
	fstrans_done(dvp->v_mount);
	return (error);
}
Example No. 20
/*
 * whiteout vnode call
 */
int
ufs_whiteout(void *v)
{
	struct vop_whiteout_args /* {
		struct vnode		*a_dvp;
		struct componentname	*a_cnp;
		int			a_flags;
	} */ *ap = v;
	struct vnode		*dvp = ap->a_dvp;
	struct componentname	*cnp = ap->a_cnp;
	struct direct		*newdir;
	int			error;
	struct ufsmount		*ump = VFSTOUFS(dvp->v_mount);
	struct ufs_lookup_results *ulr;

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	UFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	error = 0;
	switch (ap->a_flags) {
	case LOOKUP:
		/* 4.4 format directories support whiteout operations */
		if (ump->um_maxsymlinklen > 0)
			return (0);
		return (EOPNOTSUPP);

	case CREATE:
		/* create a new directory whiteout */
		fstrans_start(dvp->v_mount, FSTRANS_SHARED);
		error = UFS_WAPBL_BEGIN(dvp->v_mount);
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (ump->um_maxsymlinklen <= 0)
			panic("ufs_whiteout: old format filesystem");
#endif

		newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
		newdir->d_ino = UFS_WINO;
		newdir->d_namlen = cnp->cn_namelen;
		memcpy(newdir->d_name, cnp->cn_nameptr,
		    (size_t)cnp->cn_namelen);
		newdir->d_name[cnp->cn_namelen] = '\0';
		newdir->d_type = DT_WHT;
		error = ufs_direnter(dvp, ulr, NULL, newdir, cnp, NULL);
		pool_cache_put(ufs_direct_cache, newdir);
		break;

	case DELETE:
		/* remove an existing directory whiteout */
		fstrans_start(dvp->v_mount, FSTRANS_SHARED);
		error = UFS_WAPBL_BEGIN(dvp->v_mount);
		if (error)
			break;
#ifdef DIAGNOSTIC
		if (ump->um_maxsymlinklen <= 0)
			panic("ufs_whiteout: old format filesystem");
#endif

		cnp->cn_flags &= ~DOWHITEOUT;
		error = ufs_dirremove(dvp, ulr, NULL, cnp->cn_flags, 0);
		break;
	default:
		panic("ufs_whiteout: unknown op");
		/* NOTREACHED */
	}
	UFS_WAPBL_END(dvp->v_mount);
	fstrans_done(dvp->v_mount);
	return (error);
}
Example No. 21
int
netbsd32_cpu_setmcontext(
	struct lwp *l,
	/* XXX const netbsd32_*/mcontext_t *mcp,
	unsigned int flags)
{
#ifdef NOT_YET
/* XXX */
	greg32_t *gr = mcp->__gregs;
	struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
	}

	if ((flags & _UC_CPU) != 0) {
		/*
	 	 * Only the icc bits in the psr are used, so it need not be
	 	 * verified.  pc and npc must be multiples of 4.  This is all
	 	 * that is required; if it holds, just do it.
		 */
		if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
		    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
			return (EINVAL);

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
		tf->tf_pc        = (uint64_t)gr[_REG_PC];
		tf->tf_npc       = (uint64_t)gr[_REG_nPC];
		tf->tf_y         = (uint64_t)gr[_REG_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
		tf->tf_global[7] = (uint64_t)gr[_REG_G7];
		tf->tf_out[0]    = (uint64_t)gr[_REG_O0];
		tf->tf_out[1]    = (uint64_t)gr[_REG_O1];
		tf->tf_out[2]    = (uint64_t)gr[_REG_O2];
		tf->tf_out[3]    = (uint64_t)gr[_REG_O3];
		tf->tf_out[4]    = (uint64_t)gr[_REG_O4];
		tf->tf_out[5]    = (uint64_t)gr[_REG_O5];
		tf->tf_out[6]    = (uint64_t)gr[_REG_O6];
		tf->tf_out[7]    = (uint64_t)gr[_REG_O7];
		/* %asi restored above; %fprs not yet supported. */

		/* XXX mcp->__gwins */
	}

	/* Restore FP register context, if any. */
	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
		struct fpstate *fsp;
		const netbsd32_fpregset_t *fpr = &mcp->__fpregs;
		int reload = 0;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		/*
		 * XXX Should we really activate the supplied FPU context
		 * XXX immediately or just fault it in later?
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
			reload = 1;
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, fpr->__fpu_fr, sizeof (fpr->__fpu_fr));
		fsp->fs_fsr = fpr->__fpu_fsr;	/* don't care about fcc1-3 */
		fsp->fs_qsize = 0;

#if 0
		/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/; /* See above */
#endif

		/* Reload context again, if necessary. */
		if (reload)
			loadfpstate(fsp);
	}

	/* XXX mcp->__xrs */
	/* XXX mcp->__asrs */
#endif
	return (0);
}
Example No. 22
void
cpu_lwp_fork(register struct lwp *l1, register struct lwp *l2, void *stack, size_t stacksize, void (*func)(void *), void *arg)
{
    struct pcb *opcb = lwp_getpcb(l1);
    struct pcb *npcb = lwp_getpcb(l2);
    struct trapframe *tf2;
    struct rwindow *rp;

    /*
     * Save all user registers to l1's stack or, in the case of
     * user registers and invalid stack pointers, to opcb.
     * We then copy the whole pcb to l2; when switch() selects l2
     * to run, it will run at the `lwp_trampoline' stub, rather
     * than returning at the copying code below.
     *
     * If process l1 has an FPU state, we must copy it.  If it is
     * the FPU user, we must save the FPU state first.
     */

#ifdef NOTDEF_DEBUG
    printf("cpu_lwp_fork()\n");
#endif
    if (l1 == curlwp) {
        write_user_windows();

        /*
         * We're in the kernel, so we don't really care about
         * %ccr or %asi.  We do want to duplicate %pstate and %cwp.
         */
        opcb->pcb_pstate = getpstate();
        opcb->pcb_cwp = getcwp();
    }
#ifdef DIAGNOSTIC
    else if (l1 != &lwp0)
        panic("cpu_lwp_fork: curlwp");
#endif
#ifdef DEBUG
    /* prevent us from having NULL lastcall */
    opcb->lastcall = cpu_forkname;
#else
    opcb->lastcall = NULL;
#endif
    memcpy(npcb, opcb, sizeof(struct pcb));
    if (l1->l_md.md_fpstate) {
        fpusave_lwp(l1, true);
        l2->l_md.md_fpstate = pool_cache_get(fpstate_cache, PR_WAITOK);
        memcpy(l2->l_md.md_fpstate, l1->l_md.md_fpstate,
               sizeof(struct fpstate64));
    } else
        l2->l_md.md_fpstate = NULL;

    /*
     * Setup (kernel) stack frame that will by-pass the child
     * out of the kernel. (The trap frame invariably resides at
     * the tippity-top of the u. area.)
     */
    tf2 = l2->l_md.md_tf = (struct trapframe *)
                           ((long)npcb + USPACE - sizeof(*tf2));

    /* Copy parent's trapframe */
    *tf2 = *(struct trapframe *)((long)opcb + USPACE - sizeof(*tf2));

    /*
     * If specified, give the child a different stack.
     */
    if (stack != NULL)
        tf2->tf_out[6] = (uint64_t)(u_long)stack + stacksize;

    /*
     * Set return values in child mode and clear condition code,
     * in case we end up running a signal handler before returning
     * to userland.
     */
    tf2->tf_out[0] = 0;
    tf2->tf_out[1] = 1;
    tf2->tf_tstate &= ~TSTATE_CCR;

    /* Construct kernel frame to return to in cpu_switch() */
    rp = (struct rwindow *)((u_long)npcb + TOPFRAMEOFF);
    *rp = *(struct rwindow *)((u_long)opcb + TOPFRAMEOFF);

    rp->rw_local[0] = (long)func;	/* Function to call */
    rp->rw_local[1] = (long)arg;	/* and its argument */
    rp->rw_local[2] = (long)l2;	/* new lwp */

    npcb->pcb_pc = (long)lwp_trampoline - 8;
    npcb->pcb_sp = (long)rp - STACK_OFFSET;
}