Example 1
/*
 * Functions implementing extended-attribute backed labels for file systems
 * that support it.
 *
 * Where possible, we use EA transactions to make writes to multiple
 * attributes across different policies mutually atomic.  We allow work to
 * continue on file systems not supporting EA transactions, but generate a
 * printf warning.
 */
int
mac_vnode_create_extattr(struct ucred *cred, struct mount *mp,
    struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	int error;

	ASSERT_VOP_LOCKED(dvp, "mac_vnode_create_extattr");
	ASSERT_VOP_LOCKED(vp, "mac_vnode_create_extattr");

	error = VOP_OPENEXTATTR(vp, cred, curthread);
	if (error == EOPNOTSUPP) {
		if (ea_warn_once == 0) {
			printf("Warning: transactions not supported "
			    "in EA write.\n");
			ea_warn_once = 1;
		}
	} else if (error)
		return (error);

	MAC_POLICY_CHECK(vnode_create_extattr, cred, mp, mp->mnt_label, dvp,
	    dvp->v_label, vp, vp->v_label, cnp);

	if (error) {
		VOP_CLOSEEXTATTR(vp, 0, NOCRED, curthread);
		return (error);
	}

	error = VOP_CLOSEEXTATTR(vp, 1, NOCRED, curthread);
	if (error == EOPNOTSUPP)
		error = 0;

	return (error);
}
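Note: in the example above, `error` is tested right after MAC_POLICY_CHECK() with no visible assignment because, in FreeBSD's MAC framework, the macro itself assigns to the caller's local `error`. A simplified sketch of its shape (an assumption, condensed from sys/security/mac/mac_internal.h; the real macro also walks a second list of dynamically loaded policies under a lock):

/*
 * Condensed sketch of MAC_POLICY_CHECK() (assumption): run the named
 * hook of every registered static policy and compose the results into
 * the caller's local `error` variable via mac_error_select().
 */
#define	MAC_POLICY_CHECK(check, args...) do {				\
	struct mac_policy_conf *mpc;					\
									\
	error = 0;							\
	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {		\
		if (mpc->mpc_ops->mpo_ ## check != NULL)		\
			error = mac_error_select(			\
			    mpc->mpc_ops->mpo_ ## check (args),		\
			    error);					\
	}								\
} while (0)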
Example 2
int
fuse_vnode_get(struct mount         *mp,
               uint64_t              nodeid,
               struct vnode         *dvp,
               struct vnode         **vpp,
               struct componentname *cnp,
               enum vtype            vtyp)
{
    struct thread *td = (cnp != NULL ? cnp->cn_thread : curthread);
    int err = 0;

    debug_printf("dvp=%p\n", dvp);

    err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp);
    if (err) {
        return err;
    }

    if (dvp != NULL) {
        MPASS((cnp->cn_flags & ISDOTDOT) == 0);
        MPASS(!(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'));
        fuse_vnode_setparent(*vpp, dvp);
    }
    if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0) {
        ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get");
        ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get");
        cache_enter(dvp, *vpp, cnp);
    }
    VTOFUD(*vpp)->nlookup++;

    return 0;
}
Example 3
static int
mac_vnode_setlabel_extattr(struct ucred *cred, struct vnode *vp,
    struct label *intlabel)
{
	int error;

	ASSERT_VOP_LOCKED(vp, "mac_vnode_setlabel_extattr");

	error = VOP_OPENEXTATTR(vp, cred, curthread);
	if (error == EOPNOTSUPP) {
		if (ea_warn_once == 0) {
			printf("Warning: transactions not supported "
			    "in EA write.\n");
			ea_warn_once = 1;
		}
	} else if (error)
		return (error);

	MAC_POLICY_CHECK(vnode_setlabel_extattr, cred, vp, vp->v_label,
	    intlabel);

	if (error) {
		VOP_CLOSEEXTATTR(vp, 0, NOCRED, curthread);
		return (error);
	}

	error = VOP_CLOSEEXTATTR(vp, 1, NOCRED, curthread);
	if (error == EOPNOTSUPP)
		error = 0;

	return (error);
}
Example 4
int
nandfs_node_update(struct nandfs_node *node)
{
	struct nandfs_alloc_request req;
	struct nandfsmount *nmp;
	struct nandfs_mdt *mdt;
	struct nandfs_node *ifile;
	struct nandfs_inode *inode;
	uint32_t index;
	int error = 0;

	nmp = node->nn_nmp;
	ifile = nmp->nm_ifile_node;
	ASSERT_VOP_LOCKED(NTOV(ifile), __func__);

	req.entrynum = node->nn_ino;
	mdt = &nmp->nm_nandfsdev->nd_ifile_mdt;

	DPRINTF(IFILE, ("%s: node:%p ino:%#jx\n",
	    __func__, &node->nn_inode, (uintmax_t)node->nn_ino));

	error = nandfs_get_entry_block(mdt, ifile, &req, &index, 0);
	if (error) {
		printf("nandfs_get_entry_block returned with ERROR=%d\n",
		    error);
		return (error);
	}

	inode = ((struct nandfs_inode *) req.bp_entry->b_data) + index;
	memcpy(inode, &node->nn_inode, sizeof(*inode));
	error = nandfs_dirty_buf(req.bp_entry, 0);

	return (error);
}
Example 5
/*
 * Function to save the path and vnode attr information into the audit
 * record.
 *
 * It is assumed that the caller will hold any vnode locks necessary to
 * perform a VOP_GETATTR() on the passed vnode.
 *
 * XXX: The attr code is very similar to vfs_vnops.c:vn_stat(), but always
 * provides access to the generation number as we need that to construct the
 * BSM file ID.
 *
 * XXX: We should accept the process argument from the caller, since it's
 * very likely they already have a reference.
 *
 * XXX: Error handling in this function is poor.
 *
 * XXXAUDIT: Possibly KASSERT the path pointer is NULL?
 */
static int
audit_arg_vnode(struct vnode *vp, struct vnode_au_info *vnp)
{
	struct vattr vattr;
	int error;

	/*
	 * Assume that if the caller is calling audit_arg_vnode() on a
	 * non-MPSAFE vnode, then it will have acquired Giant.
	 */
	VFS_ASSERT_GIANT(vp->v_mount);
	ASSERT_VOP_LOCKED(vp, "audit_arg_vnode");

	error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
	if (error) {
		/* XXX: How to handle this case? */
		return (error);
	}

	vnp->vn_mode = vattr.va_mode;
	vnp->vn_uid = vattr.va_uid;
	vnp->vn_gid = vattr.va_gid;
	vnp->vn_dev = vattr.va_rdev;
	vnp->vn_fsid = vattr.va_fsid;
	vnp->vn_fileid = vattr.va_fileid;
	vnp->vn_gen = vattr.va_gen;
	return (0);
}
Example 6
/* Update block count of segment */
int
nandfs_update_segment(struct nandfs_device *fsdev, uint64_t seg, uint32_t nblks)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		nandfs_error("%s: read block:%jx to update\n",
		    __func__, blk);
		brelse(bp);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_flags = NANDFS_SEGMENT_USAGE_DIRTY;
	su_usage->su_nblocks += nblks;

	DPRINTF(SEG, ("%s: seg:%#jx inc:%#x cur:%#x\n",  __func__,
	    (uintmax_t)seg, nblks, su_usage->su_nblocks));

	nandfs_dirty_buf(bp, 1);

	return (0);
}
Example 7
int
zfs_dirlook(znode_t *dzp, const char *name, znode_t **zpp)
{
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    znode_t *zp;
    int error = 0;

    ASSERT_VOP_LOCKED(ZTOV(dzp), __func__);
    ASSERT(RRM_READ_HELD(&zfsvfs->z_teardown_lock));

    if (dzp->z_unlinked)
        return (SET_ERROR(ENOENT));

    if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
        *zpp = dzp;
    } else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
        error = zfs_dd_lookup(dzp, zpp);
    } else {
        error = zfs_dirent_lookup(dzp, name, &zp, ZEXISTS);
        if (error == 0) {
            dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
            *zpp = zp;
        }
    }
    return (error);
}
Example 8
/*
 * Return a VREF'ed alias for the lower vnode if one already exists, else NULLVP.
 * Lower vnode should be locked on entry and will be left locked on exit.
 */
static struct vnode *
pefs_nodehash_get(struct mount *mp, struct vnode *lowervp)
{
	struct pefs_node_listhead *hd;
	struct pefs_node *a;
	struct vnode *vp;

	ASSERT_VOP_LOCKED(lowervp, "pefs_nodehash_get");

	/*
	 * Find hash base, and then search the (two-way) linked
	 * list looking for a pefs_node structure which is referencing
	 * the lower vnode.  If found, increment the pefs_node
	 * reference count (but NOT the lower vnode's VREF counter).
	 */
	hd = pefs_nodehash_gethead(lowervp);
	mtx_lock(&pefs_node_listmtx);
	LIST_FOREACH(a, hd, pn_listentry) {
		if (a->pn_lowervp == lowervp && PN_TO_VP(a)->v_mount == mp) {
			/*
			 * Since we have the lower node locked, the pefs
			 * node cannot be in the process of recycling.  If
			 * it had been recycled before we grabbed the lower
			 * lock, it would not have been found on the hash.
			 */
			vp = PN_TO_VP(a);
			vref(vp);
			mtx_unlock(&pefs_node_listmtx);
			return (vp);
		}
	}
	mtx_unlock(&pefs_node_listmtx);
	return (NULLVP);
}
Example 9
static int
nandfs_bad_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct nandfs_segment_usage *su_usage;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	su_usage = SU_USAGE_OFF(bp, offset);
	su_usage->su_lastmod = fsdev->nd_ts.tv_sec;
	su_usage->su_flags = NANDFS_SEGMENT_USAGE_ERROR;

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));

	nandfs_dirty_buf(bp, 1);

	return (0);
}
Example 10
/*
 * Disable extended attribute support on an FS.
 */
static int
ufs_extattr_disable(struct ufsmount *ump, int attrnamespace,
    const char *attrname, struct thread *td)
{
	struct ufs_extattr_list_entry *uele;
	int error = 0;

	if (!ufs_extattr_valid_attrname(attrnamespace, attrname))
		return (EINVAL);

	uele = ufs_extattr_find_attr(ump, attrnamespace, attrname);
	if (!uele)
		return (ENOATTR);

	LIST_REMOVE(uele, uele_entries);

	vn_lock(uele->uele_backing_vnode, LK_SHARED | LK_RETRY);
	ASSERT_VOP_LOCKED(uele->uele_backing_vnode, "ufs_extattr_disable");
	VOP_UNLOCK(uele->uele_backing_vnode, 0);
	error = vn_close(uele->uele_backing_vnode, FREAD|FWRITE,
	    td->td_ucred, td);

	free(uele, M_UFS_EXTATTR);

	return (error);
}
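Example 10 takes the backing vnode's lock only long enough to satisfy the assertion before closing the file. For reference, ASSERT_VOP_LOCKED() costs nothing on non-debug kernels; a minimal sketch of its definition (an assumption, modeled on FreeBSD's sys/vnode.h):

/*
 * Sketch (assumption): the assertion is compiled in only when the
 * kernel is built with DEBUG_VFS_LOCKS, in which case it panics with
 * the supplied string if the vnode is not locked.
 */
#ifdef DEBUG_VFS_LOCKS
#define	ASSERT_VOP_LOCKED(vp, str)	assert_vop_locked((vp), (str))
#else
#define	ASSERT_VOP_LOCKED(vp, str)	((void)0)
#endif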
Example 11
/*
 * MAC Framework entry points relating to overall operation of system,
 * including global services such as the kernel environment and loadable
 * modules.
 *
 * System checks often align with existing privilege checks, but provide
 * additional security context that may be relevant to policies, such as the
 * specific object being operated on.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>

#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>

#define mac_assert_vnode_locked(VP) \
    assert((((VP)->v_flag & VLOCKSWORK) == 0) || VOP_ISLOCKED((VP)))

#if 0 /* XXX PM: We don't have the kenv(2) system call in OpenBSD. */
int
mac_kenv_check_dump(struct ucred *cred)
{
	int error;

	MAC_CHECK(kenv_check_dump, cred);

	return (error);
}

int
mac_kenv_check_get(struct ucred *cred, char *name)
{
	int error;

	MAC_CHECK(kenv_check_get, cred, name);

	return (error);
}

int
mac_kenv_check_set(struct ucred *cred, char *name, char *value)
{
	int error;

	MAC_CHECK(kenv_check_set, cred, name, value);

	return (error);
}

int
mac_kenv_check_unset(struct ucred *cred, char *name)
{
	int error;

	MAC_CHECK(kenv_check_unset, cred, name);

	return (error);
}
#endif

#if 0 /* XXX PM: We won't support kernel modules. */
int
mac_kld_check_load(struct ucred *cred, struct vnode *vp)
{
	int error;

	ASSERT_VOP_LOCKED(vp, "mac_kld_check_load");

	MAC_CHECK(kld_check_load, cred, vp, vp->v_label);

	return (error);
}
Example 12
int
fuse_vnode_get(struct mount *mp,
    struct fuse_entry_out *feo,
    uint64_t nodeid,
    struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    enum vtype vtyp)
{
	struct thread *td = (cnp != NULL ? cnp->cn_thread : curthread);
	int err = 0;

	err = fuse_vnode_alloc(mp, td, nodeid, vtyp, vpp);
	if (err) {
		return err;
	}
	if (dvp != NULL) {
		MPASS((cnp->cn_flags & ISDOTDOT) == 0);
		MPASS(!(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'));
		fuse_vnode_setparent(*vpp, dvp);
	}
	if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0 &&
	    feo != NULL &&
	    (feo->entry_valid != 0 || feo->entry_valid_nsec != 0)) {
		ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get");
		ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get");
		cache_enter(dvp, *vpp, cnp);
	}

	/*
	 * In userland, libfuse uses cached lookups for dot and dotdot entries,
	 * thus it does not really bump the nlookup counter for forget.
	 * Follow the same semantics and avoid bumping it in order to keep
	 * nlookup counters consistent.
	 */
	if (cnp == NULL || ((cnp->cn_flags & ISDOTDOT) == 0 &&
	    (cnp->cn_namelen != 1 || cnp->cn_nameptr[0] != '.')))
		VTOFUD(*vpp)->nlookup++;

	return 0;
}
Example 13
int
mac_system_check_swapoff(struct ucred *cred, struct vnode *vp)
{
	int error;

	ASSERT_VOP_LOCKED(vp, "mac_system_check_swapoff");

	MAC_POLICY_CHECK(system_check_swapoff, cred, vp, vp->v_label);
	MAC_CHECK_PROBE2(system_check_swapoff, error, cred, vp);

	return (error);
}
Example 14
int
mac_vnode_associate_extattr(struct mount *mp, struct vnode *vp)
{
	int error;

	ASSERT_VOP_LOCKED(vp, "mac_vnode_associate_extattr");

	MAC_POLICY_CHECK(vnode_associate_extattr, mp, mp->mnt_label, vp,
	    vp->v_label);

	return (error);
}
Example 15
int
mac_system_check_auditctl(struct ucred *cred, struct vnode *vp)
{
	int error;
	struct label *vl;

	ASSERT_VOP_LOCKED(vp, "mac_system_check_auditctl");

	vl = (vp != NULL) ? vp->v_label : NULL;
	MAC_POLICY_CHECK(system_check_auditctl, cred, vp, vl);
	MAC_CHECK_PROBE2(system_check_auditctl, error, cred, vp);

	return (error);
}
Example 16
/* Purge VM for a file when its callback is revoked.
 *
 * Locking:  No lock is held, not even the global lock.
 */
void
osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    struct vnode *vp;
    struct vm_object *obj;

    vp = AFSTOV(avc);
    ASSERT_VOP_LOCKED(vp, __func__);
    if (VOP_GETVOBJECT(vp, &obj) == 0) {
	VM_OBJECT_LOCK(obj);
	vm_object_page_remove(obj, 0, 0, FALSE);
	VM_OBJECT_UNLOCK(obj);
    }
    osi_vinvalbuf(vp, 0, 0, 0);
}
Example 17
int
ncl_upgrade_vnlock(struct vnode *vp)
{
	int old_lock;

	ASSERT_VOP_LOCKED(vp, "ncl_upgrade_vnlock");
	old_lock = NFSVOPISLOCKED(vp);
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED,
		    ("ncl_upgrade_vnlock: wrong old_lock %d", old_lock));
		/* Upgrade to exclusive lock, this might block */
		NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY);
  	}
	return (old_lock);
}
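The saved return value lets a caller restore the original lock state when it is done. A hypothetical call site (assuming the counterpart ncl_downgrade_vnlock(), which takes the value returned here; the surrounding code is illustrative only):

	int old_lock;

	old_lock = ncl_upgrade_vnlock(vp);
	/* ... work that requires the exclusive vnode lock ... */
	ncl_downgrade_vnlock(vp, old_lock);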
Example 18
/*
 * Determine if the file system supports NFSv4 ACLs.
 * Return 1 if it does, 0 otherwise.
 */
int
nfs_supportsnfsv4acls(struct vnode *vp)
{
	int error;
	register_t retval;

	ASSERT_VOP_LOCKED(vp, "nfs supports nfsv4acls");

	if (nfsrv_useacl == 0)
		return (0);
	error = VOP_PATHCONF(vp, _PC_ACL_NFS4, &retval);
	if (error == 0 && retval != 0)
		return (1);
	return (0);
}
Example 19
int
mac_system_check_acct(struct ucred *cred, struct vnode *vp)
{
	int error;

	if (vp != NULL) {
		ASSERT_VOP_LOCKED(vp, "mac_system_check_acct");
	}

	MAC_POLICY_CHECK(system_check_acct, cred, vp,
	    vp != NULL ? vp->v_label : NULL);
	MAC_CHECK_PROBE2(system_check_acct, error, cred, vp);

	return (error);
}
Example 20
int
nandfs_bufsync(struct bufobj *bo, int waitfor)
{
	struct vnode *vp;
	int error = 0;

	vp = bo2vnode(bo);

	ASSERT_VOP_LOCKED(vp, __func__);
	error = nandfs_sync_file(vp);
	if (error)
		nandfs_warning("%s: cannot flush buffers err:%d\n",
		    __func__, error);

	return (error);
}
Example 21
File: osi_vm.c Project: hwr/openafs
/* Purge VM for a file when its callback is revoked.
 *
 * Locking:  No lock is held, not even the global lock.
 */
void
osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
    struct vnode *vp;
    struct vm_object *obj;

    vp = AFSTOV(avc);
    ASSERT_VOP_LOCKED(vp, __func__);
    obj = vp->v_object;
    if (obj != NULL) {
	AFS_VM_OBJECT_WLOCK(obj);
	vm_object_page_remove(obj, 0, 0, FALSE);
	AFS_VM_OBJECT_WUNLOCK(obj);
    }
    osi_vinvalbuf(vp, 0, 0, 0);
}
Example 22
/*
 * Determine the quota file type.
 *
 * A 32-bit quota file is simply an array of struct dqblk32.
 *
 * A 64-bit quota file is a struct dqhdr64 followed by an array of struct
 * dqblk64.  The header contains various magic bits which allow us to be
 * reasonably confident that it is indeed a 64-bit quota file and not just
 * a 32-bit quota file that just happens to "look right".
 *
 */
static int
dqopen(struct vnode *vp, struct ufsmount *ump, int type)
{
    struct dqhdr64 dqh;
    struct iovec aiov;
    struct uio auio;
    int error;

    ASSERT_VOP_LOCKED(vp, "dqopen");
    auio.uio_iov = &aiov;
    auio.uio_iovcnt = 1;
    aiov.iov_base = &dqh;
    aiov.iov_len = sizeof(dqh);
    auio.uio_resid = sizeof(dqh);
    auio.uio_offset = 0;
    auio.uio_segflg = UIO_SYSSPACE;
    auio.uio_rw = UIO_READ;
    auio.uio_td = (struct thread *)0;
    error = VOP_READ(vp, &auio, 0, ump->um_cred[type]);

    if (error != 0)
        return (error);
    if (auio.uio_resid > 0) {
        /* assume 32 bits */
        return (0);
    }

    UFS_LOCK(ump);
    if (strcmp(dqh.dqh_magic, Q_DQHDR64_MAGIC) == 0 &&
            be32toh(dqh.dqh_version) == Q_DQHDR64_VERSION &&
            be32toh(dqh.dqh_hdrlen) == (uint32_t)sizeof(struct dqhdr64) &&
            be32toh(dqh.dqh_reclen) == (uint32_t)sizeof(struct dqblk64)) {
        /* XXX: what if the magic matches, but the sizes are wrong? */
        ump->um_qflags[type] |= QTF_64BIT;
    } else {
        ump->um_qflags[type] &= ~QTF_64BIT;
    }
    UFS_UNLOCK(ump);

    return (0);
}
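For reference, the on-disk header validated above has roughly this layout (a sketch, assuming the definitions in FreeBSD's <ufs/ufs/quota.h>):

/*
 * Sketch of the 64-bit quota file header (assumption): dqopen()
 * checks the magic, version, header length, and record length
 * before trusting the file; the integer fields are big-endian.
 */
struct dqhdr64 {
	char		dqh_magic[8];	/* Q_DQHDR64_MAGIC */
	uint32_t	dqh_version;	/* Q_DQHDR64_VERSION */
	uint32_t	dqh_hdrlen;	/* header length */
	uint32_t	dqh_reclen;	/* record length */
	char		dqh_unused[44];	/* pads the header to 64 bytes */
};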
Example 23
/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	xfs_vnode_t		*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	printf("xfs_iput_new: ip %p\n",ip);
	
	if ((ip->i_d.di_mode == 0)) {
		ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
		//vn_mark_bad(vp);
		printf("xfs_iput_new: ip %p di_mode == 0\n",ip);
		/* mabe call vgone here? RMC */
	}
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);

	ASSERT_VOP_LOCKED(vp->v_vnode, "xfs_iput_new");
	vput(vp->v_vnode);
}
Example 24
static int
zfs_dd_lookup(znode_t *dzp, znode_t **zpp)
{
    zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
    znode_t *zp;
    uint64_t parent;
    int error;

    ASSERT_VOP_LOCKED(ZTOV(dzp), __func__);
    ASSERT(RRM_READ_HELD(&zfsvfs->z_teardown_lock));

    if (dzp->z_unlinked)
        return (ENOENT);

    if ((error = sa_lookup(dzp->z_sa_hdl,
                           SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
        return (error);

    error = zfs_zget(zfsvfs, parent, &zp);
    if (error == 0)
        *zpp = zp;
    return (error);
}
Example 25
static int
ufs_lookup_upgrade_lock(struct vnode *vp)
{
	int error;

	ASSERT_VOP_LOCKED(vp, __FUNCTION__);
	if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
		return (0);

	error = 0;

	/*
	 * Upgrade vnode lock, since getinoquota()
	 * requires exclusive lock to modify inode.
	 */
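	/*
	 * Note (assumption about FreeBSD lock-upgrade semantics):
	 * LK_UPGRADE may drop the shared lock before reacquiring it
	 * exclusively, so the vnode can be reclaimed in that window.
	 * The vhold keeps the vnode from being freed, and VI_DOOMED
	 * is checked afterwards to detect a reclaim.
	 */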
	vhold(vp);
	vn_lock(vp, LK_UPGRADE | LK_RETRY);
	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED)
		error = ENOENT;
	vdropl(vp);
	return (error);
}
Example 26
/*
 * Function to save the path and vnode attr information into the audit
 * record.
 *
 * It is assumed that the caller will hold any vnode locks necessary to
 * perform a VOP_GETATTR() on the passed vnode.
 *
 * XXX: The attr code is very similar to vfs_vnops.c:vn_stat(), but always
 * provides access to the generation number as we need that to construct the
 * BSM file ID.
 *
 * XXX: We should accept the process argument from the caller, since it's
 * very likely they already have a reference.
 *
 * XXX: Error handling in this function is poor.
 *
 * XXXAUDIT: Possibly KASSERT the path pointer is NULL?
 */
static int
audit_arg_vnode(struct vnode *vp, struct vnode_au_info *vnp)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, "audit_arg_vnode");

	error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
	if (error) {
		/* XXX: How to handle this case? */
		return (error);
	}

	vnp->vn_mode = vattr.va_mode;
	vnp->vn_uid = vattr.va_uid;
	vnp->vn_gid = vattr.va_gid;
	vnp->vn_dev = vattr.va_rdev;
	vnp->vn_fsid = vattr.va_fsid;
	vnp->vn_fileid = vattr.va_fileid;
	vnp->vn_gen = vattr.va_gen;
	return (0);
}
Example 27
static int
pefs_node_lookup_name(struct vnode *lvp, struct vnode *ldvp, struct ucred *cred,
    char *encname, int *encname_len)
{
	struct vnode *nldvp;
	int error, locked, dlocked;
	int buflen = *encname_len;

	ASSERT_VOP_LOCKED(lvp, "pefs_node_lookup_name");
	locked = VOP_ISLOCKED(lvp);
	if (ldvp) {
		dlocked = VOP_ISLOCKED(ldvp);
		if (dlocked)
			VOP_UNLOCK(ldvp, 0);
	} else
		dlocked = 0;

	vref(lvp);
	VOP_UNLOCK(lvp, 0);
	nldvp = lvp;
	error = vn_vptocnp(&nldvp, cred, encname, encname_len);
	if (error == 0)
		vrele(nldvp);
	vrele(lvp);
	if (ldvp && dlocked)
		vn_lock(ldvp, dlocked | LK_RETRY);
	vn_lock(lvp, locked | LK_RETRY);
	if (error != 0)
		return (ENOENT);

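	/*
	 * Note (assumption, per the vn_vptocnp() contract): the name is
	 * written at the end of the buffer and *encname_len is updated
	 * to the offset where it starts, so the copy below moves the
	 * name to the front and converts the offset into its length.
	 */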
	memcpy(encname, encname + *encname_len, buflen - *encname_len);
	*encname_len = buflen - *encname_len;
	if (*encname_len < buflen)
		encname[*encname_len] = '\0';

	return (0);
}
Example 28
/* 
 * Return locked root vnode of a filesystem
 */
static int
smbfs_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct vnode *vp;
	struct smbnode *np;
	struct smbfattr fattr;
	struct thread *td;
	struct ucred *cred;
	struct smb_cred *scred;
	int error;

	td = curthread;
	cred = td->td_ucred;

	if (smp->sm_root) {
		*vpp = SMBTOV(smp->sm_root);
		return vget(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
	}
	scred = smbfs_malloc_scred();
	smb_makescred(scred, td, cred);
	error = smbfs_smb_lookup(NULL, NULL, 0, &fattr, scred);
	if (error)
		goto out;
	error = smbfs_nget(mp, NULL, NULL, 0, &fattr, &vp);
	if (error)
		goto out;
	ASSERT_VOP_LOCKED(vp, "smbfs_root");
	vp->v_vflag |= VV_ROOT;
	np = VTOSMB(vp);
	smp->sm_root = np;
	*vpp = vp;
out:
	smbfs_free_scred(scred);
	return error;
}
Example 29
/*
 * Make the buffer dirty; it will be updated soon, but first it needs to
 * be gathered by the syncer.
 */
int
nandfs_touch_segment(struct nandfs_device *fsdev, uint64_t seg)
{
	struct nandfs_node *su_node;
	struct buf *bp;
	uint64_t blk, offset;
	int error;

	su_node = fsdev->nd_su_node;
	ASSERT_VOP_LOCKED(NTOV(su_node), __func__);

	nandfs_seg_usage_blk_offset(fsdev, seg, &blk, &offset);

	error = nandfs_bread(su_node, blk, NOCRED, 0, &bp);
	if (error) {
		brelse(bp);
		nandfs_error("%s: cannot preallocate new segment\n", __func__);
		return (error);
	} else
		nandfs_dirty_buf(bp, 1);

	DPRINTF(SEG, ("%s: seg:%#jx\n", __func__, (uintmax_t)seg));
	return (error);
}
Example 30
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct inode *oip;
	int32_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	uint32_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");	

	if (length < 0)
		return (EINVAL);

	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 0));
	}
	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zeroed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
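	/*
	 * Worked example (assuming a 4 KiB block size and NDADDR == 12):
	 * truncating to length 0 gives lastblock == -1 and negative
	 * lastiblock values, so every direct block and all three levels
	 * of indirect blocks are released below.
	 */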
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before freeing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = oip->i_db[i];
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = oip->i_ib[i];
		oip->i_ib[i] = oldblks[NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 && (bo->bo_dirty.bv_cnt != 0 ||
	    bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else				/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}