Example #1
static int
vnop_write_9p(struct vnop_write_args *ap)
{
	vnode_t vp;
	node_9p *np;
	uio_t uio;
	user_ssize_t resid;
	off_t eof, zh, zt, off;
	int e, flag;

	TRACE();
	vp = ap->a_vp;
	uio = ap->a_uio;
	np = NTO9P(vp);

	if (vnode_isdir(vp))
		return EISDIR;
	
	off = uio_offset(uio);
	if (off < 0)
		return EINVAL;
	
	resid = uio_resid(uio);
	if (resid == 0)
		return 0;

	flag = ap->a_ioflag;
	if (ISSET(flag, IO_APPEND)) {
		off = np->dir.length;
		uio_setoffset(uio, off);
	}
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
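	/* uncached write: push dirty pages and invalidate the range so the direct write can't be shadowed by stale cache */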
	if (vnode_isnocache(vp) || ISSET(flag, IO_NOCACHE)) {
		ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_PUSHDIRTY|UBC_SYNC);
		ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
		e = nwrite_9p(np, uio);
	} else {
		zh = zt = 0;
		eof = MAX(np->dir.length, resid+off);
		if (eof > np->dir.length) {
			if (off > np->dir.length) {
				/* write begins past EOF: zero-fill from the old EOF up to off */
				zh = np->dir.length;
				SET(flag, IO_HEADZEROFILL);
			}
			/* zero-fill from the new EOF to the end of its page */
			zt = (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
			if (zt > eof)
				SET(flag, IO_TAILZEROFILL);
		}
		e = cluster_write(vp, uio, np->dir.length, eof, zh, zt, flag);
		if (e==0 && eof>np->dir.length) {
			np->dirtimer = 0;
			np->dir.length = eof;
			ubc_setsize(vp, eof);
		}
	}
	nunlock_9p(np);
	return e;
}
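The zero-fill arithmetic above is easy to get wrong, so here is a minimal userspace model of the same round-up logic. It assumes 4 KiB pages; PAGE_SIZE_64 and PAGE_MASK_64 are redefined only for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel constants (4 KiB pages assumed). */
#define PAGE_SIZE_64 4096ULL
#define PAGE_MASK_64 (PAGE_SIZE_64 - 1)

/* Round the new EOF up to its page boundary, as the tail zero-fill does. */
static uint64_t
tail_zero_end(uint64_t eof)
{
	return (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
}

int
main(void)
{
	/* a write ending mid-page leaves a tail to zero-fill... */
	assert(tail_zero_end(5000) == 8192);
	/* ...while a page-aligned EOF leaves none (zt == eof) */
	assert(tail_zero_end(8192) == 8192);
	printf("tail zero-fill rounding checks out\n");
	return 0;
}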
Example #2
static int
vnop_setattr_9p(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap;
	vnode_t vp;
	node_9p *np;
	dir_9p d;
	int e;

	TRACE();
	vp = ap->a_vp;
	vap = ap->a_vap;
	np = NTO9P(vp);

	if (vnode_vfsisrdonly(vp))
		return EROFS;
	
	if (vnode_isvroot(vp))
		return EACCES;

	nulldir(&d);
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		if (vnode_isdir(vp))
			return EISDIR;
		d.length = vap->va_data_size;
	}
	VATTR_SET_SUPPORTED(vap, va_data_size);

	if (VATTR_IS_ACTIVE(vap, va_access_time))
		d.atime = vap->va_access_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_access_time);

	if (VATTR_IS_ACTIVE(vap, va_modify_time))
		d.mtime = vap->va_modify_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_modify_time);

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		d.mode = vap->va_mode & 0777;
		if (vnode_isdir(vp))
			SET(d.mode, DMDIR);
		if (ISSET(np->nmp->flags, F_DOTU)) {
			switch (vnode_vtype(vp)) {
			case VBLK:
			case VCHR:
				SET(d.mode, DMDEVICE);
				break;
			case VLNK:
				SET(d.mode, DMSYMLINK);
				break;
			case VSOCK:
				SET(d.mode, DMSOCKET);
				break;
			case VFIFO:
				SET(d.mode, DMNAMEDPIPE);
				break;
			default:
				break;
			}
		}
	}
	VATTR_SET_SUPPORTED(vap, va_mode);

	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	e = wstat_9p(np->nmp, np->fid, &d);
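	/* attributes changed on the server: expire the cached stat so it is refetched */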
	np->dirtimer = 0;

	if (e==0 && d.length!=~0)
		ubc_setsize(vp, d.length);

	nunlock_9p(np);
	return e;
}
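The wstat path above relies on the 9P convention that an all-ones (~0) stat field means "don't change": nulldir() presumably initializes every field that way, so only the attributes copied from vap take effect, and the d.length != ~0 test detects a real truncation. Below is a hypothetical userspace mirror of that convention; the real dir_9p has more fields.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, trimmed-down dir_9p: in a 9P wstat, a field of all
 * ones (~0) tells the server to leave that attribute unchanged. */
typedef struct {
	uint64_t length;
	uint32_t mode;
	uint32_t atime;
	uint32_t mtime;
} dir_9p;

static void
nulldir(dir_9p *d)
{
	memset(d, 0xff, sizeof(*d));	/* every field reads back as ~0 */
}

int
main(void)
{
	dir_9p d;

	nulldir(&d);
	d.mtime = 1700000000;	/* only mtime will be applied by wstat */
	printf("length=%llx (unchanged), mtime=%u (set)\n",
	    (unsigned long long)d.length, d.mtime);
	return 0;
}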
Example #3
/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 */
int
ffs_balloc(
	register struct inode *ip,
	register ufs_daddr_t lbn,
	int size,
	kauth_cred_t cred,
	struct buf **bpp,
	int flags,
	int * blk_alloc)
{
	register struct fs *fs;
	register ufs_daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[NIADDR + 2];
	ufs_daddr_t newb, *bap, pref;
	int deallocated, osize, nsize, num, i, error;
	ufs_daddr_t *allocib, *blkp, *allocblk, allociblk[NIADDR + 1];
	int devBlockSize=0;
	int alloc_buffer = 1;
	struct mount *mp=vp->v_mount;
#if REV_ENDIAN_FS
	int rev_endian=(mp->mnt_flag & MNT_REVEND);
#endif /* REV_ENDIAN_FS */

	*bpp = NULL;
	if (lbn < 0)
		return (EFBIG);
	fs = ip->i_fs;
	if (flags & B_NOBUFF) 
		alloc_buffer = 0;

	if (blk_alloc)
		*blk_alloc = 0;

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment, then
	 * the fragment has to be extended to become a full block.
	 */
	nb = lblkno(fs, ip->i_size);
	if (nb < NDADDR && nb < lbn) {
		/*
		 * The file size prior to this write fits in the direct
		 * blocks (i.e. the last block may be a fragment), and
		 * this write extends the file beyond the block that
		 * holds the current end of file.
		 */
		osize = blksize(fs, ip, nb);
		/*
		 * osize is the space allocated on disk in the last block:
		 * either some number of fragments or a full block.
		 */
		if (osize < fs->fs_bsize && osize > 0) {
			/*
			 * Fragments are already allocated, but only the last
			 * block may hold them; since this write extends
			 * beyond that block, grow the allocation to a
			 * complete block.
			 */
			error = ffs_realloccg(ip, nb,
				ffs_blkpref(ip, nb, (int)nb, &ip->i_db[0]),
				osize, (int)fs->fs_bsize, cred, &bp);
			if (error)
				return (error);
			/* adjust the inode size we just grew;
			 * the new size spans nb+1 blocks since nb is 0-based */
			ip->i_size = (nb + 1) * fs->fs_bsize;
			ubc_setsize(vp, (off_t)ip->i_size);

			ip->i_db[nb] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
			ip->i_flag |= IN_CHANGE | IN_UPDATE;

			if ((flags & B_SYNC) || (!alloc_buffer)) {
				if (!alloc_buffer) 
					buf_setflags(bp, B_NOCACHE);
				buf_bwrite(bp);
			} else
				buf_bdwrite(bp);
			/* note that bp is already released here */
		}
	}
	/*
	 * The first NDADDR blocks are direct blocks
	 */
	if (lbn < NDADDR) {
		nb = ip->i_db[lbn];
		if (nb != 0 && ip->i_size >= (lbn + 1) * fs->fs_bsize) {
			if (alloc_buffer) {
				error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, NOCRED, &bp);
				if (error) {
					buf_brelse(bp);
					return (error);
				}
				*bpp = bp;
			}
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = fragroundup(fs, blkoff(fs, ip->i_size));
			nsize = fragroundup(fs, size);
			if (nsize <= osize) {
				if (alloc_buffer) {
					error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), osize, NOCRED, &bp);
					if (error) {
						buf_brelse(bp);
						return (error);
					}
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					*bpp = bp;
					return (0);
				} else {
					ip->i_flag |= IN_CHANGE | IN_UPDATE;
					return (0);
				}
			} else {
				error = ffs_realloccg(ip, lbn,
				    ffs_blkpref(ip, lbn, (int)lbn,
					&ip->i_db[0]), osize, nsize, cred, &bp);
				if (error)
					return (error);
				ip->i_db[lbn] = dbtofsb(fs, (ufs_daddr_t)buf_blkno(bp));
				ip->i_flag |= IN_CHANGE | IN_UPDATE;

				/* adjust the inode size we just grew */
				ip->i_size = (lbn * fs->fs_bsize) + size;
				ubc_setsize(vp, (off_t)ip->i_size);

				if (!alloc_buffer) {
					buf_setflags(bp, B_NOCACHE);
					if (flags & B_SYNC)
						buf_bwrite(bp);
					else
						buf_bdwrite(bp);
				} else
					*bpp = bp;
				return (0);

			}
		} else {
			if (ip->i_size < (lbn + 1) * fs->fs_bsize)
				nsize = fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			error = ffs_alloc(ip, lbn,
			    ffs_blkpref(ip, lbn, (int)lbn, &ip->i_db[0]),
			    nsize, cred, &newb);
			if (error)
				return (error);
			if (alloc_buffer) {
				bp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), nsize, 0, 0, BLK_WRITE);
				buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, newb)));

				if (flags & B_CLRBUF)
					buf_clear(bp);
			}
			ip->i_db[lbn] = newb;
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (blk_alloc) {
				*blk_alloc = nsize;
			}
			if (alloc_buffer)
				*bpp = bp;
			return (0);
		}
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if (error = ufs_getlbns(vp, lbn, indirs, &num))
		return(error);
#if DIAGNOSTIC
	if (num < 1)
		panic ("ffs_balloc: ufs_getlbns returned direct block");
#endif
	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = ip->i_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
	        if (error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    cred, &newb))
			return (error);
		nb = newb;
		*allocblk++ = nb;
		bp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[1].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(bp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(bp);
		/*
		 * If the mount is async, use a delayed write;
		 * otherwise write synchronously.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(bp);
		} else if ((error = buf_bwrite(bp)) != 0) {
			goto fail;
		}
		allocib = &ip->i_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = (int)buf_meta_bread(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			buf_brelse(bp);
			goto fail;
		}
		bap = (ufs_daddr_t *)buf_dataptr(bp);
#if	REV_ENDIAN_FS
		if (rev_endian)
			nb = OSSwapInt32(bap[indirs[i].in_off]);
		else {
#endif	/* REV_ENDIAN_FS */
			nb = bap[indirs[i].in_off];
#if REV_ENDIAN_FS
		}
#endif /* REV_ENDIAN_FS */
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			buf_brelse(bp);
			continue;
		}
		if (pref == 0)
			pref = ffs_blkpref(ip, lbn, 0, (ufs_daddr_t *)0);
		if (error =
		    ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		nbp = buf_getblk(vp, (daddr64_t)((unsigned)(indirs[i].in_lbn)), fs->fs_bsize, 0, 0, BLK_META);
		buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		buf_clear(nbp);
		/*
		 * If the mount is async, use a delayed write;
		 * otherwise write synchronously.
		 */
		if ((vp)->v_mount->mnt_flag & MNT_ASYNC) {
			error = 0;
			buf_bdwrite(nbp);
		} else if (error = buf_bwrite(nbp)) {
			buf_brelse(bp);
			goto fail;
		}
#if	REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i - 1].in_off] = OSSwapInt32(nb);
		else {
#endif	/* REV_ENDIAN_FS */
			bap[indirs[i - 1].in_off] = nb;
#if	REV_ENDIAN_FS
		}
#endif	/* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ffs_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
		if (error = ffs_alloc(ip,
		    lbn, pref, (int)fs->fs_bsize, cred, &newb)) {
			buf_brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
#if	REV_ENDIAN_FS
		if (rev_endian)
			bap[indirs[i].in_off] = OSSwapInt32(nb);
		else {
#endif	/* REV_ENDIAN_FS */
			bap[indirs[i].in_off] = nb;
#if	REV_ENDIAN_FS
		}
#endif	/* REV_ENDIAN_FS */
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			buf_bwrite(bp);
		} else {
			buf_bdwrite(bp);
		}
		if (alloc_buffer) {
			nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
			buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));

			if (flags & B_CLRBUF)
				buf_clear(nbp);
		}
		if (blk_alloc) {
			*blk_alloc = fs->fs_bsize;
		}
		if (alloc_buffer)
			*bpp = nbp;

		return (0);
	}
	buf_brelse(bp);
	if (alloc_buffer) {
		if (flags & B_CLRBUF) {
			error = (int)buf_bread(vp, (daddr64_t)((unsigned)lbn), (int)fs->fs_bsize, NOCRED, &nbp);
			if (error) {
				buf_brelse(nbp);
				goto fail;
			}
		} else {
			nbp = buf_getblk(vp, (daddr64_t)((unsigned)lbn), fs->fs_bsize, 0, 0, BLK_WRITE);
			buf_setblkno(nbp, (daddr64_t)((unsigned)fsbtodb(fs, nb)));
		}
		*bpp = nbp;
	}
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ffs_blkfree(ip, *blkp, fs->fs_bsize);
		deallocated += fs->fs_bsize;
	}
	if (allocib != NULL)
		*allocib = 0;
	if (deallocated) {
		devBlockSize = vfs_devblocksize(mp);
#if QUOTA
		/*
		 * Restore user's disk quota because allocation failed.
		 */
		(void) chkdq(ip, (int64_t)-deallocated, cred, FORCE);
#endif /* QUOTA */
		ip->i_blocks -= btodb(deallocated, devBlockSize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	return (error);
}
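The fragment handling in ffs_balloc() turns on whether the rounded-up old and new allocations differ. Here is a minimal userspace model of that decision, assuming an 8 KiB block / 1 KiB fragment geometry; the real values come from the superblock (fs_bsize/fs_fsize).

#include <assert.h>
#include <stdio.h>

/* Assumed geometry, stand-ins for superblock fields. */
#define FS_FSIZE 1024	/* fragment size */
#define FS_BSIZE 8192	/* block size */

/* Model of fragroundup(): round size up to a fragment boundary. */
static int
fragroundup_model(int size)
{
	return (size + FS_FSIZE - 1) & ~(FS_FSIZE - 1);
}

int
main(void)
{
	int osize = fragroundup_model(3000);	/* space already on disk */
	int nsize = fragroundup_model(5000);	/* space the write needs */

	assert(osize == 3072 && nsize == 5120);
	/* nsize > osize: ffs_balloc() would call ffs_realloccg() to grow
	 * the fragment; nsize <= osize would reuse the allocation as-is. */
	printf("osize=%d nsize=%d -> reallocate\n", osize, nsize);
	return 0;
}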
Example #4
/* relies on v1 paging */
static int
nullfs_pagein(struct vnop_pagein_args * ap)
{
	int error = EIO;
	struct vnode *vp, *lvp;

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp  = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl      = ap->a_pl;
		user_ssize_t bytes_remaining = 0;

		auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		ioaddr += ap->a_pl_offset;	/* step to this request's pages within the mapped UPL */

		error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
		if (error) {
			goto exit;
		}

		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context);

		bytes_remaining = uio_resid(auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size)
		{
			/* zero any bytes that weren't read into the upl */
			bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining);
		}

	exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(auio);
		}

	exit_no_unmap:
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit)
				{
					/* make sure bytes_to_commit and bytes_remaining are page aligned before calling ubc_upl_commit_range */
					if (bytes_to_commit & PAGE_MASK)
					{
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);

						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}
				
				/* abort anything that's left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(lvp);
	} else if((ap->a_flags & UPL_NOCOMMIT) == 0) {
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}
	return error;
}
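The commit/abort split at the end of nullfs_pagein() depends on rounding the committed byte count up to a page boundary and aborting the rest. Below is a small userspace model of that arithmetic, assuming 4 KiB pages; PAGE_MASK is redefined only for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK 4095	/* stand-in for the kernel constant, 4 KiB pages */

int
main(void)
{
	int64_t size = 16384;		/* a_size: four pages requested */
	int64_t remaining = 6000;	/* a short read left these unfilled */
	int64_t commit = size - remaining;

	/* round the commit up to a page boundary, as the pagein path
	 * does, then re-derive the remainder to abort */
	if (commit & PAGE_MASK) {
		commit = (commit & ~PAGE_MASK) + (PAGE_MASK + 1);
		remaining = size - commit;
	}
	assert(commit == 12288 && remaining == 4096);
	printf("commit %lld bytes, abort %lld bytes\n",
	    (long long)commit, (long long)remaining);
	return 0;
}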
Example #5
/* ioctl */
__private_extern__
int
fuse_internal_ioctl_avfi(vnode_t vp, __unused vfs_context_t context,
                         struct fuse_avfi_ioctl *avfi)
{
    int ret = 0;
    uint32_t hint = 0;

    if (!avfi) {
        return EINVAL;
    }

    if (avfi->cmd & FUSE_AVFI_MARKGONE) {

        /*
         * TBD
         */
        return EINVAL;
    }

    /* The result of this /does/ alter our return value. */
    if (avfi->cmd & FUSE_AVFI_UBC) {
        int ubc_flags = avfi->ubc_flags & (UBC_PUSHDIRTY  | UBC_PUSHALL |
                                           UBC_INVALIDATE | UBC_SYNC);
        if (ubc_msync(vp, (off_t)0, ubc_getsize(vp), (off_t*)0,
                      ubc_flags) == 0) {
            /* failed */
            ret = EINVAL; /* don't really have a good error to return */
        }
    }

    if (avfi->cmd & FUSE_AVFI_UBC_SETSIZE) {
        if (VTOFUD(vp)->filesize != avfi->size) {
            hint |= NOTE_WRITE;
            if (avfi->size > VTOFUD(vp)->filesize) {
                hint |= NOTE_EXTEND;
            }
            VTOFUD(vp)->filesize = avfi->size;
            ubc_setsize(vp, avfi->size);
        }
        (void)fuse_invalidate_attr(vp);
    }

    /* The result of this doesn't alter our return value. */
    if (avfi->cmd & FUSE_AVFI_PURGEATTRCACHE) {
        hint |= NOTE_ATTRIB;
        (void)fuse_invalidate_attr(vp);
    }

    /* The result of this doesn't alter our return value. */
    if (avfi->cmd & FUSE_AVFI_PURGEVNCACHE) {
        (void)fuse_vncache_purge(vp);
    }

    if (avfi->cmd & FUSE_AVFI_KNOTE) {
        hint |= avfi->note;
    }

    if (hint) {
        FUSE_KNOTE(vp, hint);
    }

    return ret;
}
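The handler above accumulates kevent hints across independent command bits and posts them once at the end. The sketch below models that accumulation in userspace; the AVFI_* values are made-up stand-ins (the real FUSE_AVFI_* values may differ), while the NOTE_* values match <sys/event.h>.

#include <stdint.h>
#include <stdio.h>

/* Stand-in command bits, for illustration only. */
#define AVFI_UBC_SETSIZE	0x01
#define AVFI_PURGEATTRCACHE	0x02

/* kevent vnode hints, as in <sys/event.h>. */
#define NOTE_WRITE	0x00000002
#define NOTE_EXTEND	0x00000004
#define NOTE_ATTRIB	0x00000008

int
main(void)
{
	uint32_t cmd = AVFI_UBC_SETSIZE | AVFI_PURGEATTRCACHE;
	uint64_t oldsize = 100, newsize = 200;
	uint32_t hint = 0;

	if (cmd & AVFI_UBC_SETSIZE) {
		hint |= NOTE_WRITE;
		if (newsize > oldsize)
			hint |= NOTE_EXTEND;	/* the file grew */
	}
	if (cmd & AVFI_PURGEATTRCACHE)
		hint |= NOTE_ATTRIB;
	if (hint)
		printf("would FUSE_KNOTE with hint 0x%x\n", hint);
	return 0;
}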
Example #6
__private_extern__
int
fuse_internal_exchange(vnode_t       fvp,
                       const char   *fname,
                       size_t        flen,
                       vnode_t       tvp,
                       const char   *tname,
                       size_t        tlen,
                       int           options,
                       vfs_context_t context)
{
    struct fuse_dispatcher fdi;
    struct fuse_exchange_in *fei;
    struct fuse_vnode_data *ffud = VTOFUD(fvp);
    struct fuse_vnode_data *tfud = VTOFUD(tvp);
    vnode_t fdvp = ffud->parentvp;
    vnode_t tdvp = tfud->parentvp;
    int err = 0;

    fdisp_init(&fdi, sizeof(*fei) + flen + tlen + 2);
    fdisp_make_vp(&fdi, FUSE_EXCHANGE, fvp, context);

    fei = fdi.indata;
    fei->olddir = VTOI(fdvp);
    fei->newdir = VTOI(tdvp);
    fei->options = (uint64_t)options;

    memcpy((char *)fdi.indata + sizeof(*fei), fname, flen);
    ((char *)fdi.indata)[sizeof(*fei) + flen] = '\0';

    memcpy((char *)fdi.indata + sizeof(*fei) + flen + 1, tname, tlen);
    ((char *)fdi.indata)[sizeof(*fei) + flen + tlen + 1] = '\0';
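    /* fdi.indata now holds: fuse_exchange_in, fname NUL, tname NUL */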

    ubc_msync(fvp, (off_t)0, (off_t)ffud->filesize, (off_t*)0,
              UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
    ubc_msync(tvp, (off_t)0, (off_t)tfud->filesize, (off_t*)0,
              UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
        
    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    if (err == 0) {
        if (fdvp) {
            fuse_invalidate_attr(fdvp);
        }
        if (tdvp != fdvp) {
            if (tdvp) {
                fuse_invalidate_attr(tdvp);
            }
        }

        fuse_invalidate_attr(fvp);
        fuse_invalidate_attr(tvp);

        cache_purge(fvp);
        cache_purge(tvp);

        /* Swap sizes */
        off_t tmpfilesize = ffud->filesize;
        ffud->filesize = tfud->filesize;
        tfud->filesize = tmpfilesize;
        ubc_setsize(fvp, (off_t)ffud->filesize);
        ubc_setsize(tvp, (off_t)tfud->filesize);

        fuse_kludge_exchange(fvp, tvp);

        /*
         * Another approach (will need additional kernel support to work):
         *
        vnode_t tmpvp = ffud->vp;
        ffud->vp = tfud->vp;
        tfud->vp = tmpvp;

        vnode_t tmpparentvp = ffud->parentvp;
        ffud->parentvp = tfud->parentvp;
        tfud->parentvp = tmpparentvp;

        off_t tmpfilesize = ffud->filesize;
        ffud->filesize = tfud->filesize;
        tfud->filesize = tmpfilesize;

        struct fuse_vnode_data tmpfud;
        memcpy(&tmpfud, ffud, sizeof(struct fuse_vnode_data));
        memcpy(ffud, tfud, sizeof(struct fuse_vnode_data));
        memcpy(tfud, &tmpfud, sizeof(struct fuse_vnode_data));
        
        HNodeExchangeFromFSNode(ffud, tfud);
        *
        */
    }

    return err;
}
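The request built above packs two NUL-terminated names behind the fixed fuse_exchange_in header, which is why the dispatcher is sized sizeof(*fei) + flen + tlen + 2. A self-contained sketch of that layout follows; the header struct is a stand-in, not the real fuse_exchange_in.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct fuse_exchange_in. */
struct exchange_hdr {
	uint64_t olddir;
	uint64_t newdir;
	uint64_t options;
};

int
main(void)
{
	const char *fname = "a.txt", *tname = "b.txt";
	size_t flen = strlen(fname), tlen = strlen(tname);
	size_t len = sizeof(struct exchange_hdr) + flen + tlen + 2;
	char *buf = calloc(1, len);

	/* header first, then fname NUL, then tname NUL */
	memcpy(buf + sizeof(struct exchange_hdr), fname, flen);
	buf[sizeof(struct exchange_hdr) + flen] = '\0';
	memcpy(buf + sizeof(struct exchange_hdr) + flen + 1, tname, tlen);
	buf[sizeof(struct exchange_hdr) + flen + tlen + 1] = '\0';

	assert(!strcmp(buf + sizeof(struct exchange_hdr), "a.txt"));
	assert(!strcmp(buf + sizeof(struct exchange_hdr) + flen + 1, "b.txt"));
	printf("message body is %zu bytes\n", len);
	free(buf);
	return 0;
}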
Example #7
/*
 * This routine exists to support load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
    char			*path,
    integer_t		archbits,
    struct mach_header	*mach_header,
    off_t			*file_offset,
    off_t			*macho_size,
    struct vnode		**vpp
)
{
    struct vnode		*vp;
    vfs_context_t		ctx = vfs_context_current();
    proc_t			p = vfs_context_proc(ctx);
    kauth_cred_t		kerncred;
    struct nameidata nid, *ndp;
    boolean_t		is_fat;
    struct fat_arch		fat_arch;
    int			error = LOAD_SUCCESS;
    int resid;
    union {
        struct mach_header	mach_header;
        struct fat_header	fat_header;
        char	pad[512];
    } header;
    off_t fsize = (off_t)0;
    int err2;

    /*
     * Capture the kernel credential for use in the actual read of the
     * file, since the user doing the execution may have execute rights
     * but not read rights, but to exec something, we have to either map
     * or read it into the new process address space, which requires
     * read rights.  This is to deal with lack of common credential
     * serialization code which would treat NOCRED as "serialize 'root'".
     */
    kerncred = vfs_context_ucred(vfs_context_kernel());

    ndp = &nid;

    /* initialize the namei data to point at the user's program file name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), ctx);

    if ((error = namei(ndp)) != 0) {
        if (error == ENOENT) {
            error = LOAD_ENOENT;
        } else {
            error = LOAD_FAILURE;
        }
        return(error);
    }
    nameidone(ndp);
    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* get size */
    if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
        error = LOAD_FAILURE;
        goto bad1;
    }

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* check access */
    if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* try to open it */
    if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
                         UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
        error = LOAD_IOERROR;
        goto bad2;
    }

    /* Classify the image by magic number: thin Mach-O or fat container. */
    if (header.mach_header.magic == MH_MAGIC ||
            header.mach_header.magic == MH_MAGIC_64)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
             header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
                        sizeof(header.mach_header), fat_arch.offset,
                        UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p);
        if (error) {
            error = LOAD_IOERROR;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC &&
                header.mach_header.magic != MH_MAGIC_64) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fat_arch.size;
    } else {
        /*
         * Force get_macho_vnode() to fail if the architecture bits
         * do not match the expected architecture bits.  This in
         * turn causes load_dylinker() to fail for the same reason,
         * so it ensures the dynamic linker and the binary are in
         * lock-step.  This is potentially bad, if we ever add to
         * the CPU_ARCH_* bits any bits that are desirable but not
         * required, since the dynamic linker might work, but we will
         * refuse to load it because of this check.
         */
        if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits) {
            /* close and release the vnode rather than leaking it */
            error = LOAD_BADARCH;
            goto bad2;
        }

        *file_offset = 0;
        *macho_size = fsize;
    }

    *mach_header = header.mach_header;
    *vpp = vp;

    ubc_setsize(vp, fsize);

    return (error);

bad2:
    err2 = VNOP_CLOSE(vp, FREAD, ctx);
    vnode_put(vp);
    return (error);

bad1:
    vnode_put(vp);
    return(error);
}
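The header sniffing in get_macho_vnode() reduces to a four-way magic-number check. Below is a compilable mirror of just that test, with the magic values as they appear in <mach-o/loader.h> and <mach-o/fat.h>.

#include <stdint.h>
#include <stdio.h>

#define MH_MAGIC	0xfeedface	/* 32-bit thin Mach-O */
#define MH_MAGIC_64	0xfeedfacf	/* 64-bit thin Mach-O */
#define FAT_MAGIC	0xcafebabe	/* fat header, native order */
#define FAT_CIGAM	0xbebafeca	/* fat header, byte-swapped */

/* Mirrors the classification in get_macho_vnode(). */
static const char *
classify(uint32_t magic)
{
	if (magic == MH_MAGIC || magic == MH_MAGIC_64)
		return "thin Mach-O";
	if (magic == FAT_MAGIC || magic == FAT_CIGAM)
		return "fat binary";
	return "not Mach-O";
}

int
main(void)
{
	printf("%s\n", classify(0xfeedfacf));	/* thin Mach-O */
	printf("%s\n", classify(0xcafebabe));	/* fat binary */
	printf("%s\n", classify(0x7f454c46));	/* ELF magic: not Mach-O */
	return 0;
}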