/*
 * Real work associated with retrieving a named attribute--assumes that
 * the attribute lock has already been grabbed.
 */
static int
ufs_extattr_get(struct vnode *vp, int attrnamespace, const char *name,
    struct uio *uio, size_t *size, struct ucred *cred, struct thread *td)
{
	struct myfs_ufs_extattr_list_entry *attribute;
	struct myfs_ufs_extattr_header ueh;
	struct iovec local_aiov;
	struct uio local_aio;
	struct mount *mp = vp->v_mount;
	struct myfs_ufsmount *ump = VFSTOMYFS(mp);
	struct myfs_inode *ip = MYFS_VTOI(vp);
	off_t base_offset;
	size_t len, old_len;
	int error = 0;

	if (!(ump->um_extattr.uepm_flags & MYFS_EXTATTR_UEPM_STARTED))
		return (EOPNOTSUPP);

	if (strlen(name) == 0)
		return (EINVAL);

	error = extattr_check_cred(vp, attrnamespace, cred, td, VREAD);
	if (error)
		return (error);

	attribute = ufs_extattr_find_attr(ump, attrnamespace, name);
	if (!attribute)
		return (ENOATTR);

	/*
	 * Allow only offsets of zero to encourage the read/replace
	 * extended attribute semantic.  Otherwise we can't guarantee
	 * atomicity, as we don't provide locks for extended attributes.
	 */
	if (uio != NULL && uio->uio_offset != 0)
		return (ENXIO);

	/*
	 * Find base offset of header in file based on file header size, and
	 * data header size + maximum data size, indexed by inode number.
	 */
	base_offset = sizeof(struct myfs_ufs_extattr_fileheader) +
	    ip->i_number * (sizeof(struct myfs_ufs_extattr_header) +
	    attribute->uele_fileheader.uef_size);

	/*
	 * Read in the data header to see if the data is defined, and if so
	 * how much.
	 */
	bzero(&ueh, sizeof(struct myfs_ufs_extattr_header));
	local_aiov.iov_base = (caddr_t) &ueh;
	local_aiov.iov_len = sizeof(struct myfs_ufs_extattr_header);
	local_aio.uio_iov = &local_aiov;
	local_aio.uio_iovcnt = 1;
	local_aio.uio_rw = UIO_READ;
	local_aio.uio_segflg = UIO_SYSSPACE;
	local_aio.uio_td = td;
	local_aio.uio_offset = base_offset;
	local_aio.uio_resid = sizeof(struct myfs_ufs_extattr_header);

	/*
	 * Acquire locks.
	 *
	 * Don't need to get a lock on the backing file if the getattr is
	 * being applied to the backing file, as the lock is already held.
	 */
	if (attribute->uele_backing_vnode != vp)
		vn_lock(attribute->uele_backing_vnode, LK_SHARED | LK_RETRY);

	error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
	    IO_NODELOCKED, ump->um_extattr.uepm_ucred);
	if (error)
		goto vopunlock_exit;

	/* Defined? */
	if ((ueh.ueh_flags & MYFS_EXTATTR_ATTR_FLAG_INUSE) == 0) {
		error = ENOATTR;
		goto vopunlock_exit;
	}

	/* Valid for the current inode generation? */
	if (ueh.ueh_i_gen != ip->i_gen) {
		/*
		 * The inode itself has a different generation number
		 * than the attribute data.  For now, the best solution
		 * is to coerce this to undefined, and let it get cleaned
		 * up by the next write or extattrctl clean.
		 */
		printf("ufs_extattr_get (%s): inode generation inconsistency (%d, %jd)\n",
		    mp->mnt_stat.f_mntonname, ueh.ueh_i_gen,
		    (intmax_t)ip->i_gen);
		error = ENOATTR;
		goto vopunlock_exit;
	}

	/* Local size consistency check. */
	if (ueh.ueh_len > attribute->uele_fileheader.uef_size) {
		error = ENXIO;
		goto vopunlock_exit;
	}

	/* Return full data size if caller requested it. */
	if (size != NULL)
		*size = ueh.ueh_len;

	/* Return data if the caller requested it. */
	if (uio != NULL) {
		/* Allow for offset into the attribute data. */
		uio->uio_offset = base_offset +
		    sizeof(struct myfs_ufs_extattr_header);

		/*
		 * Figure out maximum to transfer -- use buffer size and
		 * local data limit.
		 */
		len = MIN(uio->uio_resid, ueh.ueh_len);
		old_len = uio->uio_resid;
		uio->uio_resid = len;

		error = VOP_READ(attribute->uele_backing_vnode, uio,
		    IO_NODELOCKED, ump->um_extattr.uepm_ucred);
		if (error)
			goto vopunlock_exit;

		uio->uio_resid = old_len - (len - uio->uio_resid);
	}

vopunlock_exit:
	if (uio != NULL)
		uio->uio_offset = 0;

	if (attribute->uele_backing_vnode != vp)
		VOP_UNLOCK(attribute->uele_backing_vnode, 0);

	return (error);
}
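/*
 * Illustration (not part of the kernel sources): the attribute backing
 * file is a flat array of fixed-size slots indexed by inode number, so
 * the header for inode N lives at
 *
 *	sizeof(fileheader) + N * (sizeof(attr header) + uef_size)
 *
 * which is exactly the base_offset computation above.  The standalone
 * userland sketch below mirrors that arithmetic with hypothetical sizes
 * (a 16-byte file header, a 16-byte per-attribute header, a 64-byte
 * maximum data size); the real sizes come from the myfs structures.
 */
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const off_t fileheader_size = 16;	/* hypothetical */
	const off_t attrheader_size = 16;	/* hypothetical */
	const off_t uef_size = 64;		/* hypothetical per-attr max */
	ino_t inums[] = { 2, 3, 100 };

	for (size_t i = 0; i < sizeof(inums) / sizeof(inums[0]); i++) {
		/* Slot base, then the data that follows its header. */
		off_t base = fileheader_size +
		    (off_t)inums[i] * (attrheader_size + uef_size);
		printf("inode %ju: header at %jd, data at %jd\n",
		    (uintmax_t)inums[i], (intmax_t)base,
		    (intmax_t)(base + attrheader_size));
	}
	return (0);
}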
/*
 * Balloc defines the structure of filesystem storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 * This is the allocation strategy for MYFS1.  Below is
 * the allocation strategy for MYFS2.
 */
int
myfs_ffs_balloc_ufs1(struct vnode *vp, off_t startoffset, int size,
    struct ucred *cred, int flags, struct buf **bpp)
{
	struct myfs_inode *ip;
	struct myfs_ufs1_dinode *dp;
	myfs_ufs_lbn_t lbn, lastlbn;
	struct myfs_fs *fs;
	myfs_ufs1_daddr_t nb;
	struct buf *bp, *nbp;
	struct myfs_ufsmount *ump;
	struct myfs_indir indirs[MYFS_NIADDR + 2];
	int deallocated, osize, nsize, num, i, error;
	myfs_ufs2_daddr_t newb;
	myfs_ufs1_daddr_t *bap, pref;
	myfs_ufs1_daddr_t *allocib, *blkp, *allocblk, allociblk[MYFS_NIADDR + 1];
	myfs_ufs2_daddr_t *lbns_remfree, lbns[MYFS_NIADDR + 1];
	int unwindidx = -1;
	int saved_inbdflush;

	ip = MYFS_VTOI(vp);
	dp = ip->i_din1;
	fs = ip->i_fs;
	ump = ip->i_ump;
	lbn = myfs_lblkno(fs, startoffset);
	size = myfs_blkoff(fs, startoffset) + size;
	if (size > fs->fs_bsize)
		panic("ffs_balloc_ufs1: blk too big");
	*bpp = NULL;
	if (flags & IO_EXT)
		return (EOPNOTSUPP);
	if (lbn < 0)
		return (EFBIG);

	/*
	 * If the next write will extend the file into a new block,
	 * and the file is currently composed of a fragment,
	 * this fragment has to be extended to be a full block.
	 */
	lastlbn = myfs_lblkno(fs, ip->i_size);
	if (lastlbn < MYFS_NDADDR && lastlbn < lbn) {
		nb = lastlbn;
		osize = myfs_blksize(fs, ip, nb);
		if (osize < fs->fs_bsize && osize > 0) {
			MYFS_LOCK(ump);
			error = myfs_ffs_realloccg(ip, nb, dp->di_db[nb],
			    myfs_ffs_blkpref_ufs1(ip, lastlbn, (int)nb,
			    &dp->di_db[0]), osize, (int)fs->fs_bsize,
			    flags, cred, &bp);
			if (error)
				return (error);
			if (MYFS_DOINGSOFTDEP(vp))
				myfs_softdep_setup_allocdirect(ip, nb,
				    myfs_dbtofsb(fs, bp->b_blkno),
				    dp->di_db[nb], fs->fs_bsize, osize, bp);
			ip->i_size = myfs_smalllblktosize(fs, nb + 1);
			dp->di_size = ip->i_size;
			dp->di_db[nb] = myfs_dbtofsb(fs, bp->b_blkno);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (flags & IO_SYNC)
				bwrite(bp);
			else
				bawrite(bp);
		}
	}
	/*
	 * The first MYFS_NDADDR blocks are direct blocks.
	 */
	if (lbn < MYFS_NDADDR) {
		if (flags & MYFS_BA_METAONLY)
			panic("ffs_balloc_ufs1: MYFS_BA_METAONLY for direct block");
		nb = dp->di_db[lbn];
		if (nb != 0 && ip->i_size >= myfs_smalllblktosize(fs, lbn + 1)) {
			error = bread(vp, lbn, fs->fs_bsize, NOCRED, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
			bp->b_blkno = myfs_fsbtodb(fs, nb);
			*bpp = bp;
			return (0);
		}
		if (nb != 0) {
			/*
			 * Consider need to reallocate a fragment.
			 */
			osize = myfs_fragroundup(fs, myfs_blkoff(fs, ip->i_size));
			nsize = myfs_fragroundup(fs, size);
			if (nsize <= osize) {
				error = bread(vp, lbn, osize, NOCRED, &bp);
				if (error) {
					brelse(bp);
					return (error);
				}
				bp->b_blkno = myfs_fsbtodb(fs, nb);
			} else {
				MYFS_LOCK(ump);
				error = myfs_ffs_realloccg(ip, lbn,
				    dp->di_db[lbn],
				    myfs_ffs_blkpref_ufs1(ip, lbn, (int)lbn,
				    &dp->di_db[0]), osize, nsize, flags,
				    cred, &bp);
				if (error)
					return (error);
				if (MYFS_DOINGSOFTDEP(vp))
					myfs_softdep_setup_allocdirect(ip,
					    lbn, myfs_dbtofsb(fs, bp->b_blkno),
					    nb, nsize, osize, bp);
			}
		} else {
			if (ip->i_size < myfs_smalllblktosize(fs, lbn + 1))
				nsize = myfs_fragroundup(fs, size);
			else
				nsize = fs->fs_bsize;
			MYFS_LOCK(ump);
			error = myfs_ffs_alloc(ip, lbn,
			    myfs_ffs_blkpref_ufs1(ip, lbn, (int)lbn,
			    &dp->di_db[0]), nsize, flags, cred, &newb);
			if (error)
				return (error);
			bp = getblk(vp, lbn, nsize, 0, 0, 0);
			bp->b_blkno = myfs_fsbtodb(fs, newb);
			if (flags & MYFS_BA_CLRBUF)
				vfs_bio_clrbuf(bp);
			if (MYFS_DOINGSOFTDEP(vp))
				myfs_softdep_setup_allocdirect(ip, lbn, newb,
				    0, nsize, 0, bp);
		}
		dp->di_db[lbn] = myfs_dbtofsb(fs, bp->b_blkno);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bpp = bp;
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = myfs_ufs_getlbns(vp, lbn, indirs, &num)) != 0)
		return (error);
#ifdef INVARIANTS
	if (num < 1)
		panic("ffs_balloc_ufs1: myfs_ufs_getlbns returned indirect block");
#endif
	saved_inbdflush = ~TDP_INBDFLUSH | (curthread->td_pflags &
	    TDP_INBDFLUSH);
	curthread->td_pflags |= TDP_INBDFLUSH;
	/*
	 * Fetch the first indirect block, allocating if necessary.
	 */
	--num;
	nb = dp->di_ib[indirs[0].in_off];
	allocib = NULL;
	allocblk = allociblk;
	lbns_remfree = lbns;
	if (nb == 0) {
		MYFS_LOCK(ump);
		pref = myfs_ffs_blkpref_ufs1(ip, lbn, 0, (myfs_ufs1_daddr_t *)0);
		if ((error = myfs_ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags, cred, &newb)) != 0) {
			curthread->td_pflags &= saved_inbdflush;
			return (error);
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[1].in_lbn;
		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0, 0);
		bp->b_blkno = myfs_fsbtodb(fs, nb);
		vfs_bio_clrbuf(bp);
		if (MYFS_DOINGSOFTDEP(vp)) {
			myfs_softdep_setup_allocdirect(ip,
			    MYFS_NDADDR + indirs[0].in_off, newb, 0,
			    fs->fs_bsize, 0, bp);
			bdwrite(bp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if (MYFS_DOINGASYNC(vp))
				bdwrite(bp);
			else if ((error = bwrite(bp)) != 0)
				goto fail;
		}
		allocib = &dp->di_ib[indirs[0].in_off];
		*allocib = nb;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 */
	for (i = 1;;) {
		error = bread(vp, indirs[i].in_lbn, (int)fs->fs_bsize,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			goto fail;
		}
		bap = (myfs_ufs1_daddr_t *)bp->b_data;
		nb = bap[indirs[i].in_off];
		if (i == num)
			break;
		i += 1;
		if (nb != 0) {
			bqrelse(bp);
			continue;
		}
		MYFS_LOCK(ump);
		if (pref == 0)
			pref = myfs_ffs_blkpref_ufs1(ip, lbn, 0,
			    (myfs_ufs1_daddr_t *)0);
		if ((error = myfs_ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags, cred, &newb)) != 0) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = indirs[i].in_lbn;
		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = myfs_fsbtodb(fs, nb);
		vfs_bio_clrbuf(nbp);
		if (MYFS_DOINGSOFTDEP(vp)) {
			myfs_softdep_setup_allocindir_meta(nbp, ip, bp,
			    indirs[i - 1].in_off, nb);
			bdwrite(nbp);
		} else {
			/*
			 * Write synchronously so that indirect blocks
			 * never point at garbage.
			 */
			if ((error = bwrite(nbp)) != 0) {
				brelse(bp);
				goto fail;
			}
		}
		bap[indirs[i - 1].in_off] = nb;
		if (allocib == NULL && unwindidx < 0)
			unwindidx = i - 1;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
	}
	/*
	 * If asked only for the indirect block, then return it.
	 */
	if (flags & MYFS_BA_METAONLY) {
		curthread->td_pflags &= saved_inbdflush;
		*bpp = bp;
		return (0);
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		MYFS_LOCK(ump);
		pref = myfs_ffs_blkpref_ufs1(ip, lbn, indirs[i].in_off,
		    &bap[0]);
		error = myfs_ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize,
		    flags, cred, &newb);
		if (error) {
			brelse(bp);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		*lbns_remfree++ = lbn;
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = myfs_fsbtodb(fs, nb);
		if (flags & MYFS_BA_CLRBUF)
			vfs_bio_clrbuf(nbp);
		if (MYFS_DOINGSOFTDEP(vp))
			myfs_softdep_setup_allocindir_page(ip, lbn, bp,
			    indirs[i].in_off, nb, 0, nbp);
		bap[indirs[i].in_off] = nb;
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & IO_SYNC) {
			bwrite(bp);
		} else {
			if (bp->b_bufsize == fs->fs_bsize)
				bp->b_flags |= B_CLUSTEROK;
			bdwrite(bp);
		}
		curthread->td_pflags &= saved_inbdflush;
		*bpp = nbp;
		return (0);
	}
	brelse(bp);
	if (flags & MYFS_BA_CLRBUF) {
		int seqcount = (flags & MYFS_BA_SEQMASK) >> MYFS_BA_SEQSHIFT;
		if (seqcount && (vp->v_mount->mnt_flag & MNT_NOCLUSTERR) == 0) {
			error = cluster_read(vp, ip->i_size, lbn,
			    (int)fs->fs_bsize, NOCRED, MAXBSIZE,
			    seqcount, &nbp);
		} else {
			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
		}
		if (error) {
			brelse(nbp);
			goto fail;
		}
	} else {
		/*
		 * (Reconstructed from the stock FFS code; the excerpt is
		 * truncated at this point.)  Without MYFS_BA_CLRBUF the
		 * caller will overwrite the whole block, so hand back a
		 * buffer without reading it in.
		 */
		nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0, 0);
		nbp->b_blkno = myfs_fsbtodb(fs, nb);
	}
	curthread->td_pflags &= saved_inbdflush;
	*bpp = nbp;
	return (0);
fail:
	/*
	 * The failure-unwind path -- syncing the vnode and releasing the
	 * partially allocated blocks recorded in allociblk[]/lbns[] -- is
	 * elided in this excerpt.
	 */
	curthread->td_pflags &= saved_inbdflush;
	return (error);
}
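/*
 * Illustration (not part of the kernel sources): myfs_ufs_getlbns()
 * classifies a logical block number as direct or as requiring one, two,
 * or three levels of indirection, which is what drives the loop above.
 * The standalone sketch below reproduces just that classification under
 * assumed UFS1 parameters: NDADDR = 12 direct blocks and
 * nindir = fs_bsize / sizeof(pointer) entries per indirect block
 * (2048 for an 8K block with 4-byte UFS1 pointers).
 */
#include <stdint.h>
#include <stdio.h>

#define NDADDR	12	/* direct blocks in the inode (assumed) */

static int
indir_level(int64_t lbn, int64_t nindir)
{
	int64_t span = NDADDR;		/* blocks reachable so far */
	int64_t level_span = nindir;	/* blocks mapped at this level */
	int level;

	if (lbn < span)
		return (0);		/* direct block */
	for (level = 1; level <= 3; level++) {
		if (lbn < span + level_span)
			return (level);
		span += level_span;
		level_span *= nindir;
	}
	return (-1);			/* beyond triple indirect: EFBIG */
}

int
main(void)
{
	int64_t nindir = 8192 / 4;	/* 8K blocks, 4-byte pointers */
	/* Samples straddle each level boundary. */
	int64_t samples[] = { 0, 11, 12, 2059, 2060, 4196363, 4196364 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("lbn %lld -> %d levels of indirection\n",
		    (long long)samples[i], indir_level(samples[i], nindir));
	return (0);
}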
/*
 * Real work associated with removing an extended attribute from a vnode.
 * Assumes the attribute lock has already been grabbed.
 */
static int
ufs_extattr_rm(struct vnode *vp, int attrnamespace, const char *name,
    struct ucred *cred, struct thread *td)
{
	struct myfs_ufs_extattr_list_entry *attribute;
	struct myfs_ufs_extattr_header ueh;
	struct iovec local_aiov;
	struct uio local_aio;
	struct mount *mp = vp->v_mount;
	struct myfs_ufsmount *ump = VFSTOMYFS(mp);
	struct myfs_inode *ip = MYFS_VTOI(vp);
	off_t base_offset;
	int error = 0, ioflag;

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	if (!(ump->um_extattr.uepm_flags & MYFS_EXTATTR_UEPM_STARTED))
		return (EOPNOTSUPP);

	if (!ufs_extattr_valid_attrname(attrnamespace, name))
		return (EINVAL);

	error = extattr_check_cred(vp, attrnamespace, cred, td, VWRITE);
	if (error)
		return (error);

	attribute = ufs_extattr_find_attr(ump, attrnamespace, name);
	if (!attribute)
		return (ENOATTR);

	/*
	 * Find base offset of header in file based on file header size, and
	 * data header size + maximum data size, indexed by inode number.
	 */
	base_offset = sizeof(struct myfs_ufs_extattr_fileheader) +
	    ip->i_number * (sizeof(struct myfs_ufs_extattr_header) +
	    attribute->uele_fileheader.uef_size);

	/*
	 * Check to see if currently defined.
	 */
	bzero(&ueh, sizeof(struct myfs_ufs_extattr_header));

	local_aiov.iov_base = (caddr_t) &ueh;
	local_aiov.iov_len = sizeof(struct myfs_ufs_extattr_header);
	local_aio.uio_iov = &local_aiov;
	local_aio.uio_iovcnt = 1;
	local_aio.uio_rw = UIO_READ;
	local_aio.uio_segflg = UIO_SYSSPACE;
	local_aio.uio_td = td;
	local_aio.uio_offset = base_offset;
	local_aio.uio_resid = sizeof(struct myfs_ufs_extattr_header);

	/*
	 * Don't need to get the lock on the backing vnode if the vnode we're
	 * modifying is it, as we already hold the lock.
	 */
	if (attribute->uele_backing_vnode != vp)
		vn_lock(attribute->uele_backing_vnode, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_READ(attribute->uele_backing_vnode, &local_aio,
	    IO_NODELOCKED, ump->um_extattr.uepm_ucred);
	if (error)
		goto vopunlock_exit;

	/* Defined? */
	if ((ueh.ueh_flags & MYFS_EXTATTR_ATTR_FLAG_INUSE) == 0) {
		error = ENOATTR;
		goto vopunlock_exit;
	}

	/* Valid for the current inode generation? */
	if (ueh.ueh_i_gen != ip->i_gen) {
		/*
		 * The inode itself has a different generation number than
		 * the attribute data.  For now, the best solution is to
		 * coerce this to undefined, and let it get cleaned up by
		 * the next write or extattrctl clean.
		 */
		printf("ufs_extattr_rm (%s): inode generation inconsistency (%d, %jd)\n",
		    mp->mnt_stat.f_mntonname, ueh.ueh_i_gen,
		    (intmax_t)ip->i_gen);
		error = ENOATTR;
		goto vopunlock_exit;
	}

	/* Flag it as not in use. */
	ueh.ueh_flags = 0;
	ueh.ueh_len = 0;

	local_aiov.iov_base = (caddr_t) &ueh;
	local_aiov.iov_len = sizeof(struct myfs_ufs_extattr_header);
	local_aio.uio_iov = &local_aiov;
	local_aio.uio_iovcnt = 1;
	local_aio.uio_rw = UIO_WRITE;
	local_aio.uio_segflg = UIO_SYSSPACE;
	local_aio.uio_td = td;
	local_aio.uio_offset = base_offset;
	local_aio.uio_resid = sizeof(struct myfs_ufs_extattr_header);

	ioflag = IO_NODELOCKED;
	if (ufs_extattr_sync)
		ioflag |= IO_SYNC;
	error = VOP_WRITE(attribute->uele_backing_vnode, &local_aio, ioflag,
	    ump->um_extattr.uepm_ucred);
	if (error)
		goto vopunlock_exit;

	if (local_aio.uio_resid != 0)
		error = ENXIO;

vopunlock_exit:
	/*
	 * Only drop the backing-vnode lock if we took it above; otherwise
	 * we would release the caller's lock on vp.  (The flattened
	 * original unlocked unconditionally, asymmetrically with the
	 * conditional vn_lock() and with ufs_extattr_get/set.)
	 */
	if (attribute->uele_backing_vnode != vp)
		VOP_UNLOCK(attribute->uele_backing_vnode, 0);

	return (error);
}
/*
 * Real work associated with setting a vnode's extended attributes;
 * assumes that the attribute lock has already been grabbed.
 */
static int
ufs_extattr_set(struct vnode *vp, int attrnamespace, const char *name,
    struct uio *uio, struct ucred *cred, struct thread *td)
{
	struct myfs_ufs_extattr_list_entry *attribute;
	struct myfs_ufs_extattr_header ueh;
	struct iovec local_aiov;
	struct uio local_aio;
	struct mount *mp = vp->v_mount;
	struct myfs_ufsmount *ump = VFSTOMYFS(mp);
	struct myfs_inode *ip = MYFS_VTOI(vp);
	off_t base_offset;
	int error = 0, ioflag;

	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return (EROFS);

	if (!(ump->um_extattr.uepm_flags & MYFS_EXTATTR_UEPM_STARTED))
		return (EOPNOTSUPP);

	if (!ufs_extattr_valid_attrname(attrnamespace, name))
		return (EINVAL);

	error = extattr_check_cred(vp, attrnamespace, cred, td, VWRITE);
	if (error)
		return (error);

	attribute = ufs_extattr_find_attr(ump, attrnamespace, name);
	if (!attribute)
		return (ENOATTR);

	/*
	 * Early rejection of invalid offsets/length.
	 * Reject: any offset but 0 (replace)
	 *	   any size greater than attribute size limit
	 */
	if (uio->uio_offset != 0 ||
	    uio->uio_resid > attribute->uele_fileheader.uef_size)
		return (ENXIO);

	/*
	 * Find base offset of header in file based on file header size, and
	 * data header size + maximum data size, indexed by inode number.
	 */
	base_offset = sizeof(struct myfs_ufs_extattr_fileheader) +
	    ip->i_number * (sizeof(struct myfs_ufs_extattr_header) +
	    attribute->uele_fileheader.uef_size);

	/*
	 * Write out a data header for the data.
	 */
	ueh.ueh_len = uio->uio_resid;
	ueh.ueh_flags = MYFS_EXTATTR_ATTR_FLAG_INUSE;
	ueh.ueh_i_gen = ip->i_gen;
	local_aiov.iov_base = (caddr_t) &ueh;
	local_aiov.iov_len = sizeof(struct myfs_ufs_extattr_header);
	local_aio.uio_iov = &local_aiov;
	local_aio.uio_iovcnt = 1;
	local_aio.uio_rw = UIO_WRITE;
	local_aio.uio_segflg = UIO_SYSSPACE;
	local_aio.uio_td = td;
	local_aio.uio_offset = base_offset;
	local_aio.uio_resid = sizeof(struct myfs_ufs_extattr_header);

	/*
	 * Acquire locks.
	 *
	 * Don't need to get a lock on the backing file if the setattr is
	 * being applied to the backing file, as the lock is already held.
	 */
	if (attribute->uele_backing_vnode != vp)
		vn_lock(attribute->uele_backing_vnode, LK_EXCLUSIVE | LK_RETRY);

	ioflag = IO_NODELOCKED;
	if (ufs_extattr_sync)
		ioflag |= IO_SYNC;
	error = VOP_WRITE(attribute->uele_backing_vnode, &local_aio, ioflag,
	    ump->um_extattr.uepm_ucred);
	if (error)
		goto vopunlock_exit;

	if (local_aio.uio_resid != 0) {
		error = ENXIO;
		goto vopunlock_exit;
	}

	/*
	 * Write out user data.
	 */
	uio->uio_offset = base_offset +
	    sizeof(struct myfs_ufs_extattr_header);

	ioflag = IO_NODELOCKED;
	if (ufs_extattr_sync)
		ioflag |= IO_SYNC;
	error = VOP_WRITE(attribute->uele_backing_vnode, uio, ioflag,
	    ump->um_extattr.uepm_ucred);

vopunlock_exit:
	uio->uio_offset = 0;

	if (attribute->uele_backing_vnode != vp)
		VOP_UNLOCK(attribute->uele_backing_vnode, 0);

	return (error);
}
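/*
 * Illustration (not part of the kernel sources): from userland the
 * routines above are reached through FreeBSD's extattr system calls --
 * extattr_set_file() lands in ufs_extattr_set(), extattr_get_file() in
 * ufs_extattr_get(), and extattr_delete_file() in ufs_extattr_rm().
 * Because ufs_extattr_set() rejects any uio offset other than zero,
 * every set is a whole-value replace.  The path "/myfs/file" and the
 * attribute name "myattr" are hypothetical; the attribute must have
 * been enabled on the mount with extattrctl(8) first.
 */
#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	const char *path = "/myfs/file";	/* hypothetical */
	const char value[] = "blue";
	char buf[64];
	ssize_t n;

	/* Whole-value replace; partial-offset writes would fail (ENXIO). */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, "myattr",
	    value, sizeof(value)) < 0)
		err(1, "extattr_set_file");

	n = extattr_get_file(path, EXTATTR_NAMESPACE_USER, "myattr",
	    buf, sizeof(buf));
	if (n < 0)
		err(1, "extattr_get_file");
	printf("read back %zd bytes: %.*s\n", n, (int)n, buf);

	/* Removing the attribute goes through ufs_extattr_rm(). */
	if (extattr_delete_file(path, EXTATTR_NAMESPACE_USER, "myattr") < 0)
		err(1, "extattr_delete_file");
	return (0);
}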
int
sys_setacl(struct thread *td, struct setacl_args *uap)
{
	struct nameidata nd;
	struct myfs_inode *ip;
	int error;
	int i;
	int index = -1;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (nd.ni_vp->v_op != &myfs_ffs_vnodeops2) {
		vrele(nd.ni_vp);
		uprintf("File was not in the myfs filesystem.\n");
		return (0);
	}

	ip = MYFS_VTOI(nd.ni_vp);

	if (uap->type == 0) {		/* User entry. */
		if (ip->i_din2->user_cnt != 0) {
			for (i = 0; i < ip->i_din2->user_cnt; i++) {
				if (ip->i_din2->user_entry[i].idnum ==
				    uap->idnum) {
					index = i;
					break;
				}
			}
		}
		if (ip->i_din2->user_cnt == 0 || index == -1) {
			/*
			 * Not present yet; add a new entry.  Guard the
			 * fixed-size table first (16 slots, inferred from
			 * the "index != 15" checks in sys_clearacl below).
			 */
			if (ip->i_din2->user_cnt >= 16) {
				vrele(nd.ni_vp);
				return (ENOSPC);
			}
			/* An idnum of 0 means the caller's own real uid. */
			if (uap->idnum == 0)
				ip->i_din2->user_entry[ip->i_din2->user_cnt].idnum =
				    td->td_ucred->cr_ruid;
			else
				ip->i_din2->user_entry[ip->i_din2->user_cnt].idnum =
				    uap->idnum;
			ip->i_din2->user_entry[ip->i_din2->user_cnt].perms =
			    uap->perms;
			ip->i_din2->user_cnt++;
		} else if (index >= 0) {
			/*
			 * Entry exists; change it.  Only the file owner or
			 * root may do so.  Release the vnode before the
			 * error return (the flattened original leaked it).
			 */
			if (td->td_ucred->cr_ruid != ip->i_din2->di_uid &&
			    td->td_ucred->cr_ruid != 0) {
				vrele(nd.ni_vp);
				return (EPERM);
			}
			ip->i_din2->user_entry[index].perms = uap->perms;
		}
	} else if (uap->type == 1) {	/* Group entry. */
		if (td->td_ucred->cr_rgid == uap->idnum ||
		    td->td_ucred->cr_ruid == 0) {
			/* Same 16-slot guard as for the user table. */
			if (ip->i_din2->group_cnt >= 16) {
				vrele(nd.ni_vp);
				return (ENOSPC);
			}
			ip->i_din2->group_entry[ip->i_din2->group_cnt].idnum =
			    uap->idnum;
			ip->i_din2->group_entry[ip->i_din2->group_cnt].perms =
			    uap->perms;
			ip->i_din2->group_cnt++;
		} else {
			vrele(nd.ni_vp);
			return (EPERM);
		}
	}

	vrele(nd.ni_vp);
	return (0);
}
int
sys_getacl(struct thread *td, struct getacl_args *uap)
{
	struct nameidata nd;
	struct myfs_inode *ip;
	struct myfs_ufs2_dinode *dip;
	int error;
	int perms = -1;
	int whetherintable = 0;
	int i, j;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (nd.ni_vp->v_op != &myfs_ffs_vnodeops2) {
		vrele(nd.ni_vp);
		uprintf("File was not in the myfs filesystem.\n");
		return (0);
	}

	ip = MYFS_VTOI(nd.ni_vp);

	if (uap->type == 0) {
		for (i = 0; i < ip->i_din2->user_cnt; i++) {
			if (ip->i_din2->user_entry[i].idnum == uap->idnum) {
				whetherintable = 1;
				break;
			}
		}
	} else if (uap->type == 1) {
		for (j = 0; j < ip->i_din2->group_cnt; j++) {
			if (ip->i_din2->group_entry[j].idnum == uap->idnum) {
				whetherintable = 1;
				break;
			}
		}
	}

	/*
	 * Release the vnode before every error return; the flattened
	 * original leaked the reference on the EPERM and ENOENT paths.
	 */
	if (whetherintable == 0 && td->td_ucred->cr_ruid != 0) {
		td->td_retval[0] = -1;
		vrele(nd.ni_vp);
		return (EPERM);
	}

	dip = ip->i_din2;
	perms = entry_find(dip, uap->type, uap->idnum);
	if (perms == -1) {
		td->td_retval[0] = -1;
		vrele(nd.ni_vp);
		return (ENOENT);
	}

	/* Return the entry's permission number to the caller. */
	td->td_retval[0] = perms;
	vrele(nd.ni_vp);
	return (0);
}
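/*
 * sys_getacl() above relies on entry_find(), which is not shown in this
 * excerpt.  Below is a minimal sketch consistent with how the entry
 * tables are used elsewhere in this file: a linear scan of the counted
 * array that yields the matching entry's perms, or -1 when the id is
 * absent.  The struct and field names follow the code above, but the
 * actual helper may differ.
 */
static int
entry_find(struct myfs_ufs2_dinode *dip, int type, int idnum)
{
	int i;

	if (type == 0) {			/* user entries */
		for (i = 0; i < dip->user_cnt; i++)
			if (dip->user_entry[i].idnum == idnum)
				return (dip->user_entry[i].perms);
	} else if (type == 1) {			/* group entries */
		for (i = 0; i < dip->group_cnt; i++)
			if (dip->group_entry[i].idnum == idnum)
				return (dip->group_entry[i].perms);
	}
	return (-1);				/* no matching entry */
}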
int
sys_clearacl(struct thread *td, struct clearacl_args *uap)
{
	struct nameidata nd;
	struct myfs_inode *ip;
	struct myfs_ufs2_dinode *dip;
	int error;
	int index = -1;
	int i, j;

	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);

	if (nd.ni_vp->v_op != &myfs_ffs_vnodeops2) {
		vrele(nd.ni_vp);
		uprintf("File was not in the myfs filesystem.\n");
		return (0);
	}

	ip = MYFS_VTOI(nd.ni_vp);
	dip = ip->i_din2;

	if (uap->type == 0) {		/* Search the user entries. */
		for (i = 0; i < dip->user_cnt; i++) {
			if (dip->user_entry[i].idnum == uap->idnum) {
				index = i;
				break;
			}
		}
		if (index != -1) {
			dip->user_entry[index].idnum = 0;
			dip->user_entry[index].perms = 0;
			dip->user_cnt--;
			/*
			 * Compact the user access control list by shifting
			 * the later entries down, unless the removed entry
			 * was in the last slot (15) of the 16-entry table.
			 */
			if (index != 15 &&
			    dip->user_entry[index + 1].idnum != 0) {
				for (i = index + 1; i <= dip->user_cnt; i++) {
					dip->user_entry[i - 1].idnum =
					    dip->user_entry[i].idnum;
					dip->user_entry[i - 1].perms =
					    dip->user_entry[i].perms;
				}
				dip->user_entry[dip->user_cnt].idnum = 0;
				dip->user_entry[dip->user_cnt].perms = 0;
			}
		}
	} else if (uap->type == 1) {	/* Search the group entries. */
		for (j = 0; j < dip->group_cnt; j++) {
			if (dip->group_entry[j].idnum == uap->idnum) {
				index = j;
				break;
			}
		}
		if (index != -1) {
			dip->group_entry[index].idnum = 0;
			dip->group_entry[index].perms = 0;
			dip->group_cnt--;
			/* Compact the group access control list likewise. */
			if (index != 15 &&
			    dip->group_entry[index + 1].idnum != 0) {
				for (j = index + 1; j <= dip->group_cnt; j++) {
					dip->group_entry[j - 1].idnum =
					    dip->group_entry[j].idnum;
					dip->group_entry[j - 1].perms =
					    dip->group_entry[j].perms;
				}
				dip->group_entry[dip->group_cnt].idnum = 0;
				dip->group_entry[dip->group_cnt].perms = 0;
			}
		}
	}

	vrele(nd.ni_vp);
	return (0);
}
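/*
 * Illustration (not part of the kernel sources): with the three
 * syscalls installed, a userland test can drive them through
 * syscall(2).  The syscall numbers below are hypothetical placeholders
 * -- they depend on the slots chosen in syscalls.master -- as are the
 * PERM_RW permission encoding, the test path, and the argument order,
 * which is assumed to match the uap fields used above
 * (name, type, idnum[, perms]).
 */
#include <sys/syscall.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

#define SYS_setacl	210	/* hypothetical slot */
#define SYS_getacl	211	/* hypothetical slot */
#define SYS_clearacl	212	/* hypothetical slot */
#define PERM_RW		6	/* hypothetical read+write encoding */

int
main(void)
{
	const char *path = "/myfs/file";	/* hypothetical */
	long perms;

	/* Grant uid 1001 read/write on the file (type 0 = user entry). */
	if (syscall(SYS_setacl, path, 0, 1001, PERM_RW) != 0)
		err(1, "setacl");

	/* Read the entry back; perms arrive in the syscall return value. */
	perms = syscall(SYS_getacl, path, 0, 1001);
	if (perms == -1)
		err(1, "getacl");
	printf("uid 1001 perms: %ld\n", perms);

	/* Remove the entry again. */
	if (syscall(SYS_clearacl, path, 0, 1001) != 0)
		err(1, "clearacl");
	return (0);
}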