/* * Remove a directory entry after a call to namei, using * the parameters which it left in nameidata. The entry * dp->i_offset contains the offset into the directory of the * entry to be eliminated. The dp->i_count field contains the * size of the previous record in the directory. If this * is 0, the first entry is being deleted, so we need only * zero the inode number to mark the entry as free. If the * entry is not the first in the directory, we must reclaim * the space of the now empty record by adding the record size * to the size of the previous entry. */ int ext2fs_dirremove(struct vnode *dvp, struct componentname *cnp) { struct inode *dp; struct ext2fs_direct *ep; struct buf *bp; int error; dp = VTOI(dvp); if (dp->i_count == 0) { /* * First entry in block: set d_ino to zero. */ error = ext2fs_bufatoff(dp, (off_t)dp->i_offset, (char **)&ep, &bp); if (error != 0) return (error); ep->e2d_ino = 0; error = VOP_BWRITE(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Collapse new free space into previous entry. */ error = ext2fs_bufatoff(dp, (off_t)(dp->i_offset - dp->i_count), (char **)&ep, &bp); if (error != 0) return (error); ep->e2d_reclen = h2fs16(fs2h16(ep->e2d_reclen) + dp->i_reclen); error = VOP_BWRITE(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); }
/*
 * Attempt to expand the size of a directory by one filesystem block.
 *
 * The last allocated direct block is shifted up one slot so a fresh
 * block can take its logical position; the first DIRBLKSIZ chunk of
 * the old block is copied into the new position and the remainder of
 * the new block is initialized to empty directory chunks.  On any
 * read error (or if the operator declines), the block-pointer and
 * size changes are rolled back via the "bad" label.
 *
 * Returns 1 on successful expansion, 0 on failure or when the
 * directory cannot be expanded (no direct slot free, hole at the
 * end, or zero-length directory).
 */
static int
expanddir(struct uvnode *vp, union lfs_dinode *dp, char *name)
{
	daddr_t lastbn;
	struct ubuf *bp;
	char *cp, firstblk[LFS_DIRBLKSIZ];

	lastbn = lfs_lblkno(fs, lfs_dino_getsize(fs, dp));
	/* Need a free direct slot and a real last block to shift. */
	if (lastbn >= ULFS_NDADDR - 1 || lfs_dino_getdb(fs, dp, lastbn) == 0 ||
	    lfs_dino_getsize(fs, dp) == 0)
		return (0);
	/* Move the old last block up one logical slot; new block at lastbn. */
	lfs_dino_setdb(fs, dp, lastbn + 1, lfs_dino_getdb(fs, dp, lastbn));
	lfs_dino_setdb(fs, dp, lastbn, 0);
	bp = getblk(vp, lastbn, lfs_sb_getbsize(fs));
	VOP_BWRITE(bp);
	lfs_dino_setsize(fs, dp, lfs_dino_getsize(fs, dp) +
	    lfs_sb_getbsize(fs));
	lfs_dino_setblocks(fs, dp, lfs_dino_getblocks(fs, dp) +
	    lfs_btofsb(fs, lfs_sb_getbsize(fs)));
	/* Save the first dir chunk of the (shifted) old last block. */
	bread(vp, lfs_dino_getdb(fs, dp, lastbn + 1),
	    (long) lfs_dblksize(fs, dp, lastbn + 1), 0, &bp);
	if (bp->b_flags & B_ERROR)
		goto bad;
	memcpy(firstblk, bp->b_data, LFS_DIRBLKSIZ);
	/* Install it at the head of the new block, zero the rest. */
	bread(vp, lastbn, lfs_sb_getbsize(fs), 0, &bp);
	if (bp->b_flags & B_ERROR)
		goto bad;
	memcpy(bp->b_data, firstblk, LFS_DIRBLKSIZ);
	for (cp = &bp->b_data[LFS_DIRBLKSIZ];
	    cp < &bp->b_data[lfs_sb_getbsize(fs)];
	    cp += LFS_DIRBLKSIZ)
		zerodirblk(cp);
	VOP_BWRITE(bp);
	/* Clear the chunk we copied out of the shifted block. */
	bread(vp, lfs_dino_getdb(fs, dp, lastbn + 1),
	    (long) lfs_dblksize(fs, dp, lastbn + 1), 0, &bp);
	if (bp->b_flags & B_ERROR)
		goto bad;
	zerodirblk(bp->b_data);
	pwarn("NO SPACE LEFT IN %s", name);
	if (preen)
		printf(" (EXPANDED)\n");
	else if (reply("EXPAND") == 0)
		goto bad;
	VOP_BWRITE(bp);
	inodirty(VTOI(vp));
	return (1);
bad:
	/*
	 * Roll back the pointer/size changes.
	 * NOTE(review): the error paths reach here without releasing the
	 * buffer obtained by bread() — possible buffer leak; confirm
	 * against the ubuf cache semantics.
	 */
	lfs_dino_setdb(fs, dp, lastbn, lfs_dino_getdb(fs, dp, lastbn + 1));
	lfs_dino_setdb(fs, dp, lastbn + 1, 0);
	lfs_dino_setsize(fs, dp, lfs_dino_getsize(fs, dp) -
	    lfs_sb_getbsize(fs));
	lfs_dino_setblocks(fs, dp, lfs_dino_getblocks(fs, dp) -
	    lfs_btofsb(fs, lfs_sb_getbsize(fs)));
	return (0);
}
/*
 * Return a cleared inode to the Ifile free list and charge the
 * DINODE1_SIZE bytes it occupied back to its old segment.
 *
 * If the inode was already unallocated (no disk address) this is a
 * no-op apart from releasing the Ifile buffer.
 */
void
clearinode(ino_t inumber)
{
	struct ubuf *bp;
	IFILE *ifp;
	daddr_t daddr;
	SEGUSE *sup;
	u_int32_t oldsn;

	/* Send cleared inode to the free list. */
	LFS_IENTRY(ifp, fs, inumber, bp);
	daddr = ifp->if_daddr;
	if (daddr == LFS_UNUSED_DADDR) {
		brelse(bp, 0);
		return;
	}
	ifp->if_daddr = LFS_UNUSED_DADDR;
	ifp->if_nextfree = fs->lfs_freehd;
	fs->lfs_freehd = inumber;
	sbdirty();
	VOP_BWRITE(bp);

	/*
	 * Update segment usage.  daddr is known valid here: the
	 * LFS_UNUSED_DADDR case returned above, so the redundant
	 * re-check in the original code has been dropped.
	 */
	oldsn = dtosn(fs, daddr);
	seg_table[oldsn].su_nbytes -= DINODE1_SIZE;
	LFS_SEGENTRY(sup, fs, oldsn, bp);
	sup->su_nbytes -= DINODE1_SIZE;
	LFS_WRITESEGENTRY(sup, fs, oldsn, bp);	/* Ifile */
}
/*
 * Asynchronous block write: flag the buffer B_ASYNC and hand it to
 * VOP_BWRITE(), which then won't wait for I/O completion.
 */
void
bawrite(struct buf *bp)
{

	SET(bp->b_flags, B_ASYNC);
	(void)VOP_BWRITE(bp);
}
/*
 * Rewrite an existing directory entry to point at the inode
 * supplied.  The parameters describing the directory entry are
 * set up by a call to namei.
 *
 * dp is the directory, oip the inode the entry previously referred
 * to (its link count is decremented here), newinum/newtype the new
 * target.  isrmdir is passed through to the soft-dependency code.
 */
int
ufs_dirrewrite(struct inode *dp, struct inode *oip, ufsino_t newinum,
    int newtype, int isrmdir)
{
	struct buf *bp;
	struct direct *ep;
	struct vnode *vdp = ITOV(dp);
	int error;

	/* Map the buffer holding the entry at dp->i_offset. */
	error = UFS_BUFATOFF(dp, (off_t)dp->i_offset, (char **)&ep, &bp);
	if (error)
		return (error);
	ep->d_ino = newinum;
	/* d_type exists only on filesystems with the new dirent format. */
	if (vdp->v_mount->mnt_maxsymlinklen > 0)
		ep->d_type = newtype;
	oip->i_effnlink--;
	if (DOINGSOFTDEP(vdp)) {
		/* Softdep: defer the on-disk nlink update; delayed write. */
		softdep_change_linkcnt(oip, 0);
		softdep_setup_directory_change(bp, dp, oip, newinum, isrmdir);
		bdwrite(bp);
	} else {
		/* Drop the old target's link count immediately. */
		DIP_ADD(oip, nlink, -1);
		oip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(oip, MNT_WAIT);
		if (DOINGASYNC(vdp)) {
			bdwrite(bp);
			error = 0;
		} else {
			error = VOP_BWRITE(bp);
		}
	}
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	return (error);
}
/*
 * Rump wrapper for VOP_BWRITE: enter the rump kernel context,
 * perform the vnode operation, then leave the context again.
 */
int
RUMP_VOP_BWRITE(struct vnode *vp, struct buf *bp)
{
	int rv;

	rump_schedule();
	rv = VOP_BWRITE(vp, bp);
	rump_unschedule();

	return rv;
}
/* * Remove a directory entry after a call to namei, using * the auxiliary results it provided. The entry * ulr_offset contains the offset into the directory of the * entry to be eliminated. The ulr_count field contains the * size of the previous record in the directory. If this * is 0, the first entry is being deleted, so we need only * zero the inode number to mark the entry as free. If the * entry is not the first in the directory, we must reclaim * the space of the now empty record by adding the record size * to the size of the previous entry. */ int ext2fs_dirremove(struct vnode *dvp, const struct ufs_lookup_results *ulr, struct componentname *cnp) { struct inode *dp; struct ext2fs_direct *ep; struct buf *bp; int error; dp = VTOI(dvp); if (ulr->ulr_count == 0) { /* * First entry in block: set d_ino to zero. */ error = ext2fs_blkatoff(dvp, (off_t)ulr->ulr_offset, (void *)&ep, &bp); if (error != 0) return (error); ep->e2d_ino = 0; error = VOP_BWRITE(bp->b_vp, bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); } /* * Collapse new free space into previous entry. */ error = ext2fs_blkatoff(dvp, (off_t)(ulr->ulr_offset - ulr->ulr_count), (void *)&ep, &bp); if (error != 0) return (error); ep->e2d_reclen = h2fs16(fs2h16(ep->e2d_reclen) + ulr->ulr_reclen); error = VOP_BWRITE(bp->b_vp, bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); }
/*
 * Allocate an unused inode for fsck's use.
 *
 * Starting at 'request' (ROOTINO if zero), find the first inode whose
 * statemap entry is USTATE, mark it DSTATE/FSTATE according to 'type',
 * allocate it in the filesystem, and initialize its on-disk dinode to
 * a single block of size lfs_fsize.  Returns the inode number, or 0
 * if the request slot is busy or the type is unsupported.
 */
ino_t
allocino(ino_t request, int type)
{
	ino_t ino;
	struct ufs1_dinode *dp;
	time_t t;
	struct uvnode *vp;
	struct ubuf *bp;

	if (request == 0)
		request = ROOTINO;
	else if (statemap[request] != USTATE)
		return (0);
	for (ino = request; ino < maxino; ino++)
		if (statemap[ino] == USTATE)
			break;
	/*
	 * NOTE(review): when the search exhausts the map we extend the
	 * Ifile but continue with ino == maxino; presumably extend_ifile
	 * makes that slot valid — confirm maxino/statemap are updated.
	 */
	if (ino == maxino)
		extend_ifile(fs);
	switch (type & IFMT) {
	case IFDIR:
		statemap[ino] = DSTATE;
		break;
	case IFREG:
	case IFLNK:
		statemap[ino] = FSTATE;
		break;
	default:
		return (0);
	}
	vp = lfs_valloc(fs, ino);
	if (vp == NULL)
		return (0);
	dp = (VTOI(vp)->i_din.ffs1_din);
	/* Allocate and write the inode's first (only) data block. */
	bp = getblk(vp, 0, fs->lfs_fsize);
	VOP_BWRITE(bp);
	dp->di_mode = type;
	(void) time(&t);
	dp->di_atime = t;
	dp->di_mtime = dp->di_ctime = dp->di_atime;
	dp->di_size = fs->lfs_fsize;
	dp->di_blocks = btofsb(fs, fs->lfs_fsize);
	n_files++;
	inodirty(VTOI(vp));
	typemap[ino] = IFTODT(type);
	return (ino);
}
/*
 * Scan each entry in a directory block, invoking idesc->id_func on a
 * private copy of every entry.  If the callback reports ALTERED, the
 * modified copy is written back into the block on disk.  Returns the
 * callback's STOP status, SKIP for an out-of-range block, or
 * KEEPON/STOP depending on remaining file size.
 */
int
dirscan(struct inodesc *idesc)
{
	LFS_DIRHEADER *dp;
	struct ubuf *bp;
	int dsize, n;
	long blksiz;
	char dbuf[LFS_DIRBLKSIZ];
	struct uvnode *vp;

	if (idesc->id_type != DATA)
		errexit("wrong type to dirscan %d", idesc->id_type);
	/* Round a misaligned size up to a directory-block boundary. */
	if (idesc->id_entryno == 0 &&
	    (idesc->id_filesize & (LFS_DIRBLKSIZ - 1)) != 0)
		idesc->id_filesize = roundup(idesc->id_filesize,
		    LFS_DIRBLKSIZ);
	blksiz = idesc->id_numfrags * lfs_sb_getfsize(fs);
	if (chkrange(idesc->id_blkno, idesc->id_numfrags)) {
		idesc->id_filesize -= blksiz;
		return (SKIP);
	}
	idesc->id_loc = 0;
	vp = vget(fs, idesc->id_number);
	for (dp = fsck_readdir(vp, idesc); dp != NULL;
	    dp = fsck_readdir(vp, idesc)) {
		/* Work on a copy so the callback can modify it freely. */
		dsize = lfs_dir_getreclen(fs, dp);
		memcpy(dbuf, dp, (size_t) dsize);
		idesc->id_dirp = (LFS_DIRHEADER *) dbuf;
		if ((n = (*idesc->id_func) (idesc)) & ALTERED) {
			/* Write the (possibly changed) entry back. */
			bread(vp, idesc->id_lblkno, blksiz, 0, &bp);
			memcpy(bp->b_data + idesc->id_loc - dsize, dbuf,
			    (size_t) dsize);
			VOP_BWRITE(bp);
			sbdirty();
		}
		if (n & STOP)
			return (n);
	}
	return (idesc->id_filesize > 0 ? KEEPON : STOP);
}
/* * Rewrite an existing directory entry to point at the inode * supplied. The parameters describing the directory entry are * set up by a call to namei. */ int ext2fs_dirrewrite(struct inode *dp, struct inode *ip, struct componentname *cnp) { struct buf *bp; struct ext2fs_direct *ep; int error; error = ext2fs_bufatoff(dp, (off_t)dp->i_offset, (char **)&ep, &bp); if (error != 0) return (error); ep->e2d_ino = h2fs32(ip->i_number); if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 && (ip->i_e2fs->e2fs.e2fs_features_incompat & EXT2F_INCOMPAT_FTYPE)) { ep->e2d_type = inot2ext2dt(IFTODT(ip->i_e2fs_mode)); } else { ep->e2d_type = 0; } error = VOP_BWRITE(bp); dp->i_flag |= IN_CHANGE | IN_UPDATE; return (error); }
/*
 * Rewrite an existing directory entry to point at the inode
 * supplied.  The parameters describing the directory entry are
 * set up by a call to namei (passed in via ulr).
 */
int
ext2fs_dirrewrite(struct inode *dp, const struct ufs_lookup_results *ulr,
    struct inode *ip, struct componentname *cnp)
{
	struct buf *bp;
	struct ext2fs_direct *ep;
	struct vnode *vdp = ITOV(dp);
	int error;

	/* Map the buffer holding the entry at ulr_offset. */
	error = ext2fs_blkatoff(vdp, (off_t)ulr->ulr_offset,
	    (void *)&ep, &bp);
	if (error != 0)
		return (error);
	ep->e2d_ino = h2fs32(ip->i_number);
	/* e2d_type is only meaningful with the FTYPE feature on rev > 0. */
	if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 &&
	    (ip->i_e2fs->e2fs.e2fs_features_incompat &
	     EXT2F_INCOMPAT_FTYPE)) {
		ep->e2d_type = inot2ext2dt(IFTODT(ip->i_e2fs_mode));
	} else {
		ep->e2d_type = 0;
	}
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	return (error);
}
/* VOP_BWRITE ULFS_NIADDR+2 times */
/*
 * Allocate the logical block containing [startoffset, startoffset+iosize)
 * in vp, creating any missing indirect blocks, and optionally (if bpp is
 * non-NULL) return a buffer for it.  Blocks that exist only in the cache
 * are given the placeholder address UNWRITTEN; free-space accounting is
 * done up front so failures happen before anything is assigned.
 */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize,
    kauth_cred_t cred, int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= lfs_sb_getbsize(fs));
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: a block beyond the end of file, a block in the
	 * file that may or may not already have a disk address, or a
	 * write of an entire block.
	 *
	 * If the daddr is UNWRITTEN, the block already exists in the
	 * cache (it was read or written earlier): don't count it as a
	 * new block or zero its contents.  Otherwise allocate any
	 * necessary indirect blocks.
	 *
	 * Writing beyond EOF: if the old last block was a fragment,
	 * it must be rewritten at full size first.
	 */
	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < lfs_sb_getbsize(fs) && osize > 0) {
			/* Old last block was a fragment: extend it. */
			if ((error = lfs_fragextend(vp, osize,
			    lfs_sb_getbsize(fs), lastblock,
			    (bpp ? &bp : NULL), cred)))
				return (error);
			ip->i_size = (lastblock + 1) * lfs_sb_getbsize(fs);
			lfs_dino_setsize(fs, ip->i_din, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If we are writing a direct block that is the last block in
	 * the file and offset + iosize is less than a full block, we
	 * can write one or more fragments.  Two cases: the block is
	 * brand new (allocate it at the right size) or it exists and
	 * may need extending.
	 */
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			lfs_sb_subbfree(fs, frags);
			mutex_exit(&lfs_lock);
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error = lfs_fragextend(vp, osize, nsize,
				    lbn, (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	KASSERT(daddr <= LFS_MAX_DADDR(fs));

	/*
	 * Do byte accounting all at once, so we can gracefully fail
	 * *before* we start assigning blocks.
	 */
	frags = fs->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;
	}
	/* Count indirect blocks that would need creating. */
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		lfs_sb_subbfree(fs, bcount);
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 &&
		    lfs_dino_getib(fs, ip->i_din, indirs[0].in_off) == 0) {
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off,
			    UNWRITTEN);
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = lfs_dino_getib(fs, ip->i_din,
			    indirs[0].in_off);
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    lfs_sb_getbsize(fs), 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags &
				    (BO_DELWRI | BO_DONE))) {
					/* Read the existing indirect block. */
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may
				 * not.  If that is the case mark it
				 * UNWRITTEN to keep the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
					    __LINE__, indirs[i].in_lbn,
					    ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block, in which
	 * case we need to do accounting.  A truly new block is reported
	 * as UNASSIGNED by ulfs_bmaparray; once allocated it carries
	 * the disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		/* Record UNWRITTEN in the parent (inode or indirect blk). */
		switch (num) {
		    case 0:
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
			break;
		    case 1:
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off,
			    UNWRITTEN);
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs),
				  B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
				    __LINE__, idp->in_lbn,
				    ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == lfs_sb_getbsize(fs))
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}
/*
 * Write a directory entry after a call to namei, using the parameters
 * that it left in nameidata.  The argument ip is the inode which the new
 * directory entry will refer to.  Dvp is a pointer to the directory to
 * be written, which was left locked by namei.  Remaining parameters
 * (ulr_offset, ulr_count) indicate how the space for the new
 * entry is to be obtained:
 *   - ulr_count == 0: no free space found; append a fresh directory
 *     block at ulr_offset and write the entry there.
 *   - ulr_count != 0: compact the range [ulr_offset, ulr_offset +
 *     ulr_count) so the accumulated free space ends up in one chunk,
 *     then install the new entry in it.
 */
int
ext2fs_direnter(struct inode *ip, struct vnode *dvp,
		const struct ufs_lookup_results *ulr,
		struct componentname *cnp)
{
	struct ext2fs_direct *ep, *nep;
	struct inode *dp;
	struct buf *bp;
	struct ext2fs_direct newdir;
	struct iovec aiov;
	struct uio auio;
	u_int dsize;
	int error, loc, newentrysize, spacefree;
	char *dirbuf;
	struct ufsmount *ump = VFSTOUFS(dvp->v_mount);
	int dirblksiz = ump->um_dirblksiz;

	dp = VTOI(dvp);

	/* Build the new entry (on-disk byte order for multibyte fields). */
	newdir.e2d_ino = h2fs32(ip->i_number);
	newdir.e2d_namlen = cnp->cn_namelen;
	if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 &&
	    (ip->i_e2fs->e2fs.e2fs_features_incompat &
	     EXT2F_INCOMPAT_FTYPE)) {
		newdir.e2d_type = inot2ext2dt(IFTODT(ip->i_e2fs_mode));
	} else {
		newdir.e2d_type = 0;
	}
	memcpy(newdir.e2d_name, cnp->cn_nameptr,
	    (unsigned)cnp->cn_namelen + 1);
	newentrysize = EXT2FS_DIRSIZ(cnp->cn_namelen);
	if (ulr->ulr_count == 0) {
		/*
		 * If ulr_count is 0, then namei could find no space in
		 * the directory.  Here, ulr_offset will be on a
		 * directory block boundary and we will write the new
		 * entry into a fresh block.
		 */
		if (ulr->ulr_offset & (dirblksiz - 1))
			panic("ext2fs_direnter: newblk");
		auio.uio_offset = ulr->ulr_offset;
		/* New entry owns the whole fresh directory block. */
		newdir.e2d_reclen = h2fs16(dirblksiz);
		auio.uio_resid = newentrysize;
		aiov.iov_len = newentrysize;
		aiov.iov_base = (void *)&newdir;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_rw = UIO_WRITE;
		UIO_SETUP_SYSSPACE(&auio);
		error = VOP_WRITE(dvp, &auio, IO_SYNC, cnp->cn_cred);
		if (dirblksiz > dvp->v_mount->mnt_stat.f_bsize)
			/* XXX should grow with balloc() */
			panic("ext2fs_direnter: frag size");
		else if (!error) {
			error = ext2fs_setsize(dp,
				roundup(ext2fs_size(dp), dirblksiz));
			if (error)
				return (error);
			dp->i_flag |= IN_CHANGE;
			uvm_vnp_setsize(dvp, ext2fs_size(dp));
		}
		return (error);
	}
	/*
	 * If ulr_count is non-zero, then namei found space for the new
	 * entry in the range ulr_offset to ulr_offset + ulr_count in
	 * the directory.  To use this space, we may have to compact the
	 * entries located there, by copying them together towards the
	 * beginning of the block, leaving the free space in one usable
	 * chunk at the end.
	 */

	/*
	 * Get the block containing the space for the new directory entry.
	 */
	if ((error = ext2fs_blkatoff(dvp, (off_t)ulr->ulr_offset,
	    &dirbuf, &bp)) != 0)
		return (error);
	/*
	 * Find space for the new entry.  In the simple case, the entry at
	 * offset base will have the space.  If it does not, then namei
	 * arranged that compacting the region ulr_offset to
	 * ulr_offset + ulr_count would yield the space.
	 */
	ep = (struct ext2fs_direct *)dirbuf;
	dsize = EXT2FS_DIRSIZ(ep->e2d_namlen);
	spacefree = fs2h16(ep->e2d_reclen) - dsize;
	for (loc = fs2h16(ep->e2d_reclen); loc < ulr->ulr_count; ) {
		nep = (struct ext2fs_direct *)(dirbuf + loc);
		if (ep->e2d_ino) {
			/* trim the existing slot */
			ep->e2d_reclen = h2fs16(dsize);
			ep = (struct ext2fs_direct *)((char *)ep + dsize);
		} else {
			/* overwrite; nothing there; header is ours */
			spacefree += dsize;
		}
		dsize = EXT2FS_DIRSIZ(nep->e2d_namlen);
		spacefree += fs2h16(nep->e2d_reclen) - dsize;
		loc += fs2h16(nep->e2d_reclen);
		/* Slide the live entry down over the free space. */
		memcpy((void *)ep, (void *)nep, dsize);
	}
	/*
	 * Update the pointer fields in the previous entry (if any),
	 * copy in the new entry, and write out the block.
	 */
	if (ep->e2d_ino == 0) {
#ifdef DIAGNOSTIC
		if (spacefree + dsize < newentrysize)
			panic("ext2fs_direnter: compact1");
#endif
		/* Free slot: the new entry takes over its whole record. */
		newdir.e2d_reclen = h2fs16(spacefree + dsize);
	} else {
#ifdef DIAGNOSTIC
		if (spacefree < newentrysize) {
			printf("ext2fs_direnter: compact2 %u %u",
			    (u_int)spacefree, (u_int)newentrysize);
			panic("ext2fs_direnter: compact2");
		}
#endif
		/* Live slot: split its record between it and the new entry. */
		newdir.e2d_reclen = h2fs16(spacefree);
		ep->e2d_reclen = h2fs16(dsize);
		ep = (struct ext2fs_direct *)((char *)ep + dsize);
	}
	memcpy((void *)ep, (void *)&newdir, (u_int)newentrysize);
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	/* Trim any trailing slack namei told us about. */
	if (!error && ulr->ulr_endoff && ulr->ulr_endoff < ext2fs_size(dp))
		error = ext2fs_truncate(dvp, (off_t)ulr->ulr_endoff, IO_SYNC,
		    cnp->cn_cred);
	return (error);
}
/*
 * Set the length of vnode ovp to 'length', either growing the file
 * (allocating the last byte) or shrinking it (releasing blocks past
 * the new end, zeroing the tail of a partial block, and correcting
 * segment/free-space accounting).  ioflag carries IO_SYNC; cred is
 * used for allocation checks.
 */
int
lfs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
	daddr_t lastblock;
	struct inode *oip = VTOI(ovp);
	daddr_t bn, lbn, lastiblock[ULFS_NIADDR], indir_lbn[ULFS_NIADDR];
	/* XXX ondisk32 */
	int32_t newblks[ULFS_NDADDR + ULFS_NIADDR];
	struct lfs *fs;
	struct buf *bp;
	int offset, size, level;
	daddr_t count, rcount;
	daddr_t blocksreleased = 0, real_released = 0;
	int i, nblocks;
	int aflags, error, allerror = 0;
	off_t osize;
	long lastseg;
	size_t bc;
	int obufsize, odb;
	int usepc;

	/* Special files have no data blocks to truncate. */
	if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
	    ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
		KASSERT(oip->i_size == 0);
		return 0;
	}

	if (length < 0)
		return (EINVAL);

	/*
	 * Just return and not update modification times.
	 */
	if (oip->i_size == length) {
		/* still do a uvm_vnp_setsize() as writesize may be larger */
		uvm_vnp_setsize(ovp, length);
		return (0);
	}

	fs = oip->i_lfs;

	/* Short symlinks live in the inode itself; just wipe them. */
	if (ovp->v_type == VLNK &&
	    (oip->i_size < fs->um_maxsymlinklen ||
	     (fs->um_maxsymlinklen == 0 &&
	      oip->i_ffs1_blocks == 0))) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("lfs_truncate: partial truncate of symlink");
#endif
		memset((char *)SHORTLINK(oip), 0, (u_int)oip->i_size);
		oip->i_size = oip->i_ffs1_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (lfs_update(ovp, NULL, NULL, 0));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (lfs_update(ovp, NULL, NULL, 0));
	}
	lfs_imtime(fs);
	osize = oip->i_size;
	/* Page-cache path for regular files (but never for the Ifile). */
	usepc = (ovp->v_type == VREG && ovp != fs->lfs_ivnode);

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * Lengthen the size of the file.  We must ensure that the
	 * last byte of the file is allocated.  Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > fs->um_maxfilesize)
			return (EFBIG);
		aflags = B_CLRBUF;
		if (ioflag & IO_SYNC)
			aflags |= B_SYNC;

		if (usepc) {
			/*
			 * If the old last block was a fragment and the new
			 * size is in a different block, fill out the old
			 * block to full size first.
			 */
			if (lfs_lblkno(fs, osize) < ULFS_NDADDR &&
			    lfs_lblkno(fs, osize) != lfs_lblkno(fs, length) &&
			    lfs_blkroundup(fs, osize) != osize) {
				off_t eob;

				eob = lfs_blkroundup(fs, osize);
				uvm_vnp_setwritesize(ovp, eob);
				error = ulfs_balloc_range(ovp, osize,
				    eob - osize, cred, aflags);
				if (error) {
					/* Undo the partial growth. */
					(void) lfs_truncate(ovp, osize,
						    ioflag & IO_SYNC, cred);
					return error;
				}
				if (ioflag & IO_SYNC) {
					mutex_enter(ovp->v_interlock);
					VOP_PUTPAGES(ovp,
					    trunc_page(osize & lfs_sb_getbmask(fs)),
					    round_page(eob),
					    PGO_CLEANIT | PGO_SYNCIO);
				}
			}
			uvm_vnp_setwritesize(ovp, length);
			/* Allocate the final byte of the new size. */
			error = ulfs_balloc_range(ovp, length - 1, 1, cred,
						  aflags);
			if (error) {
				(void) lfs_truncate(ovp, osize,
						    ioflag & IO_SYNC, cred);
				return error;
			}
			uvm_vnp_setsize(ovp, length);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			KASSERT(ovp->v_size == oip->i_size);
			oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size +
			    lfs_sb_getbsize(fs) - 1) - 1;
			return (lfs_update(ovp, NULL, NULL, 0));
		} else {
			/* Buffer-cache path (Ifile, non-VREG). */
			error = lfs_reserve(fs, ovp, NULL,
			    lfs_btofsb(fs, (ULFS_NIADDR + 2) <<
				lfs_sb_getbshift(fs)));
			if (error)
				return (error);
			error = lfs_balloc(ovp, length - 1, 1, cred,
					   aflags, &bp);
			lfs_reserve(fs, ovp, NULL,
			    -lfs_btofsb(fs, (ULFS_NIADDR + 2) <<
				lfs_sb_getbshift(fs)));
			if (error)
				return (error);
			oip->i_ffs1_size = oip->i_size = length;
			uvm_vnp_setsize(ovp, length);
			(void) VOP_BWRITE(bp->b_vp, bp);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size +
			    lfs_sb_getbsize(fs) - 1) - 1;
			return (lfs_update(ovp, NULL, NULL, 0));
		}
	}

	if ((error = lfs_reserve(fs, ovp, NULL,
	    lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) <<
		lfs_sb_getbshift(fs)))) != 0)
		return (error);

	/*
	 * Shorten the size of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.  Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = lfs_blkoff(fs, length);
	lastseg = -1;
	bc = 0;

	if (ovp != fs->lfs_ivnode)
		lfs_seglock(fs, SEGM_PROT);
	if (offset == 0) {
		oip->i_size = oip->i_ffs1_size = length;
	} else if (!usepc) {
		/* Buffer-cache path: zero the tail of the last block. */
		lbn = lfs_lblkno(fs, length);
		aflags = B_CLRBUF;
		if (ioflag & IO_SYNC)
			aflags |= B_SYNC;
		error = lfs_balloc(ovp, length - 1, 1, cred, aflags, &bp);
		if (error) {
			lfs_reserve(fs, ovp, NULL,
			    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) <<
				lfs_sb_getbshift(fs)));
			goto errout;
		}
		obufsize = bp->b_bufsize;
		odb = lfs_btofsb(fs, bp->b_bcount);
		oip->i_size = oip->i_ffs1_size = length;
		size = lfs_blksize(fs, oip, lbn);
		if (ovp->v_type != VDIR)
			memset((char *)bp->b_data + offset, 0,
			       (u_int)(size - offset));
		allocbuf(bp, size, 1);
		if ((bp->b_flags & B_LOCKED) != 0 && bp->b_iodone == NULL) {
			/* Keep the locked-queue byte count accurate. */
			mutex_enter(&lfs_lock);
			locked_queue_bytes -= obufsize - bp->b_bufsize;
			mutex_exit(&lfs_lock);
		}
		if (bp->b_oflags & BO_DELWRI) {
			lfs_sb_addavail(fs, odb - lfs_btofsb(fs, size));
			/* XXX shouldn't this wake up on lfs_availsleep? */
		}
		(void) VOP_BWRITE(bp->b_vp, bp);
	} else { /* vp->v_type == VREG && length < osize && offset != 0 */
		/*
		 * When truncating a regular file down to a non-block-aligned
		 * size, we must zero the part of last block which is past
		 * the new EOF.  We must synchronously flush the zeroed pages
		 * to disk since the new pages will be invalidated as soon
		 * as we inform the VM system of the new, smaller size.
		 * We must do this before acquiring the GLOCK, since fetching
		 * the pages will acquire the GLOCK internally.
		 * So there is a window where another thread could see a whole
		 * zeroed page past EOF, but that's life.
		 */
		daddr_t xlbn;
		voff_t eoz;

		aflags = ioflag & IO_SYNC ? B_SYNC : 0;
		error = ulfs_balloc_range(ovp, length - 1, 1, cred, aflags);
		if (error) {
			lfs_reserve(fs, ovp, NULL,
			    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) <<
				lfs_sb_getbshift(fs)));
			goto errout;
		}
		xlbn = lfs_lblkno(fs, length);
		size = lfs_blksize(fs, oip, xlbn);
		eoz = MIN(lfs_lblktosize(fs, xlbn) + size, osize);
		ubc_zerorange(&ovp->v_uobj, length, eoz - length,
		    UBC_UNMAP_FLAG(ovp));
		if (round_page(eoz) > round_page(length)) {
			mutex_enter(ovp->v_interlock);
			error = VOP_PUTPAGES(ovp, round_page(length),
			    round_page(eoz),
			    PGO_CLEANIT | PGO_DEACTIVATE |
			    ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
			if (error) {
				lfs_reserve(fs, ovp, NULL,
				    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) <<
					lfs_sb_getbshift(fs)));
				goto errout;
			}
		}
	}

	genfs_node_wrlock(ovp);

	oip->i_size = oip->i_ffs1_size = length;
	uvm_vnp_setsize(ovp, length);

	/*
	 * Calculate index into inode's block list of last direct and
	 * indirect blocks (if any) which we want to keep.  Lastblock
	 * is -1 when the file is truncated to 0.
	 */
	/* Avoid sign overflow - XXX assumes that off_t is a quad_t. */
	if (length > QUAD_MAX - lfs_sb_getbsize(fs))
		lastblock = lfs_lblkno(fs, QUAD_MAX - lfs_sb_getbsize(fs));
	else
		lastblock = lfs_lblkno(fs, length +
		    lfs_sb_getbsize(fs) - 1) - 1;
	lastiblock[SINGLE] = lastblock - ULFS_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - LFS_NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] -
	    LFS_NINDIR(fs) * LFS_NINDIR(fs);
	nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));
	/*
	 * Record changed file and block pointers before we start
	 * freeing blocks.  lastiblock values are also normalized to -1
	 * for calls to lfs_indirtrunc below.
	 */
	memcpy((void *)newblks, (void *)&oip->i_ffs1_db[0], sizeof newblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			newblks[ULFS_NDADDR+level] = 0;
			lastiblock[level] = -1;
		}
	for (i = ULFS_NDADDR - 1; i > lastblock; i--)
		newblks[i] = 0;

	/* Restore old size while buffers past the new end are dropped. */
	oip->i_size = oip->i_ffs1_size = osize;
	error = lfs_vtruncbuf(ovp, lastblock + 1, false, 0);
	if (error && !allerror)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -ULFS_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - LFS_NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] -
	    LFS_NINDIR(fs) * LFS_NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ffs1_ib[level];
		if (bn != 0) {
			error = lfs_indirtrunc(oip, indir_lbn[level],
					       bn, lastiblock[level],
					       level, &count, &rcount,
					       &lastseg, &bc);
			if (error)
				allerror = error;
			real_released += rcount;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				/* The whole indirect tree at this level goes. */
				if (oip->i_ffs1_ib[level] > 0)
					real_released += nblocks;
				blocksreleased += nblocks;
				oip->i_ffs1_ib[level] = 0;
				lfs_blkfree(fs, oip, bn, lfs_sb_getbsize(fs),
					    &lastseg, &bc);
				lfs_deregister_block(ovp, bn);
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = ULFS_NDADDR - 1; i > lastblock; i--) {
		long bsize, obsize;

		bn = oip->i_ffs1_db[i];
		if (bn == 0)
			continue;
		bsize = lfs_blksize(fs, oip, i);
		if (oip->i_ffs1_db[i] > 0) {
			/* Check for fragment size changes */
			obsize = oip->i_lfs_fragsize[i];
			real_released += lfs_btofsb(fs, obsize);
			oip->i_lfs_fragsize[i] = 0;
		} else
			obsize = 0;
		blocksreleased += lfs_btofsb(fs, bsize);
		oip->i_ffs1_db[i] = 0;
		lfs_blkfree(fs, oip, bn, obsize, &lastseg, &bc);
		lfs_deregister_block(ovp, bn);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_ffs1_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;
#if 0
		long olddspace;
#endif

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = lfs_blksize(fs, oip, lastblock);
#if 0
		olddspace = oip->i_lfs_fragsize[lastblock];
#endif

		oip->i_size = oip->i_ffs1_size = length;
		newspace = lfs_blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			blocksreleased += lfs_btofsb(fs, oldspace - newspace);
		}
#if 0
		if (bn > 0 && olddspace - newspace > 0) {
			/* No segment accounting here, just vnode */
			real_released += lfs_btofsb(fs, olddspace - newspace);
		}
#endif
	}

done:
	/* Finish segment accounting corrections */
	lfs_update_seguse(fs, oip, lastseg, bc);
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if ((newblks[ULFS_NDADDR + level] == 0) !=
		    ((oip->i_ffs1_ib[level]) == 0)) {
			panic("lfs itrunc1");
		}
	for (i = 0; i < ULFS_NDADDR; i++)
		if ((newblks[i] == 0) != (oip->i_ffs1_db[i] == 0)) {
			panic("lfs itrunc2");
		}
	if (length == 0 &&
	    (!LIST_EMPTY(&ovp->v_cleanblkhd) ||
	     !LIST_EMPTY(&ovp->v_dirtyblkhd)))
		panic("lfs itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = oip->i_ffs1_size = length;
	oip->i_lfs_effnblks -= blocksreleased;
	oip->i_ffs1_blocks -= real_released;
	mutex_enter(&lfs_lock);
	lfs_sb_addbfree(fs, blocksreleased);
	mutex_exit(&lfs_lock);
#ifdef DIAGNOSTIC
	if (oip->i_size == 0 &&
	    (oip->i_ffs1_blocks != 0 || oip->i_lfs_effnblks != 0)) {
		printf("lfs_truncate: truncate to 0 but %d blks/%jd effblks\n",
		       oip->i_ffs1_blocks, (intmax_t)oip->i_lfs_effnblks);
		panic("lfs_truncate: persistent blocks");
	}
#endif

	/*
	 * If we truncated to zero, take us off the paging queue.
	 */
	mutex_enter(&lfs_lock);
	if (oip->i_size == 0 && oip->i_flags & IN_PAGING) {
		oip->i_flags &= ~IN_PAGING;
		TAILQ_REMOVE(&fs->lfs_pchainhd, oip, i_lfs_pchain);
	}
	mutex_exit(&lfs_lock);

	oip->i_flag |= IN_CHANGE;
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	(void) lfs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	lfs_reserve(fs, ovp, NULL,
	    -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
	genfs_node_unlock(ovp);
errout:
	oip->i_lfs_hiblk = lfs_lblkno(fs, oip->i_size +
	    lfs_sb_getbsize(fs) - 1) - 1;
	if (ovp != fs->lfs_ivnode)
		lfs_segunlock(fs);
	return (allerror ? allerror : error);
}
/*
 * fsck_lfs pass 5: verify the segment usage table and superblock
 * accounting against the per-segment byte counts accumulated in
 * seg_table[] during earlier passes, and recompute dmeta, avail,
 * nclean and bfree, offering to fix any discrepancy.
 *
 * Reads globals: fs, seg_table[], preen, idaddr, locked_queue_bytes.
 * Side effects: may rewrite segment-usage entries (VOP_BWRITE) and
 * mark the superblock dirty (sbdirty()).
 */
void
pass5(void)
{
	SEGUSE *su;
	struct ubuf *bp;
	int i;
	unsigned long bb;	/* total number of used blocks (lower bound) */
	unsigned long ubb;	/* upper bound number of used blocks */
	unsigned long avail;	/* blocks available for writing */
	unsigned long dmeta;	/* blocks in segsums and inodes */
	int nclean;		/* clean segments */
	size_t labelskew;
	int diddirty;		/* nonzero -> this SEGUSE entry was modified */

	/*
	 * Check segment holdings against actual holdings. Check for
	 * "clean" segments that contain live data. If we are only
	 * rolling forward, we can't check the segment holdings, but
	 * we can still check the cleanerinfo data.
	 */
	nclean = 0;
	avail = 0;
	bb = ubb = 0;
	dmeta = 0;
	for (i = 0; i < fs->lfs_nseg; i++) {
		diddirty = 0;
		LFS_SEGENTRY(su, fs, i, bp);
		/* A "clean" segment must not hold any live bytes. */
		if (!preen && !(su->su_flags & SEGUSE_DIRTY) &&
		    seg_table[i].su_nbytes > 0) {
			pwarn("CLEAN SEGMENT %d CONTAINS %d BYTES\n",
			    i, seg_table[i].su_nbytes);
			if (reply("MARK SEGMENT DIRTY")) {
				su->su_flags |= SEGUSE_DIRTY;
				++diddirty;
			}
		}
		/* Compare on-disk byte count against what we counted. */
		if (!preen && su->su_nbytes != seg_table[i].su_nbytes) {
			pwarn("SEGMENT %d CLAIMS %d BYTES BUT HAS %d",
			    i, su->su_nbytes, seg_table[i].su_nbytes);
			/* signed compare so a wrapped count reads as "low" */
			if ((int32_t)su->su_nbytes >
			    (int32_t)seg_table[i].su_nbytes)
				pwarn(" (HIGH BY %d)\n", su->su_nbytes -
				    seg_table[i].su_nbytes);
			else
				pwarn(" (LOW BY %d)\n", -su->su_nbytes +
				    seg_table[i].su_nbytes);
			if (reply("FIX")) {
				su->su_nbytes = seg_table[i].su_nbytes;
				++diddirty;
			}
		}
		if (su->su_flags & SEGUSE_DIRTY) {
			/*
			 * bb counts data + summary blocks only (lower
			 * bound); ubb additionally counts inode blocks
			 * (upper bound).
			 */
			bb += btofsb(fs, su->su_nbytes +
			    su->su_nsums * fs->lfs_sumsize);
			ubb += btofsb(fs, su->su_nbytes +
			    su->su_nsums * fs->lfs_sumsize +
			    su->su_ninos * fs->lfs_ibsize);
			dmeta += btofsb(fs,
			    fs->lfs_sumsize * su->su_nsums);
			dmeta += btofsb(fs,
			    fs->lfs_ibsize * su->su_ninos);
		} else {
			nclean++;
			avail += segtod(fs, 1);
			/* Space consumed by a superblock copy is not free. */
			if (su->su_flags & SEGUSE_SUPERBLOCK)
				avail -= btofsb(fs, LFS_SBPAD);
			/* v2 filesystems may skew segment 0 past the label. */
			if (i == 0 && fs->lfs_version > 1 &&
			    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
				avail -= btofsb(fs, LFS_LABELPAD) -
				    fs->lfs_start;
		}
		if (diddirty)
			VOP_BWRITE(bp);
		else
			brelse(bp, 0);
	}

	/* Also may be available bytes in current seg */
	i = dtosn(fs, fs->lfs_offset);
	avail += sntod(fs, i + 1) - fs->lfs_offset;
	/* But do not count minfreesegs */
	avail -= segtod(fs, (fs->lfs_minfreeseg -
		(fs->lfs_minfreeseg / 2)));
	/* Note we may have bytes to write yet */
	avail -= btofsb(fs, locked_queue_bytes);

	if (idaddr)
		pwarn("NOTE: when using -i, expect discrepancies in dmeta,"
		      " avail, nclean, bfree\n");
	if (dmeta != fs->lfs_dmeta) {
		pwarn("DMETA GIVEN AS %d, SHOULD BE %ld\n",
		    fs->lfs_dmeta, dmeta);
		if (preen || reply("FIX")) {
			fs->lfs_dmeta = dmeta;
			sbdirty();
		}
	}
	if (avail != fs->lfs_avail) {
		pwarn("AVAIL GIVEN AS %d, SHOULD BE %ld\n",
		    fs->lfs_avail, avail);
		if (preen || reply("FIX")) {
			fs->lfs_avail = avail;
			sbdirty();
		}
	}
	if (nclean != fs->lfs_nclean) {
		pwarn("NCLEAN GIVEN AS %d, SHOULD BE %d\n",
		    fs->lfs_nclean, nclean);
		if (preen || reply("FIX")) {
			fs->lfs_nclean = nclean;
			sbdirty();
		}
	}
	labelskew = 0;
	if (fs->lfs_version > 1 &&
	    fs->lfs_start < btofsb(fs, LFS_LABELPAD))
		labelskew = btofsb(fs, LFS_LABELPAD);
	/*
	 * bfree cannot be pinned down exactly (inode blocks may or may
	 * not be live), so accept anything between the bounds and, when
	 * fixing, split the difference.
	 */
	if (fs->lfs_bfree > fs->lfs_dsize - bb - labelskew ||
	    fs->lfs_bfree < fs->lfs_dsize - ubb - labelskew) {
		pwarn("BFREE GIVEN AS %d, SHOULD BE BETWEEN %ld AND %ld\n",
		    fs->lfs_bfree, (fs->lfs_dsize - ubb - labelskew),
		    fs->lfs_dsize - bb - labelskew);
		if (preen || reply("FIX")) {
			fs->lfs_bfree =
			    ((fs->lfs_dsize - labelskew - ubb) +
			     fs->lfs_dsize - labelskew - bb) / 2;
			sbdirty();
		}
	}
}
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn. Blocks are free'd in LIFO order up to (but not including)
 * lastbn. If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * On success *countp receives the number of fs blocks logically released
 * and *rcountp the number actually backed by disk addresses; lastsegp/bcp
 * carry segment-accounting state through the recursion (via lfs_blkfree).
 *
 * NB: triple indirect blocks are untested.
 */
static int
lfs_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
	       daddr_t lastbn, int level, daddr_t *countp,
	       daddr_t *rcountp, long *lastsegp, size_t *bcp)
{
	int i;
	struct buf *bp;
	struct lfs *fs = ip->i_lfs;
	int32_t *bap;	/* XXX ondisk32 */
	struct vnode *vp;
	daddr_t nb, nlbn, last;
	int32_t *copy = NULL;	/* XXX ondisk32 */
	daddr_t blkcount, rblkcount, factor;
	int nblocks;
	daddr_t blocksreleased = 0, real_released = 0;
	int error = 0, allerror = 0;

	ASSERT_SEGLOCK(fs);
	/*
	 * Calculate index in current block of last
	 * block to be kept. -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= LFS_NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));
	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first. Since
	 * double(triple) indirect before single(double) indirect, calls
	 * to bmap on these blocks will fail. However, we already have
	 * the on disk address, so we have to set the b_blkno field
	 * explicitly instead of letting bread do everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, lfs_sb_getbsize(fs), 0, 0);
	if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, lfs_sb_getbsize(fs)), lbn);
	} else {
		/* Not cached: issue the read ourselves with the known dbn. */
		trace(TR_BREADMISS, pack(vp, lfs_sb_getbsize(fs)), lbn);
		curlwp->l_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("lfs_indirtrunc: bad buffer size");
		bp->b_blkno = LFS_FSBTODB(fs, dbn);
		VOP_STRATEGY(vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp, 0);
		*countp = *rcountp = 0;
		return (error);
	}

	bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
	if (lastbn >= 0) {
		/*
		 * Keeping part of the block: snapshot the pointers into
		 * `copy' so we can free from the snapshot, then zero the
		 * discarded tail on disk before releasing anything.
		 */
		copy = lfs_malloc(fs, lfs_sb_getbsize(fs), LFS_NB_IBLOCK);
		memcpy((void *)copy, (void *)bap, lfs_sb_getbsize(fs));
		memset((void *)&bap[last + 1], 0,
		/* XXX ondisk32 */
		    (u_int)(LFS_NINDIR(fs) - (last + 1)) * sizeof (int32_t));
		error = VOP_BWRITE(bp->b_vp, bp);
		if (error)
			allerror = error;
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = LFS_NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	     i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = lfs_indirtrunc(ip, nlbn, nb, (daddr_t)-1,
					       level - 1, &blkcount, &rblkcount,
					       lastsegp, bcp);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
			real_released += rblkcount;
		}
		lfs_blkfree(fs, ip, nb, lfs_sb_getbsize(fs), lastsegp, bcp);
		/* Only addresses > 0 occupy real disk space (UNWRITTEN <= 0). */
		if (bap[i] > 0)
			real_released += nblocks;
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];	/* i == old `last' after the loop above */
		if (nb != 0) {
			error = lfs_indirtrunc(ip, nlbn, nb, last,
					       level - 1, &blkcount, &rblkcount,
					       lastsegp, bcp);
			if (error)
				allerror = error;
			real_released += rblkcount;
			blocksreleased += blkcount;
		}
	}

	if (copy != NULL) {
		/* We freed from the snapshot; the buffer was already written. */
		lfs_free(fs, copy, LFS_NB_IBLOCK);
	} else {
		/*
		 * Whole block discarded: if it was dirty, return its space
		 * to the segment accounting before invalidating it.
		 */
		mutex_enter(&bufcache_lock);
		if (bp->b_oflags & BO_DELWRI) {
			LFS_UNLOCK_BUF(bp);
			lfs_sb_addavail(fs, lfs_btofsb(fs, bp->b_bcount));
			wakeup(&fs->lfs_availsleep);
		}
		brelsel(bp, BC_INVAL);
		mutex_exit(&bufcache_lock);
	}

	*countp = blocksreleased;
	*rcountp = real_released;
	return (allerror);
}
/*
 * Get the next entry in a directory.
 *
 * Reads the directory block addressed by idesc->id_lblkno, validates the
 * entry at idesc->id_loc with dircheck(), and advances id_loc/id_filesize
 * past the entry returned. On corruption the damaged region is patched
 * (subject to dofix()/id_fix) and skipped. Returns NULL at end of data.
 *
 * NOTE(review): the returned pointer addresses memory inside a buffer
 * that has been brelse()'d; this relies on fsck's userland ubuf cache
 * keeping the data around — confirm before reusing this pattern.
 */
static LFS_DIRHEADER *
fsck_readdir(struct uvnode *vp, struct inodesc *idesc)
{
	LFS_DIRHEADER *dp, *ndp;
	struct ubuf *bp;
	long size, blksiz, fix, dploc;

	blksiz = idesc->id_numfrags * lfs_sb_getfsize(fs);
	bread(vp, idesc->id_lblkno, blksiz, 0, &bp);
	/* At a DIRBLKSIZ boundary: the entry there must validate. */
	if (idesc->id_loc % LFS_DIRBLKSIZ == 0 && idesc->id_filesize > 0 &&
	    idesc->id_loc < blksiz) {
		dp = (LFS_DIRHEADER *) (bp->b_data + idesc->id_loc);
		if (dircheck(idesc, dp))
			goto dpok;
		brelse(bp, 0);
		if (idesc->id_fix == IGNORE)
			return (0);
		fix = dofix(idesc, "DIRECTORY CORRUPTED");
		/* Re-read and overwrite the bad block with one empty entry. */
		bread(vp, idesc->id_lblkno, blksiz, 0, &bp);
		dp = (LFS_DIRHEADER *) (bp->b_data + idesc->id_loc);
		lfs_dir_setino(fs, dp, 0);
		lfs_dir_settype(fs, dp, LFS_DT_UNKNOWN);
		lfs_dir_setnamlen(fs, dp, 0);
		lfs_dir_setreclen(fs, dp, LFS_DIRBLKSIZ);
		/* for now at least, don't zero the old contents */
		/*lfs_copydirname(fs, lfs_dir_nameptr(fs, dp), "", 0, LFS_DIRBLKSIZ);*/
		lfs_dir_nameptr(fs, dp)[0] = '\0';
		if (fix)
			VOP_BWRITE(bp);
		else
			brelse(bp, 0);
		/* Skip the whole damaged DIRBLKSIZ chunk. */
		idesc->id_loc += LFS_DIRBLKSIZ;
		idesc->id_filesize -= LFS_DIRBLKSIZ;
		return (dp);
	}
dpok:
	if (idesc->id_filesize <= 0 || idesc->id_loc >= blksiz) {
		brelse(bp, 0);
		return NULL;
	}
	dploc = idesc->id_loc;
	dp = (LFS_DIRHEADER *) (bp->b_data + dploc);
	idesc->id_loc += lfs_dir_getreclen(fs, dp);
	idesc->id_filesize -= lfs_dir_getreclen(fs, dp);
	/* If we landed exactly on the next boundary, dp is good as-is. */
	if ((idesc->id_loc % LFS_DIRBLKSIZ) == 0) {
		brelse(bp, 0);
		return dp;
	}
	/* Peek at the following entry; if it's bad, absorb the slack. */
	ndp = (LFS_DIRHEADER *) (bp->b_data + idesc->id_loc);
	if (idesc->id_loc < blksiz && idesc->id_filesize > 0 &&
	    dircheck(idesc, ndp) == 0) {
		brelse(bp, 0);
		size = LFS_DIRBLKSIZ - (idesc->id_loc % LFS_DIRBLKSIZ);
		idesc->id_loc += size;
		idesc->id_filesize -= size;
		if (idesc->id_fix == IGNORE)
			return 0;
		fix = dofix(idesc, "DIRECTORY CORRUPTED");
		/* Grow dp's reclen to cover the rest of the dirblock. */
		bread(vp, idesc->id_lblkno, blksiz, 0, &bp);
		dp = (LFS_DIRHEADER *) (bp->b_data + dploc);
		lfs_dir_setreclen(fs, dp, lfs_dir_getreclen(fs, dp) + size);
		if (fix)
			VOP_BWRITE(bp);
		else
			brelse(bp, 0);
	} else
		brelse(bp, 0);
	return (dp);
}
/*
 * Walk one indirect block at indirection depth `ilevel', applying the
 * inodesc's check function (or dirscan for directory data) to each
 * referenced block, recursing for deeper levels. `isize' is the number
 * of bytes of file data still covered by this subtree.
 *
 * Returns the KEEPON/SKIP/STOP disposition from the callbacks. Writes
 * the indirect block back only if an entry was zeroed (diddirty).
 */
static int
iblock(struct inodesc *idesc, long ilevel, u_int64_t isize)
{
	ufs_daddr_t *ap, *aplim;
	struct ubuf *bp;
	int i, n, (*func) (struct inodesc *), nif;
	u_int64_t sizepb;	/* bytes addressed per pointer at this level */
	char pathbuf[MAXPATHLEN + 1], buf[BUFSIZ];
	struct uvnode *devvp, *vp;
	int diddirty = 0;

	if (idesc->id_type == ADDR) {
		func = idesc->id_func;
		/* Check the indirect block itself first. */
		n = (*func) (idesc);
		if ((n & KEEPON) == 0)
			return (n);
	} else
		func = dirscan;
	if (chkrange(idesc->id_blkno, fragstofsb(fs, idesc->id_numfrags)))
		return (SKIP);

	devvp = fs->lfs_devvp;
	bread(devvp, fsbtodb(fs, idesc->id_blkno), fs->lfs_bsize,
	    NOCRED, 0, &bp);
	ilevel--;
	for (sizepb = fs->lfs_bsize, i = 0; i < ilevel; i++)
		sizepb *= NINDIR(fs);
	/* nif = number of in-use pointers in this block, given isize. */
	if (isize > sizepb * NINDIR(fs))
		nif = NINDIR(fs);
	else
		nif = howmany(isize, sizepb);
	/* In pass 1, zero any pointers beyond the claimed file size. */
	if (idesc->id_func == pass1check && nif < NINDIR(fs)) {
		aplim = ((ufs_daddr_t *) bp->b_data) + NINDIR(fs);
		for (ap = ((ufs_daddr_t *) bp->b_data) + nif;
		    ap < aplim; ap++) {
			if (*ap == 0)
				continue;
			(void) sprintf(buf, "PARTIALLY TRUNCATED INODE I=%llu",
			    (unsigned long long)idesc->id_number);
			if (dofix(idesc, buf)) {
				*ap = 0;
				++diddirty;
			}
		}
	}
	aplim = ((ufs_daddr_t *) bp->b_data) + nif;
	for (ap = ((ufs_daddr_t *) bp->b_data); ap < aplim; ap++) {
		if (*ap) {
			idesc->id_blkno = *ap;
			if (ilevel == 0) {
				/*
				 * dirscan needs lblkno.
				 */
				idesc->id_lblkno++;
				n = (*func) (idesc);
			} else {
				n = iblock(idesc, ilevel, isize);
			}
			if (n & STOP) {
				if (diddirty)
					VOP_BWRITE(bp);
				else
					brelse(bp, 0);
				return (n);
			}
		} else {
			if (idesc->id_type == DATA && isize > 0) {
				/* An empty block in a directory XXX */
				getpathname(pathbuf, sizeof(pathbuf),
				    idesc->id_number, idesc->id_number);
				pfatal("DIRECTORY %s INO %lld: CONTAINS EMPTY BLOCKS [3]",
				    pathbuf, (long long)idesc->id_number);
				if (reply("ADJUST LENGTH") == 1) {
					/* Truncate away the hole and rerun. */
					vp = vget(fs, idesc->id_number);
					VTOI(vp)->i_ffs1_size -= isize;
					isize = 0;
					printf(
					    "YOU MUST RERUN FSCK AFTERWARDS\n");
					rerun = 1;
					inodirty(VTOI(vp));
					if (diddirty)
						VOP_BWRITE(bp);
					else
						brelse(bp, 0);
					return (STOP);
				}
			}
		}
		isize -= sizepb;
	}
	if (diddirty)
		VOP_BWRITE(bp);
	else
		brelse(bp, 0);
	return (KEEPON);
}
/*
 * Mkdir system call.
 *
 * Allocates the new directory inode, writes its "." and ".." block,
 * then enters it in the parent via ufs_direnter(). The parent's link
 * count is bumped before the entry is made so that a crash leaves an
 * over-counted (fixable) rather than corrupt directory; on any error
 * both link counts are rolled back and vrele reclaims the inode
 * because its link count is set to 0.
 */
int
ufs_mkdir(void *v)
{
	struct vop_mkdir_args *ap = v;
	struct vnode *dvp = ap->a_dvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip, *dp;
	struct vnode *tvp;
	struct buf *bp;
	struct direct newdir;
	struct dirtemplate dirtemplate, *dtp;
	int error, dmode, blkoff;

#ifdef DIAGNOSTIC
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("ufs_mkdir: no name");
#endif
	dp = VTOI(dvp);
	if ((nlink_t) DIP(dp, nlink) >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & 0777;
	dmode |= IFDIR;
	/*
	 * Must simulate part of ufs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = UFS_INODE_ALLOC(dp, dmode, cnp->cn_cred, &tvp)) != 0)
		goto out;
	ip = VTOI(tvp);
	DIP_ASSIGN(ip, uid, cnp->cn_cred->cr_uid);
	DIP_ASSIGN(ip, gid, DIP(dp, gid));
	if ((error = getinoquota(ip)) ||
	    (error = ufs_quota_alloc_inode(ip, cnp->cn_cred))) {
		pool_put(&namei_pool, cnp->cn_pnbuf);
		UFS_INODE_FREE(ip, ip->i_number, dmode);
		vput(tvp);
		vput(dvp);
		return (error);
	}
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	DIP_ASSIGN(ip, mode, dmode);
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	/* A directory starts with 2 links: its own "." plus parent entry. */
	ip->i_effnlink = 2;
	DIP_ASSIGN(ip, nlink, 2);
	if (DOINGSOFTDEP(tvp))
		softdep_change_linkcnt(ip, 0);

	/*
	 * Bump link count in parent directory to reflect work done below.
	 * Should be done before reference is create so cleanup is
	 * possible if we crash.
	 */
	dp->i_effnlink++;
	DIP_ADD(dp, nlink, 1);
	dp->i_flag |= IN_CHANGE;
	if (DOINGSOFTDEP(dvp))
		softdep_change_linkcnt(dp, 0);
	if ((error = UFS_UPDATE(dp, !DOINGSOFTDEP(dvp))) != 0)
		goto bad;

	/*
	 * Initialize directory with "." and ".." from static template.
	 */
	if (dvp->v_mount->mnt_maxsymlinklen > 0)
		dtp = &mastertemplate;
	else
		dtp = (struct dirtemplate *)&omastertemplate;
	dirtemplate = *dtp;
	dirtemplate.dot_ino = ip->i_number;
	dirtemplate.dotdot_ino = dp->i_number;
	if ((error = UFS_BUF_ALLOC(ip, (off_t)0, DIRBLKSIZ, cnp->cn_cred,
	    B_CLRBUF, &bp)) != 0)
		goto bad;
	DIP_ASSIGN(ip, size, DIRBLKSIZ);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	uvm_vnp_setsize(tvp, DIP(ip, size));
	bcopy((caddr_t)&dirtemplate, (caddr_t)bp->b_data, sizeof dirtemplate);
	if (DOINGSOFTDEP(tvp)) {
		/*
		 * Ensure that the entire newly allocated block is a
		 * valid directory so that future growth within the
		 * block does not have to ensure that the block is
		 * written before the inode
		 */
		blkoff = DIRBLKSIZ;
		while (blkoff < bp->b_bcount) {
			((struct direct *)
			    (bp->b_data + blkoff))->d_reclen = DIRBLKSIZ;
			blkoff += DIRBLKSIZ;
		}
	}
	if ((error = UFS_UPDATE(ip, !DOINGSOFTDEP(tvp))) != 0) {
		(void)VOP_BWRITE(bp);
		goto bad;
	}

	/*
	 * Directory set up, now install its entry in the parent directory.
	 *
	 * If we are not doing soft dependencies, then we must write out the
	 * buffer containing the new directory body before entering the new
	 * name in the parent. If we are doing soft dependencies, then the
	 * buffer containing the new directory body will be passed to and
	 * released in the soft dependency code after the code has attached
	 * an appropriate ordering dependency to the buffer which ensures that
	 * the buffer is written before the new name is written in the parent.
	 */
	if (!DOINGSOFTDEP(dvp) && ((error = VOP_BWRITE(bp)) != 0))
		goto bad;
	ufs_makedirentry(ip, cnp, &newdir);
	error = ufs_direnter(dvp, tvp, &newdir, cnp, bp);
bad:
	if (error == 0) {
		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
		*ap->a_vpp = tvp;
	} else {
		/* Roll back the parent's link count bumped above. */
		dp->i_effnlink--;
		DIP_ADD(dp, nlink, -1);
		dp->i_flag |= IN_CHANGE;
		if (DOINGSOFTDEP(dvp))
			softdep_change_linkcnt(dp, 0);
		/*
		 * No need to do an explicit VOP_TRUNCATE here, vrele will
		 * do this for us because we set the link count to 0.
		 */
		ip->i_effnlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		ip->i_flag |= IN_CHANGE;
		if (DOINGSOFTDEP(tvp))
			softdep_change_linkcnt(ip, 0);
		vput(tvp);
	}
out:
	pool_put(&namei_pool, cnp->cn_pnbuf);
	vput(dvp);
	return (error);
}
/*
 * Mkdir system call (vop_mkdir_v3 variant, WAPBL journaling).
 *
 * Allocates the directory inode, builds its "." / ".." block from the
 * master template (byte-swapped for the mount as needed), writes that
 * block synchronously, and only then enters the new name in the parent.
 * The parent link count is bumped first so a crash is recoverable; the
 * error path rolls both counts back and lets vput()/vrele reclaim the
 * inode since its link count is set to 0.
 */
int
ufs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode		*a_dvp;
		struct vnode		**a_vpp;
		struct componentname	*a_cnp;
		struct vattr		*a_vap;
	} */ *ap = v;
	struct vnode *dvp = ap->a_dvp, *tvp;
	struct vattr *vap = ap->a_vap;
	struct componentname *cnp = ap->a_cnp;
	struct inode *ip, *dp = VTOI(dvp);
	struct buf *bp;
	struct dirtemplate dirtemplate;
	struct direct *newdir;
	int error, dmode;
	struct ufsmount *ump = dp->i_ump;
	int dirblksiz = ump->um_dirblksiz;
	struct ufs_lookup_results *ulr;

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	UFS_CHECK_CRAPCOUNTER(dp);

	if ((nlink_t)dp->i_nlink >= LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	dmode = vap->va_mode & ACCESSPERMS;
	dmode |= IFDIR;
	/*
	 * Must simulate part of ufs_makeinode here to acquire the inode,
	 * but not have it entered in the parent directory. The entry is
	 * made later after writing "." and ".." entries.
	 */
	if ((error = UFS_VALLOC(dvp, dmode, cnp->cn_cred, ap->a_vpp)) != 0)
		goto out;
	tvp = *ap->a_vpp;
	ip = VTOI(tvp);
	error = UFS_WAPBL_BEGIN(ap->a_dvp->v_mount);
	if (error) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		vput(tvp);
		goto out;
	}
	ip->i_uid = kauth_cred_geteuid(cnp->cn_cred);
	DIP_ASSIGN(ip, uid, ip->i_uid);
	ip->i_gid = dp->i_gid;
	DIP_ASSIGN(ip, gid, ip->i_gid);
#if defined(QUOTA) || defined(QUOTA2)
	if ((error = chkiq(ip, 1, cnp->cn_cred, 0))) {
		UFS_VFREE(tvp, ip->i_number, dmode);
		UFS_WAPBL_END(dvp->v_mount);
		fstrans_done(dvp->v_mount);
		vput(tvp);
		return (error);
	}
#endif
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	ip->i_mode = dmode;
	DIP_ASSIGN(ip, mode, dmode);
	tvp->v_type = VDIR;	/* Rest init'd in getnewvnode(). */
	/* A directory starts with 2 links: its own "." plus parent entry. */
	ip->i_nlink = 2;
	DIP_ASSIGN(ip, nlink, 2);
	if (cnp->cn_flags & ISWHITEOUT) {
		ip->i_flags |= UF_OPAQUE;
		DIP_ASSIGN(ip, flags, ip->i_flags);
	}

	/*
	 * Bump link count in parent directory to reflect work done below.
	 * Should be done before reference is created so cleanup is
	 * possible if we crash.
	 */
	dp->i_nlink++;
	DIP_ASSIGN(dp, nlink, dp->i_nlink);
	dp->i_flag |= IN_CHANGE;
	if ((error = UFS_UPDATE(dvp, NULL, NULL, UPDATE_DIROP)) != 0)
		goto bad;
	/*
	 * Initialize directory with "." and ".." from static template.
	 */
	dirtemplate = mastertemplate;
	/* dotdot_reclen computed while dot_reclen is still in host order */
	dirtemplate.dotdot_reclen = dirblksiz - dirtemplate.dot_reclen;
	dirtemplate.dot_ino = ufs_rw32(ip->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_ino = ufs_rw32(dp->i_number, UFS_MPNEEDSWAP(ump));
	dirtemplate.dot_reclen = ufs_rw16(dirtemplate.dot_reclen,
	    UFS_MPNEEDSWAP(ump));
	dirtemplate.dotdot_reclen = ufs_rw16(dirtemplate.dotdot_reclen,
	    UFS_MPNEEDSWAP(ump));
	/* Old (4.2BSD) format: namlen/type share a byte-swapped 16-bit field. */
	if (ump->um_maxsymlinklen <= 0) {
#if BYTE_ORDER == LITTLE_ENDIAN
		if (UFS_MPNEEDSWAP(ump) == 0)
#else
		if (UFS_MPNEEDSWAP(ump) != 0)
#endif
		{
			dirtemplate.dot_type = dirtemplate.dot_namlen;
			dirtemplate.dotdot_type = dirtemplate.dotdot_namlen;
			dirtemplate.dot_namlen = dirtemplate.dotdot_namlen = 0;
		} else
			dirtemplate.dot_type = dirtemplate.dotdot_type = 0;
	}
	if ((error = UFS_BALLOC(tvp, (off_t)0, dirblksiz, cnp->cn_cred,
	    B_CLRBUF, &bp)) != 0)
		goto bad;
	ip->i_size = dirblksiz;
	DIP_ASSIGN(ip, size, dirblksiz);
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	uvm_vnp_setsize(tvp, ip->i_size);
	memcpy((void *)bp->b_data, (void *)&dirtemplate, sizeof dirtemplate);

	/*
	 * Directory set up, now install its entry in the parent directory.
	 * We must write out the buffer containing the new directory body
	 * before entering the new name in the parent.
	 */
	if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0)
		goto bad;
	if ((error = UFS_UPDATE(tvp, NULL, NULL, UPDATE_DIROP)) != 0) {
		goto bad;
	}
	newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK);
	ufs_makedirentry(ip, cnp, newdir);
	error = ufs_direnter(dvp, ulr, tvp, newdir, cnp, bp);
	pool_cache_put(ufs_direct_cache, newdir);
 bad:
	if (error == 0) {
		VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
		VOP_UNLOCK(tvp);
		UFS_WAPBL_END(dvp->v_mount);
	} else {
		/* Roll back the parent's link count bumped above. */
		dp->i_nlink--;
		DIP_ASSIGN(dp, nlink, dp->i_nlink);
		dp->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(dvp, NULL, NULL, UPDATE_DIROP);
		/*
		 * No need to do an explicit UFS_TRUNCATE here, vrele will
		 * do this for us because we set the link count to 0.
		 */
		ip->i_nlink = 0;
		DIP_ASSIGN(ip, nlink, 0);
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(tvp, NULL, NULL, UPDATE_DIROP);
		UFS_WAPBL_END(dvp->v_mount);
		vput(tvp);
	}
 out:
	fstrans_done(dvp->v_mount);
	return (error);
}
/*
 * Allocate a new directory (fsck_lfs).
 *
 * Allocates inode `request' (or the first free one) as a directory,
 * writes its "." and ".." block, and updates the fsck state tables
 * (statemap, lncntp) plus the parent's link count. Returns the new
 * inode number, or 0 on failure (allocation released via freeino()).
 */
int
allocdir(ino_t parent, ino_t request, int mode)
{
	ino_t ino;
	char *cp;
	union lfs_dinode *dp;
	struct ubuf *bp;
	LFS_DIRHEADER *dirp;
	struct uvnode *vp;

	ino = allocino(request, LFS_IFDIR | mode);
	vp = vget(fs, ino);
	dp = VTOD(vp);
	bread(vp, lfs_dino_getdb(fs, dp, 0), lfs_sb_getfsize(fs), 0, &bp);
	if (bp->b_flags & B_ERROR) {
		brelse(bp, 0);
		freeino(ino);
		return (0);
	}
	dirp = (LFS_DIRHEADER *)bp->b_data;
	/* . */
	lfs_dir_setino(fs, dirp, ino);
	lfs_dir_setreclen(fs, dirp, LFS_DIRECTSIZ(fs, 1));
	lfs_dir_settype(fs, dirp, LFS_DT_DIR);
	lfs_dir_setnamlen(fs, dirp, 1);
	lfs_copydirname(fs, lfs_dir_nameptr(fs, dirp), ".", 1,
			LFS_DIRECTSIZ(fs, 1));
	/* .. — its reclen absorbs the rest of the first dirblock. */
	dirp = LFS_NEXTDIR(fs, dirp);
	lfs_dir_setino(fs, dirp, parent);
	lfs_dir_setreclen(fs, dirp, LFS_DIRBLKSIZ - LFS_DIRECTSIZ(fs, 1));
	lfs_dir_settype(fs, dirp, LFS_DT_DIR);
	lfs_dir_setnamlen(fs, dirp, 2);
	lfs_copydirname(fs, lfs_dir_nameptr(fs, dirp), "..", 2,
			LFS_DIRBLKSIZ - LFS_DIRECTSIZ(fs, 1));
	/* Zero any remaining dirblocks in the fragment. */
	for (cp = &bp->b_data[LFS_DIRBLKSIZ];
	     cp < &bp->b_data[lfs_sb_getfsize(fs)];
	     cp += LFS_DIRBLKSIZ)
		zerodirblk(cp);
	VOP_BWRITE(bp);
	lfs_dino_setnlink(fs, dp, 2);
	inodirty(VTOI(vp));
	if (ino == ULFS_ROOTINO) {
		lncntp[ino] = lfs_dino_getnlink(fs, dp);
		cacheino(dp, ino);
		return (ino);
	}
	/* Parent must itself be a known (found or expected) directory. */
	if (statemap[parent] != DSTATE && statemap[parent] != DFOUND) {
		freeino(ino);
		return (0);
	}
	cacheino(dp, ino);
	statemap[ino] = statemap[parent];
	if (statemap[ino] == DSTATE) {
		lncntp[ino] = lfs_dino_getnlink(fs, dp);
		lncntp[parent]++;	/* parent gains a ".." reference */
	}
	vp = vget(fs, parent);
	dp = VTOD(vp);
	lfs_dino_setnlink(fs, dp, lfs_dino_getnlink(fs, dp) + 1);
	inodirty(VTOI(vp));
	return (ino);
}
/*
 * Write a directory entry after a call to namei, using the parameters
 * that it left in nameidata. The argument dirp is the new directory
 * entry contents. Dvp is a pointer to the directory to be written,
 * which was left locked by namei. Remaining parameters (dp->i_offset,
 * dp->i_count) indicate how the space for the new entry is to be obtained.
 * Non-null bp indicates that a directory is being created (for the
 * soft dependency code).
 *
 * Two cases: i_count == 0 means a fresh DIRBLKSIZ block is allocated
 * at i_offset for the entry; otherwise the existing entries in
 * [i_offset, i_offset + i_count) are compacted toward the front of
 * the block until the freed space at the end fits the new entry.
 */
int
ufs_direnter(struct vnode *dvp, struct vnode *tvp, struct direct *dirp,
    struct componentname *cnp, struct buf *newdirbp)
{
	struct ucred *cr;
	struct proc *p;
	int newentrysize;
	struct inode *dp;
	struct buf *bp;
	u_int dsize;
	struct direct *ep, *nep;
	int error, ret, blkoff, loc, spacefree, flags;
	char *dirbuf;

	UFS_WAPBL_JLOCK_ASSERT(dvp->v_mount);

	error = 0;
	cr = cnp->cn_cred;
	p = cnp->cn_proc;
	dp = VTOI(dvp);
	newentrysize = DIRSIZ(FSFMT(dvp), dirp);

	if (dp->i_count == 0) {
		/*
		 * If dp->i_count is 0, then namei could find no
		 * space in the directory. Here, dp->i_offset will
		 * be on a directory block boundary and we will write the
		 * new entry into a fresh block.
		 */
		if (dp->i_offset & (DIRBLKSIZ - 1))
			panic("ufs_direnter: newblk");
		flags = B_CLRBUF;
		if (!DOINGSOFTDEP(dvp))
			flags |= B_SYNC;
		if ((error = UFS_BUF_ALLOC(dp, (off_t)dp->i_offset, DIRBLKSIZ,
		    cr, flags, &bp)) != 0) {
			if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
				bdwrite(newdirbp);
			return (error);
		}
		DIP_ASSIGN(dp, size, dp->i_offset + DIRBLKSIZ);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(dvp, DIP(dp, size));
		/* The new entry owns the entire fresh directory block. */
		dirp->d_reclen = DIRBLKSIZ;
		blkoff = dp->i_offset &
		    (VFSTOUFS(dvp->v_mount)->um_mountp->mnt_stat.f_iosize - 1);
		memcpy(bp->b_data + blkoff, dirp, newentrysize);
#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL) {
			ufsdirhash_newblk(dp, dp->i_offset);
			ufsdirhash_add(dp, dirp, dp->i_offset);
			ufsdirhash_checkblock(dp, (char *)bp->b_data + blkoff,
			    dp->i_offset);
		}
#endif
		if (DOINGSOFTDEP(dvp)) {
			/*
			 * Ensure that the entire newly allocated block is a
			 * valid directory so that future growth within the
			 * block does not have to ensure that the block is
			 * written before the inode.
			 */
			blkoff += DIRBLKSIZ;
			while (blkoff < bp->b_bcount) {
				((struct direct *)
				   (bp->b_data + blkoff))->d_reclen = DIRBLKSIZ;
				blkoff += DIRBLKSIZ;
			}
			if (softdep_setup_directory_add(bp, dp, dp->i_offset,
			    dirp->d_ino, newdirbp, 1) == 0) {
				bdwrite(bp);
				return (UFS_UPDATE(dp, 0));
			}
			/* We have just allocated a directory block in an
			 * indirect block. Rather than tracking when it gets
			 * claimed by the inode, we simply do a VOP_FSYNC
			 * now to ensure that it is there (in case the user
			 * does a future fsync). Note that we have to unlock
			 * the inode for the entry that we just entered, as
			 * the VOP_FSYNC may need to lock other inodes which
			 * can lead to deadlock if we also hold a lock on
			 * the newly entered node.
			 */
			if ((error = VOP_BWRITE(bp)))
				return (error);
			if (tvp != NULL)
				VOP_UNLOCK(tvp, 0);
			error = VOP_FSYNC(dvp, p->p_ucred, MNT_WAIT);
			if (tvp != NULL)
				vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
			return (error);
		}
		error = VOP_BWRITE(bp);
		ret = UFS_UPDATE(dp, !DOINGSOFTDEP(dvp));
		if (error == 0)
			return (ret);
		return (error);
	}

	/*
	 * If dp->i_count is non-zero, then namei found space for the new
	 * entry in the range dp->i_offset to dp->i_offset + dp->i_count
	 * in the directory. To use this space, we may have to compact
	 * the entries located there, by copying them together towards the
	 * beginning of the block, leaving the free space in one usable
	 * chunk at the end.
	 */

	/*
	 * Increase size of directory if entry eats into new space.
	 * This should never push the size past a new multiple of
	 * DIRBLKSIZE.
	 *
	 * N.B. - THIS IS AN ARTIFACT OF 4.2 AND SHOULD NEVER HAPPEN.
	 */
	if (dp->i_offset + dp->i_count > DIP(dp, size)) {
		DIP_ASSIGN(dp, size, dp->i_offset + dp->i_count);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
		UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	}
	/*
	 * Get the block containing the space for the new directory entry.
	 */
	if ((error = UFS_BUFATOFF(dp, (off_t)dp->i_offset, &dirbuf, &bp))
	    != 0) {
		if (DOINGSOFTDEP(dvp) && newdirbp != NULL)
			bdwrite(newdirbp);
		return (error);
	}
	/*
	 * Find space for the new entry. In the simple case, the entry at
	 * offset base will have the space. If it does not, then namei
	 * arranged that compacting the region dp->i_offset to
	 * dp->i_offset + dp->i_count would yield the space.
	 */
	ep = (struct direct *)dirbuf;
	dsize = ep->d_ino ? DIRSIZ(FSFMT(dvp), ep) : 0;
	spacefree = ep->d_reclen - dsize;
	for (loc = ep->d_reclen; loc < dp->i_count; ) {
		nep = (struct direct *)(dirbuf + loc);

		/* Trim the existing slot (NB: dsize may be zero). */
		ep->d_reclen = dsize;
		ep = (struct direct *)((char *)ep + dsize);

		/* Read nep->d_reclen now as the memmove() may clobber it. */
		loc += nep->d_reclen;
		if (nep->d_ino == 0) {
			/*
			 * A mid-block unused entry. Such entries are
			 * never created by the kernel, but fsck_ffs
			 * can create them (and it doesn't fix them).
			 *
			 * Add up the free space, and initialise the
			 * relocated entry since we don't memmove it.
			 */
			spacefree += nep->d_reclen;
			ep->d_ino = 0;
			dsize = 0;
			continue;
		}
		dsize = DIRSIZ(FSFMT(dvp), nep);
		spacefree += nep->d_reclen - dsize;
#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ufsdirhash_move(dp, nep,
			    dp->i_offset + ((char *)nep - dirbuf),
			    dp->i_offset + ((char *)ep - dirbuf));
#endif
		if (DOINGSOFTDEP(dvp))
			softdep_change_directoryentry_offset(dp, dirbuf,
			    (caddr_t)nep, (caddr_t)ep, dsize);
		else
			memmove(ep, nep, dsize);
	}
	/*
	 * Here, `ep' points to a directory entry containing `dsize' in-use
	 * bytes followed by `spacefree' unused bytes. If ep->d_ino == 0,
	 * then the entry is completely unused (dsize == 0). The value
	 * of ep->d_reclen is always indeterminate.
	 *
	 * Update the pointer fields in the previous entry (if any),
	 * copy in the new entry, and write out the block.
	 */
	if (ep->d_ino == 0) {
		if (spacefree + dsize < newentrysize)
			panic("ufs_direnter: compact1");
		dirp->d_reclen = spacefree + dsize;
	} else {
		if (spacefree < newentrysize)
			panic("ufs_direnter: compact2");
		dirp->d_reclen = spacefree;
		ep->d_reclen = dsize;
		ep = (struct direct *)((char *)ep + dsize);
	}
#ifdef UFS_DIRHASH
	if (dp->i_dirhash != NULL && (ep->d_ino == 0 ||
	    dirp->d_reclen == spacefree))
		ufsdirhash_add(dp, dirp, dp->i_offset + ((char *)ep - dirbuf));
#endif
	memcpy(ep, dirp, newentrysize);
#ifdef UFS_DIRHASH
	if (dp->i_dirhash != NULL)
		ufsdirhash_checkblock(dp, dirbuf -
		    (dp->i_offset & (DIRBLKSIZ - 1)),
		    dp->i_offset & ~(DIRBLKSIZ - 1));
#endif
	if (DOINGSOFTDEP(dvp)) {
		(void)softdep_setup_directory_add(bp, dp,
		    dp->i_offset + (caddr_t)ep - dirbuf, dirp->d_ino,
		    newdirbp, 0);
		bdwrite(bp);
	} else {
		error = VOP_BWRITE(bp);
	}
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	/*
	 * If all went well, and the directory can be shortened, proceed
	 * with the truncation. Note that we have to unlock the inode for
	 * the entry that we just entered, as the truncation may need to
	 * lock other inodes which can lead to deadlock if we also hold a
	 * lock on the newly entered node.
	 */
	if (error == 0 && dp->i_endoff && dp->i_endoff < DIP(dp, size)) {
		if (tvp != NULL)
			VOP_UNLOCK(tvp, 0);
#ifdef UFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ufsdirhash_dirtrunc(dp, dp->i_endoff);
#endif
		error = UFS_TRUNCATE(dp, (off_t)dp->i_endoff, IO_SYNC, cr);
		if (tvp != NULL)
			vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p);
	}
	UFS_WAPBL_UPDATE(dp, MNT_WAIT);
	return (error);
}
/*
 * Write a directory entry after a call to namei, using the parameters
 * that ulfs_lookup left in nameidata and in the ulfs_lookup_results.
 *
 * DVP is the directory to be updated. It must be locked.
 * ULR is the ulfs_lookup_results structure from the final lookup step.
 * TVP is not used. (XXX: why is it here? remove it)
 * DIRP is the new directory entry contents.
 * CNP is the componentname from the final lookup step.
 * NEWDIRBP is not used and (XXX) should be removed. The previous
 * comment here said it was used by the now-removed softupdates code.
 *
 * The link count of the target inode is *not* incremented; the
 * caller does that.
 *
 * If ulr->ulr_count is 0, ulfs_lookup did not find space to insert the
 * directory entry. ulr_offset, which is the place to put the entry,
 * should be on a block boundary (and should be at the end of the
 * directory AFAIK) and a fresh block is allocated to put the new
 * directory entry in.
 *
 * If ulr->ulr_count is not zero, ulfs_lookup found a slot to insert
 * the entry into. This slot ranges from ulr_offset to ulr_offset +
 * ulr_count. However, this slot may already be partially populated
 * requiring compaction. See notes below.
 *
 * Furthermore, if ulr_count is not zero and ulr_endoff is not the
 * same as i_size, the directory is truncated to size ulr_endoff.
 */
int
ulfs_direnter(struct vnode *dvp, const struct ulfs_lookup_results *ulr,
    struct vnode *tvp, struct lfs_direct *dirp,
    struct componentname *cnp, struct buf *newdirbp)
{
	kauth_cred_t cr;
	int newentrysize;
	struct inode *dp;
	struct buf *bp;
	u_int dsize;
	struct lfs_direct *ep, *nep;
	int error, ret, lfs_blkoff, loc, spacefree;
	char *dirbuf;
	struct timespec ts;
	struct ulfsmount *ump = VFSTOULFS(dvp->v_mount);
	struct lfs *fs = ump->um_lfs;
	const int needswap = ULFS_MPNEEDSWAP(fs);
	int dirblksiz = fs->um_dirblksiz;

	error = 0;
	cr = cnp->cn_cred;

	dp = VTOI(dvp);
	newentrysize = LFS_DIRSIZ(0, dirp, 0);

	if (ulr->ulr_count == 0) {
		/*
		 * If ulr_count is 0, then namei could find no
		 * space in the directory. Here, ulr_offset will
		 * be on a directory block boundary and we will write the
		 * new entry into a fresh block.
		 */
		if (ulr->ulr_offset & (dirblksiz - 1))
			panic("ulfs_direnter: newblk");
		if ((error = lfs_balloc(dvp, (off_t)ulr->ulr_offset, dirblksiz,
		    cr, B_CLRBUF | B_SYNC, &bp)) != 0) {
			return (error);
		}
		dp->i_size = ulr->ulr_offset + dirblksiz;
		DIP_ASSIGN(dp, size, dp->i_size);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
		uvm_vnp_setsize(dvp, dp->i_size);
		/* New entry owns the whole block; swap fields to disk order. */
		dirp->d_reclen = ulfs_rw16(dirblksiz, needswap);
		dirp->d_ino = ulfs_rw32(dirp->d_ino, needswap);
		/* Old-format fs: namlen/type occupy a byte-swapped pair. */
		if (FSFMT(dvp)) {
#if (BYTE_ORDER == LITTLE_ENDIAN)
			if (needswap == 0) {
#else
			if (needswap != 0) {
#endif
				u_char tmp = dirp->d_namlen;

				dirp->d_namlen = dirp->d_type;
				dirp->d_type = tmp;
			}
		}
		lfs_blkoff = ulr->ulr_offset &
		    (ump->um_mountp->mnt_stat.f_iosize - 1);
		memcpy((char *)bp->b_data + lfs_blkoff, dirp, newentrysize);
#ifdef LFS_DIRHASH
		if (dp->i_dirhash != NULL) {
			ulfsdirhash_newblk(dp, ulr->ulr_offset);
			ulfsdirhash_add(dp, dirp, ulr->ulr_offset);
			ulfsdirhash_checkblock(dp, (char *)bp->b_data +
			    lfs_blkoff, ulr->ulr_offset);
		}
#endif
		error = VOP_BWRITE(bp->b_vp, bp);
		vfs_timestamp(&ts);
		ret = lfs_update(dvp, &ts, &ts, UPDATE_DIROP);
		if (error == 0)
			return (ret);
		return (error);
	}

	/*
	 * If ulr_count is non-zero, then namei found space for the new
	 * entry in the range ulr_offset to ulr_offset + ulr_count
	 * in the directory. To use this space, we may have to compact
	 * the entries located there, by copying them together towards the
	 * beginning of the block, leaving the free space in one usable
	 * chunk at the end.
	 */

	/*
	 * Increase size of directory if entry eats into new space.
	 * This should never push the size past a new multiple of
	 * DIRBLKSIZ.
	 *
	 * N.B. - THIS IS AN ARTIFACT OF 4.2 AND SHOULD NEVER HAPPEN.
	 */
	if (ulr->ulr_offset + ulr->ulr_count > dp->i_size) {
#ifdef DIAGNOSTIC
		printf("ulfs_direnter: reached 4.2-only block, "
		       "not supposed to happen\n");
#endif
		dp->i_size = ulr->ulr_offset + ulr->ulr_count;
		DIP_ASSIGN(dp, size, dp->i_size);
		dp->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Get the block containing the space for the new directory entry.
	 */
	error = ulfs_blkatoff(dvp, (off_t)ulr->ulr_offset, &dirbuf, &bp, true);
	if (error) {
		return (error);
	}
	/*
	 * Find space for the new entry. In the simple case, the entry at
	 * offset base will have the space. If it does not, then namei
	 * arranged that compacting the region ulr_offset to
	 * ulr_offset + ulr_count would yield the space.
	 */
	ep = (struct lfs_direct *)dirbuf;
	dsize = (ep->d_ino != 0) ? LFS_DIRSIZ(FSFMT(dvp), ep, needswap) : 0;
	spacefree = ulfs_rw16(ep->d_reclen, needswap) - dsize;
	for (loc = ulfs_rw16(ep->d_reclen, needswap); loc < ulr->ulr_count; ) {
		uint16_t reclen;

		nep = (struct lfs_direct *)(dirbuf + loc);

		/* Trim the existing slot (NB: dsize may be zero). */
		ep->d_reclen = ulfs_rw16(dsize, needswap);
		ep = (struct lfs_direct *)((char *)ep + dsize);

		/* Read nep->d_reclen before the memcpy() may clobber it. */
		reclen = ulfs_rw16(nep->d_reclen, needswap);
		loc += reclen;
		if (nep->d_ino == 0) {
			/*
			 * A mid-block unused entry. Such entries are
			 * never created by the kernel, but fsck_ffs
			 * can create them (and it doesn't fix them).
			 *
			 * Add up the free space, and initialise the
			 * relocated entry since we don't memcpy it.
			 */
			spacefree += reclen;
			ep->d_ino = 0;
			dsize = 0;
			continue;
		}
		dsize = LFS_DIRSIZ(FSFMT(dvp), nep, needswap);
		spacefree += reclen - dsize;
#ifdef LFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ulfsdirhash_move(dp, nep,
			    ulr->ulr_offset + ((char *)nep - dirbuf),
			    ulr->ulr_offset + ((char *)ep - dirbuf));
#endif
		memcpy((void *)ep, (void *)nep, dsize);
	}
	/*
	 * Here, `ep' points to a directory entry containing `dsize' in-use
	 * bytes followed by `spacefree' unused bytes. If ep->d_ino == 0,
	 * then the entry is completely unused (dsize == 0). The value
	 * of ep->d_reclen is always indeterminate.
	 *
	 * Update the pointer fields in the previous entry (if any),
	 * copy in the new entry, and write out the block.
	 */
	if (ep->d_ino == 0 ||
	    (ulfs_rw32(ep->d_ino, needswap) == ULFS_WINO &&
	     memcmp(ep->d_name, dirp->d_name, dirp->d_namlen) == 0)) {
		/* Unused slot (or same-name whiteout): take it over. */
		if (spacefree + dsize < newentrysize)
			panic("ulfs_direnter: compact1");
		dirp->d_reclen = spacefree + dsize;
	} else {
		if (spacefree < newentrysize)
			panic("ulfs_direnter: compact2");
		dirp->d_reclen = spacefree;
		ep->d_reclen = ulfs_rw16(dsize, needswap);
		ep = (struct lfs_direct *)((char *)ep + dsize);
	}
	dirp->d_reclen = ulfs_rw16(dirp->d_reclen, needswap);
	dirp->d_ino = ulfs_rw32(dirp->d_ino, needswap);
	if (FSFMT(dvp)) {
#if (BYTE_ORDER == LITTLE_ENDIAN)
		if (needswap == 0) {
#else
		if (needswap != 0) {
#endif
			u_char tmp = dirp->d_namlen;

			dirp->d_namlen = dirp->d_type;
			dirp->d_type = tmp;
		}
	}
#ifdef LFS_DIRHASH
	if (dp->i_dirhash != NULL && (ep->d_ino == 0 ||
	    dirp->d_reclen == spacefree))
		ulfsdirhash_add(dp, dirp,
		    ulr->ulr_offset + ((char *)ep - dirbuf));
#endif
	memcpy((void *)ep, (void *)dirp, (u_int)newentrysize);
#ifdef LFS_DIRHASH
	if (dp->i_dirhash != NULL)
		ulfsdirhash_checkblock(dp, dirbuf -
		    (ulr->ulr_offset & (dirblksiz - 1)),
		    ulr->ulr_offset & ~(dirblksiz - 1));
#endif
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	/*
	 * If all went well, and the directory can be shortened, proceed
	 * with the truncation. Note that we have to unlock the inode for
	 * the entry that we just entered, as the truncation may need to
	 * lock other inodes which can lead to deadlock if we also hold a
	 * lock on the newly entered node.
	 */
	if (error == 0 && ulr->ulr_endoff && ulr->ulr_endoff < dp->i_size) {
#ifdef LFS_DIRHASH
		if (dp->i_dirhash != NULL)
			ulfsdirhash_dirtrunc(dp, ulr->ulr_endoff);
#endif
		(void) lfs_truncate(dvp, (off_t)ulr->ulr_endoff, IO_SYNC, cr);
	}
	return (error);
}

/*
 * Remove a directory entry after a call to namei, using the
 * parameters that ulfs_lookup left in nameidata and in the
 * ulfs_lookup_results.
 *
 * DVP is the directory to be updated. It must be locked.
 * ULR is the ulfs_lookup_results structure from the final lookup step.
 * IP, if not null, is the inode being unlinked.
 * FLAGS may contain DOWHITEOUT.
 * ISRMDIR is not used and (XXX) should be removed.
 *
 * If FLAGS contains DOWHITEOUT the entry is replaced with a whiteout
 * instead of being cleared.
 *
 * ulr->ulr_offset contains the position of the directory entry
 * to be removed.
 *
 * ulr->ulr_reclen contains the size of the directory entry to be
 * removed.
 *
 * ulr->ulr_count contains the size of the *previous* directory
 * entry. This allows finding it, for free space management. If
 * ulr_count is 0, the target entry is at the beginning of the
 * directory. (Does this ever happen? The first entry should be ".",
 * which should only be removed at rmdir time. Does rmdir come here
 * to clear out the "." and ".." entries? Perhaps, but I doubt it.)
 *
 * The space is marked free by adding it to the record length (not
 * name length) of the preceding entry. If the first entry becomes
 * free, it is marked free by setting the inode number to 0.
 *
 * The link count of IP is decremented. Note that this is not the
 * inverse behavior of ulfs_direnter, which does not adjust link
 * counts. Sigh. 
 */
int
ulfs_dirremove(struct vnode *dvp, const struct ulfs_lookup_results *ulr,
    struct inode *ip, int flags, int isrmdir)
{
	struct inode *dp = VTOI(dvp);
	struct lfs_direct *ep;
	struct buf *bp;
	int error;
	const int needswap = ULFS_MPNEEDSWAP(dp->i_lfs);

	if (flags & DOWHITEOUT) {
		/*
		 * Whiteout entry: set d_ino to ULFS_WINO.
		 */
		error = ulfs_blkatoff(dvp, (off_t)ulr->ulr_offset,
		    (void *)&ep, &bp, true);
		if (error)
			return (error);
		ep->d_ino = ulfs_rw32(ULFS_WINO, needswap);
		ep->d_type = LFS_DT_WHT;
		goto out;
	}

	/*
	 * Fetch the previous entry; when ulr_count == 0 this is the
	 * target entry itself (it is first in its block).
	 */
	if ((error = ulfs_blkatoff(dvp,
	    (off_t)(ulr->ulr_offset - ulr->ulr_count),
	    (void *)&ep, &bp, true)) != 0)
		return (error);

#ifdef LFS_DIRHASH
	/*
	 * Remove the dirhash entry. This is complicated by the fact
	 * that `ep' is the previous entry when ulr_count != 0.
	 */
	if (dp->i_dirhash != NULL)
		ulfsdirhash_remove(dp, (ulr->ulr_count == 0) ? ep :
		    (struct lfs_direct *)((char *)ep +
		    ulfs_rw16(ep->d_reclen, needswap)), ulr->ulr_offset);
#endif

	if (ulr->ulr_count == 0) {
		/*
		 * First entry in block: set d_ino to zero.
		 */
		ep->d_ino = 0;
	} else {
		/*
		 * Collapse new free space into previous entry.
		 * (Decode the on-disk reclen, add, re-encode.)
		 */
		ep->d_reclen =
		    ulfs_rw16(ulfs_rw16(ep->d_reclen, needswap) +
		    ulr->ulr_reclen, needswap);
	}

#ifdef LFS_DIRHASH
	if (dp->i_dirhash != NULL) {
		int dirblksiz = ip->i_lfs->um_dirblksiz;
		ulfsdirhash_checkblock(dp, (char *)ep -
		    ((ulr->ulr_offset - ulr->ulr_count) & (dirblksiz - 1)),
		    ulr->ulr_offset & ~(dirblksiz - 1));
	}
#endif

out:
	if (ip) {
		ip->i_nlink--;
		DIP_ASSIGN(ip, nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
	}
	/*
	 * XXX did it ever occur to anyone that it might be a good
	 * idea to restore ip->i_nlink if this fails? Or something?
	 * Currently on error return from this function the state of
	 * ip->i_nlink depends on what happened, and callers
	 * definitely do not take this into account.
	 */
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	/*
	 * If the last named reference to a snapshot goes away,
	 * drop its snapshot reference so that it will be reclaimed
	 * when last open reference goes away.
	 */
	if (ip != 0 && (ip->i_flags & SF_SNAPSHOT) != 0 &&
	    ip->i_nlink == 0)
		ulfs_snapgone(ip);
	return (error);
}

/*
 * Rewrite an existing directory entry to point at the inode supplied.
 *
 * DP is the directory to update.
 * OFFSET is the position of the entry in question. It may come
 * from ulr_offset of a ulfs_lookup_results.
 * OIP is the old inode the directory previously pointed to.
 * NEWINUM is the number of the new inode.
 * NEWTYPE is the new value for the type field of the directory entry.
 * (This is ignored if the fs doesn't support that.)
 * ISRMDIR is not used and (XXX) should be removed.
 * IFLAGS are added to DP's inode flags.
 *
 * The link count of OIP is decremented. Note that the link count of
 * the new inode is *not* incremented. Yay for symmetry.
 */
int
ulfs_dirrewrite(struct inode *dp, off_t offset,
    struct inode *oip, ino_t newinum, int newtype,
    int isrmdir, int iflags)
{
	struct buf *bp;
	struct lfs_direct *ep;
	struct vnode *vdp = ITOV(dp);
	int error;

	error = ulfs_blkatoff(vdp, offset, (void *)&ep, &bp, true);
	if (error)
		return (error);
	ep->d_ino = ulfs_rw32(newinum, ULFS_IPNEEDSWAP(dp));
	/* Old (4.2BSD) format directories carry no d_type field. */
	if (!FSFMT(vdp))
		ep->d_type = newtype;
	oip->i_nlink--;
	DIP_ASSIGN(oip, nlink, oip->i_nlink);
	oip->i_flag |= IN_CHANGE;
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= iflags;
	/*
	 * If the last named reference to a snapshot goes away,
	 * drop its snapshot reference so that it will be reclaimed
	 * when last open reference goes away.
	 */
	if ((oip->i_flags & SF_SNAPSHOT) != 0 && oip->i_nlink == 0)
		ulfs_snapgone(oip);
	return (error);
}

/*
 * Check if a directory is empty or not.
 * Inode supplied must be locked.
 *
 * Using a struct lfs_dirtemplate here is not precisely
 * what we want, but better than using a struct lfs_direct.
 *
 * NB: does not handle corrupted directories.
 */
int
ulfs_dirempty(struct inode *ip, ino_t parentino, kauth_cred_t cred)
{
	doff_t off;
	struct lfs_dirtemplate dbuf;
	struct lfs_direct *dp = (struct lfs_direct *)&dbuf;
	int error, namlen;
	size_t count;
	const int needswap = ULFS_IPNEEDSWAP(ip);
	/* Read just enough of each entry to see its header and a "." name. */
#define MINDIRSIZ (sizeof (struct lfs_dirtemplate) / 2)

	for (off = 0; off < ip->i_size;
	    off += ulfs_rw16(dp->d_reclen, needswap)) {
		error = vn_rdwr(UIO_READ, ITOV(ip), (void *)dp, MINDIRSIZ, off,
		    UIO_SYSSPACE, IO_NODELOCKED, cred, &count, NULL);
		/*
		 * Since we read MINDIRSIZ, residual must
		 * be 0 unless we're at end of file.
		 */
		if (error || count != 0)
			return (0);
		/* avoid infinite loops */
		if (dp->d_reclen == 0)
			return (0);
		/* skip empty entries */
		if (dp->d_ino == 0 ||
		    ulfs_rw32(dp->d_ino, needswap) == ULFS_WINO)
			continue;
		/* accept only "." and ".." */
#if (BYTE_ORDER == LITTLE_ENDIAN)
		/* Old-format LE dirs store namlen in the d_type slot. */
		if (FSFMT(ITOV(ip)) && needswap == 0)
			namlen = dp->d_type;
		else
			namlen = dp->d_namlen;
#else
		if (FSFMT(ITOV(ip)) && needswap != 0)
			namlen = dp->d_type;
		else
			namlen = dp->d_namlen;
#endif
		if (namlen > 2)
			return (0);
		if (dp->d_name[0] != '.')
			return (0);
		/*
		 * At this point namlen must be 1 or 2.
		 * 1 implies ".", 2 implies ".." if second
		 * char is also "."
		 */
		if (namlen == 1 &&
		    ulfs_rw32(dp->d_ino, needswap) == ip->i_number)
			continue;
		if (dp->d_name[1] == '.' &&
		    ulfs_rw32(dp->d_ino, needswap) == parentino)
			continue;
		/* Any other entry means the directory is not empty. */
		return (0);
	}
	return (1);
}

/* Number of extra blocks ulfs_blkatoff reads ahead (tunable). */
#define ULFS_DIRRABLKS 0
int ulfs_dirrablks = ULFS_DIRRABLKS;

/*
 * ulfs_blkatoff: Return buffer with the contents of block "offset" from
 * the beginning of directory "vp". If "res" is non-NULL, fill it in with
 * a pointer to the remaining space in the directory. If the caller intends
 * to modify the buffer returned, "modify" must be true.
*/ int ulfs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp, bool modify) { struct inode *ip __diagused; struct buf *bp; daddr_t lbn; const int dirrablks = ulfs_dirrablks; daddr_t *blks; int *blksizes; int run, error; struct mount *mp = vp->v_mount; const int bshift = mp->mnt_fs_bshift; const int bsize = 1 << bshift; off_t eof; blks = kmem_alloc((1 + dirrablks) * sizeof(daddr_t), KM_SLEEP); blksizes = kmem_alloc((1 + dirrablks) * sizeof(int), KM_SLEEP); ip = VTOI(vp); KASSERT(vp->v_size == ip->i_size); GOP_SIZE(vp, vp->v_size, &eof, 0); lbn = offset >> bshift; for (run = 0; run <= dirrablks;) { const off_t curoff = lbn << bshift; const int size = MIN(eof - curoff, bsize); if (size == 0) { break; } KASSERT(curoff < eof); blks[run] = lbn; blksizes[run] = size; lbn++; run++; if (size != bsize) { break; } } KASSERT(run >= 1); error = breadn(vp, blks[0], blksizes[0], &blks[1], &blksizes[1], run - 1, NOCRED, (modify ? B_MODIFY : 0), &bp); if (error != 0) { *bpp = NULL; goto out; } if (res) { *res = (char *)bp->b_data + (offset & (bsize - 1)); } *bpp = bp; out: kmem_free(blks, (1 + dirrablks) * sizeof(daddr_t)); kmem_free(blksizes, (1 + dirrablks) * sizeof(int)); return error; }