/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred,
    struct thread *td)
{
	struct vnode *ovp = vp;
	int32_t lastblock;
	struct inode *oip;
	int32_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	uint32_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct m_ext2fs *fs;
	struct buf *bp;
	int offset, size, level;
	e4fs_daddr_t count, nblocks, blocksreleased = 0;
	int error, i, allerror;
	off_t osize;
#ifdef INVARIANTS
	struct bufobj *bo;
#endif

	oip = VTOI(ovp);
#ifdef INVARIANTS
	bo = &ovp->v_bufobj;
#endif

	ASSERT_VOP_LOCKED(vp, "ext2_truncate");

	if (length < 0)
		return (EINVAL);

	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, 0));
	}
	fs = oip->i_e2fs;
	osize = oip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		if (length > oip->i_e2fs->e2fs_maxfilesize)
			return (EFBIG);
		vnode_pager_setsize(ovp, length);
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, flags);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		oip->i_size = length;
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ext2_update(ovp, !DOINGASYNC(ovp)));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, flags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->e2fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(ovp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->e2fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->e2fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[NDADDR + level] = oip->i_ib[level];
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < NDADDR; i++) {
		oldblks[i] = oip->i_db[i];
		if (i > lastblock)
			oip->i_db[i] = 0;
	}
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ext2_update(ovp, !DOINGASYNC(ovp));

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < NDADDR; i++) {
		newblks[i] = oip->i_db[i];
		oip->i_db[i] = oldblks[i];
	}
	for (i = 0; i < NIADDR; i++) {
		newblks[NDADDR + i] = oip->i_ib[i];
		oip->i_ib[i] = oldblks[NDADDR + i];
	}
	oip->i_size = osize;
	error = vtruncbuf(ovp, cred, length, (int)fs->e2fs_bsize);
	if (error && (allerror == 0))
		allerror = error;
	vnode_pager_setsize(ovp, length);

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->e2fs_fsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ext2_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	BO_LOCK(bo);
	if (length == 0 &&
	    (bo->bo_dirty.bv_cnt != 0 || bo->bo_clean.bv_cnt != 0))
		panic("itrunc3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	if (oip->i_blocks >= blocksreleased)
		oip->i_blocks -= blocksreleased;
	else	/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
	return (allerror);
}
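/*
 * The index arithmetic above is the core of every truncate variant in
 * this collection: lastblock is the last logical block to keep, and
 * each lastiblock[] entry rebases that index into the single-, double-,
 * and triple-indirect ranges.  A minimal standalone sketch of the same
 * computation, using made-up geometry (NDADDR_X, NINDIR_X, BSIZE_X are
 * illustrative, not any real filesystem's values):
 */
#include <stdio.h>

#define	NDADDR_X	12	/* direct blocks per inode (illustrative) */
#define	NINDIR_X	1024	/* pointers per indirect block (illustrative) */
#define	BSIZE_X		4096	/* block size in bytes (illustrative) */

int
main(void)
{
	long long length = 123456789;	/* new file size in bytes */
	/* Last logical block to keep; -1 when truncating to 0. */
	long long lastblock = (length + BSIZE_X - 1) / BSIZE_X - 1;
	long long last_single = lastblock - NDADDR_X;
	long long last_double = last_single - NINDIR_X;
	long long last_triple = last_double - (long long)NINDIR_X * NINDIR_X;

	/* A negative value means that entire indirect tree is freed. */
	printf("lastblock=%lld single=%lld double=%lld triple=%lld\n",
	    lastblock, last_single, last_double, last_triple);
	return (0);
}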
void
source(int argc, char *argv[])
{
	struct stat stb;
	static BUF buffer;
	BUF *bp;
	off_t i;
	int amt, fd, haderr, indx, result;
	char *last, *name, buf[BUFSIZ];

	for (indx = 0; indx < argc; ++indx) {
		name = argv[indx];
		if ((fd = open(name, O_RDONLY, 0)) < 0)
			goto syserr;
		if (fstat(fd, &stb)) {
syserr:			run_err("%s: %s", name, strerror(errno));
			goto next;
		}
		switch (stb.st_mode & S_IFMT) {
		case S_IFREG:
			break;
		case S_IFDIR:
			if (iamrecursive) {
				rsource(name, &stb);
				goto next;
			}
			/* FALLTHROUGH */
		default:
			run_err("%s: not a regular file", name);
			goto next;
		}
		if ((last = strrchr(name, '/')) == NULL)
			last = name;
		else
			++last;
		if (pflag) {
			/*
			 * Make it compatible with possible future
			 * versions expecting microseconds.
			 */
			(void)snprintf(buf, sizeof(buf), "T%ld 0 %ld 0\n",
			    (long)stb.st_mtim.tv_sec,
			    (long)stb.st_atim.tv_sec);
			(void)write(rem, buf, strlen(buf));
			if (response() < 0)
				goto next;
		}
#define	MODEMASK (S_ISUID|S_ISGID|S_ISTXT|S_IRWXU|S_IRWXG|S_IRWXO)
		(void)snprintf(buf, sizeof(buf), "C%04o %jd %s\n",
		    stb.st_mode & MODEMASK, (intmax_t)stb.st_size, last);
		(void)write(rem, buf, strlen(buf));
		if (response() < 0)
			goto next;
		if ((bp = allocbuf(&buffer, fd, BUFSIZ)) == NULL) {
next:			if (fd >= 0)
				(void)close(fd);
			continue;
		}

		/* Keep writing after an error so that we stay sync'd up. */
		for (haderr = i = 0; i < stb.st_size; i += bp->cnt) {
			amt = bp->cnt;
			if (i + amt > stb.st_size)
				amt = stb.st_size - i;
			if (!haderr) {
				result = read(fd, bp->buf, amt);
				if (result != amt)
					haderr = result >= 0 ? EIO : errno;
			}
			if (haderr)
				(void)write(rem, bp->buf, amt);
			else {
				result = write(rem, bp->buf, amt);
				if (result != amt)
					haderr = result >= 0 ? EIO : errno;
			}
		}
		if (close(fd) && !haderr)
			haderr = errno;
		if (!haderr)
			(void)write(rem, "", 1);
		else
			run_err("%s: %s", name, strerror(haderr));
		(void)response();
	}
}
void
source(int argc, char **argv)
{
	struct stat stb;
	static BUF buffer;
	BUF *bp;
	off_t i, statbytes;
	size_t amt;
	int fd = -1, haderr, indx;
	char *last, *name, buf[16384], encname[MAXPATHLEN];
	int len;

	for (indx = 0; indx < argc; ++indx) {
		fd = -1;
		name = argv[indx];
		statbytes = 0;
		len = strlen(name);
		while (len > 1 && name[len-1] == '/')
			name[--len] = '\0';
		if ((fd = open(name, O_RDONLY|O_NONBLOCK, 0)) < 0)
			goto syserr;
		if (strchr(name, '\n') != NULL) {
			strvisx(encname, name, len, VIS_NL);
			name = encname;
		}
		if (fstat(fd, &stb) < 0) {
syserr:			run_err("%s: %s", name, strerror(errno));
			goto next;
		}
		if (stb.st_size < 0) {
			run_err("%s: %s", name, "Negative file size");
			goto next;
		}
		unset_nonblock(fd);
		switch (stb.st_mode & S_IFMT) {
		case S_IFREG:
			break;
		case S_IFDIR:
			if (iamrecursive) {
				rsource(name, &stb);
				goto next;
			}
			/* FALLTHROUGH */
		default:
			run_err("%s: not a regular file", name);
			goto next;
		}
		if ((last = strrchr(name, '/')) == NULL)
			last = name;
		else
			++last;
		curfile = last;
		if (pflag) {
			/*
			 * Make it compatible with possible future
			 * versions expecting microseconds.
			 */
			(void) snprintf(buf, sizeof buf, "T%lu 0 %lu 0\n",
			    (u_long) (stb.st_mtime < 0 ? 0 : stb.st_mtime),
			    (u_long) (stb.st_atime < 0 ? 0 : stb.st_atime));
			if (verbose_mode) {
				fprintf(stderr, "File mtime %ld atime %ld\n",
				    (long)stb.st_mtime, (long)stb.st_atime);
				fprintf(stderr, "Sending file timestamps: %s",
				    buf);
			}
			(void) atomicio(vwrite, remout, buf, strlen(buf));
			if (response() < 0)
				goto next;
		}
#define	FILEMODEMASK	(S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO)
		snprintf(buf, sizeof buf, "C%04o %lld %s\n",
		    (u_int) (stb.st_mode & FILEMODEMASK),
		    (long long)stb.st_size, last);
		if (verbose_mode) {
			fprintf(stderr, "Sending file modes: %s", buf);
		}
		(void) atomicio(vwrite, remout, buf, strlen(buf));
		if (response() < 0)
			goto next;
		if ((bp = allocbuf(&buffer, fd, COPY_BUFLEN)) == NULL) {
next:			if (fd != -1) {
				(void) close(fd);
				fd = -1;
			}
			continue;
		}
		if (showprogress)
			start_progress_meter(curfile, stb.st_size, &statbytes);
		set_nonblock(remout);
		for (haderr = i = 0; i < stb.st_size; i += bp->cnt) {
			amt = bp->cnt;
			if (i + (off_t)amt > stb.st_size)
				amt = stb.st_size - i;
			if (!haderr) {
				if (atomicio(read, fd, bp->buf, amt) != amt)
					haderr = errno;
			}
			/* Keep writing after error to retain sync */
			if (haderr) {
				(void)atomicio(vwrite, remout, bp->buf, amt);
				continue;
			}
			if (atomicio6(vwrite, remout, bp->buf, amt, scpio,
			    &statbytes) != amt)
				haderr = errno;
		}
		unset_nonblock(remout);
		if (showprogress)
			stop_progress_meter();

		if (fd != -1) {
			if (close(fd) < 0 && !haderr)
				haderr = errno;
			fd = -1;
		}
		if (!haderr)
			(void) atomicio(vwrite, remout, empty, 1);
		else
			run_err("%s: %s", name, strerror(haderr));
		(void) response();
	}
}
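/*
 * Both source() variants speak the same line-oriented protocol: an
 * optional "T<mtime> 0 <atime> 0\n" record when -p is in effect, then
 * a "C<mode> <size> <name>\n" header, then exactly <size> bytes of
 * data followed by a single '\0', with the peer acking each step.  A
 * sketch of just the header formatting; the helper name and buffers
 * are hypothetical, only the record layout comes from the code above.
 */
#include <stdio.h>
#include <sys/stat.h>

static void
format_headers(const struct stat *st, const char *base,
    char *tbuf, size_t tlen, char *cbuf, size_t clen)
{
	/* Permission bits only: setuid/setgid plus rwx for u/g/o. */
	mode_t mask = S_ISUID | S_ISGID | S_IRWXU | S_IRWXG | S_IRWXO;

	/* The two zero fields are reserved for microseconds. */
	snprintf(tbuf, tlen, "T%lu 0 %lu 0\n",
	    (unsigned long)st->st_mtime, (unsigned long)st->st_atime);
	snprintf(cbuf, clen, "C%04o %lld %s\n",
	    (unsigned int)(st->st_mode & mask),
	    (long long)st->st_size, base);
}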
void
sink(int argc, char **argv)
{
	static BUF buffer;
	struct stat stb;
	enum {
		YES, NO, DISPLAYED
	} wrerr;
	BUF *bp;
	off_t i;
	size_t j, count;
	int amt, exists, first, ofd;
	mode_t mode, omode, mask;
	off_t size, statbytes;
	int setimes, targisdir, wrerrno = 0;
	char ch, *cp, *np, *targ, *vect[1], buf[16384];
	const char *why;
	struct timeval tv[2];

#define	atime	tv[0]
#define	mtime	tv[1]
#define	SCREWUP(str)	{ why = str; goto screwup; }

	setimes = targisdir = 0;
	mask = umask(0);
	if (!pflag)
		(void) umask(mask);
	if (argc != 1) {
		run_err("ambiguous target");
		exit(1);
	}
	targ = *argv;
	if (targetshouldbedirectory)
		verifydir(targ);

	(void) atomicio(vwrite, remout, empty, 1);
	if (stat(targ, &stb) == 0 && S_ISDIR(stb.st_mode))
		targisdir = 1;
	for (first = 1;; first = 0) {
		cp = buf;
		if (atomicio(read, remin, cp, 1) != 1)
			return;
		if (*cp++ == '\n')
			SCREWUP("unexpected <newline>");
		do {
			if (atomicio(read, remin, &ch, sizeof(ch)) !=
			    sizeof(ch))
				SCREWUP("lost connection");
			*cp++ = ch;
		} while (cp < &buf[sizeof(buf) - 1] && ch != '\n');
		*cp = 0;
		if (verbose_mode)
			fprintf(stderr, "Sink: %s", buf);

		if (buf[0] == '\01' || buf[0] == '\02') {
			if (iamremote == 0)
				(void) atomicio(vwrite, STDERR_FILENO,
				    buf + 1, strlen(buf + 1));
			if (buf[0] == '\02')
				exit(1);
			++errs;
			continue;
		}
		if (buf[0] == 'E') {
			(void) atomicio(vwrite, remout, empty, 1);
			return;
		}
		if (ch == '\n')
			*--cp = 0;

		cp = buf;
		if (*cp == 'T') {
			setimes++;
			cp++;
			mtime.tv_sec = strtol(cp, &cp, 10);
			if (!cp || *cp++ != ' ')
				SCREWUP("mtime.sec not delimited");
			mtime.tv_usec = strtol(cp, &cp, 10);
			if (!cp || *cp++ != ' ')
				SCREWUP("mtime.usec not delimited");
			atime.tv_sec = strtol(cp, &cp, 10);
			if (!cp || *cp++ != ' ')
				SCREWUP("atime.sec not delimited");
			atime.tv_usec = strtol(cp, &cp, 10);
			if (!cp || *cp++ != '\0')
				SCREWUP("atime.usec not delimited");
			(void) atomicio(vwrite, remout, empty, 1);
			continue;
		}
		if (*cp != 'C' && *cp != 'D') {
			/*
			 * Check for the case "rcp remote:foo\* local:bar".
			 * In this case, the line "No match." can be returned
			 * by the shell before the rcp command on the remote is
			 * executed so the ^Aerror_message convention isn't
			 * followed.
			 */
			if (first) {
				run_err("%s", cp);
				exit(1);
			}
			SCREWUP("expected control record");
		}
		mode = 0;
		for (++cp; cp < buf + 5; cp++) {
			if (*cp < '0' || *cp > '7')
				SCREWUP("bad mode");
			mode = (mode << 3) | (*cp - '0');
		}
		if (*cp++ != ' ')
			SCREWUP("mode not delimited");

		for (size = 0; isdigit((unsigned char)*cp);)
			size = size * 10 + (*cp++ - '0');
		if (*cp++ != ' ')
			SCREWUP("size not delimited");
		if ((strchr(cp, '/') != NULL) || (strcmp(cp, "..") == 0)) {
			run_err("error: unexpected filename: %s", cp);
			exit(1);
		}
		if (targisdir) {
			static char *namebuf;
			static size_t cursize;
			size_t need;

			need = strlen(targ) + strlen(cp) + 250;
			if (need > cursize) {
				if (namebuf)
					xfree(namebuf);
				namebuf = xmalloc(need);
				cursize = need;
			}
			(void) snprintf(namebuf, need, "%s%s%s", targ,
			    strcmp(targ, "/") ? "/" : "", cp);
			np = namebuf;
		} else
			np = targ;
		curfile = cp;
		exists = stat(np, &stb) == 0;
		if (buf[0] == 'D') {
			int mod_flag = pflag;

			if (!iamrecursive)
				SCREWUP("received directory without -r");
			if (exists) {
				if (!S_ISDIR(stb.st_mode)) {
					errno = ENOTDIR;
					goto bad;
				}
				if (pflag)
					(void) chmod(np, mode);
			} else {
				/* Handle copying from a read-only directory */
				mod_flag = 1;
				if (mkdir(np, mode | S_IRWXU) < 0)
					goto bad;
			}
			vect[0] = xstrdup(np);
			sink(1, vect);
			if (setimes) {
				setimes = 0;
				if (utimes(vect[0], tv) < 0)
					run_err("%s: set times: %s",
					    vect[0], strerror(errno));
			}
			if (mod_flag)
				(void) chmod(vect[0], mode);
			if (vect[0])
				xfree(vect[0]);
			continue;
		}
		omode = mode;
		mode |= S_IWRITE;
		if ((ofd = open(np, O_WRONLY|O_CREAT, mode)) < 0) {
bad:			run_err("%s: %s", np, strerror(errno));
			continue;
		}
		(void) atomicio(vwrite, remout, empty, 1);
		if ((bp = allocbuf(&buffer, ofd, COPY_BUFLEN)) == NULL) {
			(void) close(ofd);
			continue;
		}
		cp = bp->buf;
		wrerr = NO;

		statbytes = 0;
		if (showprogress)
			start_progress_meter(curfile, size, &statbytes);
		set_nonblock(remin);
		for (count = i = 0; i < size; i += bp->cnt) {
			amt = bp->cnt;
			if (i + amt > size)
				amt = size - i;
			count += amt;
			do {
				j = atomicio6(read, remin, cp, amt,
				    scpio, &statbytes);
				if (j == 0) {
					run_err("%s", errno != EPIPE ?
					    strerror(errno) :
					    "dropped connection");
					exit(1);
				}
				amt -= j;
				cp += j;
			} while (amt > 0);

			if (count == bp->cnt) {
				/* Keep reading so we stay sync'd up. */
				if (wrerr == NO) {
					if (atomicio(vwrite, ofd, bp->buf,
					    count) != count) {
						wrerr = YES;
						wrerrno = errno;
					}
				}
				count = 0;
				cp = bp->buf;
			}
		}
		unset_nonblock(remin);
		if (showprogress)
			stop_progress_meter();
		if (count != 0 && wrerr == NO &&
		    atomicio(vwrite, ofd, bp->buf, count) != count) {
			wrerr = YES;
			wrerrno = errno;
		}
		if (wrerr == NO && (!exists || S_ISREG(stb.st_mode)) &&
		    ftruncate(ofd, size) != 0) {
			run_err("%s: truncate: %s", np, strerror(errno));
			wrerr = DISPLAYED;
		}
		if (pflag) {
			if (exists || omode != mode)
				if (fchmod(ofd, omode)) {
					run_err("%s: set mode: %s",
					    np, strerror(errno));
					wrerr = DISPLAYED;
				}
		} else {
			if (!exists && omode != mode)
				if (fchmod(ofd, omode & ~mask)) {
					run_err("%s: set mode: %s",
					    np, strerror(errno));
					wrerr = DISPLAYED;
				}
		}
		if (close(ofd) == -1) {
			wrerr = YES;
			wrerrno = errno;
		}
		(void) response();
		if (setimes && wrerr == NO) {
			setimes = 0;
			if (utimes(np, tv) < 0) {
				run_err("%s: set times: %s",
				    np, strerror(errno));
				wrerr = DISPLAYED;
			}
		}
		switch (wrerr) {
		case YES:
			run_err("%s: %s", np, strerror(wrerrno));
			break;
		case NO:
			(void) atomicio(vwrite, remout, empty, 1);
			break;
		case DISPLAYED:
			break;
		}
	}
screwup:
	run_err("protocol error: %s", why);
	exit(1);
}
void
source(int argc, char **argv)
{
	struct stat stb;
	static BUF buffer;
	BUF *bp;
	off_t i, amt, statbytes;
	size_t result;
	int fd = -1, haderr, indx;
	char *last, *name, buf[2048];
	int len;

	for (indx = 0; indx < argc; ++indx) {
		name = argv[indx];
		statbytes = 0;
		len = strlen(name);
		while (len > 1 && name[len-1] == '/')
			name[--len] = '\0';
		if (strchr(name, '\n') != NULL) {
			run_err("%s: skipping, filename contains a newline",
			    name);
			goto next;
		}
		if ((fd = open(name, O_RDONLY, 0)) < 0)
			goto syserr;
		if (fstat(fd, &stb) < 0) {
syserr:			run_err("%s: %s", name, strerror(errno));
			goto next;
		}
		switch (stb.st_mode & S_IFMT) {
		case S_IFREG:
			break;
		case S_IFDIR:
			if (iamrecursive) {
				rsource(name, &stb);
				goto next;
			}
			/* FALLTHROUGH */
		default:
			run_err("%s: not a regular file", name);
			goto next;
		}
		if ((last = strrchr(name, '/')) == NULL)
			last = name;
		else
			++last;
		curfile = last;
		if (pflag) {
			/*
			 * Make it compatible with possible future
			 * versions expecting microseconds.
			 */
			(void) snprintf(buf, sizeof buf, "T%lu 0 %lu 0\n",
			    (u_long) stb.st_mtime,
			    (u_long) stb.st_atime);
			(void) atomicio(vwrite, remout, buf, strlen(buf));
			if (response() < 0)
				goto next;
		}
#define	FILEMODEMASK	(S_ISUID|S_ISGID|S_IRWXU|S_IRWXG|S_IRWXO)
		snprintf(buf, sizeof buf, "C%04o %lld %s\n",
		    (u_int) (stb.st_mode & FILEMODEMASK),
		    (long long)stb.st_size, last);
		if (verbose_mode) {
			fprintf(stderr, "Sending file modes: %s", buf);
		}
		(void) atomicio(vwrite, remout, buf, strlen(buf));
		if (response() < 0)
			goto next;
		if ((bp = allocbuf(&buffer, fd, 2048)) == NULL) {
next:			if (fd != -1) {
				(void) close(fd);
				fd = -1;
			}
			continue;
		}
#if PROGRESS_METER
		if (showprogress)
			start_progress_meter(curfile, stb.st_size, &statbytes);
#endif
		/* Keep writing after an error so that we stay sync'd up. */
		for (haderr = i = 0; i < stb.st_size; i += bp->cnt) {
			amt = bp->cnt;
			if (i + amt > stb.st_size)
				amt = stb.st_size - i;
			if (!haderr) {
				result = atomicio(read, fd, bp->buf, amt);
				if (result != amt)
					haderr = errno;
			}
			if (haderr)
				(void) atomicio(vwrite, remout, bp->buf, amt);
			else {
				result = atomicio(vwrite, remout, bp->buf,
				    amt);
				if (result != amt)
					haderr = errno;
				statbytes += result;
			}
		}
#ifdef PROGRESS_METER
		if (showprogress)
			stop_progress_meter();
#endif
		if (fd != -1) {
			if (close(fd) < 0 && !haderr)
				haderr = errno;
			fd = -1;
		}
		if (!haderr)
			(void) atomicio(vwrite, remout, "", 1);
		else
			run_err("%s: %s", name, strerror(haderr));
		(void) response();
	}
}
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	ufs_daddr_t lastblock;
	struct inode *oip;
	ufs_daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	ufs_daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct fs *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	oip = VTOI(ovp);
	fs = oip->i_fs;
	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
	if (ovp->v_type == VLNK &&
	    (oip->i_size < ovp->v_mount->mnt_maxsymlinklen ||
	     oip->i_din.di_blocks == 0)) {
#ifdef DIAGNOSTIC
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif /* DIAGNOSTIC */
		bzero((char *)&oip->i_shortlink, (uint)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
#ifdef QUOTA
	error = ufs_getinoquota(oip);
	if (error)
		return (error);
#endif
	ovp->v_lasta = ovp->v_clen = ovp->v_cstart = ovp->v_lastw = 0;
	if (DOINGSOFTDEP(ovp)) {
		if (length > 0 || softdep_slowdown(ovp)) {
			/*
			 * If a file is only partially truncated, then
			 * we have to clean up the data structures
			 * describing the allocation past the truncation
			 * point.  Finding and deallocating those structures
			 * is a lot of work.  Since partial truncation occurs
			 * rarely, we solve the problem by syncing the file
			 * so that it will have no data structures left.
			 */
			if ((error = VOP_FSYNC(ovp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
#ifdef QUOTA
			(void) ufs_chkdq(oip, -oip->i_blocks, NOCRED, 0);
#endif
			softdep_setup_freeblocks(oip, length);
			vinvalbuf(ovp, 0, 0, 0);
			nvnode_pager_setsize(ovp, 0, fs->fs_bsize, 0);
			oip->i_flag |= IN_CHANGE | IN_UPDATE;
			return (ffs_update(ovp, 0));
		}
	}
	osize = oip->i_size;

	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 *
	 * nvextendbuf() only breads the old buffer.  The blocksize
	 * of the new buffer must be specified so it knows how large
	 * to make the VM object.
	 */
	if (osize < length) {
		nvextendbuf(vp, osize, length,
			    blkoffsize(fs, oip, osize),	/* oblksize */
			    blkoffresize(fs, length),	/* nblksize */
			    blkoff(fs, osize),
			    blkoff(fs, length),
			    0);

		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		/* BALLOC will reallocate the fragment at the old EOF */
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);
		oip->i_size = length;
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(ovp, 1));
	}

	/*
	 * Shorten the size of the file.
	 *
	 * NOTE: The block size specified in nvtruncbuf() is the blocksize
	 *	 of the buffer containing length prior to any reallocation
	 *	 of the block.
	 */
	allerror = nvtruncbuf(ovp, length, blkoffsize(fs, oip, length),
			      blkoff(fs, length), 0);
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = VOP_BALLOC(ovp, length - 1, 1, cred, aflags, &bp);
		if (error)
			return (error);

		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 *
		 * nvtruncbuf() may have re-dirtied the underlying block
		 * as part of its truncation zeroing code.  To avoid a
		 * 'locking against myself' panic in the second fsync we
		 * can simply undirty the bp since the redirtying was
		 * related to areas of the buffer that we are going to
		 * throw away anyway, and we will b*write() the remainder
		 * anyway down below.
		 */
		if (DOINGSOFTDEP(ovp) && lbn < NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize) {
			bundirty(bp);
			error = VOP_FSYNC(ovp, MNT_WAIT, 0);
			if (error) {
				bdwrite(bp);
				return (error);
			}
		}
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
#if 0
		/* remove - nvtruncbuf deals with this */
		if (ovp->v_type != VDIR)
			bzero((char *)bp->b_data + offset,
			      (uint)(size - offset));
#endif
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (aflags & B_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}

	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);

	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	error = ffs_update(ovp, 1);
	if (error && allerror == 0)
		allerror = error;

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;
	if (error && allerror == 0)
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ffs_indirtrunc(oip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ffs_blkfree(oip, bn, fs->fs_bsize);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ffs_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("ffs_truncate1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("ffs_truncate2");
	if (length == 0 && !RB_EMPTY(&ovp->v_rbdirty_tree))
		panic("ffs_truncate3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) ufs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
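/*
 * The fragment-trimming step above recurs in every truncate variant
 * here: compare the last direct block's allocated size before and
 * after the length change, then free the tail fragments.  A standalone
 * sketch of that arithmetic with made-up fragment geometry (FSIZE_X
 * and the function name are illustrative):
 */
#include <stdio.h>

#define	FSIZE_X	1024	/* fragment size in bytes (illustrative) */

/*
 * Given the fragment number of the last direct block and its old and
 * new allocated sizes (multiples of FSIZE_X), report the run to free.
 */
static void
trim_tail(long bn, long oldspace, long newspace)
{
	if (oldspace - newspace > 0) {
		/* Keep newspace/FSIZE_X frags; free the rest. */
		long freebn = bn + newspace / FSIZE_X;

		printf("free %ld bytes at frag %ld\n",
		    oldspace - newspace, freebn);
	}
}

int
main(void)
{
	trim_tail(1000, 8192, 3072);	/* e.g. an 8K block shrunk to 3K */
	return (0);
}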
/*
 * Truncate the inode ip to at most length size, freeing the
 * disk blocks.
 */
int
ffs_truncate(vnode *vp, off_t length, int flags, Ucred *cred)
{
	print("HARVEY TODO: %s\n", __func__);
#if 0
	struct inode *ip;
	ufs2_daddr_t bn, lbn, lastblock, lastiblock[UFS_NIADDR];
	ufs2_daddr_t indir_lbn[UFS_NIADDR], oldblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t newblks[UFS_NDADDR + UFS_NIADDR];
	ufs2_daddr_t count, blocksreleased = 0, datablocks, blkno;
	struct bufobj *bo;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	int softdeptrunc, journaltrunc;
	int needextclean, extblocks;
	int offset, size, level, nblocks;
	int i, error, allerror, indiroff, waitforupdate;
	off_t osize;

	ip = VTOI(vp);
	ump = VFSTOUFS(vp->v_mount);
	fs = ump->um_fs;
	bo = &vp->v_bufobj;

	ASSERT_VOP_LOCKED(vp, "ffs_truncate");

	if (length < 0)
		return (EINVAL);
	if (length > fs->fs_maxfilesize)
		return (EFBIG);
#ifdef QUOTA
	error = getinoquota(ip);
	if (error)
		return (error);
#endif
	/*
	 * Historically clients did not have to specify which data
	 * they were truncating. So, if not specified, we assume
	 * traditional behavior, e.g., just the normal data.
	 */
	if ((flags & (IO_EXT | IO_NORMAL)) == 0)
		flags |= IO_NORMAL;
	if (!DOINGSOFTDEP(vp) && !DOINGASYNC(vp))
		flags |= IO_SYNC;
	waitforupdate = (flags & IO_SYNC) != 0 || !DOINGASYNC(vp);
	/*
	 * If we are truncating the extended-attributes, and cannot
	 * do it with soft updates, then do it slowly here. If we are
	 * truncating both the extended attributes and the file contents
	 * (e.g., the file is being unlinked), then pick it off with
	 * soft updates below.
	 */
	allerror = 0;
	needextclean = 0;
	softdeptrunc = 0;
	journaltrunc = DOINGSUJ(vp);
	if (journaltrunc == 0 && DOINGSOFTDEP(vp) && length == 0)
		softdeptrunc = !softdep_slowdown(vp);
	extblocks = 0;
	datablocks = DIP(ip, i_blocks);
	if (fs->fs_magic == FS_UFS2_MAGIC && ip->i_din2->di_extsize > 0) {
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
		datablocks -= extblocks;
	}
	if ((flags & IO_EXT) && extblocks > 0) {
		if (length != 0)
			panic("ffs_truncate: partial trunc of extdata");
		if (softdeptrunc || journaltrunc) {
			if ((flags & IO_NORMAL) == 0)
				goto extclean;
			needextclean = 1;
		} else {
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
#ifdef QUOTA
			(void) chkdq(ip, -extblocks, NOCRED, 0);
#endif
			vinvalbuf(vp, V_ALT, 0, 0);
			vn_pages_remove(vp,
			    OFF_TO_IDX(lblktosize(fs, -extblocks)), 0);
			osize = ip->i_din2->di_extsize;
			ip->i_din2->di_blocks -= extblocks;
			ip->i_din2->di_extsize = 0;
			for (i = 0; i < UFS_NXADDR; i++) {
				oldblks[i] = ip->i_din2->di_extb[i];
				ip->i_din2->di_extb[i] = 0;
			}
			ip->i_flag |= IN_CHANGE;
			if ((error = ffs_update(vp, waitforupdate)))
				return (error);
			for (i = 0; i < UFS_NXADDR; i++) {
				if (oldblks[i] == 0)
					continue;
				ffs_blkfree(ump, fs, ITODEVVP(ip), oldblks[i],
				    sblksize(fs, osize, i), ip->i_number,
				    vp->v_type, nil);
			}
		}
	}
	if ((flags & IO_NORMAL) == 0)
		return (0);
	if (vp->v_type == VLNK &&
	    (ip->i_size < vp->v_mount->mnt_maxsymlinklen ||
	     datablocks == 0)) {
#ifdef INVARIANTS
		if (length != 0)
			panic("ffs_truncate: partial truncate of symlink");
#endif
		bzero(SHORTLINK(ip), (uint)ip->i_size);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, waitforupdate));
	}
	if (ip->i_size == length) {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (needextclean)
			goto extclean;
		return (ffs_update(vp, 0));
	}
	if (fs->fs_ronly)
		panic("ffs_truncate: read-only filesystem");
	if (IS_SNAPSHOT(ip))
		ffs_snapremove(vp);
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	osize = ip->i_size;
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		vnode_pager_setsize(vp, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error) {
			vnode_pager_setsize(vp, osize);
			return (error);
		}
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (ffs_update(vp, waitforupdate));
	}
	/*
	 * Lookup block number for a given offset. Zero length files
	 * have no blocks, so return a blkno of -1.
	 */
	lbn = lblkno(fs, length - 1);
	if (length == 0) {
		blkno = -1;
	} else if (lbn < UFS_NDADDR) {
		blkno = DIP(ip, i_db[lbn]);
	} else {
		error = UFS_BALLOC(vp, lblktosize(fs, (off_t)lbn),
		    fs->fs_bsize, cred, BA_METAONLY, &bp);
		if (error)
			return (error);
		indiroff = (lbn - UFS_NDADDR) % NINDIR(fs);
		if (I_IS_UFS1(ip))
			blkno = ((ufs1_daddr_t *)(bp->b_data))[indiroff];
		else
			blkno = ((ufs2_daddr_t *)(bp->b_data))[indiroff];
		/*
		 * If the block number is non-zero, then the indirect block
		 * must have been previously allocated and need not be
		 * written.  If the block number is zero, then we may have
		 * allocated the indirect block and hence need to write it
		 * out.
		 */
		if (blkno != 0)
			brelse(bp);
		else if (flags & IO_SYNC)
			bwrite(bp);
		else
			bdwrite(bp);
	}
	/*
	 * If the block number at the new end of the file is zero,
	 * then we must allocate it to ensure that the last block of
	 * the file is allocated. Soft updates does not handle this
	 * case, so here we have to clean up the soft updates data
	 * structures describing the allocation past the truncation
	 * point. Finding and deallocating those structures is a lot of
	 * work. Since partial truncation with a hole at the end occurs
	 * rarely, we solve the problem by syncing the file so that it
	 * will have no soft updates data structures left.
	 */
	if (blkno == 0 && (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
		return (error);
	if (blkno != 0 && DOINGSOFTDEP(vp)) {
		if (softdeptrunc == 0 && journaltrunc == 0) {
			/*
			 * If soft updates cannot handle this truncation,
			 * clean up soft dependency data structures and
			 * fall through to the synchronous truncation.
			 */
			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
				return (error);
		} else {
			flags = IO_NORMAL | (needextclean ? IO_EXT : 0);
			if (journaltrunc)
				softdep_journal_freeblocks(ip, cred, length,
				    flags);
			else
				softdep_setup_freeblocks(ip, length, flags);
			ASSERT_VOP_LOCKED(vp, "ffs_truncate1");
			if (journaltrunc == 0) {
				ip->i_flag |= IN_CHANGE | IN_UPDATE;
				error = ffs_update(vp, 0);
			}
			return (error);
		}
	}
	/*
	 * Shorten the size of the file. If the last block of the
	 * shortened file is unallocated, we must allocate it.
	 * Additionally, if the file is not being truncated to a
	 * block boundary, the contents of the partial block
	 * following the end of the file must be zero'ed in
	 * case it ever becomes accessible again because of
	 * subsequent file growth. Directories however are not
	 * zero'ed as they should grow back initialized to empty.
	 */
	offset = blkoff(fs, length);
	if (blkno != 0 && offset == 0) {
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
	} else {
		lbn = lblkno(fs, length);
		flags |= BA_CLRBUF;
		error = UFS_BALLOC(vp, length - 1, 1, cred, flags, &bp);
		if (error)
			return (error);
		/*
		 * When we are doing soft updates and the UFS_BALLOC
		 * above fills in a direct block hole with a full sized
		 * block that will be truncated down to a fragment below,
		 * we must flush out the block dependency with an FSYNC
		 * so that we do not get a soft updates inconsistency
		 * when we create the fragment below.
		 */
		if (DOINGSOFTDEP(vp) && lbn < UFS_NDADDR &&
		    fragroundup(fs, blkoff(fs, length)) < fs->fs_bsize &&
		    (error = ffs_syncvnode(vp, MNT_WAIT, 0)) != 0)
			return (error);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		size = blksize(fs, ip, lbn);
		if (vp->v_type != VDIR && offset != 0)
			bzero((char *)bp->b_data + offset,
			    (uint)(size - offset));
		/* Kirk's code has reallocbuf(bp, size, 1) here */
		allocbuf(bp, size);
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		if (flags & IO_SYNC)
			bwrite(bp);
		else if (DOINGASYNC(vp))
			bdwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastiblock[SINGLE] = lastblock - UFS_NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->fs_bsize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ffs_indirtrunc below.
	 */
	for (level = TRIPLE; level >= SINGLE; level--) {
		oldblks[UFS_NDADDR + level] = DIP(ip, i_ib[level]);
		if (lastiblock[level] < 0) {
			DIP_SET(ip, i_ib[level], 0);
			lastiblock[level] = -1;
		}
	}
	for (i = 0; i < UFS_NDADDR; i++) {
		oldblks[i] = DIP(ip, i_db[i]);
		if (i > lastblock)
			DIP_SET(ip, i_db[i], 0);
	}
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = ffs_update(vp, waitforupdate);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	for (i = 0; i < UFS_NDADDR; i++) {
		newblks[i] = DIP(ip, i_db[i]);
		DIP_SET(ip, i_db[i], oldblks[i]);
	}
	for (i = 0; i < UFS_NIADDR; i++) {
		newblks[UFS_NDADDR + i] = DIP(ip, i_ib[i]);
		DIP_SET(ip, i_ib[i], oldblks[UFS_NDADDR + i]);
	}
	ip->i_size = osize;
	DIP_SET(ip, i_size, osize);

	error = vtruncbuf(vp, cred, length, fs->fs_bsize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -UFS_NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = DIP(ip, i_ib[level]);
		if (bn != 0) {
			error = ffs_indirtrunc(ip, indir_lbn[level],
			    fsbtodb(fs, bn), lastiblock[level], level, &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				DIP_SET(ip, i_ib[level], 0);
				ffs_blkfree(ump, fs, ump->um_devvp, bn,
				    fs->fs_bsize, ip->i_number,
				    vp->v_type, nil);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = UFS_NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = DIP(ip, i_db[i]);
		if (bn == 0)
			continue;
		DIP_SET(ip, i_db[i], 0);
		bsize = blksize(fs, ip, i);
		ffs_blkfree(ump, fs, ump->um_devvp, bn, bsize,
		    ip->i_number, vp->v_type, nil);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = DIP(ip, i_db[lastblock]);
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, ip, lastblock);
		ip->i_size = length;
		DIP_SET(ip, i_size, length);
		newspace = blksize(fs, ip, lastblock);
		if (newspace == 0)
			panic("ffs_truncate: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ffs_blkfree(ump, fs, ump->um_devvp, bn,
			    oldspace - newspace, ip->i_number,
			    vp->v_type, nil);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#ifdef INVARIANTS
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[UFS_NDADDR + level] != DIP(ip, i_ib[level]))
			panic("ffs_truncate1");
	for (i = 0; i < UFS_NDADDR; i++)
		if (newblks[i] != DIP(ip, i_db[i]))
			panic("ffs_truncate2");
	BO_LOCK(bo);
	if (length == 0 &&
	    (fs->fs_magic != FS_UFS2_MAGIC || ip->i_din2->di_extsize == 0) &&
	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
		panic("ffs_truncate3");
	BO_UNLOCK(bo);
#endif /* INVARIANTS */
	/*
	 * Put back the real size.
	 */
	ip->i_size = length;
	DIP_SET(ip, i_size, length);
	if (DIP(ip, i_blocks) >= blocksreleased)
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - blocksreleased);
	else	/* sanity */
		DIP_SET(ip, i_blocks, 0);
	ip->i_flag |= IN_CHANGE;
#ifdef QUOTA
	(void) chkdq(ip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
extclean:
	if (journaltrunc)
		softdep_journal_freeblocks(ip, cred, length, IO_EXT);
	else
		softdep_setup_freeblocks(ip, length, IO_EXT);
	return (ffs_update(vp, waitforupdate));
#endif // 0
	return 0;
}
/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct bufhashhdr *bh;
	struct buf *bp, *nbp = NULL;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if (nbp == NULL && getnewbuf(slpflag, slptimeo, &nbp) != 0) {
			goto start;
		}
		bp = nbp;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	} else if (nbp != NULL) {
		/*
		 * Set B_AGE so that buffer appear at BQ_CLEAN head
		 * and gets reused ASAP.
		 */
		SET(nbp->b_flags, B_AGE);
		brelse(nbp);
	}
	allocbuf(bp, size);

	return (bp);
}
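/*
 * Callers treat getblk() as "find or create, locked".  A sketch of the
 * classic read-through pattern built on it, in the style of the
 * historical BSD bread(); the exact VOP_STRATEGY/biowait signatures
 * vary by era, so this is an illustration, not a drop-in.
 */
static struct buf *
read_block(struct vnode *vp, daddr_t blkno, int size)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	if (bp == NULL)
		return (NULL);
	/* B_CACHE from getblk() means the data is already valid. */
	if (!ISSET(bp->b_flags, B_CACHE)) {
		SET(bp->b_flags, B_READ);
		VOP_STRATEGY(vp, bp);		/* start the disk read */
		if (biowait(bp) != 0) {		/* wait and check result */
			brelse(bp);
			return (NULL);
		}
	}
	return (bp);				/* caller must brelse() */
}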
static int
fuse_write_biobackend(struct vnode *vp, struct uio *uio,
    struct ucred *cred, struct fuse_filehandle *fufh, int ioflag)
{
	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct buf *bp;
	daddr_t lbn;
	int bcount;
	int n, on, err = 0;

	const int biosize = fuse_iosize(vp);

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	FS_DEBUG("resid=%zx offset=%jx fsize=%jx\n",
	    uio->uio_resid, uio->uio_offset, fvdat->filesize);
	if (vp->v_type != VREG)
		return (EIO);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	if (ioflag & IO_APPEND)
		uio_setoffset(uio, fvdat->filesize);

	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return err.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	do {
		if (fuse_isdeadfs(vp)) {
			err = ENXIO;
			break;
		}
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);

		FS_DEBUG2G("lbn %ju, on %d, n %d, uio offset %ju, uio resid %zd\n",
		    (uintmax_t)lbn, on, n, (uintmax_t)uio->uio_offset,
		    uio->uio_resid);

again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		if (uio->uio_offset == fvdat->filesize && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);

			if (bp != NULL) {
				long save;

				err = fuse_vnode_setsize(vp, cred,
				    uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < fvdat->filesize) {
				if ((off_t)(lbn + 1) * biosize < fvdat->filesize)
					bcount = biosize;
				else
					bcount = fvdat->filesize -
					    (off_t)lbn * biosize;
			}
			FS_DEBUG("getting block from OS, bcount %d\n", bcount);
			bp = getblk(vp, lbn, bcount, PCATCH, 0, 0);
			if (bp && uio->uio_offset + n > fvdat->filesize) {
				err = fuse_vnode_setsize(vp, cred,
				    uio->uio_offset + n);
				if (err) {
					brelse(bp);
					break;
				}
			}
		}
		if (!bp) {
			err = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			fuse_io_strategy(vp, bp);
			if ((err = bp->b_error)) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			FS_DEBUG("FUSE append race @%lx:%d\n",
			    (long)bp->b_blkno * biosize,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}
		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			/*
			 * Yes, we mean it.  Write out everything to "storage"
			 * immediately, without hesitation.  (Apart from other
			 * reasons: the only way to know if a write is valid
			 * is if it's actually written out.)
			 */
			bwrite(bp);
			if (bp->b_error == EINTR) {
				err = EINTR;
				break;
			}
			goto again;
		}
		err = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (err) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = err;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}
		err = bwrite(bp);
		if (err)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (fuse_sync_resize && (fvdat->flag & FN_SIZECHANGE) != 0)
		fuse_vnode_savesize(vp, cred);

	return (err);
}
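/*
 * The b_dirtyoff/b_dirtyend bookkeeping above tracks exactly one
 * contiguous dirty byte range per buffer; a discontiguous write forces
 * the old range out first.  A toy sketch of just that merge decision
 * (plain integers, no buf structure; the names are hypothetical):
 */
#include <stdbool.h>

/* One contiguous dirty byte range within a buffer, [off, end). */
struct dirty_range {
	int off;
	int end;	/* off == end == 0 means clean */
};

/*
 * Try to fold the new write [on, on+n) into the tracked range.
 * Returns false when the ranges are discontiguous, meaning the caller
 * must flush the old dirty data before recording the new range,
 * mirroring the bwrite()-then-retry above.
 */
static bool
dirty_merge(struct dirty_range *dr, int on, int n)
{
	if (dr->end > 0 && (on > dr->end || on + n < dr->off))
		return (false);		/* would leave a hole: flush first */
	if (dr->end > 0) {
		if (on < dr->off)
			dr->off = on;
		if (on + n > dr->end)
			dr->end = on + n;
	} else {
		dr->off = on;
		dr->end = on + n;
	}
	return (true);
}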
/*
 * Truncate the inode oip to at most length size, freeing the
 * disk blocks.
 */
int
ext2_truncate(struct vnode *vp, off_t length, int flags, struct ucred *cred)
{
	struct vnode *ovp = vp;
	daddr_t lastblock;
	struct inode *oip;
	daddr_t bn, lbn, lastiblock[NIADDR], indir_lbn[NIADDR];
	daddr_t oldblks[NDADDR + NIADDR], newblks[NDADDR + NIADDR];
	struct ext2_sb_info *fs;
	struct buf *bp;
	int offset, size, level;
	long count, nblocks, blocksreleased = 0;
	int i;
	int aflags, error, allerror;
	off_t osize;

	/*
	kprintf("ext2_truncate called %d to %d\n", VTOI(ovp)->i_number, length);
	*/
	/*
	 * negative file sizes will totally break the code below and
	 * are not meaningful anyway.
	 */
	if (length < 0)
		return EFBIG;

	oip = VTOI(ovp);
	if (ovp->v_type == VLNK &&
	    oip->i_size < ovp->v_mount->mnt_maxsymlinklen) {
#if DIAGNOSTIC
		if (length != 0)
			panic("ext2_truncate: partial truncate of symlink");
#endif
		bzero((char *)&oip->i_shortlink, (u_int)oip->i_size);
		oip->i_size = 0;
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 1));
	}
	if (oip->i_size == length) {
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 0));
	}
#if QUOTA
	if ((error = ext2_getinoquota(oip)) != 0)
		return (error);
#endif
	fs = oip->i_e2fs;
	osize = oip->i_size;
	ext2_discard_prealloc(oip);
	/*
	 * Lengthen the size of the file. We must ensure that the
	 * last byte of the file is allocated. Since the smallest
	 * value of osize is 0, length will be at least 1.
	 */
	if (osize < length) {
		offset = blkoff(fs, length - 1);
		lbn = lblkno(fs, length - 1);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		vnode_pager_setsize(ovp, length);
		error = ext2_balloc(oip, lbn, offset + 1, cred, &bp, aflags);
		if (error) {
			vnode_pager_setsize(ovp, osize);
			return (error);
		}
		oip->i_size = length;
		if (aflags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
		oip->i_flag |= IN_CHANGE | IN_UPDATE;
		return (EXT2_UPDATE(ovp, 1));
	}
	/*
	 * Shorten the size of the file. If the file is not being
	 * truncated to a block boundary, the contents of the
	 * partial block following the end of the file must be
	 * zero'ed in case it ever becomes accessible again because
	 * of subsequent file growth.
	 */
	/* I don't understand the comment above */
	offset = blkoff(fs, length);
	if (offset == 0) {
		oip->i_size = length;
	} else {
		lbn = lblkno(fs, length);
		aflags = B_CLRBUF;
		if (flags & IO_SYNC)
			aflags |= B_SYNC;
		error = ext2_balloc(oip, lbn, offset, cred, &bp, aflags);
		if (error)
			return (error);
		oip->i_size = length;
		size = blksize(fs, oip, lbn);
		bzero((char *)bp->b_data + offset, (u_int)(size - offset));
		allocbuf(bp, size);
		if (aflags & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}
	/*
	 * Calculate index into inode's block list of
	 * last direct and indirect blocks (if any)
	 * which we want to keep.  Lastblock is -1 when
	 * the file is truncated to 0.
	 */
	lastblock = lblkno(fs, length + fs->s_blocksize - 1) - 1;
	lastiblock[SINGLE] = lastblock - NDADDR;
	lastiblock[DOUBLE] = lastiblock[SINGLE] - NINDIR(fs);
	lastiblock[TRIPLE] = lastiblock[DOUBLE] - NINDIR(fs) * NINDIR(fs);
	nblocks = btodb(fs->s_blocksize);
	/*
	 * Update file and block pointers on disk before we start freeing
	 * blocks.  If we crash before free'ing blocks below, the blocks
	 * will be returned to the free list.  lastiblock values are also
	 * normalized to -1 for calls to ext2_indirtrunc below.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)oldblks, sizeof oldblks);
	for (level = TRIPLE; level >= SINGLE; level--)
		if (lastiblock[level] < 0) {
			oip->i_ib[level] = 0;
			lastiblock[level] = -1;
		}
	for (i = NDADDR - 1; i > lastblock; i--)
		oip->i_db[i] = 0;
	oip->i_flag |= IN_CHANGE | IN_UPDATE;
	allerror = EXT2_UPDATE(ovp, 1);

	/*
	 * Having written the new inode to disk, save its new configuration
	 * and put back the old block pointers long enough to process them.
	 * Note that we save the new block configuration so we can check it
	 * when we are done.
	 */
	bcopy((caddr_t)&oip->i_db[0], (caddr_t)newblks, sizeof newblks);
	bcopy((caddr_t)oldblks, (caddr_t)&oip->i_db[0], sizeof oldblks);
	oip->i_size = osize;
	error = vtruncbuf(ovp, length, (int)fs->s_blocksize);
	if (error && (allerror == 0))
		allerror = error;

	/*
	 * Indirect blocks first.
	 */
	indir_lbn[SINGLE] = -NDADDR;
	indir_lbn[DOUBLE] = indir_lbn[SINGLE] - NINDIR(fs) - 1;
	indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - NINDIR(fs) * NINDIR(fs) - 1;
	for (level = TRIPLE; level >= SINGLE; level--) {
		bn = oip->i_ib[level];
		if (bn != 0) {
			error = ext2_indirtrunc(oip, indir_lbn[level],
			    fsbtodoff(fs, bn), lastiblock[level], level,
			    &count);
			if (error)
				allerror = error;
			blocksreleased += count;
			if (lastiblock[level] < 0) {
				oip->i_ib[level] = 0;
				ext2_blkfree(oip, bn, fs->s_frag_size);
				blocksreleased += nblocks;
			}
		}
		if (lastiblock[level] >= 0)
			goto done;
	}

	/*
	 * All whole direct blocks or frags.
	 */
	for (i = NDADDR - 1; i > lastblock; i--) {
		long bsize;

		bn = oip->i_db[i];
		if (bn == 0)
			continue;
		oip->i_db[i] = 0;
		bsize = blksize(fs, oip, i);
		ext2_blkfree(oip, bn, bsize);
		blocksreleased += btodb(bsize);
	}
	if (lastblock < 0)
		goto done;

	/*
	 * Finally, look for a change in size of the
	 * last direct block; release any frags.
	 */
	bn = oip->i_db[lastblock];
	if (bn != 0) {
		long oldspace, newspace;

		/*
		 * Calculate amount of space we're giving
		 * back as old block size minus new block size.
		 */
		oldspace = blksize(fs, oip, lastblock);
		oip->i_size = length;
		newspace = blksize(fs, oip, lastblock);
		if (newspace == 0)
			panic("itrunc: newspace");
		if (oldspace - newspace > 0) {
			/*
			 * Block number of space to be free'd is
			 * the old block # plus the number of frags
			 * required for the storage we're keeping.
			 */
			bn += numfrags(fs, newspace);
			ext2_blkfree(oip, bn, oldspace - newspace);
			blocksreleased += btodb(oldspace - newspace);
		}
	}
done:
#if DIAGNOSTIC
	for (level = SINGLE; level <= TRIPLE; level++)
		if (newblks[NDADDR + level] != oip->i_ib[level])
			panic("itrunc1");
	for (i = 0; i < NDADDR; i++)
		if (newblks[i] != oip->i_db[i])
			panic("itrunc2");
	if (length == 0 && (!RB_EMPTY(&ovp->v_rbdirty_tree) ||
	    !RB_EMPTY(&ovp->v_rbclean_tree)))
		panic("itrunc3");
#endif /* DIAGNOSTIC */
	/*
	 * Put back the real size.
	 */
	oip->i_size = length;
	oip->i_blocks -= blocksreleased;
	if (oip->i_blocks < 0)			/* sanity */
		oip->i_blocks = 0;
	oip->i_flag |= IN_CHANGE;
	vnode_pager_setsize(ovp, length);
#if QUOTA
	ext2_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
	return (allerror);
}
/*
 * Find a buffer which is available for use.
 * If there is free memory for buffer space and an empty header on the
 * empty list, use that.  Otherwise, select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
static struct buf *
getnewbuf(int sz)
{
	struct buf *bp;
	int x;

	x = splbio();
start:
	/* can we constitute a new buffer? */
	if (freebufspace > sz &&
	    bfreelist[BQ_EMPTY].av_forw != (struct buf *)bfreelist+BQ_EMPTY) {
		caddr_t addr;

/*#define notyet*/
#ifndef notyet
		if ((addr = malloc(sz, M_TEMP, M_WAITOK)) == 0)
			goto tryfree;
#else /* notyet */
		/* get new memory buffer */
		if (round_page(sz) == sz)
			addr = (caddr_t) kmem_alloc_wired_wait(buffer_map, sz);
		else
			addr = (caddr_t) malloc(sz, M_TEMP, M_WAITOK);
		/*if ((addr = malloc(sz, M_TEMP, M_NOWAIT)) == 0)
			goto tryfree;*/
		bzero(addr, sz);
#endif /* notyet */
		freebufspace -= sz;
		allocbufspace += sz;

		bp = bfreelist[BQ_EMPTY].av_forw;
		bp->b_flags = B_BUSY | B_INVAL;
		bremfree(bp);
		bp->b_un.b_addr = addr;
		bp->b_bufsize = sz;	/* 20 Aug 92*/
		goto fillin;
	}

tryfree:
	if (bfreelist[BQ_AGE].av_forw != (struct buf *)bfreelist+BQ_AGE) {
		bp = bfreelist[BQ_AGE].av_forw;
		bremfree(bp);
	} else if (bfreelist[BQ_LRU].av_forw != (struct buf *)bfreelist+BQ_LRU) {
		bp = bfreelist[BQ_LRU].av_forw;
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
		sleep(bfreelist, PRIBIO);
		splx(x);
		return (0);
	}

	/* if we are a delayed write, convert to an async write! */
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_BUSY;
		bawrite(bp);
		goto start;
	}

	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);	/* 25 Apr 92*/
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
	bp->b_flags = B_BUSY;
fillin:
	bremhash(bp);
	splx(x);
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	if (bp->b_bufsize != sz)
		allocbuf(bp, sz);
	bp->b_bcount = bp->b_bufsize = sz;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}
/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn,
    struct buf **bpp, kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif

	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */
	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			if (bpp)
				brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		memset((char *)((*bpp)->b_data) + osize, 0,
		    (u_int)(nsize - osize));
	}

out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}
void
sink(int argc, char *argv[])
{
    static BUF buffer;
    static struct stat stb;
    struct timeval tv[2];
    enum { YES, NO, DISPLAYED } wrerr;
    BUF *bp;
    off_t i, j;
    int exists, first, mask, mode, ofd, omode;
    int setimes, targisdir, wrerrno = 0;
    size_t amt, count, size;
    char ch, *cp, *np, *targ, *why, *vect[1];
    static char buf[BUFSIZ];

#define atime   tv[0]
#define mtime   tv[1]
#define SCREWUP(str) { why = str; goto screwup; }

    setimes = targisdir = 0;
#ifndef __GNO__
    mask = umask(0);
    if (!pflag)
        (void)umask(mask);
#endif
    if (argc != 1) {
        run_err("ambiguous target");
        exit(1);
    }
    targ = *argv;
    if (targetshouldbedirectory)
        verifydir(targ);
    (void)write(rem, "", 1);
    if (stat(targ, &stb) == 0 && S_ISDIR(stb.st_mode))
        targisdir = 1;
    for (first = 1;; first = 0) {
        cp = buf;
        if (read(rem, cp, 1) <= 0)
            return;
        if (*cp++ == '\n')
            SCREWUP("unexpected <newline>");
        do {
            if (read(rem, &ch, sizeof(ch)) != sizeof(ch))
                SCREWUP("lost connection");
            *cp++ = ch;
        } while (cp < &buf[BUFSIZ - 1] && ch != '\n');
        *cp = 0;

        if (buf[0] == '\01' || buf[0] == '\02') {
            if (iamremote == 0)
                (void)write(STDERR_FILENO,
                    buf + 1, strlen(buf + 1));
            if (buf[0] == '\02')
                exit(1);
            ++errs;
            continue;
        }
        if (buf[0] == 'E') {
            (void)write(rem, "", 1);
            return;
        }
        if (ch == '\n')
            *--cp = 0;

#define getnum(t) (t) = 0; while (isdigit(*cp)) (t) = (t) * 10 + (*cp++ - '0');
        cp = buf;
        if (*cp == 'T') {
            setimes++;
            cp++;
            getnum(mtime.tv_sec);
            if (*cp++ != ' ')
                SCREWUP("mtime.sec not delimited");
            getnum(mtime.tv_usec);
            if (*cp++ != ' ')
                SCREWUP("mtime.usec not delimited");
            getnum(atime.tv_sec);
            if (*cp++ != ' ')
                SCREWUP("atime.sec not delimited");
            getnum(atime.tv_usec);
            if (*cp++ != '\0')
                SCREWUP("atime.usec not delimited");
            (void)write(rem, "", 1);
            continue;
        }
        if (*cp != 'C' && *cp != 'D') {
            /*
             * Check for the case "rcp remote:foo\* local:bar".
             * In this case, the line "No match." can be returned
             * by the shell before the rcp command on the remote is
             * executed so the ^Aerror_message convention isn't
             * followed.
             */
            if (first) {
                run_err("%s", cp);
                exit(1);
            }
            SCREWUP("expected control record");
        }
        mode = 0;
        for (++cp; cp < buf + 5; cp++) {
            if (*cp < '0' || *cp > '7')
                SCREWUP("bad mode");
            mode = (mode << 3) | (*cp - '0');
        }
        if (*cp++ != ' ')
            SCREWUP("mode not delimited");

        for (size = 0; isdigit(*cp);)
            size = size * 10 + (*cp++ - '0');
        if (*cp++ != ' ')
            SCREWUP("size not delimited");
        if (targisdir) {
            static char *namebuf;
            static size_t cursize;
            size_t need;

            /*
             * Grow the pathname buffer as needed, remembering its
             * size; skip this file if allocation fails.
             */
            need = strlen(targ) + strlen(cp) + 250;
            if (need > cursize) {
                free(namebuf);
                if (!(namebuf = malloc(need))) {
                    run_err("%s", strerror(errno));
                    continue;
                }
                cursize = need;
            }
            (void)snprintf(namebuf, need, "%s%s%s", targ,
                *targ ? "/" : "", cp);
            np = namebuf;
        } else
            np = targ;
        exists = stat(np, &stb) == 0;
        if (buf[0] == 'D') {
            int mod_flag = pflag;

            if (exists) {
                if (!S_ISDIR(stb.st_mode)) {
                    errno = ENOTDIR;
                    goto bad;
                }
                if (pflag)
                    (void)chmod(np, mode);
            } else {
                /* Handle copying from a read-only directory */
                mod_flag = 1;
#ifdef __GNO__
                if (mkdir(np) < 0)
#else
                if (mkdir(np, mode | S_IRWXU) < 0)
#endif
                    goto bad;
            }
            vect[0] = np;
            sink(1, vect);
            if (setimes) {
                setimes = 0;
#ifndef __GNO__
                if (utimes(np, tv) < 0)
                    run_err("%s: set times: %s",
                        np, strerror(errno));
#endif
            }
            if (mod_flag)
                (void)chmod(np, mode);
            continue;
        }
        omode = mode;
        mode |= S_IWRITE;
        if ((ofd = open(np, O_WRONLY|O_CREAT, mode)) < 0) {
bad:        run_err("%s: %s", np, strerror(errno));
            continue;
        }
        (void)write(rem, "", 1);
        if ((bp = allocbuf(&buffer, ofd, BUFSIZ)) == NULL) {
            (void)close(ofd);
            continue;
        }
        cp = bp->buf;
        wrerr = NO;
        for (count = i = 0; i < size; i += BUFSIZ) {
            amt = BUFSIZ;
            if (i + amt > size)
                amt = size - i;
            count += amt;
            do {
                j = read(rem, cp, amt);
                if (j <= 0) {
                    run_err("%s", j ? strerror(errno) :
                        "dropped connection");
                    exit(1);
                }
                amt -= j;
                cp += j;
            } while (amt > 0);
            if (count == bp->cnt) {
                /* Keep reading so we stay sync'd up. */
                if (wrerr == NO) {
                    j = write(ofd, bp->buf, count);
                    if (j != count) {
                        wrerr = YES;
                        wrerrno = j >= 0 ? EIO : errno;
                    }
                }
                count = 0;
                cp = bp->buf;
            }
        }
        if (count != 0 && wrerr == NO &&
            (j = write(ofd, bp->buf, count)) != count) {
            wrerr = YES;
            wrerrno = j >= 0 ? EIO : errno;
        }
        if (ftruncate(ofd, size)) {
            run_err("%s: truncate: %s", np, strerror(errno));
            wrerr = DISPLAYED;
        }
#ifndef __GNO__
        if (pflag) {
            if (exists || omode != mode)
                if (fchmod(ofd, omode))
                    run_err("%s: set mode: %s",
                        np, strerror(errno));
        } else {
            if (!exists && omode != mode)
                if (fchmod(ofd, omode & ~mask))
                    run_err("%s: set mode: %s",
                        np, strerror(errno));
        }
#endif
        (void)close(ofd);
        (void)response();
        if (setimes && wrerr == NO) {
            setimes = 0;
#ifndef __GNO__
            if (utimes(np, tv) < 0) {
                run_err("%s: set times: %s",
                    np, strerror(errno));
                wrerr = DISPLAYED;
            }
#endif
        }
        switch (wrerr) {
        case YES:
            run_err("%s: %s", np, strerror(wrerrno));
            break;
        case NO:
            (void)write(rem, "", 1);
            break;
        case DISPLAYED:
            break;
        }
    }
screwup:
    run_err("protocol error: %s", why);
    exit(1);
}
void
source(int argc, char **argv)
{
    struct stat stb;
    static BUF buffer;
    BUF *bp;
    off_t i;
    off_t amt;
    int fd, haderr, indx, result;
    char *last, *name, buf[BUFSIZ];

    for (indx = 0; indx < argc; ++indx) {
        name = argv[indx];
        if ((fd = open(name, O_RDONLY, 0)) < 0)
            goto syserr;
        if (fstat(fd, &stb)) {
syserr:     run_err("%s: %s", name, strerror(errno));
            goto next;
        }
        if (S_ISDIR(stb.st_mode) && iamrecursive) {
            rsource(name, &stb);
            goto next;
        } else if (!S_ISREG(stb.st_mode)) {
            run_err("%s: not a regular file", name);
            goto next;
        }
        if ((last = strrchr(name, '/')) == NULL)
            last = name;
        else
            ++last;
        if (pflag) {
            /*
             * Make it compatible with possible future
             * versions expecting microseconds.
             */
            snprintf(buf, sizeof(buf), "T%ld 0 %ld 0\n",
                (long)stb.st_mtime, (long)stb.st_atime);
            write(remout, buf, strlen(buf));
            if (response() < 0)
                goto next;
        }
#undef MODEMASK
#define MODEMASK (S_ISUID|S_ISGID|S_ISVTX|S_IRWXU|S_IRWXG|S_IRWXO)
        snprintf(buf, sizeof(buf), "C%04o %lu %s\n",
            (unsigned int)(stb.st_mode & MODEMASK),
            (unsigned long)stb.st_size, last);
        write(remout, buf, strlen(buf));
        if (response() < 0)
            goto next;
        if ((bp = allocbuf(&buffer, fd, BUFSIZ)) == NULL) {
next:       close(fd);
            continue;
        }

        /* Keep writing after an error so that we stay sync'd up. */
        for (haderr = i = 0; i < stb.st_size; i += bp->cnt) {
            amt = bp->cnt;
            if (i + amt > stb.st_size)
                amt = stb.st_size - i;
            if (!haderr) {
                result = read(fd, bp->buf, (size_t)amt);
                if (result != amt)
                    haderr = result >= 0 ? EIO : errno;
            }
            if (haderr)
                write(remout, bp->buf, amt);
            else {
                result = write(remout, bp->buf, (size_t)amt);
                if (result != amt)
                    haderr = result >= 0 ? EIO : errno;
            }
        }
        if (close(fd) && !haderr)
            haderr = errno;
        if (!haderr)
            write(remout, "", 1);
        else
            run_err("%s: %s", name, strerror(haderr));
        response();
    }
}
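/*
 * Editorial summary with a small demo (not part of the original source):
 * source() and sink() above speak a line-oriented ASCII protocol in
 * which every record is acknowledged with a single zero byte:
 *
 *  T<mtime> 0 <atime> 0\n  file times (sent only under -p)
 *  C<mode> <size> <name>\n regular file, followed by <size> raw bytes
 *  D<mode> <size> <name>\n enter directory, closed by a later E record
 *  E\n                     leave directory / end of transfer
 *  \01<msg> / \02<msg>     non-fatal / fatal error line
 *
 * The standalone program below just prints the 'C' record source()
 * would emit for a hypothetical 0644, 1234-byte file named "foo.txt";
 * 07777 is the same value as the MODEMASK defined above.
 */
#include <stdio.h>

int
main(void)
{
    char buf[64];
    unsigned int mode = 0644;
    unsigned long size = 1234;

    snprintf(buf, sizeof(buf), "C%04o %lu %s\n", mode & 07777U, size,
        "foo.txt");
    fputs(buf, stdout);    /* prints: C0644 1234 foo.txt */
    return 0;
}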
int
lfs_truncate(struct vnode *ovp, off_t length, int ioflag, kauth_cred_t cred)
{
    daddr_t lastblock;
    struct inode *oip = VTOI(ovp);
    daddr_t bn, lbn, lastiblock[ULFS_NIADDR], indir_lbn[ULFS_NIADDR];
    /* XXX ondisk32 */
    int32_t newblks[ULFS_NDADDR + ULFS_NIADDR];
    struct lfs *fs;
    struct buf *bp;
    int offset, size, level;
    daddr_t count, rcount;
    daddr_t blocksreleased = 0, real_released = 0;
    int i, nblocks;
    int aflags, error, allerror = 0;
    off_t osize;
    long lastseg;
    size_t bc;
    int obufsize, odb;
    int usepc;

    if (ovp->v_type == VCHR || ovp->v_type == VBLK ||
        ovp->v_type == VFIFO || ovp->v_type == VSOCK) {
        KASSERT(oip->i_size == 0);
        return 0;
    }

    if (length < 0)
        return (EINVAL);

    /*
     * Just return and do not update modification times.
     */
    if (oip->i_size == length) {
        /* still do a uvm_vnp_setsize() as writesize may be larger */
        uvm_vnp_setsize(ovp, length);
        return (0);
    }

    fs = oip->i_lfs;

    if (ovp->v_type == VLNK &&
        (oip->i_size < fs->um_maxsymlinklen ||
         (fs->um_maxsymlinklen == 0 && oip->i_ffs1_blocks == 0))) {
#ifdef DIAGNOSTIC
        if (length != 0)
            panic("lfs_truncate: partial truncate of symlink");
#endif
        memset((char *)SHORTLINK(oip), 0, (u_int)oip->i_size);
        oip->i_size = oip->i_ffs1_size = 0;
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (lfs_update(ovp, NULL, NULL, 0));
    }
    if (oip->i_size == length) {
        oip->i_flag |= IN_CHANGE | IN_UPDATE;
        return (lfs_update(ovp, NULL, NULL, 0));
    }
    lfs_imtime(fs);
    osize = oip->i_size;
    usepc = (ovp->v_type == VREG && ovp != fs->lfs_ivnode);

    ASSERT_NO_SEGLOCK(fs);
    /*
     * Lengthen the size of the file. We must ensure that the
     * last byte of the file is allocated. Since the smallest
     * value of osize is 0, length will be at least 1.
     */
    if (osize < length) {
        if (length > fs->um_maxfilesize)
            return (EFBIG);
        aflags = B_CLRBUF;
        if (ioflag & IO_SYNC)
            aflags |= B_SYNC;
        if (usepc) {
            if (lfs_lblkno(fs, osize) < ULFS_NDADDR &&
                lfs_lblkno(fs, osize) != lfs_lblkno(fs, length) &&
                lfs_blkroundup(fs, osize) != osize) {
                off_t eob;

                eob = lfs_blkroundup(fs, osize);
                uvm_vnp_setwritesize(ovp, eob);
                error = ulfs_balloc_range(ovp, osize,
                    eob - osize, cred, aflags);
                if (error) {
                    (void) lfs_truncate(ovp, osize,
                        ioflag & IO_SYNC, cred);
                    return error;
                }
                if (ioflag & IO_SYNC) {
                    mutex_enter(ovp->v_interlock);
                    VOP_PUTPAGES(ovp,
                        trunc_page(osize & lfs_sb_getbmask(fs)),
                        round_page(eob),
                        PGO_CLEANIT | PGO_SYNCIO);
                }
            }
            uvm_vnp_setwritesize(ovp, length);
            error = ulfs_balloc_range(ovp, length - 1, 1, cred,
                aflags);
            if (error) {
                (void) lfs_truncate(ovp, osize,
                    ioflag & IO_SYNC, cred);
                return error;
            }
            uvm_vnp_setsize(ovp, length);
            oip->i_flag |= IN_CHANGE | IN_UPDATE;
            KASSERT(ovp->v_size == oip->i_size);
            oip->i_lfs_hiblk = lfs_lblkno(fs,
                oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
            return (lfs_update(ovp, NULL, NULL, 0));
        } else {
            error = lfs_reserve(fs, ovp, NULL,
                lfs_btofsb(fs,
                    (ULFS_NIADDR + 2) << lfs_sb_getbshift(fs)));
            if (error)
                return (error);
            error = lfs_balloc(ovp, length - 1, 1, cred,
                aflags, &bp);
            lfs_reserve(fs, ovp, NULL,
                -lfs_btofsb(fs,
                    (ULFS_NIADDR + 2) << lfs_sb_getbshift(fs)));
            if (error)
                return (error);
            oip->i_ffs1_size = oip->i_size = length;
            uvm_vnp_setsize(ovp, length);
            (void) VOP_BWRITE(bp->b_vp, bp);
            oip->i_flag |= IN_CHANGE | IN_UPDATE;
            oip->i_lfs_hiblk = lfs_lblkno(fs,
                oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
            return (lfs_update(ovp, NULL, NULL, 0));
        }
    }

    if ((error = lfs_reserve(fs, ovp, NULL,
        lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)))) != 0)
        return (error);

    /*
     * Shorten the size of the file. If the file is not being
     * truncated to a block boundary, the contents of the
     * partial block following the end of the file must be
     * zero'ed in case it ever becomes accessible again because
     * of subsequent file growth. Directories, however, are not
     * zero'ed, as they should grow back initialized to empty.
     */
    offset = lfs_blkoff(fs, length);
    lastseg = -1;
    bc = 0;

    if (ovp != fs->lfs_ivnode)
        lfs_seglock(fs, SEGM_PROT);
    if (offset == 0) {
        oip->i_size = oip->i_ffs1_size = length;
    } else if (!usepc) {
        lbn = lfs_lblkno(fs, length);
        aflags = B_CLRBUF;
        if (ioflag & IO_SYNC)
            aflags |= B_SYNC;
        error = lfs_balloc(ovp, length - 1, 1, cred, aflags, &bp);
        if (error) {
            lfs_reserve(fs, ovp, NULL,
                -lfs_btofsb(fs,
                    (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
            goto errout;
        }
        obufsize = bp->b_bufsize;
        odb = lfs_btofsb(fs, bp->b_bcount);
        oip->i_size = oip->i_ffs1_size = length;
        size = lfs_blksize(fs, oip, lbn);
        if (ovp->v_type != VDIR)
            memset((char *)bp->b_data + offset, 0,
                (u_int)(size - offset));
        allocbuf(bp, size, 1);
        if ((bp->b_flags & B_LOCKED) != 0 && bp->b_iodone == NULL) {
            mutex_enter(&lfs_lock);
            locked_queue_bytes -= obufsize - bp->b_bufsize;
            mutex_exit(&lfs_lock);
        }
        if (bp->b_oflags & BO_DELWRI) {
            lfs_sb_addavail(fs, odb - lfs_btofsb(fs, size));
            /* XXX shouldn't this wake up on lfs_availsleep? */
        }
        (void) VOP_BWRITE(bp->b_vp, bp);
    } else { /* vp->v_type == VREG && length < osize && offset != 0 */
        /*
         * When truncating a regular file down to a non-block-aligned
         * size, we must zero the part of last block which is past
         * the new EOF.  We must synchronously flush the zeroed pages
         * to disk since the new pages will be invalidated as soon
         * as we inform the VM system of the new, smaller size.
         * We must do this before acquiring the GLOCK, since fetching
         * the pages will acquire the GLOCK internally.
         * So there is a window where another thread could see a whole
         * zeroed page past EOF, but that's life.
         */
        daddr_t xlbn;
        voff_t eoz;

        aflags = ioflag & IO_SYNC ? B_SYNC : 0;
        error = ulfs_balloc_range(ovp, length - 1, 1, cred, aflags);
        if (error) {
            lfs_reserve(fs, ovp, NULL,
                -lfs_btofsb(fs,
                    (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
            goto errout;
        }
        xlbn = lfs_lblkno(fs, length);
        size = lfs_blksize(fs, oip, xlbn);
        eoz = MIN(lfs_lblktosize(fs, xlbn) + size, osize);
        ubc_zerorange(&ovp->v_uobj, length, eoz - length,
            UBC_UNMAP_FLAG(ovp));
        if (round_page(eoz) > round_page(length)) {
            mutex_enter(ovp->v_interlock);
            error = VOP_PUTPAGES(ovp, round_page(length),
                round_page(eoz),
                PGO_CLEANIT | PGO_DEACTIVATE |
                ((ioflag & IO_SYNC) ? PGO_SYNCIO : 0));
            if (error) {
                lfs_reserve(fs, ovp, NULL,
                    -lfs_btofsb(fs,
                        (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
                goto errout;
            }
        }
    }

    genfs_node_wrlock(ovp);

    oip->i_size = oip->i_ffs1_size = length;
    uvm_vnp_setsize(ovp, length);

    /*
     * Calculate index into inode's block list of
     * last direct and indirect blocks (if any)
     * which we want to keep.  Lastblock is -1 when
     * the file is truncated to 0.
     */
    /* Avoid sign overflow - XXX assumes that off_t is a quad_t. */
    if (length > QUAD_MAX - lfs_sb_getbsize(fs))
        lastblock = lfs_lblkno(fs, QUAD_MAX - lfs_sb_getbsize(fs));
    else
        lastblock = lfs_lblkno(fs, length + lfs_sb_getbsize(fs) - 1) - 1;
    lastiblock[SINGLE] = lastblock - ULFS_NDADDR;
    lastiblock[DOUBLE] = lastiblock[SINGLE] - LFS_NINDIR(fs);
    lastiblock[TRIPLE] = lastiblock[DOUBLE] - LFS_NINDIR(fs) * LFS_NINDIR(fs);
    nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));
    /*
     * Record changed file and block pointers before we start
     * freeing blocks.  lastiblock values are also normalized to -1
     * for calls to lfs_indirtrunc below.
     */
    memcpy((void *)newblks, (void *)&oip->i_ffs1_db[0], sizeof newblks);
    for (level = TRIPLE; level >= SINGLE; level--)
        if (lastiblock[level] < 0) {
            newblks[ULFS_NDADDR + level] = 0;
            lastiblock[level] = -1;
        }
    for (i = ULFS_NDADDR - 1; i > lastblock; i--)
        newblks[i] = 0;

    oip->i_size = oip->i_ffs1_size = osize;
    error = lfs_vtruncbuf(ovp, lastblock + 1, false, 0);
    if (error && !allerror)
        allerror = error;

    /*
     * Indirect blocks first.
     */
    indir_lbn[SINGLE] = -ULFS_NDADDR;
    indir_lbn[DOUBLE] = indir_lbn[SINGLE] - LFS_NINDIR(fs) - 1;
    indir_lbn[TRIPLE] = indir_lbn[DOUBLE] - LFS_NINDIR(fs) * LFS_NINDIR(fs) - 1;
    for (level = TRIPLE; level >= SINGLE; level--) {
        bn = oip->i_ffs1_ib[level];
        if (bn != 0) {
            error = lfs_indirtrunc(oip, indir_lbn[level],
                bn, lastiblock[level],
                level, &count, &rcount,
                &lastseg, &bc);
            if (error)
                allerror = error;
            real_released += rcount;
            blocksreleased += count;
            if (lastiblock[level] < 0) {
                if (oip->i_ffs1_ib[level] > 0)
                    real_released += nblocks;
                blocksreleased += nblocks;
                oip->i_ffs1_ib[level] = 0;
                lfs_blkfree(fs, oip, bn, lfs_sb_getbsize(fs),
                    &lastseg, &bc);
                lfs_deregister_block(ovp, bn);
            }
        }
        if (lastiblock[level] >= 0)
            goto done;
    }

    /*
     * All whole direct blocks or frags.
     */
    for (i = ULFS_NDADDR - 1; i > lastblock; i--) {
        long bsize, obsize;

        bn = oip->i_ffs1_db[i];
        if (bn == 0)
            continue;
        bsize = lfs_blksize(fs, oip, i);
        if (oip->i_ffs1_db[i] > 0) {
            /* Check for fragment size changes */
            obsize = oip->i_lfs_fragsize[i];
            real_released += lfs_btofsb(fs, obsize);
            oip->i_lfs_fragsize[i] = 0;
        } else
            obsize = 0;
        blocksreleased += lfs_btofsb(fs, bsize);
        oip->i_ffs1_db[i] = 0;
        lfs_blkfree(fs, oip, bn, obsize, &lastseg, &bc);
        lfs_deregister_block(ovp, bn);
    }
    if (lastblock < 0)
        goto done;

    /*
     * Finally, look for a change in size of the
     * last direct block; release any frags.
     */
    bn = oip->i_ffs1_db[lastblock];
    if (bn != 0) {
        long oldspace, newspace;
#if 0
        long olddspace;
#endif

        /*
         * Calculate amount of space we're giving
         * back as old block size minus new block size.
         */
        oldspace = lfs_blksize(fs, oip, lastblock);
#if 0
        olddspace = oip->i_lfs_fragsize[lastblock];
#endif

        oip->i_size = oip->i_ffs1_size = length;
        newspace = lfs_blksize(fs, oip, lastblock);
        if (newspace == 0)
            panic("itrunc: newspace");
        if (oldspace - newspace > 0) {
            blocksreleased += lfs_btofsb(fs, oldspace - newspace);
        }
#if 0
        if (bn > 0 && olddspace - newspace > 0) {
            /* No segment accounting here, just vnode */
            real_released += lfs_btofsb(fs, olddspace - newspace);
        }
#endif
    }

done:
    /* Finish segment accounting corrections */
    lfs_update_seguse(fs, oip, lastseg, bc);
#ifdef DIAGNOSTIC
    for (level = SINGLE; level <= TRIPLE; level++)
        if ((newblks[ULFS_NDADDR + level] == 0) !=
            ((oip->i_ffs1_ib[level]) == 0)) {
            panic("lfs itrunc1");
        }
    for (i = 0; i < ULFS_NDADDR; i++)
        if ((newblks[i] == 0) != (oip->i_ffs1_db[i] == 0)) {
            panic("lfs itrunc2");
        }
    if (length == 0 &&
        (!LIST_EMPTY(&ovp->v_cleanblkhd) || !LIST_EMPTY(&ovp->v_dirtyblkhd)))
        panic("lfs itrunc3");
#endif /* DIAGNOSTIC */
    /*
     * Put back the real size.
     */
    oip->i_size = oip->i_ffs1_size = length;
    oip->i_lfs_effnblks -= blocksreleased;
    oip->i_ffs1_blocks -= real_released;
    mutex_enter(&lfs_lock);
    lfs_sb_addbfree(fs, blocksreleased);
    mutex_exit(&lfs_lock);
#ifdef DIAGNOSTIC
    if (oip->i_size == 0 &&
        (oip->i_ffs1_blocks != 0 || oip->i_lfs_effnblks != 0)) {
        printf("lfs_truncate: truncate to 0 but %d blks/%jd effblks\n",
            oip->i_ffs1_blocks, (intmax_t)oip->i_lfs_effnblks);
        panic("lfs_truncate: persistent blocks");
    }
#endif

    /*
     * If we truncated to zero, take us off the paging queue.
     */
    mutex_enter(&lfs_lock);
    if (oip->i_size == 0 && oip->i_flags & IN_PAGING) {
        oip->i_flags &= ~IN_PAGING;
        TAILQ_REMOVE(&fs->lfs_pchainhd, oip, i_lfs_pchain);
    }
    mutex_exit(&lfs_lock);

    oip->i_flag |= IN_CHANGE;
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
    (void) lfs_chkdq(oip, -blocksreleased, NOCRED, 0);
#endif
    lfs_reserve(fs, ovp, NULL,
        -lfs_btofsb(fs, (2 * ULFS_NIADDR + 3) << lfs_sb_getbshift(fs)));
    genfs_node_unlock(ovp);
errout:
    oip->i_lfs_hiblk = lfs_lblkno(fs,
        oip->i_size + lfs_sb_getbsize(fs) - 1) - 1;
    if (ovp != fs->lfs_ivnode)
        lfs_segunlock(fs);
    return (allerror ? allerror : error);
}
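/*
 * Hedged worked example (editorial, standalone; the constants assume an
 * 8 KiB-block LFS with 12 direct pointers and 32-bit indirect entries,
 * i.e. 2048 per block - nothing here is taken from the file above beyond
 * the lastblock/lastiblock[] formulas themselves): truncating to 100000
 * bytes keeps logical blocks 0..12, i.e. every direct block plus entry 0
 * of the single indirect block; the negative DOUBLE/TRIPLE values mean
 * those indirect trees are freed entirely.
 */
#include <stdio.h>

int
main(void)
{
    const long bsize = 8192, ndaddr = 12, nindir = 2048;
    const long length = 100000;
    long lastblock, last_single, last_double, last_triple;

    /* same arithmetic as the lastblock/lastiblock[] block above */
    lastblock = (length + bsize - 1) / bsize - 1;    /* 12 */
    last_single = lastblock - ndaddr;                /* 0 */
    last_double = last_single - nindir;              /* -2048 */
    last_triple = last_double - nindir * nindir;     /* -4196352 */

    printf("lastblock=%ld single=%ld double=%ld triple=%ld\n",
        lastblock, last_single, last_double, last_triple);
    return 0;
}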