/** Read a bip file.
 *
 * Format example:
 *
 *   4                # cols (variables)
 *   3                # rows (constraints)
 *   2 3 5 4 <= 8
 *   3 6 0 8 <= 10
 *   0 0 1 1 <= 1
 *
 * Comments (#) and empty lines are ignored.
 *
 * @param bip      BIP data structure to fill
 * @param filename name of file to read
 * @return 0 on success, -1 on error
 */
int bip_read(BIP* bip, const char* filename)
{
   assert(bip_is_valid(bip));
   assert(NULL != filename);
   assert(0 < strlen(filename));

   char      buf[MAX_LINE_LEN];
   char*     s;
   FILE*     fp;
   LFS*      lfs   = NULL;
   int       lines = 0;
   LINE_MODE mode  = READ_COLS;

   if (NULL == (fp = fopen(filename, "r")))
      return fprintf(stderr, "Can't open file %s\n", filename), -1;

   printf("Reading %s\n", filename);

   while(mode != READ_ERROR && NULL != (s = fgets(buf, sizeof(buf), fp)))
   {
      lines++;

      lfs = lfs_split_line(lfs, s, "#");

      if (VERB_DEBUG <= bip->verb_level)
         lfs_print(lfs, stderr);

      mode = process_line(mode, lfs, lines, bip);
   }
   fclose(fp);

   if (NULL != lfs)
      lfs_free(lfs);

   if (READ_ERROR == mode)
      return -1;

   if (bip->cols == 0 || bip->rows == 0 || bip->read_rows < bip->rows)
      return fprintf(stderr, "Error: unexpected EOF\n"), -1;

   assert(bip->read_rows == bip->rows);

   printf("Read %d rows, %d cols\n", bip->read_rows, bip->cols);

   if (bip_can_overflow(bip))
      return -1;

   bip_preprocess(bip);

   assert(bip_is_valid(bip));

   return 0;
}
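/*
 * Usage sketch for bip_read() (not part of the original source): shows the
 * expected call pattern, under the assumption that a BIP instance is
 * created and destroyed by constructor/destructor functions.  The names
 * bip_new() and bip_free() are hypothetical placeholders; only bip_read()
 * itself is taken from the code above.
 */
#if 0	/* illustration only */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char** argv)
{
   if (argc < 2)
      return fprintf(stderr, "usage: %s file.bip\n", argv[0]), EXIT_FAILURE;

   BIP* bip = bip_new();          /* hypothetical constructor */

   if (bip_read(bip, argv[1]))    /* 0 on success, -1 on error */
   {
      bip_free(bip);              /* hypothetical destructor */
      return EXIT_FAILURE;
   }
   /* ... solve or print the instance here ... */
   bip_free(bip);
   return EXIT_SUCCESS;
}
#endif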
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO));
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
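/*
 * Sketch (not part of the original source): why sys_lfs_markv() casts
 * blkcnt to u_int before the bound check.  A negative int wraps to a huge
 * unsigned value, so a single unsigned comparison rejects both negative
 * and oversized counts before blkcnt * sizeof(BLOCK_INFO) is computed.
 * MAXBLKCNT below is a hypothetical stand-in for LFS_MARKV_MAXBLKCNT.
 */
#if 0	/* illustration only */
#include <stdio.h>

#define MAXBLKCNT 65536u	/* hypothetical bound */

static int check(int blkcnt)
{
	/* same idiom as the syscall: one unsigned compare covers both cases */
	return ((unsigned int)blkcnt > MAXBLKCNT) ? -1 : 0;
}

int main(void)
{
	printf("%d\n", check(-1));	/* rejected: -1 wraps to UINT_MAX */
	printf("%d\n", check(100));	/* accepted */
	return 0;
}
#endif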
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
#if SIZE_T_MAX <= UINT_MAX
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
#endif

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(l, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO));
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
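/*
 * Sketch (not part of the original source): the SIZE_T_MAX guard above
 * exists because blkcnt * sizeof(BLOCK_INFO) is computed in size_t; where
 * size_t is 32 bits the product can wrap, and lfs_malloc() would then
 * allocate far less than copyin() writes.  Dividing first avoids the
 * multiplication entirely.  count_fits() is a hypothetical helper showing
 * the same overflow-safe test in isolation.
 */
#if 0	/* illustration only */
#include <stddef.h>
#include <stdint.h>

static int count_fits(size_t nmemb, size_t size)
{
	/* nmemb * size <= SIZE_MAX  <=>  nmemb <= SIZE_MAX / size,
	 * and the right-hand side cannot overflow. */
	return nmemb <= SIZE_MAX / size;
}
#endif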
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15),
	    LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
	    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
		    blkcnt * sizeof(BLOCK_INFO_15));
	}
 out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
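/*
 * Sketch (not part of the original source): the two field-copy loops above
 * can be factored into a conversion helper.  Field names are taken from
 * the code above; the explicit member-by-member copy (rather than memcpy)
 * is deliberate, since BLOCK_INFO_15 and BLOCK_INFO share fields but not
 * layout or widths.  blkiov_15_to_current() is a hypothetical name.
 */
#if 0	/* illustration only */
static void
blkiov_15_to_current(const BLOCK_INFO_15 *src, BLOCK_INFO *dst, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dst[i].bi_inode     = src[i].bi_inode;
		dst[i].bi_lbn       = src[i].bi_lbn;
		dst[i].bi_daddr     = src[i].bi_daddr;
		dst[i].bi_segcreate = src[i].bi_segcreate;
		dst[i].bi_version   = src[i].bi_version;
		dst[i].bi_bp        = src[i].bi_bp;
		dst[i].bi_size      = src[i].bi_size;
	}
}
#endif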
/*
 * Release blocks associated with the inode ip and stored in the indirect
 * block bn.  Blocks are free'd in LIFO order up to (but not including)
 * lastbn.  If level is greater than SINGLE, the block is an indirect block
 * and recursive calls to indirtrunc must be used to cleanse other indirect
 * blocks.
 *
 * NB: triple indirect blocks are untested.
 */
static int
lfs_indirtrunc(struct inode *ip, daddr_t lbn, daddr_t dbn,
	       daddr_t lastbn, int level, daddr_t *countp,
	       daddr_t *rcountp, long *lastsegp, size_t *bcp)
{
	int i;
	struct buf *bp;
	struct lfs *fs = ip->i_lfs;
	int32_t *bap;	/* XXX ondisk32 */
	struct vnode *vp;
	daddr_t nb, nlbn, last;
	int32_t *copy = NULL;	/* XXX ondisk32 */
	daddr_t blkcount, rblkcount, factor;
	int nblocks;
	daddr_t blocksreleased = 0, real_released = 0;
	int error = 0, allerror = 0;

	ASSERT_SEGLOCK(fs);

	/*
	 * Calculate index in current block of last
	 * block to be kept.  -1 indicates the entire
	 * block so we need not calculate the index.
	 */
	factor = 1;
	for (i = SINGLE; i < level; i++)
		factor *= LFS_NINDIR(fs);
	last = lastbn;
	if (lastbn > 0)
		last /= factor;
	nblocks = lfs_btofsb(fs, lfs_sb_getbsize(fs));

	/*
	 * Get buffer of block pointers, zero those entries corresponding
	 * to blocks to be free'd, and update on disk copy first.  Since
	 * double (triple) indirect blocks are truncated before single
	 * (double) indirect blocks, calls to bmap on these blocks will
	 * fail.  However, we already have the on disk address, so we have
	 * to set the b_blkno field explicitly instead of letting bread do
	 * everything for us.
	 */
	vp = ITOV(ip);
	bp = getblk(vp, lbn, lfs_sb_getbsize(fs), 0, 0);
	if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
		/* Braces must be here in case trace evaluates to nothing. */
		trace(TR_BREADHIT, pack(vp, lfs_sb_getbsize(fs)), lbn);
	} else {
		trace(TR_BREADMISS, pack(vp, lfs_sb_getbsize(fs)), lbn);
		curlwp->l_ru.ru_inblock++;	/* pay for read */
		bp->b_flags |= B_READ;
		if (bp->b_bcount > bp->b_bufsize)
			panic("lfs_indirtrunc: bad buffer size");
		bp->b_blkno = LFS_FSBTODB(fs, dbn);
		VOP_STRATEGY(vp, bp);
		error = biowait(bp);
	}
	if (error) {
		brelse(bp, 0);
		*countp = *rcountp = 0;
		return (error);
	}

	bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
	if (lastbn >= 0) {
		copy = lfs_malloc(fs, lfs_sb_getbsize(fs), LFS_NB_IBLOCK);
		memcpy((void *)copy, (void *)bap, lfs_sb_getbsize(fs));
		memset((void *)&bap[last + 1], 0,	/* XXX ondisk32 */
		    (u_int)(LFS_NINDIR(fs) - (last + 1)) * sizeof (int32_t));
		error = VOP_BWRITE(bp->b_vp, bp);
		if (error)
			allerror = error;
		bap = copy;
	}

	/*
	 * Recursively free totally unused blocks.
	 */
	for (i = LFS_NINDIR(fs) - 1, nlbn = lbn + 1 - i * factor; i > last;
	     i--, nlbn += factor) {
		nb = bap[i];
		if (nb == 0)
			continue;
		if (level > SINGLE) {
			error = lfs_indirtrunc(ip, nlbn, nb, (daddr_t)-1,
					       level - 1, &blkcount,
					       &rblkcount, lastsegp, bcp);
			if (error)
				allerror = error;
			blocksreleased += blkcount;
			real_released += rblkcount;
		}
		lfs_blkfree(fs, ip, nb, lfs_sb_getbsize(fs), lastsegp, bcp);
		if (bap[i] > 0)
			real_released += nblocks;
		blocksreleased += nblocks;
	}

	/*
	 * Recursively free last partial block.
	 */
	if (level > SINGLE && lastbn >= 0) {
		last = lastbn % factor;
		nb = bap[i];
		if (nb != 0) {
			error = lfs_indirtrunc(ip, nlbn, nb, last, level - 1,
					       &blkcount, &rblkcount,
					       lastsegp, bcp);
			if (error)
				allerror = error;
			real_released += rblkcount;
			blocksreleased += blkcount;
		}
	}

	if (copy != NULL) {
		lfs_free(fs, copy, LFS_NB_IBLOCK);
	} else {
		mutex_enter(&bufcache_lock);
		if (bp->b_oflags & BO_DELWRI) {
			LFS_UNLOCK_BUF(bp);
			lfs_sb_addavail(fs, lfs_btofsb(fs, bp->b_bcount));
			wakeup(&fs->lfs_availsleep);
		}
		brelsel(bp, BC_INVAL);
		mutex_exit(&bufcache_lock);
	}

	*countp = blocksreleased;
	*rcountp = real_released;
	return (allerror);
}
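/*
 * Sketch (not part of the original source): the factor/last arithmetic in
 * lfs_indirtrunc() maps a last-block number onto a slot inside the current
 * indirect block.  With level - SINGLE extra levels of indirection each
 * slot covers NINDIR^(level - SINGLE) data blocks, so lastbn / factor
 * picks the last slot to keep and lastbn % factor is the residue passed to
 * the recursive call for that slot's partial subtree.  NINDIR below is a
 * hypothetical pointers-per-block value, not taken from any real fs.
 */
#if 0	/* illustration only */
#include <stdio.h>

#define NINDIR 2048	/* hypothetical pointers per indirect block */

int main(void)
{
	long lastbn = 5000000, factor = 1;
	int level, SINGLE = 1, DOUBLE = 2;

	/* same loop shape as lfs_indirtrunc(), at level == DOUBLE */
	for (level = SINGLE; level < DOUBLE; level++)
		factor *= NINDIR;	/* factor = NINDIR^(DOUBLE - SINGLE) */

	/* slot 2441, residue 832: slots above 2441 are freed whole, then
	 * the code recurses into slot 2441 with lastbn % factor = 832. */
	printf("slot %ld, residue %ld\n", lastbn / factor, lastbn % factor);
	return 0;
}
#endif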