void relpath_expr::compute_scripting_kind()
{
  theScriptingKind = UNKNOWN_SCRIPTING_KIND;

  for (unsigned i = 0; i < size(); ++i)
  {
    expr* step = theSteps[i];

    if (step->is_updating())
    {
      throw XQUERY_EXCEPTION(err::XUST0001,
                             ERROR_PARAMS(ZED(XUST0001_Generic)),
                             ERROR_LOC(get_loc()));
    }

    theScriptingKind |= step->get_scripting_detail();
  }

  theScriptingKind &= ~VACUOUS_EXPR;

  if (is_sequential(theScriptingKind))
    theScriptingKind &= ~SIMPLE_EXPR;

  checkScriptingKind();
}
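/*
 * Editor's sketch (not Zorba source): the compute_scripting_kind methods
 * in this section fold each child's scripting detail into one bitmask and
 * then clear subsumed bits.  The flag values and the free-standing
 * is_sequential test below are hypothetical stand-ins for Zorba's real
 * enum; they only demonstrate the bit algebra.
 */
#include <cstdio>

enum scripting_kind_t {
  UNKNOWN_SCRIPTING_KIND = 0,
  VACUOUS_EXPR    = 1 << 0,  // e.g. fn:error() or ()
  SIMPLE_EXPR     = 1 << 1,  // plain, side-effect-free XQuery
  UPDATING_EXPR   = 1 << 2,  // produces pending updates
  SEQUENTIAL_EXPR = 1 << 3   // scripting: ordered side effects
};

static bool is_sequential(unsigned kind) { return (kind & SEQUENTIAL_EXPR) != 0; }

int main() {
  unsigned kind = UNKNOWN_SCRIPTING_KIND;
  kind |= SIMPLE_EXPR;                    // step 1: plain navigation
  kind |= SEQUENTIAL_EXPR | SIMPLE_EXPR;  // step 2: calls a sequential function

  kind &= ~VACUOUS_EXPR;                  // a step sequence is never vacuous
  if (is_sequential(kind))
    kind &= ~SIMPLE_EXPR;                 // sequential subsumes simple

  printf("kind = 0x%x\n", kind);          // prints 0x8 (SEQUENTIAL_EXPR only)
  return 0;
}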
void Ioss::Map::map_data(void *data, const Ioss::Field &field, size_t count) const
{
  if (!is_sequential(map)) {
    if (field.get_type() == Ioss::Field::INTEGER) {
      int *datum = static_cast<int*>(data);
      for (size_t i=0; i < count; i++)
        datum[i] = map[datum[i]];
    } else {
      int64_t *datum = static_cast<int64_t*>(data);
      for (size_t i=0; i < count; i++)
        datum[i] = map[datum[i]];
    }
  }
}
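/*
 * Editor's sketch (hypothetical, not the real Ioss::Map internals): one
 * cheap way is_sequential(map) could be defined, treating a map as
 * "sequential" exactly when it is the identity permutation, in which case
 * map_data above can skip the in-place rewrite entirely.
 */
#include <cstddef>
#include <cstdint>
#include <vector>

static bool is_sequential(const std::vector<int64_t> &map)
{
  for (size_t i = 0; i < map.size(); i++) {
    if (map[i] != static_cast<int64_t>(i))
      return false;  // some id is remapped; the map must be applied
  }
  return true;       // identity map: applying it would be a no-op
}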
void flwor_expr::compute_scripting_kind()
{
  csize numClauses = num_clauses();

  for (csize i = 0; i < numClauses; ++i)
  {
    const flwor_clause* c = theClauses[i];
    flwor_clause::ClauseKind k = c->get_kind();

    if (k == flwor_clause::for_clause ||
        k == flwor_clause::let_clause ||
        k == flwor_clause::window_clause)
    {
      const forletwin_clause* c2 = static_cast<const forletwin_clause*>(c);

      theScriptingKind |= c2->get_expr()->get_scripting_detail();

      if (c2->get_expr()->is_sequential())
        set_sequential_clauses(true);
    }
  }

  const expr* ret = get_return_expr();

  if (ret)
    theScriptingKind |= ret->get_scripting_detail();

  if (is_sequential(theScriptingKind))
  {
    theScriptingKind &= ~SIMPLE_EXPR;
    theScriptingKind &= ~VACUOUS_EXPR;
  }

  if (theScriptingKind & UPDATING_EXPR)
  {
    theScriptingKind &= ~SIMPLE_EXPR;
    theScriptingKind &= ~VACUOUS_EXPR;
  }

  if (theScriptingKind & VACUOUS_EXPR)
  {
    if (ret && ret->is_vacuous())
      theScriptingKind &= ~SIMPLE_EXPR;
    else
      theScriptingKind &= ~VACUOUS_EXPR;
  }

  checkScriptingKind();
}
void Ioss::Map::reverse_map_data(void *data, const Ioss::Field &field, size_t count) const
{
  assert(!map.empty());
  if (!is_sequential(map)) {
    if (field.get_type() == Ioss::Field::INTEGER) {
      int* connect = static_cast<int*>(data);
      for (size_t i=0; i < count; i++) {
        int global_id = connect[i];
        connect[i] = global_to_local(global_id, true);
      }
    } else {
      int64_t* connect = static_cast<int64_t*>(data);
      for (size_t i=0; i < count; i++) {
        int64_t global_id = connect[i];
        connect[i] = global_to_local(global_id, true);
      }
    }
  }
}
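/*
 * Editor's sketch (hypothetical names, not Ioss source): reverse_map_data
 * depends on global_to_local(global_id, must_exist).  One plausible shape
 * is a hash table built once from the forward map and queried per entry.
 */
#include <cstdint>
#include <stdexcept>
#include <unordered_map>

struct ReverseMap {
  std::unordered_map<int64_t, int64_t> globalToLocal;  // global id -> local index

  int64_t global_to_local(int64_t global_id, bool must_exist) const {
    auto it = globalToLocal.find(global_id);
    if (it != globalToLocal.end())
      return it->second;
    if (must_exist)                       // reverse_map_data passes true
      throw std::runtime_error("global id not in map");
    return 0;                             // caller treats 0 as "not found"
  }
};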
void relpath_expr::add_back(expr* step)
{
  if (step->is_updating())
  {
    throw XQUERY_EXCEPTION(err::XUST0001,
                           ERROR_PARAMS(ZED(XUST0001_Generic)),
                           ERROR_LOC(get_loc()));
  }

  theScriptingKind |= step->get_scripting_detail();

  if (theScriptingKind & VACUOUS_EXPR)
    theScriptingKind &= ~VACUOUS_EXPR;

  if (is_sequential(theScriptingKind))
    theScriptingKind &= ~SIMPLE_EXPR;

  checkScriptingKind();

  theSteps.push_back(step);
}
int
ext2_bmaparray(struct vnode *vp, daddr_t bn, daddr_t *bnp, int *runp, int *runb)
{
        struct inode *ip;
        struct buf *bp;
        struct ext2mount *ump;
        struct mount *mp;
        struct vnode *devvp;
        struct indir a[NIADDR+1], *ap;
        daddr_t daddr;
        e2fs_lbn_t metalbn;
        int error, num, maxrun = 0, bsize;
        int *nump;

        ap = NULL;
        ip = VTOI(vp);
        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);
        devvp = ump->um_devvp;
        bsize = EXT2_BLOCK_SIZE(ump->um_e2fs);

        if (runp) {
                maxrun = mp->mnt_iosize_max / bsize - 1;
                *runp = 0;
        }
        if (runb)
                *runb = 0;

        ap = a;
        nump = &num;
        error = ext2_getlbns(vp, bn, ap, nump);
        if (error)
                return (error);

        num = *nump;
        if (num == 0) {
                *bnp = blkptrtodb(ump, ip->i_db[bn]);
                if (*bnp == 0) {
                        *bnp = -1;
                } else if (runp) {
                        daddr_t bnb = bn;

                        for (++bn; bn < NDADDR && *runp < maxrun &&
                            is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
                            ++bn, ++*runp);
                        bn = bnb;
                        if (runb && (bn > 0)) {
                                for (--bn; (bn >= 0) && (*runb < maxrun) &&
                                    is_sequential(ump, ip->i_db[bn],
                                    ip->i_db[bn + 1]);
                                    --bn, ++*runb);
                        }
                }
                return (0);
        }

        /* Get disk address out of indirect block array */
        daddr = ip->i_ib[ap->in_off];

        for (bp = NULL, ++ap; --num; ++ap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = ap->in_lbn;
                if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) ||
                    metalbn == bn)
                        break;

                /*
                 * If we get here, we've either got the block in the cache
                 * or we have a disk address for it, go fetch it.
                 */
                if (bp)
                        bqrelse(bp);

                bp = getblk(vp, metalbn, bsize, 0, 0, 0);
                if ((bp->b_flags & B_CACHE) == 0) {
#ifdef INVARIANTS
                        if (!daddr)
                                panic("ext2_bmaparray: indirect block not in cache");
#endif
                        bp->b_blkno = blkptrtodb(ump, daddr);
                        bp->b_iocmd = BIO_READ;
                        bp->b_flags &= ~B_INVAL;
                        bp->b_ioflags &= ~BIO_ERROR;
                        vfs_busy_pages(bp, 0);
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        bstrategy(bp);
                        curthread->td_ru.ru_inblock++;
                        error = bufwait(bp);
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                }

                daddr = ((e2fs_daddr_t *)bp->b_data)[ap->in_off];
                if (num == 1 && daddr && runp) {
                        for (bn = ap->in_off + 1;
                            bn < MNINDIR(ump) && *runp < maxrun &&
                            is_sequential(ump,
                            ((e2fs_daddr_t *)bp->b_data)[bn - 1],
                            ((e2fs_daddr_t *)bp->b_data)[bn]);
                            ++bn, ++*runp);
                        bn = ap->in_off;
                        if (runb && bn) {
                                for (--bn; bn >= 0 && *runb < maxrun &&
                                    is_sequential(ump,
                                    ((e2fs_daddr_t *)bp->b_data)[bn],
                                    ((e2fs_daddr_t *)bp->b_data)[bn + 1]);
                                    --bn, ++*runb);
                        }
                }
        }
        if (bp)
                bqrelse(bp);

        /*
         * Since this is FFS independent code, we are out of scope for the
         * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
         * will fall in the range 1..um_seqinc, so we use that test and
         * return a request for a zeroed out buffer if attempts are made
         * to read a BLK_NOCOPY or BLK_SNAP block.
         */
        if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 &&
            daddr < ump->um_seqinc) {
                *bnp = -1;
                return (0);
        }
        *bnp = blkptrtodb(ump, daddr);
        if (*bnp == 0)
                *bnp = -1;
        return (0);
}
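/*
 * Editor's sketch: every *runp/*runb loop in the bmap routines in this
 * section grows a run while consecutive block pointers are physically
 * adjacent on disk.  FreeBSD expresses the test as a one-line macro on the
 * mount; this stand-alone version (struct name hypothetical) shows the
 * arithmetic: block b continues a's run iff it starts exactly one
 * filesystem block (um_seqinc device blocks) after a.
 */
struct seq_mount { long um_seqinc; };  // device blocks per fs block

static bool is_sequential(const struct seq_mount *ump, long a, long b)
{
  return b == a + ump->um_seqinc;
}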
/*
 * Indirect blocks are now on the vnode for the file. They are given negative
 * logical block numbers. Indirect blocks are addressed by the negative
 * address of the first data block to which they point. Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point. Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ufs_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
int
ufs_bmaparray(struct vnode *vp, daddr64_t bn, daddr64_t *bnp,
    struct indir *ap, int *nump, int *runp)
{
        struct inode *ip;
        struct buf *bp;
        struct ufsmount *ump;
        struct mount *mp;
        struct vnode *devvp;
        struct indir a[NIADDR+1], *xap;
        daddr64_t daddr, metalbn;
        int error, maxrun = 0, num;

        ip = VTOI(vp);
        mp = vp->v_mount;
        ump = VFSTOUFS(mp);
#ifdef DIAGNOSTIC
        if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
                panic("ufs_bmaparray: invalid arguments");
#endif

        if (runp) {
                /*
                 * XXX
                 * If MAXBSIZE is the largest transfer the disks can handle,
                 * we probably want maxrun to be 1 block less so that we
                 * don't create a block larger than the device can handle.
                 */
                *runp = 0;
                maxrun = MAXBSIZE / mp->mnt_stat.f_iosize - 1;
        }

        xap = ap == NULL ? a : ap;
        if (!nump)
                nump = &num;
        if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
                return (error);

        num = *nump;
        if (num == 0) {
                *bnp = blkptrtodb(ump, DIP(ip, db[bn]));
                if (*bnp == 0)
                        *bnp = -1;
                else if (runp)
                        for (++bn; bn < NDADDR && *runp < maxrun &&
                            is_sequential(ump,
                            DIP(ip, db[bn - 1]), DIP(ip, db[bn]));
                            ++bn, ++*runp);
                return (0);
        }

        /* Get disk address out of indirect block array */
        daddr = DIP(ip, ib[xap->in_off]);

        devvp = VFSTOUFS(vp->v_mount)->um_devvp;
        for (bp = NULL, ++xap; --num; ++xap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = xap->in_lbn;
                if ((daddr == 0 && !incore(vp, metalbn)) || metalbn == bn)
                        break;

                /*
                 * If we get here, we've either got the block in the cache
                 * or we have a disk address for it, go fetch it.
                 */
                if (bp)
                        brelse(bp);

                xap->in_exists = 1;
                bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0, 0);
                if (bp->b_flags & (B_DONE | B_DELWRI)) {
                        ;
                }
#ifdef DIAGNOSTIC
                else if (!daddr)
                        panic("ufs_bmaparray: indirect block not in cache");
#endif
                else {
                        bp->b_blkno = blkptrtodb(ump, daddr);
                        bp->b_flags |= B_READ;
                        bcstats.pendingreads++;
                        bcstats.numreads++;
                        VOP_STRATEGY(bp);
                        curproc->p_ru.ru_inblock++;     /* XXX */
                        if ((error = biowait(bp)) != 0) {
                                brelse(bp);
                                return (error);
                        }
                }

#ifdef FFS2
                if (ip->i_ump->um_fstype == UM_UFS2) {
                        daddr = ((int64_t *)bp->b_data)[xap->in_off];
                        if (num == 1 && daddr && runp)
                                for (bn = xap->in_off + 1;
                                    bn < MNINDIR(ump) && *runp < maxrun &&
                                    is_sequential(ump,
                                    ((int64_t *)bp->b_data)[bn - 1],
                                    ((int64_t *)bp->b_data)[bn]);
                                    ++bn, ++*runp);
                        continue;
                }
#endif /* FFS2 */

                daddr = ((int32_t *)bp->b_data)[xap->in_off];
                if (num == 1 && daddr && runp)
                        for (bn = xap->in_off + 1;
                            bn < MNINDIR(ump) && *runp < maxrun &&
                            is_sequential(ump,
                            ((int32_t *)bp->b_data)[bn - 1],
                            ((int32_t *)bp->b_data)[bn]);
                            ++bn, ++*runp);
        }
        if (bp)
                brelse(bp);

        daddr = blkptrtodb(ump, daddr);
        *bnp = daddr == 0 ? -1 : daddr;
        return (0);
}
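/*
 * Editor's sketch: concrete lbns under the negative-addressing scheme the
 * header comment above describes, assuming 12 direct pointers and 2048
 * pointers per indirect block (both values vary by filesystem).
 */
#include <cstdio>

int main() {
  const long NDADDR = 12;    // direct block pointers in the inode (assumed)
  const long nindir = 2048;  // block pointers per indirect block (assumed)

  // The single indirect block maps data lbns [NDADDR, NDADDR + nindir)
  // and is named by the negative of the first data lbn it maps.
  long single_indirect = -NDADDR;                        // -12

  // The double indirect block's first child maps data starting at
  // NDADDR + nindir; the double indirect is named one less than that
  // child's name.
  long first_child = -(NDADDR + nindir);                 // -2060
  long double_indirect = first_child - 1;                // -2061

  printf("single indirect lbn: %ld\n", single_indirect);
  printf("double indirect lbn: %ld\n", double_indirect);
  return 0;
}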
/*
 * Indirect blocks are now on the vnode for the file. They are given negative
 * logical block numbers. Indirect blocks are addressed by the negative
 * address of the first data block to which they point. Double indirect blocks
 * are addressed by one less than the address of the first indirect block to
 * which they point. Triple indirect blocks are addressed by one less than
 * the address of the first double indirect block to which they point.
 *
 * ext2_bmaparray does the bmap conversion, and if requested returns the
 * array of logical blocks which must be traversed to get to a block.
 * Each entry contains the offset into that block that gets you to the
 * next block and the disk address of the block (if it is assigned).
 */
static int
ext2_bmaparray(struct vnode *vp, ext2_daddr_t bn, ext2_daddr_t *bnp,
    struct indir *ap, int *nump, int *runp, int *runb)
{
        struct inode *ip;
        struct buf *bp;
        struct ext2_mount *ump;
        struct mount *mp;
        struct ext2_sb_info *fs;
        struct indir a[NIADDR+1], *xap;
        ext2_daddr_t daddr;
        long metalbn;
        int error, maxrun, num;

        ip = VTOI(vp);
        mp = vp->v_mount;
        ump = VFSTOEXT2(mp);
        fs = ip->i_e2fs;
#ifdef DIAGNOSTIC
        if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
                panic("ext2_bmaparray: invalid arguments");
#endif

        if (runp)
                *runp = 0;
        if (runb)
                *runb = 0;
        maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;

        xap = ap == NULL ? a : ap;
        if (!nump)
                nump = &num;
        error = ext2_getlbns(vp, bn, xap, nump);
        if (error)
                return (error);

        num = *nump;
        if (num == 0) {
                *bnp = blkptrtodb(ump, ip->i_db[bn]);
                if (*bnp == 0) {
                        *bnp = -1;
                } else if (runp) {
                        daddr_t bnb = bn;

                        for (++bn; bn < NDADDR && *runp < maxrun &&
                            is_sequential(ump, ip->i_db[bn - 1], ip->i_db[bn]);
                            ++bn, ++*runp);
                        bn = bnb;
                        if (runb && (bn > 0)) {
                                for (--bn; (bn >= 0) && (*runb < maxrun) &&
                                    is_sequential(ump, ip->i_db[bn],
                                    ip->i_db[bn+1]);
                                    --bn, ++*runb);
                        }
                }
                return (0);
        }

        /* Get disk address out of indirect block array */
        daddr = ip->i_ib[xap->in_off];

        for (bp = NULL, ++xap; --num; ++xap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = xap->in_lbn;
                if ((daddr == 0 &&
                    !findblk(vp, dbtodoff(fs, metalbn), FINDBLK_TEST)) ||
                    metalbn == bn) {
                        break;
                }

                /*
                 * If we get here, we've either got the block in the cache
                 * or we have a disk address for it, go fetch it.
                 */
                if (bp)
                        bqrelse(bp);

                xap->in_exists = 1;
                bp = getblk(vp, lblktodoff(fs, metalbn),
                    mp->mnt_stat.f_iosize, 0, 0);
                if ((bp->b_flags & B_CACHE) == 0) {
#ifdef DIAGNOSTIC
                        if (!daddr)
                                panic("ext2_bmaparray: indirect block not in cache");
#endif
                        /*
                         * This runs through ext2_strategy using bio2 to
                         * cache the disk offset, then comes back through
                         * bio1.  So we want to wait on bio1.
                         */
                        bp->b_bio1.bio_done = biodone_sync;
                        bp->b_bio1.bio_flags |= BIO_SYNC;
                        bp->b_bio2.bio_offset = fsbtodoff(fs, daddr);
                        bp->b_flags &= ~(B_INVAL|B_ERROR);
                        bp->b_cmd = BUF_CMD_READ;
                        vfs_busy_pages(bp->b_vp, bp);
                        vn_strategy(bp->b_vp, &bp->b_bio1);
                        error = biowait(&bp->b_bio1, "biord");
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                }

                daddr = ((ext2_daddr_t *)bp->b_data)[xap->in_off];
                if (num == 1 && daddr && runp) {
                        for (bn = xap->in_off + 1;
                            bn < MNINDIR(ump) && *runp < maxrun &&
                            is_sequential(ump,
                            ((ext2_daddr_t *)bp->b_data)[bn - 1],
                            ((ext2_daddr_t *)bp->b_data)[bn]);
                            ++bn, ++*runp);
                        bn = xap->in_off;
                        if (runb && bn) {
                                for (--bn; bn >= 0 && *runb < maxrun &&
                                    is_sequential(ump,
                                    ((daddr_t *)bp->b_data)[bn],
                                    ((daddr_t *)bp->b_data)[bn+1]);
                                    --bn, ++*runb);
                        }
                }
        }
        if (bp)
                bqrelse(bp);

        daddr = blkptrtodb(ump, daddr);
        *bnp = daddr == 0 ? -1 : daddr;
        return (0);
}
int
ufs_bmaparray(vnode *vp, ufs2_daddr_t bn, ufs2_daddr_t *bnp, Buf *nbp,
    int *runp, int *runb)
{
        Buf *bp;
        Indir a[UFS_NIADDR+1], *ap;
        ufs2_daddr_t daddr;
        ufs_lbn_t metalbn;
        int error, num, maxrun = 0;
        int *nump;

        inode *ip = vp->data;
        MountPoint *mp = vp->mount;
        ufsmount *ump = mp->mnt_data;

        if (runp) {
                maxrun = mp->mnt_iosize_max / mp->mnt_stat.f_iosize - 1;
                *runp = 0;
        }
        if (runb)
                *runb = 0;

        ap = a;
        nump = &num;
        error = ufs_getlbns(vp, bn, ap, nump);
        if (error)
                return error;

        num = *nump;
        if (num == 0) {
                if (bn >= 0 && bn < UFS_NDADDR) {
                        *bnp = blkptrtodb(ump, ip->din2->di_db[bn]);
                } else if (bn < 0 && bn >= -UFS_NXADDR) {
                        *bnp = blkptrtodb(ump, ip->din2->di_extb[-1 - bn]);
                        if (*bnp == 0)
                                *bnp = -1;
                        if (nbp == nil)
                                panic("ufs_bmaparray: mapping ext data");
                        // TODO HARVEY Mark ALTDATA?
                        //nbp->b_xflags |= BX_ALTDATA;
                        return (0);
                } else {
                        panic("ufs_bmaparray: blkno out of range");
                }
                /*
                 * Since this is FFS independent code, we are out of
                 * scope for the definitions of BLK_NOCOPY and
                 * BLK_SNAP, but we do know that they will fall in
                 * the range 1..um_seqinc, so we use that test and
                 * return a request for a zeroed out buffer if attempts
                 * are made to read a BLK_NOCOPY or BLK_SNAP block.
                 */
                if ((ip->i_flags & SF_SNAPSHOT) &&
                    ip->din2->di_db[bn] > 0 &&
                    ip->din2->di_db[bn] < ump->um_seqinc) {
                        *bnp = -1;
                } else if (*bnp == 0) {
                        if (ip->i_flags & SF_SNAPSHOT)
                                *bnp = blkptrtodb(ump, bn * ump->um_seqinc);
                        else
                                *bnp = -1;
                } else if (runp) {
                        ufs2_daddr_t bnb = bn;

                        for (++bn; bn < UFS_NDADDR && *runp < maxrun &&
                            is_sequential(ump, ip->din2->di_db[bn - 1],
                            ip->din2->di_db[bn]);
                            ++bn, ++*runp)
                                ;
                        bn = bnb;
                        if (runb && (bn > 0)) {
                                for (--bn; (bn >= 0) && (*runb < maxrun) &&
                                    is_sequential(ump, ip->din2->di_db[bn],
                                    ip->din2->di_db[bn+1]);
                                    --bn, ++*runb)
                                        ;
                        }
                }
                return (0);
        }

        /* Get disk address out of indirect block array */
        daddr = ip->din2->di_ib[ap->in_off];

        for (bp = nil, ++ap; --num; ++ap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = ap->in_lbn;
                // TODO HARVEY Going to have to revisit this when we implement
                // writing, so we can read writes before they've been flushed
                // to disk.
                //if ((daddr == 0 && !incore(&vp->v_bufobj, metalbn)) ||
                //    metalbn == bn)
                //        break;
                if (daddr == 0 || metalbn == bn)
                        break;

                /*
                 * If we get here, we've either got the block in the cache
                 * or we have a disk address for it, go fetch it.
                 */
                if (bp)
                        releasebuf(bp);

                bp = getblk(vp, metalbn, mp->mnt_stat.f_iosize, 0);
                // TODO HARVEY Revisit when we manage a cache of Bufs
                /*if ((bp->b_flags & B_CACHE) == 0) {
#ifdef INVARIANTS
                        if (!daddr)
                                panic("ufs_bmaparray: indirect block not in cache");
#endif
                        bp->b_blkno = blkptrtodb(ump, daddr);
                        bp->b_iocmd = BIO_READ;
                        bp->b_flags &= ~B_INVAL;
                        bp->b_ioflags &= ~BIO_ERROR;
                        vfs_busy_pages(bp, 0);
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        ffs_geom_strategy(bp);
                        curthread->td_ru.ru_inblock++;
                        error = bufwait(bp);
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                }*/

                daddr = ((ufs2_daddr_t *)bp->data)[ap->in_off];
                if (num == 1 && daddr && runp) {
                        for (bn = ap->in_off + 1;
                            bn < ump->um_nindir && *runp < maxrun &&
                            is_sequential(ump,
                            ((ufs2_daddr_t *)bp->data)[bn - 1],
                            ((ufs2_daddr_t *)bp->data)[bn]);
                            ++bn, ++*runp);
                        bn = ap->in_off;
                        if (runb && bn) {
                                for (--bn; bn >= 0 && *runb < maxrun &&
                                    is_sequential(ump,
                                    ((ufs2_daddr_t *)bp->data)[bn],
                                    ((ufs2_daddr_t *)bp->data)[bn + 1]);
                                    --bn, ++*runb);
                        }
                }
        }
        if (bp)
                releasebuf(bp);

        /*
         * Since this is FFS independent code, we are out of scope for the
         * definitions of BLK_NOCOPY and BLK_SNAP, but we do know that they
         * will fall in the range 1..um_seqinc, so we use that test and
         * return a request for a zeroed out buffer if attempts are made
         * to read a BLK_NOCOPY or BLK_SNAP block.
         */
        if ((ip->i_flags & SF_SNAPSHOT) && daddr > 0 &&
            daddr < ump->um_seqinc) {
                *bnp = -1;
                return (0);
        }
        *bnp = blkptrtodb(ump, daddr);
        if (*bnp == 0) {
                if (ip->i_flags & SF_SNAPSHOT)
                        *bnp = blkptrtodb(ump, bn * ump->um_seqinc);
                else
                        *bnp = -1;
        }
        return 0;
}
int
ext2fs_bmaparray(struct vnode *vp,
#undef struct
    daddr_t bn, daddr_t *bnp, struct indir *ap, int *nump, int *runp)
{
        struct inode *ip;
        struct buf *bp, *cbp;
#define struct
//      struct ufsmount *ump;
        struct mount *mp;
#undef struct
        struct indir a[NIADDR+1], *xap;
        daddr_t daddr;
        daddr_t metalbn;
        int error, maxrun = 0, num;

        ip = VTOI(vp);
        mp = EXT2_SIMPLE_FILE_SYSTEM_PRIVATE_DATA_FROM_THIS(vp->Filesystem);
//      mp = vp->v_mount;       !!!! need to fix this badly!
//      ump = ip->i_ump;        NEED TO DO SOMETHING ABOUT ufsmount
#ifdef DIAGNOSTIC
        if ((ap != NULL && nump == NULL) || (ap == NULL && nump != NULL))
                panic("ext2fs_bmaparray: invalid arguments");
#endif

        if (runp) {
                /*
                 * XXX
                 * If MAXBSIZE is the largest transfer the disks can handle,
                 * we probably want maxrun to be 1 block less so that we
                 * don't create a block larger than the device can handle.
                 */
                *runp = 0;
                maxrun = MAXBSIZE /
//                  mp->mnt_stat.f_iosize - 1;  NEEDS FIX!!!
                    mp->fs->e2fs_bsize - 1;
        }

        if (bn >= 0 && bn < NDADDR) {
                /* XXX ondisk32 */
                *bnp = blkptrtodb(ump, fs2h32(ip->i_e2fs_blocks[bn]));
                if (*bnp == 0)
                        *bnp = -1;
                else if (runp)
                        /* XXX ondisk32 */
                        for (++bn; bn < NDADDR && *runp < maxrun &&
                            is_sequential(ump,
                            (daddr_t)fs2h32(ip->i_e2fs_blocks[bn - 1]),
                            (daddr_t)fs2h32(ip->i_e2fs_blocks[bn]));
                            ++bn, ++*runp);
                return (0);
        }

        xap = ap == NULL ? a : ap;
        if (!nump)
                nump = &num;
        if ((error = ufs_getlbns(vp, bn, xap, nump)) != 0)
                return (error);

        num = *nump;

        /* Get disk address out of indirect block array */
        /* XXX ondisk32 */
        daddr = fs2h32(ip->i_e2fs_blocks[NDADDR + xap->in_off]);

#ifdef DIAGNOSTIC
        if (num > NIADDR + 1 || num < 1) {
                printf("ext2fs_bmaparray: num=%d\n", num);
                panic("ext2fs_bmaparray: num");
        }
#endif
        for (bp = NULL, ++xap; --num; ++xap) {
                /*
                 * Exit the loop if there is no disk address assigned yet and
                 * the indirect block isn't in the cache, or if we were
                 * looking for an indirect block and we've found it.
                 */
                metalbn = xap->in_lbn;
                if (metalbn == bn)
                        break;
                if (daddr == 0) {
                        mutex_enter(&bufcache_lock);
                        cbp = incore(vp, metalbn);
                        mutex_exit(&bufcache_lock);
                        if (cbp == NULL)
                                break;
                }

                /*
                 * If we get here, we've either got the block in the cache
                 * or we have a disk address for it, go fetch it.
                 */
                if (bp)
                        brelse(bp, 0);

                xap->in_exists = 1;
                //!!!!!!!!!!!!!!replaced 3rd param with 1 ftw
                bp = getblk(vp, metalbn, 1, 0, 0);
                if (bp == NULL) {
                        /*
                         * getblk() above returns NULL only iff we are
                         * pagedaemon.  See the implementation of getblk
                         * for detail.
                         */
                        return (ENOMEM);
                }
                if (bp->b_oflags & (BO_DONE | BO_DELWRI)) {
                        trace(TR_BREADHIT, pack(vp, size), metalbn);
                }
#ifdef DIAGNOSTIC
                else if (!daddr)
                        panic("ext2fs_bmaparray: indirect block not in cache");
#endif
                else {
                        trace(TR_BREADMISS, pack(vp, size), metalbn);
                        bp->b_blkno = blkptrtodb(ump, daddr);
                        bp->b_flags |= B_READ;
                        VOP_STRATEGY(vp, bp);
//                      curlwp->l_ru.ru_inblock++;      /* XXX */
                        if ((error = biowait(bp)) != 0) {
                                brelse(bp, 0);
                                return (error);
                        }
                }

                /* XXX ondisk32 */
                daddr = fs2h32(((int32_t *)bp->b_data)[xap->in_off]);
                if (num == 1 && daddr && runp)
                        /* XXX ondisk32 */
                        for (bn = xap->in_off + 1;
                            bn < MNINDIR(ump) && *runp < maxrun &&
                            is_sequential(ump,
                            ((int32_t *)bp->b_data)[bn - 1],
                            ((int32_t *)bp->b_data)[bn]);
                            ++bn, ++*runp);
        }
        if (bp)
                brelse(bp, 0);

        daddr = blkptrtodb(ump, daddr);
        *bnp = daddr == 0 ? -1 : daddr;
        return (0);
}