/*
 * Calculate the logical to physical mapping if not done already,
 * then call the device strategy routine.
 *
 * On a read that may be served from the WAPBL journal, the data just
 * read from disk is overwritten with the journalled copy so callers
 * never see stale pre-commit blocks.
 */
int
ufs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp;
	struct vnode *vp;
	struct inode *ip;
	struct mount *mp;
	int error;

	bp = ap->a_bp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	/* Device special files are handled by specfs, never here. */
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("ufs_strategy: spec");
	KASSERT(bp->b_bcount != 0);
	/*
	 * b_blkno == b_lblkno means the physical address has not been
	 * resolved yet; translate via VOP_BMAP.
	 */
	if (bp->b_blkno == bp->b_lblkno) {
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
				 NULL);
		if (error) {
			bp->b_error = error;
			biodone(bp);
			return (error);
		}
		if (bp->b_blkno == -1) /* no valid data */
			clrbuf(bp);
	}
	if (bp->b_blkno < 0) { /* block is not on disk (hole) */
		biodone(bp);
		return (0);
	}
	/* Issue the I/O against the underlying device vnode. */
	vp = ip->i_devvp;
	error = VOP_STRATEGY(vp, bp);
	if (error)
		return error;
	/* Writes need no journal-replay fixup. */
	if (!BUF_ISREAD(bp))
		return 0;
	/*
	 * If a WAPBL replay is open and holds a newer copy of this
	 * range, wait for the disk read and substitute the journal
	 * contents.
	 */
	mp = wapbl_vptomp(vp);
	if (mp == NULL || mp->mnt_wapbl_replay == NULL ||
	    !WAPBL_REPLAY_ISOPEN(mp) ||
	    !WAPBL_REPLAY_CAN_READ(mp, bp->b_blkno, bp->b_bcount))
		return 0;
	error = biowait(bp);
	if (error)
		return error;
	error = WAPBL_REPLAY_READ(mp, bp->b_data, bp->b_blkno,
	    bp->b_bcount);
	if (error) {
		/* Replay overlay failed: poison the buffer so it is
		 * not reused with mixed contents. */
		mutex_enter(&bufcache_lock);
		SET(bp->b_cflags, BC_INVAL);
		mutex_exit(&bufcache_lock);
	}
	return error;
}
// Emit the terrain triangles overlapping 'box' into 'polyList'.
// Returns true if at least one polygon was emitted.
bool TerrainBlock::buildPolyList(PolyListContext, AbstractPolyList* polyList, const Box3F &box, const SphereF&)
{
   PROFILE_SCOPE( TerrainBlock_buildPolyList );

   // First check to see if the query misses the
   // terrain elevation range.
   const Point3F &terrainPos = getPosition();
   if ( box.maxExtents.z - terrainPos.z < -TerrainThickness ||
        box.minExtents.z - terrainPos.z > fixedToFloat( mFile->getMaxHeight() ) )
      return false;

   // Transform the query box into the object's coordinate space.
   // NOTE(review): transforming an AABB this way grows it, so this is
   // conservative rather than optimal.
   Box3F osBox = box;
   mWorldToObj.mul(osBox);
   AssertWarn(mObjScale == Point3F::One, "Error, handle the scale transform on the terrain");

   // Setup collision state data
   polyList->setTransform(&getTransform(), getScale());
   polyList->setObject(this);

   // Grid-square range covered by the query, clamped to MaxExtent columns.
   S32 xStart = (S32)mFloor( osBox.minExtents.x / mSquareSize );
   S32 xEnd   = (S32)mCeil ( osBox.maxExtents.x / mSquareSize );
   S32 yStart = (S32)mFloor( osBox.minExtents.y / mSquareSize );
   S32 yEnd   = (S32)mCeil ( osBox.maxExtents.y / mSquareSize );
   if ( xStart < 0 )
      xStart = 0;
   // NOTE(review): yStart/yEnd are not clamped the way xStart/xExt are —
   // confirm whether negative y ranges can reach this path.
   S32 xExt = xEnd - xStart;
   if ( xExt > MaxExtent )
      xExt = MaxExtent;
   xEnd = xStart + xExt;
   U32 heightMax = floatToFixed(osBox.maxExtents.z);
   U32 heightMin = (osBox.minExtents.z < 0.0f)? 0.0f: floatToFixed(osBox.minExtents.z);

   // Index of shared points: two row buffers of vertex indices so a
   // vertex shared by adjacent squares is added to the poly list once.
   // 'clrbuf' presumably resets slots to U32_MAX, the "no index yet"
   // marker tested below — TODO confirm against its definition.
   U32 bp[(MaxExtent + 1) * 2],*vb[2];
   vb[0] = &bp[0];
   vb[1] = &bp[xExt + 1];
   clrbuf(vb[1],xExt + 1);
   const U32 BlockMask = mFile->mSize - 1;
   bool emitted = false;
   for (S32 y = yStart; y < yEnd; y++) {
      S32 yi = y & BlockMask;          // wrap into the terrain file
      swap(vb[0],vb[1]);               // previous row becomes "bottom" row
      clrbuf(vb[1],xExt + 1);          // fresh "top" row
      //
      for (S32 x = xStart; x < xEnd; x++) {
         S32 xi = x & BlockMask;
         const TerrainSquare *sq = mFile->findSquare( 0, xi, yi );
         if ( x != xi || y != yi )
            continue; // holes only in the primary terrain block
         // Skip empty (hole) squares and squares outside the z range.
         // NOTE(review): given the continue above, x == xi && y == yi
         // always holds here, so that sub-test is redundant — verify
         // which of the two guards is the intended behavior.
         if ( ( ( sq->flags & TerrainSquare::Empty ) && x == xi && y == yi ) ||
              sq->minHeight > heightMax ||
              sq->maxHeight < heightMin )
            continue;
         emitted = true;

         // Add the missing corner points; i enumerates the 4 corners,
         // (dx, dy) walks them in winding order.
         U32 vi[5];
         for (int i = 0; i < 4 ; i++) {
            S32 dx = i >> 1;
            S32 dy = dx ^ (i & 1);
            U32* vp = &vb[dy][x - xStart + dx];
            if (*vp == U32_MAX) {
               Point3F pos;
               pos.x = (F32)((x + dx) * mSquareSize);
               pos.y = (F32)((y + dy) * mSquareSize);
               pos.z = fixedToFloat( mFile->getHeight(xi + dx, yi + dy) );
               *vp = polyList->addPoint(pos);
            }
            vi[i] = *vp;
         }

         // Rotate the corner order for 135-degree splits so both
         // triangles share the correct diagonal.
         U32* vp = &vi[0];
         if ( !( sq->flags & TerrainSquare::Split45 ) )
            vi[4] = vi[0], vp++;

         BaseMatInstance *material = NULL; //getMaterialInst( xi, yi );
         U32 surfaceKey = ((xi << 16) + yi) << 1;

         // Two triangles per square.
         polyList->begin(material,surfaceKey);
         polyList->vertex(vp[0]);
         polyList->vertex(vp[1]);
         polyList->vertex(vp[2]);
         polyList->plane(vp[0],vp[1],vp[2]);
         polyList->end();
         polyList->begin(material,surfaceKey + 1);
         polyList->vertex(vp[0]);
         polyList->vertex(vp[2]);
         polyList->vertex(vp[3]);
         polyList->plane(vp[0],vp[2],vp[3]);
         polyList->end();
      }
   }
   return emitted;
}
/*
 * Interactive test driver for the Stack (list-backed) module.
 *
 * Repeatedly prints a one-letter command menu, reads a command from
 * stdin, and dispatches to the corresponding stack/list operation
 * until EOF.  The "-x" command-line option turns debug output on.
 *
 * Returns 0 on normal termination.
 */
int main (int argc, char *const* argv) {
    MyRec rec;          /* scratch record handed to the stack */
    MyRec *result;      /* record returned by stack queries */
    Stack *stack;       /* the container under test */
    int opt;            /* command-line option character */
    long cmd;           /* menu command typed by the user */
    long ok;            /* success flag from insert/push */

    /* debug display is off unless -x is given */
    set_debug_off ();

    while ((opt = getopt (argc, argv, "x")) != EOF) {
        switch (opt) {
            case 'x':
                set_debug_on ();
                break;
        }
    }

    /* allocate the stack with its element-management callbacks */
    stack = new_Stack (copy_MyRec, delete_MyRec,
                       is_greater_than_MyRec, write_MyRec);

    for (;;) {
        cmd = 0;        /* initialize command, needed for loops */

        /* display the command menu */
        writeline ("\nThe commands are:\n");
        writeline (" is(e)mpty, ");
        writeline ("(i)nsert, ");
        writeline ("(p)op, ");
        newline ();
        writeline (" (a)dvance pre, advance (n)ext, ");
        newline ();
        writeline (" (r)emove, ");
        writeline ("(t)op, ");
        writeline ("p(u)sh, ");
        newline ();
        writeline (" (v)iew, ");
        writeline ("(w)rite, (W)rite_reverse,");
        newline ();
        writeline ("\nPlease enter a command: ");

        cmd = fgetc (stdin);
        if (cmd == EOF)                 /* are we done? */
            break;
        clrbuf (cmd);                   /* discard rest of the line */

        switch (cmd) {
            case 'e':                   /* isempty */
                if (isempty_Stack (stack))
                    writeline ("\nStack is empty.");
                else
                    writeline ("\nStack is not empty.");
                break;

            case 'a':                   /* advance pre */
                advance_pre_List (stack);
                break;

            case 'n':                   /* advance next */
                advance_next_List (stack);
                break;

            case 'i':                   /* insert */
                writeline (
                    "\nPlease enter a number to insert into list: ");
                rec.xxx = decin ();
                clrbuf (0);             /* discard rest of the line */
                ok = insert (stack, &rec, from_where (TRUE));
                if (! ok)
                    fprintf (stderr, "\nWARNING: insert FAILED\n");
                break;

            case 'u':                   /* push */
                writeline (
                    "\nPlease enter a number to push to stack: ");
                rec.xxx = decin ();
                clrbuf (0);             /* discard rest of the line */
                ok = push (stack, &rec);
                if (! ok)
                    fprintf (stderr, "\nWARNING: push FAILED\n");
                break;

            case 'p':                   /* pop */
                result = (MyRec *) pop (stack);
                if (! result)
                    fprintf (stderr, "\nWARNING: pop FAILED\n");
                else {
                    rec = *result;
                    writeline (
                        "\nNumber popped from the stack is: ");
                    write_MyRec (&rec, stdout);
                    delete_MyRec (&result);
                }
                break;

            case 'r':                   /* remove */
                result = (MyRec *) remove_List (stack,
                                                from_where (FALSE));
                if (! result)
                    fprintf (stderr, "\nWARNING: remove FAILED\n");
                else {
                    rec = *result;
                    writeline (
                        "\nNumber removed from list is: ");
                    write_MyRec (&rec, stdout);
                    delete_MyRec (&result);
                }
                break;

            case 't':                   /* top (non-destructive) */
                result = (MyRec *) top (stack);
                if (! result)
                    fprintf (stderr, "\nWARNING: top FAILED\n");
                else {
                    rec = *result;
                    writeline (
                        "\nNumber at top of the stack is: ");
                    write_MyRec (&rec, stdout);
                }
                break;

            case 'v':                   /* view (non-destructive) */
                result = (MyRec *) view (stack, from_where (FALSE));
                if (! result)
                    fprintf (stderr, "\nWARNING: view FAILED\n");
                else {
                    rec = *result;
                    writeline (
                        "\nNumber viewed from the list is: ");
                    write_MyRec (&rec, stdout);
                }
                break;

            case 'w':                   /* write forward */
                writeline ("\nThe Stack contains:\n");
                write_Stack (stack, stderr);
                newline ();
                break;

            case 'W':                   /* write in reverse */
                writeline ("\nThe Stack contains (in reverse):\n");
                write_reverse_List (stack, stderr);
                newline ();
                break;
        }
    }

    delete_Stack (&stack);              /* deallocate stack */
    newline ();
    return 0;
}
/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * On success, when bpp is non-NULL, *bpp holds a buffer for the data
 * block (zeroed when B_CLRBUF is requested).  On failure, everything
 * allocated along the way is unwound at the 'fail' label.
 * NOTE: the 'size' parameter is not used in this function.
 */
int
ext2fs_balloc(struct inode *ip, daddr_t bn, int size, kauth_cred_t cred,
    struct buf **bpp, int flags)
{
	struct m_ext2fs *fs;
	daddr_t nb;
	struct buf *bp, *nbp;
	struct vnode *vp = ITOV(ip);
	struct indir indirs[EXT2FS_NIADDR + 2];
	daddr_t newb, lbn, pref;
	int32_t *bap;	/* XXX ondisk32 */
	int num, i, error;
	u_int deallocated;
	daddr_t *blkp, *allocblk, allociblk[EXT2FS_NIADDR + 1];
	int32_t *allocib;	/* XXX ondisk32 */
	int unwindidx = -1;
	UVMHIST_FUNC("ext2fs_balloc"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "bn 0x%x", bn,0,0,0);

	if (bpp != NULL) {
		*bpp = NULL;
	}
	if (bn < 0)
		return (EFBIG);
	fs = ip->i_e2fs;
	lbn = bn;

	/*
	 * The first EXT2FS_NDADDR blocks are direct blocks
	 */
	if (bn < EXT2FS_NDADDR) {
		/* XXX ondisk32 */
		nb = fs2h32(ip->i_e2fs_blocks[bn]);
		if (nb != 0) {
			/*
			 * the block is already allocated, just read it.
			 */
			if (bpp != NULL) {
				error = bread(vp, bn, fs->e2fs_bsize,
					      NOCRED, B_MODIFY, &bp);
				if (error) {
					return (error);
				}
				*bpp = bp;
			}
			return (0);
		}

		/*
		 * allocate a new direct block.
		 */
		error = ext2fs_alloc(ip, bn,
		    ext2fs_blkpref(ip, bn, bn, &ip->i_e2fs_blocks[0]),
		    cred, &newb);
		if (error)
			return (error);
		/* remember last allocation to guide future placement */
		ip->i_e2fs_last_lblk = lbn;
		ip->i_e2fs_last_blk = newb;
		/* XXX ondisk32 */
		ip->i_e2fs_blocks[bn] = h2fs32((int32_t)newb);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (bpp != NULL) {
			bp = getblk(vp, bn, fs->e2fs_bsize, 0, 0);
			bp->b_blkno = EXT2_FSBTODB(fs, newb);
			if (flags & B_CLRBUF)
				clrbuf(bp);
			*bpp = bp;
		}
		return (0);
	}
	/*
	 * Determine the number of levels of indirection.
	 */
	pref = 0;
	if ((error = ufs_getlbns(vp, bn, indirs, &num)) != 0)
		return(error);
#ifdef DIAGNOSTIC
	if (num < 1)
		panic ("ext2fs_balloc: ufs_getlbns returned indirect block\n");
#endif
	/*
	 * Fetch the first indirect block allocating if necessary.
	 */
	--num;
	/* XXX ondisk32 */
	nb = fs2h32(ip->i_e2fs_blocks[EXT2FS_NDADDR + indirs[0].in_off]);
	allocib = NULL;
	allocblk = allociblk;	/* record allocations for possible unwind */
	if (nb == 0) {
		pref = ext2fs_blkpref(ip, lbn, 0, (int32_t *)0);
		error = ext2fs_alloc(ip, lbn, pref, cred, &newb);
		if (error)
			return (error);
		nb = newb;
		*allocblk++ = nb;
		ip->i_e2fs_last_blk = newb;
		bp = getblk(vp, indirs[1].in_lbn, fs->e2fs_bsize, 0, 0);
		bp->b_blkno = EXT2_FSBTODB(fs, newb);
		clrbuf(bp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(bp)) != 0)
			goto fail;
		unwindidx = 0;
		allocib = &ip->i_e2fs_blocks[EXT2FS_NDADDR + indirs[0].in_off];
		/* XXX ondisk32 */
		*allocib = h2fs32((int32_t)newb);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * Fetch through the indirect blocks, allocating as necessary.
	 * Loop invariant: on break, 'bp' holds the indirect block that
	 * maps the data block and 'nb' its (possibly zero) address.
	 */
	for (i = 1;;) {
		error = bread(vp, indirs[i].in_lbn, (int)fs->e2fs_bsize,
			      NOCRED, 0, &bp);
		if (error) {
			goto fail;
		}
		bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
		nb = fs2h32(bap[indirs[i].in_off]);
		if (i == num)
			break;
		i++;
		if (nb != 0) {
			brelse(bp, 0);
			continue;
		}
		pref = ext2fs_blkpref(ip, lbn, 0, (int32_t *)0);
		error = ext2fs_alloc(ip, lbn, pref, cred, &newb);
		if (error) {
			brelse(bp, 0);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		ip->i_e2fs_last_blk = newb;
		nbp = getblk(vp, indirs[i].in_lbn, fs->e2fs_bsize, 0, 0);
		nbp->b_blkno = EXT2_FSBTODB(fs, nb);
		clrbuf(nbp);
		/*
		 * Write synchronously so that indirect blocks
		 * never point at garbage.
		 */
		if ((error = bwrite(nbp)) != 0) {
			brelse(bp, 0);
			goto fail;
		}
		if (unwindidx < 0)
			unwindidx = i - 1;
		/* XXX ondisk32 */
		bap[indirs[i - 1].in_off] = h2fs32((int32_t)nb);
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
	}
	/*
	 * Get the data block, allocating if necessary.
	 */
	if (nb == 0) {
		pref = ext2fs_blkpref(ip, lbn, indirs[num].in_off, &bap[0]);
		error = ext2fs_alloc(ip, lbn, pref, cred, &newb);
		if (error) {
			brelse(bp, 0);
			goto fail;
		}
		nb = newb;
		*allocblk++ = nb;
		ip->i_e2fs_last_lblk = lbn;
		ip->i_e2fs_last_blk = newb;
		/* XXX ondisk32 */
		bap[indirs[num].in_off] = h2fs32((int32_t)nb);
		/*
		 * If required, write synchronously, otherwise use
		 * delayed write.
		 */
		if (flags & B_SYNC) {
			bwrite(bp);
		} else {
			bdwrite(bp);
		}
		if (bpp != NULL) {
			nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0);
			nbp->b_blkno = EXT2_FSBTODB(fs, nb);
			if (flags & B_CLRBUF)
				clrbuf(nbp);
			*bpp = nbp;
		}
		return (0);
	}
	brelse(bp, 0);
	if (bpp != NULL) {
		/* Data block already exists; hand back a buffer for it. */
		if (flags & B_CLRBUF) {
			error = bread(vp, lbn, (int)fs->e2fs_bsize,
				      NOCRED, B_MODIFY, &nbp);
			if (error) {
				goto fail;
			}
		} else {
			nbp = getblk(vp, lbn, fs->e2fs_bsize, 0, 0);
			nbp->b_blkno = EXT2_FSBTODB(fs, nb);
		}
		*bpp = nbp;
	}
	return (0);
fail:
	/*
	 * If we have failed part way through block allocation, we
	 * have to deallocate any indirect blocks that we have allocated.
	 */
	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
		ext2fs_blkfree(ip, *blkp);
		deallocated += fs->e2fs_bsize;
	}
	if (unwindidx >= 0) {
		if (unwindidx == 0) {
			*allocib = 0;
		} else {
			int r;

			r = bread(vp, indirs[unwindidx].in_lbn,
			    (int)fs->e2fs_bsize, NOCRED, B_MODIFY, &bp);
			if (r) {
				panic("Could not unwind indirect block, error %d", r);
			} else {
				bap = (int32_t *)bp->b_data; /* XXX ondisk32 */
				bap[indirs[unwindidx].in_off] = 0;
				if (flags & B_SYNC)
					bwrite(bp);
				else
					bdwrite(bp);
			}
		}
		/* invalidate cached copies of the now-freed indirects */
		for (i = unwindidx + 1; i <= num; i++) {
			bp = getblk(vp, indirs[i].in_lbn,
			    (int)fs->e2fs_bsize, 0, 0);
			brelse(bp, BC_INVAL);
		}
	}
	if (deallocated) {
		ext2fs_setnblock(ip, ext2fs_nblock(ip) - btodb(deallocated));
		/*
		 * NOTE(review): everywhere else this function sets
		 * IN_CHANGE|IN_UPDATE on ip->i_flag; setting them on
		 * i_e2fs_flags here looks suspicious — verify against
		 * the inode flag definitions.
		 */
		ip->i_e2fs_flags |= IN_CHANGE | IN_UPDATE;
	}
	return error;
}
/* VOP_BWRITE ULFS_NIADDR+2 times */
/*
 * Allocate (or locate) the block backing byte offset 'startoffset'
 * of vnode 'vp' for a write of 'iosize' bytes, creating intermediate
 * indirect blocks as needed.  In the LFS, a newly allocated block has
 * no disk address yet, so it is marked UNWRITTEN until a segment
 * write assigns one.  When bpp is non-NULL, *bpp receives a buffer
 * for the data block.
 */
int
lfs_balloc(struct vnode *vp, off_t startoffset, int iosize,
    kauth_cred_t cred, int flags, struct buf **bpp)
{
	int offset;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[ULFS_NIADDR+2], *idp;
	daddr_t	lbn, lastblock;
	int bcount;
	int error, frags, i, nsize, osize, num;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = lfs_blkoff(fs, startoffset);
	KASSERT(iosize <= lfs_sb_getbsize(fs));
	lbn = lfs_lblkno(fs, startoffset);
	/* (void)lfs_check(vp, lbn, 0); */

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of file, it's a block in
	 * the file that may or may not have been assigned a disk address or
	 * we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block or zero out its contents. If
	 * it did not, make sure we allocate any necessary indirect
	 * blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check if the old last block was a fragment.	If it was, we need
	 * to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lfs_lblkno(fs, ip->i_size);
	if (lastblock < ULFS_NDADDR && lastblock < lbn) {
		osize = lfs_blksize(fs, ip, lastblock);
		if (osize < lfs_sb_getbsize(fs) && osize > 0) {
			/* old last block was a fragment; grow it to a
			 * full block before writing past it */
			if ((error = lfs_fragextend(vp, osize,
						    lfs_sb_getbsize(fs),
						    lastblock,
						    (bpp ? &bp : NULL), cred)))
				return (error);
			ip->i_size = (lastblock + 1) * lfs_sb_getbsize(fs);
			lfs_dino_setsize(fs, ip->i_din, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp->b_vp, bp);
		}
	}

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * the block is brand new and we should allocate it the correct
	 * size or it already exists and contains some fragments and
	 * may need to extend it.
	 */
	if (lbn < ULFS_NDADDR && lfs_lblkno(fs, ip->i_size) <= lbn) {
		osize = lfs_blksize(fs, ip, lbn);
		nsize = lfs_fragroundup(fs, offset + iosize);
		if (lfs_lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = lfs_numfrags(fs, nsize);
			if (!ISSPACE(fs, frags, cred))
				return ENOSPC;
			if (bpp) {
				*bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (flags & B_CLRBUF)
					clrbuf(bp);
			}
			/* charge the new fragments against free space */
			ip->i_lfs_effnblks += frags;
			mutex_enter(&lfs_lock);
			lfs_sb_subbfree(fs, frags);
			mutex_exit(&lfs_lock);
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize,
				    0, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL), cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

	error = ulfs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	KASSERT(daddr <= LFS_MAX_DADDR(fs));

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
	frags = fs->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = frags;		/* the data block itself */
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += frags;	/* each missing indirect */
		}
	}
	if (ISSPACE(fs, bcount, cred)) {
		mutex_enter(&lfs_lock);
		lfs_sb_subbfree(fs, bcount);
		mutex_exit(&lfs_lock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 && lfs_dino_getib(fs, ip->i_din, indirs[0].in_off) == 0) {
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off,
				       UNWRITTEN);
		}

		/*
		 * Create new indirect blocks if necessary
		 */
		if (num > 1) {
			idaddr = lfs_dino_getib(fs, ip->i_din,
						indirs[0].in_off);
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    lfs_sb_getbsize(fs), 0,0);
				if (!indirs[i].in_exists) {
					/* brand new indirect: zero it */
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_oflags & (BO_DELWRI | BO_DONE))) {
					/* exists on disk but not cached:
					 * read it in synchronously */
					ibp->b_blkno = LFS_FSBTODB(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to keep
				 * the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
						      __LINE__, indirs[i].in_lbn,
						      ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp->b_vp, ibp)))
					return error;
			}
		}
	}

	/*
	 * Get the existing block from the cache, if requested.
	 */
	if (bpp)
		*bpp = bp = getblk(vp, lbn, lfs_blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block
	 * in which case we need to do accounting.
	 *
	 * We can tell a truly new block because ulfs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

		/* record UNWRITTEN in whatever maps this block */
		switch (num) {
		    case 0:
			lfs_dino_setdb(fs, ip->i_din, lbn, UNWRITTEN);
			break;
		    case 1:
			lfs_dino_setib(fs, ip->i_din, indirs[0].in_off,
				       UNWRITTEN);
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, lfs_sb_getbsize(fs),
				  B_MODIFY, &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
					      __LINE__, idp->in_lbn,
					      ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp->b_vp, ibp);
		}
	} else if (bpp && !(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == lfs_sb_getbsize(fs))
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}
/*
 * hpfs_write(struct vnode *a_vp, struct uio *a_uio, int a_ioflag,
 *	      struct ucred *a_cred)
 *
 * Write the uio to the file, extending it first if the write goes
 * past the current end of file.  Data is transferred one device-block
 * run at a time through the buffer cache.
 */
static int
hpfs_write(struct vop_write_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	struct uio *uio = ap->a_uio;
	struct buf *bp;
	u_int xfersz, towrite;
	u_int off;
	daddr_t lbn, bn;
	int runl;
	int error = 0;

	dprintf(("hpfs_write(0x%x, off: %d resid: %ld, segflg: %d):\n",
		hp->h_no, (u_int32_t)uio->uio_offset,
		uio->uio_resid, uio->uio_segflg));

	if (ap->a_ioflag & IO_APPEND) {
		dprintf(("hpfs_write: APPEND mode\n"));
		uio->uio_offset = hp->h_fn.fn_size;
	}
	/* Grow the file up front if the write extends past EOF. */
	if (uio->uio_offset + uio->uio_resid > hp->h_fn.fn_size) {
		error = hpfs_extend (hp, uio->uio_offset + uio->uio_resid);
		if (error) {
			kprintf("hpfs_write: hpfs_extend FAILED %d\n", error);
			return (error);
		}
	}

	while (uio->uio_resid) {
		lbn = uio->uio_offset >> DEV_BSHIFT;
		off = uio->uio_offset & (DEV_BSIZE - 1);
		dprintf(("hpfs_write: resid: 0x%lx lbn: 0x%x off: 0x%x\n",
			uio->uio_resid, lbn, off));
		/* map the logical block to a device block + run length */
		error = hpfs_hpbmap(hp, lbn, &bn, &runl);
		if (error)
			return (error);

		/* bytes of buffer content valid after this pass, capped
		 * by the contiguous run and the transfer size limit */
		towrite = szmin(off + uio->uio_resid,
				min(DFLTPHYS, (runl+1)*DEV_BSIZE));
		/* round the buffer itself up to whole device blocks */
		xfersz = (towrite + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		dprintf(("hpfs_write: bn: 0x%x (0x%x) towrite: 0x%x (0x%x)\n",
			bn, runl, towrite, xfersz));

		/*
		 * A read-before-write is only skippable when the write
		 * starts on a block boundary and covers the whole
		 * transfer, so no existing bytes need preserving.
		 *
		 * In the UIO_NOCOPY case, however, we are not overwriting
		 * anything and must do a read-before-write to fill in
		 * any missing pieces.
		 */
		if (off == 0 && towrite == xfersz &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp = getblk(hp->h_devvp, dbtodoff(bn), xfersz, 0, 0);
			clrbuf(bp);
		} else {
			error = bread(hp->h_devvp, dbtodoff(bn), xfersz, &bp);
			if (error) {
				brelse(bp);
				return (error);
			}
		}

		/* copy user data into the buffer past the partial head */
		error = uiomove(bp->b_data + off, (size_t)(towrite - off), uio);
		if(error) {
			brelse(bp);
			return (error);
		}

		if (ap->a_ioflag & IO_SYNC)
			bwrite(bp);
		else
			bawrite(bp);
	}

	dprintf(("hpfs_write: successful\n"));
	return (0);
}
/*
 * Calculate the logical to physical mapping if not done already,
 * then call the device strategy routine.
 *
 * NTFS attributes are accessed through ntfs_readattr /
 * ntfs_writeattr_plain rather than raw device I/O, so this routine
 * performs the transfer itself and completes the buffer with biodone.
 */
int
ntfs_strategy(void *v)
{
	struct vop_strategy_args *ap = v;
	struct buf *bp = ap->a_bp;
	struct vnode *vp = bp->b_vp;
	struct fnode *fp = VTOF(vp);
	struct ntnode *ip = FTONT(fp);
	struct ntfsmount *ntmp = ip->i_mp;
	int error, s;

	DPRINTF("ntfs_strategy: blkno: %lld, lblkno: %lld\n",
	    (long long)bp->b_blkno, (long long)bp->b_lblkno);
	DPRINTF("strategy: bcount: %ld flags: 0x%lx\n",
	    bp->b_bcount, bp->b_flags);

	if (bp->b_flags & B_READ) {
		u_int32_t toread;

		/* Read starting at or past EOF: return zeros. */
		if (ntfs_cntob(bp->b_blkno) >= fp->f_size) {
			clrbuf(bp);
			error = 0;
		} else {
			/* clamp the read to the attribute size */
			toread = MIN(bp->b_bcount,
				 fp->f_size - ntfs_cntob(bp->b_blkno));
			DPRINTF("ntfs_strategy: toread: %u, fsize: %llu\n",
			    toread, fp->f_size);

			error = ntfs_readattr(ntmp, ip, fp->f_attrtype,
			    fp->f_attrname, ntfs_cntob(bp->b_blkno),
			    toread, bp->b_data, NULL);

			if (error) {
				printf("ntfs_strategy: ntfs_readattr failed\n");
				bp->b_error = error;
				bp->b_flags |= B_ERROR;
			}

			/* zero the tail beyond what was read */
			bzero(bp->b_data + toread, bp->b_bcount - toread);
		}
	} else {
		size_t tmp;
		u_int32_t towrite;

		/* Writes may not grow the attribute through this path. */
		if (ntfs_cntob(bp->b_blkno) + bp->b_bcount >= fp->f_size) {
			printf("ntfs_strategy: CAN'T EXTEND FILE\n");
			bp->b_error = error = EFBIG;
			bp->b_flags |= B_ERROR;
		} else {
			towrite = MIN(bp->b_bcount,
			    fp->f_size - ntfs_cntob(bp->b_blkno));
			DPRINTF("ntfs_strategy: towrite: %u, fsize: %llu\n",
			    towrite, fp->f_size);

			error = ntfs_writeattr_plain(ntmp, ip, fp->f_attrtype,
			    fp->f_attrname, ntfs_cntob(bp->b_blkno),towrite,
			    bp->b_data, &tmp, NULL);

			if (error) {
				printf("ntfs_strategy: ntfs_writeattr fail\n");
				bp->b_error = error;
				bp->b_flags |= B_ERROR;
			}
		}
	}
	/* complete the buffer at splbio as the biodone protocol requires */
	s = splbio();
	biodone(bp);
	splx(s);
	return (error);
}
/*
 * Read input from stdin into file-scope globals, in one of two modes:
 *   ch == 'e': read a plain line into 's' and count its length in
 *              'num'; re-prompts on empty lines or non-ASCII bytes.
 *   ch == 'd': read a '*'-delimited line like "*12*-3*7*" into 's1'
 *              and parse the numbers into 's2[]', counting them in
 *              'num'; re-prompts if the line does not start with '*'.
 * Any other ch is ignored.  Uses globals s, s1, s2, num, i and the
 * helper clrbuf() (presumably flushes pending stdin input — confirm).
 */
void get(char ch)
{
	if(ch=='e')
	{
		/* NOTE(review): if the line fills the buffer without a
		 * terminating '\n', the while(s[++num]...) scan below
		 * runs past the string — verify N bounds the input. */
loop:		while(1)
		{
			printf("Please input your string.\n");
			clrbuf();
			fgets(s,N,stdin);
			num=0;
			if(s[0]=='\n')
				continue;	/* empty line: ask again */
			/* count characters up to the newline */
			while(s[++num]!='\n');
			for(i=0;i<num;i++)
			{
				/* negative char => non-ASCII byte
				 * (e.g. multi-byte CJK); reject and
				 * restart from the prompt */
				if(s[i]<0)
				{
					printf("不支持中文~~~\n");
					goto loop;
				}
				else;
			}
			break;
		}
	}
	else if(ch=='d')
	{
		while(1)
		{
			printf("Please input your string.\n");
			clrbuf();
			fgets(s1,N,stdin);
			if(s1[0]!='*')
			{
				printf("Invalid string!!!\n");
				continue;
			}
			else
			{
				int i=1;	/* shadows the global i */
				num=0;
				int max=strlen(s1);
				while(i<max)
				{
					/* parse the number after this '*';
					 * advance past digits + delimiter
					 * by the number's decimal width */
					sscanf(&s1[i],"%d*",&s2[num]);
					if(s2[num]>=0&&s2[num]<10)
						i=i+2,num++;
					else if((s2[num]>=10&&s2[num]<100)||(s2[num]<0&&s2[num]>-10))
						i=i+3,num++;
					else if((s2[num]>=100&&s2[num]<1000)||(s2[num]<=-10&&s2[num]>-100))
						i=i+4,num++;
					else if(s2[num]<=-100&&s2[num]>-1000)
						i=i+5,num++;
					else {i++; continue;}	/* out of range: skip a char */
				}
			}
			break;
		}
	}
	else;	/* unknown mode: do nothing */
}
void space(void) { byte c; byte move_left, move_right; char delay_count; byte do_exit; move_left = 0; move_right = 0; shot_y = 0; shot_x = 0; inv_shot_y = 0; inv_shot_x = 0; do_exit = 0; clrbuf(); init_game(); delay_count = 0; do { clrbuf(); draw_invaders(); draw_tank(tank_pos); // tank shot tank_shot(); // invader shot invaders_shot(); buf2screen(); // delay invader movement if (++delay_count >= 5 - ((char)inv_pos_y / 3)) { delay_count = 0; move_invaders(); } // delay if (inv_count < 14) delay_ms((14 - inv_count)); // read keyboard c = io_read(129); if (c == 0xe0) { c = io_read(129); if (c == 0x6b) { // left down move_left = 1; } else if (c == 0x74) { // right down move_right = 1; } else if (c == 0x6b + 0x80) { // left up move_left = 0; } else if (c == 0x74 + 0x80) { // right up move_right = 0; } } else if (c == 0x29) { // shoot if (shot_y == 0) { shot_x = (tank_pos + 1) * 2 + 1; shot_y = 90; } } else if (c == 0x76) { // escape do_exit = 1; } // move tank if (move_left) { if (tank_pos > 0) tank_pos -= 1; } else if (move_right) { if (tank_pos < 76) tank_pos += 1; } } while (!do_exit); }
/*
 * Write data to a file or directory.
 *
 * Regular files only (EISDIR for directories).  Fills any seek-hole
 * with zeroed clusters first, pre-extends the file for the whole
 * write, then copies the uio cluster by cluster through the buffer
 * cache.  On error with IO_UNIT the file is truncated back to its
 * original size and the uio rewound.
 */
int
msdosfs_write(void *v)
{
	struct vop_write_args *ap = v;
	int n;
	int croffset;
	int resid;
	uint32_t osize;
	int error = 0;
	uint32_t count, lastcn;
	daddr64_t bn;
	struct buf *bp;
	int ioflag = ap->a_ioflag;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct vnode *thisvp;
	struct denode *dep = VTODE(vp);
	struct msdosfsmount *pmp = dep->de_pmp;
	struct ucred *cred = ap->a_cred;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_write(vp %08x, uio %08x, ioflag %08x, cred %08x\n",
	    vp, uio, ioflag, cred);
	printf("msdosfs_write(): diroff %d, dirclust %d, startcluster %d\n",
	    dep->de_diroffset, dep->de_dirclust, dep->de_StartCluster);
#endif

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = dep->de_FileSize;
		thisvp = vp;
		break;
	case VDIR:
		return EISDIR;
	default:
		panic("msdosfs_write(): bad file type");
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);

	/* Don't bother to try to write files larger than the f/s limit */
	if (uio->uio_offset + uio->uio_resid > MSDOSFS_FILESIZE_MAX)
		return (EFBIG);

	/*
	 * If they've exceeded their filesize limit, tell them about it.
	 */
	if (p && ((uio->uio_offset + uio->uio_resid) >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * If the offset we are starting the write at is beyond the end of
	 * the file, then they've done a seek.  Unix filesystems allow
	 * files with holes in them, DOS doesn't so we must fill the hole
	 * with zeroed blocks.
	 */
	if (uio->uio_offset > dep->de_FileSize) {
		if ((error = deextend(dep, uio->uio_offset, cred)) != 0)
			return (error);
	}

	/*
	 * Remember some values in case the write fails.
	 */
	resid = uio->uio_resid;
	osize = dep->de_FileSize;

	/*
	 * If we write beyond the end of the file, extend it to its ultimate
	 * size ahead of the time to hopefully get a contiguous area.
	 */
	if (uio->uio_offset + resid > osize) {
		count = de_clcount(pmp, uio->uio_offset + resid) -
			de_clcount(pmp, osize);
		if ((error = extendfile(dep, count, NULL, NULL, 0)) &&
		    (error != ENOSPC || (ioflag & IO_UNIT)))
			goto errexit;
		lastcn = dep->de_fc[FC_LASTFC].fc_frcn;
	} else
		lastcn = de_clcount(pmp, osize) - 1;

	do {
		/* ran past the last allocated cluster (ENOSPC during
		 * partial extension above) */
		if (de_cluster(pmp, uio->uio_offset) > lastcn) {
			error = ENOSPC;
			break;
		}
		bn = de_blk(pmp, uio->uio_offset);
		if ((uio->uio_offset & pmp->pm_crbomask) == 0
		    && (de_blk(pmp, uio->uio_offset + uio->uio_resid) > de_blk(pmp, uio->uio_offset)
			|| uio->uio_offset + uio->uio_resid >= dep->de_FileSize)) {
			/*
			 * If either the whole cluster gets written,
			 * or we write the cluster from its start beyond EOF,
			 * then no need to read data from disk.
			 */
			bp = getblk(thisvp, bn, pmp->pm_bpcluster, 0, 0);
			clrbuf(bp);
			/*
			 * Do the bmap now, since pcbmap needs buffers
			 * for the fat table. (see msdosfs_strategy)
			 */
			if (bp->b_blkno == bp->b_lblkno) {
				error = pcbmap(dep,
					       de_bn2cn(pmp, bp->b_lblkno),
					       &bp->b_blkno, 0, 0);
				if (error)
					bp->b_blkno = -1;
			}
			if (bp->b_blkno == -1) {
				brelse(bp);
				if (!error)
					error = EIO;		/* XXX */
				break;
			}
		} else {
			/*
			 * The block we need to write into exists, so
			 * read it in.
			 */
			error = bread(thisvp, bn, pmp->pm_bpcluster,
				      NOCRED, &bp);
			if (error) {
				brelse(bp);
				break;
			}
		}

		croffset = uio->uio_offset & pmp->pm_crbomask;
		n = min(uio->uio_resid, pmp->pm_bpcluster - croffset);
		if (uio->uio_offset + n > dep->de_FileSize) {
			dep->de_FileSize = uio->uio_offset + n;
			uvm_vnp_setsize(vp, dep->de_FileSize);
		}
		uvm_vnp_uncache(vp);
		/*
		 * Should these vnode_pager_* functions be done on dir
		 * files?
		 */

		/*
		 * Copy the data from user space into the buf header.
		 */
		error = uiomove(bp->b_data + croffset, n, uio);

		/*
		 * If they want this synchronous then write it and wait for
		 * it.  Otherwise, if on a cluster boundary write it
		 * asynchronously so we can move on to the next block
		 * without delay.  Otherwise do a delayed write because we
		 * may want to write somemore into the block later.
		 */
		if (ioflag & IO_SYNC)
			(void) bwrite(bp);
		else if (n + croffset == pmp->pm_bpcluster)
			bawrite(bp);
		else
			bdwrite(bp);
		dep->de_flag |= DE_UPDATE;
	} while (error == 0 && uio->uio_resid > 0);

	/*
	 * If the write failed and they want us to, truncate the file back
	 * to the size it was before the write was attempted.
	 */
errexit:
	if (error) {
		if (ioflag & IO_UNIT) {
			detrunc(dep, osize, ioflag & IO_SYNC, NOCRED, NULL);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		} else {
			detrunc(dep, dep->de_FileSize, ioflag & IO_SYNC,
				NOCRED, NULL);
			if (uio->uio_resid != resid)
				error = 0;
		}
	} else if (ioflag & IO_SYNC)
		error = deupdat(dep, 1);
	return (error);
}
void create(void) { head = p = (struct node*)malloc(LEN); printf("Creating linked list...\n"); printf("Please input data:\n"); #ifdef DEGBUG1 char str1[10], str2[10]; scanf("%d", &p->data); getchar(); while (1) { a = getchar(); if (a != '\n') { b = -1; pl = p; p = (struct node*)malloc(LEN); HANDLE Thread2 = CreateThread(NULL, 0, scanf, NULL, 0, NULL); Sleep(10); if (b == -1) { TerminateThread(Thread2,0); CloseHandle(Thread2); p->data = a - 48; pl->next = p; clrbuf(); } else { CloseHandle(Thread2); _itoa(a - 48, str1, 10); _itoa(b, str2, 10); strcat(str1, str2); p->data = atoi(str1); pl->next = p; } continue; } break; } #else clrbuf(); a=getchar(); ungetc(a,stdin); #ifdef _WIN32 HANDLE Thread2 = CreateThread(NULL, 0, scanf, NULL, 0, NULL); Sleep(10); TerminateThread(Thread2, 0); CloseHandle(Thread2); #else pthread_t tid; int *temp=NULL; if(pthread_create(&tid,NULL,getnum,temp)!=0) { perror("Can't create thread!"); getchar(); exit(1); } sleep(0.1); if(pthread_cancel(tid)!=0) { perror("Can't kill thread!"); getchar(); exit(1); } #endif p->data = numin[0]; for (int j = 1; j < i; j++) { pl = p; p = (struct node*)malloc(LEN); p->data = numin[j]; pl->next = p; } #endif p->next = NULL; p = head; }
/*
 * Balloc defines the structure of file system storage
 * by allocating the physical blocks on a device given
 * the inode and the logical block number in a file.
 *
 * ip          - inode of the file being extended
 * startoffset - byte offset within the file being written
 * size        - number of bytes needed at that offset
 * cred        - credentials charged for the allocation (quota)
 * flags       - B_SYNC / B_CLRBUF behaviour flags
 * bpp         - if non-NULL, returns a buffer covering the block
 *
 * Returns 0 on success or an errno.  On partial failure every block
 * allocated by this call is freed again before returning (see "fail").
 */
int
ffs1_balloc(struct inode *ip, off_t startoffset, int size, struct ucred *cred,
    int flags, struct buf **bpp)
{
        daddr_t lbn, nb, newb, pref;
        struct fs *fs;
        struct buf *bp, *nbp;
        struct vnode *vp;
        struct proc *p;
        struct indir indirs[NIADDR + 2];
        int32_t *bap;
        int deallocated, osize, nsize, num, i, error;
        /*
         * allociblk[] records every block allocated during this call so
         * the fail path can free them; allocib/unwindidx remember which
         * inode slot or indirect-block entry must be zeroed on unwind.
         */
        int32_t *allocib, *blkp, *allocblk, allociblk[NIADDR+1];
        int unwindidx = -1;

        vp = ITOV(ip);
        fs = ip->i_fs;
        p = curproc;
        /*
         * lbn is the logical block holding the offset; "size" is recomputed
         * as the number of valid bytes counted from the start of that block.
         */
        lbn = lblkno(fs, startoffset);
        size = blkoff(fs, startoffset) + size;
        if (size > fs->fs_bsize)
                panic("ffs1_balloc: blk too big");
        if (bpp != NULL)
                *bpp = NULL;
        if (lbn < 0)
                return (EFBIG);

        /*
         * If the next write will extend the file into a new block,
         * and the file is currently composed of a fragment
         * this fragment has to be extended to be a full block.
         */
        nb = lblkno(fs, ip->i_ffs1_size);
        if (nb < NDADDR && nb < lbn) {
                osize = blksize(fs, ip, nb);
                if (osize < fs->fs_bsize && osize > 0) {
                        error = ffs_realloccg(ip, nb,
                            ffs1_blkpref(ip, nb, (int)nb, &ip->i_ffs1_db[0]),
                            osize, (int)fs->fs_bsize, cred, bpp, &newb);
                        if (error)
                                return (error);
                        if (DOINGSOFTDEP(vp))
                                softdep_setup_allocdirect(ip, nb, newb,
                                    ip->i_ffs1_db[nb], fs->fs_bsize, osize,
                                    bpp ? *bpp : NULL);
                        ip->i_ffs1_size = lblktosize(fs, nb + 1);
                        uvm_vnp_setsize(vp, ip->i_ffs1_size);
                        ip->i_ffs1_db[nb] = newb;
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        if (bpp != NULL) {
                                if (flags & B_SYNC)
                                        bwrite(*bpp);
                                else
                                        bawrite(*bpp);
                        }
                }
        }
        /*
         * The first NDADDR blocks are direct blocks
         */
        if (lbn < NDADDR) {
                nb = ip->i_ffs1_db[lbn];
                if (nb != 0 && ip->i_ffs1_size >= lblktosize(fs, lbn + 1)) {
                        /*
                         * The block is an already-allocated direct block
                         * and the file already extends past this block,
                         * thus this must be a whole block.
                         * Just read the block (if requested).
                         */
                        if (bpp != NULL) {
                                error = bread(vp, lbn, fs->fs_bsize, bpp);
                                if (error) {
                                        brelse(*bpp);
                                        return (error);
                                }
                        }
                        return (0);
                }
                if (nb != 0) {
                        /*
                         * Consider need to reallocate a fragment.
                         */
                        osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
                        nsize = fragroundup(fs, size);
                        if (nsize <= osize) {
                                /*
                                 * The existing block is already
                                 * at least as big as we want.
                                 * Just read the block (if requested).
                                 */
                                if (bpp != NULL) {
                                        error = bread(vp, lbn, fs->fs_bsize,
                                            bpp);
                                        if (error) {
                                                brelse(*bpp);
                                                return (error);
                                        }
                                        (*bpp)->b_bcount = osize;
                                }
                                return (0);
                        } else {
                                /*
                                 * The existing block is smaller than we
                                 * want, grow it.
                                 */
                                error = ffs_realloccg(ip, lbn,
                                    ffs1_blkpref(ip, lbn, (int)lbn,
                                        &ip->i_ffs1_db[0]),
                                    osize, nsize, cred, bpp, &newb);
                                if (error)
                                        return (error);
                                if (DOINGSOFTDEP(vp))
                                        softdep_setup_allocdirect(ip, lbn,
                                            newb, nb, nsize, osize,
                                            bpp ? *bpp : NULL);
                        }
                } else {
                        /*
                         * The block was not previously allocated,
                         * allocate a new block or fragment.
                         */
                        if (ip->i_ffs1_size < lblktosize(fs, lbn + 1))
                                nsize = fragroundup(fs, size);
                        else
                                nsize = fs->fs_bsize;
                        error = ffs_alloc(ip, lbn,
                            ffs1_blkpref(ip, lbn, (int)lbn,
                                &ip->i_ffs1_db[0]),
                            nsize, cred, &newb);
                        if (error)
                                return (error);
                        if (bpp != NULL) {
                                *bpp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                                if (nsize < fs->fs_bsize)
                                        (*bpp)->b_bcount = nsize;
                                (*bpp)->b_blkno = fsbtodb(fs, newb);
                                if (flags & B_CLRBUF)
                                        clrbuf(*bpp);
                        }
                        if (DOINGSOFTDEP(vp))
                                softdep_setup_allocdirect(ip, lbn, newb, 0,
                                    nsize, 0, bpp ? *bpp : NULL);
                }
                ip->i_ffs1_db[lbn] = newb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
                return (0);
        }
        /*
         * Determine the number of levels of indirection.
         */
        pref = 0;
        if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
                return(error);
#ifdef DIAGNOSTIC
        if (num < 1)
                panic ("ffs1_balloc: ufs_bmaparray returned indirect block");
#endif
        /*
         * Fetch the first indirect block allocating if necessary.
         */
        --num;
        nb = ip->i_ffs1_ib[indirs[0].in_off];
        allocib = NULL;
        allocblk = allociblk;
        if (nb == 0) {
                pref = ffs1_blkpref(ip, lbn, -indirs[0].in_off - 1, NULL);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
                    &newb);
                if (error)
                        goto fail;
                nb = newb;
                *allocblk++ = nb;
                bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
                bp->b_blkno = fsbtodb(fs, nb);
                clrbuf(bp);
                if (DOINGSOFTDEP(vp)) {
                        softdep_setup_allocdirect(ip,
                            NDADDR + indirs[0].in_off, newb, 0,
                            fs->fs_bsize, 0, bp);
                        bdwrite(bp);
                } else {
                        /*
                         * Write synchronously so that indirect blocks
                         * never point at garbage.
                         */
                        if ((error = bwrite(bp)) != 0)
                                goto fail;
                }
                /* Only now hook the new indirect block into the inode. */
                allocib = &ip->i_ffs1_ib[indirs[0].in_off];
                *allocib = nb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        /*
         * Fetch through the indirect blocks, allocating as necessary.
         */
        for (i = 1;;) {
                error = bread(vp, indirs[i].in_lbn, (int)fs->fs_bsize, &bp);
                if (error) {
                        brelse(bp);
                        goto fail;
                }
                bap = (int32_t *)bp->b_data;
                nb = bap[indirs[i].in_off];
                /* Loop exits holding bp, the last (data-level) indirect. */
                if (i == num)
                        break;
                i++;
                if (nb != 0) {
                        brelse(bp);
                        continue;
                }
                if (pref == 0)
                        pref = ffs1_blkpref(ip, lbn, i - num - 1, NULL);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
                    &newb);
                if (error) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
                nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
                nbp->b_blkno = fsbtodb(fs, nb);
                clrbuf(nbp);
                if (DOINGSOFTDEP(vp)) {
                        softdep_setup_allocindir_meta(nbp, ip, bp,
                            indirs[i - 1].in_off, nb);
                        bdwrite(nbp);
                } else {
                        /*
                         * Write synchronously so that indirect blocks
                         * never point at garbage.
                         */
                        if ((error = bwrite(nbp)) != 0) {
                                brelse(bp);
                                goto fail;
                        }
                }
                bap[indirs[i - 1].in_off] = nb;
                /* Remember the first entry to clear if we must unwind. */
                if (allocib == NULL && unwindidx < 0)
                        unwindidx = i - 1;
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        /*
         * Get the data block, allocating if necessary.
         */
        if (nb == 0) {
                pref = ffs1_blkpref(ip, lbn, indirs[i].in_off, &bap[0]);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
                    &newb);
                if (error) {
                        brelse(bp);
                        goto fail;
                }
                nb = newb;
                *allocblk++ = nb;
                if (bpp != NULL) {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                        nbp->b_blkno = fsbtodb(fs, nb);
                        if (flags & B_CLRBUF)
                                clrbuf(nbp);
                        *bpp = nbp;
                }
                if (DOINGSOFTDEP(vp))
                        softdep_setup_allocindir_page(ip, lbn, bp,
                            indirs[i].in_off, nb, 0, bpp ? *bpp : NULL);
                bap[indirs[i].in_off] = nb;
                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
                return (0);
        }
        brelse(bp);
        if (bpp != NULL) {
                if (flags & B_CLRBUF) {
                        error = bread(vp, lbn, (int)fs->fs_bsize, &nbp);
                        if (error) {
                                brelse(nbp);
                                goto fail;
                        }
                } else {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                        nbp->b_blkno = fsbtodb(fs, nb);
                }
                *bpp = nbp;
        }
        return (0);

fail:
        /*
         * If we have failed to allocate any blocks, simply return the error.
         * This is the usual case and avoids the need to fsync the file.
         */
        if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
                return (error);
        /*
         * If we have failed part way through block allocation, we have to
         * deallocate any indirect blocks that we have allocated. We have to
         * fsync the file before we start to get rid of all of its
         * dependencies so that we do not leave them dangling. We have to sync
         * it at the end so that the softdep code does not find any untracked
         * changes. Although this is really slow, running out of disk space is
         * not expected to be a common occurrence. The error return from fsync
         * is ignored as we already have an error to return to the user.
         */
        VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
        for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
                ffs_blkfree(ip, *blkp, fs->fs_bsize);
                deallocated += fs->fs_bsize;
        }
        if (allocib != NULL) {
                *allocib = 0;
        } else if (unwindidx >= 0) {
                /* Zero the indirect-block entry added at unwindidx. */
                int r;

                r = bread(vp, indirs[unwindidx].in_lbn, (int)fs->fs_bsize,
                    &bp);
                if (r)
                        panic("Could not unwind indirect block, error %d", r);
                bap = (int32_t *)bp->b_data;
                bap[indirs[unwindidx].in_off] = 0;
                if (flags & B_SYNC) {
                        bwrite(bp);
                } else {
                        bdwrite(bp);
                }
        }
        if (deallocated) {
                /*
                 * Restore user's disk quota because allocation failed.
                 */
                (void)ufs_quota_free_blocks(ip, btodb(deallocated), cred);

                ip->i_ffs1_blocks -= btodb(deallocated);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);
        return (error);
}
/*
 * UFS2 version of balloc: allocate the physical block backing logical
 * byte offset "off" in the file, growing fragments and allocating
 * indirect blocks as needed.  Same contract as ffs1_balloc() but with
 * 64-bit (daddr_t) block pointers in the indirect blocks.
 *
 * Returns 0 on success or an errno; on partial failure the fail path
 * writes out and invalidates any buffers created here, zeroes the
 * pointer that was added, and frees every block this call allocated.
 */
int
ffs2_balloc(struct inode *ip, off_t off, int size, struct ucred *cred,
    int flags, struct buf **bpp)
{
        daddr_t lbn, lastlbn, nb, newb, *blkp;
        daddr_t pref, *allocblk, allociblk[NIADDR + 1];
        daddr_t *bap, *allocib;
        int deallocated, osize, nsize, num, i, error, unwindidx, r;
        struct buf *bp, *nbp;
        struct indir indirs[NIADDR + 2];
        struct fs *fs;
        struct vnode *vp;
        struct proc *p;

        vp = ITOV(ip);
        fs = ip->i_fs;
        p = curproc;
        unwindidx = -1;

        /*
         * lbn is the logical block holding the offset; "size" becomes the
         * number of valid bytes counted from the start of that block.
         */
        lbn = lblkno(fs, off);
        size = blkoff(fs, off) + size;

        if (size > fs->fs_bsize)
                panic("ffs2_balloc: block too big");

        if (bpp != NULL)
                *bpp = NULL;

        if (lbn < 0)
                return (EFBIG);

        /*
         * If the next write will extend the file into a new block, and the
         * file is currently composed of a fragment, this fragment has to be
         * extended to be a full block.
         */
        lastlbn = lblkno(fs, ip->i_ffs2_size);
        if (lastlbn < NDADDR && lastlbn < lbn) {
                nb = lastlbn;
                osize = blksize(fs, ip, nb);
                if (osize < fs->fs_bsize && osize > 0) {
                        error = ffs_realloccg(ip, nb, ffs2_blkpref(ip,
                            lastlbn, nb, &ip->i_ffs2_db[0]), osize,
                            (int) fs->fs_bsize, cred, bpp, &newb);
                        if (error)
                                return (error);

                        if (DOINGSOFTDEP(vp))
                                softdep_setup_allocdirect(ip, nb, newb,
                                    ip->i_ffs2_db[nb], fs->fs_bsize, osize,
                                    bpp ? *bpp : NULL);

                        ip->i_ffs2_size = lblktosize(fs, nb + 1);
                        uvm_vnp_setsize(vp, ip->i_ffs2_size);
                        ip->i_ffs2_db[nb] = newb;
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;

                        if (bpp) {
                                if (flags & B_SYNC)
                                        bwrite(*bpp);
                                else
                                        bawrite(*bpp);
                        }
                }
        }

        /*
         * The first NDADDR blocks are direct.
         */
        if (lbn < NDADDR) {
                nb = ip->i_ffs2_db[lbn];
                if (nb != 0 && ip->i_ffs2_size >= lblktosize(fs, lbn + 1)) {
                        /*
                         * The direct block is already allocated and the file
                         * extends past this block, thus this must be a whole
                         * block. Just read it, if requested.
                         */
                        if (bpp != NULL) {
                                error = bread(vp, lbn, fs->fs_bsize, bpp);
                                if (error) {
                                        brelse(*bpp);
                                        return (error);
                                }
                        }

                        return (0);
                }

                if (nb != 0) {
                        /*
                         * Consider the need to allocate a fragment.
                         */
                        osize = fragroundup(fs, blkoff(fs, ip->i_ffs2_size));
                        nsize = fragroundup(fs, size);

                        if (nsize <= osize) {
                                /*
                                 * The existing block is already at least as
                                 * big as we want. Just read it, if requested.
                                 */
                                if (bpp != NULL) {
                                        error = bread(vp, lbn, fs->fs_bsize,
                                            bpp);
                                        if (error) {
                                                brelse(*bpp);
                                                return (error);
                                        }
                                        (*bpp)->b_bcount = osize;
                                }

                                return (0);
                        } else {
                                /*
                                 * The existing block is smaller than we want,
                                 * grow it.
                                 */
                                error = ffs_realloccg(ip, lbn,
                                    ffs2_blkpref(ip, lbn, (int) lbn,
                                    &ip->i_ffs2_db[0]), osize, nsize, cred,
                                    bpp, &newb);
                                if (error)
                                        return (error);

                                if (DOINGSOFTDEP(vp))
                                        softdep_setup_allocdirect(ip, lbn,
                                            newb, nb, nsize, osize,
                                            bpp ? *bpp : NULL);
                        }
                } else {
                        /*
                         * The block was not previously allocated, allocate a
                         * new block or fragment.
                         */
                        if (ip->i_ffs2_size < lblktosize(fs, lbn + 1))
                                nsize = fragroundup(fs, size);
                        else
                                nsize = fs->fs_bsize;

                        error = ffs_alloc(ip, lbn, ffs2_blkpref(ip, lbn,
                            (int) lbn, &ip->i_ffs2_db[0]), nsize, cred,
                            &newb);
                        if (error)
                                return (error);

                        if (bpp != NULL) {
                                bp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                                if (nsize < fs->fs_bsize)
                                        bp->b_bcount = nsize;
                                bp->b_blkno = fsbtodb(fs, newb);
                                if (flags & B_CLRBUF)
                                        clrbuf(bp);
                                *bpp = bp;
                        }

                        if (DOINGSOFTDEP(vp))
                                softdep_setup_allocdirect(ip, lbn, newb, 0,
                                    nsize, 0, bpp ? *bpp : NULL);
                }

                ip->i_ffs2_db[lbn] = newb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;

                return (0);
        }

        /*
         * Determine the number of levels of indirection.
         */
        pref = 0;
        error = ufs_getlbns(vp, lbn, indirs, &num);
        if (error)
                return (error);

#ifdef DIAGNOSTIC
        if (num < 1)
                panic("ffs2_balloc: ufs_bmaparray returned indirect block");
#endif

        /*
         * Fetch the first indirect block allocating it necessary.
         */
        --num;
        nb = ip->i_ffs2_ib[indirs[0].in_off];
        allocib = NULL;
        allocblk = allociblk;

        if (nb == 0) {
                pref = ffs2_blkpref(ip, lbn, -indirs[0].in_off - 1, NULL);
                error = ffs_alloc(ip, lbn, pref, (int) fs->fs_bsize, cred,
                    &newb);
                if (error)
                        goto fail;

                nb = newb;
                *allocblk++ = nb;
                bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
                bp->b_blkno = fsbtodb(fs, nb);
                clrbuf(bp);

                if (DOINGSOFTDEP(vp)) {
                        softdep_setup_allocdirect(ip,
                            NDADDR + indirs[0].in_off, newb, 0,
                            fs->fs_bsize, 0, bp);
                        bdwrite(bp);
                } else {
                        /*
                         * Write synchronously so that indirect blocks never
                         * point at garbage.
                         */
                        error = bwrite(bp);
                        if (error)
                                goto fail;
                }

                unwindidx = 0;
                /* Only now hook the new indirect block into the inode. */
                allocib = &ip->i_ffs2_ib[indirs[0].in_off];
                *allocib = nb;
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }

        /*
         * Fetch through the indirect blocks, allocating as necessary.
         */
        for (i = 1;;) {
                error = bread(vp, indirs[i].in_lbn, (int)fs->fs_bsize, &bp);
                if (error) {
                        brelse(bp);
                        goto fail;
                }

                bap = (int64_t *) bp->b_data;
                nb = bap[indirs[i].in_off];

                /* Loop exits holding bp, the last (data-level) indirect. */
                if (i == num)
                        break;

                i++;

                if (nb != 0) {
                        brelse(bp);
                        continue;
                }

                if (pref == 0)
                        pref = ffs2_blkpref(ip, lbn, i - num - 1, NULL);

                error = ffs_alloc(ip, lbn, pref, (int) fs->fs_bsize, cred,
                    &newb);
                if (error) {
                        brelse(bp);
                        goto fail;
                }

                nb = newb;
                *allocblk++ = nb;
                nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
                nbp->b_blkno = fsbtodb(fs, nb);
                clrbuf(nbp);

                if (DOINGSOFTDEP(vp)) {
                        softdep_setup_allocindir_meta(nbp, ip, bp,
                            indirs[i - 1].in_off, nb);
                        bdwrite(nbp);
                } else {
                        /*
                         * Write synchronously so that indirect blocks never
                         * point at garbage.
                         */
                        error = bwrite(nbp);
                        if (error) {
                                brelse(bp);
                                goto fail;
                        }
                }

                /* Remember the first entry to clear if we must unwind. */
                if (unwindidx < 0)
                        unwindidx = i - 1;

                bap[indirs[i - 1].in_off] = nb;

                /*
                 * If required, write synchronously, otherwise use delayed
                 * write.
                 */
                if (flags & B_SYNC)
                        bwrite(bp);
                else
                        bdwrite(bp);
        }

        /*
         * Get the data block, allocating if necessary.
         */
        if (nb == 0) {
                pref = ffs2_blkpref(ip, lbn, indirs[num].in_off, &bap[0]);

                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
                    &newb);
                if (error) {
                        brelse(bp);
                        goto fail;
                }

                nb = newb;
                *allocblk++ = nb;

                if (bpp != NULL) {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                        nbp->b_blkno = fsbtodb(fs, nb);
                        if (flags & B_CLRBUF)
                                clrbuf(nbp);
                        *bpp = nbp;
                }

                if (DOINGSOFTDEP(vp))
                        softdep_setup_allocindir_page(ip, lbn, bp,
                            indirs[num].in_off, nb, 0, bpp ? *bpp : NULL);

                bap[indirs[num].in_off] = nb;

                if (allocib == NULL && unwindidx < 0)
                        unwindidx = i - 1;

                /*
                 * If required, write synchronously, otherwise use delayed
                 * write.
                 */
                if (flags & B_SYNC)
                        bwrite(bp);
                else
                        bdwrite(bp);

                return (0);
        }

        brelse(bp);

        if (bpp != NULL) {
                if (flags & B_CLRBUF) {
                        error = bread(vp, lbn, (int)fs->fs_bsize, &nbp);
                        if (error) {
                                brelse(nbp);
                                goto fail;
                        }
                } else {
                        nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
                        nbp->b_blkno = fsbtodb(fs, nb);
                        clrbuf(nbp);
                }

                *bpp = nbp;
        }

        return (0);

fail:
        /*
         * If we have failed to allocate any blocks, simply return the error.
         * This is the usual case and avoids the need to fsync the file.
         */
        if (allocblk == allociblk && allocib == NULL && unwindidx == -1)
                return (error);

        /*
         * If we have failed part way through block allocation, we have to
         * deallocate any indirect blocks that we have allocated. We have to
         * fsync the file before we start to get rid of all of its
         * dependencies so that we do not leave them dangling. We have to sync
         * it at the end so that the softdep code does not find any untracked
         * changes. Although this is really slow, running out of disk space is
         * not expected to be a common occurrence. The error return from fsync
         * is ignored as we already have an error to return to the user.
         */
        VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);

        if (unwindidx >= 0) {
                /*
                 * First write out any buffers we've created to resolve their
                 * softdeps. This must be done in reverse order of creation so
                 * that we resolve the dependencies in one pass.
                 * Write the cylinder group buffers for these buffers too.
                 */
                for (i = num; i >= unwindidx; i--) {

                        if (i == 0)
                                break;

                        bp = getblk(vp, indirs[i].in_lbn, (int) fs->fs_bsize,
                            0, 0);
                        if (bp->b_flags & B_DELWRI) {
                                /*
                                 * Flush the indirect block, then the
                                 * cylinder group block it was allocated
                                 * from, so the on-disk bitmaps are stable.
                                 */
                                nb = fsbtodb(fs, cgtod(fs, dtog(fs,
                                    dbtofsb(fs, bp->b_blkno))));
                                bwrite(bp);
                                bp = getblk(ip->i_devvp, nb,
                                    (int) fs->fs_cgsize, 0, 0);
                                if (bp->b_flags & B_DELWRI)
                                        bwrite(bp);
                                else {
                                        bp->b_flags |= B_INVAL;
                                        brelse(bp);
                                }
                        } else {
                                bp->b_flags |= B_INVAL;
                                brelse(bp);
                        }
                }

                if (DOINGSOFTDEP(vp) && unwindidx == 0) {
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        ffs_update(ip, 1);
                }

                /*
                 * Now that any dependencies that we created have been
                 * resolved, we can undo the partial allocation.
                 */
                if (unwindidx == 0) {
                        *allocib = 0;
                        ip->i_flag |= IN_CHANGE | IN_UPDATE;
                        if (DOINGSOFTDEP(vp))
                                ffs_update(ip, 1);
                } else {
                        r = bread(vp, indirs[unwindidx].in_lbn,
                            (int)fs->fs_bsize, &bp);
                        if (r)
                                panic("ffs2_balloc: unwind failed");

                        bap = (int64_t *) bp->b_data;
                        bap[indirs[unwindidx].in_off] = 0;
                        bwrite(bp);
                }

                /* Drop any remaining buffers for blocks we are freeing. */
                for (i = unwindidx + 1; i <= num; i++) {
                        bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize,
                            0, 0);
                        bp->b_flags |= B_INVAL;
                        brelse(bp);
                }
        }

        for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
                ffs_blkfree(ip, *blkp, fs->fs_bsize);
                deallocated += fs->fs_bsize;
        }

        if (deallocated) {
                /*
                 * Restore user's disk quota because allocation failed.
                 */
                (void) ufs_quota_free_blocks(ip, btodb(deallocated), cred);

                ip->i_ffs2_blocks -= btodb(deallocated);
                ip->i_flag |= IN_CHANGE | IN_UPDATE;
        }
        VOP_FSYNC(vp, p->p_ucred, MNT_WAIT, p);

        return (error);
}
/*
 * Userland (makefs-style) UFS1 balloc: allocate the block backing byte
 * offset "offset" in the image file described by ip, allocating indirect
 * blocks as needed.  Unlike the kernel versions there is no credential,
 * no softdep machinery and no unwind path — errors are returned directly
 * and fragment reallocation is simply unsupported (abort()s).
 *
 * NOTE(review): allocib/allocblk/allociblk are maintained but, with no
 * fail path here, serve only as bookkeeping — confirm before removing.
 */
static int
ffs_balloc_ufs1(struct inode *ip, off_t offset, int bufsize, struct buf **bpp)
{
        daddr_t lbn, lastlbn;
        int size;
        int32_t nb;
        struct buf *bp, *nbp;
        struct fs *fs = ip->i_fs;
        struct indir indirs[NIADDR + 2];
        daddr_t newb, pref;
        int32_t *bap;
        int osize, nsize, num, i, error;
        int32_t *allocblk, allociblk[NIADDR + 1];
        int32_t *allocib;
        /* On-disk data may be byte-swapped relative to the host. */
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * lbn is the logical block holding the offset; "size" is the number
         * of valid bytes counted from the start of that block.
         */
        lbn = lblkno(fs, offset);
        size = blkoff(fs, offset) + bufsize;
        if (bpp != NULL) {
                *bpp = NULL;
        }

        assert(size <= fs->fs_bsize);
        if (lbn < 0)
                return (EFBIG);

        /*
         * If the next write will extend the file into a new block,
         * and the file is currently composed of a fragment
         * this fragment has to be extended to be a full block.
         */
        lastlbn = lblkno(fs, ip->i_ffs1_size);
        if (lastlbn < NDADDR && lastlbn < lbn) {
                nb = lastlbn;
                osize = blksize(fs, ip, nb);
                if (osize < fs->fs_bsize && osize > 0) {
                        warnx("need to ffs_realloccg; not supported!");
                        abort();
                }
        }

        /*
         * The first NDADDR blocks are direct blocks
         */
        if (lbn < NDADDR) {
                nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
                if (nb != 0 && ip->i_ffs1_size >= lblktosize(fs, lbn + 1)) {

                        /*
                         * The block is an already-allocated direct block
                         * and the file already extends past this block,
                         * thus this must be a whole block.
                         * Just read the block (if requested).
                         */

                        if (bpp != NULL) {
                                error = bread(ip->i_fd, ip->i_fs, lbn,
                                    fs->fs_bsize, bpp);
                                if (error) {
                                        brelse(*bpp);
                                        return (error);
                                }
                        }
                        return (0);
                }
                if (nb != 0) {

                        /*
                         * Consider need to reallocate a fragment.
                         */

                        osize = fragroundup(fs, blkoff(fs, ip->i_ffs1_size));
                        nsize = fragroundup(fs, size);
                        if (nsize <= osize) {

                                /*
                                 * The existing block is already
                                 * at least as big as we want.
                                 * Just read the block (if requested).
                                 */

                                if (bpp != NULL) {
                                        error = bread(ip->i_fd, ip->i_fs,
                                            lbn, osize, bpp);
                                        if (error) {
                                                brelse(*bpp);
                                                return (error);
                                        }
                                }
                                return 0;
                        } else {
                                warnx("need to ffs_realloccg; not supported!");
                                abort();
                        }
                } else {

                        /*
                         * the block was not previously allocated,
                         * allocate a new block or fragment.
                         */

                        if (ip->i_ffs1_size < lblktosize(fs, lbn + 1))
                                nsize = fragroundup(fs, size);
                        else
                                nsize = fs->fs_bsize;
                        error = ffs_alloc(ip, lbn,
                            ffs_blkpref_ufs1(ip, lbn, (int)lbn,
                                &ip->i_ffs1_db[0]),
                            nsize, &newb);
                        if (error)
                                return (error);
                        if (bpp != NULL) {
                                bp = getblk(ip->i_fd, ip->i_fs, lbn, nsize);
                                bp->b_blkno = fsbtodb(fs, newb);
                                clrbuf(bp);
                                *bpp = bp;
                        }
                }
                /* Store in on-disk byte order. */
                ip->i_ffs1_db[lbn] = ufs_rw32((int32_t)newb, needswap);
                return (0);
        }

        /*
         * Determine the number of levels of indirection.
         */

        pref = 0;
        if ((error = ufs_getlbns(ip, lbn, indirs, &num)) != 0)
                return (error);

        if (num < 1) {
                warnx("ffs_balloc: ufs_getlbns returned indirect block");
                abort();
        }

        /*
         * Fetch the first indirect block allocating if necessary.
         */

        --num;
        nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
        allocib = NULL;
        allocblk = allociblk;
        if (nb == 0) {
                pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
                if (error)
                        return error;
                nb = newb;
                *allocblk++ = nb;
                bp = getblk(ip->i_fd, ip->i_fs, indirs[1].in_lbn,
                    fs->fs_bsize);
                bp->b_blkno = fsbtodb(fs, nb);
                clrbuf(bp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if ((error = bwrite(bp)) != 0)
                        return error;
                allocib = &ip->i_ffs1_ib[indirs[0].in_off];
                *allocib = ufs_rw32((int32_t)nb, needswap);
        }

        /*
         * Fetch through the indirect blocks, allocating as necessary.
         */

        for (i = 1;;) {
                error = bread(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
                    fs->fs_bsize, &bp);
                if (error) {
                        brelse(bp);
                        return error;
                }
                bap = (int32_t *)bp->b_data;
                nb = ufs_rw32(bap[indirs[i].in_off], needswap);
                /* Loop exits holding bp, the last (data-level) indirect. */
                if (i == num)
                        break;
                i++;
                if (nb != 0) {
                        brelse(bp);
                        continue;
                }
                if (pref == 0)
                        pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
                if (error) {
                        brelse(bp);
                        return error;
                }
                nb = newb;
                *allocblk++ = nb;
                nbp = getblk(ip->i_fd, ip->i_fs, indirs[i].in_lbn,
                    fs->fs_bsize);
                nbp->b_blkno = fsbtodb(fs, nb);
                clrbuf(nbp);
                /*
                 * Write synchronously so that indirect blocks
                 * never point at garbage.
                 */
                if ((error = bwrite(nbp)) != 0) {
                        brelse(bp);
                        return error;
                }
                bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);

                bwrite(bp);
        }

        /*
         * Get the data block, allocating if necessary.
         */

        if (nb == 0) {
                pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off,
                    &bap[0]);
                error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, &newb);
                if (error) {
                        brelse(bp);
                        return error;
                }
                nb = newb;
                *allocblk++ = nb;
                if (bpp != NULL) {
                        nbp = getblk(ip->i_fd, ip->i_fs, lbn, fs->fs_bsize);
                        nbp->b_blkno = fsbtodb(fs, nb);
                        clrbuf(nbp);
                        *bpp = nbp;
                }
                bap[indirs[num].in_off] = ufs_rw32(nb, needswap);

                /*
                 * If required, write synchronously, otherwise use
                 * delayed write.
                 */
                bwrite(bp);
                return (0);
        }
        brelse(bp);
        if (bpp != NULL) {
                error = bread(ip->i_fd, ip->i_fs, lbn, (int)fs->fs_bsize,
                    &nbp);
                if (error) {
                        brelse(nbp);
                        return error;
                }
                *bpp = nbp;
        }
        return (0);
}