/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty;	/* static so it isn't on stack */
  static unsigned int dirtylistsize = 0;
  int ndirty;

  /* (Re)allocate the dirty list if the number of buffers has changed. */
  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  /* Gather all dirty blocks that belong to this device. */
  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
	if (!lmfs_isclean(bp) && bp->lmfs_dev == dev) {
		dirty[ndirty++] = bp;
	}
  }

  /* Write them all out in one scattered request. */
  lmfs_rw_scattered(dev, dirty, ndirty, WRITING);
}
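
/*
 * Illustrative sketch only, not part of the cache implementation: a file
 * server's sync or unmount path might push all of its dirty blocks to disk
 * with a single call.  The fs_sync() name, the fs_dev variable and the OK
 * return value are hypothetical placeholders for whatever the caller uses.
 */
#if 0
static int fs_sync(void)
{
  /* lmfs_flushdev() gathers every dirty cached block belonging to fs_dev
   * and writes them out in one scattered request.
   */
  lmfs_flushdev(fs_dev);

  return OK;
}
#endif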
/*
 * Prefetch up to "nblocks" blocks on "dev" starting from block number "block".
 * Stop early when either the I/O request fills up or when a block is already
 * found to be in the cache.  The latter is likely to happen often, since this
 * function is called before getting each block for reading.  Prefetching is a
 * strictly best-effort operation, and may fail silently.
 * TODO: limit according to the number of available buffers.
 */
static void block_prefetch(dev_t dev, block_t block, block_t nblocks)
{
	struct buf *bp, *bufs[NR_IOREQS];
	unsigned int count;

	/* Never collect more blocks than fit in one scattered I/O request;
	 * the bufs[] array has only NR_IOREQS slots.
	 */
	if (nblocks > NR_IOREQS)
		nblocks = NR_IOREQS;

	for (count = 0; count < nblocks; count++) {
		bp = lmfs_get_block(dev, block + count, PREFETCH);
		assert(bp != NULL);

		if (lmfs_dev(bp) != NO_DEV) {
			/* Block is already in the cache; stop prefetching. */
			lmfs_put_block(bp, FULL_DATA_BLOCK);
			break;
		}

		bufs[count] = bp;
	}

	if (count > 0)
		lmfs_rw_scattered(dev, bufs, count, READING);
}
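
/*
 * Illustrative sketch only: a read path could ask for read-ahead just before
 * fetching a run of sequential blocks.  The read_ahead() name and the
 * blocks_left argument are hypothetical; real callers derive the window from
 * the file position and size.
 */
#if 0
static void read_ahead(dev_t dev, block_t base, block_t blocks_left)
{
	block_t window = blocks_left;

	/* One scattered I/O request is the most block_prefetch() will issue. */
	if (window > NR_IOREQS)
		window = NR_IOREQS;

	/* Best effort: a block already in the cache simply ends the run early. */
	block_prefetch(dev, base, window);
}
#endif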