/*===========================================================================*
 *				lmfs_flushdev				     *
 *===========================================================================*/
void lmfs_flushdev(dev_t dev)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static noxfer_buf_ptr_t *dirty;	/* static so it isn't on the stack */
  static unsigned int dirtylistsize = 0;
  unsigned int ndirty;

  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++) {
	/* Do not flush dirty blocks that are in use (lmfs_count>0): the file
	 * system may mark the block as dirty before changing its contents, in
	 * which case the new contents could end up being lost.
	 */
	if (!lmfs_isclean(bp) && bp->lmfs_dev == dev && bp->lmfs_count == 0) {
		dirty[ndirty++] = bp;
	}
  }

  rw_scattered(dev, dirty, ndirty, WRITING);
}
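
A minimal caller sketch, for illustration only: lmfs_flushdev() is typically
driven from the file system's sync path. The fs_sync() entry point and the
fs_dev global below are assumptions and do not appear in the source above.

/* Hypothetical sync handler; 'fs_dev' is an assumed global holding the
 * device this file system instance is mounted on.
 */
static dev_t fs_dev;

int fs_sync(void)
{
  /* Write back every dirty, unreferenced cache block for our device.
   * Blocks still in use (lmfs_count > 0) are skipped by lmfs_flushdev()
   * itself and will be written out on a later sync, once released.
   */
  lmfs_flushdev(fs_dev);

  return OK;
}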
Example #2: file cache.c, project Spenser309/CS551
/*===========================================================================*
 *				flushall				     *
 *===========================================================================*/
PUBLIC void flushall(
  dev_t dev 			/* device to flush */
)
{
/* Flush all dirty blocks for one device. */

  register struct buf *bp;
  static struct buf **dirty;	/* static so it isn't on stack */
  static unsigned int dirtylistsize = 0;
  int ndirty;

  if(dirtylistsize != nr_bufs) {
	if(dirtylistsize > 0) {
		assert(dirty != NULL);
		free(dirty);
	}
	if(!(dirty = malloc(sizeof(dirty[0])*nr_bufs)))
		panic("couldn't allocate dirty buf list");
	dirtylistsize = nr_bufs;
  }

  for (bp = &buf[0], ndirty = 0; bp < &buf[nr_bufs]; bp++)
	if (bp->b_dirt == DIRTY && bp->b_dev == dev) dirty[ndirty++] = bp;
  rw_scattered(dev, dirty, ndirty, WRITING);
}
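
For context, a hedged sketch of how a block becomes eligible for flushall() in
this older interface: fetch, modify, mark DIRTY, release. The get_block() and
put_block() calls and the NORMAL and FULL_DATA_BLOCK constants are assumed from
the classic MINIX cache API and are not defined in the snippets above; the
newer interface in the first snippet would mark the block with
lmfs_markdirty(bp) instead of setting b_dirt directly.

/* Assumed classic MINIX cache API: get_block(), put_block(), NORMAL and
 * FULL_DATA_BLOCK are not shown above.
 */
void touch_block(dev_t dev, block_t blocknr)
{
  register struct buf *bp;

  bp = get_block(dev, blocknr, NORMAL);	/* fetch (and read) the block */
  /* ... modify the block contents via bp->b_data here ... */
  bp->b_dirt = DIRTY;			/* mark it for write-back */
  put_block(bp, FULL_DATA_BLOCK);	/* release; flushall() may write it later */
}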
/*===========================================================================*
 *				lmfs_readahead				     *
 *===========================================================================*/
void lmfs_readahead(dev_t dev, block64_t base_block, unsigned int nblocks,
	size_t last_size)
{
/* Read ahead 'nblocks' blocks starting from the block 'base_block' on device
 * 'dev'. The number of blocks must be between 1 and LMFS_MAX_PREFETCH,
 * inclusive. All blocks have the file system's block size, except possibly
 * the last block in the range, which is of size 'last_size'. The caller must
 * ensure that none of the blocks in the range are already in the cache.
 * However, the caller must also not rely on all or even any of the blocks
 * being present in the cache afterwards; failures are (deliberately!) ignored.
 */
  static noxfer_buf_ptr_t bufq[LMFS_MAX_PREFETCH]; /* static for size only */
  struct buf *bp;
  unsigned int count;
  int r;

  assert(nblocks >= 1 && nblocks <= LMFS_MAX_PREFETCH);

  for (count = 0; count < nblocks; count++) {
	if (count == nblocks - 1)
		r = lmfs_get_partial_block(&bp, dev, base_block + count,
		    NO_READ, last_size);
	else
		r = lmfs_get_block(&bp, dev, base_block + count, NO_READ);

	if (r != OK)
		break;

	/* We could add a flag that makes the get_block() calls fail if the
	 * block is already in the cache, but it is not a major concern if it
	 * is: we just perform a useless read in that case. However, if the
	 * block is cached *and* dirty, we are about to lose its new contents.
	 */
	assert(lmfs_isclean(bp));

	bufq[count] = bp;
  }

  rw_scattered(dev, bufq, count, READING);
}
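
A hedged usage sketch: a caller might prefetch the blocks following a position
it has just served. start_readahead() and 'dev_blocks' are illustrative
assumptions; note that a real caller must also ensure that none of the target
blocks is already cached, as the comment above requires (check not shown).

/* Hypothetical prefetch trigger: after serving block 'pos', read ahead the
 * following blocks, clamped to LMFS_MAX_PREFETCH and to 'dev_blocks', an
 * assumed total block count for the device.
 */
static void start_readahead(dev_t dev, block64_t pos, block64_t dev_blocks,
	size_t block_size)
{
  unsigned int nblocks;

  if (pos + 1 >= dev_blocks)
	return;		/* no blocks beyond this one */

  nblocks = LMFS_MAX_PREFETCH;
  if ((block64_t)nblocks > dev_blocks - (pos + 1))
	nblocks = (unsigned int)(dev_blocks - (pos + 1));

  /* All prefetched blocks are full-sized here, so 'last_size' is simply the
   * file system's block size; lmfs_readahead() ignores any failures.
   */
  lmfs_readahead(dev, pos + 1, nblocks, block_size);
}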