Example #1
/*===========================================================================*
 *				lmfs_get_block_ino			     *
 *===========================================================================*/
struct buf *lmfs_get_block_ino(dev_t dev, block_t block, int only_search,
	ino_t ino, u64_t ino_off)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is NO_READ).  All the blocks in the cache that are not in
 * use are linked together in a chain, with 'front' pointing to the least
 * recently used block and 'rear' to the most recently used block.  If
 * 'only_search' is NO_READ, the block being requested will be overwritten
 * in its entirety, so it is only necessary to see if it is in the cache;
 * if it is not, any free buffer will do, and there is no need to actually
 * read the block in from disk.  If 'only_search' is PREFETCH, the block
 * need not be read from the disk either, and its device field is left
 * unset (NO_DEV), so callers can tell whether the returned block holds
 * valid data.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  struct buf *bp;		/* no need for static; it is always assigned */
  u64_t dev_off = (u64_t) block * fs_block_size;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
	util_stacktrace();
  }

  /* Search the hash chain for (dev, block). */
  b = BUFHASH(block);
  bp = buf_hash[b];
  while (bp != NULL) {
  	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev) {
  		if(bp->lmfs_flags & VMMC_EVICTED) {
  			/* We had it but VM evicted it; invalidate it. */
  			ASSERT(bp->lmfs_count == 0);
  			ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
  			ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
  			bp->lmfs_dev = NO_DEV;
  			bp->lmfs_bytes = 0;
  			bp->data = NULL;
  			break;
  		}
  		ASSERT(bp->lmfs_needsetcache == 0);
  		/* Block needed has been found. */
  		if (bp->lmfs_count == 0) {
			rm_lru(bp);
  			ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
			bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
		}
		raisecount(bp);
  		ASSERT(bp->lmfs_bytes == fs_block_size);
  		ASSERT(bp->lmfs_dev == dev);
  		ASSERT(bp->lmfs_dev != NO_DEV);
 		ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
  		ASSERT(bp->data);

		if(ino != VMC_NO_INODE) {
			if(bp->lmfs_inode == VMC_NO_INODE
			|| bp->lmfs_inode != ino
			|| bp->lmfs_inode_offset != ino_off) {
				bp->lmfs_inode = ino;
				bp->lmfs_inode_offset = ino_off;
				bp->lmfs_needsetcache = 1;
			}
		}

  		return(bp);
  	} else {
  		/* This block is not the one sought. */
  		bp = bp->lmfs_hash; /* move to next block on hash chain */
  	}
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
  	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* Block is not found in our cache, but we do want it
   * if it's in the vm cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if(vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
		&bp->lmfs_flags, fs_block_size)) != MAP_FAILED) {
		bp->lmfs_bytes = fs_block_size;
		ASSERT(!bp->lmfs_needsetcache);
		return bp;
	}
  }
  bp->data = NULL;

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp);

  assert(bp->data);

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	bp->lmfs_dev = NO_DEV;
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
  	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->data);

  return(bp);			/* return the newly acquired block */
}
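
A minimal usage sketch (not part of the source above): read one file system
block through lmfs_get_block_ino() and copy its contents out. The release
call lmfs_put_block(bp, FULL_DATA_BLOCK) and the #include are assumptions
about the surrounding libminixfs interface; the exact release signature
varies between MINIX versions, and note that this version of the getter
panics rather than returning NULL on failure.

#include <string.h>

static void read_one_block(dev_t dev, block_t block, ino_t ino,
	u64_t ino_off, void *out)
{
  struct buf *bp;

  /* NORMAL: satisfy the request from the cache, the VM cache, or disk. */
  bp = lmfs_get_block_ino(dev, block, NORMAL, ino, ino_off);

  /* The block comes back locked with its reference count raised;
   * copy the data out and release it again.
   */
  memcpy(out, bp->data, fs_block_size);
  lmfs_put_block(bp, FULL_DATA_BLOCK);	/* assumed release call */
}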
Example #2
/*===========================================================================*
 *				get_block				     *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,		/* on which device is the block? */
  register block_t block,	/* which block is wanted? */
  int only_search		/* NORMAL, NO_READ, or PREFETCH */
)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is NO_READ).  All the blocks in the cache that are not in
 * use are linked together in a chain, with 'front' pointing to the least
 * recently used block and 'rear' to the most recently used block.  If
 * 'only_search' is NO_READ, the block being requested will be overwritten
 * in its entirety, so it is only necessary to see if it is in the cache;
 * if it is not, any free buffer will do, and there is no need to actually
 * read the block in from disk.  If 'only_search' is PREFETCH, the block
 * need not be read from the disk either, and its device field is left
 * unset (NO_DEV), so callers can tell whether the returned block holds
 * valid data.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  struct buf *bp, *prev_ptr;	/* no need for static; always assigned */
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block).  The do_read() path can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped.
   */
  if (dev != NO_DEV) {
	b = BUFHASH(block);
	bp = buf_hash[b];
	while (bp != NULL) {
		if (bp->b_blocknr == block && bp->b_dev == dev) {
			/* Block needed has been found. */
			if (bp->b_count == 0) rm_lru(bp);
			bp->b_count++;	/* record that block is in use */
			ASSERT(bp->b_bytes == fs_block_size);
			ASSERT(bp->b_dev == dev);
			ASSERT(bp->b_dev != NO_DEV);
			ASSERT(bp->bp);
			return(bp);
		} else {
			/* This block is not the one sought. */
			bp = bp->b_hash; /* move to next block on hash chain */
		}
	}
  }

  /* Desired block is not on available chain.  Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);

  if(bp->b_bytes < fs_block_size) {
	ASSERT(!bp->bp);
	ASSERT(bp->b_bytes == 0);
	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
		printf("MFS: couldn't allocate a new block.\n");
		for(bp = front;
			bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
			;
		if(!bp) {
			panic("no buffer available");
		}
	} else {
  		bp->b_bytes = fs_block_size;
	}
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->b_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->b_hash != NULL)
		if (prev_ptr->b_hash == bp) {
			prev_ptr->b_hash = bp->b_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->b_hash;	/* keep looking */
		}
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
	if (ISDIRTY(bp)) flushall(bp->b_dev);

	/* Are we throwing out a block that contained something?
	 * Give it to VM for the second-layer cache.
	 */
	yieldid = make64(bp->b_dev, bp->b_blocknr);
	assert(bp->b_bytes == fs_block_size);
	BP_CLEARDEV(bp);
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  if(dev == NO_DEV) BP_CLEARDEV(bp);
  else BP_SETDEV(bp, dev);
  bp->b_blocknr = block;	/* fill in block number */
  bp->b_count++;		/* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  if(dev == NO_DEV) {
	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
			bp->bp, fs_block_size);
	}
	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
	/* Block is not found in our cache, but we do want it
	 * if it's in the vm cache.
	 */
	if(vmcache) {
		/* If we can satisfy the PREFETCH or NORMAL request 
		 * from the vm cache, work is done.
		 */
		if(vm_yield_block_get_block(yieldid, getid,
			bp->bp, fs_block_size) == OK) {
			return bp;
		}
	}
  }

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	BP_CLEARDEV(bp);
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
	/* we want this block, but its contents
	 * will be overwritten. VM has to forget
	 * about it.
	 */
	if(vmcache) {
		vm_forgetblock(getid);
	}
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);			/* return the newly acquired block */
}
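
The hash chains that both functions above maintain are what make the
(dev, block) lookup fast; get_block_ino() below relies on a find_block()
helper that is not shown in this section. Here is a minimal sketch of what
such a helper looks like, assuming BUFHASH() maps a block number to a
buf_hash[] slot by its low-order bits (so blocks whose numbers end in the
same bit string share a chain); field names follow the lmfs_ variant above.

static struct buf *find_block(dev_t dev, block64_t block)
{
  struct buf *bp;

  /* Walk the single hash chain that (dev, block) can be on. */
  for (bp = buf_hash[BUFHASH(block)]; bp != NULL; bp = bp->lmfs_hash)
	if (bp->lmfs_blocknr == block && bp->lmfs_dev == dev)
		return bp;	/* found it (possibly VMMC_EVICTED) */

  return NULL;			/* not in the cache */
}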
/*===========================================================================*
 *				get_block_ino				     *
 *===========================================================================*/
static int get_block_ino(struct buf **bpp, dev_t dev, block64_t block, int how,
	ino_t ino, u64_t ino_off, size_t block_size)
{
/* Check to see if the requested block is in the block cache.  The requested
 * block is identified by the block number in 'block' on device 'dev', counted
 * in the file system block size.  The amount of data requested for this block
 * is given in 'block_size', which may be less than the file system block size
 * iff the requested block is the last (partial) block on a device.  Note that
 * the given block size does *not* affect the conversion of 'block' to a byte
 * offset!  Either way, if the block could be obtained, either from the cache
 * or by reading from the device, return OK, with a pointer to the buffer
 * structure stored in 'bpp'.  If not, return a negative error code (and no
 * buffer).  If necessary, evict some other block and fetch the contents from
 * disk (if 'how' is NORMAL).  If 'how' is NO_READ, the caller intends to
 * overwrite the requested block in its entirety, so it is only necessary to
 * see if it is in the cache; if it is not, any free buffer will do.  If 'how'
 * is PEEK, the function returns the block if it is in the cache or the VM
 * cache, and an ENOENT error code otherwise.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */
  int b, r;
  struct buf *bp;		/* no need for static; it is always assigned */
  uint64_t dev_off;
  struct buf *prev_ptr;

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  assert(dev != NO_DEV);

  assert(block <= UINT64_MAX / fs_block_size);

  dev_off = block * fs_block_size;

  if((ino_off % fs_block_size)) {
	printf("cache: unaligned lmfs_get_block_ino ino_off %llu\n",
		ino_off);
	util_stacktrace();
  }

  /* See if the block is in the cache. If so, we can return it right away. */
  bp = find_block(dev, block);
  if (bp != NULL && !(bp->lmfs_flags & VMMC_EVICTED)) {
	ASSERT(bp->lmfs_dev == dev);
	ASSERT(bp->lmfs_dev != NO_DEV);

	/* The block must have exactly the requested number of bytes. */
	if (bp->lmfs_bytes != block_size)
		return EIO;

	/* Block needed has been found. */
	if (bp->lmfs_count == 0) {
		rm_lru(bp);
		ASSERT(bp->lmfs_needsetcache == 0);
		ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
		/* FIXME: race condition against the VMMC_EVICTED check */
		bp->lmfs_flags |= VMMC_BLOCK_LOCKED;
	}
	raisecount(bp);
	ASSERT(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
	ASSERT(bp->data);

	if(ino != VMC_NO_INODE) {
		if(bp->lmfs_inode == VMC_NO_INODE
		|| bp->lmfs_inode != ino
		|| bp->lmfs_inode_offset != ino_off) {
			bp->lmfs_inode = ino;
			bp->lmfs_inode_offset = ino_off;
			bp->lmfs_needsetcache = 1;
		}
	}

	*bpp = bp;
	return OK;
  }

  /* We had the block in the cache but VM evicted it; invalidate it. */
  if (bp != NULL) {
	assert(bp->lmfs_flags & VMMC_EVICTED);
	ASSERT(bp->lmfs_count == 0);
	ASSERT(!(bp->lmfs_flags & VMMC_BLOCK_LOCKED));
	ASSERT(!(bp->lmfs_flags & VMMC_DIRTY));
	bp->lmfs_dev = NO_DEV;
	bp->lmfs_bytes = 0;
	bp->data = NULL;
  }

  /* Desired block is not on available chain. Find a free block to use. */
  if(bp) {
  	ASSERT(bp->lmfs_flags & VMMC_EVICTED);
  } else {
	if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);
  }
  assert(bp);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->lmfs_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->lmfs_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->lmfs_hash != NULL)
		if (prev_ptr->lmfs_hash == bp) {
			prev_ptr->lmfs_hash = bp->lmfs_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->lmfs_hash;	/* keep looking */
		}
  }

  freeblock(bp);

  bp->lmfs_inode = ino;
  bp->lmfs_inode_offset = ino_off;

  bp->lmfs_flags = VMMC_BLOCK_LOCKED;
  bp->lmfs_needsetcache = 0;
  bp->lmfs_dev = dev;		/* fill in device number */
  bp->lmfs_blocknr = block;	/* fill in block number */
  ASSERT(bp->lmfs_count == 0);
  raisecount(bp);
  b = BUFHASH(bp->lmfs_blocknr);
  bp->lmfs_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  assert(dev != NO_DEV);

  /* The block is not found in our cache, but we do want it if it's in the VM
   * cache. The exception is NO_READ, purely for context switching performance
   * reasons. NO_READ is used for 1) newly allocated blocks, 2) blocks being
   * prefetched, and 3) blocks about to be fully overwritten. In the first two
   * cases, VM will not have the block in its cache anyway, and for the third
   * we save on one VM call only if the block is in the VM cache.
   */
  assert(!bp->data);
  assert(!bp->lmfs_bytes);
  if (how != NO_READ && vmcache) {
	if((bp->data = vm_map_cacheblock(dev, dev_off, ino, ino_off,
	    &bp->lmfs_flags, roundup(block_size, PAGE_SIZE))) != MAP_FAILED) {
		bp->lmfs_bytes = block_size;
		ASSERT(!bp->lmfs_needsetcache);
		*bpp = bp;
		return OK;
	}
  }
  bp->data = NULL;

  /* The block is not in the cache, and VM does not know about it. If we were
   * requested to search for the block only, we can now return failure to the
   * caller. Return the block to the pool without allocating data pages, since
   * these would be freed upon recycling the block anyway.
   */
  if (how == PEEK) {
	bp->lmfs_dev = NO_DEV;

	put_block(bp, ONE_SHOT);

	return ENOENT;
  }

  /* Not in the cache; reserve memory for its contents. */

  lmfs_alloc_block(bp, block_size);

  assert(bp->data);

  if (how == NORMAL) {
	/* Try to read the block. Return an error code on failure. */
	if ((r = read_block(bp, block_size)) != OK) {
		put_block(bp, 0);

		return r;
	}
  } else if(how == NO_READ) {
  	/* This block will be overwritten by new contents. */
  } else
	panic("unexpected 'how' value: %d", how);

  assert(bp->data);

  *bpp = bp;			/* return the newly acquired block */
  return OK;
}
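
A minimal caller sketch for this error-returning variant, showing how PEEK
separates "already cached somewhere" from "would require disk I/O". The
wrapper name fetch_block() is hypothetical; the put_block(bp, 0) release
used on the error path inside get_block_ino() above suggests the matching
release convention, but the real caller-facing API may differ.

static int fetch_block(struct buf **bpp, dev_t dev, block64_t block,
	ino_t ino, u64_t ino_off, size_t block_size)
{
  int r;

  /* Cheap path first: block cache or VM cache only, no disk I/O. */
  r = get_block_ino(bpp, dev, block, PEEK, ino, ino_off, block_size);
  if (r != ENOENT)
	return r;	/* OK with a buffer in *bpp, or a real error */

  /* Not cached anywhere; fetch the contents from the device. */
  return get_block_ino(bpp, dev, block, NORMAL, ino, ino_off, block_size);
}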