/*===========================================================================*
 *				lmfs_zero_block_ino			     *
 *===========================================================================*/
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t ino_off)
{
/* Register an all-zeroes block with the VM cache on behalf of a file hole.
 * A hole has no backing block on the device and hence no real block number,
 * so lmfs_get_block_ino() cannot be used for it directly. Reads are easy: the
 * file system simply zero-fills the caller's buffer. Memory-mapped pages are
 * not: VM must be told about a block for the mapped page, and VM refuses
 * blocks that carry no device offset. We therefore hand VM a fabricated
 * device offset, chosen such that the block shows up when the file is
 * memory-mapped but can never be reached by memory-mapping the block device.
 */
  static block64_t fake_block = 0;
  struct buf *bp;
  int r;

  if (!vmcache)
	return;

  assert(fs_block_size > 0);

  /* Choose a fake block number beyond anything a device mmap can address:
   * off_t is signed, so offsets of 2^63 and up cannot be produced by mapping
   * the device itself (and 8-exabyte devices are a while off). Advance the
   * number on every call to sidestep possible concurrency issues, wrapping
   * back to the start of the fake range before it can overflow.
   * FIXME: it does not seem like VM actually verifies mmap offsets though..
   */
  if (fake_block != 0)
	fake_block++;
  if (fake_block == 0 || fake_block >= UINT64_MAX / fs_block_size)
	fake_block = ((uint64_t)INT64_MAX + 1) / fs_block_size;

  /* Allocate a buffer for the fake block, without reading anything in. */
  r = lmfs_get_block_ino(&bp, dev, fake_block, NO_READ, ino, ino_off);
  if (r != OK)
	panic("libminixfs: getting a NO_READ block failed: %d", r);
  assert(bp != NULL);
  assert(bp->lmfs_dev != NO_DEV);

  /* The block is already zeroed, as it has just been allocated with mmap. File
   * systems do not rely on this assumption yet, so if VM ever gets changed to
   * not clear the blocks we allocate (e.g., by recycling pages in the VM cache
   * for the same process, which would be safe), we need to add a memset here.
   */

  /* Release the block. We don't expect it to be accessed ever again. Moreover,
   * if we keep the block around in the VM cache, it may erroneously be mapped
   * in beyond the file end later. Hence, use VMSF_ONCE when passing it to VM.
   * TODO: tell VM that it is an all-zeroes block, so that VM can deduplicate
   * all such pages in its cache.
   */
  put_block(bp, ONE_SHOT);
}
/* Example #2 */
/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
struct buf *lmfs_get_block(register dev_t dev, register block_t block,
	int only_search)
{
	/* Convenience wrapper: fetch a block that is not associated with any
	 * particular inode, by delegating to lmfs_get_block_ino().
	 */
	struct buf *bp;

	bp = lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
	return bp;
}
/*===========================================================================*
 *				lmfs_get_block				     *
 *===========================================================================*/
int lmfs_get_block(struct buf **bpp, dev_t dev, block64_t block, int how)
{
	/* Convenience wrapper: fetch a block that is not associated with any
	 * particular inode, by delegating to lmfs_get_block_ino(). Returns
	 * whatever status lmfs_get_block_ino() reports.
	 */
	int r;

	r = lmfs_get_block_ino(bpp, dev, block, how, VMC_NO_INODE, 0);
	return r;
}