Example #1
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2), data will be read first from the page
 * cache, then from the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of an mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior, it does have
 * some drawbacks.  The most obvious is that it increases the
 * required memory footprint when accessing mmap'ed files.  It also
 * adds complexity to the code that keeps
 * both caches synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = 1;
	mutex_exit(&zp->z_lock);

	return (error);
}
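The comment above notes that a write(2) must update both the ARC copy and any pages already resident in the page cache, while never inserting new pages (that only happens via .readpage()). The routine below is a simplified, hypothetical sketch of that second step, not the actual ZFS helper; the function name and the way the freshly written data is passed in are assumptions made purely for illustration.

/*
 * Hypothetical sketch: refresh any pages already present in the page
 * cache for the written range.  Pages that are not cached are skipped,
 * so no new pages are added here; the data has already been written to
 * the ARC, so each page is refreshed rather than marked dirty.
 */
static void
example_update_mapped_pages(struct inode *ip, loff_t off,
    const char *data, size_t len)
{
	struct address_space *mapping = ip->i_mapping;
	pgoff_t index = off >> PAGE_SHIFT;
	size_t poff = off & (PAGE_SIZE - 1);

	while (len > 0) {
		size_t chunk = min_t(size_t, PAGE_SIZE - poff, len);
		struct page *pp = find_lock_page(mapping, index);

		if (pp != NULL) {
			void *va = kmap(pp);

			memcpy((char *)va + poff, data, chunk);
			kunmap(pp);
			SetPageUptodate(pp);
			unlock_page(pp);
			put_page(pp);
		}

		data += chunk;
		len -= chunk;
		poff = 0;
		index++;
	}
}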
Example #2
static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);

	file_accessed(file);
	vma->vm_ops = &ext2_dax_vm_ops;
	return 0;
}
Example #3
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED)) {
		if ((vma->vm_flags & VM_WRITE))
			return -ENODEV;
		else
			vma->vm_flags &= ~VM_MAYWRITE;
	}
	return generic_file_mmap(file, vma);
}
Example #4
File: file.c Project: 020gzh/linux
static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_mmap(file, vma);

	file_accessed(file);
	vma->vm_ops = &ext2_dax_vm_ops;
	vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
Example #5
static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int rc;

	rc = generic_file_mmap(file, vma);
	if (!rc)
		vma->vm_ops = &ecryptfs_file_vm_ops;

	return rc;
}
Example #6
static int cramfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long address, length;
	struct inode *inode = file->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;

	/* this is only used in the case of read-only maps for XIP */

	if (vma->vm_flags & VM_WRITE)
		return generic_file_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	address  = PAGE_ALIGN(sb->CRAMFS_SB_LINEAR_PHYS_ADDR + OFFSET(inode));
	address += vma->vm_pgoff << PAGE_SHIFT;

	length = vma->vm_end - vma->vm_start;

	if (length > inode->i_size)
		length = inode->i_size;

	length = PAGE_ALIGN(length);

#if 0
	/* Doing the following makes it slower and more broken.  bdl */
	/*
	 * Accessing memory above the top of memory the kernel knows about or
	 * through a file pointer that was marked O_SYNC will be
	 * done non-cached.
	 */
	vma->vm_page_prot =
		__pgprot((pgprot_val(vma->vm_page_prot) & ~_CACHE_MASK)
			| _CACHE_UNCACHED);
#endif

	/*
	 * Don't dump addresses that are not real memory to a core file.
	 */
	vma->vm_flags |= VM_IO;
	flush_tlb_page(vma, address);
	if (remap_page_range(vma->vm_start, address, length,
			     vma->vm_page_prot))
		return -EAGAIN;

#ifdef DEBUG_CRAMFS_XIP
	printk("cramfs_mmap: mapped %s at 0x%08lx, length %lu to vma 0x%08lx"
		", page_prot 0x%08lx\n",
		file->f_dentry->d_name.name, address, length,
		vma->vm_start, pgprot_val(vma->vm_page_prot));
#endif

	return 0;
}
Example #7
int 
xixfs_file_mmap(
		struct file * file, 
		struct vm_area_struct * vma
)
{
	DebugTrace(DEBUG_LEVEL_ERROR, (DEBUG_TARGET_FCB|DEBUG_TARGET_VFSAPIT), 
		("ENTER xixfs_file_mmap (%s).\n", file->f_dentry->d_name.name));	
	
	return generic_file_mmap(file, vma);
}
Example #8
static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *lower_file = ecryptfs_file_to_lower(file);
	/*
	 * Don't allow mmap on top of file systems that don't support it
	 * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
	 * allows recursive mounting, this will need to be extended.
	 */
	if (!lower_file->f_op->mmap)
		return -ENODEV;
	return generic_file_mmap(file, vma);
}
Example #9
static int
v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_file_vm_ops;

	return retval;
}
Example #10
static int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry *dentry = file->f_dentry;
	int	status;

	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	status = nfs_revalidate_inode(NFS_DSERVER(dentry), dentry);
	if (!status)
		status = generic_file_mmap(file, vma);
	return status;
}
Example #11
static int coda_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct coda_inode_info *cii;
	int res;

	coda_vfs_stat.file_mmap++;

	ENTRY;
	cii = ITOC(file->f_dentry->d_inode);

	res = generic_file_mmap(file, vma);
	EXIT;
	return res;
}
Example #12
static int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int	status;

	dfprintk(VFS, "nfs: mmap(%s/%s)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	status = nfs_revalidate_mapping(inode, file->f_mapping);
	if (!status)
		status = generic_file_mmap(file, vma);
	return status;
}
Example #13
int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct inode *inode = file_inode(file);
	int	status;

	dprintk("NFS: mmap(%pD2)\n", file);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
Example #14
static int
smb_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry * dentry = file->f_dentry;
	int	status;

	VERBOSE("file %s/%s, address %lu - %lu\n",
		DENTRY_PATH(dentry), vma->vm_start, vma->vm_end);

	status = smb_revalidate_inode(dentry);
	if (status) {
		PARANOIA("%s/%s validation failed, error=%d\n",
			 DENTRY_PATH(dentry), status);
		goto out;
	}
	status = generic_file_mmap(file, vma);
out:
	return status;
}
Example #15
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}
Example #16
static int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	int	status;

	dprintk("NFS: mmap(%s/%s)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
Example #17
int linvfs_generic_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vnode_t	*vp;
	int	ret;

	/* this will return a (-) error so flip */
	ret = -generic_file_mmap(filp, vma);
	if (!ret) {
		vattr_t va, *vap;

		vap = &va;
		vap->va_mask = AT_UPDATIME;

		vp = LINVFS_GET_VP(filp->f_dentry->d_inode);

		ASSERT(vp);

		VOP_SETATTR(vp, vap, AT_UPDATIME, NULL, ret);
	}
	return(-ret);
}
Example #18
static int
HgfsMmap(struct file *file,		// IN: File we operate on
         struct vm_area_struct *vma)	// IN/OUT: VM area information
{
   int result;

   ASSERT(file);
   ASSERT(vma);
   ASSERT(file->f_dentry);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsMmap: was called\n"));

   result = HgfsRevalidate(file->f_dentry);
   if (result) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMmap: invalid dentry\n"));
      goto out;
   }

   result = generic_file_mmap(file, vma);
  out:
   return result;
}
Example #19
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	mutex_lock(&v9inode->v_mutex);
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * Clone a fid and add it to writeback_fid.  We do this at
		 * mmap time rather than at page-dirty time (via
		 * write_begin/page_mkwrite) because we want the
		 * write-after-unlink use case to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}
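The write-after-unlink use case mentioned in the comment above can be exercised entirely from user space: a process creates a shared writable mapping, unlinks the file, and then stores through the mapping, so the filesystem must still be able to write the dirty pages back even though the file no longer has a name. A minimal, self-contained sketch (the path and size are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;
	int fd = open("/tmp/waul-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || ftruncate(fd, len) != 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	/* Shared writable mapping: dirty pages must reach the file. */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Remove the last name; the inode stays alive via fd and mapping. */
	unlink("/tmp/waul-demo");

	/* Store through the mapping after the unlink. */
	strcpy(p, "written after unlink");

	/* Force writeback of the dirty page while the file has no name. */
	if (msync(p, len, MS_SYNC) != 0)
		perror("msync");

	munmap(p, len);
	close(fd);
	return EXIT_SUCCESS;
}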
Example #20
static int ccfs_file_mmap(struct file * file, struct vm_area_struct * vma) {
	mdbg(INFO3,"Mmap file %p [%s]", file, file->f_dentry->d_name.name);
  
	return generic_file_mmap(file, vma);
}
Example #21
static int unionfs_mmap(struct file *file, struct vm_area_struct *vma)
{
	int err = 0;
	bool willwrite;
	struct file *lower_file;
	struct dentry *dentry = file->f_path.dentry;
	struct dentry *parent;
	struct vm_operations_struct *saved_vm_ops = NULL;

	unionfs_read_lock(dentry->d_sb, UNIONFS_SMUTEX_PARENT);
	parent = unionfs_lock_parent(dentry, UNIONFS_DMUTEX_PARENT);
	unionfs_lock_dentry(dentry, UNIONFS_DMUTEX_CHILD);

	/*
	 * Writes through a shared writable mapping may be deferred until
	 * ->writepage time, so treat MAP_SHARED | PROT_WRITE as a write now.
	 */
	willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
	err = unionfs_file_revalidate(file, parent, willwrite);
	if (unlikely(err))
		goto out;
	unionfs_check_file(file);

	/*
	 * File systems which do not implement ->writepage may use
	 * generic_file_readonly_mmap as their ->mmap op.  If you call
	 * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
	 * But we cannot call the lower ->mmap op, so we can't tell that
	 * writeable mappings won't work.  Therefore, our only choice is to
	 * check if the lower file system supports the ->writepage, and if
	 * not, return EINVAL (the same error that
	 * generic_file_readonly_mmap returns in that case).
	 */
	lower_file = unionfs_lower_file(file);
	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
		err = -EINVAL;
		printk(KERN_ERR "unionfs: branch %d file system does not "
		       "support writeable mmap\n", fbstart(file));
		goto out;
	}

	/*
	 * find and save lower vm_ops.
	 *
	 * XXX: the VFS should have a cleaner way of finding the lower vm_ops
	 */
	if (!UNIONFS_F(file)->lower_vm_ops) {
		err = lower_file->f_op->mmap(lower_file, vma);
		if (err) {
			printk(KERN_ERR "unionfs: lower mmap failed %d\n", err);
			goto out;
		}
		saved_vm_ops = vma->vm_ops;
		err = do_munmap(current->mm, vma->vm_start,
				vma->vm_end - vma->vm_start);
		if (err) {
			printk(KERN_ERR "unionfs: do_munmap failed %d\n", err);
			goto out;
		}
	}

	file->f_mapping->a_ops = &unionfs_dummy_aops;
	err = generic_file_mmap(file, vma);
	file->f_mapping->a_ops = &unionfs_aops;
	if (err) {
		printk(KERN_ERR "unionfs: generic_file_mmap failed %d\n", err);
		goto out;
	}
	vma->vm_ops = &unionfs_vm_ops;
	if (!UNIONFS_F(file)->lower_vm_ops)
		UNIONFS_F(file)->lower_vm_ops = saved_vm_ops;

out:
	if (!err) {
		/* copyup could cause parent dir times to change */
		unionfs_copy_attr_times(parent->d_inode);
		unionfs_check_file(file);
	}
	unionfs_unlock_dentry(dentry);
	unionfs_unlock_parent(dentry, parent);
	unionfs_read_unlock(dentry->d_sb);
	return err;
}
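As the long comment in this example explains, a writable shared mapping is refused with -EINVAL when the lower branch has no ->writepage, mirroring what generic_file_readonly_mmap() would return. From user space that policy is visible as a failed mmap(2). A small sketch of how a caller would observe it (the file path is a placeholder for a file on such a branch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* argv[1]: a file on a branch whose filesystem lacks ->writepage. */
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED && errno == EINVAL)
		printf("writable shared mmap rejected: EINVAL\n");
	else if (p != MAP_FAILED)
		munmap(p, 4096);

	close(fd);
	return 0;
}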
Example #22
int sjfs_fops_mmap(struct file *f, struct vm_area_struct *v) {
	printk("sjfs_fops_mmap -> generic_file_mmap\n");
	return generic_file_mmap(f, v);
}
Example #23
/*
 * @brief Called by the mmap(2) system call to map the file into memory.
 *
 * @param pfile  file structure to map
 * @param vma    virtual memory area describing the mapping
 *
 * @return 0 on success or a negative error code
 */
int yramfs_file_mmap(struct file * pfile, struct vm_area_struct * vma)
{
    DBG_PRINT("mmap");
    return generic_file_mmap(pfile, vma);
}