Code example #1
/*
 * invalidate part or all of a page
 * - release a page and clean up its private data if offset is 0 (indicating
 *   the entire page)
 */
static void afs_invalidatepage(struct page *page, unsigned long offset)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0) {
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
			ClearPageFsCache(page);
		}
#endif

		if (PagePrivate(page)) {
			if (wb && !PageWriteback(page)) {
				set_page_private(page, 0);
				afs_put_writeback(wb);
			}

			if (!page_private(page))
				ClearPagePrivate(page);
		}
	}

	_leave("");
}
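
The writeback record freed here is attached in fs/afs/write.c. A minimal sketch of that attach side, assuming the caller already holds a reference on the afs_writeback which page_private then owns until invalidate or release drops it; the helper name below is hypothetical (in the real tree these two lines sit inline in the write path):

/* sketch only: afs_attach_writeback is not a real kernel symbol */
static void afs_attach_writeback(struct page *page, struct afs_writeback *wb)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long) wb);
}
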
Code example #2
File: file.c  Project: flwh/Alcatel_OT_985_kernel
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
Code example #3
File: bitmap.c  Project: Mr-Aloof/wl500g
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}
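
This helper undoes, step for step, the generic attach_page_buffers() from include/linux/buffer_head.h, which in kernels of this vintage reads roughly:

static inline void attach_page_buffers(struct page *page,
				       struct buffer_head *head)
{
	page_cache_get(page);	/* released by page_cache_release() above */
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}
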
Code example #4
File: data.c  Project: aejsmith/linux
void f2fs_invalidate_page(struct page *page, unsigned int offset,
                          unsigned int length)
{
    struct inode *inode = page->mapping->host;
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

    if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
            (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
        return;

    if (PageDirty(page)) {
        if (inode->i_ino == F2FS_META_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_META);
        else if (inode->i_ino == F2FS_NODE_INO(sbi))
            dec_page_count(sbi, F2FS_DIRTY_NODES);
        else
            inode_dec_dirty_pages(inode);
    }

    /* This is atomic written page, keep Private */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return;

    ClearPagePrivate(page);
}
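
For context, ->invalidatepage implementations such as this one are reached through the generic dispatcher in mm/truncate.c, which in kernels of this era looks approximately like:

void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
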
Code example #5
File: crypto.c  Project: mansr/linux-tangox
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
Code example #6
File: crypto.c  Project: Menpiko/SnaPKernel-N6P
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}
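
Examples #5 and #6 show the same pattern before and after the ext4 crypto code moved into fs/crypto/: a bounce page carries its crypto context in page_private while I/O is in flight. The attach side, abridged roughly from fscrypt_encrypt_page()/ext4_encrypt() of the same era (ciphertext_page being the bounce page taken from the context's pool):

	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);	/* unlocked again by the restore path above */
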
Code example #7
static int
__change_page_attr(struct page *page, pgprot_t prot)
{ 
	pte_t *kpte; 
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { 
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
			set_pte_atomic(kpte, mk_pte(page, prot)); 
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { 
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
} 
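
The bare page_private(kpte_page)++/-- arithmetic above works because, in the kernels this snippet comes from, the accessors are plain lvalue macros over struct page (include/linux/mm.h):

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

Here the field serves as a count of PTEs in the split kernel page-table page that differ from PAGE_KERNEL; when it drops back to zero, the page is queued on df_list and the large mapping is reverted.
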
Code example #8
File: data.c  Project: aejsmith/linux
int f2fs_release_page(struct page *page, gfp_t wait)
{
    /* If this is dirty page, keep PagePrivate */
    if (PageDirty(page))
        return 0;

    /* This is atomic written page, keep Private */
    if (IS_ATOMIC_WRITTEN_PAGE(page))
        return 0;

    ClearPagePrivate(page);
    return 1;
}
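
->releasepage handlers like this one are called from try_to_release_page() in mm/filemap.c, which in this era reads approximately:

int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

A zero return tells the caller the page and its private state must be kept; nonzero means the private data is gone and the page may be freed.
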
Code example #9
File: grant-table.c  Project: DenisLug/mptcp
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
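
The matching allocator in the same grant-table.c explains the conditional kfree() above: on 32-bit, the foreign-owner record does not fit into struct page itself, so a separately allocated structure is parked in page_private. Approximately:

int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int i;
	int ret;

	ret = alloc_xenballooned_pages(nr_pages, pages, false);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign) {
			gnttab_free_pages(nr_pages, pages);
			return -ENOMEM;
		}
		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}
	return 0;
}
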
Code example #10
File: file.c  Project: xf739645524/kernel-rhel5
/*
 * release a page and cleanup its private data
 */
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
	_enter("{%lu},%x", page->index, gfp_flags);

#ifdef CONFIG_AFS_FSCACHE
	wait_on_page_fs_misc(page);
	fscache_uncache_page(AFS_FS_I(page->mapping->host)->cache, page);
	ClearPagePrivate(page);
#endif

	/* indicate that the page can be released */
	_leave(" = 1");
	return 1;

} /* end afs_file_releasepage() */
Code example #11
File: p2m.c  Project: abinnj009/ubuntu-vivid
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_map_grant_ref *kmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;
	bool lazy = false;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops &&
	    !in_interrupt() &&
	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
		arch_enter_lazy_mmu_mode();
		lazy = true;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
			ret = -EINVAL;
			goto out;
		}

		set_page_private(pages[i], INVALID_P2M_ENTRY);
		WARN_ON(!PagePrivate(pages[i]));
		ClearPagePrivate(pages[i]);
		set_phys_to_machine(pfn, pages[i]->index);

		if (kmap_ops)
			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
		if (ret)
			goto out;
	}

out:
	if (lazy)
		arch_leave_lazy_mmu_mode();
	return ret;
}
Code example #12
File: write.c  Project: friackazoid/linux-2.6
/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	struct nfs_inode *nfsi = NFS_I(inode);

	BUG_ON(!NFS_WBACK_BUSY(req));

	spin_lock(&inode->i_lock);
	set_page_private(req->wb_page, 0);
	ClearPagePrivate(req->wb_page);
	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&inode->i_lock);
		iput(inode);
	} else
		spin_unlock(&inode->i_lock);
	nfs_clear_request(req);
	nfs_release_request(req);
}
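
The request removed here was published by nfs_inode_add_request() in the same write.c; an abridged sketch of that attach side (radix-tree preload, request locking, and error handling elided):

	spin_lock(&inode->i_lock);
	radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	if (!nfsi->npages)
		igrab(inode);	/* balanced by the iput() on removal */
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	spin_unlock(&inode->i_lock);
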
Code example #13
/*
 * release a page and cleanup its private data
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	struct afs_writeback *wb;

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	if (PagePrivate(page)) {
		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		afs_put_writeback(wb);
	}

	_leave(" = 0");
	return 0;
}
Code example #14
/*
 * It only removes the dentry from the dentry page, corresponding name
 * entry in name page does not need to be touched during deletion.
 */
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                       struct inode *dir, struct inode *inode)
{
    struct f2fs_dentry_block *dentry_blk;
    unsigned int bit_pos;
    int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
    int i;

    if (f2fs_has_inline_dentry(dir))
        return f2fs_delete_inline_entry(dentry, page, dir, inode);

    lock_page(page);
    f2fs_wait_on_page_writeback(page, DATA);

    dentry_blk = page_address(page);
    bit_pos = dentry - dentry_blk->dentry;
    for (i = 0; i < slots; i++)
        test_and_clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);

    /* Let's check and deallocate this dentry page */
    bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                               NR_DENTRY_IN_BLOCK,
                               0);
    kunmap(page); /* kunmap - pair of f2fs_find_entry */
    set_page_dirty(page);

    dir->i_ctime = dir->i_mtime = CURRENT_TIME;

    if (inode)
        f2fs_drop_nlink(dir, inode, NULL);

    if (bit_pos == NR_DENTRY_IN_BLOCK) {
        truncate_hole(dir, page->index, page->index + 1);
        clear_page_dirty_for_io(page);
        ClearPagePrivate(page);
        ClearPageUptodate(page);
        inode_dec_dirty_pages(dir);
    }
    f2fs_put_page(page, 1);
}
Code example #15
File: file.c  Project: WiseMan787/ralink_sdk
/*
 * release a page and cleanup its private data
 */
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct cachefs_page *pageio;

	_enter("{%lu},%x", page->index, gfp_flags);

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif

		pageio = (struct cachefs_page *) page_private(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);

		kfree(pageio);
	}

	_leave(" = 0");
	return 0;
} /* end afs_file_releasepage() */
Code example #16
File: userfaultfd.c  Project: markus-oberhumer/linux
/*
 * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held, it will release mmap_sem before returning.
 */
static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
					      struct vm_area_struct *dst_vma,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
	int vm_shared = dst_vma->vm_flags & VM_SHARED;
	ssize_t err;
	pte_t *dst_pte;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;
	struct hstate *h;
	unsigned long vma_hpagesize;
	pgoff_t idx;
	u32 hash;
	struct address_space *mapping;

	/*
	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
	 * by THP.  Since we cannot reliably insert a zero page, this
	 * feature is not supported.
	 */
	if (zeropage) {
		up_read(&dst_mm->mmap_sem);
		return -EINVAL;
	}

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
	vma_hpagesize = vma_kernel_pagesize(dst_vma);

	/*
	 * Validate alignment based on huge page size
	 */
	err = -EINVAL;
	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
		goto out_unlock;

retry:
	/*
	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
	 * retry, dst_vma will be set to NULL and we must look it up again.
	 */
	if (!dst_vma) {
		err = -ENOENT;
		dst_vma = find_vma(dst_mm, dst_start);
		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
			goto out_unlock;
		/*
		 * Check that the vma is registered in uffd; this is
		 * required to enforce the VM_MAYWRITE check done at
		 * uffd registration time.
		 */
		if (!dst_vma->vm_userfaultfd_ctx.ctx)
			goto out_unlock;

		if (dst_start < dst_vma->vm_start ||
		    dst_start + len > dst_vma->vm_end)
			goto out_unlock;

		err = -EINVAL;
		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
			goto out_unlock;

		vm_shared = dst_vma->vm_flags & VM_SHARED;
	}

	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
		    (len - copied) & (vma_hpagesize - 1)))
		goto out_unlock;

	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
	 */
	err = -ENOMEM;
	if (!vm_shared) {
		if (unlikely(anon_vma_prepare(dst_vma)))
			goto out_unlock;
	}

	h = hstate_vma(dst_vma);

	while (src_addr < src_start + len) {
		pte_t dst_pteval;

		BUG_ON(dst_addr >= dst_start + len);
		VM_BUG_ON(dst_addr & ~huge_page_mask(h));

		/*
		 * Serialize via hugetlb_fault_mutex
		 */
		idx = linear_page_index(dst_vma, dst_addr);
		mapping = dst_vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
								idx, dst_addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		err = -ENOMEM;
		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
		if (!dst_pte) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = -EEXIST;
		dst_pteval = huge_ptep_get(dst_pte);
		if (!huge_pte_none(dst_pteval)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out_unlock;
		}

		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
						dst_addr, src_addr, &page);

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		vm_alloc_shared = vm_shared;

		cond_resched();

		if (unlikely(err == -ENOENT)) {
			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			err = copy_huge_page_from_user(page,
						(const void __user *)src_addr,
						pages_per_huge_page(h), true);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			down_read(&dst_mm->mmap_sem);

			dst_vma = NULL;
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += vma_hpagesize;
			src_addr += vma_hpagesize;
			copied += vma_hpagesize;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page) {
		/*
		 * We encountered an error and are about to free a newly
		 * allocated huge page.
		 *
		 * Reservation handling is very subtle, and is different for
		 * private and shared mappings.  See the routine
		 * restore_reserve_on_error for details.  Unfortunately, we
		 * can not call restore_reserve_on_error now as it would
		 * require holding mmap_sem.
		 *
		 * If a reservation for the page existed in the reservation
		 * map of a private mapping, the map was modified to indicate
		 * the reservation was consumed when the page was allocated.
		 * We clear the PagePrivate flag now so that the global
		 * reserve count will not be incremented in free_huge_page.
		 * The reservation map will still indicate the reservation
		 * was consumed and possibly prevent later page allocation.
		 * This is better than leaking a global reservation.  If no
		 * reservation existed, it is still safe to clear PagePrivate
		 * as no adjustments to reservation counts were made during
		 * allocation.
		 *
		 * The reservation map for shared mappings indicates which
		 * pages have reservations.  When a huge page is allocated
		 * for an address with a reservation, no change is made to
		 * the reserve map.  In this case PagePrivate will be set
		 * to indicate that the global reservation count should be
		 * incremented when the page is freed.  This is the desired
		 * behavior.  However, when a huge page is allocated for an
		 * address without a reservation a reservation entry is added
		 * to the reservation map, and PagePrivate will not be set.
		 * When the page is freed, the global reserve count will NOT
		 * be incremented and it will appear as though we have leaked a
		 * reserved page.  In this case, set PagePrivate so that the
		 * global reserve count will be incremented to match the
		 * reservation map entry which was created.
		 *
		 * Note that vm_alloc_shared is based on the flags of the vma
		 * for which the page was originally allocated.  dst_vma could
		 * be different or NULL on error.
		 */
		if (vm_alloc_shared)
			SetPagePrivate(page);
		else
			ClearPagePrivate(page);
		put_page(page);
	}
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}
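
The long comment above describes the consumer of the flag: free_huge_page() in mm/hugetlb.c of the same era uses PagePrivate to decide whether to hand a reservation back to the hstate. Abridged:

	/* abridged from free_huge_page() */
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);
	/* ... freeing and accounting under hugetlb_lock ... */
	if (restore_reserve)
		h->resv_huge_pages++;
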
Code example #17
File: file.c  Project: xf739645524/kernel-rhel5
/*
 * AFS read page from file (or symlink)
 */
static int afs_file_readpage(struct file *file, struct page *page)
{
	struct afs_rxfs_fetch_descriptor desc;
	struct afs_vnode *vnode;
	struct inode *inode;
	int ret;

	inode = page->mapping->host;

	_enter("{%lu},%p{%lu}", inode->i_ino, page, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (vnode->flags & AFS_VNODE_DELETED)
		goto error;

#ifdef CONFIG_AFS_FSCACHE
	/* is it cached? */
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");
	default:
	go_on:
		desc.fid	= vnode->fid;
		desc.offset	= page->index << PAGE_CACHE_SHIFT;
		desc.size	= min((size_t) (inode->i_size - desc.offset),
				      (size_t) PAGE_SIZE);
		desc.buffer	= kmap(page);

		clear_page(desc.buffer);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, &desc);
		kunmap(page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				kdebug("got NOENT from server"
				       " - marking file deleted and stale");
				vnode->flags |= AFS_VNODE_DELETED;
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
			ClearPagePrivate(page);
#endif
			goto error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PagePrivate(page)) {
			if (TestSetPageFsMisc(page))
				BUG();
			if (fscache_write_page(vnode->cache,
					       page,
					       afs_file_readpage_write_complete,
					       NULL,
					       GFP_KERNEL) != 0
			    ) {
				fscache_uncache_page(vnode->cache, page);
				ClearPagePrivate(page);
				end_page_fs_misc(page);
			}
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

 error:
	SetPageError(page);
	unlock_page(page);

	_leave(" = %d", ret);
	return ret;

} /* end afs_file_readpage() */
Code example #18
int m2p_remove_override(struct page *page, bool clear_pte)
{
	unsigned long flags;
	unsigned long mfn;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;
	int ret = 0;

	pfn = page_to_pfn(page);
	mfn = get_phys_to_machine(pfn);
	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
		return -EINVAL;

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
					"m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);
	WARN_ON(!PagePrivate(page));
	ClearPagePrivate(page);

	if (clear_pte) {
		struct gnttab_map_grant_ref *map_op =
			(struct gnttab_map_grant_ref *) page->index;
		set_phys_to_machine(pfn, map_op->dev_bus_addr);
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_grant_ref *unmap_op;

			/*
			 * It might be that we queued all the m2p grant table
			 * hypercalls in a multicall, then m2p_remove_override
			 * gets called before the multicall has actually been
			 * issued. In this case the handle is going to be -1
			 * because it hasn't been modified yet.
			 */
			if (map_op->handle == -1)
				xen_mc_flush();
			/*
			 * Now if map_op->handle is negative it means that the
			 * hypercall actually returned an error.
			 */
			if (map_op->handle == GNTST_general_error) {
				printk(KERN_WARNING "m2p_remove_override: "
						"pfn %lx mfn %lx, failed to modify kernel mappings",
						pfn, mfn);
				return -1;
			}

			mcs = xen_mc_entry(
					sizeof(struct gnttab_unmap_grant_ref));
			unmap_op = mcs.args;
			unmap_op->host_addr = map_op->host_addr;
			unmap_op->handle = map_op->handle;
			unmap_op->dev_bus_addr = 0;

			MULTI_grant_table_op(mcs.mc,
					GNTTABOP_unmap_grant_ref, unmap_op, 1);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			set_pte_at(&init_mm, address, ptep,
					pfn_pte(pfn, PAGE_KERNEL));
			__flush_tlb_single(address);
			map_op->host_addr = 0;
		}
	} else
		set_phys_to_machine(pfn, page->index);

	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
	 * somewhere in this domain, even before being added to the
	 * m2p_override (see comment above in m2p_add_override).
	 * If there are no other entries in the m2p_override corresponding
	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
	 * the original pfn (the one shared by the frontend): the backend
	 * cannot do any IO on this page anymore because it has been
	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
	ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
	if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}
Code example #19
File: write.c  Project: friackazoid/linux-2.6
/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
	struct writeback_control wbc = {
		.bdi = mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret = 0;

	BUG_ON(!PageLocked(page));
	for (;;) {
		req = nfs_page_find_request(page);
		if (req == NULL)
			goto out;
		if (test_bit(PG_CLEAN, &req->wb_flags)) {
			nfs_release_request(req);
			break;
		}
		if (nfs_lock_request_dontget(req)) {
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		if (ret < 0)
			goto out;
	}
	if (!PagePrivate(page))
		return 0;
	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
	return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
				int how)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	do {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		} else if (!PagePrivate(page))
			break;
		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
		if (ret < 0)
			goto out_error;
	} while (PagePrivate(page));
	return 0;
out_error:
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page* page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page)
{
	struct nfs_page *req;
	int ret;

	if (PageFsCache(page))
		nfs_fscache_release_page(page, GFP_KERNEL);

	req = nfs_find_and_lock_request(page);
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	ret = migrate_page(mapping, newpage, page);
	if (!req)
		goto out;
	if (ret)
		goto out_unlock;
	page_cache_get(newpage);
	req->wb_page = newpage;
	SetPagePrivate(newpage);
	set_page_private(newpage, page_private(page));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
out_unlock:
	nfs_clear_page_tag_locked(req);
	nfs_release_request(req);
out:
	return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_write_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		return -ENOMEM;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_wdata_cachep);
	if (nfs_commit_mempool == NULL)
		return -ENOMEM;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;
}
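
As a sanity check on the table in the comment: for a 256 MB machine with 4 KiB pages, totalram_pages is 65536, int_sqrt(65536) is 256, and (16 * 256) << (PAGE_SHIFT - 10) = 4096 << 2 = 16384k, matching the 256MB row.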

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}