Example #1
static unsigned int shrink_pages(struct mm_struct *mm,
				 struct list_head *zone0_page_list,
				 struct list_head *zone1_page_list,
				 unsigned int num_to_scan)
{
	unsigned long addr;
	unsigned int isolate_pages_counter = 0;

	struct vm_area_struct *vma = mm->mmap;
	while (vma != NULL) {

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			/* look up the struct page backing this virtual address */
			page = follow_page(vma, addr, FOLL_GET);

			if (page && !IS_ERR(page)) {

				/* drop the extra reference taken by follow_page() */
				put_page(page);
				/* only movable, anonymous, non-dirty pages can be swapped */
				if (!PageUnevictable(page) && !PageDirty(page) &&
				    PageAnon(page) && page_is_file_cache(page) == 0) {
					switch (page_zone_id(page)) {
					case 0:
						if (!isolate_lru_page_compcache(page)) {
							/* isolated from the LRU; queue it on the
							 * zone 0 list for shrink_page_list() */
							list_add_tail(&page->lru, zone0_page_list);
							isolate_pages_counter++;
						}
						break;
					case 1:
						if (!isolate_lru_page_compcache(page)) {
							/* isolated from the LRU; queue it on the
							 * zone 1 list for shrink_page_list() */
							list_add_tail(&page->lru, zone1_page_list);
							isolate_pages_counter++;
						}
						break;
					default:
						break;
					}
				}
			}

			if (isolate_pages_counter >= num_to_scan)
				return isolate_pages_counter;
		}

		vma = vma->vm_next;
	}

	return isolate_pages_counter;
}
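
Every example on this page keys off PageAnon(). For reference, a minimal sketch of its classic definition (cf. include/linux/mm.h; the exact form varies across kernel versions): anonymous pages set the low bit of page->mapping, which then points to an anon_vma instead of an address_space.

#define PAGE_MAPPING_ANON	0x1

static inline int PageAnon(struct page *page)
{
	/* anon pages store an anon_vma pointer in page->mapping with the
	 * low bit set; file pages store a plain address_space pointer */
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}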
Example #2
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:      vma that holds the pte pointing to old_page
 * @addr:     address the old page is mapped at
 * @old_page: the COWed page we are replacing
 * @new_page: the modified page that replaces it
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *old_page, struct page *new_page)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;
	struct mem_cgroup *memcg;

	err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, &memcg,
			false);
	if (err)
		return err;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(old_page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	ptep = page_check_address(old_page, mm, addr, &ptl, 0);
	if (!ptep) {
		mem_cgroup_cancel_charge(new_page, memcg, false);
		goto unlock;
	}

	get_page(new_page);
	page_add_new_anon_rmap(new_page, vma, addr, false);
	mem_cgroup_commit_charge(new_page, memcg, false, false);
	lru_cache_add_active_or_unevictable(new_page, vma);

	if (!PageAnon(old_page)) {
		dec_mm_counter(mm, mm_counter_file(old_page));
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(new_page, vma->vm_page_prot));

	page_remove_rmap(old_page, false);
	if (!page_mapped(old_page))
		try_to_free_swap(old_page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(old_page);
	put_page(old_page);

	err = 0;
 unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(old_page);
	return err;
}
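
A hedged sketch of how a caller typically drives __replace_page(), modelled on the uprobes breakpoint-patching path (the helper name patch_page_at and its details are illustrative, not the exact kernel code): duplicate the old page, patch the copy, then swap it in.

static int patch_page_at(struct vm_area_struct *vma, unsigned long vaddr,
			 struct page *old_page, const void *insn, int len)
{
	struct page *new_page;
	void *kaddr;
	int err;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		return -ENOMEM;

	/* copy the original contents, then patch just the needed bytes */
	copy_highpage(new_page, old_page);
	kaddr = kmap_atomic(new_page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), insn, len);
	kunmap_atomic(kaddr);

	err = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
	put_page(new_page);
	return err;
}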
Example #3
static inline int io_debug_precheck_save(struct page *page)
{
	if (unlikely(PageAnon(page))) {
		anon_pages++;
		return 1;
	}

	return 0;
}
Example #4
File: swap.c Project: Lyude/linux
/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		put_cpu_var(lru_lazyfree_pvecs);
	}
}
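
From userspace this path is reached via madvise(MADV_FREE) (Linux 4.5+), whose page-table walk ends up calling mark_page_lazyfree() on eligible anonymous pages. A minimal runnable demo:

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return EXIT_FAILURE;

	memset(buf, 0xab, len);		/* dirty the anonymous pages */

	/* pages may now be reclaimed without writeback; writing to them
	 * again cancels the lazy free */
	if (madvise(buf, len, MADV_FREE))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}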
Example #5
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long flags;

	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
Example #6
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
static unsigned long mr_page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
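
The final vma_address() call does the actual arithmetic. A hedged sketch of what it computes in kernels of this era (cf. mm/rmap.c; the sketch_ name is illustrative):

static unsigned long sketch_vma_address(struct page *page,
					struct vm_area_struct *vma)
{
	/* page->index is the page's offset within the mapping, fixed at
	 * fault time for anonymous pages */
	pgoff_t pgoff = page->index;
	unsigned long address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

	if (address < vma->vm_start || address >= vma->vm_end)
		return -EFAULT;	/* page is not mapped inside this vma */
	return address;
}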
Example #7
static int bc_io_show(struct seq_file *f, void *v)
{
	struct list_head *lh;
	struct page_beancounter *pb;
	struct page *pg;

	lh = (struct list_head *)v;
	if (lh == &pb_io_list) {
		seq_printf(f, "Races: anon %lu missed %lu\n",
				anon_pages, not_released);

		seq_printf(f, "%-*s %-1s %-*s %-4s %*s %*s "
				"%-*s %-*s %-1s %-*s %-*s\n",
				PTR_SIZE, "pb", "",
				PTR_SIZE, "page", "flg",
				INT_SIZE, "cnt", INT_SIZE, "mcnt",
				PTR_SIZE, "pb_list",
				PTR_SIZE, "page_pb", "",
				PTR_SIZE, "mapping",
				INT_SIZE, "ub");
		return 0;
	}

	pb = list_entry(lh, struct page_beancounter, io_list);
	pg = pb->page;
	seq_printf(f, "%p %c %p %c%c%c%c %*d %*d %p %p %c %p %d\n",
			pb, pb->io_debug ? 'e' : 'm', pg,
			PageDirty(pg) ? 'D' : 'd',
			PageAnon(pg) ? 'A' : 'a',
			PageWriteback(pg) ? 'W' : 'w',
			PageLocked(pg) ? 'L' : 'l',
			INT_SIZE, page_count(pg),
			INT_SIZE, page_mapcount(pg),
			pb->page_pb_list, page_pbc(pg),
			iopb_to_pb(page_pbc(pg)) == pb ? ' ' : '!',
			pg->mapping, pb->ub->ub_uid);
	return 0;
}
Example #8
File: swap.c Project: Lyude/linux
static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * lazyfree pages are clean anonymous pages. They have
		 * SwapBacked flag cleared to distinguish normal anonymous
		 * pages
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
		count_memcg_page_event(page, PGLAZYFREE);
		update_page_reclaim_stat(lruvec, 1, 0);
	}
}
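
Clearing PG_swapbacked is what reclaim later keys on. A hypothetical helper mirroring the test shrink_page_list() in mm/vmscan.c uses to recognize a lazyfreed page:

static inline bool page_is_lazyfree(struct page *page)
{
	/* anonymous but no longer swap-backed: reclaim may discard it
	 * instead of writing it to swap */
	return PageAnon(page) && !PageSwapBacked(page);
}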
Example #9
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapcount() is not enough.
	 */
	if (!PageSlab(page) && page_mapcount(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;
	/*
	 * PageTransCompound can be true for non-huge compound pages (slab
	 * pages or pages allocated by drivers with __GFP_COMP) because it
	 * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
	 * to make sure a given page is a thp, not a non-huge compound page.
	 */
	else if (PageTransCompound(page)) {
		struct page *head = compound_head(page);

		if (PageLRU(head) || PageAnon(head))
			u |= 1 << KPF_THP;
		else if (is_huge_zero_page(head)) {
			u |= 1 << KPF_ZERO_PAGE;
			u |= 1 << KPF_THP;
		}
	} else if (is_zero_pfn(page_to_pfn(page)))
		u |= 1 << KPF_ZERO_PAGE;


	/*
	 * Caveats on high order pages: page->_count will only be set
	 * -1 on the head page; SLUB/SLQB do the same for PG_slab;
	 * SLOB won't set PG_slab at all on compound pages.
	 */
	if (PageBuddy(page))
		u |= 1 << KPF_BUDDY;

	if (PageBalloon(page))
		u |= 1 << KPF_BALLOON;

	if (page_is_idle(page))
		u |= 1 << KPF_IDLE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}
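
stable_page_flags() backs /proc/kpageflags, which exports one little-endian u64 of KPF_* bits per PFN. A minimal userspace reader (needs root; KPF_ANON is bit 12 per include/uapi/linux/kernel-page-flags.h):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	uint64_t pfn, flags;
	FILE *f;

	if (argc != 2)
		return EXIT_FAILURE;
	pfn = strtoull(argv[1], NULL, 0);

	f = fopen("/proc/kpageflags", "rb");
	if (!f)
		return EXIT_FAILURE;
	if (fseek(f, (long)(pfn * sizeof(flags)), SEEK_SET) ||
	    fread(&flags, sizeof(flags), 1, f) != 1)
		return EXIT_FAILURE;

	printf("pfn %llu: flags 0x%016llx anon=%d\n",
	       (unsigned long long)pfn, (unsigned long long)flags,
	       (int)((flags >> 12) & 1));	/* bit 12 == KPF_ANON */
	fclose(f);
	return 0;
}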
Example #10
u64 stable_page_flags(struct page *page)
{
	u64 k;
	u64 u;

	/*
	 * pseudo flag: KPF_NOPAGE
	 * it differentiates a memory hole from a page with no flags
	 */
	if (!page)
		return 1 << KPF_NOPAGE;

	k = page->flags;
	u = 0;

	/*
	 * pseudo flags for the well known (anonymous) memory mapped pages
	 *
	 * Note that page->_mapcount is overloaded in SLOB/SLUB/SLQB, so the
	 * simple test in page_mapped() is not enough.
	 */
	if (!PageSlab(page) && page_mapped(page))
		u |= 1 << KPF_MMAP;
	if (PageAnon(page))
		u |= 1 << KPF_ANON;
	if (PageKsm(page))
		u |= 1 << KPF_KSM;

	/*
	 * compound pages: export both head/tail info
	 * they together define a compound page's start/end pos and order
	 */
	if (PageHead(page))
		u |= 1 << KPF_COMPOUND_HEAD;
	if (PageTail(page))
		u |= 1 << KPF_COMPOUND_TAIL;
	if (PageHuge(page))
		u |= 1 << KPF_HUGE;

	u |= kpf_copy_bit(k, KPF_LOCKED,	PG_locked);

	/*
	 * Caveats on high order pages:
	 * PG_buddy will only be set on the head page; SLUB/SLQB do the same
	 * for PG_slab; SLOB won't set PG_slab at all on compound pages.
	 */
	u |= kpf_copy_bit(k, KPF_SLAB,		PG_slab);
	u |= kpf_copy_bit(k, KPF_BUDDY,		PG_buddy);

	u |= kpf_copy_bit(k, KPF_ERROR,		PG_error);
	u |= kpf_copy_bit(k, KPF_DIRTY,		PG_dirty);
	u |= kpf_copy_bit(k, KPF_UPTODATE,	PG_uptodate);
	u |= kpf_copy_bit(k, KPF_WRITEBACK,	PG_writeback);

	u |= kpf_copy_bit(k, KPF_LRU,		PG_lru);
	u |= kpf_copy_bit(k, KPF_REFERENCED,	PG_referenced);
	u |= kpf_copy_bit(k, KPF_ACTIVE,	PG_active);
	u |= kpf_copy_bit(k, KPF_RECLAIM,	PG_reclaim);

	u |= kpf_copy_bit(k, KPF_SWAPCACHE,	PG_swapcache);
	u |= kpf_copy_bit(k, KPF_SWAPBACKED,	PG_swapbacked);

	u |= kpf_copy_bit(k, KPF_UNEVICTABLE,	PG_unevictable);
	u |= kpf_copy_bit(k, KPF_MLOCKED,	PG_mlocked);

#ifdef CONFIG_MEMORY_FAILURE
	u |= kpf_copy_bit(k, KPF_HWPOISON,	PG_hwpoison);
#endif

#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
	u |= kpf_copy_bit(k, KPF_UNCACHED,	PG_uncached);
#endif

	u |= kpf_copy_bit(k, KPF_RESERVED,	PG_reserved);
	u |= kpf_copy_bit(k, KPF_MAPPEDTODISK,	PG_mappedtodisk);
	u |= kpf_copy_bit(k, KPF_PRIVATE,	PG_private);
	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);

	return u;
}
Example #11
static int get_pages (struct task_struct *task, unsigned long start,
		      unsigned long nr_pages,
		      unsigned long page_bit_map[PAGE_BITS],
		      struct page *pages[PAGE_QUEUE],
		      struct vm_area_struct *vmas[PAGE_QUEUE])
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long addr = start, nr;
	int i = 0, j, rv;

	while (i < nr_pages) {
		int last = i;

		if (test_bit(last, page_bit_map)) {
			i = find_next_zero_bit(page_bit_map, PAGE_QUEUE,
					       last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;

			DEBUG("GIPC_SEND get_user_pages %ld pages at %lx\n",
			      addr, nr);

			rv = __get_user_pages(task, mm, addr, nr,
					      FOLL_GET|FOLL_FORCE|FOLL_SPLIT,
					      pages + last, vmas + last, NULL);

			if (rv <= 0) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx-0x%016lx\n",
				       addr, addr + (nr << PAGE_SHIFT));
				return rv;
			}

			if (rv != nr) {
				printk(KERN_ERR "Graphene error: "
				       "get_user_pages at 0x%016lx\n",
				       addr + (rv << PAGE_SHIFT));
				return -EACCES;
			}

			for (j = 0; j < nr; j++) {
				/* Mark source COW */
				rv = make_page_cow(mm, vmas[last + j],
						   addr + (j << PAGE_SHIFT));
				if (rv)
					return rv;

				if (PageAnon(pages[last + j])) {
					/* Fix up the counters */
					inc_mm_counter_fast(mm, MM_FILEPAGES);
					dec_mm_counter_fast(mm, MM_ANONPAGES);
				}

				pages[last + j]->mapping = NULL;
			}

			vma = vmas[i - 1];
			addr += nr << PAGE_SHIFT;
		} else {
			/* This is the case where a page (or pages) are not
			 * currently mapped.
			 * Handle the hole appropriately. */
			i = find_next_bit(page_bit_map, PAGE_QUEUE, last + 1);
			if (i > nr_pages)
				i = nr_pages;
			nr = i - last;

			DEBUG("GIPC_SEND skip %ld pages at %lx\n", addr, nr);

			for (j = 0; j < nr; j++) {
				if (!vma) {
					vma = find_vma(mm, addr);
				} else {
					/* DEP 6/17/13 - these addresses should
					 * be monotonically increasing. */
					for (; vma && addr >= vma->vm_end;
					     vma = vma->vm_next);

					/* Leverage monotonic increasing vmas
					 * to more quickly detect holes in the
					 * address space. */
					if (vma && addr < vma->vm_start)
						vma = NULL;
				}

				pages[last + j] = NULL;
				vmas[last + j] = vma;
				addr += PAGE_SIZE;
			}
		}
	}

	return i;
}
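
The six-argument __get_user_pages() above is version-specific. For contrast, a hedged sketch against the newer flag-based get_user_pages_fast() interface (signatures have shifted across kernel versions, so treat this as illustrative):

static int pin_one_user_page(unsigned long uaddr, struct page **pagep)
{
	/* returns the number of pages pinned, or a negative errno */
	int got = get_user_pages_fast(uaddr & PAGE_MASK, 1, FOLL_WRITE,
				      pagep);

	if (got < 0)
		return got;
	return got == 1 ? 0 : -EFAULT;
}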