Example #1
File: swap.c Project: Lyude/linux
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
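A note on the transitions documented above: mark_page_accessed() is effectively a two-bit state machine over PG_referenced and PG_active. The standalone sketch below is only an illustration of those transitions (hypothetical names, plain userspace C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct page: only the two flag bits that matter here. */
struct fake_page {
	bool referenced;	/* PG_referenced */
	bool active;		/* PG_active */
};

/*
 * Mirrors the documented transitions:
 *   inactive,unreferenced -> inactive,referenced
 *   inactive,referenced   -> active,unreferenced
 *   active,unreferenced   -> active,referenced
 */
static void fake_mark_page_accessed(struct fake_page *p)
{
	if (!p->active && p->referenced) {
		p->active = true;	/* stands in for activate_page() */
		p->referenced = false;
	} else if (!p->referenced) {
		p->referenced = true;
	}
}

int main(void)
{
	struct fake_page p = { false, false };
	int i;

	for (i = 1; i <= 3; i++) {
		fake_mark_page_accessed(&p);
		printf("access %d: %s,%s\n", i,
		       p.active ? "active" : "inactive",
		       p.referenced ? "referenced" : "unreferenced");
	}
	return 0;
}

Running it prints "inactive,referenced", then "active,unreferenced", then "active,referenced", matching the three transitions listed in the header comment.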
Example #2
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void fastcall mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
Example #3
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
Example #4
/*
 * mark_page_accessed: page-access state transitions
 * First access to an inactive page:  inactive,unreferenced	->	inactive,referenced
 * Second access to an inactive page: inactive,referenced	->	active,unreferenced
 * First access to an active page:    active,unreferenced	->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		/* Second access to an inactive page:
		 * move it from the inactive list to the
		 * active list and leave it unreferenced. */
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		/* First access to an inactive page, or an access to an active page:
		 * only set PG_referenced. */
		SetPageReferenced(page);
	}
}
Example #5
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}
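The mapcount >= 2 branch above is the PSS ("proportional set size") calculation: a shared page is charged to each mapper as size/mapcount, kept in fixed-point units of 1/2^PSS_SHIFT so the division does not round away sub-page amounts. A minimal sketch of that arithmetic (PSS_SHIFT assumed to be 12 here, as defined in fs/proc/task_mmap.c):

#include <stdint.h>
#include <stdio.h>

#define PSS_SHIFT 12	/* fixed-point scale used for the pss accumulator */

int main(void)
{
	uint64_t size = 4096;	/* one 4 KiB page */
	int mapcount = 3;	/* mapped by three processes */

	/* Same arithmetic as smaps_account(): charge each mapper size/mapcount,
	 * scaled up by 2^PSS_SHIFT until the final report. */
	uint64_t pss_delta = (size << PSS_SHIFT) / mapcount;

	printf("pss_delta (scaled) = %llu\n", (unsigned long long)pss_delta);
	printf("pss_delta (bytes)  = %llu\n",
	       (unsigned long long)(pss_delta >> PSS_SHIFT));	/* ~1365 bytes */
	return 0;
}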
Example #6
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {
		if (PageLRU(page))
			activate_page(page);
		else if (PageIONBacked(page))
			SetPageActive(page);
		else
			return;
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
		if (PageIONBacked(page) && PageActive(page))
			ion_activate_page(page);
	}
}
Example #7
static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}
Example #8
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
int Jcmd_mem(char *arg1, char *arg2) {
	unsigned long order, flags;
	unsigned long total = 0;

	spin_lock_irqsave(&free_area_lock, flags);
	for (order = 0; order < NR_MEM_LISTS; order++) {
		struct page * tmp;
		unsigned long nr = 0;
		for (tmp = free_mem_area[order].next;
				tmp != memory_head(free_mem_area+order); tmp = tmp->next) {
			nr++;
		}
		total += nr << order;
		ut_printf("%d(%d): count:%d  static count:%d total:%d (%dM)\n", order,1<<order, nr,
				free_mem_area[order].stat_count, (nr << order), ((nr << order)*PAGE_SIZE)/(1024*1024));
	}
	spin_unlock_irqrestore(&free_area_lock, flags);
	ut_printf("total Free pages = %d (%dM) Actual pages: %d (%dM) pagecachesize: %dM , freepages:%d\n", total, (total * 4) / 1024,g_stat_mem_size/PAGE_SIZE,g_stat_mem_size/(1024*1024),g_pagecache_size/(1024*1024),g_nr_free_pages);

	int slab=0;
	int referenced=0;
	int reserved=0;
	int dma=0;
	unsigned long va_end=(unsigned long)__va(g_phy_mem_size);

	page_struct_t *p;
	p = g_mem_map + MAP_NR(va_end);
	do {
		--p;
		if (PageReserved(p)) reserved++;
		if (PageDMA(p)) dma++;
		if (PageReferenced(p))referenced++;
		if (PageSlab(p)) slab++;
	} while (p > g_mem_map);
	ut_printf(" reserved :%d(%dM) referenced:%d dma:%d slab:%d  stat_allocs:%d stat_frees: %d\n\n",reserved,(reserved*PAGE_SIZE)/(1024*1024),referenced,dma,slab,stat_allocs,stat_frees);
	if ((arg1 != 0) && (ut_strcmp(arg1,"all")==0))
		Jcmd_jslab(0,0);
	return 1;
}
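For reference, the "total += nr << order" accounting in the loop above counts each free block at a given order as 2^order pages. A self-contained sketch of that sum, with made-up per-order counts rather than data from any real system:

#include <stdio.h>

#define NR_ORDERS	10
#define PAGE_SIZE	4096UL

int main(void)
{
	/* Hypothetical number of free blocks on each order's free list. */
	unsigned long nr_free[NR_ORDERS] = { 12, 7, 3, 1, 0, 2, 0, 0, 1, 0 };
	unsigned long total = 0;
	unsigned long order;

	for (order = 0; order < NR_ORDERS; order++) {
		unsigned long pages = nr_free[order] << order;	/* 2^order pages per block */

		total += pages;
		printf("order %lu (%lu pages/block): %lu blocks -> %lu pages\n",
		       order, 1UL << order, nr_free[order], pages);
	}
	printf("total free: %lu pages (%lu KiB)\n", total, total * PAGE_SIZE / 1024);
	return 0;
}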
Example #9
unsigned long mm_getFreePages(int gfp_mask, unsigned long order) {
	unsigned long flags;
	unsigned long ret_address;
	unsigned long page_order ;

	stat_allocs++;
	ret_address = 0;
	page_order = order;
	if (order >= NR_MEM_LISTS)
		return ret_address;

	spin_lock_irqsave(&free_area_lock, flags);
	do {
		struct free_mem_area_struct * area = free_mem_area+order;
		unsigned long new_order = order;
		do { struct page *prev = memory_head(area), *ret = prev->next;
			while (memory_head(area) != ret) {
				if ( CAN_DMA(ret)) {
					unsigned long map_nr;
					(prev->next = ret->next)->prev = prev;
					map_nr = ret - g_mem_map;
					MARK_USED(map_nr, new_order, area);
					area->stat_count--;
					g_nr_free_pages -= 1 << order;
					EXPAND(ret, map_nr, order, new_order, area);
					DEBUG(" Page alloc return address: %x mask:%x order:%d \n",ADDRESS(map_nr),gfp_mask,order);
					if (gfp_mask & MEM_CLEAR) ut_memset(ADDRESS(map_nr),0,PAGE_SIZE<<order);
					if (!(gfp_mask & MEM_FOR_CACHE)) memleakHook_alloc(ADDRESS(map_nr),PAGE_SIZE<<order,0,0);
					ret_address = ADDRESS(map_nr);
					goto last;
				}
				prev = ret;
				ret = ret->next;
			}
			new_order++; area++;
		} while (new_order < NR_MEM_LISTS);
	} while (0);


last:
	if (ret_address > 0) {
		unsigned long i = (1 << page_order);
		struct page *page = virt_to_page(ret_address);

		while (i--) {
#ifdef MEMORY_DEBUG
			if (PageReferenced(page)){
				ut_log("Page Backtrace in Alloc page :\n");
				ut_printBackTrace(page->bt_addr_list,MAX_BACKTRACE_LENGTH);
			}
#endif
			assert(!PageReferenced(page));
			PageSetReferenced(page);
#ifdef MEMORY_DEBUG
			ut_storeBackTrace(page->bt_addr_list,MAX_BACKTRACE_LENGTH);
#endif
			page++;
		}
	}
	spin_unlock_irqrestore(&free_area_lock, flags);

	if (ret_address ==0) return ret_address;
	if ((ret_address >= (KADDRSPACE_START+g_phy_mem_size)) || (ret_address < KADDRSPACE_START)){
		ut_log(" ERROR:  frames execeeding the max frames :%x\n",ret_address);
		BUG();
	}

	return ret_address;
}
Example #10
int mm_putFreePages(unsigned long addr, unsigned long order) {
	unsigned long map_nr = MAP_NR(addr);
	int ret = 0;
	int page_order = order;
	unsigned long flags;

	stat_frees++;
#ifdef MEMLEAK_TOOL
	memleakHook_free(addr,0);
#endif
	spin_lock_irqsave(&free_area_lock, flags);
	if (map_nr < g_max_mapnr) {
		page_struct_t * map = g_mem_map + map_nr;
		if (PageReserved(map)) {
			BUG();
		}
		if (PageNetBuf(map)){
			BUG();
		}
#ifdef MEMORY_DEBUG
		if (map->option_data != 0){
			BUG();
		}
#endif
		if (atomic_dec_and_test(&map->count)) {
			if (PageSwapCache(map)){
				ut_log("PANIC Freeing swap cache pages");
				BUG();
			}
		//	map->flags &= ~(1 << PG_referenced);
			_free_pages_ok(map_nr, order);
			if (init_done == 1) {
				DEBUG(" Freeing memory addr:%x order:%d \n", addr, order);
			}else{
			//	BUG();
			}
			ret = 1;
		}
	}else{
		BUG();
	}
last:
	if (ret){
		unsigned long i = (1 << page_order);
		struct page *page = virt_to_page(addr);

		while (i--) {
#ifdef MEMORY_DEBUG
			if (!PageReferenced(page)){
				ut_printf("Page Backtrace in Free Page :\n");
				ut_printBackTrace(page->bt_addr_list,MAX_BACKTRACE_LENGTH);
			}
#endif
			assert(PageReferenced(page));
			PageClearReferenced(page);
#ifdef MEMORY_DEBUG
			ut_storeBackTrace(page->bt_addr_list,MAX_BACKTRACE_LENGTH);
#endif
			page++;
		}
	}else{
		BUG();
	}
	spin_unlock_irqrestore(&free_area_lock, flags);
	return ret;
}
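mm_getFreePages() and mm_putFreePages() above reuse the referenced bit as a per-frame "allocated" guard: the allocator asserts the bit is clear before setting it, and the free path asserts it is set before clearing it, so double allocations and double frees are caught. A minimal sketch of that guard pattern, with hypothetical names and no kernel dependencies:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NFRAMES 8

static bool frame_in_use[NFRAMES];	/* plays the role of PG_referenced above */

static void mark_alloc(int frame)
{
	assert(!frame_in_use[frame]);	/* fires on a double allocation */
	frame_in_use[frame] = true;
}

static void mark_free(int frame)
{
	assert(frame_in_use[frame]);	/* fires on a double free */
	frame_in_use[frame] = false;
}

int main(void)
{
	mark_alloc(3);
	mark_free(3);
	/* Calling mark_free(3) again here would trip the assert, just as the
	 * PageReferenced() checks above catch a repeated free of the same frame. */
	printf("alloc/free guard ok\n");
	return 0;
}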
Example #11
void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
{
	struct page_ext *page_ext = lookup_page_ext(page);
	struct stack_trace trace = {
		.nr_entries = 0,
		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
		.entries = &page_ext->trace_entries[0],
		.skip = 3,
	};

	save_stack_trace(&trace);

	page_ext->order = order;
	page_ext->gfp_mask = gfp_mask;
	page_ext->nr_entries = trace.nr_entries;

	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}

static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
		struct page *page, struct page_ext *page_ext)
{
	int ret;
	int pageblock_mt, page_mt;
	char *kbuf;
	struct stack_trace trace = {
		.nr_entries = page_ext->nr_entries,
		.entries = &page_ext->trace_entries[0],
	};

	kbuf = kmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = snprintf(kbuf, count,
			"Page allocated via order %u, mask 0x%x\n",
			page_ext->order, page_ext->gfp_mask);

	if (ret >= count)
		goto err;

	/* Print information relevant to grouping pages by mobility */
	pageblock_mt = get_pfnblock_migratetype(page, pfn);
	page_mt  = gfpflags_to_migratetype(page_ext->gfp_mask);
	ret += snprintf(kbuf + ret, count - ret,
			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
			pfn,
			pfn >> pageblock_order,
			pageblock_mt,
			pageblock_mt != page_mt ? "Fallback" : "        ",
			PageLocked(page)	? "K" : " ",
			PageError(page)		? "E" : " ",
			PageReferenced(page)	? "R" : " ",
			PageUptodate(page)	? "U" : " ",
			PageDirty(page)		? "D" : " ",
			PageLRU(page)		? "L" : " ",
			PageActive(page)	? "A" : " ",
			PageSlab(page)		? "S" : " ",
			PageWriteback(page)	? "W" : " ",
			PageCompound(page)	? "C" : " ",
			PageSwapCache(page)	? "B" : " ",
			PageMappedToDisk(page)	? "M" : " ");

	if (ret >= count)
		goto err;

	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
	if (ret >= count)
		goto err;

	ret += snprintf(kbuf + ret, count - ret, "\n");
	if (ret >= count)
		goto err;

	if (copy_to_user(buf, kbuf, ret))
		ret = -EFAULT;

	kfree(kbuf);
	return ret;

err:
	kfree(kbuf);
	return -ENOMEM;
}

static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long pfn;
	struct page *page;
	struct page_ext *page_ext;

	if (!page_owner_inited)
		return -EINVAL;

	page = NULL;
	pfn = min_low_pfn + *ppos;

	/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
	while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		pfn++;

	drain_all_pages(NULL);

	/* Find an allocated page */
	for (; pfn < max_pfn; pfn++) {
		/*
		 * If the new page is in a new MAX_ORDER_NR_PAGES area,
		 * validate the area as existing, skip it if not
		 */
		if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0 && !pfn_valid(pfn)) {
			pfn += MAX_ORDER_NR_PAGES - 1;
			continue;
		}

		/* Check for holes within a MAX_ORDER area */
		if (!pfn_valid_within(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
			continue;
		}

		page_ext = lookup_page_ext(page);

		/*
		 * Some pages could be missed by concurrent allocation or free,
		 * because we don't hold the zone lock.
		 */
		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
			continue;

		/* Record the next PFN to read in the file offset */
		*ppos = (pfn - min_low_pfn) + 1;

		return print_page_owner(buf, count, pfn, page, page_ext);
	}

	return 0;
}
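When the kernel is built with CONFIG_PAGE_OWNER and booted with page_owner=on, the records formatted by print_page_owner() above are read through the page_owner file in debugfs (commonly /sys/kernel/debug/page_owner), served by the read_page_owner() handler. A small userspace sketch of reading a few records, assuming that path and that debugfs is mounted:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int i;
	int fd = open("/sys/kernel/debug/page_owner", O_RDONLY);

	if (fd < 0) {
		perror("open page_owner");
		return 1;
	}

	/* Each read() returns one allocated page's record, as built by
	 * print_page_owner(); stop after a few for brevity. */
	for (i = 0; i < 3 && (n = read(fd, buf, sizeof(buf) - 1)) > 0; i++) {
		buf[n] = '\0';
		fputs(buf, stdout);
		fputs("----\n", stdout);
	}
	close(fd);
	return 0;
}

In practice the whole file is usually captured with "cat /sys/kernel/debug/page_owner > page_owner_full.txt" and post-processed offline.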