Example #1
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}
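
For reference, init_section_page_cgroup() is not shown on this page. Below is a minimal sketch of what it does in this generation of mm/page_cgroup.c, assuming the usual sparsemem layout (alloc_page_cgroup() is the file's per-section allocator):

static int __init init_section_page_cgroup(unsigned long pfn, int nid)
{
	struct mem_section *section;
	struct page_cgroup *base;
	unsigned long table_size;

	section = __pfn_to_section(pfn);
	if (section->page_cgroup)
		return 0;	/* already set up, e.g. by memory hotplug */

	table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
	base = alloc_page_cgroup(table_size, nid);
	if (!base)
		return -ENOMEM;

	/*
	 * Store the array offset by the section's first pfn so that a
	 * lookup can index it directly with a raw pfn.
	 */
	pfn &= PAGE_SECTION_MASK;
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}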
Example #2
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			/*
			 * Even if the first pfn is invalid, the entire
			 * section is not necessarily unused: check whether
			 * the section has a mem_map, and if so, go ahead
			 * and create the page_cgroup table for it.
			 */
			struct mem_section *section = __pfn_to_section(pfn);

			if (!section->section_mem_map)
				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * Some architectures interleave nodes, e.g.:
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}
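
The loop advances with ALIGN(pfn + 1, PAGES_PER_SECTION) rather than pfn += PAGES_PER_SECTION so that an unaligned start_pfn still snaps to a section boundary on the next iteration. A small userspace sketch of the arithmetic (the PAGES_PER_SECTION value here is illustrative; it is a power of two on real configurations):

#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define PAGES_PER_SECTION	0x8000UL	/* illustrative: 32768 pages */

int main(void)
{
	unsigned long start_pfn = 0x1234;	/* deliberately unaligned */
	unsigned long end_pfn = 0x20000;
	unsigned long pfn;

	/* Visits 0x1234, 0x8000, 0x10000, 0x18000: one pfn per section. */
	for (pfn = start_pfn; pfn < end_pfn;
	     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION))
		printf("visiting section containing pfn 0x%lx\n", pfn);
	return 0;
}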
Example #3
void __init page_cgroup_init_flatmem(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed.\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}
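
alloc_node_page_cgroup() is the flatmem counterpart of the per-section allocation above: one contiguous table covering the node's entire pfn span. A minimal sketch, assuming the era-appropriate bootmem allocator and the node_page_cgroup field in pglist_data:

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size, nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;
	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid), table_size,
					    PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}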
Example #4
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't"
	       " want memory cgroups\n");
}
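
Unlike the node-aware variants, this version also wires up memory hotplug via hotplug_memory_notifier(). A sketch of what such a callback can look like; online_page_cgroup() and offline_page_cgroup() are helpers this file defines elsewhere, named here only as assumptions:

static int __meminit page_cgroup_callback(struct notifier_block *self,
					  unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* allocate page_cgroup tables for the incoming sections */
		ret = online_page_cgroup(mn->start_pfn, mn->nr_pages,
					 mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		/* tear down the tables of sections that went away */
		offline_page_cgroup(mn->start_pfn, mn->nr_pages,
				    mn->status_change_nid);
		break;
	default:
		break;
	}
	return notifier_from_errno(ret);
}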
Example #5
void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int nid;

	if (mem_cgroup_disabled())
		return;

	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long start_pfn, end_pfn;

		start_pfn = node_start_pfn(nid);
		end_pfn = node_end_pfn(nid);
		/*
		 * start_pfn and end_pfn may not be aligned to SECTION and the
		 * page->flags of out of node pages are not initialized.  So we
		 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
		 */
		for (pfn = start_pfn;
		     pfn < end_pfn;
		     pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {

			if (!pfn_valid(pfn))
				continue;
			/*
			 * Nodes' pfn ranges can overlap.
			 * Some architectures interleave nodes, e.g.:
			 * -------------pfn-------------->
			 * N0 | N1 | N2 | N0 | N1 | N2 | ...
			 */
			if (pfn_to_nid(pfn) != nid)
				continue;
			if (init_section_page_cgroup(pfn, nid))
				goto oom;
		}
	}
	return;
oom:
	printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}
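
On the consumer side, the base - pfn offset stored at init time lets lookups index the per-section table directly by pfn. A minimal sketch of the sparsemem lookup path, assuming the same generation of mm/page_cgroup.c:

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	if (!section->page_cgroup)
		return NULL;
	return section->page_cgroup + pfn;
}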
Example #6
static int meminfo_proc_show(struct seq_file *m, void *v)
{
    struct sysinfo i;
    unsigned long committed;
    unsigned long allowed;
    struct vmalloc_info vmi;
    long cached;
    unsigned long pages[NR_LRU_LISTS];
    int lru;
    bool instance_view = false;
    struct mem_cgroup *memcg = NULL;

    if (in_noninit_pid_ns(current->nsproxy->pid_ns) &&
            !mem_cgroup_disabled()) {
        instance_view = true;
        memcg = mem_cgroup_from_task(current);
    }

    /*
     * display in kilobytes.
     */
#define K(x) ((x) << (PAGE_SHIFT - 10))
    if (!instance_view) {
        si_meminfo(&i);
        si_swapinfo(&i);
        cached = global_page_state(NR_FILE_PAGES) -
                 total_swapcache_pages - i.bufferram;
        if (cached < 0)
            cached = 0;
        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
            pages[lru] = global_page_state(NR_LRU_BASE + lru);
    } else {
        cgroup_mem_sw_info(&i, memcg, &cached, pages);
    }
    committed = percpu_counter_read_positive(&vm_committed_as);
    allowed = ((totalram_pages - hugetlb_total_pages())
               * sysctl_overcommit_ratio / 100) + total_swap_pages;

    get_vmalloc_info(&vmi);

    /*
     * Tagged format, for easy grepping and expansion.
     */
    seq_printf(m,
               "MemTotal:       %8lu kB\n"
               "MemFree:        %8lu kB\n"
               "Buffers:        %8lu kB\n"
               "Cached:         %8lu kB\n"
               "SwapCached:     %8lu kB\n"
               "Active:         %8lu kB\n"
               "Inactive:       %8lu kB\n"
               "Active(anon):   %8lu kB\n"
               "Inactive(anon): %8lu kB\n"
               "Active(file):   %8lu kB\n"
               "Inactive(file): %8lu kB\n"
               "Unevictable:    %8lu kB\n"
               "Mlocked:        %8lu kB\n"
#ifdef CONFIG_HIGHMEM
               "HighTotal:      %8lu kB\n"
               "HighFree:       %8lu kB\n"
               "LowTotal:       %8lu kB\n"
               "LowFree:        %8lu kB\n"
#endif
#ifndef CONFIG_MMU
               "MmapCopy:       %8lu kB\n"
#endif
               "SwapTotal:      %8lu kB\n"
               "SwapFree:       %8lu kB\n"
               "Dirty:          %8lu kB\n"
               "Writeback:      %8lu kB\n"
               "AnonPages:      %8lu kB\n"
               "Mapped:         %8lu kB\n"
               "Shmem:          %8lu kB\n"
               "Slab:           %8lu kB\n"
               "SReclaimable:   %8lu kB\n"
               "SUnreclaim:     %8lu kB\n"
               "KernelStack:    %8lu kB\n"
               "PageTables:     %8lu kB\n"
#ifdef CONFIG_QUICKLIST
               "Quicklists:     %8lu kB\n"
#endif
               "NFS_Unstable:   %8lu kB\n"
               "Bounce:         %8lu kB\n"
               "WritebackTmp:   %8lu kB\n"
               "CommitLimit:    %8lu kB\n"
               "Committed_AS:   %8lu kB\n"
               "VmallocTotal:   %8lu kB\n"
               "VmallocUsed:    %8lu kB\n"
               "VmallocChunk:   %8lu kB\n"
#ifdef CONFIG_MEMORY_FAILURE
               "HardwareCorrupted: %5lu kB\n"
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
               "AnonHugePages:  %8lu kB\n"
#endif
               ,
               K(i.totalram),
               K(i.freeram),
               K(i.bufferram),
               K(cached),
               K(total_swapcache_pages),
               K(pages[LRU_ACTIVE_ANON]   + pages[LRU_ACTIVE_FILE]),
               K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
               K(pages[LRU_ACTIVE_ANON]),
               K(pages[LRU_INACTIVE_ANON]),
               K(pages[LRU_ACTIVE_FILE]),
               K(pages[LRU_INACTIVE_FILE]),
               K(pages[LRU_UNEVICTABLE]),
               K(global_page_state(NR_MLOCK)),
#ifdef CONFIG_HIGHMEM
               K(i.totalhigh),
               K(i.freehigh),
               K(i.totalram-i.totalhigh),
               K(i.freeram-i.freehigh),
#endif
#ifndef CONFIG_MMU
               K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
#endif
               K(i.totalswap),
               K(i.freeswap),
               K(global_page_state(NR_FILE_DIRTY)),
               K(global_page_state(NR_WRITEBACK)),
               K(global_page_state(NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                 + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
                 HPAGE_PMD_NR
#endif
                ),
               K(global_page_state(NR_FILE_MAPPED)),
               K(global_page_state(NR_SHMEM)),
               K(global_page_state(NR_SLAB_RECLAIMABLE) +
                 global_page_state(NR_SLAB_UNRECLAIMABLE)),
               K(global_page_state(NR_SLAB_RECLAIMABLE)),
               K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
               global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
               K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
               K(quicklist_total_size()),
#endif
               K(global_page_state(NR_UNSTABLE_NFS)),
               K(global_page_state(NR_BOUNCE)),
               K(global_page_state(NR_WRITEBACK_TEMP)),
               K(allowed),
               K(committed),
               (unsigned long)VMALLOC_TOTAL >> 10,
               vmi.used >> 10,
               vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
               ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
               ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
                  HPAGE_PMD_NR)
#endif
              );

    hugetlb_report_meminfo(m);

    arch_report_meminfo(m);

    return 0;
#undef K
}
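
The K() macro converts a page count to kilobytes with a shift: with PAGE_SHIFT = 12 (4 KiB pages), x << (12 - 10) is x * 4. A standalone check of the arithmetic (the PAGE_SHIFT value is an assumption; it varies by architecture):

#include <stdio.h>

#define PAGE_SHIFT	12				/* assumed 4 KiB pages */
#define K(x)		((x) << (PAGE_SHIFT - 10))	/* pages -> kB */

int main(void)
{
	unsigned long pages = 262144;	/* 1 GiB worth of 4 KiB pages */
	printf("%lu pages = %lu kB\n", pages, K(pages));	/* 1048576 kB */
	return 0;
}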