Example #1
static int __init balloon_init(void)
{
	unsigned long pfn, extra_pfn_end;
	struct page *page;

	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/*
	 * Initialise the balloon with excess memory space.  We need
	 * to make sure we don't add memory which doesn't physically
	 * or logically exist.  The E820 map can be trimmed to be smaller
	 * than the amount of physical memory due to the mem= command
	 * line parameter.  And if this is a 32-bit non-HIGHMEM kernel
	 * on a system with memory which requires highmem to access,
	 * don't try to use it.
	 */
	extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
			    (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
	for (pfn = PFN_UP(xen_extra_mem_start);
	     pfn < extra_pfn_end;
	     pfn++) {
		page = pfn_to_page(pfn);
		/*
		 * totalram_pages and totalhigh_pages do not include the
		 * boot-time balloon extension, so don't subtract from it.
		 */
		__balloon_append(page);
	}

	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;

	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}
Example #2
static int hwpoison_inject(void *data, u64 val)
{
	unsigned long pfn = val;
	struct page *p;
	struct page *hpage;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	hpage = compound_head(p);
	/*
	 * This implies we cannot support free buddy pages.
	 */
	if (!get_page_unless_zero(hpage))
		return 0;

	if (!hwpoison_filter_enable)
		goto inject;

	if (!PageLRU(p) && !PageHuge(p))
		shake_page(p, 0);
	/*
	 * This implies we cannot support non-LRU pages.
	 */
	if (!PageLRU(p) && !PageHuge(p))
		return 0;

	/*
	 * do a racy check with elevated page count, to make sure PG_hwpoison
	 * will only be set for the targeted owner (or on a free page).
	 * We temporarily take page lock for try_get_mem_cgroup_from_page().
	 * memory_failure() will redo the check reliably inside page lock.
	 */
	lock_page(hpage);
	err = hwpoison_filter(hpage);
	unlock_page(hpage);
	if (err)
		return 0;

inject:
	printk(KERN_INFO "Injecting memory failure at pfn %lx\n", pfn);
	return memory_failure(pfn, 18, MF_COUNT_INCREASED);
}
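
The handler above is normally reached through a debugfs write; the sketch below is a hypothetical userspace caller that writes a pfn to the injector's control file. The /sys/kernel/debug/hwpoison/corrupt-pfn path and the text format are assumptions, not taken from the example itself.

/* Hypothetical userspace sketch; the debugfs path below is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int inject_hwpoison(unsigned long pfn)
{
	char buf[32];
	int fd, len, ret = 0;

	fd = open("/sys/kernel/debug/hwpoison/corrupt-pfn", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "0x%lx", pfn);
	if (write(fd, buf, len) != len)	/* kernel side parses the pfn value */
		ret = -1;
	close(fd);
	return ret;
}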
Example #3
void dma_generic_free_coherent(struct device *dev, size_t size,
			       void *vaddr, dma_addr_t dma_handle,
			       unsigned long attrs)
{
	int order = get_order(size);
	unsigned long pfn = dma_handle >> PAGE_SHIFT;
	int k;

	if (!WARN_ON(!dev))
		pfn += dev->dma_pfn_offset;

	for (k = 0; k < (1 << order); k++)
		__free_pages(pfn_to_page(pfn + k), 0);

	iounmap(vaddr);
}
Example #4
static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
    unsigned long pfn;

    for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
        struct page *page = pfn_to_page(pfn);
        void *addr = phys_to_virt(PFN_PHYS(pfn));

        ClearPageReserved(page);
        init_page_count(page);
        memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
        __free_page(page);
        totalram_pages++;
    }
    printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #5
void kbase_sync_to_cpu(phys_addr_t paddr, void *vaddr, size_t sz)
{
#ifdef CONFIG_ARM
	__cpuc_flush_dcache_area(vaddr, sz);
	outer_flush_range(paddr, paddr + sz);
#elif defined(CONFIG_ARM64)
	/* FIXME (MID64-46): There's no other suitable cache flush function for ARM64 */
	flush_cache_all();
#elif defined(CONFIG_X86)
	struct scatterlist scl = { 0, };
	sg_set_page(&scl, pfn_to_page(PFN_DOWN(paddr)), sz, paddr & (PAGE_SIZE - 1));
	dma_sync_sg_for_cpu(NULL, &scl, 1, DMA_FROM_DEVICE);
#else
#error Implement cache maintenance for your architecture here
#endif
}
Example #6
/*
 * Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static int filemap_sync_pte(pte_t *ptep, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (pte_present(pte) && pfn_valid(pfn)) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page) &&
		    (ptep_clear_flush_dirty(vma, address, ptep) ||
		     page_test_and_clear_dirty(page)))
			set_page_dirty(page);
	}
	return 0;
}
Example #7
/*
 * For SH-4, we have our own implementation for ptep_get_and_clear
 */
inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(mm, addr, ptep);
	if (!pte_not_present(pte)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			struct address_space *mapping = page_mapping(page);
			if (!mapping || !mapping_writably_mapped(mapping))
				__clear_bit(PG_mapped, &page->flags);
		}
	}
	return pte;
}
Example #8
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}
Example #9
static int __init balloon_init(void)
{
	unsigned long pfn, num_physpages, max_pfn;
	struct page *page;

	if (!xen_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	num_physpages = get_num_physpages();

	if (xen_pv_domain())
		max_pfn = xen_start_info->nr_pages;
	else
		max_pfn = num_physpages;

	balloon_stats.current_pages = min(num_physpages, max_pfn);
	totalram_bias = balloon_stats.current_pages - totalram_pages;
	old_totalram_pages = totalram_pages;
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.driver_pages  = 0UL;
	pr_info("current_pages=%luKB, totalram_pages=%luKB, totalram_bias=%luKB\n",
		balloon_stats.current_pages * 4, totalram_pages * 4, totalram_bias * 4);

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/* Initialise the balloon with excess memory space. */
#ifdef CONFIG_PVM
	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			balloon_append(page);
	}
#endif
	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;

	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}
Example #10
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If a race between isolation and allocation happens,
			 * some free pages could be on the MIGRATE_MOVABLE list
			 * although the pageblock's migration type is
			 * MIGRATE_ISOLATE. Catch it and move the page onto the
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		} else if (page_count(page) == 0 &&
			   get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else if (skip_hwpoisoned_pages && PageHWPoison(page)) {
			/*
			 * The HWPoisoned page may not be in the buddy
			 * system, and page_count() is not 0.
			 */
			pfn++;
			continue;
		} else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
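
A minimal hedged sketch of a caller for the helper above, taking zone->lock as its comment requires; pages_isolated() is a hypothetical wrapper, not the kernel's real caller.

/* Hypothetical wrapper: hold zone->lock while testing the range. */
static bool pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			   bool skip_hwpoisoned_pages)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&zone->lock, flags);
	ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret == 1;
}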
Example #11
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long start, start_pfn;
	struct zone *zone;
	int ret;

	start_pfn = base >> PAGE_SHIFT;

	if (!pfn_valid(start_pfn)) {
		memblock_remove(base, memblock_size);
		return 0;
	}

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Remove section mappings and sysfs entries for the
	 * section of the memory we are removing.
	 *
	 * NOTE: Ideally, this should be done in generic code like
	 * remove_memory(). But remove_memory() gets called by writing
	 * to sysfs "state" file and we can't remove sysfs entries
	 * while writing to it. So we have to defer it to here.
	 */
	ret = __remove_pages(zone, start_pfn, memblock_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/*
	 * Update memory regions for memory remove
	 */
	memblock_remove(base, memblock_size);

	/*
	 * Remove htab bolted mappings for this section of memory
	 */
	start = (unsigned long)__va(base);
	ret = remove_section_mapping(start, start + memblock_size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
Example #12
/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}
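
The store handler above takes a physical address from userspace (it shifts the value down by PAGE_SHIFT itself). Below is a hypothetical userspace sketch, assuming the attribute is exposed as /sys/devices/system/memory/soft_offline_page.

/* Hypothetical userspace sketch; the sysfs path is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int soft_offline_phys_addr(unsigned long long phys_addr)
{
	char buf[32];
	int fd, len, ret = 0;

	fd = open("/sys/devices/system/memory/soft_offline_page", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "0x%llx", phys_addr);
	if (write(fd, buf, len) != len)	/* handler shifts this down to a pfn */
		ret = -1;
	close(fd);
	return ret;
}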
Example #13
/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
    int i;
    unsigned long start_pfn, start_paddr;
    unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
    struct page *first_page;
    int ret;

    first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);

    /*
     * The probe routines leave the pages reserved, just
     * as the bootmem code does.  Make sure they're still
     * that way.
     */
    if (action == MEM_ONLINE) {
        for (i = 0; i < nr_pages; i++) {
            if (PageReserved(first_page+i))
                continue;

            printk(KERN_WARNING "section number %ld page number %d "
                   "not reserved, was it already online?\n",
                   phys_index, i);
            return -EBUSY;
        }
    }

    switch (action) {
    case MEM_ONLINE:
        start_pfn = page_to_pfn(first_page);
        ret = online_pages(start_pfn, nr_pages);
        break;
    case MEM_OFFLINE:
        start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
        ret = remove_memory(start_paddr,
                            nr_pages << PAGE_SHIFT);
        break;
    default:
        WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
             "%ld\n", __func__, phys_index, action, action);
        ret = -EINVAL;
    }

    return ret;
}
Example #14
/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
			dma_addr & ~PAGE_MASK, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir);
}
Example #15
static void send_netdump_mem(struct netpoll *np, req_t *req)
{
	int i;
	char *kaddr;
	char str[1024];
	struct page *page = NULL;
	unsigned long nr = req->from;
	int nr_chunks = PAGE_SIZE/1024;
	reply_t reply;
	
	Dprintk(" ... send_netdump_mem\n");
	reply.nr = req->nr;
	reply.info = 0;
	if (req->from >= platform_max_pfn()) {
		sprintf(str, "page %08lx is bigger than max page # %08lx!\n", 
			nr, platform_max_pfn());
		reply.code = REPLY_ERROR;
		send_netdump_msg(np, str, strlen(str), &reply);
		return;
	}
	if (platform_page_is_ram(nr)) {
		page = pfn_to_page(nr);
		if (page_to_pfn(page) != nr)
			page = NULL;
	}
	if (!page) {
		reply.code = REPLY_RESERVED;
		reply.info = platform_next_available(nr);
		send_netdump_msg(np, str, 0, &reply);
		return;
	}

	kaddr = (char *)kmap_atomic(page, KM_CRASHDUMP);

	for (i = 0; i < nr_chunks; i++) {
		unsigned int offset = i*1024;
		reply.code = REPLY_MEM;
		reply.info = offset;
		Dprintk(" ... send_netdump_mem: sending message\n");
		send_netdump_msg(np, kaddr + offset, 1024, &reply);
		Dprintk(" ... send_netdump_mem: sent message\n");
	}

	kunmap_atomic(kaddr, KM_CRASHDUMP);
	Dprintk(" ... send_netdump_mem: returning\n");
}
Example #16
/* New fault method instead of nopage */
static int pme_mem_fops_fault(struct vm_area_struct *vma,
				       struct vm_fault *vmf)
{
	struct page *pageptr;
	unsigned long offset, physaddr, pageframe;
	struct pme_fb_vma *mem_node = vma->vm_private_data;
	int index = 0;
	if (!mem_node)
		return -1;
	if (mem_node->type == fb_phys_mapped) {
		/* Memory is mapped using the physical address method*/
		offset = vma->vm_pgoff << PAGE_SHIFT;
		physaddr = (unsigned long)vmf->virtual_address - vma->vm_start +
					offset;
		pageframe = physaddr >> PAGE_SHIFT;
		pageptr = pfn_to_page(pageframe);
	} else {
Example #17
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
Example #18
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;
	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn%1000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page)) {
			printk("highmem reserved page?!\n");
			continue;
		}
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *) get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
Example #19
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long) page_address(page));
				}
			}
	}
}
Example #20
static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}
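
A hedged sketch of how saveable_page() might be used to count saveable pages in a zone, loosely following the zone-walking pattern of the two swsusp functions above; count_saveable_pages() is illustrative, not the kernel's own counter.

/* Illustrative only: walk a zone's pfn range and count saveable pages. */
static unsigned int count_saveable_pages(struct zone *zone)
{
	unsigned long zone_pfn;
	unsigned int n = 0;

	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		if (saveable_page(zone_pfn + zone->zone_start_pfn))
			n++;

	return n;
}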
Example #21
/*
 * Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
 */
static pte_t do_dcache_icache_coherency(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return pte;
	page = pfn_to_page(pfn);

	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
		pr_debug("do_dcache_icache_coherency... flushing\n");
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	} else
		pr_debug("do_dcache_icache_coherency... already clean\n");
	return __pte(pte_val(pte) | _PAGE_HWEXEC);
}
Example #22
/**
 * frame_vector_to_pages - convert frame vector to contain page pointers
 * @vec:	frame vector to convert
 *
 * Convert @vec to contain array of page pointers.  If the conversion is
 * successful, return 0. Otherwise return an error. Note that we do not grab
 * page references for the page structures.
 */
int frame_vector_to_pages(struct frame_vector *vec)
{
	int i;
	unsigned long *nums;
	struct page **pages;

	if (!vec->is_pfns)
		return 0;
	nums = frame_vector_pfns(vec);
	for (i = 0; i < vec->nr_frames; i++)
		if (!pfn_valid(nums[i]))
			return -EINVAL;
	pages = (struct page **)nums;
	for (i = 0; i < vec->nr_frames; i++)
		pages[i] = pfn_to_page(nums[i]);
	vec->is_pfns = false;
	return 0;
}
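
A hedged usage sketch for the conversion above; frame_vector_pages() and frame_vector_count() are assumed to be the matching accessors, and the PageReserved() check is purely illustrative.

/* Illustrative caller: convert to pages, then inspect each struct page. */
static unsigned int count_reserved_frames(struct frame_vector *vec)
{
	struct page **pages;
	unsigned int i, n = 0;

	if (frame_vector_to_pages(vec) < 0)
		return 0;	/* at least one pfn had no valid struct page */

	pages = frame_vector_pages(vec);
	for (i = 0; i < frame_vector_count(vec); i++)
		if (PageReserved(pages[i]))
			n++;

	return n;
}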
Example #23
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}
Example #24
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) && (page = pfn_to_page(pfn), page_mapping(page)) &&
	    Page_dcache_dirty(page)) {
		if (pages_do_alias((unsigned long)page_address(page),
		                   address & PAGE_MASK)) {
			addr = (unsigned long) page_address(page);
			flush_data_cache_page(addr);
		}

		ClearPageDcacheDirty(page);
	}
}
Example #25
static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pteval);

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		unsigned long page_addr = (unsigned long) page_address(page);

		if (!cpu_has_ic_fills_f_dc ||
		    pages_do_alias(page_addr, address & PAGE_MASK))
			flush_data_cache_page(page_addr);
		ClearPageDcacheDirty(page);
	}
}
Example #26
/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
	do {
		pte_t *ptep;
		unsigned long pfn;

		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
						(unsigned int)vaddr),
					(unsigned int)vaddr),
				(unsigned int)vaddr);
		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
Example #27
/* /proc/kpagecount - an array exposing page counts
 *
 * Each entry is a u64 representing the corresponding
 * physical page count.
 */
static ssize_t kpagecount_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	u64 __user *out = (u64 __user *)buf;
	struct page *ppage;
	unsigned long src = *ppos;
	unsigned long pfn;
	unsigned long max_pfn_kpmsize = max_pfn * KPMSIZE;
	ssize_t ret = 0;
	u64 pcount;

	pfn = src / KPMSIZE;
	if (src != max_pfn_kpmsize)
		count = min_t(size_t, count, max_pfn_kpmsize - src);

	if (src & KPMMASK || count & KPMMASK)
		return -EINVAL;

	while (count > 0) {
		if (pfn_valid(pfn))
			ppage = pfn_to_page(pfn);
		else
			ppage = NULL;
		if (!ppage || PageSlab(ppage))
			pcount = 0;
		else
			pcount = page_mapcount(ppage);

		if (put_user(pcount, out)) {
			ret = -EFAULT;
			break;
		}

		pfn++;
		out++;
		count -= KPMSIZE;
	}

	*ppos += (char __user *)out - buf;
	if (!ret)
		ret = (char __user *)out - buf;
	return ret;
}
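
From userspace, the interface implemented above is read as 8-byte records indexed by pfn; a small hedged sketch follows (root privileges assumed).

/* Userspace sketch: read the mapcount of one pfn from /proc/kpagecount. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int read_kpagecount(unsigned long pfn, uint64_t *count)
{
	int fd = open("/proc/kpagecount", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	/* each pfn has one u64 entry at offset pfn * sizeof(u64) */
	n = pread(fd, count, sizeof(*count), (off_t)pfn * sizeof(*count));
	close(fd);
	return n == sizeof(*count) ? 0 : -1;
}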
Example #28
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		do {
			page = pfn_to_page(pfn1);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			pfn1++;
		} while (pfn1 < pfn2);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
Example #29
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #30
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	return page;
}
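
A hedged sketch of a driver-side caller for the allocator above; alloc_contig_buffer() is hypothetical, and it assumes a lowmem page so that page_address() is valid. The matching free path would go through dma_release_from_contiguous().

/* Hypothetical helper: allocate size bytes of physically contiguous memory. */
static void *alloc_contig_buffer(struct device *dev, size_t size,
				 struct page **out_page)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page;

	page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		return NULL;

	*out_page = page;
	return page_address(page);	/* assumes a lowmem page */
}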