STATIC int balong_ion_free_mem_to_buddy(void)
{
    int i;
    u32 fb_heap_phy = 0;
    struct ion_heap_info_data mem_data;

    if (0 != hisi_ion_get_heap_info(ION_FB_HEAP_ID, &mem_data)) {
        balongfb_loge("fail to get ION_FB_HEAP_ID\n");
        return -EINVAL;
    }

    if (0 == mem_data.heap_size) {
        balongfb_loge("fb reserved size 0\n");
        return -EINVAL;
    }

    fb_heap_phy = mem_data.heap_phy;
    /* Return the reserved FB heap to the buddy allocator, one page at a time. */
    for (i = 0; i < mem_data.heap_size / PAGE_SIZE; i++) {
        free_reserved_page(phys_to_page(mem_data.heap_phy));
#ifdef CONFIG_HIGHMEM
        /* free_reserved_page() does not update totalhigh_pages, so do it here. */
        if (PageHighMem(phys_to_page(mem_data.heap_phy)))
            totalhigh_pages += 1;
#endif
        mem_data.heap_phy += PAGE_SIZE;
    }

    memblock_free(fb_heap_phy, mem_data.heap_size);
    return 0;
}
Example #2
static ssize_t dump_end_proc_read(struct file *file, char __user *userbuf,
				  size_t bytes, loff_t *off)
{
	phys_addr_t addr;
	struct page *page;

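	/* Hand every page of the reserved memdump region back to the page allocator. */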
	for (addr = g_memdump_addr; addr < (g_memdump_addr + g_memdump_size);
	     addr += PAGE_SIZE) {
		page = pfn_to_page(addr >> PAGE_SHIFT);
		free_reserved_page(page);
#ifdef CONFIG_HIGHMEM
		if (PageHighMem(page))
			totalhigh_pages++;
#endif
	}

	memblock_free(g_memdump_addr, g_memdump_size);

	pr_err("dump_end_proc_read:g_memdump_addr=0x%x, g_memdump_end=0x%x,g_memdump_size=0x%x\n",
		(unsigned int)g_memdump_addr, (unsigned int)g_memdump_end,
		g_memdump_size);
	pr_info("%s:addr%lu\n", __func__, (unsigned long)addr);
	g_memdump_addr = 0;
	g_memdump_end = 0;
	g_memdump_size = 0;
	return 0;
}
Example #3
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *section_base;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true, some other sections are
		 * still using this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

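		/* addr is the physical address of the memory that backed this vmemmap range. */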
		page = pfn_to_page(addr >> PAGE_SHIFT);
		section_base = pfn_to_page(vmemmap_section_start(start));
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it does,
				 * leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
Example #4
void lge_panic_handler_fb_free_page(unsigned long mem_addr, unsigned long size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
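	/* Release each page in [mem_addr, mem_addr + size) back to the buddy allocator. */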
	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));
}
Example #5
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long addr;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true, some other sections are
		 * still using this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (addr) {
			struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

			if (PageReserved(page)) {
				/* allocated from bootmem */
				if (page_size < PAGE_SIZE) {
					/*
					 * this shouldn't happen, but if it does,
					 * leave the memory there
					 */
					WARN_ON_ONCE(1);
				} else {
					unsigned int nr_pages =
						1 << get_order(page_size);
					while (nr_pages--)
						free_reserved_page(page++);
				}
			} else
				free_pages((unsigned long)(__va(addr)),
							get_order(page_size));

			vmemmap_remove_mapping(start, page_size);
		}
	}
}
Example #6
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_kpte(addr);
		if (!initfree) {
			/*
			 * If debugging page accesses, do not free this
			 * memory but mark the pages not present, so any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}