Example #1
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);

		/* XEN: init high-mem pages outside initial allocation. */
		if (zone_start_pfn < xen_start_info->nr_pages)
			zone_start_pfn = xen_start_info->nr_pages;
		for (; zone_start_pfn < zone_end_pfn; zone_start_pfn++) {
			ClearPageReserved(pfn_to_page(zone_start_pfn));
			init_page_count(pfn_to_page(zone_start_pfn));
		}
	}
	totalram_pages += totalhigh_pages;
}
Example #2
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		totalram_pages += count;

		page += count;
		pfn += count;
	}
}
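The batching in Example #2 picks the largest power-of-two run that is both aligned at the current pfn (__ffs(pfn) returns the index of the lowest set bit, i.e. the alignment) and still fits before end. Below is a standalone sketch of just that arithmetic, assuming a MAX_ORDER of 11 and using GCC's __builtin_ctzl in place of the kernel's __ffs(); note the pfn == 0 guard, which the kernel loop only gets away without when start is nonzero:

#include <stdio.h>

#define MAX_ORDER 11	/* assumed; a common kernel default */

int main(void)
{
	unsigned long start = 3, end = 40, pfn;

	for (pfn = start; pfn < end; ) {
		/* __builtin_ctzl(0) is undefined, so clamp pfn == 0 to the max. */
		int order = pfn ? __builtin_ctzl(pfn) : MAX_ORDER - 1;
		unsigned long count;

		if (order >= MAX_ORDER)
			order = MAX_ORDER - 1;
		count = 1UL << order;
		while (pfn + count > end) {	/* shrink until the run fits */
			count >>= 1;
			--order;
		}
		printf("free pfns [%lu, %lu) as one order-%d block\n",
		       pfn, pfn + count, order);
		pfn += count;
	}
	return 0;
}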
Example #3
/*
 * Memory hotplug specific functions
 */
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
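Example #3 is the idiom at its smallest: clear PG_reserved, force the reference count back to one, then hand the page to the buddy allocator and bump the RAM counters. In the kernels these snippets come from, init_page_count() is just an atomic store of 1 into the page's _count field; here is a minimal userspace sketch of that contract (the struct page below is a toy stand-in, not the kernel's):

#include <assert.h>
#include <stdatomic.h>

/* Toy stand-in for the two fields these examples touch. */
struct page {
	atomic_int _count;	/* reference count; 0 means "free" */
	int reserved;		/* stand-in for the PG_reserved flag bit */
};

/* Mirrors init_page_count(): unconditionally set the refcount to 1. */
static void init_page_count(struct page *page)
{
	atomic_store(&page->_count, 1);
}

int main(void)
{
	struct page page = { .reserved = 1 };

	page.reserved = 0;	/* what ClearPageReserved() does, minus atomicity */
	init_page_count(&page);	/* page is now a normal page with one reference */
	assert(atomic_load(&page._count) == 1);
	/* __free_page() would then drop that last reference into the allocator. */
	return 0;
}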
Example #4
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;
	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
Example #5
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	printk("Freeing initrd memory: %luKiB freed\n", (pages * PAGE_SIZE) >> 10);
} /* end free_initrd_mem() */
Example #6
void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, false);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
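Worth noting in Example #6: put_page_testzero() has already dropped the count to zero, and __free_pages() internally drops a reference of its own, so the count has to be re-armed with init_page_count() before the order > 0 call; the order-0 path instead goes through free_hot_cold_page(), which accepts the page with its count already at zero.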
Example #7
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)&__init_begin;
	for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
		/* Use the atomic page-flag helper rather than poking flags directly. */
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}
Example #8
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n",
	       pages << (PAGE_SHIFT - 10));
}
Example #9
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *) addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #10
File: init_no.c Project: 7L/pi_plus
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	pr_notice("Freeing initrd memory: %luk freed\n",
		  pages * (PAGE_SIZE / 1024));
}
Example #11
static void free_init_pages(const char *what, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk("Freeing %s mem: %ldk freed\n", what, (end - start) >> 10);

	for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalram_pages++;
	}
}
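Examples #11 and #20 free by page frame number rather than by virtual address, and they round inward: PFN_UP() rounds the start up and PFN_DOWN() rounds the end down, so a partial page at either edge is never freed. Example #14 gets the same guarantee from PAGE_MASK arithmetic on addresses. A self-contained sketch of both roundings, assuming 4 KiB pages (the macro bodies match the kernel's definitions):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long start = 0x1100, end = 0x4800;

	/* Round inward: the partial pages at both edges stay allocated. */
	printf("pfns freed:  [%lu, %lu)\n", PFN_UP(start), PFN_DOWN(end));

	/* The address-based equivalent used by Example #14. */
	printf("addrs freed: [0x%lx, 0x%lx)\n",
	       (start + PAGE_SIZE - 1) & PAGE_MASK, end & PAGE_MASK);
	return 0;
}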
Example #12
void pte_free(struct page *pte)
{
	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

	if (!pte_write(*virt_to_ptep(va)))
		BUG_ON(HYPERVISOR_update_va_mapping(
			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));

	ClearPageForeign(pte);
	init_page_count(pte);

	__free_page(pte);
}
Example #13
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
	int home = initial_heap_home();
#endif
	unsigned long addr;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);

		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
#ifdef CONFIG_HOMECACHE
		set_page_home(page, home);
		__clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
		__ClearPageReserved(page);
		init_page_count(page);
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #14
void platform_release_memory(void *ptr, int size)
{
	unsigned long addr;
	unsigned long end;

	addr = ((unsigned long)ptr + (PAGE_SIZE - 1)) & PAGE_MASK;
	end = ((unsigned long)ptr + size) & PAGE_MASK;

	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(__va(addr)));
		init_page_count(virt_to_page(__va(addr)));
		free_page((unsigned long)__va(addr));
	}
}
Example #15
static void free_init_pages(const char *what, unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalram_pages++;
	}

	printk("Freeing %s mem: %ldk freed\n", what, (end-start) >> 10);
}
Example #16
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	if (pte) {
		SetPageForeign(pte, pte_free);
		init_page_count(pte);
	}
#endif
	return pte;
}
Example #17
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
	        (unsigned long)&__init_begin) >> 10);
}
Example #18
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
Example #19
void __init mem_init(void)
{
	unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
	unsigned long tmp;
#ifdef CONFIG_MMU
	unsigned long loop, pfn;
	int datapages = 0;
#endif
	int codek = 0, datak = 0;

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

#ifdef CONFIG_MMU
	for (loop = 0; loop < npages; loop++)
		if (PageReserved(&mem_map[loop]))
			datapages++;

#ifdef CONFIG_HIGHMEM
	for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
		struct page *page = &mem_map[pfn];

		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalram_pages++;
	}
#endif

	codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
	datak = datapages << (PAGE_SHIFT - 10);

#else
	codek = (_etext - _stext) >> 10;
	datak = 0; /* (_ebss - _sdata) >> 10; */
#endif

	tmp = nr_free_pages() << PAGE_SHIFT;
	printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
	       tmp >> 10,
	       npages << (PAGE_SHIFT - 10),
	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
	       rom_length >> 10,
	       codek,
	       datak);
} /* end mem_init() */
Example #20
static void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #21
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = begin;

	if (begin >= end)
		return;
	for (; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & PAGE_MASK), POISON_FREE_INITMEM,
		       PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}
Example #22
/* Free up now-unused memory */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
	}
	if (cnt) {
		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
		totalram_pages += cnt;
	}
}
Example #23
/*
 * free the memory that was only required for initialisation
 */
void free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
	unsigned long start, end, addr;

	start = PAGE_ALIGN((unsigned long) &__init_begin);	/* round up */
	end   = ((unsigned long) &__init_end) & PAGE_MASK;	/* round down */

	/* next to check that the page we free is not a partial page */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}

	printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
	       (end - start) >> 10, start, end);
#endif
} /* end free_initmem() */
Example #24
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	if (begin >= end)
		return;

	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)(addr & ~(PAGE_SIZE-1)),
			POISON_FREE_INITMEM, PAGE_SIZE);
		if (addr >= __START_KERNEL_map)
			change_page_attr_addr(addr, 1, __pgprot(0));
		free_page(addr);
		totalram_pages++;
	}
	if (addr > __START_KERNEL_map)
		global_flush_tlb();
}
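A design point in Example #24: the mapping for each init page is torn down with change_page_attr_addr() inside the loop, but the expensive global_flush_tlb() is deferred until after it, so the TLB shootdown is paid once per call rather than once per page.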
Example #25
File: init_no.c Project: 7L/pi_plus
void free_initmem(void)
{
#ifdef CONFIG_RAMKERNEL
	unsigned long addr;
	/*
	 * The following code should be cool even if these sections
	 * are not page aligned.
	 */
	addr = PAGE_ALIGN((unsigned long) __init_begin);
	/* next to check that the page we free is not a partial page */
	for (; addr + PAGE_SIZE < ((unsigned long) __init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	pr_notice("Freeing unused kernel memory: %luk freed (0x%x - 0x%x)\n",
			(addr - PAGE_ALIGN((unsigned long) __init_begin)) >> 10,
			(int)(PAGE_ALIGN((unsigned long) __init_begin)),
			(int)(addr - PAGE_SIZE));
#endif
}
Example #26
static unsigned long highmem_setup(void)
{
	unsigned long pfn;
	unsigned long reservedpages = 0;

	for (pfn = max_low_pfn; pfn < max_pfn; ++pfn) {
		struct page *page = pfn_to_page(pfn);

		/* FIXME not sure about */
		if (memblock_is_reserved(pfn << PAGE_SHIFT))
			continue;
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
		reservedpages++;
	}
	totalram_pages += totalhigh_pages;
	printk(KERN_INFO "High memory: %luk\n",
					totalhigh_pages << (PAGE_SHIFT-10));

	return reservedpages;
}
Example #27
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	long           rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents = nr_pages;
	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
	if (rc < 0)
		goto out;

	for (i = 0; i < rc; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		set_phys_to_machine(pfn, frame_list[i]);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				mfn_pte(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}

		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
	}

	balloon_stats.current_pages += rc;

 out:
	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return rc < 0 ? rc : rc != nr_pages;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	int            need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		page = alloc_page(GFP_BALLOON);
		if (page == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		scrub_page(page);

		if (!PageHighMem(page)) {
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				__pte_ma(0), 0);
			BUG_ON(ret);
		}
	}

	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	balloon_stats.current_pages -= nr_pages;

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(struct work_struct *work)
{
	int need_sleep = 0;
	long credit;

	mutex_lock(&balloon_mutex);

	do {
		credit = current_target() - balloon_stats.current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != balloon_stats.current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	mutex_unlock(&balloon_mutex);
}
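The worker in Example #27 converges on its target by recomputing a signed credit each pass and inflating or deflating by that amount, backing off when a pass makes only partial progress (a failed allocation or a short hypercall reply). Below is a toy single-threaded simulation of just that control loop; the inflate/deflate stubs, which cap progress at 10 pages per pass, are assumptions standing in for increase_reservation()/decrease_reservation():

#include <stdio.h>

static long current_pages = 1000;
static long target_pages = 1024;

/* Stubs: pretend at most 10 pages can be moved per pass. */
static long inflate(long credit)  { return credit > 10 ? 10 : credit; }
static long deflate(long credit)  { return credit > 10 ? 10 : credit; }

int main(void)
{
	long credit;
	int need_sleep = 0;

	do {
		credit = target_pages - current_pages;
		if (credit > 0) {
			long done = inflate(credit);

			current_pages += done;
			need_sleep = (done != credit);	/* partial progress */
		} else if (credit < 0) {
			long done = deflate(-credit);

			current_pages -= done;
			need_sleep = (done != -credit);
		}
		printf("current=%ld target=%ld\n", current_pages, target_pages);
	} while (credit != 0 && !need_sleep);

	/* The real driver would re-arm a timer here while current != target. */
	return 0;
}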
Example #28
static void __meminit free_new_highpage(struct page *page)
{
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}
Example #29
void gnttab_reset_grant_page(struct page *page)
{
	init_page_count(page);
	reset_page_mapcount(page);
}
Example #30
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}