Example #1
static struct page * rmqueue(zone_t *zone, unsigned int order)
{
	free_area_t * area = zone->free_area + order;
	unsigned int curr_order = order;
	struct list_head *head, *curr;
	unsigned long flags;
	struct page *page;
	int i;

	spin_lock_irqsave(&zone->lock, flags);
	do {
		head = &area->free_list;
		curr = head->next;

		if (curr != head) {
			unsigned int index;

			page = list_entry(curr, struct page, list);
			if (BAD_RANGE(zone,page))
				BUG();
			list_del(curr);
			index = page - zone->zone_mem_map;
			if (curr_order != MAX_ORDER-1)
				MARK_USED(index, curr_order, area);
			zone->free_pages -= 1UL << order;

			page = expand(zone, page, index, order, curr_order, area);
			spin_unlock_irqrestore(&zone->lock, flags);

			set_page_count(page, 1);
			if (BAD_RANGE(zone,page))
				BUG();
			if (PageLRU(page))
				BUG();
			if (PageActive(page))
				BUG();

			/*
			 * We need to reference all the pages for this order;
			 * otherwise, if anyone accesses one of the pages with
			 * (get/put) it will be freed. :-(
			 */
			for (i = 1; i < (1 << order); i++)
				set_page_count(&page[i], 1);

			return page;	
		}
		curr_order++;
		area++;
	} while (curr_order < MAX_ORDER);
	spin_unlock_irqrestore(&zone->lock, flags);

	return NULL;
}
Example #2
File: pgtable_64.c Project: Endika/linux
static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set, we return
	 * the allocated page with a single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}
Example #3
File: memory.c Project: sarnobat/knoppix
void __init prom_free_prom_memory (void)
{
	unsigned long addr, end;

	/*
	 * Free everything below the kernel itself but leave
	 * the first page reserved for the exception handlers.
	 */

#if defined(CONFIG_DECLANCE) || defined(CONFIG_DECLANCE_MODULE)
	/*
	 * Leave 128 KB reserved for Lance memory for
	 * IOASIC DECstations.
	 *
	 * XXX: save this address for use in dec_lance.c?
	 */
	if (IOASIC)
		end = __pa(&_text) - 0x00020000;
	else
#endif
		end = __pa(&_text);

	addr = PAGE_SIZE;
	while (addr < end) {
		ClearPageReserved(virt_to_page(__va(addr)));
		set_page_count(virt_to_page(__va(addr)), 1);
		free_page((unsigned long)__va(addr));
		addr += PAGE_SIZE;
	}

	printk("Freeing unused PROM memory: %ldk freed\n",
	       (end - PAGE_SIZE) >> 10);
}
Example #4
File: mem.c Project: 1x23/unifi-gpl
void online_page(struct page *page)
{
	ClearPageReserved(page);
	set_page_count(page, 0);
	free_cold_page(page);
	totalram_pages++;
	num_physpages++;
}
Example #5
/*
 * This function will allocate the requested contiguous pages and
 * map them into the kernel's vmalloc() space.  This is done so we
 * get unique mapping for these pages, outside of the kernel's 1:1
 * virtual:physical mapping.  This is necessary so we can cover large
 * portions of the kernel with single large page TLB entries, and
 * still get unique uncached pages for consistent DMA.
 */
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct vm_struct *area;
	unsigned long page, va, pa;
	void *ret;
	int order, err, i;

	if (in_interrupt())
		BUG();

	/* only allocate page size areas */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/* allocate some common virtual space to map the new pages */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(page, order);
		return NULL;
	}
	va = VMALLOC_VMADDR(area->addr);
	ret = (void *) va;

	/* this gives us the real physical address of the first page */
	*dma_handle = pa = virt_to_bus((void *) page);

	/* set refcount=1 on all pages in an order>0 allocation so that vfree() will actually free
	 * all pages that were allocated.
	 */
	if (order > 0) {
		struct page *rpage = virt_to_page(page);

		for (i = 1; i < (1 << order); i++)
			set_page_count(rpage + i, 1);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);

	if (err) {
		vfree((void *) va);
		return NULL;
	}

	/* we need to ensure that there are no cachelines in use, or worse dirty in this area
	 * - can't do until after virtual address mappings are created
	 */
	frv_cache_invalidate(va, va + size);

	return ret;
}
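
As a usage illustration for consistent_alloc above, a hedged sketch (hypothetical caller; MY_BUF_SIZE and the device-programming step are placeholders, not part of the original source):

	/* Hypothetical caller: allocate a coherent buffer and hand its bus
	 * address to a device; 'buf' is the CPU-side uncached mapping. */
	dma_addr_t dma_handle;
	void *buf = consistent_alloc(GFP_KERNEL, MY_BUF_SIZE, &dma_handle);

	if (buf) {
		/* ... program the device with dma_handle, access buf from the CPU ... */
	}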
Example #6
void release_individual_page_counts(void *buffer, unsigned long order)
{
	int i;
	struct page *spa;

	if (order) {
		for (i = 1; i < (1 << order); i++) {
			spa = virt_to_page(buffer + i * PAGE_SIZE);
			set_page_count(spa, 0);
		}
	}
}
Example #7
/* code to manipulate page counters of page blocks */
void add_individual_page_counts(void *buffer, unsigned long order)
{
	int i, orig;
	struct page *spa;

	if (order) {
		orig = page_count(virt_to_page(buffer)); /* first page setting */
		for (i = 1; i < (1 << order); i++) {
			spa = virt_to_page(buffer + i * PAGE_SIZE);
			set_page_count(spa, orig);
		}
	}
}
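
Examples #6 and #7 are complementary: one mirrors the head page's refcount onto the tail pages of a block, the other clears those counts again. A minimal pairing sketch (hypothetical caller; do_io_on_buffer is a placeholder, not part of the original source):

	/* Hypothetical caller: reference the tail pages of an order-2 block
	 * for the duration of some I/O, then drop the extra references
	 * before handing the block back to the allocator. */
	void *buffer = (void *)__get_free_pages(GFP_KERNEL, 2);

	if (buffer) {
		add_individual_page_counts(buffer, 2);
		do_io_on_buffer(buffer, 4 * PAGE_SIZE);		/* placeholder */
		release_individual_page_counts(buffer, 2);
		free_pages((unsigned long)buffer, 2);
	}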
Example #8
File: init.c Project: sarnobat/knoppix
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
		pages++;
	}
	printk ("Freeing initrd memory: %dk freed\n", pages);
}
Example #9
void __init mem_init(void)
{
	unsigned long npages = (memory_end - memory_start) >> PAGE_SHIFT;
	unsigned long tmp;
#ifdef CONFIG_MMU
	unsigned long loop, pfn;
	int datapages = 0;
#endif
	int codek = 0, datak = 0;

	/* this will put all memory onto the freelists */
	totalram_pages = free_all_bootmem();

#ifdef CONFIG_MMU
	for (loop = 0 ; loop < npages ; loop++)
		if (PageReserved(&mem_map[loop]))
			datapages++;

#ifdef CONFIG_HIGHMEM
	for (pfn = num_physpages - 1; pfn >= num_mappedpages; pfn--) {
		struct page *page = &mem_map[pfn];

		ClearPageReserved(page);
		set_bit(PG_highmem, &page->flags);
		set_page_count(page, 1);
		__free_page(page);
		totalram_pages++;
	}
#endif

	codek = ((unsigned long) &_etext - (unsigned long) &_stext) >> 10;
	datak = datapages << (PAGE_SHIFT - 10);

#else
	codek = (_etext - _stext) >> 10;
	datak = 0; //(_ebss - _sdata) >> 10;
#endif

	tmp = nr_free_pages() << PAGE_SHIFT;
	printk("Memory available: %luKiB/%luKiB RAM, %luKiB/%luKiB ROM (%dKiB kernel code, %dKiB data)\n",
	       tmp >> 10,
	       npages << (PAGE_SHIFT - 10),
	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
	       rom_length >> 10,
	       codek,
	       datak
	       );

} /* end mem_init() */
Example #10
File: init.c Project: kzlin129/tt-gpl
/* Free up now-unused memory */
static void free_sec(unsigned long start, unsigned long end, const char *name)
{
	unsigned long cnt = 0;

	while (start < end) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		cnt++;
		start += PAGE_SIZE;
 	}
	if (cnt) {
		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
		totalram_pages += cnt;
	}
}
Example #11
/*
 * free the memory that was only required for initialisation
 */
void __init free_initmem(void)
{
#if defined(CONFIG_RAMKERNEL) && !defined(CONFIG_PROTECT_KERNEL)
	unsigned long start, end, addr;

	start = PAGE_ALIGN((unsigned long) &__init_begin);	/* round up */
	end   = ((unsigned long) &__init_end) & PAGE_MASK;	/* round down */

	/* next to check that the page we free is not a partial page */
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}

	printk("Freeing unused kernel memory: %ldKiB freed (0x%lx - 0x%lx)\n",
	       (end - start) >> 10, start, end);
#endif
} /* end free_initmem() */
Example #12
File: memory.c Project: TitaniumBoy/lin
void prom_free_prom_memory (void)
{
	struct prom_pmemblock *p;
	unsigned long freed = 0;
	unsigned long addr;

	for (p = pblocks; p->size != 0; p++) {
		if (p->type != MEMTYPE_PROM)
			continue;

		addr = p->base;
		while (addr < p->base + p->size) {
			ClearPageReserved(virt_to_page(__va(addr)));
			set_page_count(virt_to_page(__va(addr)), 1);
			free_page((unsigned long)__va(addr));
			addr += PAGE_SIZE;
			freed += PAGE_SIZE;
		}
	}
	printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
}
Example #13
static struct page * rmqueue(zone_t *zone, unsigned int order)
{
	free_area_t * area = zone->free_area + order;
	unsigned int curr_order = order;
	struct list_head *head, *curr;
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&zone->lock, flags);
	do {
		head = &area->free_list;
		curr = head->next;

		if (curr != head) {
			unsigned int index;

			page = list_entry(curr, struct page, list);
			if (BAD_RANGE(zone,page))
				BUG();
			list_del(curr);
			index = page - zone->zone_mem_map;
			if (curr_order != MAX_ORDER-1)
				MARK_USED(index, curr_order, area);
			zone->free_pages -= 1UL << order;

			page = expand(zone, page, index, order, curr_order, area);
			spin_unlock_irqrestore(&zone->lock, flags);

			set_page_count(page, 1);
			if (BAD_RANGE(zone,page))
				BUG();
			if (PageLRU(page))
				BUG();
			if (PageActive(page))
				BUG();
			return page;	
		}
		curr_order++;
		area++;
	} while (curr_order < MAX_ORDER);
	spin_unlock_irqrestore(&zone->lock, flags);

	return NULL;
}
Example #14
File: memory.c Project: houzhenggang/MT
void __init
prom_free_prom_memory (void)
{
	int i;
	unsigned long freed = 0;
	unsigned long addr;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
			continue;

		addr = boot_mem_map.map[i].addr;
		while (addr < boot_mem_map.map[i].addr
			      + boot_mem_map.map[i].size) {
			ClearPageReserved(virt_to_page(__va(addr)));
			set_page_count(virt_to_page(__va(addr)), 1);
			free_page((unsigned long)__va(addr));
			addr += PAGE_SIZE;
			freed += PAGE_SIZE;
		}
	}
	printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
}
Example #15
File: init.c Project: sarnobat/knoppix
void
free_initmem()
{
#ifdef CONFIG_RAMKERNEL
	unsigned long addr;
	extern char __init_begin, __init_end;
/*
 *	the following code should be cool even if these sections
 *	are not page aligned.
 */
	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
	/* next to check that the page we free is not a partial page */
	for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
			(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
			(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
			(int)(addr - PAGE_SIZE));
#endif
}
Example #16
File: mem.c Project: 1x23/unifi-gpl
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;

	/* Initialize the vDSO */
	vdso_init();
}
Example #17
static void __free_pages_ok (struct page *page, unsigned int order)
{
	unsigned long index, page_idx, mask, flags;
	free_area_t *area;
	struct page *base;
	zone_t *zone;

	/*
	 * Subtle. We do not want to test this in the inlined part of
	 * __free_page() - it's a rare condition and just increases
	 * cache footprint unnecessarily. So we do an 'incorrect'
	 * decrement on page->count for reserved pages, but this part
	 * makes it safe.
	 */
	if (PageReserved(page))
		return;

	/*
	 * Yes, think what happens when other parts of the kernel take 
	 * a reference to a page in order to pin it for io. -ben
	 */
	if (PageLRU(page)) {
		if (unlikely(in_interrupt())) {
			unsigned long flags;

			spin_lock_irqsave(&free_pages_ok_no_irq_lock, flags);
			page->next_hash = free_pages_ok_no_irq_head;
			free_pages_ok_no_irq_head = page;
			page->index = order;
	
			spin_unlock_irqrestore(&free_pages_ok_no_irq_lock, flags);
	
			schedule_task(&free_pages_ok_no_irq_task);
			return;
		}
		
		lru_cache_del(page);
	}

	if (page->buffers)
		BUG();
	if (page->mapping)
		BUG();
	if (!VALID_PAGE(page))
		BUG();
	if (PageLocked(page))
		BUG();
	if (PageActive(page))
		BUG();

	ClearPageReferenced(page);
	ClearPageDirty(page);

	/* de-reference all the pages for this order */
	for (page_idx = 1; page_idx < (1 << order); page_idx++)
		set_page_count(&page[page_idx], 0);

	if (current->flags & PF_FREE_PAGES)
		goto local_freelist;
 back_local_freelist:

	zone = page_zone(page);

	mask = (~0UL) << order;
	base = zone->zone_mem_map;
	page_idx = page - base;
	if (page_idx & ~mask)
		BUG();
	index = page_idx >> (1 + order);

	area = zone->free_area + order;

	spin_lock_irqsave(&zone->lock, flags);

	zone->free_pages -= mask;

	while (mask + (1 << (MAX_ORDER-1))) {
		struct page *buddy1, *buddy2;

		if (area >= zone->free_area + MAX_ORDER)
			BUG();
		if (!__test_and_change_bit(index, area->map))
			/*
			 * the buddy page is still allocated.
			 */
			break;
		/*
		 * Move the buddy up one level.
		 * This code is taking advantage of the identity:
		 * 	-mask = 1+~mask
		 */
		buddy1 = base + (page_idx ^ -mask);
		buddy2 = base + page_idx;
		if (BAD_RANGE(zone,buddy1))
			BUG();
		if (BAD_RANGE(zone,buddy2))
			BUG();

		list_del(&buddy1->list);
		mask <<= 1;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);

	spin_unlock_irqrestore(&zone->lock, flags);
	return;

 local_freelist:
	if (current->nr_local_pages)
		goto back_local_freelist;
	if (in_interrupt())
		goto back_local_freelist;		

	list_add(&page->list, &current->local_pages);
	page->index = order;
	current->nr_local_pages++;
}
Example #18
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	long           rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	balloon_lock(flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	rc = HYPERVISOR_memory_op(
		XENMEM_populate_physmap, &reservation);
	if (rc < nr_pages) {
		int ret;
		/* We hit the Xen hard limit: reprobe. */
		set_xen_guest_handle(reservation.extent_start, frame_list);
		reservation.nr_extents   = rc;
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
				&reservation);
		BUG_ON(ret != rc);
		hard_limit = current_pages + rc - driver_pages;
		goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		/* Update P->M and M->P tables. */
		set_phys_to_machine(pfn, frame_list[i]);

#ifdef CONFIG_XEN
		xen_machphys_update(frame_list[i], pfn);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}
#endif
		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}

	current_pages += nr_pages;
	totalram_pages = current_pages;

 out:
	balloon_unlock(flags);

	return 0;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	void          *v;
	int            need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		if (!PageHighMem(page)) {
			v = phys_to_virt(pfn << PAGE_SHIFT);
			scrub_pages(v, 1);
#ifdef CONFIG_XEN
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)v, __pte_ma(0), 0);
			BUG_ON(ret);
#endif
		}
#ifdef CONFIG_XEN_SCRUB_PAGES
		else {
			v = kmap(page);
			scrub_pages(v, 1);
			kunmap(page);
		}
#endif
	}

#ifdef CONFIG_XEN
	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();
#endif

	balloon_lock(flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	current_pages -= nr_pages;
	totalram_pages = current_pages;

	balloon_unlock(flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(void *unused)
{
	int need_sleep = 0;
	long credit;

	down(&balloon_mutex);

	do {
		credit = current_target() - current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	up(&balloon_mutex);
}
Example #19
File: memory.c Project: erdoukki/linux-xtr
unsigned long __init prom_free_prom_memory(void)
{
    unsigned long freed = 0;
    unsigned long addr;
    int i;
#ifdef	CONFIG_REALTEK_RECLAIM_BOOT_MEM
    unsigned long dest;
    struct page *page;
    int count;
#endif

    for (i = 0; i < boot_mem_map.nr_map; i++) {
        if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
            continue;

        addr = boot_mem_map.map[i].addr;
        while (addr < boot_mem_map.map[i].addr
                + boot_mem_map.map[i].size) {
            ClearPageReserved(virt_to_page(__va(addr)));
            set_page_count(virt_to_page(__va(addr)), 1);
            free_page((unsigned long)__va(addr));
            addr += PAGE_SIZE;
            freed += PAGE_SIZE;
        }
    }
    printk("Freeing prom memory: %ldkb freed\n", freed >> 10);
#ifdef	CONFIG_REALTEK_RECLAIM_BOOT_MEM
    if (!is_mars_cpu()) {
        // venus or neptune
        addr = F_ADDR1;
        if (debug_flag)
            dest = T_ADDR1;
        else
            dest = T_ADDR2;
    } else {
        // mars
        addr = F_ADDR2;
        if (debug_flag)
            dest = T_ADDR1;
        else
            dest = T_ADDR3;
    }
    printk("Reclaim bootloader memory from %x to %x\n", addr, dest);
    count = 0;
    while (addr < dest) {
        page = virt_to_page(addr);
        /*
         * printk("mem_map: %x, page: %x, size: %d\n",
         *        (int)mem_map, (int)page, sizeof(struct page));
         * if (PageReserved(page) != 1)
         *         BUG();
         * if (page->_count.counter != -1)
         *         BUG();
         */
        count++;

        __ClearPageReserved(page);
        set_page_count(page, 1);
        __free_page(page);

        addr += 0x1000; // 4KB
    }
    totalram_pages += count;
#endif
    return freed;
}
Example #20
/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}
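
For context, the page allocator applies this helper to a page as it leaves the buddy free lists; a simplified, hypothetical call-site sketch (hand_out_page is illustrative, not the exact upstream code):

	static void hand_out_page(struct page *page)
	{
		/* page comes straight off the free lists with _count == 0;
		 * it leaves here with _count == 1 */
		set_page_refcounted(page);
	}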
Example #21
void gnttab_reset_grant_page(struct page *page)
{
	set_page_count(page, 1);
	reset_page_mapcount(page);
}