Example #1
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t oldpage = *pte;
		pte_clear(pte);
		if (offset >= high_memory || PageReserved(mem_map+MAP_NR(offset)))
 			set_pte(pte, mk_pte(offset, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		offset += PAGE_SIZE;
		pte++;
	} while (address < end);
}
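For context, remap_pte_range() is only ever called for one PTE table at a time; a PMD-level helper walks the requested range and hands each chunk down. The sketch below follows the shape that helper had in kernels of this vintage; it is reconstructed from memory, so treat names such as pte_alloc() and the exact clamping as an approximation rather than a verbatim quote.

static inline int remap_pmd_range(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long offset, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	offset -= address;
	do {
		/* allocate (or look up) the PTE table covering this address */
		pte_t * pte = pte_alloc(pmd, address);
		if (!pte)
			return -ENOMEM;
		/* fill one PTE table's worth of mappings */
		remap_pte_range(pte, address, end - address, address + offset, prot);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}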
Example #2
/*
 * transfer all the memory from the bootmem allocator to the runtime allocator
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	if (!mem_map)
		BUG();

#define START_PFN	(contig_page_data.bdata->node_min_pfn)
#define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)

	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
	high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		if (PageReserved(&mem_map[tmp]))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_stext;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO
	       "Memory: %luk/%luk available"
	       " (%dk kernel code, %dk reserved, %dk data, %dk init,"
	       " %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT - 10));
}
Example #3
static int _phymem_pages_proc_show(struct seq_file *m, void *v)
{
	int free = 0, total = 0, reserved = 0;
	int other = 0, shared = 0, cached = 0, slab = 0, node, i;

	struct meminfo * mi = &meminfo;
	for_each_bank (i,mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;
		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);
		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;
		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (page_count(page) > 1)
				shared++;
			else if (!page_count(page))
				free++;
			else
				other++;
			page++;
		} while (page < end);
	}

	seq_printf(m, "pages of RAM       %d\n", total);
	seq_printf(m, "free pages         %d\n", free);
	seq_printf(m, "reserved pages     %d\n", reserved);
	seq_printf(m, "slab pages         %d\n", slab);
	seq_printf(m, "pages shared       %d\n", shared);
	seq_printf(m, "pages swap cached  %d\n", cached);
	seq_printf(m, "other pages        %d\n", other);
	return 0;
}
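The show routine above still has to be registered with procfs before it produces any output. A minimal sketch of the usual single_open() wiring for kernels that still take a struct file_operations (pre-5.6) follows; the /proc entry name and the *_open/_fops/_init identifiers are invented here for illustration only.

static int phymem_pages_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, _phymem_pages_proc_show, NULL);
}

static const struct file_operations phymem_pages_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= phymem_pages_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init phymem_pages_proc_init(void)
{
	/* creates /proc/phymem_pages (name chosen arbitrarily here) */
	proc_create("phymem_pages", 0, NULL, &phymem_pages_proc_fops);
	return 0;
}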
Example #4
void si_meminfo(struct sysinfo *val)
{
	int i;

	i = MAP_NR(high_memory);
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0)  {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!atomic_read(&mem_map[i].count))
			continue;
		val->sharedram += atomic_read(&mem_map[i].count) - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
}
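In this older version every field is converted to bytes before returning (the trailing <<= PAGE_SHIFT), so a caller only needs a shift to get the unit it wants. A hypothetical caller, for illustration only (print_mem_summary is not a real kernel function):

void print_mem_summary(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/* the fields are in bytes here, so >> 10 yields kilobytes */
	printk("Mem: total %luk, free %luk, shared %luk, buffers %luk\n",
	       si.totalram >> 10, si.freeram >> 10,
	       si.sharedram >> 10, si.bufferram >> 10);
}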
Example #5
static void
default_init_memmap(struct Page *base, size_t n) {
    assert(n > 0);
    // n is the number of pages in this block
    struct Page *p = base;
    for (; p != base + n; p++) {
        assert(PageReserved(p));
        p->flags = p->property = 0;
        SetPageProperty(p);
        set_page_ref(p, 0); // reset the reference count to 0
        // Insert before free_list; since this is a circular doubly-linked
        // list, that effectively appends the page at the tail of the list.
        list_add_before(&free_list, &(p->page_link));
    }
    // base is the head of the block. property is only meaningful for the
    // Page at the start address of a free block (it records how many free
    // pages the block contains); for every other page it stays 0.
    base->property = n; // n usable pages in this block
    SetPageProperty(base); // mark base as the head of the block
    nr_free += n; // the number of free pages grows by n
    //list_add(&free_list, &(base->page_link)); // add at the head of free_list
}
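Most of the ucore snippets on this page use le2page() to get from the list node embedded in a struct Page back to the page itself. It is a container_of()-style macro; a sketch of the usual definition (the real one lives in ucore's memlayout.h and goes through to_struct()) is:

/* Convert a list_entry_t pointer back to the struct Page that embeds it;
 * 'member' names the list_entry_t field inside struct Page (page_link here). */
#define le2page(le, member) \
    ((struct Page *)((char *)(le) - offsetof(struct Page, member)))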
Example #6
void si_meminfo(struct sysinfo *val)
{
	unsigned long i;

	i = (high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
	val->totalram = 0;
	val->sharedram = 0;
	val->freeram = nr_free_pages << PAGE_SHIFT;
	val->bufferram = buffermem;
	while (i-- > 0) {
		if (PageReserved(mem_map+i))
			continue;
		val->totalram++;
		if (!mem_map[i].count)
			continue;
		val->sharedram += mem_map[i].count - 1;
	}
	val->totalram <<= PAGE_SHIFT;
	val->sharedram <<= PAGE_SHIFT;
	return;
}
Example #7
static void default_free_pages(struct Page *base, size_t n) {
	assert(n > 0);
	struct Page *p = base;
	for (; p != base + n; p++) {
		assert(!PageReserved(p) && !PageProperty(p));
		p->flags = 0;
		set_page_ref(p, 0);
	}
	base->property = n;
	SetPageProperty(base);
	list_entry_t *le = list_next(&free_list);
	while (le != &free_list) {
		p = le2page(le, page_link);
		le = list_next(le);
		if (base + base->property == p) {
			base->property += p->property;
			ClearPageProperty(p);
			list_del(&(p->page_link));
		}
		else if (p + p->property == base) {
			p->property += base->property;
			ClearPageProperty(base);
			base = p;
			list_del(&(p->page_link));
		}
	}
	nr_free += n;
	//list_add(&free_list, &(base->page_link));
	// insert the (possibly merged) block into free_list in address order
	list_entry_t *curr = &free_list;
	while ((curr = list_next(curr)) != &free_list) {
		struct Page *currp = le2page(curr, page_link);
		if (base < currp) {	// compare against base, not the stale loop variable p
			list_add_before(curr, &(base->page_link));
			break;
		}
	}
	if (curr == &free_list) {
		list_add_before(curr, &(base->page_link));
	}
}
Example #8
static void
default_free_pages(struct Page *base, size_t n) {
    assert(n > 0);
    struct Page *p = base;
    for (; p != base + n; p ++) {
        assert(PageReserved(p) && !PageProperty(p));
        p->flags = 0;
        set_page_ref(p, 0);
    }
    base->property = n;
    SetPageProperty(base);
    struct Page* tempPG = base;
    for (; tempPG != base + n; tempPG++) {
        ClearPageReserved(tempPG);
        SetPageProperty(tempPG);
    }
    list_entry_t *le = list_next(&free_list);
    while (le != &free_list) {
        p = le2page(le, page_link);
        le = list_next(le);
        if (base + base->property == p) {
            base->property += p->property;
            p->property = 0;
            list_del(&(p->page_link));
        }
        else if (p + p->property == base) {
            p->property += base->property;
            base->property = 0;
            list_del(&(p->page_link));
            base = p;
        }
    }
    nr_free += n;
    list_entry_t* rank = &free_list;
    while((rank=list_next(rank)) != &free_list) {
        if(le2page(rank, page_link) > base)
            break;
    }
    list_add_before(rank, &(base->page_link));
}
Example #9
File: init.c  Project: cilynx/dd-wrt
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
		struct page *map = n->node_mem_map - n->node_start_pfn;

		for_each_nodebank (i,mi,node) {
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = __phys_to_pfn(mi->bank[i].start);
			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);

			page = map + pfn1;
			end  = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
Example #10
void mark_dirty_kiobuf(struct kiobuf *iobuf, int bytes)
{
	int index, offset, remaining;
	struct page *page;
	
	index = iobuf->offset >> PAGE_SHIFT;
	offset = iobuf->offset & ~PAGE_MASK;
	remaining = bytes;
	if (remaining > iobuf->length)
		remaining = iobuf->length;
	
	while (remaining > 0 && index < iobuf->nr_pages) {
		page = iobuf->maplist[index];
		
		if (!PageReserved(page))
			set_page_dirty(page);

		remaining -= (PAGE_SIZE - offset);
		offset = 0;
		index++;
	}
}
Example #11
static void default_free_pages(struct Page* base, size_t n) {
    assert(0 < n);
    struct Page* p = base;
    for (; p != base + n; ++p) {
        assert(!PageReserved(p) && !PageProperty(p));
        p->flags = 0;
        set_page_ref(p, 0);
    }
    base->property = n;
    SetPageProperty(base);
    list_entry_t* le = list_next(&free_list);
    // Find insertion position, stopping at the list head
    while (le != &free_list && le2page(le, page_link) < base) {
        le = list_next(le);
    }
    le = list_prev(le);
    // Insert into list
    list_add(le, &(base->page_link));
    // Merge previous one, only if a previous block actually exists
    if (le != &free_list) {
        p = le2page(le, page_link);
        if (p + p->property == base) {
            p->property += base->property;
            ClearPageProperty(base);
            base->property = 0;
            list_del(&(base->page_link));
            base = p;
        }
    }
    // Merge next one
    if (list_next(&(base->page_link)) != &free_list) {
        p = le2page(list_next(&(base->page_link)), page_link);
        if (base + base->property == p) {
            base->property += p->property;
            ClearPageProperty(p);
            p->property = 0;
            list_del(&(p->page_link));
        }
    }
    nr_free += n;
}
Example #12
/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 */
int Jcmd_mem(char *arg1, char *arg2) {
	unsigned long order, flags;
	unsigned long total = 0;

	spin_lock_irqsave(&free_area_lock, flags);
	for (order = 0; order < NR_MEM_LISTS; order++) {
		struct page * tmp;
		unsigned long nr = 0;
		for (tmp = free_mem_area[order].next;
				tmp != memory_head(free_mem_area+order); tmp = tmp->next) {
			nr++;
		}
		total += nr << order;
		ut_printf("%d(%d): count:%d  static count:%d total:%d (%dM)\n", order,1<<order, nr,
				free_mem_area[order].stat_count, (nr << order), ((nr << order)*PAGE_SIZE)/(1024*1024));
	}
	spin_unlock_irqrestore(&free_area_lock, flags);
	ut_printf("total Free pages = %d (%dM) Actual pages: %d (%dM) pagecachesize: %dM , freepages:%d\n", total, (total * 4) / 1024,g_stat_mem_size/PAGE_SIZE,g_stat_mem_size/(1024*1024),g_pagecache_size/(1024*1024),g_nr_free_pages);

	int slab=0;
	int referenced=0;
	int reserved=0;
	int dma=0;
	unsigned long va_end=(unsigned long)__va(g_phy_mem_size);

	page_struct_t *p;
	p = g_mem_map + MAP_NR(va_end);
	do {
		--p;
		if (PageReserved(p)) reserved++;
		if (PageDMA(p)) dma++;
		if (PageReferenced(p))referenced++;
		if (PageSlab(p)) slab++;
	} while (p > g_mem_map);
	ut_printf(" reserved :%d(%dM) referenced:%d dma:%d slab:%d  stat_allocs:%d stat_frees: %d\n\n",reserved,(reserved*PAGE_SIZE)/(1024*1024),referenced,dma,slab,stat_allocs,stat_frees);
	if ((arg1 != 0) && (ut_strcmp(arg1,"all")==0))
		Jcmd_jslab(0,0);
	return 1;
}
Example #13
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
Example #14
void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		struct page *page, *end;

		page = NODE_MEM_MAP(node);
		end  = page + NODE_DATA(node)->node_spanned_pages;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
Example #15
static int __init balloon_init(void)
{
	unsigned long pfn;
	struct page *page;

	if (!xen_pv_domain())
		return -ENODEV;

	pr_info("xen_balloon: Initialising balloon driver.\n");

	balloon_stats.current_pages = min(xen_start_info->nr_pages, max_pfn);
	totalram_pages   = balloon_stats.current_pages;
	balloon_stats.target_pages  = balloon_stats.current_pages;
	balloon_stats.balloon_low   = 0;
	balloon_stats.balloon_high  = 0;
	balloon_stats.driver_pages  = 0UL;
	balloon_stats.hard_limit    = ~0UL;

	init_timer(&balloon_timer);
	balloon_timer.data = 0;
	balloon_timer.function = balloon_alarm;

	register_balloon(&balloon_sysdev);

	/* Initialise the balloon with excess memory space. */
	for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
		page = pfn_to_page(pfn);
		if (!PageReserved(page))
			balloon_append(page);
	}

	target_watch.callback = watch_target;
	xenstore_notifier.notifier_call = balloon_init_watcher;

	register_xenstore_notifier(&xenstore_notifier);

	return 0;
}
Example #16
static struct Page *
default_alloc_pages(size_t n) {
    assert(n > 0);
    if (n > nr_free) {
        return NULL;
    }
    struct Page *page = NULL;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        if (PageProperty(p) && !PageReserved(p) && p->property >= n) {
            page = p;
            break;
        }
    }
    if (page != NULL) {
        le = page->page_link.prev;
        list_del(&(page->page_link));
        if (page->property > n) {
            struct Page *p = page + n;
            ClearPageReserved(p);
            SetPageProperty(p);
            p->property = page->property - n;
            list_add(le, &(p->page_link));
        }
        struct Page *p = page;
        for (; p < page + n; ++p) {
            SetPageReserved(p);
            ClearPageProperty(p);
            p->property = 0;
            list_init(&(p->page_link));
        }
        nr_free -= n;
    }
    return page;
}
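In ucore this callback is not invoked directly; callers go through alloc_pages()/free_pages(), which dispatch to the active pmm_manager. A rough usage sketch under that assumption (pmm_alloc_demo is a made-up name and error handling is trimmed):

static void pmm_alloc_demo(void) {
    /* grab four physically contiguous page frames and release them again */
    struct Page *p = alloc_pages(4);    /* dispatches to default_alloc_pages(4) */
    if (p != NULL) {
        void *kva = page2kva(p);        /* kernel virtual address of the block  */
        memset(kva, 0, 4 * PGSIZE);     /* the frames are now safe to use       */
        free_pages(p, 4);               /* dispatches to default_free_pages()   */
    }
}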
Example #17
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page *page;
		pte_t oldpage;
		oldpage = ptep_get_and_clear(pte);

		page = virt_to_page(__va(phys_addr));
		if ((!VALID_PAGE(page)) || PageReserved(page))
 			set_pte(pte, mk_pte_phys(phys_addr, prot));
		forget_pte(oldpage);
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
Example #18
/*buddy_init_memmap - Set the detected physical memory block allocable*/
static void buddy_init_memmap(struct Page* page, size_t n){
    assert(n > 0);
    if(!mem_init){
		mem = page2kva(page);
		uint32_t mem_sz = (shl(1, MAX_ORD-1)-1) * sizeof(Buddy);
		uint32_t pgnum = ROUNDUP(mem_sz, PGSIZE) / PGSIZE;
		page += pgnum;
		n -= pgnum;
		mem_init = 1;
		buddy_fill(0);
    }
    struct Page *p = page;
    nr_free += n;
    uint32_t idx;
    for (; p != page + n; p++) {
        assert(PageReserved(p));
        p->flags = p->property = 0;
		idx = page2buddy(p, 2);
		set_buddy(mem[idx], 2, 1, 1);
		update(idx);
        set_page_ref(p, 0);
    }
}
Example #19
/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int can_share_swap_page(struct page *page)
{
	int retval = 0;

	if (!PageLocked(page))
		BUG();
	switch (page_count(page)) {
	case 3:
		if (!page->buffers)
			break;
		/* Fallthrough */
	case 2:
		if (!PageSwapCache(page))
			break;
		retval = exclusive_swap_page(page);
		break;
	case 1:
		if (PageReserved(page))
			break;
		retval = 1;
	}
	return retval;
}
Example #20
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		if (PageReserved(page) || !put_page_testzero(page))
			continue;

		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestClearPageLRU(page))
			del_page_from_lru(zone, page);
		if (page_count(page) == 0) {
			if (!pagevec_add(&pages_to_free, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_free(&pages_to_free);
				pagevec_reinit(&pages_to_free);
				zone = NULL;	/* No lock is held */
			}
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
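release_pages() is normally reached through a pagevec helper rather than called one page at a time. The sketch below mirrors what __pagevec_release() does in kernels of this vintage; the function name used here is hypothetical:

static void my_pagevec_release(struct pagevec *pvec)
{
	if (!pagevec_count(pvec))
		return;
	/* flush the per-CPU LRU-add caches so the pages really sit on the LRU */
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}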
Example #21
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
{
	pte_t * pte;
	unsigned long pmd_end;

	if (pmd_none(*dir))
		return count;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return count;
	}
	
	pte = pte_offset(dir, address);
	
	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	do {
		if (pte_present(*pte)) {
			struct page *page = pte_page(*pte);

			if (VALID_PAGE(page) && !PageReserved(page)) {
				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
				if (!count) {
					address += PAGE_SIZE;
					break;
				}
			}
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	mm->swap_address = address;
	return count;
}
Example #22
static void
default_free_pages(struct Page *base, size_t n) {
    assert(n > 0);
    struct Page* p = base;
    for (; p != base + n; p ++) {
        assert(!PageReserved(p) && !PageProperty(p));
        p->flags = 0;
        set_page_ref(p, 0);
    }
    base->property= n;
    SetPageProperty(base);
    list_entry_t* le = list_next(&free_list);
    while (le != &free_list)	{
    	p = le2page(le, page_link);
    	if (p > base)
    		break ;
    	le = list_next(le);
    }

	list_add_before(le, &(base->page_link));
	// merge with the following block, if the insertion point was a real block
	if (le != &free_list) {
		p = le2page(le, page_link);
		if (base + base->property == p) {
			base->property += p->property;
			ClearPageProperty(p);
			list_del(&(p->page_link));
		}
	}
	// merge with the preceding block, if there is one and it is adjacent
	list_entry_t *prev = list_prev(&(base->page_link));
	if (prev != &free_list) {
		struct Page *pp = le2page(prev, page_link);
		if (pp + pp->property == base) {
			pp->property += base->property;
			ClearPageProperty(base);
			list_del(&(base->page_link));
		}
	}

	nr_free += n;
}
Example #23
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
	if (pte_none(page))
		return;
	if (pte_present(page)) {
		unsigned long pfn = pte_pfn(page);
		struct page *ptpage;
		if (!pfn_valid(pfn))
			return;
		ptpage = pfn_to_page(pfn);
		if (PageReserved(ptpage))
			return;
		page_cache_release(ptpage);
		return;
	}
	swap_free(pte_to_swp_entry(page));
#else
	if (!pte_none(page)) {
		printk("forget_pte: old mapping existed!\n");
		BUG();
	}
#endif
}
Example #24
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone (zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;
				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
Example #25
static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
{
	unsigned long offset;
	pte_t * ptep;
	int freed = 0;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	ptep = pte_offset(pmd, address);
	offset = address & ~PMD_MASK;
	if (offset + size > PMD_SIZE)
		size = PMD_SIZE - offset;
	size &= PAGE_MASK;
	for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
		pte_t pte = *ptep;
		if (pte_none(pte))
			continue;
		if (pte_present(pte)) {
			struct page *page = pte_page(pte);
			if (VALID_PAGE(page) && !PageReserved(page))
				freed ++;
			/* This will eventually call __free_pte on the pte. */
			tlb_remove_page(tlb, ptep, address + offset);
		} else {
			free_swap_and_cache(pte_to_swp_entry(pte));
			pte_clear(ptep);
		}
	}

	return freed;
}
Example #26
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
Example #27
/*
 * mem_init - initializes memory
 *
 * Frees up bootmem
 * Calculates and displays memory available/used
 */
void __init mem_init(void)
{
	int codesize, datasize, initsize, reserved_pages, free_pages;
	int tmp;

	high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);

	free_all_bootmem();

	/* count all reserved pages [kernel code/data/mem_map..] */
	reserved_pages = 0;
	for (tmp = 0; tmp < max_mapnr; tmp++)
		if (PageReserved(mem_map + tmp))
			reserved_pages++;

	/* XXX: nr_free_pages() is equivalent */
	free_pages = max_mapnr - reserved_pages;

	/*
	 * For the purpose of display below, split the "reserve mem"
	 * kernel code/data is already shown explicitly,
	 * Show any other reservations (mem_map[ ] et al)
	 */
	reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
								PAGE_SHIFT);

	codesize = _etext - _text;
	datasize = _end - _etext;
	initsize = __init_end - __init_begin;

	pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
		PAGES_TO_MB(free_pages),
		TO_MB(arc_mem_sz),
		TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
		PAGES_TO_KB(reserved_pages));
}
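The conversion helpers in the final pr_info() are plain shift macros from the ARC headers; a hedged reconstruction of what they expand to:

#define TO_KB(bytes)            ((bytes) >> 10)
#define TO_MB(bytes)            (TO_KB(bytes) >> 10)
#define PAGES_TO_KB(n_pages)    ((n_pages) << (PAGE_SHIFT - 10))
#define PAGES_TO_MB(n_pages)    (PAGES_TO_KB(n_pages) >> 10)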
Example #28
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	int codek = 0;
	int datapages = 0;
	unsigned long tmp;
#ifdef CONFIG_COLDFIRE
	extern char _etext, _stext, __data_start;
#else
	extern char _etext, _romvec, __data_start;
#endif
	unsigned long len = end_mem-(unsigned long)&__data_start;

	/* Bloody watchdog... */
#ifdef CONFIG_SHGLCORE
       (*((volatile unsigned char*)0xFFFA21)) = 128 | 64/* | 32 | 16*/;
       (*((volatile unsigned short*)0xFFFA24)) &= ~512;
       (*((volatile unsigned char*)0xFFFA27)) = 0x55;
       (*((volatile unsigned char*)0xFFFA27)) = 0xAA;
       
       /*printk("Initiated watchdog, SYPCR = %x\n", *(volatile char*)0xFFFA21);*/
#endif	                

#ifdef DEBUG
	printk("Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
#endif

	end_mem &= PAGE_MASK;
	high_memory = end_mem;

	start_mem = PAGE_ALIGN(start_mem);
	while (start_mem < high_memory)
       	{
		clear_bit(PG_reserved, &mem_map[MAP_NR(start_mem)].flags);
		start_mem += PAGE_SIZE;
	}

	for (tmp = PAGE_OFFSET ; tmp < end_mem ; tmp += PAGE_SIZE) {

#ifdef MAX_DMA_ADDRESS
		if (VTOP (tmp) >= MAX_DMA_ADDRESS)
			clear_bit(PG_DMA, &mem_map[MAP_NR(tmp)].flags);
#endif

		if (PageReserved(mem_map+MAP_NR(tmp))) {
			datapages++;
			continue;
		}
		mem_map[MAP_NR(tmp)].count = 1;
#ifdef CONFIG_BLK_DEV_INITRD
		if (!initrd_start ||
		    (tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
#endif
			free_page(tmp);
	}
	
#ifdef CONFIG_COLDFIRE
	codek = (&_etext - &_stext) >> 10;
#else
	codek = (&_etext - &_romvec) >> 10;
#endif
	tmp = nr_free_pages << PAGE_SHIFT;
/*	printk("Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel data, %dk code)\n",
	       tmp >> 10,
	       len >> 10,
	       (rom_length > 0) ? ((rom_length >> 10) - codek) : 0,
	       rom_length >> 10,
	       datapages << (PAGE_SHIFT-10),
	       codek
	       );*/
}
Example #29
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}
Example #30
/*
 * Must not be called with IRQs off.  This should only be used on the
 * slow path.
 *
 * Copy a foreign granted page to local memory.
 */
int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
{
	struct gnttab_unmap_and_replace unmap;
	mmu_update_t mmu;
	struct page *page;
	struct page *new_page;
	void *new_addr;
	void *addr;
	paddr_t pfn;
	maddr_t mfn;
	maddr_t new_mfn;
	int err;

	page = *pagep;
	if (!get_page_unless_zero(page))
		return -ENOENT;

	err = -ENOMEM;
	new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!new_page)
		goto out;

	new_addr = page_address(new_page);
	addr = page_address(page);
	copy_page(new_addr, addr);

	pfn = page_to_pfn(page);
	mfn = pfn_to_mfn(pfn);
	new_mfn = virt_to_mfn(new_addr);

	write_seqlock_bh(&gnttab_dma_lock);

	/* Make seq visible before checking page_mapped. */
	smp_mb();

	/* Has the page been DMA-mapped? */
	if (unlikely(page_mapped(page))) {
		write_sequnlock_bh(&gnttab_dma_lock);
		put_page(new_page);
		err = -EBUSY;
		goto out;
	}

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		set_phys_to_machine(pfn, new_mfn);

	gnttab_set_replace_op(&unmap, (unsigned long)addr,
			      (unsigned long)new_addr, ref);

	err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
					&unmap, 1);
	BUG_ON(err);
	BUG_ON(unmap.status != GNTST_okay);

	write_sequnlock_bh(&gnttab_dma_lock);

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);

		mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
		mmu.val = pfn;
		err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
		BUG_ON(err);
	}

	new_page->mapping = page->mapping;
	new_page->index = page->index;
	set_bit(PG_foreign, &new_page->flags);
	if (PageReserved(page))
		SetPageReserved(new_page);
	*pagep = new_page;

	SetPageForeign(page, gnttab_page_free);
	page->mapping = NULL;

out:
	put_page(page);
	return err;
}