Example #1
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
	unsigned long pfn;
	struct page *page;
	pg_data_t *pgdat;

	pgdat = NODE_DATA(page_cgroup_array_id(pc));
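	/* pc's offset within this node's page_cgroup array equals its pfn
	 * offset from node_start_pfn, so the lookup is simple arithmetic. */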
	pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
	page = pfn_to_page(pfn);
	VM_BUG_ON(pc != lookup_page_cgroup(page));
	return page;
}
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

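	/* One page_cgroup entry is allocated for every page the node spans. */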
	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
Example #3
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base;
	unsigned long table_size;
	unsigned long nr_pages;

	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	page_cgroup_lock_init(base, nr_pages);
	return 0;
}
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;
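	/* Bootmem only tracks lowmem, so clamp the node's end to max_low_pfn. */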
#ifdef CONFIG_HIGHMEM
	if (end_pfn > max_low_pfn)
		end_pfn = max_low_pfn;
#endif

	total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			unsigned long size = reg->size;

#ifdef CONFIG_HIGHMEM
			/* ...but not highmem */
			if (PFN_DOWN(reg->base) >= highstart_pfn)
				continue;

			if (PFN_UP(reg->base + size) > highstart_pfn)
				size = (highstart_pfn - PFN_DOWN(reg->base))
				       << PAGE_SHIFT;
#endif

			reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
		}
	}
}
Example #5
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

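		/* Track the highest directly-mapped address seen across nodes. */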
		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
Example #6
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long bootmap_size, bootmap;

	bootmap_size = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
	bootmap = find_e820_area(0, end_pfn << PAGE_SHIFT, bootmap_size);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
	e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
void prio_sched_for_all_threads(struct sched_t* sched, void(*func)(thread_t*))
{
	int i;
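	/* Walk each per-priority queue in order, applying func to every thread. */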
	for (i = 0; i < LOWEST_PRIO; ++i)
	{
		node_t* node = QUEUE_HEAD(SCHED_QUEUE(sched)[i]);
		while (node != NULL)
		{
			func(NODE_DATA(node));
			node = NODE_NEXT(node);
		}
	}
}
Example #8
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
    unsigned long bootmap_pages;
    unsigned long start_pfn, end_pfn;
    unsigned long bootmem_paddr;

    /* Don't allow bogus node assignment */
    BUG_ON(nid > MAX_NUMNODES || nid <= 0);

    start_pfn = start >> PAGE_SHIFT;
    end_pfn = end >> PAGE_SHIFT;

    pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                     PAGE_KERNEL);

    lmb_add(start, end - start);

    __add_active_range(nid, start_pfn, end_pfn);

    /* Node-local pgdat */
    NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
                                         SMP_CACHE_BYTES, end));
    memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

    NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
    NODE_DATA(nid)->node_start_pfn = start_pfn;
    NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

    /* Node-local bootmap */
    bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
    bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
                                   PAGE_SIZE, end);
    init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                      start_pfn, end_pfn);

    free_bootmem_with_active_regions(nid, end_pfn);

    /* Reserve the pgdat and bootmap space with the bootmem allocator */
    reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
                         sizeof(struct pglist_data), BOOTMEM_DEFAULT);
    reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
                         bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

    /* It's up */
    node_set_online(nid);

    /* Kick sparsemem */
    sparse_memory_present_with_active_regions(nid);
}
// ARM10C 20131207
// min: 0x20000, max_low: 0x4f800
static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	// start_pfn: 0x20000, end_pfn: 0x4f800, end_pfn - start_pfn: 0x2f800
	// boot_pages: 0x6
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);

	// boot_pages << PAGE_SHIFT: 0x6000, L1_CACHE_BYTES: 64
	// __pfn_to_phys(0x4f800); 0x4f800000
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);

	// set pglist_data.bdata to the address of bootmem_node_data
	pgdat = NODE_DATA(0);

	// pgdat: ?, __phys_to_pfn(bitmap): ?, start_pfn: 0x20000, end_pfn: 0x4f800
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		// start: 0x20000
		unsigned long start = memblock_region_memory_base_pfn(reg);
		// end: 0xA0000
		unsigned long end = memblock_region_memory_end_pfn(reg);

		// end: 0xA0000, end_pfn: 0x4f800
		if (end >= end_pfn)
			// end: 0x4f800
			end = end_pfn;
		// start: 0x20000, end: 0x4f800
		if (start >= end)
			break;

		// __pfn_to_phys(0x20000): 0x20000000, (end - start) << PAGE_SHIFT: 0x2f800000
		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}
}
Example #10
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/* Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap(). */
	page_table_range_init(FIXADDR_START, FIXADDR_TOP, swapper_pg_dir);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

#ifdef CONFIG_SUPERH32
	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));

#ifdef CONFIG_29BIT
	/*
	 * Handle trivial transitions between cached and uncached
	 * segments, making use of the 1:1 mapping relationship in
	 * 512MB lowmem.
	 */
	cached_to_uncached = P2SEG - P1SEG;
#endif
#endif
}
Example #11
unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long min_pfn;
	int nid;
	mem_prof_t *mp;

	max_low_pfn = 0;
	min_low_pfn = -1;

	mem_prof_init();

	for_each_online_node(nid) {
		mp = &mem_prof[nid];
		NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
		NODE_DATA(nid)->bdata = &node_bdata[nid];
		min_pfn = mp->start_pfn;
		max_pfn = mp->start_pfn + mp->pages;
		bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
			mp->start_pfn, max_pfn);

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->pages));

		reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);

		if (max_low_pfn < max_pfn)
			max_low_pfn = max_pfn;

		if (min_low_pfn > min_pfn)
			min_low_pfn = min_pfn;
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
				INITRD_SIZE);
			initrd_start = INITRD_START ?
				INITRD_START + PAGE_OFFSET : 0;

			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
				initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				PFN_PHYS(max_low_pfn));

			initrd_start = 0;
		}
	}
#endif	/* CONFIG_BLK_DEV_INITRD */

	return max_low_pfn;
}
Example #12
void _list_menu(QSP_ARG_DECL  const Menu *mp)
{
	List *lp;
	Node *np;

	lp = MENU_LIST(mp);
	np = QLIST_HEAD(lp);
	while( np != NULL ){
		Command *cp;
		cp = (Command *) NODE_DATA(np);
		list_command(QSP_ARG  cp);
		np = NODE_NEXT(np);
	}
}
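/* Note (editorial, not from the scraped source): in the QuIP examples here,
 * NODE_DATA(np) is a linked-list accessor returning a node's payload pointer;
 * it is unrelated to the Linux kernel's NODE_DATA(nid) macro, which returns
 * the pg_data_t for a NUMA node in the surrounding examples. */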
Example #13
// ARM10C 20140329
// pgdat: &contig_page_data
struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
	// pgdat->node_id: (&contig_page_data)->node_id: 0
	// next_online_node((&contig_page_data)->node_id): 1
	int nid = next_online_node(pgdat->node_id);
	// nid: 1

	// MAX_NUMNODES: 1
	if (nid == MAX_NUMNODES)
		return NULL;
		// return NULL

	return NODE_DATA(nid);
}
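/* For context (a sketch, not part of the scraped example): next_online_pgdat()
 * is the stepping function behind the kernel's for_each_online_pgdat()
 * iterator, which is defined along the lines of:
 *
 *	#define for_each_online_pgdat(pgdat)		\
 *		for (pgdat = first_online_pgdat();	\
 *		     pgdat;				\
 *		     pgdat = next_online_pgdat(pgdat))
 */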
Example #14
void _call_funcs_from_list(QSP_ARG_DECL  List *lp )
{
	Node *np;
	void (*func)(SINGLE_QSP_ARG_DECL);

	np=QLIST_HEAD(lp);
	assert( np != NULL );

	while( np != NULL ){
		func = (void (*)(SINGLE_QSP_ARG_DECL)) NODE_DATA(np);
		(*func)(SINGLE_QSP_ARG);
		np = NODE_NEXT(np);
	}
}
Example #15
static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	table_size = sizeof(struct page_cgroup) * nr_pages;

	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!base)
		return -ENOMEM;
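	/* Initialize one page_cgroup for each pfn the node spans. */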
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
Example #16
/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks in to node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
    unsigned long free_pfn = PFN_UP(__pa(_end));
    u64 base = min_low_pfn << PAGE_SHIFT;
    u64 size = (max_low_pfn << PAGE_SHIFT) - base;

    lmb_add(base, size);

    /* Reserve the LMB regions used by the kernel, initrd, etc.. */
    lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
                (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

    /*
     * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
     */
    if (CONFIG_ZERO_PAGE_OFFSET != 0)
        lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

    lmb_analyze();
    lmb_dump_all();

    /*
     * Node 0 sets up its pgdat at the first available pfn,
     * and bumps it up before setting up the bootmem allocator.
     */
    NODE_DATA(0) = pfn_to_kaddr(free_pfn);
    memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
    free_pfn += PFN_UP(sizeof(struct pglist_data));
    NODE_DATA(0)->bdata = &bootmem_node_data[0];

    /* Set up node 0 */
    setup_bootmem_allocator(free_pfn);

    /* Give the platforms a chance to hook up their nodes */
    plat_mem_setup();
}
Example #17
/* __alloc_bootmem...() is protected by !slab_available() */
int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section;
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	section = __pfn_to_section(pfn);

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size, GFP_KERNEL, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of memmap may have changed, so we have to
		 * initialize it again.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	section = __pfn_to_section(pfn);
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
Example #18
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmap_start, bootmap_size;
	unsigned long start_pfn, free_pfn, end_pfn;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid == 0);

	/*
	 * The free pfn starts at the beginning of the range, and is
	 * advanced as necessary for pgdat and node map allocations.
	 */
	free_pfn = start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	__add_active_range(nid, start_pfn, end_pfn);

	/* Node-local pgdat */
	NODE_DATA(nid) = pfn_to_kaddr(free_pfn);
	free_pfn += PFN_UP(sizeof(struct pglist_data));
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = (unsigned long)pfn_to_kaddr(free_pfn);
	bootmap_size = init_bootmem_node(NODE_DATA(nid), free_pfn, start_pfn,
				    end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
Example #19
static int __init alloc_node_page_ext(int nid)
{
	struct page_ext *base;
	unsigned long table_size;
	unsigned long nr_pages;

/* IAMROOT-12AB:
 * -------------
 * Allocate page_ext structures, one per page spanned by this node.
 * (If the node range is not aligned, allocate MAX_ORDER_NR_PAGES
 * (2^(MAX_ORDER-1)) extra entries.)
 * The node structure's node_page_ext points at this array.
 */
	nr_pages = NODE_DATA(nid)->node_spanned_pages;
	if (!nr_pages)
		return 0;

	/*
	 * Need extra space if node range is not aligned with
	 * MAX_ORDER_NR_PAGES. When page allocator's buddy algorithm
	 * checks buddy's status, range could be out of exact node range.
	 */
	if (!IS_ALIGNED(node_start_pfn(nid), MAX_ORDER_NR_PAGES) ||
		!IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
		nr_pages += MAX_ORDER_NR_PAGES;

	table_size = sizeof(struct page_ext) * nr_pages;

	base = memblock_virt_alloc_try_nid_nopanic(
			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
			BOOTMEM_ALLOC_ACCESSIBLE, nid);
	if (!base)
		return -ENOMEM;
	NODE_DATA(nid)->node_page_ext = base;
	total_usage += table_size;
	return 0;
}
static int link_mem_sections(int nid)
{
	unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
	unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/* same memblock ? */
		if (mem_blk)
			if ((section_nr >= mem_blk->start_section_nr) &&
			    (section_nr <= mem_blk->end_section_nr))
				continue;

		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		if (!err)
			err = ret;

		/* discard ref obtained in find_memory_block() */
	}

	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}
Example #21
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
Example #22
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

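	/* Fall back to the bootmem allocator until the slab allocator is up. */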
	if (slab_is_available())
		section = kmalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
Example #23
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
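/* Note (editorial, not from the scraped source): the two arch_add_memory()
 * examples above target different kernel versions; __add_pages() later gained
 * an explicit nid parameter, which is why the SH variant passes the zone
 * first while the s390 variant passes (nid, zone, ...). */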
Example #24
static void _delete_subobjects(QSP_ARG_DECL  Data_Obj *dp)			/** delete all subimages */
{
	Node *np;

	/*
	 * delvec() will remove the parent's child list
	 * after deleting last child.
	 *
	 * List elements are de-allocated by delvec()
	 */

	while( OBJ_CHILDREN( dp ) != NULL ){
		np=QLIST_HEAD( OBJ_CHILDREN( dp ) );
		delvec( (Data_Obj *) NODE_DATA(np) );
	}
}
Example #25
int get_camera_names( QSP_ARG_DECL  Data_Obj *str_dp )
{
	// Could check format of object here...
	// Should be string table with enough entries to hold the modes
	// Should the strings be rows or multidim pixels?
	List *lp;
	Node *np;
	PGR_Cam *pgcp;
	int i, n;

	lp = pgc_list();
	if( lp == NULL ){
		WARN("No cameras!?");
		return 0;
	}

	n=eltcount(lp);
	if( OBJ_COLS(str_dp) < n ){
		sprintf(ERROR_STRING,"String object %s has too few columns (%ld) to hold %d camera names",
			OBJ_NAME(str_dp),(long)OBJ_COLS(str_dp),n);
		WARN(ERROR_STRING);
		n = OBJ_COLS(str_dp);
	}
		
	np=QLIST_HEAD(lp);
	i=0;
	while(np!=NULL){
		char *dst;
		pgcp = (PGR_Cam *) NODE_DATA(np);
		dst = OBJ_DATA_PTR(str_dp);
		dst += i * OBJ_PXL_INC(str_dp);
		if( strlen(pgcp->pc_name)+1 > OBJ_COMPS(str_dp) ){
			sprintf(ERROR_STRING,"String object %s has too few components (%ld) to hold camera name \"%s\"",
				OBJ_NAME(str_dp),(long)OBJ_COMPS(str_dp),pgcp->pc_name);
			WARN(ERROR_STRING);
		} else {
			strcpy(dst,pgcp->pc_name);
		}
		i++;
		if( i>=n )
			np=NULL;
		else
			np = NODE_NEXT(np);
	}

	return i;
}
Example #26
void ax8swap_show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node, i;
	struct meminfo * mi = ax8swap_meminfo;

	printk("Mem-info:\n");
	ax8swap_show_free_areas();
	for_each_online_node(node) {
		pg_data_t *n = NODE_DATA(node);
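		/* Bias the node's mem_map so that map + pfn yields pfn's struct page. */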
		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;

		for_each_nodebank (i,mi,node) {
			struct membank *bank = &mi->bank[i];
			unsigned int pfn1, pfn2;
			struct page *page, *end;

			pfn1 = bank_pfn_start(bank);
			pfn2 = bank_pfn_end(bank);

			page = map + pfn1;
			end  = map + pfn2;

			do {
				total++;
				if (PageReserved(page))
					reserved++;
				else if (PageSwapCache(page))
					cached++;
				else if (PageSlab(page))
					slab++;
				else if (!page_count(page))
					free++;
				else
					shared += page_count(page) - 1;
				page++;
			} while (page < end);
		}
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
Example #27
/*
 * Called from map_io. We need to call to this early enough so that we
 * can reserve the fixed SDRAM regions before VM could get hold of them.
 */
void __init omapfb_reserve_sdram(void)
{
	struct bootmem_data	*bdata;
	unsigned long		sdram_start, sdram_size;
	unsigned long		reserved;
	int			i;

	if (config_invalid)
		return;

	bdata = NODE_DATA(0)->bdata;
	sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
	sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
	reserved = 0;
	for (i = 0; ; i++) {
		struct omapfb_mem_region	rg;

		if (get_fbmem_region(i, &rg) < 0)
			break;
		if (i == OMAPFB_PLANE_NUM) {
			printk(KERN_ERR
				"Extraneous FB mem configuration entries\n");
			config_invalid = 1;
			return;
		}
		/* Check if it's our memory type. */
		if (set_fbmem_region_type(&rg, OMAPFB_MEMTYPE_SDRAM,
				          sdram_start, sdram_size) < 0 ||
		    (rg.type != OMAPFB_MEMTYPE_SDRAM))
			continue;
		BUG_ON(omapfb_config.mem_desc.region[i].size);
		if (check_fbmem_region(i, &rg, sdram_start, sdram_size) < 0) {
			config_invalid = 1;
			return;
		}
		if (rg.paddr) {
			reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
			reserved += rg.size;
		}
		omapfb_config.mem_desc.region[i] = rg;
		configured_regions++;
	}
	omapfb_config.mem_desc.region_cnt = i;
	if (reserved)
		pr_info("Reserving %lu bytes SDRAM for frame buffer\n",
			 reserved);
}
Example #28
/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}
Example #29
static void __init dma32_free_bootmem(void)
{
    int node;

    if (end_pfn <= MAX_DMA32_PFN)
        return;

    if (!dma32_bootmem_ptr)
        return;

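    /* Return the early DMA32 reservation to the bootmem allocator, per node. */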
    for_each_online_node(node)
        free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
                  dma32_bootmem_size);

    dma32_bootmem_ptr = NULL;
    dma32_bootmem_size = 0;
}
Example #30
File: mem.c Project: 1x23/unifi-gpl
/*
 * This works only for the non-NUMA case.  Later, we'll need a lookup
 * to convert from real physical addresses to nid, that doesn't use
 * pfn_to_nid().
 */
int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(0);
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	start += KERNELBASE;
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}