Example #1
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	if (!xen_is_e820_reserved(start, size)) {
		memblock_reserve(start, size);
		return;
	}

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on a 32-bit system to an arbitrary virtual
	 * address is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
#endif
}
Example #2
void __init bootmem_init(void)
{
	/* Reserve all memory below PHYS_OFFSET, as memory
	 * accounting doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0:
	 * successful allocations should never return NULL.
	 */
	if (PHYS_OFFSET)
		memblock_reserve(0, PHYS_OFFSET);
	else
		memblock_reserve(0, 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
Example #3
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for a zero size: it should happen rarely and would
	 * only write a new entry that is regarded as unused due to its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
Example #4
phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc, which really wants us to walk by
	 * address and return the nid. This is not very convenient for
	 * early_pfn_map[] users, as the map isn't sorted yet and really wants
	 * to be walked by nid.
	 *
	 * For now, implement the inefficient method below, which walks the
	 * early map multiple times. Eventually we may want to use an ARCH
	 * config option to implement a completely different method for both
	 * cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}
Example #5
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Middle of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
Example #6
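	/* Mark every memblock memory region as belonging to node 0. */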
	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
Example #7
static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
						 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
			return max(start, PFN_PHYS(start_pfn));
#endif
	*nid = 0;
	return start;
}
Example #8
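/*
 * Try to place the crash log just below the end of low memory; if that
 * reservation fails, shrink the node by CRASHLOG_PAGES and use the new end.
 */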
void __init crashlog_init_mem(bootmem_data_t *bdata)
{
	unsigned long addr;

	if (crashlog_addr)
		return;

	addr = PFN_PHYS(bdata->node_low_pfn) - CRASHLOG_OFFSET;
	if (reserve_bootmem(addr, CRASHLOG_SIZE, BOOTMEM_EXCLUSIVE) < 0) {
		printk("Crashlog failed to allocate RAM at address 0x%lx\n", addr);
		bdata->node_low_pfn -= CRASHLOG_PAGES;
		addr = PFN_PHYS(bdata->node_low_pfn);
	}
	crashlog_addr = addr;
}
Example #9
/*
 * On Meta machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks into node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
	unsigned long bootmap_pages, bootmem_paddr;
	unsigned long start_pfn, end_pfn;
	unsigned long pgdat_paddr;

	/* Don't allow bogus node assignment */
	BUG_ON(nid > MAX_NUMNODES || nid <= 0);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	memblock_add(start, end - start);

	memblock_set_node(PFN_PHYS(start_pfn),
			  PFN_PHYS(end_pfn - start_pfn), nid);

	/* Node-local pgdat */
	pgdat_paddr = memblock_alloc_base(sizeof(struct pglist_data),
					  SMP_CACHE_BYTES, end);
	NODE_DATA(nid) = __va(pgdat_paddr);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

	/* Node-local bootmap */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
					    PAGE_SIZE, end);
	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
			  start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/* Reserve the pgdat and bootmap space with the bootmem allocator */
	reserve_bootmem_node(NODE_DATA(nid), pgdat_paddr & PAGE_MASK,
			     sizeof(struct pglist_data), BOOTMEM_DEFAULT);
	reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
			     bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

	/* It's up */
	node_set_online(nid);

	/* Kick sparsemem */
	sparse_memory_present_with_active_regions(nid);
}
Example #10
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret, *ret_nocache;
	int order = get_order(size);

	gfp |= __GFP_ZERO;

	ret = (void *)__get_free_pages(gfp, order);
	if (!ret)
		return NULL;

	/*
	 * Pages from the page allocator may have data present in the
	 * cache, so flush the cache before using uncached memory.
	 */
	sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL);

	ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
	if (!ret_nocache) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}

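	/* Split the higher-order allocation so the pages can be freed individually later. */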
	split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);

	*dma_handle = virt_to_phys(ret);
	if (!WARN_ON(!dev))
		*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);

	return ret_nocache;
}
Example #11
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();
}
Example #12
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned long frames[pages];
	int f;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep = lookup_address(va, &level);
		unsigned long pfn, mfn;
		void *virt;

		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
Example #13
/**
 * numa_alloc_distance - create a new NUMA distance table
 */
static int __init numa_alloc_distance(void)
{
	size_t size;
	u64 phys;
	int i, j;

	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
				      size, PAGE_SIZE);
	if (WARN_ON(!phys))
		return -ENOMEM;

	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = nr_node_ids;

	/* fill with the default distances */
	for (i = 0; i < numa_distance_cnt; i++)
		for (j = 0; j < numa_distance_cnt; j++)
			numa_distance[i * numa_distance_cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;

	pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);

	return 0;
}
Example #14
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
Example #15
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);

	e820.nr_map = 0;

	e820_add_region(0, PFN_PHYS((u64)max_pfn), E820_RAM);

	/*
	 * Even though this is normal, usable memory under Xen, reserve
	 * ISA memory anyway because too many things think they can poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
				   __pa(xen_start_info->pt_base),
				   "XEN START INFO");

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	xen_return_unused_memory(xen_start_info->nr_pages, &e820);

	return "Xen";
}
Example #16
/* Create kernel-VA-space MMIO mapping for an on-chip IO device. */
void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
			    unsigned long size)
{
	pgprot_t mmio_base, prot = { 0 };
	unsigned long pfn;
	int err;

	/* Look up the shim's lotar and base PA. */
	err = __iorpc_get_mmio_base(hv_fd, &mmio_base);
	if (err) {
		TRACE("get_mmio_base() failure: %d\n", err);
		return NULL;
	}

	/* Make sure the HV driver approves of our offset and size. */
	err = __iorpc_check_mmio_offset(hv_fd, offset, size);
	if (err) {
		TRACE("check_mmio_offset() failure: %d\n", err);
		return NULL;
	}

	/*
	 * mmio_base contains a base pfn and homing coordinates.  Turn
	 * it into an MMIO pgprot and offset pfn.
	 */
	prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base));
	pfn = pte_pfn(mmio_base) + PFN_DOWN(offset);

	return ioremap_prot(PFN_PHYS(pfn), size, prot);
}
Example #17
void init_mm(void)
{
    unsigned long start_pfn, max_pfn;

    printk("go_mm: initializing\n");

    arch_init_mm(&start_pfn, &max_pfn);

    printk("go_mm: initializing best fit page allocator for %lx-%lx\n",
        (unsigned long)pfn_to_virt(start_pfn),
        (unsigned long)pfn_to_virt(max_pfn));
    init_page_allocator(PFN_PHYS(start_pfn), PFN_PHYS(max_pfn));
    printk("go_mm: done\n");

    arch_init_p2m(max_pfn);
    arch_init_demand_mapping_area(max_pfn);
}
Example #18
static int vpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
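	/* Only allow mappings within 0x13200000 - 0x13300000, or at exactly 0x10000000. */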
	if (PFN_PHYS(vma->vm_pgoff) < 0x13200000 || PFN_PHYS(vma->vm_pgoff) >= 0x13300000) {
		if (PFN_PHYS(vma->vm_pgoff) != 0x10000000) {
			printk("physical address error: valid range is 0x13200000 - 0x13300000\n");
			return -EAGAIN;
		}
	}
	if (io_remap_pfn_range(vma, vma->vm_start,
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
Example #19
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn = xen_start_info->nr_pages;

	e820.nr_map = 0;
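	/* Report the domain's entire pseudo-physical memory as a single RAM region. */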
	add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);

	return "Xen";
}
Example #20
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	pagefault_disable();	/* Avoid warnings due to being atomic. */
	__get_user(dummy, (unsigned char __user __force *)v);
	pagefault_enable();

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
Example #21
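/* Return the amount of memory, in bytes, absent from the range [start, end). */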
static u64 __init mem_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = PFN_DOWN(end);

	if (start_pfn < end_pfn)
		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
	return 0;
}
Example #22
static void __init bootmem_init(void)
{
	unsigned long start_pfn, bootmap_size;
	unsigned long size = initrd_end - initrd_start;

	start_pfn = PFN_UP(__pa(&_end));

	min_low_pfn = PFN_UP(MEMORY_START);
	max_low_pfn = PFN_UP(MEMORY_START + MEMORY_SIZE);

	/* Initialize the boot-time allocator with low memory only. */
	bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
					 min_low_pfn, max_low_pfn);
	add_active_range(0, min_low_pfn, max_low_pfn);

	free_bootmem(PFN_PHYS(start_pfn),
		     (max_low_pfn - start_pfn) << PAGE_SHIFT);
	memory_present(0, start_pfn, max_low_pfn);

	/* Reserve space for the bootmem bitmap. */
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}

	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	/* Reserve space for the initrd bitmap. */
	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		 initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
Example #23
void __init bootmem_init(void)
{
	struct memblock_region *reg;
	unsigned long bootmap_size;
	unsigned long free_pfn, end_pfn, start_pfn;

	init_mm.start_code = (unsigned long)_stext;
	init_mm.end_code = (unsigned long)_etext;
	init_mm.end_data = (unsigned long)_edata;
	init_mm.brk = (unsigned long)_end;

	memblock_init();
	memblock_add(memory_start, memory_end);

	if (((unsigned long)__pa(_end) < memory_start) || ((unsigned long)__pa(_end) > memory_end))
		printk("BUG: the kernel is not located in DDR SDRAM\n");

	start_pfn = PFN_UP(memory_start);
	free_pfn = PFN_UP(__pa((unsigned long)_end));
	end_pfn = PFN_DOWN(memory_end);

	//memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(free_pfn - start_pfn));
	memblock_reserve(__pa(_stext), _end - _stext);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && initrd_end && initrd_start < initrd_end) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);
	}
#endif

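	/*
	 * Set up bootmem: the bitmap lives right after the kernel image
	 * and the allocator covers pages up to end_pfn.
	 */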
	bootmap_size = init_bootmem(free_pfn, end_pfn);
	memblock_reserve(PFN_PHYS(free_pfn), bootmap_size);

	free_bootmem(PFN_PHYS(free_pfn), PFN_PHYS(end_pfn - free_pfn));

	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	memory_start += PAGE_OFFSET;
	memory_end += PAGE_OFFSET;

	memblock_analyze();
	memblock_dump_all();
}
Example #24
/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
	unsigned long curr_pfn, last_pfn, pages;

	/*
	 * We are rounding up the start address of usable memory:
	 */
	curr_pfn = PFN_UP(__MEMORY_START);

	/*
	 * ... and at the end of the usable range downwards:
	 */
	last_pfn = PFN_DOWN(__pa(memory_end));

	if (last_pfn > max_low_pfn)
		last_pfn = max_low_pfn;

	pages = last_pfn - curr_pfn;
	free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}
Example #25
/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}
Example #26
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

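	/* high_memory marks the top of the directly mapped low-memory region. */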
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
}
Example #27
static unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size, start_pfn, max_low_pfn;


#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((x) << PAGE_SHIFT)
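/*
 * With 4 KiB pages (PAGE_SHIFT == 12), for example:
 *   PFN_UP(0x1001)   == 0x2    (round an address up to whole pages)
 *   PFN_DOWN(0x1fff) == 0x1    (round an address down to whole pages)
 *   PFN_PHYS(0x2)    == 0x2000 (convert a pfn back to a physical address)
 */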

#ifndef CONFIG_FB_OC_SHMEM_SIZE
#define CONFIG_FB_OC_SHMEM_SIZE 0
#endif /* CONFIG_FB_OC_SHMEM_SIZE */


	/* min_low_pfn points to the start of DRAM, start_pfn points
	 * to the first DRAM page after the kernel, and max_low_pfn
	 * to the end of DRAM. Partial pages are not useful, so round it
	 * down.
	 */
	start_pfn   = PFN_UP(__pa(&_end));
	max_low_pfn = PFN_DOWN(CONFIG_OR32_MEMORY_SIZE
			       -CONFIG_FB_OC_SHMEM_SIZE
			       -CONFIG_OR32_RESERVED_MEM_SIZE);
	min_low_pfn = PAGE_OFFSET >> PAGE_SHIFT;

#undef CONFIG_FB_OC_SHMEM_SIZE

	/*
	 * set the beginning of frame buffer
	 */
	fb_mem_start = PFN_PHYS(max_low_pfn);

	/* 
	 * initialize the boot-time allocator (with low memory only)
	 */ 
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);
	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(max_low_pfn - start_pfn));
	reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);

	return(max_low_pfn);
}
Example #28
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
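	/* ZONE_DMA32 ends at 4 GiB or at the end of low memory, whichever is lower. */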
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}
Example #29
unsigned long __init setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long min_pfn;
	int nid;
	mem_prof_t *mp;

	max_low_pfn = 0;
	min_low_pfn = -1;

	mem_prof_init();

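	/* Set up a node-local bootmem allocator for each online node from its memory profile. */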
	for_each_online_node(nid) {
		mp = &mem_prof[nid];
		NODE_DATA(nid) = (pg_data_t *)&m32r_node_data[nid];
		NODE_DATA(nid)->bdata = &node_bdata[nid];
		min_pfn = mp->start_pfn;
		max_pfn = mp->start_pfn + mp->pages;
		bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
			mp->start_pfn, max_pfn);

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->pages));

		reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
			PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);

		if (max_low_pfn < max_pfn)
			max_low_pfn = max_pfn;

		if (min_low_pfn > min_pfn)
			min_low_pfn = min_pfn;
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
			reserve_bootmem_node(NODE_DATA(0), INITRD_START,
				INITRD_SIZE);
			initrd_start = INITRD_START ?
				INITRD_START + PAGE_OFFSET : 0;

			initrd_end = initrd_start + INITRD_SIZE;
			printk("initrd:start[%08lx],size[%08lx]\n",
				initrd_start, INITRD_SIZE);
		} else {
			printk("initrd extends beyond end of memory "
				"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
				INITRD_START + INITRD_SIZE,
				PFN_PHYS(max_low_pfn));

			initrd_start = 0;
		}
	}
#endif	/* CONFIG_BLK_DEV_INITRD */

	return max_low_pfn;
}
Example #30
/**
 * dma_contiguous_isolate() - isolate contiguous memory from the page allocator
 * @dev: Pointer to device which owns the contiguous memory
 *
 * This function isolates contiguous memory from the page allocator. If some of
 * the contiguous memory is allocated, it is reclaimed.
 */
int dma_contiguous_isolate(struct device *dev)
{
	struct cma *cma = dev_get_cma_area(dev);
	int ret;
	int idx;

	if (!cma)
		return -ENODEV;

	if (cma->count == 0)
		return 0;

	mutex_lock(&cma_mutex);

	if (cma->isolated) {
		mutex_unlock(&cma_mutex);
		dev_err(dev, "Already isolated!\n");
		return 0;
	}

	idx = find_first_zero_bit(cma->bitmap, cma->count);
	while (idx < cma->count) {
		int idx_set;

		idx_set = find_next_bit(cma->bitmap, cma->count, idx);
		do {
			ret = alloc_contig_range(cma->base_pfn + idx,
						cma->base_pfn + idx_set,
						MIGRATE_CMA);
		} while (ret == -EBUSY);

		if (ret < 0) {
			mutex_unlock(&cma_mutex);
			dma_contiguous_deisolate_until(dev, idx_set);
			dev_err(dev, "Failed to isolate %#lx@%#010llx (%d).\n",
				(idx_set - idx) * PAGE_SIZE,
				PFN_PHYS(cma->base_pfn + idx), ret);
			return ret;
		}

		idx = find_next_zero_bit(cma->bitmap, cma->count, idx_set);
	}

	cma->isolated = true;

	mutex_unlock(&cma_mutex);

	return 0;
}