Code example #1
static void __init lge_add_persist_ram_devices(void)
{
	int ret;
	phys_addr_t base;
	phys_addr_t size;

	size = lge_ramoops_data.mem_size;

	/* find a 1M section from highmem */
	base = memblock_find_in_range(memblock.current_limit,
			MEMBLOCK_ALLOC_ANYWHERE, size, SECTION_SIZE);
	if (!base) {
		/* find a 1M section from lowmem */
		base = memblock_find_in_range(0,
				MEMBLOCK_ALLOC_ACCESSIBLE,
				size, SECTION_SIZE);
		if (!base) {
			pr_err("%s: not enough membank\n", __func__);
			return;
		}
	}

	pr_info("ramoops: reserved 1 MiB at 0x%08x\n", (int)base);

	lge_ramoops_data.mem_address = base;
	ret = memblock_reserve(lge_ramoops_data.mem_address,
			lge_ramoops_data.mem_size);

	if (ret)
		pr_err("%s: failed to initialize persistent ram\n", __func__);
}
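Most examples on this page follow the same two-step idiom: memblock_find_in_range() only locates a free candidate range (returning 0 on failure, or MEMBLOCK_ERROR on older kernels), and the caller must still memblock_reserve() it before using it. A minimal sketch of that idiom follows; the helper name is hypothetical and only memblock_find_in_range()/memblock_reserve() come from the kernel.

static phys_addr_t __init example_find_and_reserve(phys_addr_t size,
						    phys_addr_t align)
{
	phys_addr_t base;

	/* only a lookup: nothing is claimed yet */
	base = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, size, align);
	if (!base)
		return 0;	/* no suitable free range */

	/* claim it so later boot-time allocations cannot take it */
	if (memblock_reserve(base, size))
		return 0;

	return base;
}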
Code example #2
/**
 * init_alloc_remap - Initialize remap allocator for a NUMA node
 * @nid: NUMA node to initialize remap allocator for
 *
 * NUMA nodes may end up without any lowmem.  As allocating pgdat and
 * memmap on a different node with lowmem is inefficient, a special
 * remap allocator is implemented which can be used by alloc_remap().
 *
 * For each node, the amount of memory which will be necessary for
 * pgdat and memmap is calculated and two memory areas of that size are
 * allocated - one in the node and the other in lowmem; then, the area
 * in the node is remapped to the lowmem area.
 *
 * As pgdat and memmap must be allocated in lowmem anyway, this
 * doesn't waste lowmem address space; however, the actual lowmem
 * which gets remapped over is wasted.  The amount shouldn't be
 * problematic on machines where this feature will be used.
 *
 * Initialization failure isn't fatal.  alloc_remap() is used
 * opportunistically and the callers will fall back to other memory
 * allocation mechanisms on failure.
 */
void __init init_alloc_remap(int nid, u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = end >> PAGE_SHIFT;
	unsigned long size, pfn;
	u64 node_pa, remap_pa;
	void *remap_va;

	/*
	 * The acpi/srat node info can show hot-add memory zones where
	 * memory could be added but not currently present.
	 */
	printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n",
	       nid, start_pfn, end_pfn);

	/* calculate the necessary space aligned to large page size */
	size = node_memmap_size_bytes(nid, start_pfn, end_pfn);
	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
	size = ALIGN(size, LARGE_PAGE_BYTES);

	/* allocate node memory and the lowmem remap area */
	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
	if (!node_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
			   size, nid);
		return;
	}
	memblock_reserve(node_pa, size);

	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
					  size, LARGE_PAGE_BYTES);
	if (!remap_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
		memblock_free(node_pa, size);
		return;
	}
	memblock_reserve(remap_pa, size);
	remap_va = phys_to_virt(remap_pa);

	/* perform actual remap */
	for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE)
		set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT),
			    (node_pa >> PAGE_SHIFT) + pfn,
			    PAGE_KERNEL_LARGE);

	/* initialize remap allocator parameters */
	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
	node_remap_start_vaddr[nid] = remap_va;
	node_remap_end_vaddr[nid] = remap_va + size;
	node_remap_alloc_vaddr[nid] = remap_va;

	printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n",
	       nid, node_pa, node_pa + size, remap_va, remap_va + size);
}
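The kernel-doc above says alloc_remap() is used opportunistically, with callers falling back to other allocators. A hedged sketch of such a caller, assuming the alloc_remap(nid, size) signature of the x86-32 remap allocator; the helper name and the fallback path are illustrative only.

static void * __init example_alloc_node_data(int nid, unsigned long size)
{
	void *ptr = alloc_remap(nid, size);	/* try the node's remap area first */
	phys_addr_t phys;

	if (ptr)
		return ptr;	/* lowmem virtual address backed by node-local RAM */

	/* remap area absent or exhausted: fall back to plain lowmem */
	phys = memblock_find_in_range(0, max_low_pfn << PAGE_SHIFT,
				      size, PAGE_SIZE);
	if (!phys)
		return NULL;
	memblock_reserve(phys, size);
	return phys_to_virt(phys);
}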
Code example #3
File: init.c Project: 59psi/linux
/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
						__GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret;
		if (min_pfn_mapped >= max_pfn_mapped)
			panic("alloc_low_page: ran out of memory");
		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		if (!ret)
			panic("alloc_low_page: can not alloc memory");
		memblock_reserve(ret, PAGE_SIZE * num);
		pfn = ret >> PAGE_SHIFT;
	} else {
Code example #4
File: page_alloc.c Project: harvey-che/Xc
u64 find_memory_core_early(int nid, u64 size, u64 align, u64 goal, u64 limit)
{
	int i;

	for_each_active_range_index_in_nid_reverse(i, nid) {
		u64 addr;
		u64 ei_start, ei_last;
		u64 final_start, final_end;

		ei_last = early_node_map[i].end_pfn;
		ei_last <<= PAGE_SHIFT;
		ei_start = early_node_map[i].start_pfn;
		ei_start <<= PAGE_SHIFT;

		final_start = max(ei_start, goal);
		final_end = min(ei_last, limit);

		if (final_start >= final_end)
			continue;

		addr = memblock_find_in_range(final_start, final_end, size, align);

		if (addr == MEMBLOCK_ERROR)
			continue;

		return addr;
	}
Code example #5
File: numa.c Project: AlexShiLucky/linux
/**
 *
 * Create a new NUMA distance table.
 *
 */
static int __init numa_alloc_distance(void)
{
	size_t size;
	u64 phys;
	int i, j;

	size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn),
				      size, PAGE_SIZE);
	if (WARN_ON(!phys))
		return -ENOMEM;

	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = nr_node_ids;

	/* fill with the default distances */
	for (i = 0; i < numa_distance_cnt; i++)
		for (j = 0; j < numa_distance_cnt; j++)
			numa_distance[i * numa_distance_cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;

	pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);

	return 0;
}
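The table filled above is a flat cnt x cnt matrix indexed as i * cnt + j. A sketch of the matching lookup, modeled on the __node_distance() helper that accompanies this allocator in arm64's numa.c (exact signature assumed):

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}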
Code example #6
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}
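Callers that cannot tolerate failure typically go through a panicking wrapper rather than checking the return value themselves. A sketch of what that wrapper looks like in memblock.c of the same vintage (message wording assumed):

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				       phys_addr_t max_addr)
{
	phys_addr_t alloc = __memblock_alloc_base(size, align, max_addr);

	if (!alloc)
		panic("ERROR: Failed to allocate %#llx bytes below %#llx.\n",
		      (unsigned long long)size, (unsigned long long)max_addr);
	return alloc;
}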
Code example #7
File: init.c Project: FireBurn/linux
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
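The "crashkernel=" parameter parsed above normally takes the form size[@offset]. Two illustrative values (addresses made up):

crashkernel=256M              /* kernel picks a 2 MB aligned base: first branch above */
crashkernel=256M@0x60000000   /* caller-supplied base, validated in the else branch */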
Code example #8
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	/* numa_nodes_parsed already holds the node info parsed so far */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	/* the highest node id ends up in cnt (incremented below to get the count) */
	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	/* find an address where 'size' bytes can be allocated */
	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	/* reserve 'size' bytes */
	memblock_reserve(phys, size);

	/* point numa_distance at the new table */
	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	/* Set the default distances:
	   LOCAL_DISTANCE (10) for the same node,
	   REMOTE_DISTANCE (20) otherwise */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
Code example #9
File: init.c Project: ParrotSec/linux-psec
void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	unsigned char *base;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);
}
Code example #10
void s5p_reserve_mem(size_t boundary)
{
	struct s5p_media_device *mdev;
	u64 start, end;
	int i, ret;

	for (i = 0; i < meminfo.nr_banks; i++)
		media_base[i] = meminfo.bank[i].start + meminfo.bank[i].size;

	for (i = 0; i < nr_media_devs; i++) {
		mdev = &media_devs[i];
		if (mdev->memsize <= 0)
			continue;

		if (mdev->bank > meminfo.nr_banks) {
			pr_err("mdev %s: mdev->bank(%d), max_bank(%d)\n",
				mdev->name, mdev->bank, meminfo.nr_banks);
			return;
		}

		if (!mdev->paddr) {
			start = meminfo.bank[mdev->bank].start;
			end = start + meminfo.bank[mdev->bank].size;

			if (boundary && (boundary < end - start))
				start = end - boundary;

			mdev->paddr = memblock_find_in_range(start, end,
						mdev->memsize, PAGE_SIZE);
		}

		ret = memblock_reserve(mdev->paddr, mdev->memsize);
		if (ret < 0)
			pr_err("memblock_reserve(%x, %x) failed\n",
				mdev->paddr, mdev->memsize);

		if (media_base[mdev->bank] > mdev->paddr)
			media_base[mdev->bank] = mdev->paddr;

		printk(KERN_INFO "s5p: %lu bytes system memory reserved "
			"for %s at 0x%08x, %d-bank base(0x%08x)\n",
			(unsigned long) mdev->memsize, mdev->name, mdev->paddr,
			mdev->bank, media_base[mdev->bank]);
	}
}
Code example #11
void __init setup_trampolines(void)
{
	phys_addr_t mem;
	size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);

	/* Has to be in very low memory so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

	x86_trampoline_base = __va(mem);
	memblock_reserve(mem, size);

	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       x86_trampoline_base, (unsigned long long)mem, size);

	memcpy(x86_trampoline_base, x86_trampoline_start, size);
}
Code example #12
File: sleep.c Project: Adjustxx/Savaged-Zen
/**
 * acpi_reserve_wakeup_memory - do _very_ early ACPI initialisation
 *
 * We allocate a page from the first 1MB of memory for the wakeup
 * routine for when we come back from a sleep state. The
 * runtime allocator allows specification of <16MB pages, but not
 * <1MB pages.
 */
void __init acpi_reserve_wakeup_memory(void)
{
	phys_addr_t mem;

	if ((&wakeup_code_end - &wakeup_code_start) > WAKEUP_SIZE) {
		printk(KERN_ERR
		       "ACPI: Wakeup code way too big, S3 disabled.\n");
		return;
	}

	mem = memblock_find_in_range(0, 1<<20, WAKEUP_SIZE, PAGE_SIZE);

	if (mem == MEMBLOCK_ERROR) {
		printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
		return;
	}
	acpi_realmode = (unsigned long) phys_to_virt(mem);
	acpi_wakeup_address = mem;
	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
}
Code example #13
void __init reserve_real_mode(void)
{
	phys_addr_t mem;
	size_t size = real_mode_size_needed();

	if (!size)
		return;

	WARN_ON(slab_is_available());

	/* Has to be under 1M so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem) {
		pr_info("No sub-1M memory is available for the trampoline\n");
		return;
	}

	memblock_reserve(mem, size);
	set_real_mode_mem(mem, size);
}
Code example #14
static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
Code example #15
File: aperture_64.c Project: 0-T-0/ps4-linux
static u32 __init allocate_aperture(void)
{
	u32 aper_size;
	unsigned long addr;

	/* aper_size should <= 1G */
	if (fallback_aper_order > 5)
		fallback_aper_order = 5;
	aper_size = (32 * 1024 * 1024) << fallback_aper_order;

	/*
	 * Aperture has to be naturally aligned. This means a 2GB aperture
	 * won't have much chance of finding a place in the lower 4GB of
	 * memory. Unfortunately we cannot move it up because that would
	 * make the IOMMU useless.
	 */
	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
				      aper_size, aper_size);
	if (!addr) {
		pr_err("Cannot allocate aperture memory hole [mem %#010lx-%#010lx] (%uKB)\n",
		       addr, addr + aper_size - 1, aper_size >> 10);
		return 0;
	}
Code example #16
static u32 __init allocate_aperture(void)
{
	u32 aper_size;
	unsigned long addr;

	/* aper_size should <= 1G */
	if (fallback_aper_order > 5)
		fallback_aper_order = 5;
	aper_size = (32 * 1024 * 1024) << fallback_aper_order;

	/*
	 * Aperture has to be naturally aligned. This means a 2GB aperture
	 * won't have much chance of finding a place in the lower 4GB of
	 * memory. Unfortunately we cannot move it up because that would
	 * make the IOMMU useless.
	 */
	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
				      aper_size, aper_size);
	if (!addr || addr + aper_size > GART_MAX_ADDR) {
		printk(KERN_ERR
			"Cannot allocate aperture memory hole (%lx,%uK)\n",
				addr, aper_size>>10);
		return 0;
	}
Code example #17
File: aperture_64.c Project: AbheekG/XIA-for-Linux
static u32 __init allocate_aperture(void)
{
	u32 aper_size;
	unsigned long addr;

	/* aper_size should <= 1G */
	if (fallback_aper_order > 5)
		fallback_aper_order = 5;
	aper_size = (32 * 1024 * 1024) << fallback_aper_order;

	/*
	 * Aperture has to be naturally aligned. This means a 2GB aperture
	 * won't have much chance of finding a place in the lower 4GB of
	 * memory. Unfortunately we cannot move it up because that would
	 * make the IOMMU useless.
	 */
	/*
	 * Use 512M as the goal, in case kexec will load kernel_big that
	 * does an in-place decompress, which could overlap with the
	 * position used by the GART.
	 * Sequence:
	 * kernel_small
	 * ==> kexec (with kdump trigger path or previous doesn't shut down gart)
	 * ==> kernel_small (gart area becomes e820_reserved)
	 * ==> kexec (with kdump trigger path or previous doesn't shut down gart)
	 * ==> kernel_big (uncompressed size will be bigger than 64M or 128M)
	 * so don't place the GART IOMMU below 512M; leave that space for
	 * kernel code, to be safe.
	 */
	addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
	if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
		printk(KERN_ERR
			"Cannot allocate aperture memory hole (%lx,%uK)\n",
				addr, aper_size>>10);
		return 0;
	}
Code example #18
File: mx_cma.c Project: gcrisis/android_kernel_mx2
static void __init mx_cma_region_reserve(
    struct cma_region *regions_normal,
    struct cma_region *regions_secure)
{
    struct cma_region *reg;
    phys_addr_t paddr_last = 0xFFFFFFFF;

    for (reg = regions_normal; reg->size != 0; reg++) {
        phys_addr_t paddr;

        if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
            pr_err("S5P/CMA: size of '%s' is NOT page-aligned\n",
                   reg->name);
            reg->size = PAGE_ALIGN(reg->size);
        }

        if (reg->reserved) {
            pr_err("S5P/CMA: '%s' alread reserved\n", reg->name);
            continue;
        }

        if (reg->alignment) {
            if ((reg->alignment & ~PAGE_MASK) ||
                    (reg->alignment & (reg->alignment - 1))) {
                pr_err("S5P/CMA: Failed to reserve '%s': "
                       "incorrect alignment 0x%08x.\n",
                       reg->name, reg->alignment);
                continue;
            }
        } else {
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                    && (memblock_reserve(reg->start, reg->size) == 0))
                reg->reserved = 1;
            else
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
            continue;
        }

        paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                       reg->size, reg->alignment);
        if (paddr != MEMBLOCK_ERROR) {
            if (memblock_reserve(paddr, reg->size)) {
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
                continue;
            }
            reg->start = paddr;
            reg->reserved = 1;
            pr_info("name = %s, paddr = 0x%x, size = %d\n", reg->name, paddr, reg->size);
        } else {
            pr_err("S5P/CMA: No free space in memory for '%s'\n",
                   reg->name);
        }

        if (cma_early_region_register(reg)) {
            pr_err("S5P/CMA: Failed to register '%s'\n",
                   reg->name);
            memblock_free(reg->start, reg->size);
        } else {
            paddr_last = min(paddr, paddr_last);
        }
    }

    if (regions_secure && regions_secure->size) {
        size_t size_secure = 0;
        size_t align_secure, size_region2, aug_size, order_region2;

        for (reg = regions_secure; reg->size != 0; reg++)
            size_secure += reg->size;

        reg--;

        /* Entire secure regions will be merged into 2
         * consecutive regions. */
        align_secure = 1 <<
                       (get_order((size_secure + 1) / 2) + PAGE_SHIFT);
        /* Calculation of a subregion size */
        size_region2 = size_secure - align_secure;
        order_region2 = get_order(size_region2) + PAGE_SHIFT;
        if (order_region2 < 20)
            order_region2 = 20; /* 1MB */
        order_region2 -= 3; /* divide by 8 */
        size_region2 = ALIGN(size_region2, 1 << order_region2);

        aug_size = align_secure + size_region2 - size_secure;
        if (aug_size > 0)
            reg->size += aug_size;

        size_secure = ALIGN(size_secure, align_secure);

        if (paddr_last >= memblock.current_limit) {
            paddr_last = memblock_find_in_range(0,
                                                MEMBLOCK_ALLOC_ACCESSIBLE,
                                                size_secure, reg->alignment);
        } else {
            paddr_last -= size_secure;
            paddr_last = round_down(paddr_last, align_secure);
        }

        if (paddr_last) {
            while (memblock_reserve(paddr_last, size_secure))
                paddr_last -= align_secure;

            do {
                reg->start = paddr_last;
                reg->reserved = 1;
                paddr_last += reg->size;

                if (cma_early_region_register(reg)) {
                    memblock_free(reg->start, reg->size);
                    pr_err("S5P/CMA: "
                           "Failed to register secure region "
                           "'%s'\n", reg->name);
                } else {
                    size_secure -= reg->size;
                }
            } while (reg-- != regions_secure);

            if (size_secure > 0)
                memblock_free(paddr_last, size_secure);
        } else {
            pr_err("S5P/CMA: Failed to reserve secure regions\n");
        }
    }
}
Code example #19
File: dev-cma.c Project: advx9600/kernel-4.4-RuiEr
void __init nxp_cma_region_reserve(struct cma_region *regions, const char *map)
{
    struct cma_region *reg;
    phys_addr_t paddr_last = 0xFFFFFFFF;

    for (reg = regions; reg->size != 0; reg++) {
        phys_addr_t paddr;

        if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
            pr_debug("NXP/CMA: size of '%s' is NOT page-aligned\n", reg->name);
            reg->size = PAGE_ALIGN(reg->size);
        }

        if (reg->reserved) {
            pr_err("NXP/CMA: '%s' already reserved\n", reg->name);
            continue;
        }

        if (reg->alignment) {
            if ((reg->alignment & ~PAGE_MASK) ||
                (reg->alignment & (reg->alignment - 1))) {
                pr_err("NXP/CMA: failed to reserve '%s': "
                        "incorrect alignment 0x%08x.\n",
                        reg->name, reg->alignment);
                continue;
            }
        } else {
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                && (memblock_reserve(reg->start, reg->size) == 0)) {
                reg->reserved = 1;
            } else {
                pr_err("NXP/CMA: failed to reserve '%s'\n", reg->name);
            }

        } else {
            paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                    reg->size, reg->alignment);
            if (paddr) {
                if (memblock_reserve(paddr, reg->size)) {
                    pr_err("NXP/CMA: failed to reserve '%s': memblock_reserve() failed\n",
                            reg->name);
                    continue;
                }

                reg->start = paddr;
                reg->reserved = 1;
            } else {
                pr_err("NXP/CMA: No free space in memory for '%s': size(%d)\n",
                        reg->name, reg->size);
            }
        }

        if (reg->reserved) {
            pr_debug("NXP/CMA: "
                    "Reserved 0x%08x/0x%08x for '%s'\n",
                    reg->start, reg->size, reg->name);
            printk("NXP/CMA: "
                    "Reserved 0x%08x/0x%08x for '%s'\n",
                    reg->start, reg->size, reg->name);

            if (0 == cma_early_region_register(reg)) {
                paddr_last = min(reg->start, paddr_last);
                pr_debug("NXP/CMA: success register cma region for '%s'\n",
                        reg->name);
                printk("NXP/CMA: success register cma region for '%s'\n",
                        reg->name);
            } else {
                pr_err("NXP/CMA: failed to cma_early_region_register for '%s'\n",
                        reg->name);
                memblock_free(reg->start, reg->size);
            }
        }
    }

    if (map) {
        cma_set_defaults(NULL, map);
    }
}
Code example #20
static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_reserve(addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
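The memblock_can_resize flag checked at the top is expected to be set by a one-line helper elsewhere in the same memblock.c, called by arch code once the initial memory map and its reserved regions are known; a sketch (assumed to match that helper):

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}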
Code example #21
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & (reg->alignment - 1))) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);

			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr != MEMBLOCK_ERROR) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & (align_secure - 1)) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
			pr_info("S5P/CMA: "
				"Reserved 0x%08x/0x%08x for 'secure_region'\n",
				paddr_last, size_secure);
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif
			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else {
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");

#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);

				cma_region_descriptor_add(reg->name, reg->start, reg->size);

				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
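The secure-region sizing arithmetic in the CMA examples above is easier to follow with concrete numbers; a worked example with illustrative values (4 KiB pages assumed):

	/*
	 * size_secure = 12 MiB, align_secure passed in as 0:
	 *   get_order((12 MiB + 1) / 2) = get_order(6 MiB)   = 11
	 *   align_secure  = 1 << (11 + PAGE_SHIFT)            = 8 MiB
	 *   size_region2  = 12 MiB - 8 MiB                    = 4 MiB
	 *   order_region2 = get_order(4 MiB) + PAGE_SHIFT     = 22, not below 20, minus 3 -> 19 (512 KiB granule)
	 *   size_region2  = ALIGN(4 MiB, 512 KiB)             = 4 MiB
	 *   aug_size      = 8 MiB + 4 MiB - 12 MiB            = 0, so reg->size is unchanged
	 */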
Code example #22
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * work fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap only can remap 256k one time. If we map all
	 * tables one time, we will hit the limit. Need to map chunks
	 * one by one during copying the same as that in relocate_initrd().
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
						 clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
Code example #23
File: osl.c Project: AiWinters/linux
void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;
	struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
	char *p;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header))
			INVALID_TABLE("Table smaller than ACPI header",
				      cpio_path, file.name);

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig])
			INVALID_TABLE("Unknown signature",
				      cpio_path, file.name);
		if (file.size != table->length)
			INVALID_TABLE("File length does not match table length",
				      cpio_path, file.name);
		if (acpi_table_checksum(file.data, table->length))
			INVALID_TABLE("Bad table checksum",
				      cpio_path, file.name);

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		early_initrd_files[table_nr].data = file.data;
		early_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Only calling e820_add_reserve does not work and the
	 * tables are invalid (memory got used) later.
	 * memblock_reserve works as expected and the tables won't get modified.
	 * But it's not enough on X86 because ioremap will
	 * complain later (used by acpi_os_map_memory) that the pages
	 * that should get mapped are not marked "reserved".
	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
	 * work fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	p = early_ioremap(acpi_tables_addr, all_tables_size);

	for (no = 0; no < table_nr; no++) {
		memcpy(p + total_offset, early_initrd_files[no].data,
		       early_initrd_files[no].size);
		total_offset += early_initrd_files[no].size;
	}
	early_iounmap(p, all_tables_size);
}