void __init bitfix_reserve(void)
{
	int i;
	int ret;

	/*
	 * We'll auto-enable if needed.  However, we still allocate memory
	 * even if we detect bitfix isn't needed, which allows us to enable
	 * it at runtime for testing.
	 */
	bitfix_enabled = bitfix_is_needed();

	/* We need pm_check enabled */
	if (bitfix_enabled) {
		pr_info("%s: Detected firmware that needs bitfix\n", __func__);
		s3c_pm_check_set_enable(true);
	}

	for (i = 0; i < UPPER_LOOPS; i++) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		bool was_reserved;

		pr_debug("%s: trying to reserve %08x@%08x\n",
			__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
		was_reserved = memblock_is_region_reserved(xor_superchunk_addr,
			SUPERCHUNK_SIZE);
		if (was_reserved) {
			pr_err("%s: memory already reserved %08x@%08x\n",
				__func__, SUPERCHUNK_SIZE, xor_superchunk_addr);
			goto error;
		}

		ret = memblock_reserve(xor_superchunk_addr, SUPERCHUNK_SIZE);
		if (ret) {
			pr_err("%s: memblock_reserve fail (%d) %08x@%08x\n",
				__func__, ret, SUPERCHUNK_SIZE,
				xor_superchunk_addr);
			goto error;
		}
	}

	return;
error:
	/*
	 * If we detected that we needed bitfix code and we couldn't init
	 * then that's a serious problem.  Dump stack so it's pretty obvious.
	 */
	WARN_ON(true);

	for (i--; i >= 0; i--) {
		phys_addr_t xor_superchunk_addr =
			bitfix_get_xor_superchunk_addr(i);
		ret = memblock_free(xor_superchunk_addr, SUPERCHUNK_SIZE);
		WARN_ON(ret);
	}
	bitfix_enabled = false;

	__memblock_dump_all();
}
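The comment at the top notes that the XOR superchunks are reserved even when bitfix is not needed, so the feature can be enabled at runtime for testing. A minimal sketch of such a toggle, assuming bitfix_enabled is a bool; the module-parameter plumbing is an assumption, not part of the original driver:

/* Hypothetical runtime toggle: the memory is already reserved at boot,
 * so flipping the flag later is safe. */
static int bitfix_enable_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_bool(val, kp);

	/* pm_check must be on whenever bitfix is active (see above). */
	if (!ret && bitfix_enabled)
		s3c_pm_check_set_enable(true);
	return ret;
}

static const struct kernel_param_ops bitfix_enable_ops = {
	.set = bitfix_enable_set,
	.get = param_get_bool,
};
module_param_cb(enable, &bitfix_enable_ops, &bitfix_enabled, 0644);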
/*
 * The UEFI specification makes it clear that the operating system is free to
 * do whatever it wants with boot services code after ExitBootServices() has
 * been called. Ignoring this recommendation, a significant number of EFI
 * implementations continue calling into boot services code (namely from
 * SetVirtualAddressMap()). In order to work around such buggy implementations
 * we reserve the boot services region during EFI init and make sure it stays
 * executable. Then, after SetVirtualAddressMap(), it is discarded.
 */
void __init efi_reserve_boot_services(void)
{
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;
		/*
		 * Only reserve where possible:
		 * - Not within any already allocated areas
		 * - Not over any memory area (really needed, if above?)
		 * - Not within any part of the kernel
		 * - Not the bios reserved area
		 */
		if ((start + size > __pa_symbol(_text)
				&& start <= __pa_symbol(_end)) ||
			!e820_all_mapped(start, start+size, E820_RAM) ||
			memblock_is_region_reserved(start, size)) {
			/* Could not reserve, skip it */
			md->num_pages = 0;
			memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
				     start, start+size-1);
		} else
			memblock_reserve(start, size);
	}
}
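The "discarded" step mentioned in the comment happens in a second pass after SetVirtualAddressMap(). A minimal sketch of that pass, reusing the same memmap globals; the real kernel's efi_free_boot_services() releases the pages through the boot allocator instead, so treat this as illustrative:

void __init efi_free_boot_services(void)
{
	void *p;

	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
		efi_memory_desc_t *md = p;
		u64 start = md->phys_addr;
		u64 size = md->num_pages << EFI_PAGE_SHIFT;

		if (md->type != EFI_BOOT_SERVICES_CODE &&
		    md->type != EFI_BOOT_SERVICES_DATA)
			continue;

		/* Ranges we failed to reserve had num_pages zeroed above. */
		if (!size)
			continue;

		memblock_free(start, size);
	}
}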
Example #3
static int reserve_sdram(unsigned long addr, unsigned long size)
{
	if (memblock_is_region_reserved(addr, size))
		return -EBUSY;
	if (memblock_reserve(addr, size))
		return -ENOMEM;
	return 0;
}
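All of the snippets that follow share this same check-then-reserve idiom. A caller sketch; the function name and address are hypothetical, chosen only to illustrate the calling convention:

static int __init board_reserve_framebuffer(void)
{
	/* 8 MiB at physical 0x10000000; illustrative values only */
	return reserve_sdram(0x10000000, SZ_8M);
}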
Example #4
static int __init __iomem_reserve_memblock(void)
{
	if (memblock_is_region_reserved(SPRD_IO_MEM_BASE, SPRD_IO_MEM_SIZE))
		return -EBUSY;
	if (memblock_reserve(SPRD_IO_MEM_BASE, SPRD_IO_MEM_SIZE))
		return -ENOMEM;
	return 0;
}
Example #5
int __init __ramconsole_reserve_memblock(void)
{
	if (memblock_is_region_reserved(SPRD_RAM_CONSOLE_START, SPRD_RAM_CONSOLE_SIZE))
		return -EBUSY;
	if (memblock_reserve(SPRD_RAM_CONSOLE_START, SPRD_RAM_CONSOLE_SIZE))
		return -ENOMEM;
	return 0;
}
Example #6
int __init __ftrace_reserve_memblock(void)
{
	if (memblock_is_region_reserved(FT_BASE_PHY, FT_SIZE_PHY))
		return -EBUSY;
	if (memblock_reserve(FT_BASE_PHY, FT_SIZE_PHY))
		return -ENOMEM;

	return 0;
}
Example #7
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
					phys_addr_t size, bool nomap)
{
	if (memblock_is_region_reserved(base, size))
		return -EBUSY;
	if (nomap)
		return memblock_remove(base, size);
	return memblock_reserve(base, size);
}
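Because the helper is declared __weak, an architecture can supply its own version. A hypothetical override keeping the same policy but adding a debug print (the print is an assumption, not taken from any real port):

int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
					phys_addr_t size, bool nomap)
{
	pr_debug("DT reservation: base=%pa size=%pa nomap=%d\n",
		 &base, &size, nomap);

	if (memblock_is_region_reserved(base, size))
		return -EBUSY;
	if (nomap)
		return memblock_remove(base, size);
	return memblock_reserve(base, size);
}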
Example #8
static int __init __fbmem_reserve_memblock(void)
{
	pr_err("__fbmem_reserve_memblock,SPRD_FB_MEM_BASE:%x,SPRD_FB_MEM_SIZE:%x\n",SPRD_FB_MEM_BASE,SPRD_FB_MEM_SIZE);
	if (memblock_is_region_reserved(SPRD_FB_MEM_BASE, SPRD_FB_MEM_SIZE))
		return -EBUSY;
	if (memblock_reserve(SPRD_FB_MEM_BASE, SPRD_FB_MEM_SIZE))
		return -ENOMEM;
	pr_err("__fbmem_reserve_memblock-end,\n");
	return 0;
}
Example #9
static void __init exynos5_cma_region_reserve(
			struct cma_region *regions_normal,
			struct cma_region *regions_secure)
{
	struct cma_region *reg;
	size_t size_secure = 0, align_secure = 0;
	phys_addr_t paddr = 0;

	for (reg = regions_normal; reg->size != 0; reg++) {
		if ((reg->alignment & (reg->alignment - 1)) || reg->reserved)
			continue;

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && memblock_reserve(reg->start, reg->size) >= 0)
				reg->reserved = 1;
		} else {
			paddr = __memblock_alloc_base(reg->size, reg->alignment,
					MEMBLOCK_ALLOC_ACCESSIBLE);
			if (paddr) {
				reg->start = paddr;
				reg->reserved = 1;
				if (reg->size & (reg->alignment - 1))
					memblock_free(paddr + reg->size,
						ALIGN(reg->size, reg->alignment)
						- reg->size);
			}
		}
	}

	if (regions_secure && regions_secure->size) {
		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		align_secure = reg->alignment;
		BUG_ON(align_secure & (align_secure - 1));

		paddr -= size_secure;
		paddr &= ~(align_secure - 1);

		if (!memblock_reserve(paddr, size_secure)) {
			do {
				reg->start = paddr;
				reg->reserved = 1;
				paddr += reg->size;
			} while (reg-- != regions_secure);
		}
	}
}
Example #10
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
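The whole reservation is driven by the boot command line. Two illustrative values (hypothetical, but matching the parsing rules enforced above):

/*
 * crashkernel=256M              kernel picks a 2MB-aligned base itself
 * crashkernel=256M@0x40000000   explicit base: must be RAM, not already
 *                               reserved, and 2MB aligned, or it is refused
 */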
Example #11
/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the primary kernel's system memory.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
Example #12
/*
 * Reserved memory: 0x30000000 - 0x3fffffff and 0xe0000000 - 0xefffffff.
 *
 * Reservation scheme: within every contiguous 8K, one 4K page has been
 * claimed by the ASIC, so software reserves a contiguous 8K covering it.
 * The resulting pattern over each 16K window:
 *	0k-4k
 *	4k-8k   --> reserved
 *	8k-12k  --> reserved
 *	12k-16k
 *	16k-20k
 *	20k-24k --> reserved
 *	24k-28k --> reserved
 */
static int __init __reserve_memblock(phys_addr_t addr_base, phys_addr_t size)
{
	phys_addr_t offset = SZ_4K;
	phys_addr_t i = 0;
	for (; i < size; i+= SZ_16K ) {
#if 0 //ignore this to decrease power on time(nearly 8~9 seconds)
/*FIXME: for optimize, we can ignor this*/
		if (memblock_is_region_reserved(addr_base + i + offset, SZ_8K)){
			BUG_ON(1);
			return -EBUSY;
		}
#endif
		if (memblock_reserve(addr_base + i + offset, SZ_8K))
			return -ENOMEM;         
	}

	return 0;
}
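A usage sketch matching the two ranges documented in the comment above; the wrapper function is an assumption, only the ranges come from the original:

static int __init sprd_reserve_asic_mem(void)
{
	int ret;

	/* 0x30000000 - 0x3fffffff */
	ret = __reserve_memblock(0x30000000, SZ_256M);
	if (ret)
		return ret;
	/* 0xe0000000 - 0xefffffff */
	return __reserve_memblock(0xe0000000, SZ_256M);
}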
Example #13
/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory area given in the "elfcorehdr=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel to identify the memory used by the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr reservation failed - memory is in use (0x%llx)\n",
			elfcorehdr_addr);
		return;
	}

	if (memblock_reserve(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr reservation failed - out of memory\n");
		return;
	}

	pr_info("Reserving %lldKB of memory at %lldMB for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr >> 20);
}
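The address comes from the boot command line; kexec-tools normally appends the parameter for the capture kernel. An illustrative (hypothetical) value:

/*
 * elfcorehdr=0x9fe00000   physical address of the elf core header prepared
 *                         by the primary kernel's kexec load
 */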
Example #14
int __init __sprd_iq_memblock(void)
{
	int i;
	struct membank bank;
	bool bfound = false;

	if (!in_iqmode())
		return -EINVAL;

	for (i = meminfo.nr_banks; i > 0; i--) {
		printk("sprd_iq high: %d, start %lx, size %lx\n",
		       meminfo.bank[i-1].highmem,
		       (unsigned long)meminfo.bank[i-1].start,
		       (unsigned long)meminfo.bank[i-1].size);
		if (meminfo.bank[i-1].highmem ||
		    meminfo.bank[i-1].size < SPRD_IQ_SIZE)
			continue;
		bank.start = meminfo.bank[i-1].start;
		bank.size = meminfo.bank[i-1].size;
		/*
		 * Walk downward through the bank, 1MB at a time, looking for
		 * an unreserved SPRD_IQ_SIZE window at its top end.  Testing
		 * ">=" avoids the unsigned underflow that the original
		 * "bank.size - SPRD_IQ_SIZE > 0" check would suffer.
		 */
		while (bank.size >= SPRD_IQ_SIZE) {
			if (memblock_is_region_reserved(bank.start + bank.size
							- SPRD_IQ_SIZE,
							SPRD_IQ_SIZE)) {
				bank.size -= SZ_1M;
			} else {
				bfound = true;
				break;
			}
		}
		if (bfound)
			break;
	}

	if (!bfound)
		return -ENOMEM;

	printk("sprd_iq found mem %lx\n", (unsigned long)bank.size);

	if (memblock_reserve(bank.start + bank.size - SPRD_IQ_SIZE,
			     SPRD_IQ_SIZE)) {
		printk("sprd_iq memblock_reserve failed\n");
		return -ENOMEM;
	}
	s_iq_addr = bank.start + bank.size - SPRD_IQ_SIZE;
	return 0;
}
Example #15
static int log_buf_open(struct inode *inode, struct file *filp)
{
	void  *base;

	/* The log buffer must lie in a valid, reserved physical range. */
	if (!valid_phys_addr_range(log_start, log_size)
	    || !memblock_is_region_reserved(log_start, log_size)) {
		pr_err("%s: check memory range failed!\n", __func__);

		return -EINVAL;
	}

	base = ioremap(log_start, log_size);
	if (!base) {
		pr_err("%s: remap error!\n", __func__);
		return -ENOMEM;
	}
	filp->private_data = base;

	return 0;
}
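The open handler stores the ioremap() cookie in private_data, so a matching release is needed to drop the mapping. A minimal sketch (assumed, not part of the original driver):

static int log_buf_release(struct inode *inode, struct file *filp)
{
	/* Undo the ioremap() performed at open time. */
	if (filp->private_data)
		iounmap(filp->private_data);
	return 0;
}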
Example #16
static int __init __iomem_reserve_memblock(void)
{
	int ret;

#ifndef CONFIG_CMA
	if (memblock_is_region_reserved(SPRD_ION_MEM_BASE, SPRD_ION_MEM_SIZE))
		return -EBUSY;
	if (memblock_reserve(SPRD_ION_MEM_BASE, SPRD_ION_MEM_SIZE))
		return -ENOMEM;
#else
#ifndef CONFIG_OF
	ret = dma_declare_contiguous_reserved(&sprd_ion_dev.dev, SPRD_ION_MEM_SIZE, SPRD_ION_MEM_BASE, 0, CMA_RESERVE, CMA_THRESHOLD);
	if (unlikely(ret)) {
		pr_err("reserve CMA area(base:%x size:%x) for ION failed!\n",
		       SPRD_ION_MEM_BASE, SPRD_ION_MEM_SIZE);
		return -ENOMEM;
	}
	pr_info("reserve CMA area(base:%x size:%x) for ION\n",
		SPRD_ION_MEM_BASE, SPRD_ION_MEM_SIZE);
#endif
#endif
	return 0;
}
Example #17
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(phys_addr_t limit)
{
	unsigned long long crash_size = 0, crash_base = 0;
	int ret;

	ret = parse_crashkernel(boot_command_line, limit,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base == 0) {
		crash_base = memblock_alloc(crash_size, 1 << 20);
		if (crash_base == 0) {
			pr_warn("crashkernel allocation failed (size:%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. Sanity check */
		if (!memblock_is_region_memory(crash_base, crash_size) ||
			memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("crashkernel= has wrong address or size\n");
			return;
		}

		if (memblock_reserve(crash_base, crash_size)) {
			pr_warn("crashkernel reservation failed - out of memory\n");
			return;
		}
	}

	pr_info("Reserving %lldMB of memory at %lldMB for crashkernel\n",
		crash_size >> 20, crash_base >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
Example #18
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= PAGE_KERNEL;

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if ((v = p_mapped_by_tlbcam(p)))
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #19
static int tima_setup_rkp_mem(void)
{
#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_DEBUG_LOG_START, TIMA_DEBUG_LOG_SIZE) ||
			memblock_reserve(TIMA_DEBUG_LOG_START, TIMA_DEBUG_LOG_SIZE)) {
#else
	if(reserve_bootmem(TIMA_DEBUG_LOG_START, TIMA_DEBUG_LOG_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d " \
			   "at base 0x%x\n", __func__, TIMA_DEBUG_LOG_SIZE, TIMA_DEBUG_LOG_START);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_DEBUG_LOG_START, TIMA_DEBUG_LOG_SIZE);

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_SEC_LOG, TIMA_SEC_LOG_SIZE) ||
			memblock_reserve(TIMA_SEC_LOG, TIMA_SEC_LOG_SIZE)) {
#else
	if(reserve_bootmem(TIMA_SEC_LOG, TIMA_SEC_LOG_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d " \
			   "at base 0x%x\n", __func__, TIMA_SEC_LOG_SIZE, TIMA_SEC_LOG);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_SEC_LOG, TIMA_SEC_LOG_SIZE);

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_PHYS_MAP, TIMA_PHYS_MAP_SIZE) ||
			memblock_reserve(TIMA_PHYS_MAP, TIMA_PHYS_MAP_SIZE)) {
#else
	if(reserve_bootmem(TIMA_PHYS_MAP,  TIMA_PHYS_MAP_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d "					\
			   "at base 0x%x\n", __func__, TIMA_PHYS_MAP_SIZE, TIMA_PHYS_MAP);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_PHYS_MAP, TIMA_PHYS_MAP_SIZE);

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_DASHBOARD_START, TIMA_DASHBOARD_SIZE) ||
			memblock_reserve(TIMA_DASHBOARD_START, TIMA_DASHBOARD_SIZE)) {
#else
	if(reserve_bootmem(TIMA_DASHBOARD_START,  TIMA_DASHBOARD_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d "					\
			   "at base 0x%x\n", __func__, TIMA_DASHBOARD_SIZE, TIMA_DASHBOARD_START);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_DASHBOARD_START, TIMA_DASHBOARD_SIZE);

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_ROBUF_START, TIMA_ROBUF_SIZE) ||
			memblock_reserve(TIMA_ROBUF_START, TIMA_ROBUF_SIZE)) {
#else
	if(reserve_bootmem(TIMA_ROBUF_START,  TIMA_ROBUF_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d "					\
			   "at base 0x%x\n", __func__, TIMA_ROBUF_SIZE, TIMA_ROBUF_START);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_ROBUF_START, TIMA_ROBUF_SIZE);

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(TIMA_VMM_START, TIMA_VMM_SIZE) ||
			memblock_reserve(TIMA_VMM_START, TIMA_VMM_SIZE)) {
#else
	if(reserve_bootmem(TIMA_VMM_START,  TIMA_VMM_SIZE, BOOTMEM_EXCLUSIVE)){
#endif
		pr_err("%s: RKP failed reserving size %d "					\
			   "at base 0x%x\n", __func__, TIMA_VMM_SIZE, TIMA_VMM_START);
		goto out;
	}
	pr_info("RKP :%s, base:%x, size:%x \n", __func__,TIMA_VMM_START, TIMA_VMM_SIZE);

	return 1;
out:
	return 0;
}
#else /* !CONFIG_TIMA_RKP*/
static int tima_setup_rkp_mem(void)
{
	return 1;
}
#endif
static int __init sec_tima_log_setup(char *str)
{
	unsigned size = memparse(str, &str);
	unsigned long base = 0;
	/* If we encounter any problem parsing str ... */
	if (!size || size != roundup_pow_of_two(size) || *str != '@'
		|| kstrtoul(str + 1, 0, &base))
			goto out;

#ifdef CONFIG_NO_BOOTMEM
	if (memblock_is_region_reserved(base, size) ||
		memblock_reserve(base, size)) {
#else
	if (reserve_bootmem(base , size, BOOTMEM_EXCLUSIVE)) {
#endif
			pr_err("%s: failed reserving size %d " \
						"at base 0x%lx\n", __func__, size, base);
			goto out;
	}
	pr_info("tima :%s, base:%lx, size:%x \n", __func__,base, size);
	
	tima_debug_logging_start = base;
	
	if( !tima_setup_rkp_mem())  goto out; 

	return 1;
out:
	return 0;
}
__setup("sec_tima_log=", sec_tima_log_setup);
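Given the parser above, the boot argument must be a power-of-two size, an '@', then a base address. An illustrative value (the addresses are hypothetical):

/*
 * sec_tima_log=1M@0x50000000   1 MiB log buffer at physical 0x50000000
 */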

ssize_t	tima_read(struct file *filep, char __user *buf, size_t size, loff_t *offset)
{
	/* First check is to get rid of integer overflow exploits */
	if (size > DEBUG_LOG_SIZE || (*offset) + size > DEBUG_LOG_SIZE) {
		printk(KERN_ERR"Extra read\n");
		return -EINVAL;
	}

	if (!strcmp(filep->f_path.dentry->d_iname, "tima_secure_log"))
		tima_log_addr = tima_secure_log_addr;
	else if( !strcmp(filep->f_path.dentry->d_iname, "tima_debug_log"))
		tima_log_addr = tima_debug_log_addr;
#ifdef CONFIG_TIMA_RKP
	else if( !strcmp(filep->f_path.dentry->d_iname, "tima_debug_rkp_log"))
		tima_log_addr = tima_debug_rkp_log_addr;
	else
		tima_log_addr = tima_secure_rkp_log_addr;
#endif
	if (copy_to_user(buf, (const char *)tima_log_addr + (*offset), size)) {
		printk(KERN_ERR "Copy to user failed\n");
		return -EFAULT;
	}
	*offset += size;
	return size;
}

static const struct file_operations tima_proc_fops = {
	.read		= tima_read,
};

/**
 *      tima_debug_log_read_init - Initialization function for TIMA
 *
 *      Creates the TIMA proc entries and registers their read handler.
 */
static int __init tima_debug_log_read_init(void)
{
	unsigned long tima_secure_logging_start = 0;
	tima_secure_logging_start = tima_debug_logging_start + DEBUG_LOG_SIZE;
	
	if (proc_create("tima_debug_log", 0644,NULL, &tima_proc_fops) == NULL) {
		printk(KERN_ERR"tima_debug_log_read_init: Error creating proc entry\n");
		goto error_return;
	}
	if (proc_create("tima_secure_log", 0644,NULL, &tima_proc_fops) == NULL) {
		printk(KERN_ERR"tima_secure_log_read_init: Error creating proc entry\n");
		goto remove_debug_entry;
	}
	printk(KERN_INFO"tima_debug_log_read_init: Registering /proc/tima_debug_log Interface \n");

#ifdef CONFIG_TIMA_RKP
	if (proc_create("tima_debug_rkp_log", 0644,NULL, &tima_proc_fops) == NULL) {
		printk(KERN_ERR"tima_debug_rkp_log_read_init: Error creating proc entry\n");
		goto remove_secure_entry;
	}
	if (proc_create("tima_secure_rkp_log", 0644,NULL, &tima_proc_fops) == NULL) {
		printk(KERN_ERR"tima_secure_rkp_log_read_init: Error creating proc entry\n");
		goto remove_debug_rkp_entry;
	}
#endif
	tima_debug_log_addr = (unsigned long *)phys_to_virt(tima_debug_logging_start);
	tima_secure_log_addr = (unsigned long *)phys_to_virt(tima_secure_logging_start);
#ifdef CONFIG_TIMA_RKP
	tima_debug_rkp_log_addr  = (unsigned long *)phys_to_virt(DEBUG_RKP_LOG_START);
	tima_secure_rkp_log_addr = (unsigned long *)phys_to_virt(SECURE_RKP_LOG_START);
#endif
	return 0;

#ifdef CONFIG_TIMA_RKP
remove_debug_rkp_entry:
	remove_proc_entry("tima_debug_rkp_log", NULL);
remove_secure_entry:
	remove_proc_entry("tima_secure_log", NULL);
#endif
remove_debug_entry:
	remove_proc_entry("tima_debug_log", NULL);
error_return:
	return -1;
}

/**
 *      tima_debug_log_read_exit - Cleanup code for TIMA
 *
 *      Removes the /proc/tima_* entries and performs the required cleanup.
 */
static void __exit tima_debug_log_read_exit(void)
{
	remove_proc_entry("tima_debug_log", NULL);
	remove_proc_entry("tima_secure_log", NULL);
#ifdef CONFIG_TIMA_RKP
	remove_proc_entry("tima_debug_rkp_log", NULL);
	remove_proc_entry("tima_secure_rkp_log", NULL);
#endif
	printk(KERN_INFO"Deregistering /proc/tima_debug_log Interface\n");
}

module_init(tima_debug_log_read_init);
module_exit(tima_debug_log_read_exit);

MODULE_DESCRIPTION(DRIVER_DESC);
Example #20
/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *			      for particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
				  phys_addr_t base, phys_addr_t limit)
{
	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
	phys_addr_t alignment;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			base = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			base = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	r->start = base;
	r->size = size;
	r->dev = dev;
	cma_reserved_count++;
	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return base;
}
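A board-level usage sketch, as the comment suggests; the device (a hypothetical platform device) and size are assumptions, and error handling is reduced to a warning:

static void __init board_reserve_cma(void)
{
	/* 16 MiB anywhere (base = 0, limit = 0) for a camera device */
	if (dma_declare_contiguous(&camera_dev.dev, SZ_16M, 0, 0))
		pr_warn("camera CMA reservation failed\n");
}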
Example #21
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
		 void *caller)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from IOREMAP_TOP
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
		printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
		       (unsigned long long)p, __builtin_return_address(0));
		return NULL;
	}
#endif

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * mapping.
	 */
	v = p_block_mapped(p);
	if (v)
		goto out;

	if (slab_is_available()) {
		struct vm_struct *area;
		area = get_vm_area_caller(size, VM_IOREMAP, caller);
		if (area == 0)
			return NULL;
		area->phys_addr = p;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (slab_is_available())
			vunmap((void *)v);
		return NULL;
	}

out:
	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
Example #22
static void __init mx_cma_region_reserve(
    struct cma_region *regions_normal,
    struct cma_region *regions_secure)
{
    struct cma_region *reg;
    phys_addr_t paddr_last = 0xFFFFFFFF;

    for (reg = regions_normal; reg->size != 0; reg++) {
        phys_addr_t paddr;

        if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
            pr_err("S5P/CMA: size of '%s' is NOT page-aligned\n",
                   reg->name);
            reg->size = PAGE_ALIGN(reg->size);
        }

        if (reg->reserved) {
            pr_err("S5P/CMA: '%s' alread reserved\n", reg->name);
            continue;
        }

        if (reg->alignment) {
            if ((reg->alignment & ~PAGE_MASK) ||
                    (reg->alignment & ~reg->alignment)) {
                pr_err("S5P/CMA: Failed to reserve '%s': "
                       "incorrect alignment 0x%08x.\n",
                       reg->name, reg->alignment);
                continue;
            }
        } else {
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                    && (memblock_reserve(reg->start, reg->size) == 0))
                reg->reserved = 1;
            else
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
            continue;
        }

        paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                                       reg->size, reg->alignment);
        if (paddr != MEMBLOCK_ERROR) {
            if (memblock_reserve(paddr, reg->size)) {
                pr_err("S5P/CMA: Failed to reserve '%s'\n",
                       reg->name);
                continue;
            }
            reg->start = paddr;
            reg->reserved = 1;
            pr_info("name = %s, paddr = 0x%x, size = %d\n", reg->name, paddr, reg->size);
        } else {
            pr_err("S5P/CMA: No free space in memory for '%s'\n",
                   reg->name);
        }

        if (cma_early_region_register(reg)) {
            pr_err("S5P/CMA: Failed to register '%s'\n",
                   reg->name);
            memblock_free(reg->start, reg->size);
        } else {
            paddr_last = min(paddr, paddr_last);
        }
    }

    if (regions_secure && regions_secure->size) {
        size_t size_secure = 0;
        size_t align_secure, size_region2, aug_size, order_region2;

        for (reg = regions_secure; reg->size != 0; reg++)
            size_secure += reg->size;

        reg--;

        /* Entire secure regions will be merged into 2
         * consecutive regions. */
        align_secure = 1 <<
                       (get_order((size_secure + 1) / 2) + PAGE_SHIFT);
        /* Calculation of a subregion size */
        size_region2 = size_secure - align_secure;
        order_region2 = get_order(size_region2) + PAGE_SHIFT;
        if (order_region2 < 20)
            order_region2 = 20; /* 1MB */
        order_region2 -= 3; /* divide by 8 */
        size_region2 = ALIGN(size_region2, 1 << order_region2);

        aug_size = align_secure + size_region2 - size_secure;
        if (aug_size > 0)
            reg->size += aug_size;

        size_secure = ALIGN(size_secure, align_secure);

        if (paddr_last >= memblock.current_limit) {
            paddr_last = memblock_find_in_range(0,
                                                MEMBLOCK_ALLOC_ACCESSIBLE,
                                                size_secure, reg->alignment);
        } else {
            paddr_last -= size_secure;
            paddr_last = round_down(paddr_last, align_secure);
        }

        if (paddr_last) {
            while (memblock_reserve(paddr_last, size_secure))
                paddr_last -= align_secure;

            do {
                reg->start = paddr_last;
                reg->reserved = 1;
                paddr_last += reg->size;

                if (cma_early_region_register(reg)) {
                    memblock_free(reg->start, reg->size);
                    pr_err("S5P/CMA: "
                           "Failed to register secure region "
                           "'%s'\n", reg->name);
                } else {
                    size_secure -= reg->size;
                }
            } while (reg-- != regions_secure);

            if (size_secure > 0)
                memblock_free(paddr_last, size_secure);
        } else {
            pr_err("S5P/CMA: Failed to reserve secure regions\n");
        }
    }
}
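To make the secure-region sizing concrete, here is a worked instance of the align_secure / size_region2 arithmetic above, assuming 4 KiB pages (PAGE_SHIFT = 12); the input value is illustrative only:

/*
 * size_secure   = 48 MiB
 * align_secure  = 1 << (get_order((48M + 1) / 2) + 12)
 *               = 1 << (13 + 12) = 32 MiB
 * size_region2  = 48 MiB - 32 MiB = 16 MiB
 * order_region2 = get_order(16 MiB) + 12 = 24; >= 20, minus 3 -> 21 (2 MiB)
 * size_region2  = ALIGN(16 MiB, 2 MiB) = 16 MiB
 * aug_size      = 32 MiB + 16 MiB - 48 MiB = 0 (no padding needed)
 */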
Example #23
void __init s5p_cma_region_reserve(struct cma_region *regions_normal,
				      struct cma_region *regions_secure,
				      size_t align_secure, const char *map)
{
	struct cma_region *reg;
	phys_addr_t paddr_last = 0xFFFFFFFF;

	for (reg = regions_normal; reg->size != 0; reg++) {
		phys_addr_t paddr;

		if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
			pr_debug("S5P/CMA: size of '%s' is NOT page-aligned\n",
								reg->name);
			reg->size = PAGE_ALIGN(reg->size);
		}


		if (reg->reserved) {
			pr_err("S5P/CMA: '%s' already reserved\n", reg->name);
			continue;
		}

		if (reg->alignment) {
			if ((reg->alignment & ~PAGE_MASK) ||
				(reg->alignment & ~reg->alignment)) {
				pr_err("S5P/CMA: Failed to reserve '%s': "
						"incorrect alignment 0x%08x.\n",
						reg->name, reg->alignment);
				continue;
			}
		} else {
			reg->alignment = PAGE_SIZE;
		}

		if (reg->start) {
			if (!memblock_is_region_reserved(reg->start, reg->size)
			    && (memblock_reserve(reg->start, reg->size) == 0))
				reg->reserved = 1;
			else {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
				       reg->name);
				continue;
			}

			pr_debug("S5P/CMA: "
				 "Reserved 0x%08x/0x%08x for '%s'\n",
				 reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);

			paddr = reg->start;
		} else {
			paddr = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					reg->size, reg->alignment);
		}

		if (paddr != MEMBLOCK_ERROR) {
			if (memblock_reserve(paddr, reg->size)) {
				pr_err("S5P/CMA: Failed to reserve '%s'\n",
								reg->name);
				continue;
			}

			reg->start = paddr;
			reg->reserved = 1;

			pr_info("S5P/CMA: Reserved 0x%08x/0x%08x for '%s'\n",
						reg->start, reg->size, reg->name);

			cma_region_descriptor_add(reg->name, reg->start, reg->size);
		} else {
			pr_err("S5P/CMA: No free space in memory for '%s'\n",
								reg->name);
		}

		if (cma_early_region_register(reg)) {
			pr_err("S5P/CMA: Failed to register '%s'\n",
								reg->name);
			memblock_free(reg->start, reg->size);
		} else {
			paddr_last = min(paddr, paddr_last);
		}
	}

	if (align_secure & (align_secure - 1)) {
		pr_err("S5P/CMA: "
			"Wrong alignment requirement for secure region.\n");
	} else if (regions_secure && regions_secure->size) {
		size_t size_secure = 0;

		for (reg = regions_secure; reg->size != 0; reg++)
			size_secure += reg->size;

		reg--;

		/* Entire secure regions will be merged into 2
		 * consecutive regions. */
		if (align_secure == 0) {
			size_t size_region2;
			size_t order_region2;
			size_t aug_size;

			align_secure = 1 <<
				(get_order((size_secure + 1) / 2) + PAGE_SHIFT);
			/* Calculation of a subregion size */
			size_region2 = size_secure - align_secure;
			order_region2 = get_order(size_region2) + PAGE_SHIFT;
			if (order_region2 < 20)
				order_region2 = 20; /* 1MB */
			order_region2 -= 3; /* divide by 8 */
			size_region2 = ALIGN(size_region2, 1 << order_region2);

			aug_size = align_secure + size_region2 - size_secure;
			if (aug_size > 0) {
				reg->size += aug_size;
				size_secure += aug_size;
				pr_debug("S5P/CMA: "
					"Augmented size of '%s' by %#x B.\n",
					reg->name, aug_size);
			}
		} else
			size_secure = ALIGN(size_secure, align_secure);

		pr_info("S5P/CMA: "
			"Reserving %#x for secure region aligned by %#x.\n",
						size_secure, align_secure);

		if (paddr_last >= memblock.current_limit) {
			paddr_last = memblock_find_in_range(0,
					MEMBLOCK_ALLOC_ACCESSIBLE,
					size_secure, reg->alignment);
		} else {
			paddr_last -= size_secure;
			paddr_last = round_down(paddr_last, align_secure);
		}

		if (paddr_last) {
			pr_info("S5P/CMA: "
				"Reserved 0x%08x/0x%08x for 'secure_region'\n",
				paddr_last, size_secure);
#ifndef CONFIG_DMA_CMA
			while (memblock_reserve(paddr_last, size_secure))
				paddr_last -= align_secure;
#else
			if (!reg->start) {
				while (memblock_reserve(paddr_last,
							size_secure))
					paddr_last -= align_secure;
			}
#endif
			do {
#ifndef CONFIG_DMA_CMA
				reg->start = paddr_last;
				reg->reserved = 1;
				paddr_last += reg->size;
#else
				if (reg->start) {
					reg->reserved = 1;
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
					if (reg->start == 0x5C100000) {
						if (memblock_reserve(0x5C100000,
								0x700000))
							panic("memblock\n");
						if (memblock_reserve(0x5F000000,
								0x200000))
							panic("memblock\n");
					} else {
						if (memblock_reserve(reg->start,
								reg->size))
							panic("memblock\n");
					}
#else
					if (memblock_reserve(reg->start,
								reg->size))
						panic("memblock\n");

#endif
				} else {
					reg->start = paddr_last;
					reg->reserved = 1;
					paddr_last += reg->size;
				}
#endif
				pr_info("S5P/CMA: "
					"Reserved 0x%08x/0x%08x for '%s'\n",
					reg->start, reg->size, reg->name);

				cma_region_descriptor_add(reg->name, reg->start, reg->size);

				if (cma_early_region_register(reg)) {
					memblock_free(reg->start, reg->size);
					pr_err("S5P/CMA: "
					"Failed to register secure region "
					"'%s'\n", reg->name);
				} else {
					size_secure -= reg->size;
				}
			} while (reg-- != regions_secure);

			if (size_secure > 0)
				memblock_free(paddr_last, size_secure);
		} else {
			pr_err("S5P/CMA: Failed to reserve secure regions\n");
		}
	}

	if (map)
		cma_set_defaults(NULL, map);
}
Example #24
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_err("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	return ret;
}
Example #25
void __init nxp_cma_region_reserve(struct cma_region *regions, const char *map)
{
    struct cma_region *reg;
    phys_addr_t paddr_last = 0xFFFFFFFF;

    for (reg = regions; reg->size != 0; reg++) {
        phys_addr_t paddr;

        if (!IS_ALIGNED(reg->size, PAGE_SIZE)) {
            pr_debug("NXP/CMA: size of '%s' is NOT page-aligned\n", reg->name);
            reg->size = PAGE_ALIGN(reg->size);
        }

        if (reg->reserved) {
            pr_err("NXP/CMA: '%s' already reserved\n", reg->name);
            continue;
        }

        if (reg->alignment) {
            if ((reg->alignment & ~PAGE_MASK) ||
                (reg->alignment & ~reg->alignment)) {
                pr_err("NXP/CMA: failed to reserve '%s': "
                        "incorrect alignment 0x%08x.\n",
                        reg->name, reg->alignment);
                continue;
            }
        } else {
            reg->alignment = PAGE_SIZE;
        }

        if (reg->start) {
            if (!memblock_is_region_reserved(reg->start, reg->size)
                && (memblock_reserve(reg->start, reg->size) == 0)) {
                reg->reserved = 1;
            } else {
                pr_err("NXP/CMA: failed to reserve '%s'\n", reg->name);
            }

        } else {
            paddr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE,
                    reg->size, reg->alignment);
            if (paddr) {
                if (memblock_reserve(paddr, reg->size)) {
                    pr_err("NXP/CMA: failed to reserve '%s': memblock_reserve() failed\n",
                            reg->name);
                    continue;
                }

                reg->start = paddr;
                reg->reserved = 1;
            } else {
                pr_err("NXP/CMA: No free space in memory for '%s': size(%d)\n",
                        reg->name, reg->size);
            }
        }

        if (reg->reserved) {
            pr_debug("NXP/CMA: "
                    "Reserved 0x%08x/0x%08x for '%s'\n",
                    reg->start, reg->size, reg->name);
            printk("NXP/CMA: "
                    "Reserved 0x%08x/0x%08x for '%s'\n",
                    reg->start, reg->size, reg->name);

            if (0 == cma_early_region_register(reg)) {
                paddr_last = min(paddr, paddr_last);
                pr_debug("NXP/CMA: success register cma region for '%s'\n",
                        reg->name);
                printk("NXP/CMA: success register cma region for '%s'\n",
                        reg->name);
            } else {
                pr_err("NXP/CMA: failed to cma_early_region_register for '%s'\n",
                        reg->name);
                memblock_free(reg->start, reg->size);
            }
        }
    }

    if (map) {
        cma_set_defaults(NULL, map);
    }
}