/**
 * kexec_list_flush - Helper to flush the kimage list to PoC.
 */
static void kexec_list_flush(unsigned long kimage_head)
{
	void *dest;
	unsigned long *entry;

	for (entry = &kimage_head, dest = NULL; ; entry++) {
		unsigned int flag = *entry &
			(IND_DESTINATION | IND_INDIRECTION | IND_DONE |
			IND_SOURCE);
		void *addr = phys_to_virt(*entry & PAGE_MASK);

		switch (flag) {
		case IND_INDIRECTION:
			entry = (unsigned long *)addr - 1;
			__flush_dcache_area(addr, PAGE_SIZE);
			break;
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_SOURCE:
			__flush_dcache_area(addr, PAGE_SIZE);
			dest += PAGE_SIZE;
			break;
		case IND_DONE:
			return;
		default:
			BUG();
		}
	}
}
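For reference, each entry that kexec_list_flush() walks is a physical page address with one of the IND_* flags packed into its low bits. A minimal sketch of that encoding, using the flag values from include/linux/kexec.h (the entry-building line is illustrative and next_list_page is a placeholder, not code from this file):

#define IND_DESTINATION	0x1	/* entry starts a new destination page run */
#define IND_INDIRECTION	0x2	/* entry points at the next page of the list */
#define IND_DONE	0x4	/* entry terminates the list */
#define IND_SOURCE	0x8	/* entry is a source page to copy to the destination */

/* e.g. an indirection entry, roughly as the core kexec code stores it */
unsigned long entry = page_to_phys(next_list_page) | IND_INDIRECTION;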
ssize_t fmpfw_hash_update(struct fmp_info *info, struct hash_data *hdata,
				struct scatterlist *sg, size_t len)
{
	int ret = 0;
	unsigned long addr;
	struct device *dev = info->dev;
	struct hmac_sha256_fmpfw_info *fmpfw_info = hdata->fmpfw_info;

	fmpfw_info->s.step = UPDATE;
	fmpfw_info->s.input = (uint32_t)sg_phys(sg);
	__flush_dcache_area(sg_virt(sg), len);
	fmpfw_info->s.input_len = len;
	__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
	addr = virt_to_phys(fmpfw_info);

	reinit_completion(&hdata->async.result->completion);
	if (fmpfw_info->hmac_mode) {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_HMAC_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW HMAC SHA256 update. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	} else {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW SHA256 update. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	}

	return waitfor(info, hdata->async.result, ret);
}
Example #3
/*
 * This is called by __cpu_suspend_enter() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * ptr: CPU context virtual address
 * save_ptr: address of the location where the context physical address
 *           must be saved
 */
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
				phys_addr_t *save_ptr)
{
	*save_ptr = virt_to_phys(ptr);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * @ptr: CPU context virtual address
 * @save_ptr: address of the location where the context physical address
 *            must be saved
 */
void __cpu_suspend_save(struct cpu_suspend_ctx *ptr, u64 *save_ptr)
{
	*save_ptr = virt_to_phys(ptr);

	ptr->ttbr0_el1 = virt_to_phys(idmap_pg_dir);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
}
Example #5
static int __init tzasc_test_init(void)
{
	int ret;

	tzasc_buf_phys = ALIGN(virt_to_phys(tzasc_buf), PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);

	__flush_dcache_area(phys_to_virt(tzasc_buf_phys),
			PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER);
	outer_inv_range(tzasc_buf_phys, tzasc_buf_phys +
			(PAGE_SIZE << CONFIG_TZASC_TEST_PAGE_ORDER));

	ret = misc_register(&tzasc_dev);
	if (ret) {
		pr_err("misc device registration failed\n");
		goto out;
	}

	ret = device_create_file(tzasc_dev.this_device, &dev_attr_physaddr);
	if (ret) {
		pr_err("physaddr sysfs file creation failed\n");
		goto out_deregister;
	}

	return 0;

out_deregister:
	misc_deregister(&tzasc_dev);
out:
	return ret;
}
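The dev_attr_physaddr registered above implies a DEVICE_ATTR definition elsewhere in this driver; a plausible sketch of it, assuming tzasc_buf_phys is what the userspace test needs to read (the body is an assumption, only the attribute name comes from the code above):

static ssize_t physaddr_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* hand the aligned, flushed test buffer's physical address to userspace */
	return sprintf(buf, "%lx\n", (unsigned long)tzasc_buf_phys);
}
static DEVICE_ATTR(physaddr, S_IRUGO, physaddr_show, NULL);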
Example #6
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	void **release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	release_addr = __va(cpu_release_addr[cpu]);

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));

	__flush_dcache_area(release_addr, sizeof(release_addr[0]));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	return 0;
}
Example #8
File: tegra124.c Project: Kirill2013/kasan
static void tegra132_flush_dcache(struct page *page, unsigned long offset,
				  size_t size)
{
	void *virt = page_address(page) + offset;

	__flush_dcache_area(virt, size);
}
Example #9
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 32 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR_EL1 through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
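The inverse mapping, from an MPIDR value to the linear index, is done in assembly on the suspend/resume path; rendered in C against the fields computed above, it would look roughly like this (a sketch, not the kernel's actual helper):

static inline u32 mpidr_hash_index(u64 mpidr)
{
	u64 m = mpidr & mpidr_hash.mask;

	/* shift each affinity field down and OR the compressed fields together */
	return ((m & GENMASK_ULL(7, 0))   >> mpidr_hash.shift_aff[0]) |
	       ((m & GENMASK_ULL(15, 8))  >> mpidr_hash.shift_aff[1]) |
	       ((m & GENMASK_ULL(23, 16)) >> mpidr_hash.shift_aff[2]) |
	       ((m & GENMASK_ULL(39, 32)) >> mpidr_hash.shift_aff[3]);
}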
Example #10
File: smp.c Project: cake654326/xpenology
/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not.  This is necessary for the hotplug code to work
 * reliably.
 */
static void __cpuinit write_pen_release(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
	__flush_dcache_area(start, size);
}
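The value written here is consumed by the secondary_holding_pen loop in head.S; in C-flavoured pseudocode (the real loop is assembly, and the wfe pairs with the sev() the boot CPU issues after updating the pen):

	/* each pen-bound secondary spins until it sees its own hwid, then boots */
	while (secondary_holding_pen_release != cpu_logical_map(smp_processor_id()))
		wfe();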
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * @arg: Argument to pass to suspend operations
 * @ptr: CPU context virtual address
 * @save_ptr: address of the location where the context physical address
 *            must be saved
 */
int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
			   phys_addr_t *save_ptr)
{
	int cpu = smp_processor_id();

	*save_ptr = virt_to_phys(ptr);

	cpu_do_suspend(ptr);
	/*
	 * Only flush the context that must be retrieved with the MMU
	 * off. VA primitives ensure the flush is applied to all
	 * cache levels so context is pushed to DRAM.
	 */
	__flush_dcache_area(ptr, sizeof(*ptr));
	__flush_dcache_area(save_ptr, sizeof(*save_ptr));

	return cpu_ops[cpu]->cpu_suspend(arg);
}
Example #12
static int __init cpu_suspend_init(void)
{
	void *ctx_ptr;

	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;

	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
	__flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));

	return 0;
}
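The stash registered here is filled in from the suspend assembly path, which indexes it per CPU with the MPIDR hash (see the mpidr_hash_index() sketch under Example #10) so each core records where its own context lives; conceptually (ctx is a placeholder for the CPU's suspend context):

	/* sketch of what kernel/sleep.S does, with stores visible MMU-off */
	sleep_save_sp.save_ptr_stash[mpidr_hash_index(read_cpuid_mpidr())] =
			virt_to_phys(ctx);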
Example #13
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_area(page_address(page),
				PAGE_SIZE << compound_order(page));
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}
Example #14
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}
Example #15
File: flush.c Project: 0-T-0/ps4-linux
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

	/* no flushing needed for anonymous pages */
	if (!page_mapping(page))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_area(page_address(page),
				PAGE_SIZE << compound_order(page));
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}
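Both variants rely on PG_dcache_clean being cleared again whenever a page becomes dirty with respect to the D-cache; the arm64 flush_dcache_page() of this era deferred the work in exactly that way, so the next __sync_icache_dcache() call on the page repeats the flush:

void flush_dcache_page(struct page *page)
{
	/* mark the page as needing a flush; the work happens lazily above */
	if (test_bit(PG_dcache_clean, &page->flags))
		clear_bit(PG_dcache_clean, &page->flags);
}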
Example #16
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret, res;
	struct wd_api *wd_api = NULL;
	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
#if 1
			pr_crit("Trigger WDT RESET\n");
			res = get_wd_api(&wd_api);
			if (res)
				pr_crit("get wd api error !!\n");
			else
				wd_api->wd_sw_reset(3); /* asks the system to reboot */
#endif
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
Example #17
static int __init smp_spin_table_prepare_cpu(int cpu)
{
	void **release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	release_addr = __va(cpu_release_addr[cpu]);
	release_addr[0] = (void *)__pa(secondary_holding_pen);
	__flush_dcache_area(release_addr, sizeof(release_addr[0]));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	return 0;
}
Example #18
void preload_balance_init(struct kbase_device *kbdev)
{
	int j = 0;

	struct page *p = phys_to_page(mali_balance.base);
	int npages = PAGE_ALIGN(mali_balance.size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return;

	for (j = 0; j < npages; j++)
		*(tmp++) = p++;

	mali_balance.stream_reg = (unsigned char *)vmap(pages, npages, VM_MAP,
					pgprot_writecombine(PAGE_KERNEL));
	if (!mali_balance.stream_reg) {
		vfree(pages);
		return;
	}
	__flush_dcache_area(mali_balance.stream_reg, streamout_size);

	memset(mali_balance.stream_reg, 0x0, mali_balance.size);
	memcpy(mali_balance.stream_reg, streamout, streamout_size);

	vfree(pages);
}
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}
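Both spin-table variants are reached through the cpu_operations table, which is what lets the generic __cpu_up() path invoke them; upstream wires this up roughly as follows (method names as in arch/arm64/kernel/smp_spin_table.c):

const struct cpu_operations smp_spin_table_ops = {
	.name		= "spin-table",
	.cpu_init	= smp_spin_table_cpu_init,
	.cpu_prepare	= smp_spin_table_cpu_prepare,
	.cpu_boot	= smp_spin_table_cpu_boot,
};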
int fmpfw_hash_final(struct fmp_info *info, struct hash_data *hdata, void *output)
{
	int ret = 0;
	unsigned long addr;
	struct device *dev = info->dev;
	struct hmac_sha256_fmpfw_info *fmpfw_info = hdata->fmpfw_info;

	memset(output, 0, hdata->digestsize);
	fmpfw_info->s.step = FINAL;
	fmpfw_info->s.output = (uint32_t)virt_to_phys(output);
	__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
	addr = virt_to_phys(fmpfw_info);

	reinit_completion(&hdata->async.result->completion);
	__dma_unmap_area((void *)output, hdata->digestsize, DMA_FROM_DEVICE);
	if (fmpfw_info->hmac_mode) {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_HMAC_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW HMAC SHA256 final. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	} else {
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_SHA2_TEST, addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW SHA256 final. ret = 0x%x\n", ret);
			ret = -EFAULT;
		}
	}
	__dma_unmap_area((void *)output, hdata->digestsize, DMA_FROM_DEVICE);

	if (fmpfw_info->hmac_mode)
		dev_info(dev, "fmp fw hmac sha256 F/W final is done\n");
	else
		dev_info(dev, "fmp fw sha256 F/W final is done\n");

	kfree(fmpfw_info);
	return waitfor(info, hdata->async.result, ret);
}
Example #21
void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
{
	copy_page(kto, kfrom);
	__flush_dcache_area(kto, PAGE_SIZE);
}
Example #22
File: smp.c Project: bradbishop/linux
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
					cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			/* Fall through */
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}
/* Hash functions */
int fmpfw_hash_init(struct fmp_info *info, struct hash_data *hdata,
			const char *alg_name, int hmac_mode,
			void *mackey, size_t mackeylen)
{
	int ret;
	unsigned long addr;
	struct device *dev = info->dev;
	struct hmac_sha256_fmpfw_info *fmpfw_info;

	fmpfw_info = kzalloc(sizeof(*fmpfw_info), GFP_KERNEL);
	if (unlikely(!fmpfw_info)) {
		dev_err(dev, "Fail to alloc fmpfw info\n");
		ret = -ENOMEM;
		goto error_alloc_info;
	}

	if (hmac_mode != 0) {
		fmpfw_info->s.step = INIT;
		fmpfw_info->hmac_mode = 1;
		fmpfw_info->key = (uint32_t)virt_to_phys(mackey);
		__flush_dcache_area(mackey, mackeylen);
		fmpfw_info->key_len = mackeylen;
		__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
		addr = virt_to_phys(fmpfw_info);
		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_HMAC_SHA2_TEST, (uint32_t)addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW HMAC SHA256 init. ret = 0x%x\n", ret);
			ret = -EFAULT;
			goto error_hmac_smc;
		}
	} else {
		fmpfw_info->s.step = INIT;
		fmpfw_info->hmac_mode = 0;
		fmpfw_info->key = 0;
		fmpfw_info->key_len = 0;
		__flush_dcache_area(fmpfw_info, sizeof(*fmpfw_info));
		addr = virt_to_phys(fmpfw_info);

		ret = exynos_smc(SMC_CMD_FMP, FMP_FW_SHA2_TEST, (uint32_t)addr, 0);
		if (unlikely(ret)) {
			dev_err(dev, "Fail to smc call for FMPFW SHA256 init. ret = 0x%x\n", ret);
			ret = -EFAULT;
			goto error_sha_smc;
		}
	}

	hdata->digestsize = SHA256_DIGEST_SIZE;
	hdata->alignmask = 0x0;
	hdata->fmpfw_info = fmpfw_info;

	hdata->async.result = kzalloc(sizeof(*hdata->async.result), GFP_KERNEL);
	if (unlikely(!hdata->async.result)) {
		ret = -ENOMEM;
		goto error_free_info;
	}

	init_completion(&hdata->async.result->completion);

	hdata->init = 1;

	return 0;

error_free_info:
error_hmac_smc:
error_sha_smc:
	kfree(fmpfw_info);
error_alloc_info:
	hdata->fmpfw_info = NULL;
	return ret;
}
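Taken together with fmpfw_hash_update() and fmpfw_hash_final() shown earlier, callers follow the usual init/update/final sequence; a hypothetical one-shot digest helper over a single scatterlist (the surrounding fmp_info/hash_data plumbing is assumed from the examples above):

static int fmpfw_sha256_digest(struct fmp_info *info, struct scatterlist *sg,
			       size_t len, void *digest)
{
	struct hash_data hdata = { 0 };
	ssize_t ret;

	ret = fmpfw_hash_init(info, &hdata, "sha256", 0, NULL, 0);
	if (ret)
		return ret;

	ret = fmpfw_hash_update(info, &hdata, sg, len);
	if (ret < 0)
		return ret;	/* note: hdata.fmpfw_info is only freed by final */

	return fmpfw_hash_final(info, &hdata, digest);
}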
/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *image)
{
	phys_addr_t reboot_code_buffer_phys;
	void *reboot_code_buffer;

	BUG_ON(num_online_cpus() > 1);

	arm64_kexec_kimage_head = image->head;

	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

	kexec_image_info(image);

	pr_devel("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
		image->control_code_page);
	pr_devel("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
		&reboot_code_buffer_phys);
	pr_devel("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
		reboot_code_buffer);
	pr_devel("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
		relocate_new_kernel);
	pr_devel("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
		__func__, __LINE__, relocate_new_kernel_size,
		relocate_new_kernel_size);

	pr_devel("%s:%d: kexec_dtb_addr:           %lx\n", __func__, __LINE__,
		arm64_kexec_dtb_addr);
	pr_devel("%s:%d: kexec_kimage_head:        %lx\n", __func__, __LINE__,
		arm64_kexec_kimage_head);
	pr_devel("%s:%d: kexec_kimage_start:       %lx\n", __func__, __LINE__,
		arm64_kexec_kimage_start);

#ifdef CONFIG_KEXEC_HARDBOOT
	if (!kexec_boot_atags)
		kexec_boot_atags = image->start - 0x8000 + 0x1000;

	kexec_hardboot = image->hardboot;
#endif

	/*
	 * Copy relocate_new_kernel to the reboot_code_buffer for use
	 * after the kernel is shut down.
	 */
	memcpy(reboot_code_buffer, relocate_new_kernel,
		relocate_new_kernel_size);

	/* Flush the reboot_code_buffer in preparation for its execution. */
	__flush_dcache_area(reboot_code_buffer, relocate_new_kernel_size);

	/* Flush the kimage list. */
	kexec_list_flush(image->head);

	pr_info("Bye!\n");

	/* Disable all DAIF exceptions. */
	asm volatile ("msr daifset, #0xf" : : : "memory");

	/*
	 * soft_restart() will shutdown the MMU, disable data caches, then
	 * transfer control to the reboot_code_buffer which contains a copy of
	 * the relocate_new_kernel routine.  relocate_new_kernel will use
	 * physical addressing to relocate the new kernel to its final position
	 * and then will transfer control to the entry point of the new kernel.
	 */
	soft_restart(reboot_code_buffer_phys);
}
Example #25
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info;

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	if (buffer->flags & ION_FLAG_CACHED)
		return -EINVAL;

	if (align > PAGE_SIZE)
		return -EINVAL;

	if (!ion_is_heap_available(heap, flags, NULL))
		return -EPERM;

	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
						GFP_HIGHUSER | __GFP_ZERO);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto free_mem;
	}

	if (ion_cma_get_sgtable
	    (dev, info->table, info->cpu_addr, info->handle, len))
		goto free_table;
	/* keep this for memory release */
	buffer->priv_virt = info;

#ifdef CONFIG_ARM64
	if (!ion_buffer_cached(buffer) && !(buffer->flags & ION_FLAG_PROTECTED)) {
		if (ion_buffer_need_flush_all(buffer))
			flush_all_cpu_caches();
		else
			__flush_dcache_area(page_address(sg_page(info->table->sgl)),
									len);
	}
#else
	if (!ion_buffer_cached(buffer) && !(buffer->flags & ION_FLAG_PROTECTED)) {
		if (ion_buffer_need_flush_all(buffer))
			flush_all_cpu_caches();
		else
			dmac_flush_range(page_address(sg_page(info->table->sgl)),
					page_address(sg_page(info->table->sgl)) + len);
	}
#endif
	if ((buffer->flags & ION_FLAG_PROTECTED) && ion_secure_protect(heap))
		goto free_table;

	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return 0;

free_table:
	kfree(info->table);
free_mem:
	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}
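The "keep this for memory release" comment points at the matching free path, which unwinds the same three allocations in reverse; a sketch of the conventional counterpart (shapes assumed from the allocate side):

static void ion_cma_free(struct ion_buffer *buffer)
{
	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
	struct device *dev = cma_heap->dev;
	struct ion_cma_buffer_info *info = buffer->priv_virt;

	dev_dbg(dev, "Release buffer %p\n", buffer);
	/* release the memory, then the sg table, then the bookkeeping struct */
	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
	sg_free_table(info->table);
	kfree(info->table);
	kfree(info);
}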
Example #26
File: kaslr.c Project: cmaiolino/linux
/*
 * This routine will be executed with the kernel mapped at its default virtual
 * address, and if it returns successfully, the kernel will be remapped, and
 * start_kernel() will be executed from a randomized virtual offset. The
 * relocation will result in all absolute references (e.g., static variables
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 */
u64 __init kaslr_early_init(u64 dt_phys)
{
	void *fdt;
	u64 seed, offset, mask, module_range;
	const u8 *cmdline, *str;
	int size;

	/*
	 * Set a reasonable default for module_alloc_base in case
	 * we end up running with module randomization disabled.
	 */
	module_alloc_base = (u64)_etext - MODULES_VSIZE;

	/*
	 * Try to map the FDT early. If this fails, we simply bail,
	 * and proceed with KASLR disabled. We will make another
	 * attempt at mapping the FDT in setup_machine()
	 */
	early_fixmap_init();
	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	if (!fdt)
		return 0;

	/*
	 * Retrieve (and wipe) the seed from the FDT
	 */
	seed = get_kaslr_seed(fdt);
	if (!seed)
		return 0;

	/*
	 * Check if 'nokaslr' appears on the command line, and
	 * return 0 if that is the case.
	 */
	cmdline = kaslr_get_cmdline(fdt);
	str = strstr(cmdline, "nokaslr");
	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
		return 0;

	/*
	 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
	 * kernel image offset from the seed. Let's place the kernel in the
	 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of
	 * the lower and upper quarters to avoid colliding with other
	 * allocations.
	 * Even if we could randomize at page granularity for 16k and 64k pages,
	 * let's always round to 2 MB so we don't interfere with the ability to
	 * map using contiguous PTEs
	 */
	mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1);
	offset = BIT(VA_BITS - 3) + (seed & mask);

	/* use the top 16 bits to randomize the linear region */
	memstart_offset_seed = seed >> 48;

	if (IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN does not expect the module region to intersect the
		 * vmalloc region, since shadow memory is allocated for each
		 * module at load time, whereas the vmalloc region is shadowed
		 * by KASAN zero pages. So keep modules out of the vmalloc
		 * region if KASAN is enabled, and put the kernel well within
		 * 4 GB of the module region.
		 */
		return offset % SZ_2G;

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region over a 4 GB window covering the
		 * kernel. This reduces the risk of modules leaking information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = SZ_4G - (u64)(_end - _stext);
		module_alloc_base = max((u64)_end + offset - SZ_4G,
					(u64)MODULES_VADDR);
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext) . This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
		module_alloc_base = (u64)_etext + offset - MODULES_VSIZE;
	}

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));

	return offset;
}
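To make the offset arithmetic concrete: with VA_BITS == 48 the mask keeps seed bits [45:21], so the computed offsets are 2 MB aligned and land in [2^45, 3 * 2^45), i.e. the middle half of a 2^47-byte window (a worked sketch of the two lines above, not kernel code):

	/* assuming VA_BITS == 48 */
	u64 mask   = ((1UL << 46) - 1) & ~(SZ_2M - 1);	/* 0x3fffffe00000 */
	u64 offset = BIT(45) + (seed & mask);		/* 2 MB aligned, middle half */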
Example #27
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret, res;
	int i;
	struct wd_api *wd_api = NULL;
	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
#if 0
			for (i = 0x0; i <= 0x4; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}
			for (i = 0x05; i <= 0x15; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}
			for (i = 0x20; i <= 0x45; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}

			for (i = 0x0; i <= 0x4; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}
			for (i = 0x05; i <= 0x15; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}
			for (i = 0x20; i <= 0x45; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}

			pr_crit("MPx_AXI_CONFIG: REG 0x1020002c : 0x%x\n", REG_READ(0x1020002c));
			pr_crit("MPx_AXI_CONFIG: REG 0x1020022c : 0x%x\n", REG_READ(0x1020022c));
			pr_crit("ACLKEN_DIV: REG 0x10200640 : 0x%x\n", REG_READ(0x10200640));
			pr_crit("CCI: REG 0x10394000 : 0x%x\n", REG_READ(0x10394000));
			pr_crit("CCI: REG 0x10395000 : 0x%x\n", REG_READ(0x10395000));
#endif
			pr_crit("CPU%u: failed to come online\n", cpu);
#if 1
			pr_crit("Trigger WDT RESET\n");
			res = get_wd_api(&wd_api);
			if (res)
				pr_crit("get wd api error !!\n");
			else
				wd_api->wd_sw_reset(3); /* asks the system to reboot */
#endif
#if 0
			pr_crit("Trigger PMIC full reset.\n");
			if (check_pmic_wrap_init())
				mt_pwrap_hal_init();
			pmic_full_reset();
#endif
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;

	return ret;
}
Example #28
/*
 * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc = 0;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!tmp_pg_dir) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		rc = -ENOMEM;
		goto out;
	}
	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
	if (rc)
		goto out;

	/*
	 * We need a zero page that is zero before & after resume in order to
	 * break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit,
				   (void *)get_safe_page, GFP_ATOMIC);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		goto out;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;     /* offset */

		__hyp_set_vectors(el2_vectors);
	}

	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
	return rc;
}