/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
	u32 *ctx = ptr;

	*save_ptr = virt_to_phys(ptr);

	/* This must correspond to the LDM in cpu_resume() assembly */
	*ptr++ = virt_to_phys(idmap_pgd);
	*ptr++ = sp;
	*ptr++ = virt_to_phys(cpu_do_resume);

	cpu_do_suspend(ptr);

	flush_cache_louis();

	/*
	 * flush_cache_louis does not guarantee that
	 * save_ptr and ptr are cleaned to main memory,
	 * just up to the Level of Unification Inner Shareable.
	 * Since the context pointer and context itself
	 * are to be retrieved with the MMU off that
	 * data must be cleaned from all cache levels
	 * to main memory using "area" cache primitives.
	 */
	__cpuc_flush_dcache_area(ctx, ptrsz);
	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
	outer_clean_range(virt_to_phys(save_ptr),
			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
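
The pattern above matters because cpu_resume() runs with the MMU and caches off: flush_cache_louis() only cleans up to the Level of Unification Inner Shareable, so the saved context must additionally be cleaned through all cache levels and past the outer cache. A minimal sketch of that clean-to-memory pattern for an arbitrary buffer (the helper name is hypothetical; the calls are the same kernel primitives used above):

static void clean_area_for_mmu_off(void *buf, size_t size)
{
	/* Clean this CPU's caches up to the LoUIS first. */
	flush_cache_louis();

	/* Clean the buffer through every architected cache level... */
	__cpuc_flush_dcache_area(buf, size);

	/* ...and past any outer cache (e.g. PL310) to main memory. */
	outer_clean_range(virt_to_phys(buf), virt_to_phys(buf) + size);
}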
Example #2
static int __init init_static_idmap(void)
{
	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	identity_mapping_add(idmap_pgd, __idmap_text_start,
			     __idmap_text_end, 0);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
Example #3
File: idmap.c Project: Afek/linux
static int __init init_static_idmap(void)
{
	int ret;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	pr_info("Setting up static identity map for 0x%p - 0x%p\n",
		__idmap_text_start, __idmap_text_end);
	identity_mapping_add(idmap_pgd, __idmap_text_start,
			     __idmap_text_end, 0);

	ret = init_static_idmap_hyp();

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return ret;
}
Example #4
File: idmap.c Project: 4atty/linux
static int __init init_static_idmap(void)
{
	phys_addr_t idmap_start, idmap_end;

	idmap_pgd = pgd_alloc(&init_mm);
	if (!idmap_pgd)
		return -ENOMEM;

	/* Add an identity mapping for the physical address of the section. */
	idmap_start = virt_to_phys((void *)__idmap_text_start);
	idmap_end = virt_to_phys((void *)__idmap_text_end);

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)idmap_start, (long long)idmap_end);
	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);

	/* Flush L1 for the hardware to see this page table content */
	flush_cache_louis();

	return 0;
}
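
Once the static identity map is populated and its page tables have been cleaned for the walker, code that is about to turn the MMU off switches onto idmap_pgd so the currently executing physical addresses remain mapped across the transition. A minimal sketch, modelled on the kernel's setup_mm_for_reboot() (ASID handling omitted):

static void switch_to_idmap(void)
{
	/* Install the identity page tables for the init address space. */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Drop stale translations so the identity map takes effect. */
	local_flush_tlb_all();
}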
Example #5
static void tc2_pm_psci_power_down(void)
{
	struct psci_power_state power_state;
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	BUG_ON(!psci_ops.cpu_off);

	switch (atomic_dec_return(&tc2_pm_use_count[cpu][cluster])) {
	case 1:
		/*
		 * Overtaken by a power up. Flush caches, exit coherency,
		 * return & fake a reset
		 */
		set_cr(get_cr() & ~CR_C);

		flush_cache_louis();

		asm volatile ("clrex");
		set_auxcr(get_auxcr() & ~(1 << 6));

		return;
	case 0:
		/* A normal request to possibly power down the cluster */
		power_state.id = PSCI_POWER_STATE_ID;
		power_state.type = PSCI_POWER_STATE_TYPE_POWER_DOWN;
		power_state.affinity_level = PSCI_POWER_STATE_AFFINITY_LEVEL1;

		psci_ops.cpu_off(power_state);

		/* On success this function never returns */
	default:
		/* Any other value is a bug */
		BUG();
	}
}
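
For reference, MPIDR_AFFINITY_LEVEL() simply extracts one 8-bit affinity field from the MPIDR register value, so the cpu/cluster split above is equivalent to the following (illustrative only; the variable names mirror the snippet):

	unsigned int mpidr   = read_cpuid_mpidr();
	unsigned int cpu     = mpidr & 0xff;		/* Aff0: CPU within the cluster */
	unsigned int cluster = (mpidr >> 8) & 0xff;	/* Aff1: cluster number */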
Example #6
static void flush_all_cpu_cache(void *info)
{
	flush_cache_louis();
}
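
This empty wrapper has the smp_call_func_t shape (a single void *info argument), so it is typically handed to an SMP cross-call so that every online CPU cleans and invalidates its own caches up to the LoUIS. A minimal usage sketch (the caller name is hypothetical):

static void flush_louis_on_all_cpus(void)
{
	/* Run flush_all_cpu_cache() on each online CPU and wait for completion. */
	on_each_cpu(flush_all_cpu_cache, NULL, 1);
}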