/*
 * The framework loads the hibernation image into a linked list anchored
 * at restore_pblist, for swsusp_arch_resume() to copy back to the proper
 * destinations.
 *
 * To make this work if resume is triggered from initramfs, the
 * pagetables need to be switched to allow writes to kernel mem.
 */
void notrace mtk_arch_restore_image(void)
{
        phys_reset_t phys_reset;
        struct pbe *pbe;

        for (pbe = restore_pblist; pbe; pbe = pbe->next)
                copy_page(pbe->orig_address, pbe->address);

#if 0 /* [ALPS01496758] since the CA17 has a cache bug, replace with the modified assembly version */
        /* Clean and invalidate caches */
        flush_cache_all();

        /* Turn off caching */
        cpu_proc_fin();

        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();
#else
        __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2();
#endif

        /* Take out a flat memory mapping. */
        setup_mm_for_reboot();

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);

        /* Return from cpu_suspend/swsusp_arch_suspend */
        phys_reset((unsigned long)virt_to_phys(cpu_resume));

        /* Should never get here. */
        BUG();
}
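/*
 * Every variant below relies on the same pattern: after
 * setup_mm_for_reboot() installs a flat (identity) mapping, the
 * physical address of cpu_reset can be called through a plain function
 * pointer, which turns the MMU off and branches to a physical entry
 * point. A minimal sketch of that pattern, assuming the usual ARM
 * kernel typedef for phys_reset_t; the helper name jump_to_phys is
 * illustrative only:
 */
typedef void (*phys_reset_t)(unsigned long);

static void jump_to_phys(void *entry)
{
        phys_reset_t phys_reset;

        /* Identity-map the kernel so execution survives the MMU switch. */
        setup_mm_for_reboot();

        /* Call cpu_reset by its physical address... */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);

        /* ...telling it which physical entry point to branch to. */
        phys_reset((unsigned long)virt_to_phys(entry));

        /* cpu_reset does not return. */
        BUG();
}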
static void __soft_restart(void *addr)
{
        phys_reset_t phys_reset;

        /* Take out a flat memory mapping. */
        setup_mm_for_reboot();

        /* Clean and invalidate caches */
        flush_cache_all();

        /* Turn off caching */
        cpu_proc_fin();

        /* Push out any further dirty data, and ensure cache is empty */
        flush_cache_all();

        /* Push out the dirty data from external caches */
        outer_disable();

        /* Switch to the identity mapping. */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset((unsigned long)addr);

        /* Should never get here. */
        BUG();
}
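/*
 * For context: __soft_restart() is not called directly. A small
 * wrapper first switches to a private stack (the current stack may
 * become unreachable once the identity mapping is in place) before
 * continuing with the reset. A sketch from memory of the
 * arch/arm/kernel code; details vary across kernel versions:
 */
static u64 soft_restart_stack[16];

void soft_restart(unsigned long addr)
{
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

        /* Disable interrupts first */
        raw_local_irq_disable();
        local_fiq_disable();

        /* Change to the new stack and continue with the reset. */
        call_with_stack(__soft_restart, (void *)addr, (void *)stack);

        /* Should never get here. */
        BUG();
}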
void mcpm_cpu_suspend(u64 expected_residency)
{
        phys_reset_t phys_reset;

        BUG_ON(!platform_ops);
        BUG_ON(!irqs_disabled());

        /* Very similar to mcpm_cpu_power_down() */
        setup_mm_for_reboot();
        platform_ops->suspend(expected_residency);
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}
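/*
 * A hedged usage sketch: callers install the wakeup entry point with
 * mcpm_set_entry_vector() and reach mcpm_cpu_suspend() through a
 * cpu_suspend() finisher, so the context saved by cpu_suspend() is
 * restored via cpu_resume on wakeup. The my_* names are hypothetical:
 */
static int my_suspend_finisher(unsigned long residency)
{
        mcpm_cpu_suspend(residency);    /* does not return */
        return 1;
}

static void my_enter_deep_idle(unsigned long residency)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        cpu_suspend(residency, my_suspend_finisher);
        /* Execution resumes here after wakeup via mcpm_entry_point. */
}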
void mcpm_cpu_power_down(void)
{
        phys_reset_t phys_reset;

        if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
                return;
        BUG_ON(!irqs_disabled());

        /*
         * Do this before calling into the power_down method,
         * as it might not always be safe to do afterwards.
         */
        setup_mm_for_reboot();

        platform_ops->power_down();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU. In this case the
         * power_down method might not be able to actually enter a
         * powered down state with the WFI instruction if the power_up
         * method has removed the required reset condition. The
         * power_down method is then allowed to return. We must perform
         * a re-entry in the kernel as if the power_up method had just
         * deasserted reset on the CPU.
         *
         * To simplify race issues, the platform specific implementation
         * must accommodate the possibility of unordered calls to
         * power_down and power_up with a usage count. Therefore, if a
         * call to power_up is issued for a CPU that is not down, then
         * the next call to power_down must not attempt a full shutdown
         * but only do the minimum (normally disabling L1 cache and CPU
         * coherency) and return just as if a concurrent power_up request
         * had happened as described above.
         */

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}
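/*
 * A hypothetical platform back-end sketch of the usage-count rule
 * described in the comment above; every identifier except the rule
 * itself is illustrative, and locking against a concurrent power_up
 * is omitted for brevity:
 */
static int my_use_count[NR_CPUS];

static void my_power_up(unsigned int cpu)
{
        if (my_use_count[cpu]++ == 0)
                my_deassert_reset(cpu);         /* platform-specific */
}

static void my_power_down(void)
{
        unsigned int cpu = smp_processor_id();

        /* Disable the L1 cache and CPU coherency in both cases. */
        my_cache_and_coherency_exit();

        if (--my_use_count[cpu] == 0) {
                /* Full shutdown: assert reset and wait in WFI. */
                my_assert_reset_and_wfi(cpu);
        }
        /*
         * Otherwise a power_up raced ahead of us: do only the minimum
         * and return, as if reset had just been deasserted.
         */
}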
static int __init nocache_trampoline(unsigned long _arg)
{
        void (*cache_disable)(void) = (void *)_arg;
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        phys_reset_t phys_reset;

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
        cache_disable();
        __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        __mcpm_cpu_down(cpu, cluster);

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}
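/*
 * nocache_trampoline() is the finisher handed to cpu_suspend() by the
 * MCPM loopback self-test, which soft-restarts the boot CPU through
 * the low-level MCPM entry path. Roughly, from memory of
 * arch/arm/common/mcpm_entry.c (details vary by kernel version):
 */
int __init mcpm_loopback(void (*cache_disable)(void))
{
        int ret;

        local_irq_disable();
        local_fiq_disable();
        ret = cpu_pm_enter();
        if (!ret) {
                ret = cpu_suspend((unsigned long)cache_disable,
                                  nocache_trampoline);
                cpu_pm_exit();
        }
        local_fiq_enable();
        local_irq_enable();
        if (ret)
                pr_err("%s returned %d\n", __func__, ret);
        return ret;
}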
void mcpm_cpu_power_down(void)
{
        unsigned int mpidr, cpu, cluster;
        bool cpu_going_down, last_man;
        phys_reset_t phys_reset;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

        if (WARN_ON_ONCE(!platform_ops))
                return;
        BUG_ON(!irqs_disabled());

        setup_mm_for_reboot();

        __mcpm_cpu_going_down(cpu, cluster);
        arch_spin_lock(&mcpm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

        mcpm_cpu_use_count[cluster][cpu]--;
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
               mcpm_cpu_use_count[cluster][cpu] != 1);
        cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
        last_man = mcpm_cluster_unused(cluster);

        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                platform_ops->cpu_powerdown_prepare(cpu, cluster);
                platform_ops->cluster_powerdown_prepare(cluster);
                arch_spin_unlock(&mcpm_lock);
                platform_ops->cluster_cache_disable();
                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                if (cpu_going_down)
                        platform_ops->cpu_powerdown_prepare(cpu, cluster);
                arch_spin_unlock(&mcpm_lock);
                /*
                 * If cpu_going_down is false here, that means a power_up
                 * request raced ahead of us. Even if we do not want to
                 * shut this CPU down, the caller still expects execution
                 * to return through the system resume entry path, like
                 * when the WFI is aborted due to a new IRQ or the like.
                 * So let's continue with cache cleaning in all cases.
                 */
                platform_ops->cpu_cache_disable();
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (cpu_going_down)
                wfi();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU. In this case the
         * CPU might not be able to actually enter a powered down state
         * with the WFI instruction if the power_up request has removed
         * the required reset condition. We must perform a re-entry in
         * the kernel as if the power_up method had just deasserted reset
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}
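/*
 * For the race discussed above, the power_up side increments the same
 * counter under mcpm_lock, so a power_up that wins the race leaves the
 * count at 1 and the decrement above computes cpu_going_down == false.
 * A condensed sketch of that side, reconstructed from memory with the
 * legacy callback path and some error handling omitted:
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        bool cpu_is_down, cluster_is_down;
        int ret = 0;

        if (!platform_ops)
                return -EUNATCH;
        might_sleep();

        /* Called with IRQs enabled; no arch_spin_lock_irq() exists. */
        local_irq_disable();
        arch_spin_lock(&mcpm_lock);

        cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
        cluster_is_down = mcpm_cluster_unused(cluster);

        /* 2 here means "requested up before it managed to go down". */
        mcpm_cpu_use_count[cluster][cpu]++;
        BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
               mcpm_cpu_use_count[cluster][cpu] != 2);

        if (cluster_is_down)
                ret = platform_ops->cluster_powerup(cluster);
        if (cpu_is_down && !ret)
                ret = platform_ops->cpu_powerup(cpu, cluster);

        arch_spin_unlock(&mcpm_lock);
        local_irq_enable();
        return ret;
}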