/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret;
	unsigned long flags;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with the idmap active in
		 * TTBR0_EL1. We must uninstall the idmap and restore the
		 * expected MMU state before we can possibly return to
		 * userspace.
		 */
		cpu_uninstall_idmap();

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

/**
 * cpu_suspend
 *
 * @arg: argument to pass to the finisher function
 */
int cpu_suspend(unsigned long arg)
{
	struct mm_struct *mm = current->active_mm;
	int ret, cpu = smp_processor_id();
	unsigned long flags;

	/*
	 * If cpu_ops have not been registered or suspend
	 * has not been initialized, cpu_suspend call fails early.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
		return -EOPNOTSUPP;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend(arg);
	if (ret == 0) {
		cpu_switch_mm(mm->pgd, mm);
		flush_tlb_all();

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(cpu));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	local_dbg_save(flags);

	if (__cpu_suspend_enter(&state)) {
		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed())
			dcache_clean_range(__hyp_idmap_text_start,
					   __hyp_idmap_text_end);

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();
	}

	local_dbg_restore(flags);

	return ret;
}

/*
 * __cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;
	unsigned long flags;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with TTBR0_EL1 set to the
		 * idmap to enable the MMU; restore the active_mm mappings in
		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
		 * reserved TTBR0 page tables and should be restored as such.
		 */
		if (mm == &init_mm)
			cpu_set_reserved_ttbr0();
		else
			cpu_switch_mm(mm->pgd, mm);

		flush_tlb_all();

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}

/*
 * cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	struct mm_struct *mm = current->active_mm;
	int ret;
	unsigned long flags;

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	local_dbg_save(flags);

	/*
	 * mm context saved on the stack, it will be restored when
	 * the cpu comes out of reset through the identity mapped
	 * page tables, so that the thread address space is properly
	 * set-up on function return.
	 */
	ret = __cpu_suspend_enter(arg, fn);
	if (ret == 0) {
		/*
		 * We are resuming from reset with TTBR0_EL1 set to the
		 * idmap to enable the MMU; set the TTBR0 to the reserved
		 * page tables to prevent speculative TLB allocations, flush
		 * the local tlb and set the default tcr_el1.t0sz so that
		 * the TTBR0 address space set-up is properly restored.
		 * If the current active_mm != &init_mm we entered cpu_suspend
		 * with mappings in TTBR0 that must be restored, so we switch
		 * them back to complete the address space configuration
		 * restoration before returning.
		 */
		cpu_set_reserved_ttbr0();
		local_flush_tlb_all();
		cpu_set_default_tcr_t0sz();

		if (mm != &init_mm)
			cpu_switch_mm(mm->pgd, mm);

		/*
		 * Restore per-cpu offset before any kernel
		 * subsystem relying on it has a chance to run.
		 */
		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

		/*
		 * Restore HW breakpoint registers to sane values
		 * before debug exceptions are possibly reenabled
		 * through local_dbg_restore.
		 */
		if (hw_breakpoint_restore)
			hw_breakpoint_restore(NULL);
	}

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_dbg_restore(flags);

	return ret;
}
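
For reference, a minimal sketch of how the two-argument cpu_suspend() variants above are typically driven: a platform passes an opaque state parameter plus a "finisher" that asks the firmware to power the core down and is not expected to return on success. The helper names my_firmware_cpu_off(), my_suspend_finisher() and my_enter_idle_state() are hypothetical placeholders, not kernel API; firmware back ends such as PSCI follow this general shape.

/*
 * Hypothetical caller sketch, not taken from the kernel tree.
 * cpu_suspend() itself is declared in <asm/suspend.h> on arm64.
 */
#include <asm/suspend.h>

/* Placeholder for a firmware call that powers the calling core down. */
extern int my_firmware_cpu_off(unsigned long state);

static int my_suspend_finisher(unsigned long state)
{
	/* Expected not to return if the core is actually powered down. */
	return my_firmware_cpu_off(state);
}

static int my_enter_idle_state(unsigned long state)
{
	/*
	 * On a successful suspend/resume cycle the core comes back through
	 * the reset path and cpu_suspend() returns 0 after restoring MMU,
	 * per-cpu and debug state; otherwise the finisher's error code
	 * propagates back to the caller.
	 */
	return cpu_suspend(state, my_suspend_finisher);
}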