/*
 * Propagate the per-CPU "32-bit app" setting across a context switch:
 * clear it when a compat (AArch32) user task is scheduled out, set it
 * when one is scheduled in.  Kernel threads (mm == NULL) never touch it.
 */
void switch_32bit_app_setting_bit(struct task_struct *prev,
				  struct task_struct *next)
{
	if (prev->mm && unlikely(is_compat_thread(task_thread_info(prev))))
		clear_app_setting_bit_for_32bit_apps();

	if (next->mm && unlikely(is_compat_thread(task_thread_info(next))))
		set_app_setting_bit_for_32bit_apps();
}
/*
 * Tell perf which register-sampling ABI applies to @task:
 * the 32-bit ABI for compat (AArch32) threads, 64-bit otherwise.
 */
u64 perf_reg_abi(struct task_struct *task)
{
	if (is_compat_thread(task_thread_info(task)))
		return PERF_SAMPLE_REGS_ABI_32;

	return PERF_SAMPLE_REGS_ABI_64;
}
const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_COMPAT if (is_compat_thread(task_thread_info(task))) return &user_aarch32_view; #endif return &user_aarch64_view; }
noinline static void mt_switch(struct task_struct *prev, struct task_struct *next) { int cpu; int prev_state=0, next_state=0; #ifdef __aarch64__ prev_state = !(is_compat_thread(task_thread_info(prev))); next_state = !(is_compat_thread(task_thread_info(next))); #endif cpu = smp_processor_id(); if (per_cpu(first_log, cpu)) { MET_PRINTK("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state); per_cpu(first_log, cpu) = 0; } if (prev_state != next_state) MET_PRINTK("%d, %d, %d, %d\n", prev->pid, prev_state, next->pid, next_state); }
/*
 * Are the current registers suitable for user mode? (used to maintain
 * security in signal handlers)
 */
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
	/* Drop the pending single-step bit unless the task is being stepped. */
	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
		regs->pstate &= ~DBG_SPSR_SS;

	if (is_compat_thread(task_thread_info(task)))
		return valid_compat_regs(regs);

	return valid_native_regs(regs);
}
/*
 * Install the incoming task's TLS registers on a context switch.
 *
 * Compat (AArch32) tasks use tpidrro_el0 as their read-only TLS pointer,
 * so load it from the saved value; for native tasks it is zeroed unless
 * the KPTI trampoline is using it (arm64_kernel_unmapped_at_el0()).
 * tpidr_el0 is always restored from the task's saved TLS.
 */
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}
/*
 * Install the incoming task's TLS registers on a context switch.
 * tpidr_el0 always comes from the saved per-task TLS value; tpidrro_el0
 * carries the compat (AArch32) read-only TLS pointer and is zeroed for
 * native 64-bit tasks.
 */
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	tls_preserve_current_state();

	tpidr = *task_user_tls(next);
	if (is_compat_thread(task_thread_info(next)))
		tpidrro = next->thread.tp_value;
	else
		tpidrro = 0;

	write_sysreg(tpidr, tpidr_el0);
	write_sysreg(tpidrro, tpidrro_el0);
}
/*
 * Does this breakpoint target a compat (AArch32) task?
 */
static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	if (!tsk)
		return 0;

	return is_compat_thread(task_thread_info(tsk));
}
const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_COMPAT /* * Core dumping of 32-bit tasks or compat ptrace requests must use the * user_aarch32_view compatible with arm32. Native ptrace requests on * 32-bit children use an extended user_aarch32_ptrace_view to allow * access to the TLS register. */ if (is_compat_task()) return &user_aarch32_view; else if (is_compat_thread(task_thread_info(task))) return &user_aarch32_ptrace_view; #endif return &user_aarch64_view; }
int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p) { struct pt_regs *childregs = task_pt_regs(p); memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; /* * Read the current TLS pointer from tpidr_el0 as it may be * out-of-sync with the saved value. */ *task_user_tls(p) = read_sysreg(tpidr_el0); if (stack_start) { if (is_compat_thread(task_thread_info(p))) childregs->compat_sp = stack_start; else childregs->sp = stack_start; } /* * If a TLS pointer was passed to clone (4th argument), use it * for the new thread. */ if (clone_flags & CLONE_SETTLS) p->thread.tp_value = childregs->regs[3]; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; if (IS_ENABLED(CONFIG_ARM64_UAO) && cpus_have_const_cap(ARM64_HAS_UAO)) childregs->pstate |= PSR_UAO_BIT; p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } p->thread.cpu_context.pc = (unsigned long)ret_from_fork; p->thread.cpu_context.sp = (unsigned long)childregs; ptrace_hw_copy_thread(p); return 0; }
int copy_thread(unsigned long clone_flags, unsigned long stack_start, unsigned long stk_sz, struct task_struct *p) { struct pt_regs *childregs = task_pt_regs(p); memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); /* * Unalias p->thread.sve_state (if any) from the parent task * and disable discard SVE state for p: */ clear_tsk_thread_flag(p, TIF_SVE); p->thread.sve_state = NULL; /* * In case p was allocated the same task_struct pointer as some * other recently-exited task, make sure p is disassociated from * any cpu that may have run that now-exited task recently. * Otherwise we could erroneously skip reloading the FPSIMD * registers for p. */ fpsimd_flush_task_state(p); if (likely(!(p->flags & PF_KTHREAD))) { *childregs = *current_pt_regs(); childregs->regs[0] = 0; /* * Read the current TLS pointer from tpidr_el0 as it may be * out-of-sync with the saved value. */ *task_user_tls(p) = read_sysreg(tpidr_el0); if (stack_start) { if (is_compat_thread(task_thread_info(p))) childregs->compat_sp = stack_start; else childregs->sp = stack_start; } /* * If a TLS pointer was passed to clone (4th argument), use it * for the new thread. */ if (clone_flags & CLONE_SETTLS) p->thread.uw.tp_value = childregs->regs[3]; } else { memset(childregs, 0, sizeof(struct pt_regs)); childregs->pstate = PSR_MODE_EL1h; if (IS_ENABLED(CONFIG_ARM64_UAO) && cpus_have_const_cap(ARM64_HAS_UAO)) childregs->pstate |= PSR_UAO_BIT; if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) childregs->pstate |= PSR_SSBS_BIT; if (system_uses_irq_prio_masking()) childregs->pmr_save = GIC_PRIO_IRQON; p->thread.cpu_context.x19 = stack_start; p->thread.cpu_context.x20 = stk_sz; } p->thread.cpu_context.pc = (unsigned long)ret_from_fork; p->thread.cpu_context.sp = (unsigned long)childregs; ptrace_hw_copy_thread(p); return 0; }