int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *ti = p->thread_info; struct pt_regs *childregs; long childksp; p->set_child_tid = p->clear_child_tid = NULL; childksp = (unsigned long)ti + THREAD_SIZE - 32; preempt_disable(); if (is_fpu_owner()) save_fp(p); if (cpu_has_dsp) save_dsp(p); preempt_enable(); /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; *childregs = *regs; childregs->regs[7] = 0; /* Clear error flag */ #if defined(CONFIG_BINFMT_IRIX) if (current->personality != PER_LINUX) { /* Under IRIX things are a little different. */ childregs->regs[3] = 1; regs->regs[3] = 0; } #endif childregs->regs[2] = 0; /* Child gets zero as return value */ regs->regs[2] = p->pid; if (childregs->cp0_status & ST0_CU0) { childregs->regs[28] = (unsigned long) ti; childregs->regs[29] = childksp; ti->addr_limit = KERNEL_DS; } else { childregs->regs[29] = usp; ti->addr_limit = USER_DS; } p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; /* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); clear_tsk_thread_flag(p, TIF_USEDFPU); if (clone_flags & CLONE_SETTLS) ti->tp_value = regs->regs[7]; return 0; }
/*
 * Save processor state ahead of a suspend transition.
 *
 * Stashes the coprocessor-0 Status register in the file-scope
 * saved_status and flushes any live FPU/DSP context of the current
 * task to memory so it survives the power transition.
 *
 * NOTE(review): unlike copy_thread()/arch_dup_task_struct() in this
 * file, the is_fpu_owner()/save_fp() pair is not wrapped in
 * preempt_disable(); presumably callers run with preemption (or
 * interrupts) already disabled on the suspend path — confirm.
 */
void save_processor_state(void)
{
	saved_status = read_c0_status();

	if (is_fpu_owner())
		save_fp(current);
	if (cpu_has_dsp)
		save_dsp(current);
}
/*
 * arch_dup_task_struct - architecture hook used when forking: copy the
 * parent task_struct into the child's, after first flushing live
 * register state to memory.
 *
 * @dst: child task_struct being populated
 * @src: parent task_struct being duplicated
 *
 * Returns 0 (this implementation cannot fail).
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	/* MSA state is a superset of (and aliases) the FPU state, so only
	   one of the two save paths is taken. */
	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	/* The saves above must precede this copy so *dst sees fresh state. */
	*dst = *src;

	return 0;
}
/*
 * Copy architecture-specific thread state for a newly forked task.
 *
 * Builds a pt_regs frame at the top of the child's kernel stack, copies
 * the parent's frame into it, and sets the child up to resume in
 * ret_from_fork returning zero (v0 == 0) while the parent's fork return
 * value is set to the child's PID.
 *
 * @clone_flags: clone(2) flags; only CLONE_SETTLS is examined here
 * @usp:         new user stack pointer for a user-mode child
 * @unused:      unused
 * @p:           the freshly duplicated child task
 * @regs:        the parent's user register frame
 *
 * Returns 0 (this variant cannot fail).
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	unsigned long childksp;

	p->set_child_tid = p->clear_child_tid = NULL;

	/* Top of the child's kernel stack, minus the 32-byte argument
	   save area mandated by the MIPS calling convention. */
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/*
	 * Flush any live FPU/DSP state into the child context under
	 * preemption protection so the copy cannot go stale.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */

	childregs->regs[2] = 0;	/* Child gets zero as return value */
	regs->regs[2] = p->pid;	/* Parent gets the child's PID */

	if (childregs->cp0_status & ST0_CU0) {
		/* Kernel-mode child: run on its own kernel stack. */
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		/* User-mode child: use the caller-supplied user stack. */
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

#ifdef CONFIG_PERFCTR
	perfctr_copy_task(p, regs);
#endif

	return 0;
}
/*
 * Copy architecture-specific thread state for a newly forked task.
 *
 * For a kernel thread (PF_KTHREAD) the pt_regs frame is zeroed and the
 * child is set up to enter ret_from_kernel_thread, which — judging by
 * the reg16/reg17 stashes — invokes @usp as a function with @arg
 * (presumably; ret_from_kernel_thread is defined elsewhere).  For a
 * user thread the parent's frame (current_pt_regs()) is copied and the
 * child resumes in ret_from_fork with a zero return value in v0.
 *
 * @clone_flags: clone(2) flags; only CLONE_SETTLS is examined here
 * @usp:         user stack pointer, or the function to run for a
 *               kernel thread
 * @arg:         argument for a kernel thread's function
 * @p:           the freshly duplicated child task
 *
 * Returns 0 (this variant cannot fail).
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	p->set_child_tid = p->clear_child_tid = NULL;

	/* Top of the child's kernel stack, minus the 32-byte argument
	   save area mandated by the MIPS calling convention. */
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/*
	 * Flush any live FPU/DSP state into the child context under
	 * preemption protection so the copy cannot go stale.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	/*
	 * New tasks lose permission to use the fpu. This accelerates
	 * context switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread: no user frame to copy.  Stash the function
		   and its argument in callee-saved s0/s1 for the entry stub. */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		/* R3000-class CPUs use the KU/IE stack in Status instead of
		   an EXL bit; shift the current-mode bits into the
		   previous-mode slots. */
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* User thread: clone the parent's register frame. */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct thread_info *ti = task_thread_info(p); struct pt_regs *childregs; long childksp; p->set_child_tid = p->clear_child_tid = NULL; childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; preempt_disable(); if (is_fpu_owner()) save_fp(p); if (cpu_has_dsp) save_dsp(p); preempt_enable(); /* set up new TSS. */ childregs = (struct pt_regs *) childksp - 1; *childregs = *regs; childregs->regs[7] = 0; /* Clear error flag */ #if defined(CONFIG_BINFMT_IRIX) if (current->personality != PER_LINUX) { /* Under IRIX things are a little different. */ childregs->regs[3] = 1; regs->regs[3] = 0; } #endif childregs->regs[2] = 0; /* Child gets zero as return value */ regs->regs[2] = p->pid; if (childregs->cp0_status & ST0_CU0) { childregs->regs[28] = (unsigned long) ti; childregs->regs[29] = childksp; ti->addr_limit = KERNEL_DS; } else { childregs->regs[29] = usp; ti->addr_limit = USER_DS; } p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; /* * New tasks lose permission to use the fpu. This accelerates context * switching for most programs since they don't use the fpu. */ p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); clear_tsk_thread_flag(p, TIF_USEDFPU); #ifdef CONFIG_MIPS_MT_FPAFF /* * FPU affinity support is cleaner if we track the * user-visible CPU affinity from the very beginning. * The generic cpus_allowed mask will already have * been copied from the parent before copy_thread * is invoked. */ p->thread.user_cpus_allowed = p->cpus_allowed; #endif /* CONFIG_MIPS_MT_FPAFF */ if (clone_flags & CLONE_SETTLS) ti->tp_value = regs->regs[7]; return 0; }
/*
 * Copy architecture-specific thread state for a newly forked task.
 *
 * Builds a pt_regs frame at the top of the child's kernel stack, copies
 * the parent's frame into it, and sets the child up to resume in
 * ret_from_fork returning zero (v0 == 0).
 *
 * NOTE(review): unlike the sibling variants, this one does not write
 * the child's PID into the parent's regs[2]; presumably the caller
 * handles the parent's return value — confirm.
 *
 * @clone_flags: clone(2) flags; only CLONE_SETTLS is examined here
 * @usp:         new user stack pointer for a user-mode child
 * @unused:      unused
 * @p:           the freshly duplicated child task
 * @regs:        the parent's user register frame
 *
 * Returns 0 (this variant cannot fail).
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long unused, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
	unsigned long childksp;

	p->set_child_tid = p->clear_child_tid = NULL;

	/* Top of the child's kernel stack, minus the 32-byte argument
	   save area mandated by the MIPS calling convention. */
	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/*
	 * Flush any live FPU/DSP state into the child context under
	 * preemption protection so the copy cannot go stale.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(p);

	if (cpu_has_dsp)
		save_dsp(p);

	preempt_enable();

	/* Carve a pt_regs frame off the top of the child's kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */

	if (childregs->cp0_status & ST0_CU0) {
		/* Kernel-mode child: run on its own kernel stack. */
		childregs->regs[28] = (unsigned long) ti;
		childregs->regs[29] = childksp;
		ti->addr_limit = KERNEL_DS;
	} else {
		/* User-mode child: use the caller-supplied user stack. */
		childregs->regs[29] = usp;
		ti->addr_limit = USER_DS;
	}
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates
	 * context switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC restores TCStatus after Status, and the CU bits
	 * are aliased there.
	 */
	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
	clear_tsk_thread_flag(p, TIF_USEDFPU);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}