/*
 * copy_thread - set up the kernel stack and register frame for a new task.
 *
 * Called at fork time: @p is the new child task, @regs is the parent's
 * register frame at the time of the fork, @usp is the child's user stack
 * pointer.  @nr, @clone_flags and @unused are accepted for interface
 * compatibility but not used here.  Returns 0 on success.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused, struct task_struct * p,
                struct pt_regs * regs)
{
	struct pt_regs * childregs;
	long childksp;
	extern void save_fp(void*);

	/* Top of the child's kernel stack, minus the 32-byte argument save area. */
	childksp = (unsigned long)p + KERNEL_STACK_SIZE - 32;

	if (last_task_used_math == current)
		if (mips_cpu.options & MIPS_CPU_FPU) {
			/*
			 * The parent is the lazy FPU owner: flush the live
			 * FPU registers (presumably into @p's thread struct
			 * — confirm against save_fp) so the child starts
			 * with a consistent copy.
			 */
			__enable_fpu();
			save_fp(p);
		}

	/* Build the child's register frame just below the kernel stack top,
	 * as a copy of the parent's. */
	childregs = (struct pt_regs *) childksp - 1;
	*childregs = *regs;
	set_gpreg(childregs, 7, 0);	/* Clear error flag */
	if (current->personality == PER_LINUX) {
		set_gpreg(childregs, 2, 0);	/* Child gets zero as return value */
		set_gpreg(regs, 2, p->pid);	/* Parent gets the child's pid */
	} else {
		/* Under IRIX things are a little different:
		 * v0/v1 encode (pid, in-child flag) in both tasks. */
		set_gpreg(childregs, 2, 0);
		set_gpreg(childregs, 3, 1);
		set_gpreg(regs, 2, p->pid);
		set_gpreg(regs, 3, 0);
	}
	if (childregs->cp0_status & ST0_CU0) {
		/* Kernel thread: gp ($28) points at the task struct and it
		 * runs on the kernel stack. */
		set_gpreg(childregs, 28, (unsigned long) p);
		set_gpreg(childregs, 29, childksp);
		p->thread.current_ds = KERNEL_DS;
	} else {
		/* User thread: give it the requested user stack pointer. */
		set_gpreg(childregs, 29, usp);
		p->thread.current_ds = USER_DS;
	}
	/* Saved kernel sp/ra: the child resumes in ret_from_fork. */
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
#ifdef CONFIG_PS2
	/* keep COP2 usable bit to share the VPU0 context */
	p->thread.cp0_status = read_32bit_cp0_register(CP0_STATUS) &
				~(ST0_CU1|KU_MASK);
	childregs->cp0_status &= ~(ST0_CU1);
#else
	p->thread.cp0_status = read_32bit_cp0_register(CP0_STATUS) &
				~(ST0_CU2|ST0_CU1|KU_MASK);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
#endif

	return 0;
}
/*
 * exit_thread - drop lazy FPU ownership when the current task exits.
 *
 * If the exiting task is the lazy FPU owner, enable the FPU and read
 * the FP control/status register (FCSR, CP1 register 31) so nothing is
 * left pending, then forget the owner.
 */
void exit_thread(void)
{
	if (last_task_used_math != current)
		return;
	if (!(mips_cpu.options & MIPS_CPU_FPU))
		return;

	__enable_fpu();
	__asm__ __volatile__("cfc1\t$0,$31");
	last_task_used_math = NULL;
}
/*
 * Get the FPU Implementation/Revision register (CP1 revision).
 *
 * The FPU must be usable before coprocessor 1 can be read, so this
 * temporarily enables it and restores the previous CP0 Status value
 * afterwards.
 */
static inline unsigned long cpu_get_fpu_id(void)
{
	unsigned long saved_status = read_c0_status();
	unsigned long id;

	__enable_fpu();
	id = read_32bit_cp1_register(CP1_REVISION);
	write_c0_status(saved_status);

	return id;
}
/*
 * flush_thread - forget lazily-held FPU state for the current task.
 *
 * If this task currently owns the FPU, read the FP control/status
 * register (FCSR) so nothing is left pending, then give up ownership.
 * NOTE(review): a second flush_thread variant exists in this file —
 * presumably the two are selected by configuration; verify the #ifdefs.
 */
void flush_thread(void)
{
	if (!IS_FPU_OWNER())
		return;

	if (mips_cpu.options & MIPS_CPU_FPU) {
		__enable_fpu();
		__asm__ __volatile__("cfc1\t$0,$31");
	}
	CLEAR_FPU_OWNER();
}
/*
 * flush_thread - reset the current task's FPU state.
 *
 * Marks the FP context as unused and, if this task is the lazy FPU
 * owner, releases the FPU and clears the coprocessor-1-usable bit in
 * the task's saved Status register.
 *
 * NOTE(review): this is a second definition of flush_thread in this
 * file; presumably the two variants are selected by surrounding
 * #ifdefs outside this view — confirm.
 */
void flush_thread(void)
{
	/* Mark fpu context to be cleared */
	current->used_math = 0;

	/* Forget lazy fpu state */
	if (last_task_used_math == current && mips_cpu.options & MIPS_CPU_FPU) {
		struct pt_regs *regs;

		/* Make CURRENT lose fpu: read FCSR (CP1 reg 31) into $0,
		 * then disable the FPU and drop ownership. */
		__enable_fpu();
		__asm__ __volatile__("cfc1\t$0,$31");
		__disable_fpu();
		last_task_used_math = NULL;

		/*
		 * Locate the saved register frame at the top of this task's
		 * kernel stack (below the 32-byte argument save area) and
		 * clear ST0_CU1 so coprocessor 1 is no longer usable when
		 * the task returns to user mode.
		 */
		regs = (struct pt_regs *) ((unsigned long) current +
			KERNEL_STACK_SIZE - 32 - sizeof(struct pt_regs));
		regs->cp0_status &= ~ST0_CU1;
	}
}
int dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) { unsigned long long *fregs; int i; unsigned long tmp; if (!task->used_math) return 0; if(!(mips_cpu.options & MIPS_CPU_FPU)) { fregs = (unsigned long long *) task->thread.fpu.soft.regs; } else { fregs = (unsigned long long *) &task->thread.fpu.hard.fp_regs[0]; if (last_task_used_math == task) { __enable_fpu(); save_fp (task); __disable_fpu(); last_task_used_math = NULL; regs->cp0_status &= ~ST0_CU1; } } /* * The odd registers are actually the high * order bits of the values stored in the even * registers - unless we're using r2k_switch.S. */ for (i = 0; i < 32; i++) { #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_R5900) if (mips_cpu.options & MIPS_CPU_FPU) tmp = *(unsigned long *)(fregs + i); else #endif if (i & 1) tmp = (unsigned long) (fregs[(i & ~1)] >> 32); else tmp = (unsigned long) (fregs[i] & 0xffffffff); *(unsigned long *)(&(*r)[i]) = tmp; }