/*
 * Fill in the extra register state area specified with the specified lwp's
 * platform-dependent floating-point extra register state information
 * (currently just the %gsr value).
 * NOTE:   'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
xregs_getfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	/* %fprs value with the FPU fully enabled: FEF plus both dirty bits */
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr;

	/*
	 * fp_fksave() does not flush the GSR register into
	 * the lwp area, so do it now
	 */
	kpreempt_disable();
	if (ttolwp(curthread) == lwp && fpu_exists) {
		/*
		 * The target lwp is the one running here, so the live
		 * hardware registers are authoritative: snapshot %fprs
		 * and flush %gsr into the lwp save area before reading it.
		 */
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			/*
			 * The FPU is not fully enabled; enable it so that
			 * save_gsr() can access %gsr without trapping.
			 */
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		save_gsr(fp);
	}
	/* For any other lwp, the saved copy in the fpu area is current. */
	gsr = get_gsr(fp);
	kpreempt_enable();
	PRXREG_GSR(xregs) = gsr;
}
/*
 * Enable the FPU for the current lwp and make its floating-point context
 * current in hardware.  If the lwp already has saved FP state (fpu_en set),
 * reload it; otherwise initialize a fresh context and clear the registers.
 * On machines with no FPU hardware, initialize the software register file
 * to NaN patterns instead.
 * NOTE(review): this mirrors the fpu_exists paths of fp_disabled() but
 * without the kpreempt_disable/enable bracket — presumably callers already
 * guarantee no preemption here; confirm at the call sites.
 */
void
fp_enable(void)
{
	klwp_id_t lwp;
	kfpu_t *fp;

	lwp = ttolwp(curthread);
	ASSERT(lwp != NULL);
	fp = lwptofpu(lwp);
	if (fpu_exists) {
		if (fp->fpu_en) {
			/*
			 * Context already exists: reload the saved registers.
			 */
#ifdef DEBUG
			if (fpdispr)
				cmn_err(CE_NOTE,
				    "fpu disabled, but already enabled\n");
#endif
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* Force the saved %fprs to show FPU enabled. */
				fp->fpu_fprs = FPRS_FEF;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "fpu disabled, saved fprs disabled\n");
#endif
			}
			/* Enable the hardware before touching FP registers. */
			_fp_write_fprs(FPRS_FEF);
			fp_restore(fp);
		} else {
			/* First use: set up a brand-new FP context. */
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			fp->fpu_fprs = FPRS_FEF;
			_fp_write_fprs(FPRS_FEF);
			fp_clearregs(fp);
		}
	} else {
		int i;

		/*
		 * No FPU hardware: initialize the emulated register file
		 * once, filling all single and upper-double registers with
		 * all-ones (NaN) patterns.
		 */
		if (!fp->fpu_en) {
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			for (i = 0; i < 32; i++)
				fp->fpu_fr.fpu_regs[i] = (uint_t)-1; /* NaN */
			for (i = 16; i < 32; i++)		/* NaN */
				fp->fpu_fr.fpu_dregs[i] = (uint64_t)-1;
		}
	}
}
/*
 * Release the given floating-point context.  If it belongs to the thread
 * currently running, mark it disabled in software and turn the FPU off in
 * hardware; a context belonging to some other thread has nothing resident
 * in this CPU's FPU, so there is nothing to do.
 */
/*ARGSUSED1*/
void
fp_free(kfpu_t *fp, int isexec)
{
	int spl;

	/* Only tear down hardware state for the current thread's context. */
	if (curthread->t_lwp == NULL || lwptofpu(curthread->t_lwp) != fp)
		return;

	fp->fpu_en = 0;
	fp->fpu_fprs = 0;

	/* Block interrupts while disabling the FPU via %fprs. */
	spl = splhigh();
	_fp_write_fprs(0);
	splx(spl);
}
/*
 * Set the floating-point ancillary state registers (currently only %gsr)
 * for the given lwp from the caller-supplied asrset_t.  The new value is
 * stored in the lwp's save area, and additionally pushed into the live
 * %gsr register when the target lwp is the one running on this CPU.
 */
void
setfpasrs(klwp_t *lwp, asrset_t asr)
{
	kfpu_t *fp = lwptofpu(lwp);
	/* %fprs value with the FPU fully enabled: FEF plus both dirty bits */
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

	kpreempt_disable();
	/*
	 * Refresh the saved %fprs from hardware before testing FPRS_FEF
	 * below, so the decision reflects the FPU's current state.
	 */
	if (ttolwp(curthread) == lwp)
		fp->fpu_fprs = _fp_read_fprs();
	/* Only a context that is in use (software- or hardware-enabled). */
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		/* Update the saved copy unconditionally ... */
		set_gsr(asr[ASR_GSR], fp);
		/* ... and the live register only if it is ours right now. */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* Enable the FPU so %gsr can be written. */
				_fp_write_fprs(fprs);
				fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
			}
			restore_gsr(fp);
		}
	}
	kpreempt_enable();
}
/*
 * Set the specified lwp's platform-dependent floating-point
 * extra register state (currently just %gsr) based on the
 * specified input.
 */
void
xregs_setfpfiller(klwp_id_t lwp, caddr_t xrp)
{
	prxregset_t *xregs = (prxregset_t *)xrp;
	kfpu_t *fp = lwptofpu(lwp);
	/* %fprs value with the FPU fully enabled: FEF plus both dirty bits */
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	uint64_t gsr = PRXREG_GSR(xregs);

	kpreempt_disable();
	/* Always update the saved copy in the lwp's fpu area. */
	set_gsr(gsr, lwptofpu(lwp));

	/*
	 * If the target lwp is the one running on this CPU, also push the
	 * new value into the live %gsr register.
	 */
	if ((lwp == ttolwp(curthread)) && fpu_exists) {
		fp->fpu_fprs = _fp_read_fprs();
		if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
			/* Enable the FPU so restore_gsr() cannot trap. */
			_fp_write_fprs(fprs);
			fp->fpu_fprs = (V9_FPU_FPRS_TYPE)fprs;
		}
		restore_gsr(lwptofpu(lwp));
	}
	kpreempt_enable();
}
/*
 * For use by procfs to save the floating point context of the thread.
 * Note the if (ttolwp(lwp) == curthread) in prstop, which calls
 * this function, ensures that it is safe to read the fprs here.
 */
void
fp_prsave(kfpu_t *fp)
{
	/* Only save a context that is in use (software- or hw-enabled). */
	if ((fp->fpu_en) || (fp->fpu_fprs & FPRS_FEF)) {
		kpreempt_disable();
		if (fpu_exists) {
			/* Snapshot the live %fprs into the save area. */
			fp->fpu_fprs = _fp_read_fprs();
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/*
				 * The FPU is not fully enabled; enable it
				 * (FEF plus both dirty bits) so fp_fksave()
				 * can read the registers without trapping.
				 */
				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

				_fp_write_fprs(fprs);
				fp->fpu_fprs = fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "fp_prsave with fp disabled!");
#endif
			}
			fp_fksave(fp);
		}
		kpreempt_enable();
	}
}
/*
 * fp_disabled normally occurs when the first floating point in a non-threaded
 * program causes an fp_disabled trap. For threaded programs, the ILP32 threads
 * library calls the .setpsr fasttrap, which has been modified to also set the
 * appropriate bits in fpu_en and fpu_fprs, as well as to enable the %fprs,
 * as before. The LP64 threads library will write to the %fprs directly,
 * so fpu_en will never get updated for LP64 threaded programs,
 * although fpu_fprs will, via resume.
 */
void
fp_disabled(struct regs *rp)
{
	klwp_id_t lwp;
	kfpu_t *fp;
	int ftt;

#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	/*
	 * This code is here because sometimes the call instruction
	 * generates an fp_disabled trap when the call offset is large.
	 */
	if (spitfire_call_bug) {
		uint_t instr = 0;
		extern void trap(struct regs *rp, caddr_t addr, uint32_t type,
		    uint32_t mmu_fsr);

		/* Fetch the faulting instruction from user or kernel space. */
		if (USERMODE(rp->r_tstate)) {
			(void) fuword32((void *)rp->r_pc, &instr);
		} else {
			instr = *(uint_t *)(rp->r_pc);
		}
		/* Opcode bits 01 in the top two positions: a call instr. */
		if ((instr & 0xc0000000) == 0x40000000) {
			ill_fpcalls++;
			trap(rp, NULL, T_UNIMP_INSTR, 0);
			return;
		}
	}
#endif /* SF_ERRATA_30 - call causes fp-disabled */

#ifdef CHEETAH_ERRATUM_109 /* interrupts not taken during fpops */
	/*
	 * UltraSPARC III will report spurious fp-disabled exceptions when
	 * the pipe is full of fpops and an interrupt is triggered. By the
	 * time we get here the interrupt has been taken and we just need
	 * to return to where we came from and try again.
	 */
	if (fpu_exists && _fp_read_fprs() & FPRS_FEF)
		return;
#endif /* CHEETAH_ERRATUM_109 */

	lwp = ttolwp(curthread);
	ASSERT(lwp != NULL);
	fp = lwptofpu(lwp);
	if (fpu_exists) {
		kpreempt_disable();
		if (fp->fpu_en) {
			/*
			 * Context already exists (see the block comment
			 * above for how this can happen): reload it.
			 */
#ifdef DEBUG
			if (fpdispr)
				cmn_err(CE_NOTE,
				    "fpu disabled, but already enabled\n");
#endif
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* Force the saved %fprs to show enabled. */
				fp->fpu_fprs = FPRS_FEF;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "fpu disabled, saved fprs disabled\n");
#endif
			}
			/* Enable hardware before touching FP registers. */
			_fp_write_fprs(FPRS_FEF);
			fp_restore(fp);
		} else {
			/* First FP use by this lwp: fresh context. */
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			fp->fpu_fprs = FPRS_FEF;
			_fp_write_fprs(FPRS_FEF);
			fp_clearregs(fp);
		}
		kpreempt_enable();
	} else {
		/*
		 * No FPU hardware: emulate the faulting instruction in
		 * software against the lwp's saved register file.
		 */
		fp_simd_type fpsd;
		int i;

		(void) flush_user_windows_to_stack(NULL);
		if (!fp->fpu_en) {
			/* First use: fill the register file with NaNs. */
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			for (i = 0; i < 32; i++)
				fp->fpu_fr.fpu_regs[i] = (uint_t)-1; /* NaN */
			for (i = 16; i < 32; i++)	/* NaN */
				fp->fpu_fr.fpu_dregs[i] = (uint64_t)-1;
		}
		/* Non-zero ftt means the emulator hit a trap condition. */
		if (ftt = fp_emulator(&fpsd, (fp_inst_type *)rp->r_pc, rp,
		    (ulong_t *)rp->r_sp, fp)) {
			fp->fpu_q_entrysize = sizeof (struct _fpq);
			fp_traps(&fpsd, ftt, rp);
		}
	}
}