void
fp_precise(struct regs *rp)
{
        fp_simd_type    fpsd;
        int             inst_ftt;

        union {
                uint_t          i;
                fp_inst_type    inst;
        } kluge;

        klwp_t *lwp = ttolwp(curthread);
        kfpu_t *fp = lwptofpu(lwp);
        uint64_t gsr;
        int mstate;

        if (fpu_exists)
                save_gsr(fp);
        gsr = get_gsr(fp);

        /*
         * Get the instruction to be emulated from the pc saved by the trap.
         * Note that the kernel is NOT prepared to handle a kernel fp
         * exception if it can't pass successfully through the fp simulator.
         *
         * If the trap occurred in user mode, set lwp_state to LWP_SYS for the
         * purposes of clock accounting and switch to the LMS_TRAP microstate.
         */
        if (USERMODE(rp->r_tstate)) {
                inst_ftt = _fp_read_inst((uint32_t *)rp->r_pc, &kluge.i,
                    &fpsd);
                mstate = new_mstate(curthread, LMS_TRAP);
                lwp->lwp_state = LWP_SYS;
        } else {
                kluge.i = *(uint_t *)rp->r_pc;
                inst_ftt = ftt_none;
        }

        if (inst_ftt != ftt_none) {
                /*
                 * Save the bad address and post the signal.
                 * It can only be an ftt_alignment or ftt_fault trap.
                 * XXX - How can this work w/mainsail and do_unaligned?
                 */
                fpsd.fp_trapaddr = (caddr_t)rp->r_pc;
                fp_traps(&fpsd, inst_ftt, rp);
        } else {
                /*
                 * Conjure up a floating point queue and advance the pc/npc
                 * to fake a deferred fp trap. We now run the fp simulator
                 * in fp_precise, while allowing setfpregs to call fp_runq,
                 * because this allows us to do the ugly machinations to
                 * inc/dec the pc depending on the trap type, as per
                 * bugid 1210159. fp_runq is still going to have the
                 * generic "how do I connect the fp queue to the pc/npc"
                 * problem alluded to in bugid 1192883, which is only a
                 * problem for a restorecontext of a v8 fp queue on a
                 * v9 system, which seems like the .000000001% case (on v9)!
                 */
                struct _fpq *pfpq = &fp->fpu_q->FQu.fpq;
                fp_simd_type    fpsd;          /* shadows the outer fpsd */
                int             fptrap;

                pfpq->fpq_addr = (uint_t *)rp->r_pc;
                pfpq->fpq_instr = kluge.i;
                fp->fpu_qcnt = 1;
                fp->fpu_q_entrysize = sizeof (struct _fpq);

                kpreempt_disable();
                (void) flush_user_windows_to_stack(NULL);
                fptrap = fpu_vis_sim((fp_simd_type *)&fpsd,
                    (fp_inst_type *)pfpq->fpq_addr, rp,
                    (fsr_type *)&fp->fpu_fsr, gsr, kluge.i);

                /* update the hardware fp fsr state for sake of ucontext */
                if (fpu_exists)
                        _fp_write_pfsr(&fp->fpu_fsr);

                if (fptrap) {
                        /* back up the pc if the signal needs to be precise */
                        if (fptrap != ftt_ieee) {
                                fp->fpu_qcnt = 0;
                        }
                        /* post signal */
                        fp_traps(&fpsd, fptrap, rp);
                        /* decrement queue count for ieee exceptions */
                        if (fptrap == ftt_ieee) {
                                fp->fpu_qcnt = 0;
                        }
                } else {
                        fp->fpu_qcnt = 0;
                }

                /* update the software pcb copies of hardware fp registers */
                if (fpu_exists) {
                        fp_save(fp);
                }
                kpreempt_enable();
        }

        /*
         * Reset lwp_state to LWP_USER for the purposes of clock accounting,
         * and restore the previously saved microstate.
         */
        if (USERMODE(rp->r_tstate)) {
                (void) new_mstate(curthread, mstate);
                lwp->lwp_state = LWP_USER;
        }
}
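/*
 * Illustrative sketch, not part of the original source: the "kluge"
 * union in fp_precise() lets one 32-bit instruction word be read
 * either as a raw integer or through decoded instruction fields.
 * The standalone user-land analog below uses a hypothetical layout
 * (struct demo_inst is invented here; the real fp_inst_type lives in
 * the fpu simulator headers), and bit-field ordering is
 * compiler-dependent, so the printed field values are illustrative
 * only.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_inst {              /* hypothetical instruction layout */
        uint32_t rest : 25;     /* remaining instruction bits */
        uint32_t rd   : 5;      /* destination register field */
        uint32_t op   : 2;      /* primary opcode field */
};

int
main(void)
{
        union {
                uint32_t i;             /* raw instruction word */
                struct demo_inst inst;  /* decoded view of the same bits */
        } kluge;

        kluge.i = 0x81a01020;           /* arbitrary 32-bit word */
        printf("raw=0x%08x op=%u rd=%u\n", kluge.i,
            kluge.inst.op, kluge.inst.rd);
        return (0);
}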
int
prusrio(proc_t *p, enum uio_rw rw, struct uio *uiop, int old)
{
        /* longlong-aligned short buffer */
        longlong_t buffer[STACK_BUF_SIZE / sizeof (longlong_t)];
        int error = 0;
        void *bp;
        int allocated;
        ssize_t total = uiop->uio_resid;
        uintptr_t addr;
        size_t len;

        /* for short reads/writes, use the on-stack buffer */
        if (uiop->uio_resid <= STACK_BUF_SIZE) {
                bp = buffer;
                allocated = 0;
        } else {
                bp = kmem_alloc(PAGESIZE, KM_SLEEP);
                allocated = 1;
        }

#if defined(__sparc)
        if (p == curproc)
                (void) flush_user_windows_to_stack(NULL);
#endif

        switch (rw) {
        case UIO_READ:
                while (uiop->uio_resid != 0) {
                        addr = uiop->uio_offset;
                        len = MIN(uiop->uio_resid,
                            PAGESIZE - (addr & PAGEOFFSET));

                        if ((error = uread(p, bp, len, addr)) != 0 ||
                            (error = uiomove(bp, len, UIO_READ, uiop)) != 0)
                                break;
                }

                /*
                 * ENXIO indicates that a page didn't exist. If the I/O was
                 * truncated, return success; otherwise convert the error into
                 * EIO. When obeying new /proc semantics, we don't return an
                 * error for a read that begins at an invalid address.
                 */
                if (error == ENXIO) {
                        if (total != uiop->uio_resid || !old)
                                error = 0;
                        else
                                error = EIO;
                }
                break;

        case UIO_WRITE:
                while (uiop->uio_resid != 0) {
                        addr = uiop->uio_offset;
                        len = MIN(uiop->uio_resid,
                            PAGESIZE - (addr & PAGEOFFSET));

                        if ((error = uiomove(bp, len, UIO_WRITE, uiop)) != 0)
                                break;
                        if ((error = uwrite(p, bp, len, addr)) != 0) {
                                /* the write failed; undo the uiomove */
                                uiop->uio_resid += len;
                                uiop->uio_loffset -= len;
                                break;
                        }
                }

                /*
                 * ENXIO indicates that a page didn't exist. If the I/O was
                 * truncated, return success; otherwise convert the error
                 * into EIO.
                 */
                if (error == ENXIO) {
                        if (total != uiop->uio_resid)
                                error = 0;
                        else
                                error = EIO;
                }
                break;

        default:
                panic("prusrio: rw=%d is neither UIO_READ nor UIO_WRITE", rw);
                /*NOTREACHED*/
        }

        if (allocated)
                kmem_free(bp, PAGESIZE);

        return (error);
}
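/*
 * Illustrative sketch, not part of the original source: prusrio()
 * above never copies across a page boundary in a single step, so a
 * faulting page truncates rather than corrupts the transfer.  The
 * standalone user-land walk-through below replays the same chunking
 * arithmetic; DEMO_PAGESIZE and the starting address are assumptions
 * chosen for the example, not values from the kernel.
 */
#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGESIZE   4096
#define DEMO_PAGEOFFSET (DEMO_PAGESIZE - 1)
#define DEMO_MIN(a, b)  ((a) < (b) ? (a) : (b))

int
main(void)
{
        size_t addr = 0x10ff0;          /* 16 bytes before a page end */
        size_t resid = 9000;            /* bytes left to transfer */

        while (resid != 0) {
                /* never cross a page boundary in one chunk */
                size_t len = DEMO_MIN(resid,
                    DEMO_PAGESIZE - (addr & DEMO_PAGEOFFSET));
                printf("copy %4zu bytes at 0x%zx\n", len, addr);
                addr += len;
                resid -= len;
        }
        return (0);
}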
/*
 * fp_disabled normally occurs when the first floating point in a non-threaded
 * program causes an fp_disabled trap. For threaded programs, the ILP32 threads
 * library calls the .setpsr fasttrap, which has been modified to also set the
 * appropriate bits in fpu_en and fpu_fprs, as well as to enable the %fprs,
 * as before. The LP64 threads library will write to the %fprs directly,
 * so fpu_en will never get updated for LP64 threaded programs,
 * although fpu_fprs will, via resume.
 */
void
fp_disabled(struct regs *rp)
{
        klwp_id_t lwp;
        kfpu_t *fp;
        int ftt;

#ifdef SF_ERRATA_30 /* call causes fp-disabled */
        /*
         * This code is here because sometimes the call instruction
         * generates an fp_disabled trap when the call offset is large.
         */
        if (spitfire_call_bug) {
                uint_t instr = 0;
                extern void trap(struct regs *rp, caddr_t addr, uint32_t type,
                    uint32_t mmu_fsr);

                if (USERMODE(rp->r_tstate)) {
                        (void) fuword32((void *)rp->r_pc, &instr);
                } else {
                        instr = *(uint_t *)(rp->r_pc);
                }
                if ((instr & 0xc0000000) == 0x40000000) {
                        ill_fpcalls++;
                        trap(rp, NULL, T_UNIMP_INSTR, 0);
                        return;
                }
        }
#endif /* SF_ERRATA_30 - call causes fp-disabled */

#ifdef CHEETAH_ERRATUM_109 /* interrupts not taken during fpops */
        /*
         * UltraSPARC III will report spurious fp-disabled exceptions when
         * the pipe is full of fpops and an interrupt is triggered. By the
         * time we get here the interrupt has been taken and we just need
         * to return to where we came from and try again.
         */
        if (fpu_exists && _fp_read_fprs() & FPRS_FEF)
                return;
#endif /* CHEETAH_ERRATUM_109 */

        lwp = ttolwp(curthread);
        ASSERT(lwp != NULL);
        fp = lwptofpu(lwp);

        if (fpu_exists) {
                kpreempt_disable();

                if (fp->fpu_en) {
#ifdef DEBUG
                        if (fpdispr)
                                cmn_err(CE_NOTE,
                                    "fpu disabled, but already enabled\n");
#endif
                        if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
                                fp->fpu_fprs = FPRS_FEF;
#ifdef DEBUG
                                if (fpdispr)
                                        cmn_err(CE_NOTE,
                                            "fpu disabled, "
                                            "saved fprs disabled\n");
#endif
                        }
                        _fp_write_fprs(FPRS_FEF);
                        fp_restore(fp);
                } else {
                        fp->fpu_en = 1;
                        fp->fpu_fsr = 0;
                        fp->fpu_fprs = FPRS_FEF;
                        _fp_write_fprs(FPRS_FEF);
                        fp_clearregs(fp);
                }
                kpreempt_enable();
        } else {
                fp_simd_type fpsd;
                int i;

                (void) flush_user_windows_to_stack(NULL);
                if (!fp->fpu_en) {
                        fp->fpu_en = 1;
                        fp->fpu_fsr = 0;
                        for (i = 0; i < 32; i++)
                                fp->fpu_fr.fpu_regs[i] = (uint_t)-1; /* NaN */
                        for (i = 16; i < 32; i++)               /* NaN */
                                fp->fpu_fr.fpu_dregs[i] = (uint64_t)-1;
                }
                if (ftt = fp_emulator(&fpsd, (fp_inst_type *)rp->r_pc, rp,
                    (ulong_t *)rp->r_sp, fp)) {
                        fp->fpu_q_entrysize = sizeof (struct _fpq);
                        fp_traps(&fpsd, ftt, rp);
                }
        }
}
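/*
 * Illustrative sketch, not part of the original source: the no-FPU
 * path in fp_disabled() seeds every register with all-one bits,
 * which decodes as a NaN so that reads of never-written fp state are
 * conspicuous rather than silently zero.  The standalone user-land
 * check below confirms that the (uint64_t)-1 bit pattern is a NaN
 * under IEEE 754 double encoding.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <math.h>

int
main(void)
{
        uint64_t bits = (uint64_t)-1;   /* 0xffffffffffffffff */
        double d;

        /* reinterpret the bit pattern as a double */
        (void) memcpy(&d, &bits, sizeof (d));

        /* exponent all ones + nonzero mantissa => NaN; prints 1 */
        printf("isnan = %d\n", isnan(d));
        return (0);
}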
/* ARGSUSED */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
        proc_t *p = ttoproc(curthread);
        struct as *as;
        proc_t *cp, **orphpp;
        klwp_t *clone;
        kthread_t *t;
        task_t *tk;
        rval_t r;
        int error;
        int i;
        rctl_set_t *dup_set;
        rctl_alloc_gp_t *dup_gp;
        rctl_entity_p_t e;
        lwpdir_t *ldp;
        lwpent_t *lep;
        lwpent_t *clep;

        /*
         * Allow only these two flags.
         */
        if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
                error = EINVAL;
                goto forkerr;
        }

        /*
         * fork is not supported for the /proc agent lwp.
         */
        if (curthread == p->p_agenttp) {
                error = ENOTSUP;
                goto forkerr;
        }

        if ((error = secpolicy_basic_fork(CRED())) != 0)
                goto forkerr;

        /*
         * If the calling lwp is doing a fork1() then the
         * other lwps in this process are not duplicated and
         * don't need to be held where their kernel stacks can be
         * cloned. If doing forkall(), the process is held with
         * SHOLDFORK, so that the lwps are at a point where their
         * stacks can be copied which is on entry or exit from
         * the kernel.
         */
        if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
                aston(curthread);
                error = EINTR;
                goto forkerr;
        }

#if defined(__sparc)
        /*
         * Ensure that the user stack is fully constructed
         * before creating the child process structure.
         */
        (void) flush_user_windows_to_stack(NULL);
#endif

        mutex_enter(&p->p_lock);
        /*
         * If this is vfork(), cancel any suspend request we might
         * have gotten from some other thread via lwp_suspend().
         * Otherwise we could end up with a deadlock on return
         * from the vfork() in both the parent and the child.
         */
        if (isvfork)
                curthread->t_proc_flag &= ~TP_HOLDLWP;

        /*
         * Prevent our resource set associations from being changed
         * during fork.
         */
        pool_barrier_enter();
        mutex_exit(&p->p_lock);

        /*
         * Create a child proc struct. Place a VN_HOLD on appropriate vnodes.
         */
        if (getproc(&cp, 0) < 0) {
                mutex_enter(&p->p_lock);
                pool_barrier_exit();
                continuelwps(p);
                mutex_exit(&p->p_lock);
                error = EAGAIN;
                goto forkerr;
        }

        TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

        /*
         * Assign an address space to child
         */
        if (isvfork) {
                /*
                 * Clear any watched areas and remember the
                 * watched pages for restoring in vfwait().
                 */
                as = p->p_as;
                if (avl_numnodes(&as->a_wpage) != 0) {
                        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
                        as_clearwatch(as);
                        p->p_wpage = as->a_wpage;
                        avl_create(&as->a_wpage, wp_compare,
                            sizeof (struct watched_page),
                            offsetof(struct watched_page, wp_link));
                        AS_LOCK_EXIT(as, &as->a_lock);
                }
                cp->p_as = as;
                cp->p_flag |= SVFORK;
        } else {