/*
 * Handle a scheduling IPI on the current CPU.
 *
 * Reads this CPU's entry in sched_ipi_data, performs the requested
 * action(s) on the target process, then acknowledges by clearing the
 * flags word.  The sender spins/waits on that flags word, so the clear
 * must happen only after all actions are complete — hence the barrier.
 */
void smp_sched_handler(void)
{
	unsigned flgs;
	unsigned cpu = cpuid;	/* index of the CPU taking this IPI */

	flgs = sched_ipi_data[cpu].flags;

	if (flgs) {
		struct proc * p;
		/* data field carries the target process pointer */
		p = (struct proc *)sched_ipi_data[cpu].data;

		if (flgs & SCHED_IPI_STOP_PROC) {
			/* stop the process from being scheduled */
			RTS_SET(p, RTS_PROC_STOP);
		}
		if (flgs & SCHED_IPI_SAVE_CTX) {
			/* all context has been saved already, FPU remains */
			if (proc_used_fpu(p) &&
					get_cpulocal_var(fpu_owner) == p) {
				disable_fpu_exception();
				save_local_fpu(p, FALSE /*retain*/);
				/* we're preparing to migrate somewhere else */
				release_fpu(p);
			}
		}
		if (flgs & SCHED_IPI_VM_INHIBIT) {
			/* VM asked to keep this process off the CPU */
			RTS_SET(p, RTS_VMINHIBIT);
		}
	}

	/*
	 * Make sure every action above is globally visible before the
	 * sender sees the acknowledgement (flags == 0).
	 */
	__insn_barrier();
	sched_ipi_data[cpu].flags = 0;
}
/*===========================================================================*
 *				do_setmcontext				     *
 *===========================================================================*/
int do_setmcontext(struct proc * caller, message * m_ptr)
{
/* Set machine context of a process */

  register struct proc *target;		/* process whose context is set */
  int slot, rv;
  mcontext_t ctx;			/* kernel copy of the context */

  /* Resolve the endpoint to a process table slot. */
  if (!isokendpt(m_ptr->m_lsys_krn_sys_setmcontext.endpt, &slot))
	return EINVAL;
  target = proc_addr(slot);

  /* Get the mcontext structure into our address space. */
  rv = data_copy(m_ptr->m_lsys_krn_sys_setmcontext.endpt,
	m_ptr->m_lsys_krn_sys_setmcontext.ctx_ptr, KERNEL,
	(vir_bytes) &ctx, (phys_bytes) sizeof(mcontext_t));
  if (rv != OK)
	return rv;

#if defined(__i386__)
  /* Copy FPU state */
  if (!(ctx.mc_flags & _MC_FPU_SAVED)) {
	/* No FPU state supplied: mark the saved state invalid. */
	target->p_misc_flags &= ~MF_FPU_INITIALIZED;
  } else {
	target->p_misc_flags |= MF_FPU_INITIALIZED;
	assert(sizeof(ctx.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(target->p_seg.fpu_state, &(ctx.__fpregs.__fp_reg_set),
		FPU_XFP_SIZE);
  }

  /* force reloading FPU in either case */
  release_fpu(target);
#endif

  return OK;
}
/*
 * Save the FPU state of the current task into the user-space
 * sigcontext @sc, flushing live FPU registers first if this task is
 * the current FPU owner.
 *
 * Fix: the original text contained "&curren;t" mojibake (an HTML
 * entity corruption of "&current") in the fpsave() and
 * __copy_to_user() calls, which does not compile; restored to
 * &current->thread.fpu.hard.
 *
 * Returns 0 on success, nonzero if any user-space access faulted.
 */
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	/* Tell user space whether the FP block that follows is valid. */
	fpvalid = !!used_math();
	err |= __put_user(fpvalid, &sc->sc_fpvalid);
	if (!fpvalid)
		return err;

	if (current == last_task_used_math) {
		/*
		 * Our FP state is still live in the hardware registers:
		 * enable the FPU, dump it to thread.fpu.hard, then give
		 * the unit up and re-disable it (SR_FD).
		 */
		grab_fpu();
		fpsave(&current->thread.fpu.hard);
		release_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}

	/* 32 long long FP registers plus one int status word. */
	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
			      (sizeof(long long) * 32) + (sizeof(int) * 1));
	/* State is now in the sigcontext; task no longer "uses math". */
	clear_used_math();

	return err;
}
/*===========================================================================*
 *			      do_sigreturn				     *
 *===========================================================================*/
PUBLIC int do_sigreturn(struct proc * caller, message * m_ptr)
{
/* POSIX style signals require sys_sigreturn to put things in order before
 * the signalled process can resume execution
 */
  struct sigcontext sc;		/* kernel copy of the saved context */
  register struct proc *rp;
  int proc_nr, r;

  /* Validate the target: must be a real, non-kernel process. */
  if (! isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Copy in the sigcontext structure. */
  if((r=data_copy(m_ptr->SIG_ENDPT, (vir_bytes) m_ptr->SIG_CTXT_PTR,
	KERNEL, (vir_bytes) &sc, sizeof(struct sigcontext))) != OK)
	return r;

  /* Restore user bits of psw from sc, maintain system bits from proc.
   * This prevents user code from smuggling privileged flag bits in
   * through the (user-controlled) saved context.
   */
  sc.sc_psw  =  (sc.sc_psw & X86_FLAGS_USER) |
                (rp->p_reg.psw & ~X86_FLAGS_USER);

#if (_MINIX_CHIP == _CHIP_INTEL)
  /* Don't panic kernel if user gave bad selectors. */
  sc.sc_cs = rp->p_reg.cs;
  sc.sc_ds = rp->p_reg.ds;
  sc.sc_es = rp->p_reg.es;
  sc.sc_ss = rp->p_reg.ss;
#if _WORD_SIZE == 4
  sc.sc_fs = rp->p_reg.fs;
  sc.sc_gs = rp->p_reg.gs;
#endif
#endif

  /* Restore the registers. */
  memcpy(&rp->p_reg, &sc.sc_regs, sizeof(sigregs));
#if (_MINIX_CHIP == _CHIP_INTEL)
  if(sc.sc_flags & MF_FPU_INITIALIZED)
  {
	/* Copy the saved FPU image back into the process slot. */
	memcpy(rp->p_fpu_state.fpu_save_area_p, &sc.sc_fpu_state,
		FPU_XFP_SIZE);
	rp->p_misc_flags |=  MF_FPU_INITIALIZED; /* Restore math usage flag. */
	/* force reloading FPU */
	if (fpu_owner == rp)
		release_fpu();
  }
#endif

  /* Mark the context as explicitly set so the dispatcher keeps it. */
  rp->p_misc_flags |= MF_CONTEXT_SET;
  return(OK);
}
/*===========================================================================*
 *				do_exec					     *
 *===========================================================================*/
int do_exec(struct proc * caller, message * m_ptr)
{
/* Handle sys_exec(). A process has done a successful EXEC. Patch it up. */

  register struct proc *target;
  int slot;
  char cmdname[PROC_NAME_LEN];	/* new process name, for ps(1) etc. */

  if (!isokendpt(m_ptr->PR_ENDPT, &slot))
	return EINVAL;
  target = proc_addr(slot);

  /* A pending message delivery is moot after exec; drop the flag.
   * (Clearing an already-clear bit is a no-op, so no guard needed.)
   */
  target->p_misc_flags &= ~MF_DELIVERMSG;

  /* Save command name for debugging, ps(1) output, etc.  Fall back to
   * a placeholder if the caller's name buffer cannot be copied in.
   */
  if (data_copy(caller->p_endpoint, (vir_bytes) m_ptr->PR_NAME_PTR,
	KERNEL, (vir_bytes) cmdname,
	(phys_bytes) sizeof(cmdname) - 1) != OK)
	strncpy(cmdname, "<unset>", PROC_NAME_LEN);
  cmdname[sizeof(cmdname) - 1] = '\0';

  /* Set process state: new entry point, new stack, new name. */
  arch_proc_init(target, (u32_t) m_ptr->PR_IP_PTR,
	(u32_t) m_ptr->PR_STACK_PTR, cmdname);

  /* No reply to EXEC call */
  RTS_UNSET(target, RTS_RECEIVING);

  /* Mark fpu_regs contents as not significant, so fpu
   * will be initialized, when it's used next time. */
  target->p_misc_flags &= ~MF_FPU_INITIALIZED;
  /* force reloading FPU if the current process is the owner */
  release_fpu(target);

  return OK;
}
/*===========================================================================*
 *				do_clear				     *
 *===========================================================================*/
int do_clear(struct proc * caller, message * m_ptr)
{
/* Handle sys_clear. Only the PM can request other process slots to be cleared
 * when a process has exited.
 * The routine to clean up a process table slot cancels outstanding timers,
 * possibly removes the process from the message queues, and resets certain
 * process table fields to the default values.
 */
  struct proc *rc;		/* slot being cleared */
  int exit_p;			/* process number of the exiting process */
  int i;

  if(!isokendpt(m_ptr->PR_ENDPT, &exit_p)) { /* get exiting process */
	return EINVAL;
  }
  rc = proc_addr(exit_p);	/* clean up */

  /* Drop the process' virtual address space first. */
  release_address_space(rc);

  /* Don't clear if already cleared. */
  if(isemptyp(rc)) return OK;

  /* Check the table with IRQ hooks to see if hooks should be released. */
  for (i=0; i < NR_IRQ_HOOKS; i++) {
	if (rc->p_endpoint == irq_hooks[i].proc_nr_e) {
		rm_irq_handler(&irq_hooks[i]);	/* remove interrupt handler */
		irq_hooks[i].proc_nr_e = NONE;	/* mark hook as free */
	}
  }

  /* Remove the process' ability to send and receive messages */
  clear_endpoint(rc);

  /* Turn off any alarm timers at the clock. */
  reset_timer(&priv(rc)->s_alarm_timer);

  /* Make sure that the exiting process is no longer scheduled,
   * and mark slot as FREE. Also mark saved fpu contents as not significant.
   */
  RTS_SETFLAGS(rc, RTS_SLOT_FREE);

  /* release FPU */
  release_fpu(rc);
  rc->p_misc_flags &= ~MF_FPU_INITIALIZED;

  /* Release the process table slot. If this is a system process, also
   * release its privilege structure. Further cleanup is not needed at
   * this point. All important fields are reinitialized when the
   * slots are assigned to another, new process.
   */
  if (priv(rc)->s_flags & SYS_PROC) priv(rc)->s_proc_nr = NONE;

#if 0
  /* Clean up virtual memory */
  if (rc->p_misc_flags & MF_VM) {
  	vm_map_default(rc);
  }
#endif

  return OK;
}