Example #1
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
    int err = 0;
    int fpvalid;

    fpvalid = !!used_math();
    err |= __put_user(fpvalid, &sc->sc_fpvalid);
    if (!fpvalid)
        return err;

    if (current == last_task_used_math) {
        enable_fpu();
        save_fpu(current);
        disable_fpu();
        last_task_used_math = NULL;
        regs->sr |= SR_FD;
    }

    err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
                          (sizeof(long long) * 32) + (sizeof(int) * 1));
    clear_used_math();

    return err;
}
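The restore path reverses this flow: read back the validity flag, then copy the register image from user space into the thread state. A minimal sketch under the same assumptions (the SuperH lazy-FPU globals, the layout above, and the usual __get_user/__copy_from_user helpers); this is an illustration, not the kernel's source:

static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
    int err = 0;
    int fpvalid;

    err |= __get_user(fpvalid, &sc->sc_fpvalid);
    if (err || !fpvalid)
        return err;                     /* nothing was saved, nothing to do */

    /* Disown any live FPU state so the copied image takes effect. */
    if (current == last_task_used_math) {
        last_task_used_math = NULL;
        regs->sr |= SR_FD;              /* force a fault-in on next FPU use */
    }

    err |= __copy_from_user(&current->thread.xstate->hardfpu,
                            &sc->sc_fpregs[0],
                            (sizeof(long long) * 32) + (sizeof(int) * 1));
    if (!err)
        set_used_math();
    return err;
}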
Example #2
static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
#if 0
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);
		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return (0);
	}

#endif
	return (-1);
}
Example #3
int exec_thread( unsigned int cpu_id, struct thread *t, 
				 unsigned int milliseconds )
{
	assert( t != NULL );
	
	set_fpu_trap();
	
	set_map( t->process->map );

	cpu[ cpu_id ].sched.running = 1;			// Mark the CPU as running before releasing the lock.
	cpu[ cpu_id ].sched.current_thread = t;		// Mark the thread.
	
	release_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) ); 
			// Other CPUs can now register their need to mess with this CPU's tables.

	do
	{
		sysenter_set_esp( t->stack_kernel );

		cpu[ cpu_id ].system_tss->esp0 = t->stack_kernel;
		cpu[ cpu_id ].system_tss->esp  = t->stack;
		cpu[ cpu_id ].system_tss->cr3  = (uint32_t)t->process->map;

		set_apic_distance( cpu_id, milliseconds );

		stats_time( cpu_id, &(cpu[ cpu_id ].st_schedulerTime) ); // Scheduler time ends here.

		t->stack = __switch_thread( t->stack );

		stats_time_start( &(cpu[ cpu_id ].st_schedulerTime) ); // Scheduler time resumes here.

	} while ( cpu[cpu_id].sched.locked == 1 ); // Re-run if the scheduler was locked mid-switch.

	// If the thread touched the FPU, save its state before it can migrate.
	if ( t->math_state > 0 ) save_fpu( t );
	
	cpu[ cpu_id ].sched.current_thread = NULL;
	cpu[ cpu_id ].sched.running = 0;
			// WARNING: Don't use the *t pointer anymore after this point.

			// Synchronization point: other CPUs may now modify this CPU's tables.

	acquire_spinlock( &(cpu[cpu_id].sched.lock_scheduler) );

	
	return 0;
}
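The inverted locking is the key protocol here: the caller must hold lock_scheduler on entry, exec_thread drops it for the duration of the run, and re-acquires it before returning. A minimal sketch of a calling loop, where pick_next_thread() and idle_wait() are hypothetical helpers, not part of the original source:

void scheduler_loop( unsigned int cpu_id )
{
	acquire_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) );

	for (;;)
	{
		struct thread *t = pick_next_thread( cpu_id );	// hypothetical; lock is held here

		if ( t != NULL )
			exec_thread( cpu_id, t, 10 );	// drops the lock while t runs, re-acquires before returning
		else
		{
			release_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) );
			idle_wait( cpu_id );			// hypothetical idle path
			acquire_spinlock( &(cpu[ cpu_id ].sched.lock_scheduler) );
		}
	}
}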
Example #4
/*===========================================================================*
 *			      do_getmcontext				     *
 *===========================================================================*/
int do_getmcontext(struct proc * caller, message * m_ptr)
{
/* Retrieve machine context of a process */

  register struct proc *rp;
  int proc_nr, r;
  mcontext_t mc;

  if (!isokendpt(m_ptr->m_lsys_krn_sys_getmcontext.endpt, &proc_nr))
	return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

#if defined(__i386__)
  if (!proc_used_fpu(rp))
	return(OK);	/* No state to copy */
#endif

  /* Get the mcontext structure into our address space.  */
  if ((r = data_copy(m_ptr->m_lsys_krn_sys_getmcontext.endpt,
		m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr, KERNEL,
		(vir_bytes) &mc, (phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  mc.mc_flags = 0;
#if defined(__i386__)
  /* Copy FPU state */
  if (proc_used_fpu(rp)) {
	/* make sure that the FPU context is saved into proc structure first */
	save_fpu(rp);
	mc.mc_flags = (rp->p_misc_flags & MF_FPU_INITIALIZED) ? _MC_FPU_SAVED : 0;
	assert(sizeof(mc.__fpregs.__fp_reg_set) == FPU_XFP_SIZE);
	memcpy(&(mc.__fpregs.__fp_reg_set), rp->p_seg.fpu_state, FPU_XFP_SIZE);
  } 
#endif

  /* Copy the mcontext structure to the user's address space. */
  if ((r = data_copy(KERNEL, (vir_bytes) &mc,
	m_ptr->m_lsys_krn_sys_getmcontext.endpt,
	m_ptr->m_lsys_krn_sys_getmcontext.ctx_ptr,
	(phys_bytes) sizeof(mcontext_t))) != OK)
	return(r);

  return(OK);
}
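A restore-side counterpart only trusts the FPU area when _MC_FPU_SAVED is set. A hedged sketch of what the FPU branch of a do_setmcontext-style handler would look like, reusing the names above (an illustration, not the MINIX source):

#if defined(__i386__)
  /* Hypothetical restore branch: only adopt the FPU image if it was saved. */
  if (mc.mc_flags & _MC_FPU_SAVED) {
	memcpy(rp->p_seg.fpu_state, &(mc.__fpregs.__fp_reg_set), FPU_XFP_SIZE);
	rp->p_misc_flags |= MF_FPU_INITIALIZED;
  } else
	rp->p_misc_flags &= ~MF_FPU_INITIALIZED;
#endif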
Example #5
/**********************************************************************
 *		fpe_handler
 *
 * Handler for SIGFPE.
 */
static void fpe_handler( int signal, siginfo_t *info, ucontext_t *ucontext )
{
    EXCEPTION_RECORD rec;
    CONTEXT context;

    switch ( info->si_code )
    {
    case FPE_FLTSUB:
        rec.ExceptionCode = EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
        break;
    case FPE_INTDIV:
        rec.ExceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO;
        break;
    case FPE_INTOVF:
        rec.ExceptionCode = EXCEPTION_INT_OVERFLOW;
        break;
    case FPE_FLTDIV:
        rec.ExceptionCode = EXCEPTION_FLT_DIVIDE_BY_ZERO;
        break;
    case FPE_FLTOVF:
        rec.ExceptionCode = EXCEPTION_FLT_OVERFLOW;
        break;
    case FPE_FLTUND:
        rec.ExceptionCode = EXCEPTION_FLT_UNDERFLOW;
        break;
    case FPE_FLTRES:
        rec.ExceptionCode = EXCEPTION_FLT_INEXACT_RESULT;
        break;
    case FPE_FLTINV:
    default:
        rec.ExceptionCode = EXCEPTION_FLT_INVALID_OPERATION;
        break;
    }

    save_context( &context, ucontext );
    save_fpu( &context, ucontext );
    rec.ExceptionFlags   = EXCEPTION_CONTINUABLE;
    rec.ExceptionRecord  = NULL;
    rec.ExceptionAddress = (LPVOID)context.pc;
    rec.NumberParameters = 0;
    __regs_RtlRaiseException( &rec, &context );
    restore_context( &context, ucontext );
    restore_fpu( &context, ucontext );
}
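A handler with this shape is installed via sigaction() with SA_SIGINFO; the third argument arrives as a void * that is really a ucontext_t *, hence the cast. A minimal, hypothetical installer (not part of the original file):

#include <signal.h>
#include <string.h>

static int install_fpe_handler( void )
{
    struct sigaction sa;

    memset( &sa, 0, sizeof(sa) );
    sa.sa_sigaction = (void (*)(int, siginfo_t *, void *))fpe_handler;
    sa.sa_flags     = SA_SIGINFO | SA_RESTART;
    sigemptyset( &sa.sa_mask );
    return sigaction( SIGFPE, &sa, NULL );
}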
Example #6
static inline int setup_sigcontext_fpu(struct pt_regs *regs,
				       struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	int ret = 0;

	__put_user_error(used_math(), &sc->used_math_flag, ret);

	if (!used_math())
		return ret;

	preempt_disable();
#if IS_ENABLED(CONFIG_LAZY_FPU)
	if (last_task_used_math == tsk)
		save_fpu(last_task_used_math);
#else
	unlazy_fpu(tsk);
#endif
	ret = __copy_to_user(&sc->fpu, &tsk->thread.fpu,
			     sizeof(struct fpu_struct));
	preempt_enable();
	return ret;
}
Example #7
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUMIPSState *env = opaque;
    int i;

    /* Save active TC */
    save_tc(f, &env->active_tc);

    /* Save active FPU */
    save_fpu(f, &env->active_fpu);

    /* Save MVP */
    qemu_put_sbe32s(f, &env->mvp->CP0_MVPControl);
    qemu_put_sbe32s(f, &env->mvp->CP0_MVPConf0);
    qemu_put_sbe32s(f, &env->mvp->CP0_MVPConf1);

    /* Save TLB */
    qemu_put_be32s(f, &env->tlb->nb_tlb);
    for(i = 0; i < MIPS_TLB_MAX; i++) {
        uint16_t flags = ((env->tlb->mmu.r4k.tlb[i].G << 10) |
                          (env->tlb->mmu.r4k.tlb[i].C0 << 7) |
                          (env->tlb->mmu.r4k.tlb[i].C1 << 4) |
                          (env->tlb->mmu.r4k.tlb[i].V0 << 3) |
                          (env->tlb->mmu.r4k.tlb[i].V1 << 2) |
                          (env->tlb->mmu.r4k.tlb[i].D0 << 1) |
                          (env->tlb->mmu.r4k.tlb[i].D1 << 0));
        uint8_t asid;

        qemu_put_betls(f, &env->tlb->mmu.r4k.tlb[i].VPN);
        qemu_put_be32s(f, &env->tlb->mmu.r4k.tlb[i].PageMask);
        asid = env->tlb->mmu.r4k.tlb[i].ASID;
        qemu_put_8s(f, &asid);
        qemu_put_be16s(f, &flags);
        qemu_put_betls(f, &env->tlb->mmu.r4k.tlb[i].PFN[0]);
        qemu_put_betls(f, &env->tlb->mmu.r4k.tlb[i].PFN[1]);
    }

    /* Save CPU metastate */
    qemu_put_be32s(f, &env->current_tc);
    qemu_put_be32s(f, &env->current_fpu);
    qemu_put_sbe32s(f, &env->error_code);
    qemu_put_be32s(f, &env->hflags);
    qemu_put_betls(f, &env->btarget);
    i = env->bcond;
    qemu_put_sbe32s(f, &i);

    /* Save remaining CP0 registers */
    qemu_put_sbe32s(f, &env->CP0_Index);
    qemu_put_sbe32s(f, &env->CP0_Random);
    qemu_put_sbe32s(f, &env->CP0_VPEControl);
    qemu_put_sbe32s(f, &env->CP0_VPEConf0);
    qemu_put_sbe32s(f, &env->CP0_VPEConf1);
    qemu_put_betls(f, &env->CP0_YQMask);
    qemu_put_betls(f, &env->CP0_VPESchedule);
    qemu_put_betls(f, &env->CP0_VPEScheFBack);
    qemu_put_sbe32s(f, &env->CP0_VPEOpt);
    qemu_put_betls(f, &env->CP0_EntryLo0);
    qemu_put_betls(f, &env->CP0_EntryLo1);
    qemu_put_betls(f, &env->CP0_Context);
    qemu_put_sbe32s(f, &env->CP0_PageMask);
    qemu_put_sbe32s(f, &env->CP0_PageGrain);
    qemu_put_sbe32s(f, &env->CP0_Wired);
    qemu_put_sbe32s(f, &env->CP0_SRSConf0);
    qemu_put_sbe32s(f, &env->CP0_SRSConf1);
    qemu_put_sbe32s(f, &env->CP0_SRSConf2);
    qemu_put_sbe32s(f, &env->CP0_SRSConf3);
    qemu_put_sbe32s(f, &env->CP0_SRSConf4);
    qemu_put_sbe32s(f, &env->CP0_HWREna);
    qemu_put_betls(f, &env->CP0_BadVAddr);
    qemu_put_sbe32s(f, &env->CP0_Count);
    qemu_put_betls(f, &env->CP0_EntryHi);
    qemu_put_sbe32s(f, &env->CP0_Compare);
    qemu_put_sbe32s(f, &env->CP0_Status);
    qemu_put_sbe32s(f, &env->CP0_IntCtl);
    qemu_put_sbe32s(f, &env->CP0_SRSCtl);
    qemu_put_sbe32s(f, &env->CP0_SRSMap);
    qemu_put_sbe32s(f, &env->CP0_Cause);
    qemu_put_betls(f, &env->CP0_EPC);
    qemu_put_sbe32s(f, &env->CP0_PRid);
    qemu_put_sbe32s(f, &env->CP0_EBase);
    qemu_put_sbe32s(f, &env->CP0_Config0);
    qemu_put_sbe32s(f, &env->CP0_Config1);
    qemu_put_sbe32s(f, &env->CP0_Config2);
    qemu_put_sbe32s(f, &env->CP0_Config3);
    qemu_put_sbe32s(f, &env->CP0_Config6);
    qemu_put_sbe32s(f, &env->CP0_Config7);
    qemu_put_betls(f, &env->lladdr);
    for(i = 0; i < 8; i++)
        qemu_put_betls(f, &env->CP0_WatchLo[i]);
    for(i = 0; i < 8; i++)
        qemu_put_sbe32s(f, &env->CP0_WatchHi[i]);
    qemu_put_betls(f, &env->CP0_XContext);
    qemu_put_sbe32s(f, &env->CP0_Framemask);
    qemu_put_sbe32s(f, &env->CP0_Debug);
    qemu_put_betls(f, &env->CP0_DEPC);
    qemu_put_sbe32s(f, &env->CP0_Performance0);
    qemu_put_sbe32s(f, &env->CP0_TagLo);
    qemu_put_sbe32s(f, &env->CP0_DataLo);
    qemu_put_sbe32s(f, &env->CP0_TagHi);
    qemu_put_sbe32s(f, &env->CP0_DataHi);
    qemu_put_betls(f, &env->CP0_ErrorEPC);
    qemu_put_sbe32s(f, &env->CP0_DESAVE);

    /* Save inactive TC state */
    for (i = 0; i < MIPS_SHADOW_SET_MAX; i++)
        save_tc(f, &env->tcs[i]);
    for (i = 0; i < MIPS_FPU_MAX; i++)
        save_fpu(f, &env->fpus[i]);
}
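Savevm streams are order-dependent: the matching cpu_load() must read every field back with the mirror-image qemu_get_* calls in exactly the sequence written here. A hedged sketch of how the load side would begin, assuming symmetric load_tc()/load_fpu() helpers:

void cpu_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUMIPSState *env = opaque;

    /* Load active TC and FPU, mirroring save_tc()/save_fpu() above. */
    load_tc(f, &env->active_tc);
    load_fpu(f, &env->active_fpu);

    /* Load MVP, in the same order it was written. */
    qemu_get_sbe32s(f, &env->mvp->CP0_MVPControl);
    qemu_get_sbe32s(f, &env->mvp->CP0_MVPConf0);
    qemu_get_sbe32s(f, &env->mvp->CP0_MVPConf1);

    /* ... and so on, field for field, in the order written by cpu_save(). */
}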
Example #8
/*===========================================================================*
 *				do_fork					     *
 *===========================================================================*/
int do_fork(struct proc * caller, message * m_ptr)
{
/* Handle sys_fork().
 * m_lsys_krn_sys_fork.endpt has forked.
 * The child is m_lsys_krn_sys_fork.slot.
 */
#if defined(__i386__)
  char *old_fpu_save_area_p;
#endif
  register struct proc *rpc;		/* child process pointer */
  struct proc *rpp;			/* parent process pointer */
  int gen;
  int p_proc;
  int namelen;

  if(!isokendpt(m_ptr->m_lsys_krn_sys_fork.endpt, &p_proc))
	return EINVAL;

  rpp = proc_addr(p_proc);
  rpc = proc_addr(m_ptr->m_lsys_krn_sys_fork.slot);
  if (isemptyp(rpp) || !isemptyp(rpc)) return(EINVAL);

  assert(!(rpp->p_misc_flags & MF_DELIVERMSG));

  /* needs to be receiving so we know where the message buffer is */
  if(!RTS_ISSET(rpp, RTS_RECEIVING)) {
	printf("kernel: fork not done synchronously?\n");
	return EINVAL;
  }

  /* make sure that the FPU context is saved in parent before copy */
  save_fpu(rpp);
  /* Copy parent 'proc' struct to child. And reinitialize some fields. */
  gen = _ENDPOINT_G(rpc->p_endpoint);
#if defined(__i386__)
  old_fpu_save_area_p = rpc->p_seg.fpu_state;	/* remember the child's own FPU save area */
#endif
  *rpc = *rpp;				/* copy 'proc' struct */
#if defined(__i386__)
  rpc->p_seg.fpu_state = old_fpu_save_area_p;	/* the struct copy aliased the parent's area; undo that */
  if(proc_used_fpu(rpp))
	memcpy(rpc->p_seg.fpu_state, rpp->p_seg.fpu_state, FPU_XFP_SIZE);
#endif
  if(++gen >= _ENDPOINT_MAX_GENERATION)	/* increase generation */
	gen = 1;			/* generation number wraparound */
  rpc->p_nr = m_ptr->m_lsys_krn_sys_fork.slot;	/* this was obliterated by copy */
  rpc->p_endpoint = _ENDPOINT(gen, rpc->p_nr);	/* new endpoint of slot */

  rpc->p_reg.retreg = 0;	/* child sees pid = 0 to know it is child */
  rpc->p_user_time = 0;		/* set all the accounting times to 0 */
  rpc->p_sys_time = 0;

  rpc->p_misc_flags &=
	~(MF_VIRT_TIMER | MF_PROF_TIMER | MF_SC_TRACE | MF_SPROF_SEEN | MF_STEP);
  rpc->p_virt_left = 0;		/* disable, clear the process-virtual timers */
  rpc->p_prof_left = 0;

  /* Mark process name as being a forked copy */
  namelen = strlen(rpc->p_name);
#define FORKSTR "*F"
  if(namelen+strlen(FORKSTR) < sizeof(rpc->p_name))
	strcat(rpc->p_name, FORKSTR);

  /* the child process is not runnable until it's scheduled. */
  RTS_SET(rpc, RTS_NO_QUANTUM);
  reset_proc_accounting(rpc);

  rpc->p_cpu_time_left = 0;
  rpc->p_cycles = 0;
  rpc->p_kcall_cycles = 0;
  rpc->p_kipc_cycles = 0;
  rpc->p_signal_received = 0;

  /* If the parent is a privileged process, take away the privileges from the 
   * child process and inhibit it from running by setting the NO_PRIV flag.
   * The caller should explicitly set the new privileges before executing.
   */
  if (priv(rpp)->s_flags & SYS_PROC) {
      rpc->p_priv = priv_addr(USER_PRIV_ID);
      rpc->p_rts_flags |= RTS_NO_PRIV;
  }

  /* Calculate endpoint identifier, so caller knows what it is. */
  m_ptr->m_krn_lsys_sys_fork.endpt = rpc->p_endpoint;
  m_ptr->m_krn_lsys_sys_fork.msgaddr = rpp->p_delivermsg_vir;

  /* Don't schedule process in VM mode until it has a new pagetable. */
  if(m_ptr->m_lsys_krn_sys_fork.flags & PFF_VMINHIBIT) {
  	RTS_SET(rpc, RTS_VMINHIBIT);
  }

  /* 
   * Only one in group should have RTS_SIGNALED, child doesn't inherit tracing.
   */
  RTS_UNSET(rpc, (RTS_SIGNALED | RTS_SIG_PENDING | RTS_P_STOP));
  (void) sigemptyset(&rpc->p_pending);

#if defined(__i386__)
  rpc->p_seg.p_cr3 = 0;
  rpc->p_seg.p_cr3_v = NULL;
#elif defined(__arm__)
  rpc->p_seg.p_ttbr = 0;
  rpc->p_seg.p_ttbr_v = NULL;
#endif

  return OK;
}
Example #9
File: trap.c Project: OPSF/freebsd
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (!(td->td_pcb->pcb_flags & PCB_FPU))
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
#ifdef AIM
			if (frame->srr1 & EXC_PGM_TRAP) {
#else
			if (frame->cpu.booke.esr & ESR_PTR) {
#endif
#ifdef KDTRACE_HOOKS
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
 				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				sig = ppc_instr_emulate(frame, td->td_pcb);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
 				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
 				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode;	/* XXX, not POSIX */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}
Example #10
/*
 * Finish a fork operation, with process p2 nearly set up.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct trapframe *tf;
	struct callframe *cf;
	struct switchframe *sf;
	caddr_t stktop1, stktop2;
	extern void fork_trampoline(void);
	struct pcb *pcb = &p2->p_addr->u_pcb;
	struct cpu_info *ci = curcpu();

	if (p1 == ci->ci_fpuproc)
		save_fpu();
	*pcb = p1->p_addr->u_pcb;
	
#ifdef ALTIVEC
	if (p1->p_addr->u_pcb.pcb_vr != NULL) {
		if (p1 == ci->ci_vecproc)
			save_vec(p1);
		pcb->pcb_vr = pool_get(&ppc_vecpl, PR_WAITOK);
		*pcb->pcb_vr = *p1->p_addr->u_pcb.pcb_vr;
	} else
		pcb->pcb_vr = NULL;

#endif /* ALTIVEC */

	pcb->pcb_pm = p2->p_vmspace->vm_map.pmap;

	pmap_extract(pmap_kernel(),
	    (vaddr_t)pcb->pcb_pm, (paddr_t *)&pcb->pcb_pmreal);
	
	/*
	 * Setup the trap frame for the new process
	 */
	stktop1 = (caddr_t)trapframe(p1);
	stktop2 = (caddr_t)trapframe(p2);
	bcopy(stktop1, stktop2, sizeof(struct trapframe));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL) {
		tf = trapframe(p2);
		tf->fixreg[1] = (register_t)stack + stacksize;
	}

	stktop2 = (caddr_t)((u_long)stktop2 & ~15);  /* Align stack pointer */
	
	/*
	 * There happens to be a callframe, too.
	 */
	cf = (struct callframe *)stktop2;
	cf->lr = (int)fork_trampoline;
	
	/*
	 * Below the trap frame, there is another call frame:
	 */
	stktop2 -= 16;
	cf = (struct callframe *)stktop2;
	cf->r31 = (register_t)func;
	cf->r30 = (register_t)arg;
	
	/*
	 * Below that, we allocate the switch frame:
	 */
	/* must match SFRAMELEN in genassym */
	stktop2 -= roundup(sizeof *sf, 16);

	sf = (struct switchframe *)stktop2;
	bzero((void *)sf, sizeof *sf);		/* just in case */
	sf->sp = (int)cf;
	sf->user_sr = pmap_kernel()->pm_sr[PPC_USER_SR]; /* just in case */
	pcb->pcb_sp = (int)stktop2;
}
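The net result is a child kernel stack with three frames layered under the copied trapframe; a hedged sketch of the layout this code builds (highest address first):

/*
 * Child kernel stack after cpu_fork():
 *
 *   trapframe      copy of the parent's register state (fixreg[1] may
 *                  point at the new user stack)
 *   callframe #1   lr = fork_trampoline, so the first "return" enters it
 *   callframe #2   r31 = func, r30 = arg (16 bytes below the first)
 *   switchframe    sp = &callframe #2; pcb_sp records this address
 *
 * When the child is first switched to, the switchframe is popped and
 * control reaches fork_trampoline, which calls func(arg).
 */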
Example #11
/*===========================================================================*
 *			      do_sigsend				     *
 *===========================================================================*/
int do_sigsend(struct proc * caller, message * m_ptr)
{
/* Handle sys_sigsend, POSIX-style signal handling. */

  struct sigmsg smsg;
  register struct proc *rp;
  struct sigcontext sc, *scp;
  struct sigframe fr, *frp;
  int proc_nr, r;

  if (!isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Get the sigmsg structure into our address space.  */
  if((r=data_copy_vmcheck(caller, caller->p_endpoint,
		(vir_bytes) m_ptr->SIG_CTXT_PTR, KERNEL, (vir_bytes) &smsg,
		(phys_bytes) sizeof(struct sigmsg))) != OK)
	return r;

  /* Compute the user stack pointer where sigcontext will be stored. */
  smsg.sm_stkptr = arch_get_sp(rp);
  scp = (struct sigcontext *) smsg.sm_stkptr - 1;

  /* Copy the registers to the sigcontext structure. */
  memcpy(&sc.sc_regs, (char *) &rp->p_reg, sizeof(sigregs));

#if defined(__i386__)
  sc.trap_style = rp->p_seg.p_kern_trap_style;

  if(sc.trap_style == KTS_NONE) {
  	printf("do_sigsend: sigsend an unsaved process\n");
	return EINVAL;
  }

    if(proc_used_fpu(rp)) {
	    /* save the FPU context before saving it to the sig context */
	    save_fpu(rp);
	    memcpy(&sc.sc_fpu_state, rp->p_seg.fpu_state, FPU_XFP_SIZE);
    }
#endif

  /* Finish the sigcontext initialization. */
  sc.sc_mask = smsg.sm_mask;
  sc.sc_flags = rp->p_misc_flags & MF_FPU_INITIALIZED;

  /* Copy the sigcontext structure to the user's stack. */
  if((r=data_copy_vmcheck(caller, KERNEL, (vir_bytes) &sc, m_ptr->SIG_ENDPT,
	(vir_bytes) scp, (vir_bytes) sizeof(struct sigcontext))) != OK)
      return r;

  /* Initialize the sigframe structure. */
  frp = (struct sigframe *) scp - 1;
  fr.sf_scpcopy = scp;
  fr.sf_retadr2= (void (*)()) rp->p_reg.pc;
  fr.sf_fp = rp->p_reg.fp;
  rp->p_reg.fp = (reg_t) &frp->sf_fp;
  fr.sf_scp = scp;

  fpu_sigcontext(rp, &fr, &sc);

  fr.sf_signo = smsg.sm_signo;
  fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;

#if defined(__arm__)
  /* use the ARM link register to set the return address from the signal
   * handler
   */
  rp->p_reg.lr = (reg_t) fr.sf_retadr;
  if(rp->p_reg.lr & 1) { printf("sigsend: LSB LR makes no sense.\n"); }

  /* pass signal handler parameters in registers */
  rp->p_reg.retreg = (reg_t) fr.sf_signo;
  rp->p_reg.r1 = (reg_t) fr.sf_code;
  rp->p_reg.r2 = (reg_t) fr.sf_scp;
  rp->p_misc_flags |= MF_CONTEXT_SET;
#endif

  /* Copy the sigframe structure to the user's stack. */
  if((r=data_copy_vmcheck(caller, KERNEL, (vir_bytes) &fr,
	m_ptr->SIG_ENDPT, (vir_bytes) frp, 
      (vir_bytes) sizeof(struct sigframe))) != OK)
      return r;

  /* Reset user registers to execute the signal handler. */
  rp->p_reg.sp = (reg_t) frp;
  rp->p_reg.pc = (reg_t) smsg.sm_sighandler;

  /* Signal handler should get clean FPU. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

  if(!RTS_ISSET(rp, RTS_PROC_STOP)) {
	printf("system: warning: sigsend a running process\n");
	printf("caller stack: ");
	proc_stacktrace(caller);
  }

  return(OK);
}
Example #12
/* Task Switch occurs here */
Registers_t *_ThreadingSwitch(Registers_t *Regs, int PreEmptive, uint32_t *TimeSlice, 
							 uint32_t *TaskPriority)
{
	/* We'll need these */
	Cpu_t Cpu;
	MCoreThread_t *mThread;
	x86Thread_t *tx86;

	/* Sanity */
	if (ThreadingIsEnabled() != 1)
		return Regs;

	/* Get CPU */
	Cpu = ApicGetCpu();

	/* Get thread */
	mThread = ThreadingGetCurrentThread(Cpu);

	/* What the f**k?? */
	assert(mThread != NULL && Regs != NULL);

	/* Cast */
	tx86 = (x86Thread_t*)mThread->ThreadData;

	/* Save FPU/MMX/SSE State */
	if (tx86->Flags & X86_THREAD_USEDFPU)
		save_fpu(tx86->FpuBuffer);

	/* Save stack */
	if (mThread->Flags & THREADING_USERMODE)
		tx86->UserContext = Regs;
	else
		tx86->Context = Regs;

	/* Switch */
	mThread = ThreadingSwitch(Cpu, mThread, (uint8_t)PreEmptive);
	tx86 = (x86Thread_t*)mThread->ThreadData;

	/* Update user variables */
	*TimeSlice = mThread->TimeSlice;
	*TaskPriority = mThread->Priority;

	/* Update Addressing Space */
	MmVirtualSwitchPageDirectory(Cpu, 
		(PageDirectory_t*)mThread->AddrSpace->PageDirectory, mThread->AddrSpace->Cr3);

	/* Set TSS */
	TssUpdateStack(Cpu, (Addr_t)tx86->Context);

	/* Finish Transition */
	if (mThread->Flags & THREADING_TRANSITION)
	{
		mThread->Flags &= ~THREADING_TRANSITION;
		mThread->Flags |= THREADING_USERMODE;
	}

	/* Clear FPU/MMX/SSE */
	tx86->Flags &= ~X86_THREAD_USEDFPU;

	/* Set TS bit in CR0 */
	set_ts();

	/* Return new stack */
	if (mThread->Flags & THREADING_USERMODE)
		return tx86->UserContext;
	else
		return tx86->Context;
}
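set_ts() arms the lazy-FPU trap: the first FPU/MMX/SSE instruction the next thread executes raises a device-not-available (#NM) fault, whose handler clears CR0.TS, reloads the saved buffer, and re-flags the thread. A minimal sketch of such a handler, where clear_ts() and load_fpu() are assumed counterparts of the set_ts()/save_fpu() used above:

/* Hypothetical #NM handler; clear_ts() and load_fpu() are assumptions. */
void ExceptionDeviceNotAvailable(Registers_t *Regs)
{
	/* Locate the thread that faulted */
	Cpu_t Cpu = ApicGetCpu();
	MCoreThread_t *mThread = ThreadingGetCurrentThread(Cpu);
	x86Thread_t *tx86 = (x86Thread_t*)mThread->ThreadData;

	/* Allow FPU instructions again and bring this thread's state back */
	clear_ts();
	load_fpu(tx86->FpuBuffer);

	/* Mark the state dirty so the next _ThreadingSwitch saves it */
	tx86->Flags |= X86_THREAD_USEDFPU;

	(void)Regs;
}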
Example #13
/*===========================================================================*
 *			      do_sigsend				     *
 *===========================================================================*/
PUBLIC int do_sigsend(struct proc * caller, message * m_ptr)
{
/* Handle sys_sigsend, POSIX-style signal handling. */

  struct sigmsg smsg;
  register struct proc *rp;
  struct sigcontext sc, *scp;
  struct sigframe fr, *frp;
  int proc_nr, r;

  if (!isokendpt(m_ptr->SIG_ENDPT, &proc_nr)) return(EINVAL);
  if (iskerneln(proc_nr)) return(EPERM);
  rp = proc_addr(proc_nr);

  /* Get the sigmsg structure into our address space.  */
  if((r=data_copy_vmcheck(caller, caller->p_endpoint,
		(vir_bytes) m_ptr->SIG_CTXT_PTR, KERNEL, (vir_bytes) &smsg,
		(phys_bytes) sizeof(struct sigmsg))) != OK)
	return r;

  /* Compute the user stack pointer where sigcontext will be stored. */
  scp = (struct sigcontext *) smsg.sm_stkptr - 1;

  /* Copy the registers to the sigcontext structure. */
  memcpy(&sc.sc_regs, (char *) &rp->p_reg, sizeof(sigregs));
  #if (_MINIX_CHIP == _CHIP_INTEL)
    if(proc_used_fpu(rp)) {
	    /* save the FPU context before saving it to the sig context */
	    save_fpu(rp);
	    memcpy(&sc.sc_fpu_state, rp->p_fpu_state.fpu_save_area_p,
	   	 FPU_XFP_SIZE);
    }
  #endif

  /* Finish the sigcontext initialization. */
  sc.sc_mask = smsg.sm_mask;
  sc.sc_flags = rp->p_misc_flags & MF_FPU_INITIALIZED;

  /* Copy the sigcontext structure to the user's stack. */
  if((r=data_copy_vmcheck(caller, KERNEL, (vir_bytes) &sc, m_ptr->SIG_ENDPT,
	(vir_bytes) scp, (vir_bytes) sizeof(struct sigcontext))) != OK)
      return r;

  /* Initialize the sigframe structure. */
  frp = (struct sigframe *) scp - 1;
  fr.sf_scpcopy = scp;
  fr.sf_retadr2= (void (*)()) rp->p_reg.pc;
  fr.sf_fp = rp->p_reg.fp;
  rp->p_reg.fp = (reg_t) &frp->sf_fp;
  fr.sf_scp = scp;

  fpu_sigcontext(rp, &fr, &sc);

  fr.sf_signo = smsg.sm_signo;
  fr.sf_retadr = (void (*)()) smsg.sm_sigreturn;

  /* Copy the sigframe structure to the user's stack. */
  if((r=data_copy_vmcheck(caller, KERNEL, (vir_bytes) &fr,
	m_ptr->SIG_ENDPT, (vir_bytes) frp, 
      (vir_bytes) sizeof(struct sigframe))) != OK)
      return r;

  /* Reset user registers to execute the signal handler. */
  rp->p_reg.sp = (reg_t) frp;
  rp->p_reg.pc = (reg_t) smsg.sm_sighandler;

  /* Signal handler should get clean FPU. */
  rp->p_misc_flags &= ~MF_FPU_INITIALIZED;

  if(!RTS_ISSET(rp, RTS_PROC_STOP)) {
	printf("system: warning: sigsend a running process\n");
	printf("caller stack: ");
	proc_stacktrace(caller);
  }

  return(OK);
}