/*
 * Post-load hook invoked after a kernel module's text has been copied in.
 *
 * The pmap code does not do an icache sync upon establishing executable
 * mappings in the kernel pmap. It's an optimization based on the fact
 * that kernel memory allocations always have EXECUTABLE protection even
 * when the memory isn't going to hold executable code. The only time
 * kernel memory holding instructions does need a sync is after loading
 * a kernel module, and that's when this function gets called. Normal
 * data cache maintenance has already been done by the IO code, and TLB
 * maintenance has been done by the pmap code, so all we have to do here
 * is invalidate the instruction cache (which also invalidates the
 * branch predictor cache on platforms that have one).
 *
 * Always returns 0 (success); the linker file argument is unused.
 */
int elf_cpu_load_file(linker_file_t lf __unused) {
	cpu_icache_sync_all();
	return (0);
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below. After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user specified pc.
 *
 * catcher     - user-mode handler entry point the trapframe PC is set to
 * sig         - signal number being delivered
 * returnmask  - signal mask to restore on sigreturn (saved in the context)
 * code/type/val - siginfo details, copied out only if SA_SIGINFO-style
 *                 delivery is requested for this signal
 */
void sendsig(sig_t catcher, int sig, int returnmask, u_long code, int type,
    union sigval val)
{
	struct proc *p = curproc;
	struct trapframe *tf;
	struct sigframe *fp, frame;	/* fp: user-space frame addr; frame: kernel copy */
	struct sigacts *psp = p->p_sigacts;
	/* Nonzero if we are already executing on the alternate signal stack. */
	int oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
	int onstack = 0;

	tf = process_frame(p);

	/* Do we need to jump onto the signal stack? */
	/* Allocate space for the signal handler context. */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		/* Switch to the top of the alternate stack for this frame. */
		onstack = 1;
		fp = (struct sigframe *)((caddr_t)psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size);
	} else
		/* Otherwise build the frame on the current user stack. */
		fp = (struct sigframe *)tf->tf_usr_sp;

	/* make room on the stack */
	fp--;

	/* make the stack aligned */
	fp = (void *)STACKALIGN(fp);

	/* Build stack frame for signal trampoline. */
	frame.sf_signum = sig;
	frame.sf_sip = NULL;	/* filled in below only for siginfo delivery */
	frame.sf_scp = &fp->sf_sc;
	frame.sf_handler = catcher;

	/* Save register context so sigreturn can restore the full CPU state. */
	frame.sf_sc.sc_r0 = tf->tf_r0;
	frame.sf_sc.sc_r1 = tf->tf_r1;
	frame.sf_sc.sc_r2 = tf->tf_r2;
	frame.sf_sc.sc_r3 = tf->tf_r3;
	frame.sf_sc.sc_r4 = tf->tf_r4;
	frame.sf_sc.sc_r5 = tf->tf_r5;
	frame.sf_sc.sc_r6 = tf->tf_r6;
	frame.sf_sc.sc_r7 = tf->tf_r7;
	frame.sf_sc.sc_r8 = tf->tf_r8;
	frame.sf_sc.sc_r9 = tf->tf_r9;
	frame.sf_sc.sc_r10 = tf->tf_r10;
	frame.sf_sc.sc_r11 = tf->tf_r11;
	frame.sf_sc.sc_r12 = tf->tf_r12;
	frame.sf_sc.sc_usr_sp = tf->tf_usr_sp;
	frame.sf_sc.sc_usr_lr = tf->tf_usr_lr;
	frame.sf_sc.sc_svc_lr = tf->tf_svc_lr;
	frame.sf_sc.sc_pc = tf->tf_pc;
	frame.sf_sc.sc_spsr = tf->tf_spsr;

	/* Save signal stack. */
	frame.sf_sc.sc_onstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	frame.sf_sc.sc_mask = returnmask;

	/* Attach siginfo only if the handler registered for it. */
	if (psp->ps_siginfo & sigmask(sig)) {
		frame.sf_sip = &fp->sf_si;
		initsiginfo(&frame.sf_si, sig, code, type, val);
	}

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Build context to run handler in. We invoke the handler
	 * directly, only returning via the trampoline. Note the
	 * trampoline version numbers are coordinated with machine-
	 * dependent code in libc.
	 */
	/*
	 * this was all in the switch below, seemed daft to duplicate it, if
	 * we do a new trampoline version it might change then
	 */
	tf->tf_r0 = sig;	/* ARM ABI: first three handler args in r0-r2 */
	tf->tf_r1 = (int)frame.sf_sip;
	tf->tf_r2 = (int)frame.sf_scp;
	tf->tf_pc = (int)frame.sf_handler;
	tf->tf_usr_sp = (int)fp;
	/* Return address points at the userland sigcode trampoline. */
	tf->tf_usr_lr = (int)p->p_sigcode;
	/* XXX This should not be needed. */
	cpu_icache_sync_all();

	/* Remember that we're now on the signal stack. */
	if (onstack)
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
}