void process_pending_signals(void *cpu_env)
{
    struct emulated_sigaction *k;
    struct sigqueue *q;
    target_ulong handler;
    int sig;

    if (!signal_pending)
        return;

    k = sigact_table;
    for (sig = 1; sig <= NSIG; sig++) {
        if (k->pending)
            goto handle_signal;
        k++;
    }
    /* if no signal is pending, just return */
    signal_pending = 0;
    return;

 handle_signal:
#ifdef DEBUG_SIGNAL
    fprintf(stderr, "qemu: process signal %d\n", sig);
#endif
    /* dequeue signal */
    q = k->first;
    k->first = q->next;
    if (!k->first)
        k->pending = 0;

    sig = gdb_handlesig(cpu_env, sig);
    if (!sig) {
        fprintf(stderr, "Lost signal\n");
        abort();
    }

    handler = k->sa.sa_handler;
    if (handler == SIG_DFL) {
        /* default handler: ignore some signals. The others are fatal. */
        if (sig != SIGCHLD &&
            sig != SIGURG &&
            sig != SIGWINCH) {
            force_sig(sig);
        }
    } else if (handler == SIG_IGN) {
        /* ignore sig */
    } else if (handler == SIG_ERR) {
        force_sig(sig);
    } else {
        setup_frame(sig, k, 0, cpu_env);
        if (k->sa.sa_flags & SA_RESETHAND)
            k->sa.sa_handler = SIG_DFL;
    }

    if (q != &k->info)
        free_sigqueue(q);
}
/* ------------------------------------------------------------
   thread type syscall handling */

long do_thread_syscall(void *cpu_env, int num, uint32_t arg1, uint32_t arg2,
                       uint32_t arg3, uint32_t arg4, uint32_t arg5,
                       uint32_t arg6, uint32_t arg7, uint32_t arg8)
{
    extern uint32_t cthread_set_self(uint32_t);
    extern uint32_t processor_facilities_used(void);
    long ret = 0;

    arg1 = tswap32(arg1);
    arg2 = tswap32(arg2);
    arg3 = tswap32(arg3);
    arg4 = tswap32(arg4);
    arg5 = tswap32(arg5);
    arg6 = tswap32(arg6);
    arg7 = tswap32(arg7);
    arg8 = tswap32(arg8);

    DPRINTF("thread syscall %d : ", num);

    switch (num) {
#ifdef TARGET_I386
    case 0x3:
#endif
    case 0x7FF1: /* cthread_set_self */
        DPRINTF("cthread_set_self(0x%x)\n", (unsigned int)arg1);
        ret = cthread_set_self(arg1);
#ifdef TARGET_I386
        /* we need to update the LDT with the address of the thread */
        write_dt((void *)(((CPUX86State *)cpu_env)->ldt.base +
                          (4 * sizeof(uint64_t))),
                 arg1, 1,
                 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK | DESC_S_MASK |
                 (3 << DESC_DPL_SHIFT) | (0x2 << DESC_TYPE_SHIFT));
        /* New i386 convention: %gs should be set to this LDT entry */
        cpu_x86_load_seg(cpu_env, R_GS, 0x27);
        /* Old i386 convention: the kernel returns the selector for the
           cthread (pre-10.4.8?) */
        ret = 0x27;
#endif
        break;
    case 0x7FF2:
        /* Called the super-fast pthread_self handler by the apple guys */
        DPRINTF("pthread_self()\n");
        ret = (uint32_t)pthread_self();
        break;
    case 0x7FF3:
        DPRINTF("processor_facilities_used()\n");
#ifdef __i386__
        qerror("processor_facilities_used: not implemented!\n");
#else
        ret = (uint32_t)processor_facilities_used();
#endif
        break;
    default:
        gemu_log("qemu: Unsupported thread syscall: %d(0x%x)\n", num, num);
        gdb_handlesig(cpu_env, SIGTRAP);
        exit(0);
        break;
    }
    return ret;
}
long unimpl_unix_syscall(void *cpu_env, int num)
{
    if ((num < 0) || (num > SYS_MAXSYSCALL - 1))
        qerror("unix syscall %d is out of unix syscall bounds (0-%d) ",
               num, SYS_MAXSYSCALL - 1);

    gemu_log("qemu: Unsupported unix syscall %s %d\n",
             unix_syscall_table[num].name, num);
    gdb_handlesig(cpu_env, SIGTRAP);
    exit(-1);
}
void cpu_loop(CPUAlphaState *env)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    int trapnr;
    target_siginfo_t info;
    abi_long sysret;

    while (1) {
        bool arch_interrupt = true;

        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case EXCP_RESET:
            fprintf(stderr, "Reset requested. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_MCHK:
            fprintf(stderr, "Machine check exception. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_SMP_INTERRUPT:
        case EXCP_CLK_INTERRUPT:
        case EXCP_DEV_INTERRUPT:
            fprintf(stderr, "External interrupt. Exit\n");
            exit(EXIT_FAILURE);
            break;
        case EXCP_MMFAULT:
            info.si_signo = TARGET_SIGSEGV;
            info.si_errno = 0;
            info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
                            ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_UNALIGN:
            info.si_signo = TARGET_SIGBUS;
            info.si_errno = 0;
            info.si_code = TARGET_BUS_ADRALN;
            info._sifields._sigfault._addr = env->trap_arg0;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_OPCDEC:
        do_sigill:
            info.si_signo = TARGET_SIGILL;
            info.si_errno = 0;
            info.si_code = TARGET_ILL_ILLOPC;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_ARITH:
            info.si_signo = TARGET_SIGFPE;
            info.si_errno = 0;
            info.si_code = TARGET_FPE_FLTINV;
            info._sifields._sigfault._addr = env->pc;
            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            break;
        case EXCP_FEN:
            /* No-op.  Linux simply re-enables the FPU.  */
            break;
        case EXCP_CALL_PAL:
            switch (env->error_code) {
            case 0x80:
                /* BPT */
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case 0x81:
                /* BUGCHK */
                info.si_signo = TARGET_SIGTRAP;
                info.si_errno = 0;
                info.si_code = 0;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case 0x83:
                /* CALLSYS */
                trapnr = env->ir[IR_V0];
                sysret = do_syscall(env, trapnr,
                                    env->ir[IR_A0], env->ir[IR_A1],
                                    env->ir[IR_A2], env->ir[IR_A3],
                                    env->ir[IR_A4], env->ir[IR_A5],
                                    0, 0);
                if (sysret == -TARGET_ERESTARTSYS) {
                    env->pc -= 4;
                    break;
                }
                if (sysret == -TARGET_QEMU_ESIGRETURN) {
                    break;
                }
                /* Syscall writes 0 to V0 to bypass error check, similar
                   to how this is handled internal to Linux kernel.
                   (Ab)use trapnr temporarily as boolean indicating error.  */
                trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
                env->ir[IR_V0] = (trapnr ? -sysret : sysret);
                env->ir[IR_A3] = trapnr;
                break;
            case 0x86:
                /* IMB */
                /* ??? We can probably elide the code using page_unprotect
                   that is checking for self-modifying code.  Instead we
                   could simply call tb_flush here.  Until we work out the
                   changes required to turn off the extra write protection,
                   this can be a no-op.  */
                break;
            case 0x9E:
                /* RDUNIQUE */
                /* Handled in the translator for usermode.  */
                abort();
            case 0x9F:
                /* WRUNIQUE */
                /* Handled in the translator for usermode.  */
                abort();
            case 0xAA:
                /* GENTRAP */
                info.si_signo = TARGET_SIGFPE;
                switch (env->ir[IR_A0]) {
                case TARGET_GEN_INTOVF:
                    info.si_code = TARGET_FPE_INTOVF;
                    break;
                case TARGET_GEN_INTDIV:
                    info.si_code = TARGET_FPE_INTDIV;
                    break;
                case TARGET_GEN_FLTOVF:
                    info.si_code = TARGET_FPE_FLTOVF;
                    break;
                case TARGET_GEN_FLTUND:
                    info.si_code = TARGET_FPE_FLTUND;
                    break;
                case TARGET_GEN_FLTINV:
                    info.si_code = TARGET_FPE_FLTINV;
                    break;
                case TARGET_GEN_FLTINE:
                    info.si_code = TARGET_FPE_FLTRES;
                    break;
                case TARGET_GEN_ROPRAND:
                    info.si_code = 0;
                    break;
                default:
                    info.si_signo = TARGET_SIGTRAP;
                    info.si_code = 0;
                    break;
                }
                info.si_errno = 0;
                info._sifields._sigfault._addr = env->pc;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            default:
                goto do_sigill;
            }
            break;
        case EXCP_DEBUG:
            info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
            if (info.si_signo) {
                info.si_errno = 0;
                info.si_code = TARGET_TRAP_BRKPT;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            } else {
                arch_interrupt = false;
            }
            break;
        case EXCP_INTERRUPT:
            /* Just indicate that signals should be handled asap.  */
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            arch_interrupt = false;
            break;
        default:
            fprintf(stderr, "Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals(env);

        /* Most of the traps imply a transition through PALcode, which
           implies an REI instruction has been executed.  Which means
           that RX and LOCK_ADDR should be cleared.  But there are a
           few exceptions for traps internal to QEMU.  */
        if (arch_interrupt) {
            env->flags &= ~ENV_FLAG_RX_FLAG;
            env->lock_addr = -1;
        }
    }
}
void cpu_loop(CPUMBState *env)
{
    CPUState *cs = CPU(mb_env_get_cpu(env));
    int trapnr, ret;
    target_siginfo_t info;

    while (1) {
        cpu_exec_start(cs);
        trapnr = cpu_exec(cs);
        cpu_exec_end(cs);
        process_queued_cpu_work(cs);

        switch (trapnr) {
        case 0xaa:
            {
                info.si_signo = TARGET_SIGSEGV;
                info.si_errno = 0;
                /* XXX: check env->error_code */
                info.si_code = TARGET_SEGV_MAPERR;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
            }
            break;
        case EXCP_INTERRUPT:
            /* just indicate that signals should be handled asap */
            break;
        case EXCP_BREAK:
            /* Return address is 4 bytes after the call.  */
            env->regs[14] += 4;
            env->sregs[SR_PC] = env->regs[14];
            ret = do_syscall(env,
                             env->regs[12],
                             env->regs[5],
                             env->regs[6],
                             env->regs[7],
                             env->regs[8],
                             env->regs[9],
                             env->regs[10],
                             0, 0);
            if (ret == -TARGET_ERESTARTSYS) {
                /* Wind back to before the syscall. */
                env->sregs[SR_PC] -= 4;
            } else if (ret != -TARGET_QEMU_ESIGRETURN) {
                env->regs[3] = ret;
            }
            /* All syscall exits result in guest r14 being equal to the
             * PC we return to, because the kernel syscall exit "rtbd" does
             * this. (This is true even for sigreturn(); note that r14 is
             * not a userspace-usable register, as the kernel may clobber it
             * at any point.)
             */
            env->regs[14] = env->sregs[SR_PC];
            break;
        case EXCP_HW_EXCP:
            env->regs[17] = env->sregs[SR_PC] + 4;
            if (env->iflags & D_FLAG) {
                env->sregs[SR_ESR] |= 1 << 12;
                env->sregs[SR_PC] -= 4;
                /* FIXME: if branch was immed, replay the imm as well.  */
            }
            env->iflags &= ~(IMM_FLAG | D_FLAG);
            switch (env->sregs[SR_ESR] & 31) {
            case ESR_EC_DIVZERO:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                info.si_code = TARGET_FPE_FLTDIV;
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            case ESR_EC_FPU:
                info.si_signo = TARGET_SIGFPE;
                info.si_errno = 0;
                if (env->sregs[SR_FSR] & FSR_IO) {
                    info.si_code = TARGET_FPE_FLTINV;
                }
                if (env->sregs[SR_FSR] & FSR_DZ) {
                    info.si_code = TARGET_FPE_FLTDIV;
                }
                info._sifields._sigfault._addr = 0;
                queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                break;
            default:
                printf("Unhandled hw-exception: 0x%x\n",
                       env->sregs[SR_ESR] & ESR_EC_MASK);
                cpu_dump_state(cs, stderr, fprintf, 0);
                exit(EXIT_FAILURE);
                break;
            }
            break;
        case EXCP_DEBUG:
            {
                int sig;

                sig = gdb_handlesig(cs, TARGET_SIGTRAP);
                if (sig) {
                    info.si_signo = sig;
                    info.si_errno = 0;
                    info.si_code = TARGET_TRAP_BRKPT;
                    queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
                }
            }
            break;
        case EXCP_ATOMIC:
            cpu_exec_step_atomic(cs);
            break;
        default:
            printf("Unhandled trap: 0x%x\n", trapnr);
            cpu_dump_state(cs, stderr, fprintf, 0);
            exit(EXIT_FAILURE);
        }
        process_pending_signals(env);
    }
}
long do_mach_syscall(void *cpu_env, int num, uint32_t arg1, uint32_t arg2,
                     uint32_t arg3, uint32_t arg4, uint32_t arg5,
                     uint32_t arg6, uint32_t arg7, uint32_t arg8)
{
    extern uint32_t mach_reply_port();
    long ret = 0;

    arg1 = tswap32(arg1);
    arg2 = tswap32(arg2);
    arg3 = tswap32(arg3);
    arg4 = tswap32(arg4);
    arg5 = tswap32(arg5);
    arg6 = tswap32(arg6);
    arg7 = tswap32(arg7);
    arg8 = tswap32(arg8);

    DPRINTF("mach syscall %d : ", num);

    switch (num) {
    /* see xnu/osfmk/mach/syscall_sw.h */
    case -26:
        DPRINTF("mach_reply_port()\n");
        ret = mach_reply_port();
        break;
    case -27:
        DPRINTF("mach_thread_self()\n");
        ret = mach_thread_self();
        break;
    case -28:
        DPRINTF("mach_task_self()\n");
        ret = mach_task_self();
        break;
    case -29:
        DPRINTF("mach_host_self()\n");
        ret = mach_host_self();
        break;
    case -31:
        DPRINTF("mach_msg_trap(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
                arg1, arg2, arg3, arg4, arg5, arg6, arg7);
        ret = target_mach_msg_trap((mach_msg_header_t *)arg1, arg2, arg3,
                                   arg4, arg5, arg6, arg7);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -33:
        DPRINTF("semaphore_signal_trap(0x%x)\n", arg1);
        ret = semaphore_signal_trap(arg1);
        break;
    case -34:
        DPRINTF("semaphore_signal_all_trap(0x%x)\n", arg1);
        ret = semaphore_signal_all_trap(arg1);
        break;
    case -35:
        DPRINTF("semaphore_signal_thread_trap(0x%x, 0x%x)\n", arg1, arg2);
        ret = semaphore_signal_thread_trap(arg1, arg2);
        break;
#endif
    case -36:
        DPRINTF("semaphore_wait_trap(0x%x)\n", arg1);
        extern int semaphore_wait_trap(int); /* XXX: is there any header for that? */
        ret = semaphore_wait_trap(arg1);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -37:
        DPRINTF("semaphore_wait_signal_trap(0x%x, 0x%x)\n", arg1, arg2);
        ret = semaphore_wait_signal_trap(arg1, arg2);
        break;
#endif
    case -43:
        DPRINTF("map_fd(0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
                arg1, arg2, arg3, arg4, arg5);
        ret = map_fd(arg1, arg2, (void *)arg3, arg4, arg5);
        tswap32s((uint32_t *)arg3);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -61:
        DPRINTF("syscall_thread_switch(0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3);
        /* just a hint to the scheduler; can drop? */
        ret = syscall_thread_switch(arg1, arg2, arg3);
        break;
#endif
    case -89:
        DPRINTF("mach_timebase_info(0x%x)\n", arg1);
        struct mach_timebase_info info;
        ret = mach_timebase_info(&info);
        if (!is_error(ret)) {
            struct mach_timebase_info *outInfo = (void *)arg1;
            outInfo->numer = tswap32(info.numer);
            outInfo->denom = tswap32(info.denom);
        }
        break;
    case -90:
        DPRINTF("mach_wait_until()\n");
        extern int mach_wait_until(uint64_t); /* XXX: is there any header for that? */
        ret = mach_wait_until(((uint64_t)arg2 << 32) | (uint64_t)arg1);
        break;
    case -91:
        DPRINTF("mk_timer_create()\n");
        extern int mk_timer_create(); /* XXX: is there any header for that? */
        ret = mk_timer_create();
        break;
    case -92:
        DPRINTF("mk_timer_destroy()\n");
        extern int mk_timer_destroy(int); /* XXX: is there any header for that? */
        ret = mk_timer_destroy(arg1);
        break;
    case -93:
        DPRINTF("mk_timer_arm()\n");
        extern int mk_timer_arm(int, uint64_t); /* XXX: is there any header for that? */
        ret = mk_timer_arm(arg1, ((uint64_t)arg3 << 32) | (uint64_t)arg2);
        break;
    case -94:
        DPRINTF("mk_timer_cancel()\n");
        extern int mk_timer_cancel(int, uint64_t *); /* XXX: is there any header for that? */
        ret = mk_timer_cancel(arg1, (uint64_t *)arg2);
        if ((!is_error(ret)) && arg2)
            tswap64s((uint64_t *)arg2);
        break;
    default:
        gemu_log("qemu: Unsupported mach syscall: %d(0x%x)\n", num, num);
        gdb_handlesig(cpu_env, SIGTRAP);
        exit(0);
        break;
    }
    return ret;
}