/* Execute exactly one Blackfin insn at the current PC and return the
   address of the insn just executed (the old PC).  Also handles trace
   prefixing, hardware single-step emulation (SYSCFG SSSTEP), the
   zero-overhead hardware-loop counters, and the profiling counter.  */
static sim_cia
step_once (SIM_CPU *cpu)
{
  SIM_DESC sd = CPU_STATE (cpu);
  bu32 insn_len, oldpc = PCREG;
  int i;
  bool ssstep;

  if (TRACE_ANY_P (cpu))
    trace_prefix (sd, cpu, NULL_CIA, oldpc, TRACE_LINENUM_P (cpu),
                  NULL, 0, " "); /* Use a space for gcc warnings.  */

  /* Handle hardware single stepping when lower than EVT3, and when
     SYSCFG has already had the SSSTEP bit enabled.  Latch the decision
     now so the check below uses the pre-execution IVG level.  */
  ssstep = false;
  if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT
      && (SYSCFGREG & SYSCFG_SSSTEP))
    {
      int ivg = cec_get_ivg (cpu);
      if (ivg == -1 || ivg > 3)
        ssstep = true;
    }

#if 0
  /* XXX: Is this what happens on the hardware ?  */
  if (cec_get_ivg (cpu) == EVT_EMU)
    cec_return (cpu, EVT_EMU);
#endif

  BFIN_CPU_STATE.did_jump = false;

  insn_len = interp_insn_bfin (cpu, oldpc);

  /* If we executed this insn successfully, then we always decrement
     the loop counter.  We don't want to update the PC though if the
     last insn happened to be a change in code flow (jump/etc...).  */
  if (!BFIN_CPU_STATE.did_jump)
    SET_PCREG (hwloop_get_next_pc (cpu, oldpc, insn_len));
  /* Walk both hardware loops (inner one first, i == 1) and decrement
     the loop counter whose loop-bottom matches the insn we just ran.  */
  for (i = 1; i >= 0; --i)
    if (LCREG (i) && oldpc == LBREG (i))
      {
        SET_LCREG (i, LCREG (i) - 1);
        if (LCREG (i))
          break;
      }

  ++ PROFILE_TOTAL_INSN_COUNT (CPU_PROFILE_DATA (cpu));

  /* Handle hardware single stepping only if we're still lower than EVT3.
     XXX: May not be entirely correct wrt EXCPT insns.  */
  if (ssstep)
    {
      int ivg = cec_get_ivg (cpu);
      if (ivg == -1 || ivg > 3)
        {
          INSN_LEN = 0;
          cec_exception (cpu, VEC_STEP);
        }
    }

  return oldpc;
}
/* Handle a program interrupt or a software interrupt in non-operating mode.

   Without an OS, nearly all interrupt kinds are unserviceable, so they
   abort the simulation with a message naming the interrupt.  The two
   exceptions are the external interrupt levels, which abort with their
   level number, and FRV_TRAP_INSTRUCTION, which is handled exactly as
   in operating mode and therefore returns without aborting.  */
void
frv_non_operating_interrupt (SIM_CPU *current_cpu,
                             enum frv_interrupt_kind kind,
                             IADDR pc)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  const char *msg = NULL;

  switch (kind)
    {
    case FRV_INTERRUPT_LEVEL_1:
    case FRV_INTERRUPT_LEVEL_2:
    case FRV_INTERRUPT_LEVEL_3:
    case FRV_INTERRUPT_LEVEL_4:
    case FRV_INTERRUPT_LEVEL_5:
    case FRV_INTERRUPT_LEVEL_6:
    case FRV_INTERRUPT_LEVEL_7:
    case FRV_INTERRUPT_LEVEL_8:
    case FRV_INTERRUPT_LEVEL_9:
    case FRV_INTERRUPT_LEVEL_10:
    case FRV_INTERRUPT_LEVEL_11:
    case FRV_INTERRUPT_LEVEL_12:
    case FRV_INTERRUPT_LEVEL_13:
    case FRV_INTERRUPT_LEVEL_14:
    case FRV_INTERRUPT_LEVEL_15:
      sim_engine_abort (sd, current_cpu, pc, "interrupt: external %d\n",
                        kind + 1);
      break;
    case FRV_TRAP_INSTRUCTION:
      /* Handled as in operating mode.  */
      return;
    case FRV_COMMIT_EXCEPTION:
      msg = "interrupt: commit_exception\n";
      break;
    case FRV_DIVISION_EXCEPTION:
      msg = "interrupt: division_exception\n";
      break;
    case FRV_DATA_STORE_ERROR:
      msg = "interrupt: data_store_error\n";
      break;
    case FRV_DATA_ACCESS_EXCEPTION:
      msg = "interrupt: data_access_exception\n";
      break;
    case FRV_DATA_ACCESS_MMU_MISS:
      msg = "interrupt: data_access_mmu_miss\n";
      break;
    case FRV_DATA_ACCESS_ERROR:
      msg = "interrupt: data_access_error\n";
      break;
    case FRV_MP_EXCEPTION:
      msg = "interrupt: mp_exception\n";
      break;
    case FRV_FP_EXCEPTION:
      msg = "interrupt: fp_exception\n";
      break;
    case FRV_MEM_ADDRESS_NOT_ALIGNED:
      msg = "interrupt: mem_address_not_aligned\n";
      break;
    case FRV_REGISTER_EXCEPTION:
      msg = "interrupt: register_exception\n";
      break;
    case FRV_MP_DISABLED:
      msg = "interrupt: mp_disabled\n";
      break;
    case FRV_FP_DISABLED:
      msg = "interrupt: fp_disabled\n";
      break;
    case FRV_PRIVILEGED_INSTRUCTION:
      msg = "interrupt: privileged_instruction\n";
      break;
    case FRV_ILLEGAL_INSTRUCTION:
      msg = "interrupt: illegal_instruction\n";
      break;
    case FRV_INSTRUCTION_ACCESS_EXCEPTION:
      msg = "interrupt: instruction_access_exception\n";
      break;
    case FRV_INSTRUCTION_ACCESS_MMU_MISS:
      msg = "interrupt: instruction_access_mmu_miss\n";
      break;
    case FRV_INSTRUCTION_ACCESS_ERROR:
      msg = "interrupt: insn_access_error\n";
      break;
    case FRV_COMPOUND_EXCEPTION:
      msg = "interrupt: compound_exception\n";
      break;
    case FRV_BREAK_EXCEPTION:
      msg = "interrupt: break_exception\n";
      break;
    case FRV_RESET:
      msg = "interrupt: reset\n";
      break;
    default:
      sim_engine_abort (sd, current_cpu, pc, "unhandled interrupt kind: %d\n",
                        kind);
      break;
    }

  /* All remaining kinds are fatal in non-operating mode.  */
  if (msg != NULL)
    sim_engine_abort (sd, current_cpu, pc, "%s", msg);
}
/* Emit an xsim-style trace line (enabled by
   FLAG_CRIS_MISC_PROFILE_XSIM_TRACE) before executing the next insn:
   PC, R0..R14, a flag string, the basic cycle delta for the previous
   insn, and (for v32, BASENUM == 32) R15/ACR.  */
void
MY (f_model_insn_before) (SIM_CPU *current_cpu, int first_p ATTRIBUTE_UNUSED)
{
  /* To give the impression that we actually know what PC is, we have to
     dump register contents *before* the *next* insn, not after the
     *previous* insn.  Uhh...  */

  /* FIXME: Move this to separate, overridable function.  */
  if ((CPU_CRIS_MISC_PROFILE (current_cpu)->flags
       & FLAG_CRIS_MISC_PROFILE_XSIM_TRACE)
#ifdef GET_H_INSN_PREFIXED_P
      /* For versions with prefixed insns, trace the combination as
         one insn.  */
      && !GET_H_INSN_PREFIXED_P ()
#endif
      && 1)
    {
      int i;
      char flags[7];
      unsigned64 cycle_count;

      SIM_DESC sd = CPU_STATE (current_cpu);

      /* PC first, masked to 32 bits.  */
      cris_trace_printf (sd, current_cpu, "%lx ",
                         0xffffffffUL & (unsigned long) (CPU (h_pc)));

      /* General registers R0..R14; R15 is handled separately below.  */
      for (i = 0; i < 15; i++)
        cris_trace_printf (sd, current_cpu, "%lx ",
                           0xffffffffUL
                           & (unsigned long) (XCONCAT3(crisv,BASENUM,
                                                       f_h_gr_get)
                                              (current_cpu, i)));

      /* Condition/status flags: upper-case when set.  */
      flags[0] = GET_H_IBIT () != 0 ? 'I' : 'i';
      flags[1] = GET_H_XBIT () != 0 ? 'X' : 'x';
      flags[2] = GET_H_NBIT () != 0 ? 'N' : 'n';
      flags[3] = GET_H_ZBIT () != 0 ? 'Z' : 'z';
      flags[4] = GET_H_VBIT () != 0 ? 'V' : 'v';
      flags[5] = GET_H_CBIT () != 0 ? 'C' : 'c';
      flags[6] = 0;

      /* For anything else than basic tracing we'd add stall cycles for
         e.g. unaligned accesses.  FIXME: add --cris-trace=x options to
         match --cris-cycles=x.  */
      cycle_count
        = (CPU_CRIS_MISC_PROFILE (current_cpu)->basic_cycle_count
           - CPU_CRIS_PREV_MISC_PROFILE (current_cpu)->basic_cycle_count);

      /* Emit ACR after flags and cycle count for this insn.  */
      if (BASENUM == 32)
        cris_trace_printf (sd, current_cpu, "%s %d %lx\n", flags,
                           (int) cycle_count,
                           0xffffffffUL
                           & (unsigned long) (XCONCAT3(crisv,BASENUM,
                                                       f_h_gr_get)
                                              (current_cpu, 15)));
      else
        cris_trace_printf (sd, current_cpu, "%s %d\n", flags,
                           (int) cycle_count);

      /* Snapshot the profile so the next line's cycle delta is relative
         to this insn.  */
      CPU_CRIS_PREV_MISC_PROFILE (current_cpu)[0]
        = CPU_CRIS_MISC_PROFILE (current_cpu)[0];
    }
}
/* Emulate a system call for the Blackfin target.

   In USER_ENVIRONMENT (Linux userspace emulation) the syscall number
   comes from P0 and the arguments from R0..R5.  Otherwise (libgloss)
   P0 holds the number and R0 points at a block of six longs in target
   memory holding the arguments.

   Most syscalls are delegated to the common cb_syscall() machinery;
   the switch below implements or wraps the ones needing special
   handling.  The result and errno are written back to the D registers
   per the respective ABI, and a human-readable trace of the call is
   accumulated in _tbuf for TRACE_SYSCALL.

   Fix: in CB_SYS_gettimeofday the timezone result was written to the
   timeval address (sc.arg1), clobbering the seconds/microseconds just
   stored and leaving the caller's timezone buffer untouched; it now
   goes to sc.arg2 as the ABI requires.  */
void
bfin_syscall (SIM_CPU *cpu)
{
  SIM_DESC sd = CPU_STATE (cpu);
  const char * const *argv = (void *)STATE_PROG_ARGV (sd);
  host_callback *cb = STATE_CALLBACK (sd);
  bu32 args[6];
  CB_SYSCALL sc;
  char *p;
  char _tbuf[1024 * 3], *tbuf = _tbuf, tstr[1024];
  int fmt_ret_hex = 0;

  CB_SYSCALL_INIT (&sc);

  if (STATE_ENVIRONMENT (sd) == USER_ENVIRONMENT)
    {
      /* Linux syscall: number in P0, args in R0..R5.  */
      sc.func = PREG (0);
      sc.arg1 = args[0] = DREG (0);
      sc.arg2 = args[1] = DREG (1);
      sc.arg3 = args[2] = DREG (2);
      sc.arg4 = args[3] = DREG (3);
      /*sc.arg5 =*/ args[4] = DREG (4);
      /*sc.arg6 =*/ args[5] = DREG (5);
    }
  else
    {
      /* libgloss syscall: number in P0, R0 points at the arg block.  */
      sc.func = PREG (0);
      sc.arg1 = args[0] = GET_LONG (DREG (0));
      sc.arg2 = args[1] = GET_LONG (DREG (0) + 4);
      sc.arg3 = args[2] = GET_LONG (DREG (0) + 8);
      sc.arg4 = args[3] = GET_LONG (DREG (0) + 12);
      /*sc.arg5 =*/ args[4] = GET_LONG (DREG (0) + 16);
      /*sc.arg6 =*/ args[5] = GET_LONG (DREG (0) + 20);
    }
  sc.p1 = (PTR) sd;
  sc.p2 = (PTR) cpu;
  sc.read_mem = syscall_read_mem;
  sc.write_mem = syscall_write_mem;

  /* Common cb_syscall() handles most functions.  */
  switch (cb_target_to_host_syscall (cb, sc.func))
    {
    case CB_SYS_exit:
      tbuf += sprintf (tbuf, "exit(%i)", args[0]);
      sim_engine_halt (sd, cpu, NULL, PCREG, sim_exited, sc.arg1);

#ifdef CB_SYS_argc
    case CB_SYS_argc:
      tbuf += sprintf (tbuf, "argc()");
      sc.result = count_argc (argv);
      break;
    case CB_SYS_argnlen:
      {
	tbuf += sprintf (tbuf, "argnlen(%u)", args[0]);
	if (sc.arg1 < count_argc (argv))
	  sc.result = strlen (argv[sc.arg1]);
	else
	  sc.result = -1;
      }
      break;
    case CB_SYS_argn:
      {
	tbuf += sprintf (tbuf, "argn(%u)", args[0]);
	if (sc.arg1 < count_argc (argv))
	  {
	    const char *argn = argv[sc.arg1];
	    int len = strlen (argn);
	    int written = sc.write_mem (cb, &sc, sc.arg2, argn, len + 1);
	    if (written == len + 1)
	      sc.result = sc.arg2;
	    else
	      sc.result = -1;
	  }
	else
	  sc.result = -1;
      }
      break;
#endif

    case CB_SYS_gettimeofday:
      {
	struct timeval _tv, *tv = &_tv;
	struct timezone _tz, *tz = &_tz;

	tbuf += sprintf (tbuf, "gettimeofday(%#x, %#x)", args[0], args[1]);

	/* A zero target pointer means the caller doesn't want that out
	   parameter.  */
	if (sc.arg1 == 0)
	  tv = NULL;
	if (sc.arg2 == 0)
	  tz = NULL;
	sc.result = gettimeofday (tv, tz);
	if (sc.result == 0)
	  {
	    bu32 t;

	    if (tv)
	      {
		t = tv->tv_sec;
		sc.write_mem (cb, &sc, sc.arg1, (void *)&t, 4);
		t = tv->tv_usec;
		sc.write_mem (cb, &sc, sc.arg1 + 4, (void *)&t, 4);
	      }

	    if (sc.arg2)
	      {
		/* Write the timezone to the caller's second argument,
		   not on top of the timeval stored via arg1.  */
		t = tz->tz_minuteswest;
		sc.write_mem (cb, &sc, sc.arg2, (void *)&t, 4);
		t = tz->tz_dsttime;
		sc.write_mem (cb, &sc, sc.arg2 + 4, (void *)&t, 4);
	      }
	  }
	else
	  goto sys_finish;
      }
      break;

    case CB_SYS_ioctl:
      /* XXX: hack just enough to get basic stdio w/uClibc ...  */
      tbuf += sprintf (tbuf, "ioctl(%i, %#x, %u)", args[0], args[1], args[2]);
      if (sc.arg2 == 0x5401)
	{
	  sc.result = !isatty (sc.arg1);
	  sc.errcode = 0;
	}
      else
	{
	  sc.result = -1;
	  sc.errcode = TARGET_EINVAL;
	}
      break;

    case CB_SYS_mmap2:
      {
	static bu32 heap = BFIN_DEFAULT_MEM_SIZE / 2;

	fmt_ret_hex = 1;
	tbuf += sprintf (tbuf, "mmap2(%#x, %u, %#x, %#x, %i, %u)",
			 args[0], args[1], args[2], args[3], args[4], args[5]);

	sc.errcode = 0;

	if (sc.arg4 & 0x20 /*MAP_ANONYMOUS*/)
	  /* XXX: We don't handle zeroing, but default is all zeros.  */;
	else if (args[4] >= MAX_CALLBACK_FDS)
	  sc.errcode = TARGET_ENOSYS;
	else
	  {
#ifdef HAVE_PREAD
	    char *data = xmalloc (sc.arg2);

	    /* XXX: Should add a cb->pread.  */
	    if (pread (cb->fdmap[args[4]], data, sc.arg2, args[5] << 12)
		== sc.arg2)
	      sc.write_mem (cb, &sc, heap, data, sc.arg2);
	    else
	      sc.errcode = TARGET_EINVAL;

	    free (data);
#else
	    sc.errcode = TARGET_ENOSYS;
#endif
	  }

	if (sc.errcode)
	  {
	    sc.result = -1;
	    break;
	  }

	sc.result = heap;
	heap += sc.arg2;
	/* Keep it page aligned.  */
	heap = ALIGN (heap, 4096);

	break;
      }

    case CB_SYS_munmap:
      /* XXX: meh, just lie for mmap().  */
      tbuf += sprintf (tbuf, "munmap(%#x, %u)", args[0], args[1]);
      sc.result = 0;
      break;

    case CB_SYS_dup2:
      tbuf += sprintf (tbuf, "dup2(%i, %i)", args[0], args[1]);
      if (sc.arg1 >= MAX_CALLBACK_FDS || sc.arg2 >= MAX_CALLBACK_FDS)
	{
	  sc.result = -1;
	  sc.errcode = TARGET_EINVAL;
	}
      else
	{
	  sc.result = dup2 (cb->fdmap[sc.arg1], cb->fdmap[sc.arg2]);
	  goto sys_finish;
	}
      break;

    case CB_SYS__llseek:
      tbuf += sprintf (tbuf, "llseek(%i, %u, %u, %#x, %u)",
		       args[0], args[1], args[2], args[3], args[4]);
      sc.func = TARGET_LINUX_SYS_lseek;
      if (sc.arg2)
	{
	  /* Can't support 64-bit offsets: high half must be zero.  */
	  sc.result = -1;
	  sc.errcode = TARGET_EINVAL;
	}
      else
	{
	  sc.arg2 = sc.arg3;
	  sc.arg3 = args[4];
	  cb_syscall (cb, &sc);
	  if (sc.result != -1)
	    {
	      bu32 z = 0;
	      sc.write_mem (cb, &sc, args[3], (void *)&sc.result, 4);
	      sc.write_mem (cb, &sc, args[3] + 4, (void *)&z, 4);
	    }
	}
      break;

    /* XXX: Should add a cb->pread.  */
    case CB_SYS_pread:
      tbuf += sprintf (tbuf, "pread(%i, %#x, %u, %i)",
		       args[0], args[1], args[2], args[3]);
      if (sc.arg1 >= MAX_CALLBACK_FDS)
	{
	  sc.result = -1;
	  sc.errcode = TARGET_EINVAL;
	}
      else
	{
	  long old_pos, read_result, read_errcode;

	  /* Get current filepos.  */
	  sc.func = TARGET_LINUX_SYS_lseek;
	  sc.arg2 = 0;
	  sc.arg3 = SEEK_CUR;
	  cb_syscall (cb, &sc);
	  if (sc.result == -1)
	    break;
	  old_pos = sc.result;

	  /* Move to the new pos.  */
	  sc.func = TARGET_LINUX_SYS_lseek;
	  sc.arg2 = args[3];
	  sc.arg3 = SEEK_SET;
	  cb_syscall (cb, &sc);
	  if (sc.result == -1)
	    break;

	  /* Read the data.  */
	  sc.func = TARGET_LINUX_SYS_read;
	  sc.arg2 = args[1];
	  sc.arg3 = args[2];
	  cb_syscall (cb, &sc);
	  read_result = sc.result;
	  read_errcode = sc.errcode;

	  /* Move back to the old pos.  */
	  sc.func = TARGET_LINUX_SYS_lseek;
	  sc.arg2 = old_pos;
	  sc.arg3 = SEEK_SET;
	  cb_syscall (cb, &sc);

	  sc.result = read_result;
	  sc.errcode = read_errcode;
	}
      break;

    case CB_SYS_getcwd:
      tbuf += sprintf (tbuf, "getcwd(%#x, %u)", args[0], args[1]);

      p = alloca (sc.arg2);
      if (getcwd (p, sc.arg2) == NULL)
	{
	  sc.result = -1;
	  sc.errcode = TARGET_EINVAL;
	}
      else
	{
	  sc.write_mem (cb, &sc, sc.arg1, p, sc.arg2);
	  sc.result = sc.arg1;
	}
      break;

    case CB_SYS_stat64:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "stat64(%#x:\"%s\", %u)", args[0], tstr, args[1]);

      cb->stat_map = stat_map_64;
      sc.func = TARGET_LINUX_SYS_stat;
      cb_syscall (cb, &sc);
      cb->stat_map = stat_map_32;
      break;
    case CB_SYS_lstat64:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "lstat64(%#x:\"%s\", %u)", args[0], tstr, args[1]);

      cb->stat_map = stat_map_64;
      sc.func = TARGET_LINUX_SYS_lstat;
      cb_syscall (cb, &sc);
      cb->stat_map = stat_map_32;
      break;
    case CB_SYS_fstat64:
      tbuf += sprintf (tbuf, "fstat64(%#x, %u)", args[0], args[1]);

      cb->stat_map = stat_map_64;
      sc.func = TARGET_LINUX_SYS_fstat;
      cb_syscall (cb, &sc);
      cb->stat_map = stat_map_32;
      break;

    case CB_SYS_ftruncate64:
      tbuf += sprintf (tbuf, "ftruncate64(%u, %u)", args[0], args[1]);
      sc.func = TARGET_LINUX_SYS_ftruncate;
      cb_syscall (cb, &sc);
      break;

    case CB_SYS_getuid:
    case CB_SYS_getuid32:
      tbuf += sprintf (tbuf, "getuid()");
      sc.result = getuid ();
      goto sys_finish;
    case CB_SYS_getgid:
    case CB_SYS_getgid32:
      tbuf += sprintf (tbuf, "getgid()");
      sc.result = getgid ();
      goto sys_finish;
    case CB_SYS_setuid:
      sc.arg1 &= 0xffff;
      /* fallthrough */
    case CB_SYS_setuid32:
      tbuf += sprintf (tbuf, "setuid(%u)", args[0]);
      sc.result = setuid (sc.arg1);
      goto sys_finish;
    case CB_SYS_setgid:
      sc.arg1 &= 0xffff;
      /* fallthrough */
    case CB_SYS_setgid32:
      tbuf += sprintf (tbuf, "setgid(%u)", args[0]);
      sc.result = setgid (sc.arg1);
      goto sys_finish;

    case CB_SYS_getpid:
      tbuf += sprintf (tbuf, "getpid()");
      sc.result = getpid ();
      goto sys_finish;
    case CB_SYS_kill:
      tbuf += sprintf (tbuf, "kill(%u, %i)", args[0], args[1]);
      /* Only let the app kill itself.  */
      if (sc.arg1 != getpid ())
	{
	  sc.result = -1;
	  sc.errcode = TARGET_EPERM;
	}
      else
	{
#ifdef HAVE_KILL
	  sc.result = kill (sc.arg1, sc.arg2);
	  goto sys_finish;
#else
	  sc.result = -1;
	  sc.errcode = TARGET_ENOSYS;
#endif
	}
      break;

    case CB_SYS_open:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "open(%#x:\"%s\", %#x, %o)",
		       args[0], tstr, args[1], args[2]);
      goto case_default;
    case CB_SYS_close:
      tbuf += sprintf (tbuf, "close(%i)", args[0]);
      goto case_default;
    case CB_SYS_read:
      tbuf += sprintf (tbuf, "read(%i, %#x, %u)", args[0], args[1], args[2]);
      goto case_default;
    case CB_SYS_write:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[1]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "write(%i, %#x:\"%s\", %u)",
		       args[0], args[1], tstr, args[2]);
      goto case_default;
    case CB_SYS_lseek:
      tbuf += sprintf (tbuf, "lseek(%i, %i, %i)", args[0], args[1], args[2]);
      goto case_default;
    case CB_SYS_unlink:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "unlink(%#x:\"%s\")", args[0], tstr);
      goto case_default;
    case CB_SYS_truncate:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "truncate(%#x:\"%s\", %i)",
		       args[0], tstr, args[1]);
      goto case_default;
    case CB_SYS_ftruncate:
      tbuf += sprintf (tbuf, "ftruncate(%i, %i)", args[0], args[1]);
      goto case_default;
    case CB_SYS_rename:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "rename(%#x:\"%s\", ", args[0], tstr);
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[1]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "%#x:\"%s\")", args[1], tstr);
      goto case_default;
    case CB_SYS_stat:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "stat(%#x:\"%s\", %#x)", args[0], tstr, args[1]);
      goto case_default;
    case CB_SYS_fstat:
      tbuf += sprintf (tbuf, "fstat(%i, %#x)", args[0], args[1]);
      goto case_default;
    case CB_SYS_lstat:
      if (cb_get_string (cb, &sc, tstr, sizeof (tstr), args[0]))
	strcpy (tstr, "???");
      tbuf += sprintf (tbuf, "lstat(%#x:\"%s\", %#x)", args[0], tstr, args[1]);
      goto case_default;
    case CB_SYS_pipe:
      tbuf += sprintf (tbuf, "pipe(%#x, %#x)", args[0], args[1]);
      goto case_default;

    default:
      tbuf += sprintf (tbuf, "???_%i(%#x, %#x, %#x, %#x, %#x, %#x)", sc.func,
		       args[0], args[1], args[2], args[3], args[4], args[5]);
    case_default:
      cb_syscall (cb, &sc);
      break;

    sys_finish:
      /* Convert the host errno into the target errcode for the caller.  */
      if (sc.result == -1)
	{
	  cb->last_errno = errno;
	  sc.errcode = cb->get_errno (cb);
	}
    }

  TRACE_EVENTS (cpu, "syscall_%i(%#x, %#x, %#x, %#x, %#x, %#x) = %li (error = %i)",
		sc.func, args[0], args[1], args[2], args[3], args[4], args[5],
		sc.result, sc.errcode);

  tbuf += sprintf (tbuf, " = ");
  if (STATE_ENVIRONMENT (sd) == USER_ENVIRONMENT)
    {
      /* Linux ABI: R0 holds the result, or the negated errcode.  */
      if (sc.result == -1)
	{
	  tbuf += sprintf (tbuf, "-1 (error = %i)", sc.errcode);
	  if (sc.errcode == cb_host_to_target_errno (cb, ENOSYS))
	    {
	      sim_io_eprintf (sd, "bfin-sim: %#x: unimplemented syscall %i\n",
			      PCREG, sc.func);
	    }
	  SET_DREG (0, -sc.errcode);
	}
      else
	{
	  if (fmt_ret_hex)
	    tbuf += sprintf (tbuf, "%#lx", sc.result);
	  else
	    tbuf += sprintf (tbuf, "%lu", sc.result);
	  SET_DREG (0, sc.result);
	}
    }
  else
    {
      /* libgloss ABI: result/result2/errcode in R0/R1/R2.  */
      tbuf += sprintf (tbuf, "%lu (error = %i)", sc.result, sc.errcode);
      SET_DREG (0, sc.result);
      SET_DREG (1, sc.result2);
      SET_DREG (2, sc.errcode);
    }

  TRACE_SYSCALL (cpu, "%s", _tbuf);
}
/* Update the mask of pending interrupts. This operation must be called when the state of some 68HC11 IO register changes. It looks the different registers that indicate a pending interrupt (timer, SCI, SPI, ...) and records the interrupt if it's there and enabled. */ void interrupts_update_pending (struct interrupts *interrupts) { int i; uint8 *ioregs; unsigned long clear_mask; unsigned long set_mask; clear_mask = 0; set_mask = 0; ioregs = &interrupts->cpu->ios[0]; for (i = 0; i < TableSize(idefs); i++) { struct interrupt_def *idef = &idefs[i]; uint8 data; /* Look if the interrupt is enabled. */ if (idef->enable_paddr) { data = ioregs[idef->enable_paddr]; if (!(data & idef->enabled_mask)) { /* Disable it. */ clear_mask |= (1 << idef->int_number); continue; } } /* Interrupt is enabled, see if it's there. */ data = ioregs[idef->int_paddr]; if (!(data & idef->int_mask)) { /* Disable it. */ clear_mask |= (1 << idef->int_number); continue; } /* Ok, raise it. */ set_mask |= (1 << idef->int_number); } /* Some interrupts are shared (M6811_INT_SCI) so clear the interrupts before setting the new ones. */ interrupts->pending_mask &= ~clear_mask; interrupts->pending_mask |= set_mask; /* Keep track of when the interrupt is raised by the device. Also implements the breakpoint-on-interrupt. */ if (set_mask) { signed64 cycle = cpu_current_cycle (interrupts->cpu); int must_stop = 0; for (i = 0; i < M6811_INT_NUMBER; i++) { if (!(set_mask & (1 << i))) continue; interrupts->interrupts[i].cpu_cycle = cycle; if (interrupts->interrupts[i].stop_mode & SIM_STOP_WHEN_RAISED) { must_stop = 1; sim_io_printf (CPU_STATE (interrupts->cpu), "Interrupt %s raised\n", interrupt_names[i]); } } if (must_stop) sim_engine_halt (CPU_STATE (interrupts->cpu), interrupts->cpu, 0, cpu_get_pc (interrupts->cpu), sim_stopped, SIM_SIGTRAP); } }
/* Implement the checks for a non-excepting load: detect interrupt
   factors (address misalignment "ec", target register misalignment
   "rec"), record them in a NESR/NEEAR pair when NECR error logging is
   enabled, and maintain the NE flags of the registers involved.
   Returns 1 if the load should be performed, 0 if it must be
   suppressed because an input register already has its NE flag set.  */
BI
frvbf_check_non_excepting_load (SIM_CPU *current_cpu, SI base_index,
                                SI disp_index, SI target_index,
                                SI immediate_disp, QI data_size,
                                BI is_float)
{
  BI rc = 1; /* perform the load.  */
  SIM_DESC sd = CPU_STATE (current_cpu);
  int daec = 0;
  int rec = 0;
  int ec = 0;
  USI necr;
  int do_elos;
  SI NE_flags[2];
  SI NE_base;
  SI nesr;
  SI ne_index;
  FRV_REGISTER_CONTROL *control;

  /* Effective address: base register plus either a displacement
     register (disp_index >= 0) or an immediate displacement.  */
  SI address = GET_H_GR (base_index);
  if (disp_index >= 0)
    address += GET_H_GR (disp_index);
  else
    address += immediate_disp;

  /* Check for interrupt factors: alignment of the address according to
     the access size, and of the target register for multi-word loads.  */
  switch (data_size)
    {
    case NESR_UQI_SIZE:
    case NESR_QI_SIZE:
      break;
    case NESR_UHI_SIZE:
    case NESR_HI_SIZE:
      if (address & 1)
        ec = 1;
      break;
    case NESR_SI_SIZE:
      if (address & 3)
        ec = 1;
      break;
    case NESR_DI_SIZE:
      if (address & 7)
        ec = 1;
      if (target_index & 1)
        rec = 1;
      break;
    case NESR_XI_SIZE:
      if (address & 0xf)
        ec = 1;
      if (target_index & 3)
        rec = 1;
      break;
    default:
      {
        IADDR pc = GET_H_PC ();
        sim_engine_abort (sd, current_cpu, pc,
                          "check_non_excepting_load: Incorrect data_size\n");
        break;
      }
    }

  /* Error logging is only active when NECR is implemented, valid and
     has ELOS set.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  if (control->spr[H_SPR_NECR].implemented)
    {
      necr = GET_NECR ();
      do_elos = GET_NECR_VALID (necr) && GET_NECR_ELOS (necr);
    }
  else
    do_elos = 0;

  /* NECR, NESR, NEEAR are only implemented for the full frv machine.  */
  if (do_elos)
    {
      ne_index = next_available_nesr (current_cpu, NO_NESR);
      if (ne_index == NO_NESR)
        {
          IADDR pc = GET_H_PC ();
          sim_engine_abort (sd, current_cpu, pc,
                            "No available NESR register\n");
        }

      /* Fill in the basic fields of the NESR.  */
      nesr = GET_NESR (ne_index);
      SET_NESR_VALID (nesr);
      SET_NESR_EAV (nesr);
      SET_NESR_DRN (nesr, target_index);
      SET_NESR_SIZE (nesr, data_size);
      SET_NESR_NEAN (nesr, ne_index);
      if (is_float)
        SET_NESR_FR (nesr);
      else
        CLEAR_NESR_FR (nesr);

      /* Set the corresponding NEEAR.  */
      SET_NEEAR (ne_index, address);

      /* Error-cause fields start out clear; set below if detected.  */
      SET_NESR_DAEC (nesr, 0);
      SET_NESR_REC (nesr, 0);
      SET_NESR_EC (nesr, 0);
    }

  /* Set the NE flag corresponding to the target register if an
     interrupt factor was detected.
     daec is not checked here yet, but is declared for future
     reference.  */
  if (is_float)
    NE_base = H_SPR_FNER0;
  else
    NE_base = H_SPR_GNER0;

  GET_NE_FLAGS (NE_flags, NE_base);
  if (rec)
    {
      SET_NE_FLAG (NE_flags, target_index);
      if (do_elos)
        SET_NESR_REC (nesr, NESR_REGISTER_NOT_ALIGNED);
    }
  if (ec)
    {
      SET_NE_FLAG (NE_flags, target_index);
      if (do_elos)
        SET_NESR_EC (nesr, NESR_MEM_ADDRESS_NOT_ALIGNED);
    }

  if (do_elos)
    SET_NESR (ne_index, nesr);

  /* If no interrupt factor was detected then set the NE flag on the
     target register if the NE flag on one of the input registers is
     already set.  */
  if (! rec && ! ec && ! daec)
    {
      BI ne_flag = GET_NE_FLAG (NE_flags, base_index);
      if (disp_index >= 0)
        ne_flag |= GET_NE_FLAG (NE_flags, disp_index);
      if (ne_flag)
        {
          SET_NE_FLAG (NE_flags, target_index);
          rc = 0; /* Do not perform the load.  */
        }
      else
        CLEAR_NE_FLAG (NE_flags, target_index);
    }
  SET_NE_FLAGS (NE_base, NE_flags);

  return rc; /* perform the load?  */
}
/* Process an interrupt/exception return insn (RTI/RTX/RTN/RTE):
   validate that the return is legal at the current event level, jump
   to the saved return address, and update CEC state (IPEND, interrupt
   mask, user/super register aliases).  IVG selects the return register
   to use; -1 means "current level".  */
void
cec_return (SIM_CPU *cpu, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_cec *cec;
  bool snen;
  int curr_ivg;
  bu32 oldpc, newpc;

  oldpc = PCREG;

  BFIN_CPU_STATE.did_jump = true;
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    {
      /* No CEC state to maintain outside of operating mode; just jump
         to the relevant return register.  */
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);

  /* XXX: This isn't entirely correct ...  */
  cec->ipend &= ~IVG_EMU_B;

  curr_ivg = _cec_get_ivg (cec);
  if (curr_ivg == -1)
    curr_ivg = IVG_USER;

  if (ivg == -1)
    ivg = curr_ivg;

  TRACE_EVENTS (cpu, "returning from EVT%i (should be EVT%i)",
                curr_ivg, ivg);

  /* Not allowed to return from usermode.  */
  if (curr_ivg == IVG_USER)
    cec_exception (cpu, VEC_ILL_RES);

  if (ivg > IVG15 || ivg < 0)
    sim_io_error (sd, "%s: ivg %i out of range !", __func__, ivg);

  _cec_require_supervisor (cpu, cec);

  /* Each return insn is only legal at its matching event level.  */
  switch (ivg)
    {
    case IVG_EMU:
      /* RTE -- only valid in emulation mode.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EMU)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_NMI:
      /* RTN -- only valid in NMI.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_NMI)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_EVX:
      /* RTX -- only valid in exception.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EVX)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    default:
      /* RTI -- not valid in emulation, nmi, exception, or user.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg == IVG_EMU || curr_ivg == IVG_NMI
          || curr_ivg == IVG_EVX || curr_ivg == IVG_USER)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_IRPTEN:
      /* XXX: Is this even possible ?  */
      excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
      break;
    }

  newpc = cec_read_ret_reg (cpu, ivg);

  /* LSB of the return address is the self-nesting flag.
     XXX: Does this nested trick work on EMU/NMI/EVX ?  */
  snen = (newpc & 1);
  /* XXX: Delayed clear shows bad PCREG register trace above ?  */
  SET_PCREG (newpc & ~1);

  TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (from EVT%i)", ivg);

  /* Update ipend after the TRACE_BRANCH so dv-bfin_trace knows
     current CEC state wrt overflow.  */
  if (!snen)
    cec->ipend &= ~(1 << ivg);

  /* Disable global interrupt mask to let any interrupt take over, but
     only when we were already in a RTI level.  Only way we could have
     raised at that point is if it was cleared in the first place.  */
  if (ivg >= IVG_IVHW || ivg == IVG_RST)
    cec_irpten_disable (cpu, cec);

  /* When going from super to user, we clear LSB in LB regs in case
     it was set on the transition up.  Also need to load SP alias
     with USP.  */
  if (_cec_get_ivg (cec) == -1)
    {
      int i;

      for (i = 0; i < 2; ++i)
        if (LBREG (i) & 1)
          SET_LBREG (i, LBREG (i) & ~1);
      SET_KSPREG (SPREG);
      SET_SPREG (USPREG);
    }

  /* Check for pending interrupts before we return to usermode.  */
  _cec_check_pending (cpu, cec);
}
/* Set ESFR0, EPCRx, ESRx, EARx and EDRx, according to the given
   program interrupt.  The register index and which registers get set
   (EPCR/EAR/EDR/DAEC) depend on the interrupt kind and on the target
   machine variant (fr400/fr550 differ from the full frv).  */
static void
set_exception_status_registers (SIM_CPU *current_cpu,
                                struct frv_interrupt_queue_element *item)
{
  struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
  /* NOTE(review): `slot` appears unused below (item->slot is used
     instead) — confirm before removing.  */
  int slot = (item->vpc - previous_vliw_pc) / 4;
  int reg_index = -1;
  int set_ear = 0;
  int set_edr = 0;
  int set_daec = 0;
  int set_epcr = 0;
  SI esr = 0;
  SIM_DESC sd = CPU_STATE (current_cpu);

  /* If the interrupt is strict (precise) or the interrupt is on the
     insns in the I0 pipe, then set the 0 registers.  */
  if (interrupt->precise)
    {
      reg_index = 0;
      if (interrupt->kind == FRV_REGISTER_EXCEPTION)
        SET_ESR_REC (esr, item->u.rec);
      else if (interrupt->kind == FRV_INSTRUCTION_ACCESS_EXCEPTION)
        SET_ESR_IAEC (esr, item->u.iaec);
      /* For fr550, don't set epcr for precise interrupts.  */
      if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
        set_epcr = 1;
    }
  else
    {
      switch (interrupt->kind)
        {
        case FRV_DIVISION_EXCEPTION:
          set_isr_exception_fields (current_cpu, item);
          /* fall thru to set reg_index.  */
        case FRV_COMMIT_EXCEPTION:
          /* For fr550, always use ESR0.  */
          if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
            reg_index = 0;
          else if (item->slot == UNIT_I0)
            reg_index = 0;
          else if (item->slot == UNIT_I1)
            reg_index = 1;
          set_epcr = 1;
          break;
        case FRV_DATA_STORE_ERROR:
          reg_index = 14; /* Use ESR14.  */
          break;
        case FRV_DATA_ACCESS_ERROR:
          reg_index = 15; /* Use ESR15, EPCR15.  */
          set_ear = 1;
          break;
        case FRV_DATA_ACCESS_EXCEPTION:
          set_daec = 1;
          /* fall through */
        case FRV_DATA_ACCESS_MMU_MISS:
        case FRV_MEM_ADDRESS_NOT_ALIGNED:
          /* Get the appropriate ESR, EPCR, EAR and EDR.
             EAR will be set.  EDR will not be set if this is a store
             insn.  */
          set_ear = 1;
          /* For fr550, never use EDRx.  */
          if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
            if (item->u.data_written.length != 0)
              set_edr = 1;
          reg_index = esr_for_data_access_exception (current_cpu, item);
          set_epcr = 1;
          break;
        case FRV_MP_EXCEPTION:
          /* For fr550, use EPCR2 and ESR2.  */
          if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
            {
              reg_index = 2;
              set_epcr = 1;
            }
          break; /* MSR0-1, FQ0-9 are already set.  */
        case FRV_FP_EXCEPTION:
          set_fp_exception_registers (current_cpu, item);
          /* For fr550, use EPCR2 and ESR2.  */
          if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
            {
              reg_index = 2;
              set_epcr = 1;
            }
          break;
        default:
          {
            SIM_DESC sd = CPU_STATE (current_cpu);
            IADDR pc = CPU_PC_GET (current_cpu);
            sim_engine_abort (sd, current_cpu, pc,
                              "invalid non-strict program interrupt kind: %d\n",
                              interrupt->kind);
            break;
          }
        }
    } /* non-strict (imprecise) interrupt */

  /* Now fill in the selected exception status registers.  */
  if (reg_index != -1)
    {
      /* Now set the exception status registers.  */
      SET_ESFR_FLAG (reg_index);
      SET_ESR_EC (esr, interrupt->ec);

      if (set_epcr)
        {
          /* fr400 records the VLIW start address, others the insn's
             own address within the VLIW.  */
          if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
            SET_EPCR (reg_index, previous_vliw_pc);
          else
            SET_EPCR (reg_index, item->vpc);
        }

      if (set_ear)
        {
          SET_EAR (reg_index, item->eaddress);
          SET_ESR_EAV (esr);
        }
      else
        CLEAR_ESR_EAV (esr);

      if (set_edr)
        {
          int edn = set_edr_register (current_cpu, item, 0/* EDR0-3 */);
          SET_ESR_EDN (esr, edn);
          SET_ESR_EDV (esr);
        }
      else
        CLEAR_ESR_EDV (esr);

      if (set_daec)
        SET_ESR_DAEC (esr, item->u.daec);

      SET_ESR_VALID (esr);
      SET_ESR (reg_index, esr);
    }
}
/* Deliver exception EXCP.  Simulator-internal exceptions (VEC_SIM_*)
   are handled directly.  In operating mode, real exceptions are
   raised through the CEC to the EVT3 handler; otherwise a small set
   of exceptions is virtualized (syscalls, breakpoints) and the rest
   halt the sim with an appropriate signal.  */
void
cec_exception (SIM_CPU *cpu, int excp)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int sigrc = -1;

  TRACE_EVENTS (cpu, "processing exception %#x in EVT%i",
                excp, cec_get_ivg (cpu));

  /* Ideally what would happen here for real hardware exceptions (not
     fake sim ones) is that:
      - For service exceptions (excp <= 0x11):
         RETX is the _next_ PC which can be tricky with jumps/hardware
         loops/...
      - For error exceptions (excp > 0x11):
         RETX is the _current_ PC (i.e. the one causing the exception)
      - PC is loaded with EVT3 MMR
      - ILAT/IPEND in CEC is updated depending on current IVG level
      - the fault address MMRs get updated with data/instruction info
      - Execution continues on in the EVT3 handler.  */

  /* Handle simulator exceptions first.  */
  switch (excp)
    {
    case VEC_SIM_HLT:
      excp_to_sim_halt (sim_exited, 0);
      return;
    case VEC_SIM_ABORT:
      excp_to_sim_halt (sim_exited, 1);
      return;
    case VEC_SIM_TRAP:
      /* GDB expects us to step over EMUEXCPT.  */
      /* XXX: What about hwloops and EMUEXCPT at the end?  Pretty sure
         gdb doesn't handle this already...  */
      SET_PCREG (PCREG + 2);
      /* Only trap when we are running in gdb.  */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
        excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      return;
    case VEC_SIM_DBGA:
      /* If running in gdb, simply trap.  */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
        excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      else
        excp_to_sim_halt (sim_exited, 2);
    }

  if (excp <= 0x3f)
    {
      SET_EXCAUSE (excp);
      if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
        {
          /* ICPLB regs always get updated.  */
          /* XXX: Should optimize this call path ...  */
          if (excp != VEC_MISALI_I && excp != VEC_MISALI_D
              && excp != VEC_CPLB_I_M && excp != VEC_CPLB_M
              && excp != VEC_CPLB_I_VL && excp != VEC_CPLB_VL
              && excp != VEC_CPLB_I_MHIT && excp != VEC_CPLB_MHIT)
            mmu_log_ifault (cpu);
          _cec_raise (cpu, CEC_STATE (cpu), IVG_EVX);
          /* We need to restart the engine so that we don't return and
             continue processing this bad insn.  */
          if (EXCAUSE >= 0x20)
            sim_engine_restart (sd, cpu, NULL, PCREG);
          return;
        }
    }

  TRACE_EVENTS (cpu, "running virtual exception handler");

  switch (excp)
    {
    case VEC_SYS:
      bfin_syscall (cpu);
      break;

    case VEC_EXCPT01:	/* Userspace gdb breakpoint.  */
      sigrc = SIM_SIGTRAP;
      break;

    case VEC_UNDEF_I:	/* Undefined instruction.  */
      sigrc = SIM_SIGILL;
      break;

    case VEC_ILL_RES:	/* Illegal supervisor resource.  */
    case VEC_MISALI_I:	/* Misaligned instruction.  */
      sigrc = SIM_SIGBUS;
      break;

    case VEC_CPLB_M:
    case VEC_CPLB_I_M:
      sigrc = SIM_SIGSEGV;
      break;

    default:
      sim_io_eprintf (sd, "Unhandled exception %#x at 0x%08x (%s)\n",
                      excp, PCREG, excp_decoded[excp]);
      sigrc = SIM_SIGILL;
      break;
    }

  if (sigrc != -1)
    excp_to_sim_halt (sim_stopped, sigrc);
}
/* Raise event IVG on CPU's core event controller CEC, or, when IVG is -1,
   re-scan ILAT for the highest-priority latched-and-unmasked event and
   raise that instead.  Handles double faults, saves the appropriate
   return register (RETE/RETN/RETX/RETI), redirects the PC to the event
   vector, and performs the user->supervisor transition bookkeeping.
   Note the goto into the final else-branch: EMU/RST/EVX events jump
   straight to process_int, bypassing the maskability checks.  */
static void
_cec_raise (SIM_CPU *cpu, struct bfin_cec *cec, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int curr_ivg = _cec_get_ivg (cec);
  bool snen;   /* Self-nesting enabled (SYSCFG.SNEN).  */
  bool irpten; /* Global interrupt disable (IPEND bit 4).  */

  TRACE_EVENTS (cpu, "processing request for EVT%i while at EVT%i",
		ivg, curr_ivg);

  irpten = (cec->ipend & IVG_IRPTEN_B);
  snen = (SYSCFGREG & SYSCFG_SNEN);

  if (curr_ivg == -1)
    curr_ivg = IVG_USER;

  /* Just check for higher latched interrupts.  */
  if (ivg == -1)
    {
      if (irpten)
	goto done; /* All interrupts are masked anyways.  */

      ivg = __cec_get_ivg (cec->ilat & cec->imask);
      if (ivg < 0)
	goto done; /* Nothing latched.  */

      if (ivg > curr_ivg)
	goto done; /* Nothing higher latched.  */

      if (!snen && ivg == curr_ivg)
	goto done; /* Self nesting disabled.  */

      /* Still here, so fall through to raise to higher pending.  */
    }

  cec->ilat |= (1 << ivg);

  if (ivg <= IVG_EVX)
    {
      /* These two are always processed.  */
      if (ivg == IVG_EMU || ivg == IVG_RST)
	goto process_int;

      /* Anything lower might trigger a double fault.  */
      if (curr_ivg <= ivg)
	{
	  /* Double fault ! :( */
	  SET_EXCAUSE (VEC_UNCOV);
	  /* XXX: SET_RETXREG (...);  */
	  sim_io_error (sd, "%s: double fault at 0x%08x ! :(", __func__, PCREG);
	  excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
	}

      /* No double fault -> always process.  */
      goto process_int;
    }
  else if (irpten && curr_ivg != IVG_USER)
    {
      /* Interrupts are globally masked.  */
    }
  else if (!(cec->imask & (1 << ivg)))
    {
      /* This interrupt is masked.  */
    }
  else if (ivg < curr_ivg || (snen && ivg == curr_ivg))
    {
      /* Do transition!  */
      bu32 oldpc;

 process_int:
      cec->ipend |= (1 << ivg);
      cec->ilat &= ~(1 << ivg);

      /* Interrupts are processed in between insns which means the
	 return point is the insn-to-be-executed (which is the current
	 PC).  But exceptions are handled while executing an insn, so
	 we may have to advance the PC ourselves when setting RETX.
	 XXX: Advancing the PC should only be for "service" exceptions,
	 and handling them after executing the insn should be OK, which
	 means we might be able to use the event interface for it.  */

      oldpc = PCREG;
      switch (ivg)
	{
	case IVG_EMU:
	  /* Signal the JTAG ICE.  */
	  /* XXX: what happens with 'raise 0' ?  */
	  SET_RETEREG (oldpc);
	  excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
	  /* XXX: Need an easy way for gdb to signal it isnt here.  */
	  cec->ipend &= ~IVG_EMU_B;
	  break;
	case IVG_RST:
	  /* Have the core reset simply exit (i.e. "shutdown").  */
	  excp_to_sim_halt (sim_exited, 0);
	  break;
	case IVG_NMI:
	  /* XXX: Should check this.  */
	  SET_RETNREG (oldpc);
	  break;
	case IVG_EVX:
	  /* Non-service exceptions point to the excepting instruction.  */
	  if (EXCAUSE >= 0x20)
	    SET_RETXREG (oldpc);
	  else
	    {
	      bu32 nextpc = hwloop_get_next_pc (cpu, oldpc, INSN_LEN);
	      SET_RETXREG (nextpc);
	    }
	  break;
	case IVG_IRPTEN:
	  /* XXX: what happens with 'raise 4' ?  */
	  sim_io_error (sd, "%s: what to do with 'raise 4' ?", __func__);
	  break;
	default:
	  /* RETI gets bit 0 set when self-nesting so RTI knows to
	     re-enable nesting on return.  */
	  SET_RETIREG (oldpc | (ivg == curr_ivg ? 1 : 0));
	  break;
	}

      /* If EVT_OVERRIDE is in effect (IVG7+), use the reset address.  */
      if ((cec->evt_override & 0xff80) & (1 << ivg))
	SET_PCREG (cec_get_reset_evt (cpu));
      else
	SET_PCREG (cec_get_evt (cpu, ivg));

      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (to EVT%i):", ivg);
      BFIN_CPU_STATE.did_jump = true;

      /* Enable the global interrupt mask upon interrupt entry.  */
      if (ivg >= IVG_IVHW)
	cec_irpten_enable (cpu, cec);
    }

  /* When moving between states, don't let internal states bleed
     through.  */
  DIS_ALGN_EXPT &= ~1;

  /* When going from user to super, we set LSB in LB regs to avoid
     misbehavior and/or malicious code.
     Also need to load SP alias with KSP.  */
  if (curr_ivg == IVG_USER)
    {
      int i;
      for (i = 0; i < 2; ++i)
	if (!(LBREG (i) & 1))
	  SET_LBREG (i, LBREG (i) | 1);
      SET_USPREG (SPREG);
      SET_SPREG (KSPREG);
    }

 done:
  TRACE_EVENTS (cpu, "now at EVT%i", _cec_get_ivg (cec));
}
void frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache) { int elements; int i, j; SIM_DESC sd; /* Set defaults for fields which are not initialized. */ sd = CPU_STATE (cpu); switch (STATE_ARCHITECTURE (sd)->mach) { case bfd_mach_fr400: case bfd_mach_fr450: if (cache->configured_sets == 0) cache->configured_sets = 512; if (cache->configured_ways == 0) cache->configured_ways = 2; if (cache->line_size == 0) cache->line_size = 32; if (cache->memory_latency == 0) cache->memory_latency = 20; break; case bfd_mach_fr550: if (cache->configured_sets == 0) cache->configured_sets = 128; if (cache->configured_ways == 0) cache->configured_ways = 4; if (cache->line_size == 0) cache->line_size = 64; if (cache->memory_latency == 0) cache->memory_latency = 20; break; default: if (cache->configured_sets == 0) cache->configured_sets = 64; if (cache->configured_ways == 0) cache->configured_ways = 4; if (cache->line_size == 0) cache->line_size = 64; if (cache->memory_latency == 0) cache->memory_latency = 20; break; } frv_cache_reconfigure (cpu, cache); /* First allocate the cache storage based on the given dimensions. */ elements = cache->sets * cache->ways; cache->tag_storage = (FRV_CACHE_TAG *) zalloc (elements * sizeof (*cache->tag_storage)); cache->data_storage = (char *) xmalloc (elements * cache->line_size); /* Initialize the pipelines and status buffers. */ for (i = LS; i < FRV_CACHE_PIPELINES; ++i) { cache->pipeline[i].requests = NULL; cache->pipeline[i].status.flush.valid = 0; cache->pipeline[i].status.return_buffer.valid = 0; cache->pipeline[i].status.return_buffer.data = (char *) xmalloc (cache->line_size); for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j) cache->pipeline[i].stages[j].request = NULL; } cache->BARS.valid = 0; cache->NARS.valid = 0; /* Now set the cache state. */ cache->cpu = cpu; cache->statistics.accesses = 0; cache->statistics.hits = 0; }
USI fr30_int (SIM_CPU *current_cpu, PCADDR pc, int num) { SIM_DESC sd = CPU_STATE (current_cpu); host_callback *cb = STATE_CALLBACK (sd); #ifdef SIM_HAVE_BREAKPOINTS /* Check for breakpoints "owned" by the simulator first, regardless of --environment. */ if (num == TRAP_BREAKPOINT) { /* First try sim-break.c. If it's a breakpoint the simulator "owns" it doesn't return. Otherwise it returns and let's us try. */ sim_handle_breakpoint (sd, current_cpu, pc); /* Fall through. */ } #endif if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT) { /* The new pc is the trap vector entry. We assume there's a branch there to some handler. */ USI new_pc; setup_int (current_cpu, pc); fr30bf_h_ibit_set (current_cpu, 0); new_pc = GETMEMSI (current_cpu, pc, fr30bf_h_dr_get (current_cpu, H_DR_TBR) + 1024 - ((num + 1) * 4)); return new_pc; } switch (num) { case TRAP_SYSCALL : { /* TODO: find out what the ABI for this is */ CB_SYSCALL s; CB_SYSCALL_INIT (&s); s.func = fr30bf_h_gr_get (current_cpu, 0); s.arg1 = fr30bf_h_gr_get (current_cpu, 4); s.arg2 = fr30bf_h_gr_get (current_cpu, 5); s.arg3 = fr30bf_h_gr_get (current_cpu, 6); if (s.func == TARGET_SYS_exit) { sim_engine_halt (sd, current_cpu, NULL, pc, sim_exited, s.arg1); } s.p1 = (PTR) sd; s.p2 = (PTR) current_cpu; s.read_mem = syscall_read_mem; s.write_mem = syscall_write_mem; cb_syscall (cb, &s); fr30bf_h_gr_set (current_cpu, 2, s.errcode); /* TODO: check this one */ fr30bf_h_gr_set (current_cpu, 4, s.result); fr30bf_h_gr_set (current_cpu, 1, s.result2); /* TODO: check this one */ break; } case TRAP_BREAKPOINT: sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP); break; default : { USI new_pc; setup_int (current_cpu, pc); fr30bf_h_ibit_set (current_cpu, 0); new_pc = GETMEMSI (current_cpu, pc, fr30bf_h_dr_get (current_cpu, H_DR_TBR) + 1024 - ((num + 1) * 4)); return new_pc; } } /* Fake an "reti" insn. Since we didn't push anything to stack, all we need to do is update pc. */ return pc + 2; }
/* Process the current interrupt if there is one. This operation must be called after each instruction to handle the interrupts. If interrupts are masked, it does nothing. */ int interrupts_process (struct interrupts *interrupts) { int id; uint8 ccr; /* See if interrupts are enabled/disabled and keep track of the number of cycles the interrupts are masked. Such information is then reported by the info command. */ ccr = cpu_get_ccr (interrupts->cpu); if (ccr & M6811_I_BIT) { if (interrupts->start_mask_cycle < 0) interrupts->start_mask_cycle = cpu_current_cycle (interrupts->cpu); } else if (interrupts->start_mask_cycle >= 0 && (ccr & M6811_I_BIT) == 0) { signed64 t = cpu_current_cycle (interrupts->cpu); t -= interrupts->start_mask_cycle; if (t < interrupts->min_mask_cycles) interrupts->min_mask_cycles = t; if (t > interrupts->max_mask_cycles) interrupts->max_mask_cycles = t; interrupts->start_mask_cycle = -1; interrupts->last_mask_cycles = t; } if (ccr & M6811_X_BIT) { if (interrupts->xirq_start_mask_cycle < 0) interrupts->xirq_start_mask_cycle = cpu_current_cycle (interrupts->cpu); } else if (interrupts->xirq_start_mask_cycle >= 0 && (ccr & M6811_X_BIT) == 0) { signed64 t = cpu_current_cycle (interrupts->cpu); t -= interrupts->xirq_start_mask_cycle; if (t < interrupts->xirq_min_mask_cycles) interrupts->xirq_min_mask_cycles = t; if (t > interrupts->xirq_max_mask_cycles) interrupts->xirq_max_mask_cycles = t; interrupts->xirq_start_mask_cycle = -1; interrupts->xirq_last_mask_cycles = t; } id = interrupts_get_current (interrupts); if (id >= 0) { uint16 addr; struct interrupt_history *h; /* Implement the breakpoint-on-interrupt. 
*/ if (interrupts->interrupts[id].stop_mode & SIM_STOP_WHEN_TAKEN) { sim_io_printf (CPU_STATE (interrupts->cpu), "Interrupt %s will be handled\n", interrupt_names[id]); sim_engine_halt (CPU_STATE (interrupts->cpu), interrupts->cpu, 0, cpu_get_pc (interrupts->cpu), sim_stopped, SIM_SIGTRAP); } cpu_push_all (interrupts->cpu); addr = memory_read16 (interrupts->cpu, interrupts->vectors_addr + id * 2); cpu_call (interrupts->cpu, addr); /* Now, protect from nested interrupts. */ if (id == M6811_INT_XIRQ) { cpu_set_ccr_X (interrupts->cpu, 1); } else { cpu_set_ccr_I (interrupts->cpu, 1); } /* Update the interrupt history table. */ h = &interrupts->interrupts_history[interrupts->history_index]; h->type = id; h->taken_cycle = cpu_current_cycle (interrupts->cpu); h->raised_cycle = interrupts->interrupts[id].cpu_cycle; if (interrupts->history_index >= MAX_INT_HISTORY-1) interrupts->history_index = 0; else interrupts->history_index++; interrupts->nb_interrupts_raised++; cpu_add_cycles (interrupts->cpu, 14); return 1; } return 0; }
/* Save data written to memory into the interrupt state so that it can
   be copied to the appropriate EDR register, if necessary, in the event
   of an interrupt.  */
void
frv_save_data_written_for_interrupts (
  SIM_CPU *current_cpu, CGEN_WRITE_QUEUE_ELEMENT *item
)
{
  /* Local shorthand for the buffered-store record inside the global
     interrupt state.  */
#define DATA frv_interrupt_state.data_written

  /* Remember which slot held the insn performing the write.  */
  frv_interrupt_state.slot = CGEN_WRITE_QUEUE_ELEMENT_PIPE (item);

  /* Record any data written to memory; register writes are ignored.  */
  switch (CGEN_WRITE_QUEUE_ELEMENT_KIND (item))
    {
    case CGEN_BI_WRITE:
    case CGEN_QI_WRITE:
    case CGEN_SI_WRITE:
    case CGEN_SF_WRITE:
    case CGEN_PC_WRITE:
    case CGEN_FN_HI_WRITE:
    case CGEN_FN_SI_WRITE:
    case CGEN_FN_SF_WRITE:
    case CGEN_FN_DI_WRITE:
    case CGEN_FN_DF_WRITE:
    case CGEN_FN_XI_WRITE:
    case CGEN_FN_PC_WRITE:
      break; /* Ignore writes to registers.  */
    case CGEN_MEM_QI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.mem_qi_write.value;
      break;
    case CGEN_MEM_HI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.mem_hi_write.value;
      break;
    case CGEN_MEM_SI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.mem_si_write.value;
      break;
    case CGEN_MEM_DI_WRITE:
      /* 64-bit value is split across two 32-bit words, high first.  */
      DATA.length = 2;
      DATA.words[0] = item->kinds.mem_di_write.value >> 32;
      DATA.words[1] = item->kinds.mem_di_write.value;
      break;
    case CGEN_MEM_DF_WRITE:
      DATA.length = 2;
      DATA.words[0] = item->kinds.mem_df_write.value >> 32;
      DATA.words[1] = item->kinds.mem_df_write.value;
      break;
    case CGEN_MEM_XI_WRITE:
      DATA.length = 4;
      DATA.words[0] = item->kinds.mem_xi_write.value[0];
      DATA.words[1] = item->kinds.mem_xi_write.value[1];
      DATA.words[2] = item->kinds.mem_xi_write.value[2];
      DATA.words[3] = item->kinds.mem_xi_write.value[3];
      break;
    case CGEN_FN_MEM_QI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.fn_mem_qi_write.value;
      break;
    case CGEN_FN_MEM_HI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.fn_mem_hi_write.value;
      break;
    case CGEN_FN_MEM_SI_WRITE:
      DATA.length = 1;
      DATA.words[0] = item->kinds.fn_mem_si_write.value;
      break;
    case CGEN_FN_MEM_DI_WRITE:
      DATA.length = 2;
      DATA.words[0] = item->kinds.fn_mem_di_write.value >> 32;
      DATA.words[1] = item->kinds.fn_mem_di_write.value;
      break;
    case CGEN_FN_MEM_DF_WRITE:
      DATA.length = 2;
      DATA.words[0] = item->kinds.fn_mem_df_write.value >> 32;
      DATA.words[1] = item->kinds.fn_mem_df_write.value;
      break;
    case CGEN_FN_MEM_XI_WRITE:
      DATA.length = 4;
      DATA.words[0] = item->kinds.fn_mem_xi_write.value[0];
      DATA.words[1] = item->kinds.fn_mem_xi_write.value[1];
      DATA.words[2] = item->kinds.fn_mem_xi_write.value[2];
      DATA.words[3] = item->kinds.fn_mem_xi_write.value[3];
      break;
    default:
      {
	SIM_DESC sd = CPU_STATE (current_cpu);
	IADDR pc = CPU_PC_GET (current_cpu);
	sim_engine_abort (sd, current_cpu, pc,
			  "unknown write kind during save for interrupt\n");
      }
      break;
    }
#undef DATA
}
/* Handle TRA and TIRA insns.  NUM is the trap vector derived from
   BASE+OFFSET.  In the operating environment the trap is queued as a
   software interrupt; otherwise syscalls and breakpoints are emulated
   directly, and (when configured) some traps dump the registers.  */
void
frv_itrap (SIM_CPU *current_cpu, PCADDR pc, USI base, SI offset)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  host_callback *cb = STATE_CALLBACK (sd);
  /* Trap numbers live in the range 0x80..0xff.  */
  USI num = ((base + offset) & 0x7f) + 0x80;

#ifdef SIM_HAVE_BREAKPOINTS
  /* Check for breakpoints "owned" by the simulator first, regardless
     of --environment.  */
  if (num == TRAP_BREAKPOINT)
    {
      /* First try sim-break.c.  If it's a breakpoint the simulator
	 "owns" it doesn't return.  Otherwise it returns and let's us
	 try.  */
      sim_handle_breakpoint (sd, current_cpu, pc);
      /* Fall through.  */
    }
#endif

  if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
    {
      frv_queue_software_interrupt (current_cpu, num);
      return;
    }

  switch (num)
    {
    case TRAP_SYSCALL :
      {
	CB_SYSCALL s;
	CB_SYSCALL_INIT (&s);
	/* Syscall ABI: number in gr7, arguments in gr8-gr10.  */
	s.func = GET_H_GR (7);
	s.arg1 = GET_H_GR (8);
	s.arg2 = GET_H_GR (9);
	s.arg3 = GET_H_GR (10);
	if (s.func == TARGET_SYS_exit)
	  {
	    sim_engine_halt (sd, current_cpu, NULL, pc, sim_exited, s.arg1);
	  }
	s.p1 = (PTR) sd;
	s.p2 = (PTR) current_cpu;
	s.read_mem = syscall_read_mem;
	s.write_mem = syscall_write_mem;
	cb_syscall (cb, &s);
	/* Results in gr8/gr9, error code in gr10.  */
	SET_H_GR (8, s.result);
	SET_H_GR (9, s.result2);
	SET_H_GR (10, s.errcode);
	break;
      }

    case TRAP_BREAKPOINT:
      sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP);
      break;

    /* Add support for dumping registers, either at fixed traps, or all
       unknown traps if configured with --enable-sim-trapdump.  */
    default:
#if !TRAPDUMP
      frv_queue_software_interrupt (current_cpu, num);
      return;
#endif
      /* With TRAPDUMP, "default" falls through into the register-dump
	 code below, as do the optional fixed REGDUMP trap vectors.  */

#ifdef TRAP_REGDUMP1
    case TRAP_REGDUMP1:
#endif
#ifdef TRAP_REGDUMP2
    case TRAP_REGDUMP2:
#endif

#if TRAPDUMP || (defined (TRAP_REGDUMP1)) || (defined (TRAP_REGDUMP2))
      {
	char buf[256];
	int i, j;  /* NOTE(review): j appears unused in this block.  */

	buf[0] = 0;
	/* Try to symbolize the PC into "(function line N)" form when
	   it falls inside the program's text section.  */
	if (STATE_TEXT_SECTION (sd)
	    && pc >= STATE_TEXT_START (sd)
	    && pc < STATE_TEXT_END (sd))
	  {
	    const char *pc_filename = (const char *)0;
	    const char *pc_function = (const char *)0;
	    unsigned int pc_linenum = 0;

	    if (bfd_find_nearest_line (STATE_PROG_BFD (sd),
				       STATE_TEXT_SECTION (sd),
				       (struct bfd_symbol **) 0,
				       pc - STATE_TEXT_START (sd),
				       &pc_filename, &pc_function,
				       &pc_linenum)
		&& (pc_function || pc_filename))
	      {
		char *p = buf+2;
		buf[0] = ' ';
		buf[1] = '(';
		if (pc_function)
		  {
		    strcpy (p, pc_function);
		    p += strlen (p);
		  }
		else
		  {
		    /* No function name; use the file's basename.  */
		    char *q = (char *) strrchr (pc_filename, '/');
		    strcpy (p, (q) ? q+1 : pc_filename);
		    p += strlen (p);
		  }

		if (pc_linenum)
		  {
		    /* NOTE(review): pc_linenum is unsigned but printed
		       with %d -- harmless for sane line numbers, but
		       %u would be strictly correct.  */
		    sprintf (p, " line %d", pc_linenum);
		    p += strlen (p);
		  }

		p[0] = ')';
		p[1] = '\0';
		/* NOTE(review): signed ptrdiff compared against
		   unsigned sizeof; fine while positive, but worth a
		   cast to be explicit.  */
		if ((p+1) - buf > sizeof (buf))
		  abort ();
	      }
	  }

	sim_io_printf (sd,
		       "\nRegister dump, pc = 0x%.8x%s, base = %u, offset = %d\n",
		       (unsigned)pc, buf, (unsigned)base, (int)offset);

	/* Dump the 64 general registers, 8 per line, skipping all-zero
	   rows.  */
	for (i = 0; i < 64; i += 8)
	  {
	    long g0 = (long)GET_H_GR (i);
	    long g1 = (long)GET_H_GR (i+1);
	    long g2 = (long)GET_H_GR (i+2);
	    long g3 = (long)GET_H_GR (i+3);
	    long g4 = (long)GET_H_GR (i+4);
	    long g5 = (long)GET_H_GR (i+5);
	    long g6 = (long)GET_H_GR (i+6);
	    long g7 = (long)GET_H_GR (i+7);

	    if ((g0 | g1 | g2 | g3 | g4 | g5 | g6 | g7) != 0)
	      sim_io_printf (sd,
			     "\tgr%02d - gr%02d: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
			     i, i+7, g0, g1, g2, g3, g4, g5, g6, g7);
	  }

	/* Likewise for the 64 floating-point registers.  */
	for (i = 0; i < 64; i += 8)
	  {
	    long f0 = (long)GET_H_FR (i);
	    long f1 = (long)GET_H_FR (i+1);
	    long f2 = (long)GET_H_FR (i+2);
	    long f3 = (long)GET_H_FR (i+3);
	    long f4 = (long)GET_H_FR (i+4);
	    long f5 = (long)GET_H_FR (i+5);
	    long f6 = (long)GET_H_FR (i+6);
	    long f7 = (long)GET_H_FR (i+7);

	    if ((f0 | f1 | f2 | f3 | f4 | f5 | f6 | f7) != 0)
	      sim_io_printf (sd,
			     "\tfr%02d - fr%02d: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
			     i, i+7, f0, f1, f2, f3, f4, f5, f6, f7);
	  }

	/* SPRs: 272=LR, 273=LCR, 256=CC, 263=CCC.  */
	sim_io_printf (sd,
		       "\tlr/lcr/cc/ccc: 0x%.8lx 0x%.8lx 0x%.8lx 0x%.8lx\n",
		       (long)GET_H_SPR (272),
		       (long)GET_H_SPR (273),
		       (long)GET_H_SPR (256),
		       (long)GET_H_SPR (263));
      }
      break;
#endif
    }
}
/* Handle an individual interrupt. */ static void handle_interrupt (SIM_CPU *current_cpu, IADDR pc) { struct frv_interrupt *interrupt; int writeback_done = 0; while (1) { /* Interrupts are queued in priority order with the highest priority last. */ int index = frv_interrupt_state.queue_index - 1; struct frv_interrupt_queue_element *item = & frv_interrupt_state.queue[index]; interrupt = & frv_interrupt_table[item->kind]; switch (interrupt->iclass) { case FRV_EXTERNAL_INTERRUPT: /* Perform writeback first. This may cause a higher priority interrupt. */ if (! writeback_done) { frvbf_perform_writeback (current_cpu); writeback_done = 1; continue; } frv_external_interrupt (current_cpu, item, pc); return; case FRV_SOFTWARE_INTERRUPT: frv_interrupt_state.queue_index = index; frv_software_interrupt (current_cpu, item, pc); return; case FRV_PROGRAM_INTERRUPT: /* If the program interrupt is not strict (imprecise), then perform writeback first. This may, in turn, cause a higher priority interrupt. */ if (! interrupt->precise && ! writeback_done) { frv_interrupt_state.imprecise_interrupt = item; frvbf_perform_writeback (current_cpu); writeback_done = 1; continue; } frv_interrupt_state.queue_index = index; frv_program_interrupt (current_cpu, item, pc); return; case FRV_BREAK_INTERRUPT: frv_interrupt_state.queue_index = index; frv_break_interrupt (current_cpu, interrupt, pc); return; case FRV_RESET_INTERRUPT: break; default: break; } frv_interrupt_state.queue_index = index; break; /* out of loop. */ } /* We should never get here. */ { SIM_DESC sd = CPU_STATE (current_cpu); sim_engine_abort (sd, current_cpu, pc, "interrupt class not supported %d\n", interrupt->iclass); } }
/* Return from trap. */ USI frv_rett (SIM_CPU *current_cpu, PCADDR pc, BI debug_field) { USI new_pc; /* if (normal running mode and debug_field==0 PC=PCSR PSR.ET=1 PSR.S=PSR.PS else if (debug running mode and debug_field==1) PC=(BPCSR) PSR.ET=BPSR.BET PSR.S=BPSR.BS change to normal running mode */ int psr_s = GET_H_PSR_S (); int psr_et = GET_H_PSR_ET (); /* Check for exceptions in the priority order listed in the FRV Architecture Volume 2. */ if (! psr_s) { /* Halt if PSR.ET is not set. See chapter 6 of the LSI. */ if (! psr_et) { SIM_DESC sd = CPU_STATE (current_cpu); sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP); } /* privileged_instruction interrupt will have already been queued by frv_detect_insn_access_interrupts. */ new_pc = pc + 4; } else if (psr_et) { /* Halt if PSR.S is set. See chapter 6 of the LSI. */ if (psr_s) { SIM_DESC sd = CPU_STATE (current_cpu); sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP); } frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION); new_pc = pc + 4; } else if (! CPU_DEBUG_STATE (current_cpu) && debug_field == 0) { USI psr = GET_PSR (); /* Return from normal running state. */ new_pc = GET_H_SPR (H_SPR_PCSR); SET_PSR_ET (psr, 1); SET_PSR_S (psr, GET_PSR_PS (psr)); sim_queue_fn_si_write (current_cpu, frvbf_h_spr_set, H_SPR_PSR, psr); } else if (CPU_DEBUG_STATE (current_cpu) && debug_field == 1) { USI psr = GET_PSR (); /* Return from debug state. */ new_pc = GET_H_SPR (H_SPR_BPCSR); SET_PSR_ET (psr, GET_H_BPSR_BET ()); SET_PSR_S (psr, GET_H_BPSR_BS ()); sim_queue_fn_si_write (current_cpu, frvbf_h_spr_set, H_SPR_PSR, psr); CPU_DEBUG_STATE (current_cpu) = 0; } else new_pc = pc + 4; return new_pc; }
void scache_print_profile (SIM_CPU *cpu, int verbose) { SIM_DESC sd = CPU_STATE (cpu); unsigned long hits = CPU_SCACHE_HITS (cpu); unsigned long misses = CPU_SCACHE_MISSES (cpu); char buf[20]; unsigned long max_val; unsigned long *lengths; int i; if (CPU_SCACHE_SIZE (cpu) == 0) return; sim_io_printf (sd, "Simulator Cache Statistics\n\n"); /* One could use PROFILE_LABEL_WIDTH here. I chose not to. */ sim_io_printf (sd, " Cache size: %s\n", sim_add_commas (buf, sizeof (buf), CPU_SCACHE_SIZE (cpu))); sim_io_printf (sd, " Hits: %s\n", sim_add_commas (buf, sizeof (buf), hits)); sim_io_printf (sd, " Misses: %s\n", sim_add_commas (buf, sizeof (buf), misses)); if (hits + misses != 0) sim_io_printf (sd, " Hit rate: %.2f%%\n", ((double) hits / ((double) hits + (double) misses)) * 100); #if WITH_SCACHE_PBB sim_io_printf (sd, "\n"); sim_io_printf (sd, " Hash table size: %s\n", sim_add_commas (buf, sizeof (buf), CPU_SCACHE_NUM_HASH_CHAINS (cpu))); sim_io_printf (sd, " Max hash list length: %s\n", sim_add_commas (buf, sizeof (buf), CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu))); sim_io_printf (sd, " Max insn chain length: %s\n", sim_add_commas (buf, sizeof (buf), CPU_SCACHE_MAX_CHAIN_LENGTH (cpu))); sim_io_printf (sd, " Cache full flushes: %s\n", sim_add_commas (buf, sizeof (buf), CPU_SCACHE_FULL_FLUSHES (cpu))); sim_io_printf (sd, "\n"); if (verbose) { sim_io_printf (sd, " Insn chain lengths:\n\n"); max_val = 0; lengths = CPU_SCACHE_CHAIN_LENGTHS (cpu); for (i = 1; i < CPU_SCACHE_MAX_CHAIN_LENGTH (cpu); ++i) if (lengths[i] > max_val) max_val = lengths[i]; for (i = 1; i < CPU_SCACHE_MAX_CHAIN_LENGTH (cpu); ++i) { sim_io_printf (sd, " %2d: %*s: ", i, max_val < 10000 ? 5 : 10, sim_add_commas (buf, sizeof (buf), lengths[i])); sim_profile_print_bar (sd, cpu, PROFILE_HISTOGRAM_WIDTH, lengths[i], max_val); sim_io_printf (sd, "\n"); } sim_io_printf (sd, "\n"); } #endif /* WITH_SCACHE_PBB */ }
USI m32r_trap (SIM_CPU *current_cpu, PCADDR pc, int num) { SIM_DESC sd = CPU_STATE (current_cpu); host_callback *cb = STATE_CALLBACK (sd); #ifdef SIM_HAVE_BREAKPOINTS /* Check for breakpoints "owned" by the simulator first, regardless of --environment. */ if (num == TRAP_BREAKPOINT) { /* First try sim-break.c. If it's a breakpoint the simulator "owns" it doesn't return. Otherwise it returns and let's us try. */ sim_handle_breakpoint (sd, current_cpu, pc); /* Fall through. */ } #endif if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT) { /* The new pc is the trap vector entry. We assume there's a branch there to some handler. Use cr5 as EVB (EIT Vector Base) register. */ /* USI new_pc = EIT_TRAP_BASE_ADDR + num * 4; */ USI new_pc = m32rbf_h_cr_get (current_cpu, 5) + 0x40 + num * 4; return new_pc; } switch (num) { case TRAP_SYSCALL : { long result, result2; int errcode; sim_syscall_multi (current_cpu, m32rbf_h_gr_get (current_cpu, 0), m32rbf_h_gr_get (current_cpu, 1), m32rbf_h_gr_get (current_cpu, 2), m32rbf_h_gr_get (current_cpu, 3), m32rbf_h_gr_get (current_cpu, 4), &result, &result2, &errcode); m32rbf_h_gr_set (current_cpu, 2, errcode); m32rbf_h_gr_set (current_cpu, 0, result); m32rbf_h_gr_set (current_cpu, 1, result2); break; } case TRAP_BREAKPOINT: sim_engine_halt (sd, current_cpu, NULL, pc, sim_stopped, SIM_SIGTRAP); break; case TRAP_FLUSH_CACHE: /* Do nothing. */ break; default : { /* USI new_pc = EIT_TRAP_BASE_ADDR + num * 4; */ /* Use cr5 as EVB (EIT Vector Base) register. */ USI new_pc = m32rbf_h_cr_get (current_cpu, 5) + 0x40 + num * 4; return new_pc; } } /* Fake an "rte" insn. */ /* FIXME: Should duplicate all of rte processing. */ return (pc & -4) + 4; }