/* Abort the simulation with the printf-style message FMT.  When SD is
   available the message is routed through the sim_io layer and
   sim_io_error terminates the run; with no SD we can only fall back to
   stderr and a raw abort().  */
void
sim_engine_abort (SIM_DESC sd, sim_cpu *cpu, sim_cia cia,
		  const char *fmt, ...)
{
  va_list args;

  ASSERT (sd == NULL || STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);

  va_start (args, fmt);
  if (sd == NULL)
    {
      /* No simulator state: report on stderr and die hard.  */
      vfprintf (stderr, fmt, args);
      va_end (args);
      fprintf (stderr, "\n");
      abort ();
    }

  sim_io_evprintf (sd, fmt, args);
  va_end (args);
  /* sim_io_error does not return.  */
  sim_io_error (sd, "\n");
}
/* Halt the simulation by long-jumping back to the engine's run loop,
   recording REASON/SIGRC and the last/next cpu in the engine state
   first.  Must only be called while the engine has a jmpbuf installed;
   otherwise there is nothing to jump to and we abort.  */
void
sim_engine_halt (SIM_DESC sd,
		 sim_cpu *last_cpu,
		 sim_cpu *next_cpu, /* NULL - use default */
		 sim_cia cia,
		 enum sim_stop reason,
		 int sigrc)
{
  sim_engine *engine = STATE_ENGINE (sd);
  jmp_buf *halt_buf;

  ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);

  if (engine->jmpbuf == NULL)
    {
      /* Called outside of sim_engine_run: nowhere to jump.  */
      sim_io_error (sd, "sim_halt - bad long jump");
      abort ();
    }

  /* Record the stop state before transferring control.  */
  halt_buf = engine->jmpbuf;
  engine->last_cpu = last_cpu;
  engine->next_cpu = next_cpu;
  engine->reason = reason;
  engine->sigrc = sigrc;

  SIM_ENGINE_HALT_HOOK (sd, last_cpu, cia);

#ifdef SIM_CPU_EXCEPTION_SUSPEND
  /* Give the target a chance to stash exception state, except on a
     clean exit.  */
  if (last_cpu != NULL && reason != sim_exited)
    SIM_CPU_EXCEPTION_SUSPEND (sd, last_cpu, sim_signal_to_host (sd, sigrc));
#endif

  longjmp (*halt_buf, sim_engine_halt_jmpval);
}
/* va_list flavour of sim_engine_abort: print FMT/AP and terminate.
   With no SD, report to stderr and abort.  With an SD but no engine
   jmpbuf, fall back to sim_io_error.  Otherwise halt the engine with
   SIM_SIGABRT so the run loop unwinds cleanly.  */
void
sim_engine_vabort (SIM_DESC sd, sim_cpu *cpu, sim_cia cia,
		   const char *fmt, va_list ap)
{
  ASSERT (sd == NULL || STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);

  if (sd == NULL)
    {
      /* No simulator state at all -- stderr is all we have.  */
      vfprintf (stderr, fmt, ap);
      fprintf (stderr, "\nQuit\n");
      abort ();
    }

  /* Both remaining paths emit the message the same way.  */
  sim_io_evprintf (sd, fmt, ap);
  sim_io_eprintf (sd, "\n");

  if (STATE_ENGINE (sd)->jmpbuf == NULL)
    {
      /* Engine not running: cannot halt via longjmp.  */
      sim_io_error (sd, "Quit Simulator");
      abort ();
    }

  sim_engine_halt (sd, cpu, NULL, cia, sim_stopped, SIM_SIGABRT);
}
/* Flush the simulated program's stderr stream via the host callback,
   but only when the stdio-backed channel is in use.  */
void
sim_io_flush_stderr (SIM_DESC sd)
{
  if (CURRENT_STDIO == DO_USE_STDIO)
    STATE_CALLBACK (sd)->flush_stderr (STATE_CALLBACK (sd));
  else if (CURRENT_STDIO != DONT_USE_STDIO)
    /* Catch a configuration value this code was never taught about.  */
    sim_io_error (sd, "sim_io_flush_stderr: unaccounted switch\n");
}
/* Write LEN bytes from BUF to the simulated program's stdout, routed
   either through the callback's dedicated write_stdout hook or through
   its generic write on file descriptor 1.  Returns the callback's
   result, or 0 after reporting an unknown stdio configuration.  */
int
sim_io_write_stdout (SIM_DESC sd, const char *buf, int len)
{
  if (CURRENT_STDIO == DO_USE_STDIO)
    return STATE_CALLBACK (sd)->write_stdout (STATE_CALLBACK (sd), buf, len);

  if (CURRENT_STDIO == DONT_USE_STDIO)
    return STATE_CALLBACK (sd)->write (STATE_CALLBACK (sd), 1, buf, len);

  /* Unknown configuration value.  */
  sim_io_error (sd, "sim_io_write_stdout: unaccounted switch\n");
  return 0;
}
/* Read up to LEN bytes into BUF from the simulated program's stdin,
   routed either through the callback's dedicated read_stdin hook or
   through its generic read on file descriptor 0.  Returns the
   callback's result, or 0 after reporting an unknown configuration.  */
int
sim_io_read_stdin (SIM_DESC sd, char *buf, int len)
{
  if (CURRENT_STDIO == DO_USE_STDIO)
    return STATE_CALLBACK (sd)->read_stdin (STATE_CALLBACK (sd), buf, len);

  if (CURRENT_STDIO == DONT_USE_STDIO)
    return STATE_CALLBACK (sd)->read (STATE_CALLBACK (sd), 0, buf, len);

  /* Unknown configuration value.  */
  sim_io_error (sd, "sim_io_read_stdin: unaccounted switch\n");
  return 0;
}
/* Restart the simulation by long-jumping back into the engine's run
   loop with the restart jump value, after recording the last/next cpu.
   Requires that the engine has a jmpbuf installed.  */
void
sim_engine_restart (SIM_DESC sd, sim_cpu *last_cpu, sim_cpu *next_cpu,
		    sim_cia cia)
{
  sim_engine *engine = STATE_ENGINE (sd);
  jmp_buf *restart_buf;

  ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER);

  if (engine->jmpbuf == NULL)
    {
      /* Called outside of sim_engine_run: nothing to jump to.  */
      sim_io_error (sd, "sim_restart - bad long jump");
      return;
    }

  restart_buf = engine->jmpbuf;
  engine->last_cpu = last_cpu;
  engine->next_cpu = next_cpu;
  SIM_ENGINE_RESTART_HOOK (sd, last_cpu, cia);
  longjmp (*restart_buf, sim_engine_restart_jmpval);
}
/* Attach a memory region [ADDR, ADDR+NR_BYTES) (or a MODULO-sized
   wrap-around region) at LEVEL/SPACE and append a sim_memopt record to
   the list rooted at *ENTRY so the region can later be uninstalled.

   If BUFFER is non-NULL it is used as backing store directly and
   ownership passes to the record (sim_memory_uninstall frees it).
   Otherwise backing store is either mmap'd from mmap_next_fd (when set
   and HAVE_MMAP) or heap-allocated with alignment padding.  The region
   is optionally pre-filled with fill_byte_value when fill_byte_flag is
   set.  Returns the newly appended record.

   NOTE(review): relies on file-scope state mmap_next_fd,
   fill_byte_flag and fill_byte_value being set up by the option
   parser before this is called -- confirm against callers.  */
static sim_memopt *
do_memopt_add (SIM_DESC sd,
	       int level,
	       int space,
	       address_word addr,
	       address_word nr_bytes,
	       unsigned modulo,
	       sim_memopt **entry,
	       void *buffer)
{
  void *fill_buffer;
  unsigned fill_length;
  void *free_buffer;
  unsigned long free_length;

  if (buffer != NULL)
    {
      /* Buffer already given.  sim_memory_uninstall will free it. */
      sim_core_attach (sd, NULL,
		       level, access_read_write_exec, space,
		       addr, nr_bytes, modulo, NULL, buffer);

      free_buffer = buffer;
      free_length = 0;
      fill_buffer = buffer;
      fill_length = (modulo == 0) ? nr_bytes : modulo;
    }
  else
    {
      /* Allocate new well-aligned buffer, just as sim_core_attach(). */
      void *aligned_buffer;
      /* Pad so that ADDR's offset within an unsigned64 is preserved in
	 the allocation.  */
      int padding = (addr % sizeof (unsigned64));
      unsigned long bytes;

#ifdef HAVE_MMAP
      struct stat s;

      if (mmap_next_fd >= 0)
	{
	  /* Check that given file is big enough. */
	  int rc = fstat (mmap_next_fd, &s);

	  if (rc < 0)
	    sim_io_error (sd, "Error, unable to stat file: %s\n",
			  strerror (errno));

	  /* Autosize the mapping to the file length. */
	  if (nr_bytes == 0)
	    nr_bytes = s.st_size;
	}
#endif

      bytes = (modulo == 0 ? nr_bytes : modulo) + padding;

      free_buffer = NULL;
      free_length = bytes;

#ifdef HAVE_MMAP
      /* Memory map or malloc(). */
      if (mmap_next_fd >= 0)
	{
	  /* Some kernels will SIGBUS the application if mmap'd file
	     is not large enough.  */
	  if (s.st_size < bytes)
	    {
	      sim_io_error (sd,
			    "Error, cannot confirm that mmap file is large enough "
			    "(>= %ld bytes)\n", bytes);
	    }

	  free_buffer = mmap (0, bytes, PROT_READ|PROT_WRITE, MAP_SHARED,
			      mmap_next_fd, 0);
	  if (free_buffer == 0 || free_buffer == (char*)-1) /* MAP_FAILED */
	    {
	      sim_io_error (sd, "Error, cannot mmap file (%s).\n",
			    strerror (errno));
	    }
	}
#endif

      /* Need heap allocation? */
      if (free_buffer == NULL)
	{
	  /* If filling with non-zero value, do not use clearing allocator.  */
	  if (fill_byte_flag && fill_byte_value != 0)
	    free_buffer = xmalloc (bytes); /* don't clear */
	  else
	    free_buffer = zalloc (bytes); /* clear */
	}

      /* The region proper starts past the alignment padding.  */
      aligned_buffer = (char*) free_buffer + padding;

      sim_core_attach (sd, NULL,
		       level, access_read_write_exec, space,
		       addr, nr_bytes, modulo, NULL, aligned_buffer);

      fill_buffer = aligned_buffer;
      fill_length = (modulo == 0) ? nr_bytes : modulo;

      /* If we just used a clearing allocator, and are about to fill with
	 zero, truncate the redundant fill operation. */
      if (fill_byte_flag && fill_byte_value == 0)
	fill_length = 1; /* avoid boundary length=0 case */
    }

  /* Optionally pre-fill the attached region.  */
  if (fill_byte_flag)
    {
      ASSERT (fill_buffer != 0);
      memset ((char*) fill_buffer, fill_byte_value, fill_length);
    }

  /* Append a fresh record at the tail of the memopt list.  */
  while ((*entry) != NULL)
    entry = &(*entry)->next;
  (*entry) = ZALLOC (sim_memopt);
  (*entry)->level = level;
  (*entry)->space = space;
  (*entry)->addr = addr;
  (*entry)->nr_bytes = nr_bytes;
  (*entry)->modulo = modulo;
  (*entry)->buffer = free_buffer;

  /* Record memory unmapping info.  A non-zero munmap_length marks the
     buffer as mmap'd rather than heap-allocated.  */
  if (mmap_next_fd >= 0)
    {
      (*entry)->munmap_length = free_length;
      close (mmap_next_fd);
      mmap_next_fd = -1;
    }
  else
    (*entry)->munmap_length = 0;

  return (*entry);
}
/* Return from the event/interrupt level IVG (RTE/RTN/RTX/RTI), loading
   PC from the corresponding return register.  IVG may be -1, meaning
   "the current level".  In non-OPERATING environments this is just a
   branch to the return register; in OPERATING mode it also validates
   the return against the current CEC level, updates IPEND, handles the
   supervisor-to-user transition, and re-checks pending interrupts.  */
void
cec_return (SIM_CPU *cpu, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_cec *cec;
  bool snen;
  int curr_ivg;
  bu32 oldpc, newpc;

  oldpc = PCREG;

  BFIN_CPU_STATE.did_jump = true;
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    {
      /* User-mode environment: no CEC bookkeeping, just branch.  */
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);

  /* XXX: This isn't entirely correct ...  */
  cec->ipend &= ~IVG_EMU_B;

  curr_ivg = _cec_get_ivg (cec);
  if (curr_ivg == -1)
    curr_ivg = IVG_USER;
  if (ivg == -1)
    ivg = curr_ivg;

  TRACE_EVENTS (cpu, "returning from EVT%i (should be EVT%i)",
		curr_ivg, ivg);

  /* Not allowed to return from usermode.  */
  if (curr_ivg == IVG_USER)
    cec_exception (cpu, VEC_ILL_RES);

  if (ivg > IVG15 || ivg < 0)
    sim_io_error (sd, "%s: ivg %i out of range !", __func__, ivg);

  _cec_require_supervisor (cpu, cec);

  /* Each return insn is only legal at its matching level.  Note the
     deliberate ordering: IVG_IRPTEN is placed after default so the
     default arm covers all remaining RTI levels.  */
  switch (ivg)
    {
    case IVG_EMU:
      /* RTE -- only valid in emulation mode.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EMU)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_NMI:
      /* RTN -- only valid in NMI.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_NMI)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_EVX:
      /* RTX -- only valid in exception.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EVX)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    default:
      /* RTI -- not valid in emulation, nmi, exception, or user.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg == IVG_EMU || curr_ivg == IVG_NMI
	  || curr_ivg == IVG_EVX || curr_ivg == IVG_USER)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_IRPTEN:
      /* XXX: Is this even possible ?  */
      excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
      break;
    }
  newpc = cec_read_ret_reg (cpu, ivg);

  /* XXX: Does this nested trick work on EMU/NMI/EVX ?  */
  /* Bit 0 of the return address flags self-nested interrupt state.  */
  snen = (newpc & 1);
  /* XXX: Delayed clear shows bad PCREG register trace above ?  */
  SET_PCREG (newpc & ~1);
  TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (from EVT%i)", ivg);

  /* Update ipend after the TRACE_BRANCH so dv-bfin_trace knows current
     CEC state wrt overflow.  */
  if (!snen)
    cec->ipend &= ~(1 << ivg);

  /* Disable global interrupt mask to let any interrupt take over, but
     only when we were already in a RTI level.  Only way we could have
     raised at that point is if it was cleared in the first place.  */
  if (ivg >= IVG_IVHW || ivg == IVG_RST)
    cec_irpten_disable (cpu, cec);

  /* When going from super to user, we clear LSB in LB regs in case
     it was set on the transition up.  Also need to load SP alias
     with USP.  */
  if (_cec_get_ivg (cec) == -1)
    {
      int i;

      for (i = 0; i < 2; ++i)
	if (LBREG (i) & 1)
	  SET_LBREG (i, LBREG (i) & ~1);
      SET_KSPREG (SPREG);
      SET_SPREG (USPREG);
    }

  /* Check for pending interrupts before we return to usermode.  */
  _cec_check_pending (cpu, cec);
}
/* Raise event/interrupt IVG on the core event controller, or, when IVG
   is -1, scan ILAT & IMASK for the highest-priority latched interrupt
   that outranks the current level.  Exceptions and below (IVG <=
   IVG_EVX) are always processed (with double-fault detection); other
   levels are gated by IRPTEN, IMASK, and self-nesting (SNEN).  On a
   taken transition this sets the appropriate return register, jumps to
   the event vector, and handles the user-to-supervisor switch.

   NOTE(review): the process_int label is jumped into from the
   exception path above -- control-flow is intentionally non-local
   here; do not restructure casually.  */
static void
_cec_raise (SIM_CPU *cpu, struct bfin_cec *cec, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int curr_ivg = _cec_get_ivg (cec);
  bool snen;
  bool irpten;

  TRACE_EVENTS (cpu, "processing request for EVT%i while at EVT%i",
		ivg, curr_ivg);

  irpten = (cec->ipend & IVG_IRPTEN_B);
  snen = (SYSCFGREG & SYSCFG_SNEN);

  if (curr_ivg == -1)
    curr_ivg = IVG_USER;

  /* Just check for higher latched interrupts.  */
  if (ivg == -1)
    {
      if (irpten)
	goto done; /* All interrupts are masked anyways.  */

      ivg = __cec_get_ivg (cec->ilat & cec->imask);
      if (ivg < 0)
	goto done; /* Nothing latched.  */

      if (ivg > curr_ivg)
	goto done; /* Nothing higher latched.  */

      if (!snen && ivg == curr_ivg)
	goto done; /* Self nesting disabled.  */

      /* Still here, so fall through to raise to higher pending.  */
    }

  cec->ilat |= (1 << ivg);

  if (ivg <= IVG_EVX)
    {
      /* These two are always processed.  */
      if (ivg == IVG_EMU || ivg == IVG_RST)
	goto process_int;

      /* Anything lower might trigger a double fault.  */
      if (curr_ivg <= ivg)
	{
	  /* Double fault ! :( */
	  SET_EXCAUSE (VEC_UNCOV);
	  /* XXX: SET_RETXREG (...);  */
	  sim_io_error (sd, "%s: double fault at 0x%08x ! :(",
			__func__, PCREG);
	  excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
	}

      /* No double fault -> always process.  */
      goto process_int;
    }
  else if (irpten && curr_ivg != IVG_USER)
    {
      /* Interrupts are globally masked.  */
    }
  else if (!(cec->imask & (1 << ivg)))
    {
      /* This interrupt is masked.  */
    }
  else if (ivg < curr_ivg || (snen && ivg == curr_ivg))
    {
      /* Do transition!  */
      bu32 oldpc;

 process_int:
      cec->ipend |= (1 << ivg);
      cec->ilat &= ~(1 << ivg);

      /* Interrupts are processed in between insns which means the return
	 point is the insn-to-be-executed (which is the current PC).
	 But exceptions are handled while executing an insn, so we may
	 have to advance the PC ourselves when setting RETX.
	 XXX: Advancing the PC should only be for "service" exceptions,
	 and handling them after executing the insn should be OK, which
	 means we might be able to use the event interface for it.  */

      oldpc = PCREG;
      switch (ivg)
	{
	case IVG_EMU:
	  /* Signal the JTAG ICE.  */
	  /* XXX: what happens with 'raise 0' ?  */
	  SET_RETEREG (oldpc);
	  excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
	  /* XXX: Need an easy way for gdb to signal it isnt here.  */
	  cec->ipend &= ~IVG_EMU_B;
	  break;
	case IVG_RST:
	  /* Have the core reset simply exit (i.e. "shutdown").  */
	  excp_to_sim_halt (sim_exited, 0);
	  break;
	case IVG_NMI:
	  /* XXX: Should check this.  */
	  SET_RETNREG (oldpc);
	  break;
	case IVG_EVX:
	  /* Non-service exceptions point to the excepting instruction.  */
	  if (EXCAUSE >= 0x20)
	    SET_RETXREG (oldpc);
	  else
	    {
	      bu32 nextpc = hwloop_get_next_pc (cpu, oldpc, INSN_LEN);
	      SET_RETXREG (nextpc);
	    }
	  break;
	case IVG_IRPTEN:
	  /* XXX: what happens with 'raise 4' ?  */
	  sim_io_error (sd, "%s: what to do with 'raise 4' ?", __func__);
	  break;
	default:
	  /* RETI's LSB records self-nesting for the later return.  */
	  SET_RETIREG (oldpc | (ivg == curr_ivg ? 1 : 0));
	  break;
	}

      /* If EVT_OVERRIDE is in effect (IVG7+), use the reset address.  */
      if ((cec->evt_override & 0xff80) & (1 << ivg))
	SET_PCREG (cec_get_reset_evt (cpu));
      else
	SET_PCREG (cec_get_evt (cpu, ivg));

      TRACE_BRANCH (cpu, oldpc, PCREG, -1,
		    "CEC changed PC (to EVT%i):", ivg);
      BFIN_CPU_STATE.did_jump = true;

      /* Enable the global interrupt mask upon interrupt entry.  */
      if (ivg >= IVG_IVHW)
	cec_irpten_enable (cpu, cec);
    }

  /* When moving between states, don't let internal states bleed
     through.  */
  DIS_ALGN_EXPT &= ~1;

  /* When going from user to super, we set LSB in LB regs to avoid
     misbehavior and/or malicious code.  Also need to load SP alias
     with KSP.  */
  if (curr_ivg == IVG_USER)
    {
      int i;

      for (i = 0; i < 2; ++i)
	if (!(LBREG (i) & 1))
	  SET_LBREG (i, LBREG (i) | 1);
      SET_USPREG (SPREG);
      SET_SPREG (KSPREG);
    }

 done:
  TRACE_EVENTS (cpu, "now at EVT%i", _cec_get_ivg (cec));
}
void sim_core_attach (SIM_DESC sd, sim_cpu *cpu, int level, unsigned mapmask, int space, address_word addr, address_word nr_bytes, unsigned modulo, #if WITH_HW struct hw *client, #else device *client, #endif void *optional_buffer) { sim_core *memory = STATE_CORE (sd); unsigned map; void *buffer; void *free_buffer; /* check for for attempt to use unimplemented per-processor core map */ if (cpu != NULL) sim_io_error (sd, "sim_core_map_attach - processor specific memory map not yet supported"); /* verify modulo memory */ if (!WITH_MODULO_MEMORY && modulo != 0) { #if (WITH_DEVICES) device_error (client, "sim_core_attach - internal error - modulo memory disabled"); #endif #if (WITH_HW) sim_hw_abort (sd, client, "sim_core_attach - internal error - modulo memory disabled"); #endif sim_io_error (sd, "sim_core_attach - internal error - modulo memory disabled"); } if (client != NULL && modulo != 0) { #if (WITH_DEVICES) device_error (client, "sim_core_attach - internal error - modulo and callback memory conflict"); #endif #if (WITH_HW) sim_hw_abort (sd, client, "sim_core_attach - internal error - modulo and callback memory conflict"); #endif sim_io_error (sd, "sim_core_attach - internal error - modulo and callback memory conflict"); } if (modulo != 0) { unsigned mask = modulo - 1; /* any zero bits */ while (mask >= sizeof (unsigned64)) /* minimum modulo */ { if ((mask & 1) == 0) mask = 0; else mask >>= 1; } if (mask != sizeof (unsigned64) - 1) { #if (WITH_DEVICES) device_error (client, "sim_core_attach - internal error - modulo %lx not power of two", (long) modulo); #endif #if (WITH_HW) sim_hw_abort (sd, client, "sim_core_attach - internal error - modulo %lx not power of two", (long) modulo); #endif sim_io_error (sd, "sim_core_attach - internal error - modulo %lx not power of two", (long) modulo); } }
/* Insert a new mapping covering [ADDR, ADDR+NR_BYTES) into ACCESS_MAP,
   keeping the mapping list sorted by (level, bound).  Exactly one of
   CLIENT (callback/device memory) or BUFFER (raw memory) must be
   given; FREE_BUFFER, when set, is the allocation to release on
   detach.  Overlap with an existing same-level mapping is a fatal
   error.  */
static void
sim_core_map_attach (SIM_DESC sd,
		     sim_core_map *access_map,
		     int level,
		     int space,
		     address_word addr,
		     address_word nr_bytes,
		     unsigned modulo,
#if WITH_HW
		     struct hw *client, /*callback/default*/
#else
		     device *client, /*callback/default*/
#endif
		     void *buffer, /*raw_memory*/
		     void *free_buffer) /*raw_memory*/
{
  /* find the insertion point for this additional mapping and then
     insert */
  sim_core_mapping *next_mapping;
  sim_core_mapping **last_mapping;

  /* Exactly one of client/buffer; free_buffer only with buffer.  */
  SIM_ASSERT ((client == NULL) != (buffer == NULL));
  SIM_ASSERT ((client == NULL) >= (free_buffer != NULL));

  /* actually do occasionally get a zero size map */
  if (nr_bytes == 0)
    {
#if (WITH_DEVICES)
      device_error (client, "called on sim_core_map_attach with size zero");
#endif
#if (WITH_HW)
      sim_hw_abort (sd, client, "called on sim_core_map_attach with size zero");
#endif
      sim_io_error (sd, "called on sim_core_map_attach with size zero");
    }

  /* find the insertion point (between last/next) */
  next_mapping = access_map->first;
  last_mapping = &access_map->first;
  while (next_mapping != NULL
	 && (next_mapping->level < level
	     || (next_mapping->level == level
		 && next_mapping->bound < addr)))
    {
      /* provided levels are the same */
      /* assert: next_mapping->base > all bases before next_mapping */
      /* assert: next_mapping->bound >= all bounds before next_mapping */
      last_mapping = &next_mapping->next;
      next_mapping = next_mapping->next;
    }

  /* check insertion point correct */
  SIM_ASSERT (next_mapping == NULL || next_mapping->level >= level);
  if (next_mapping != NULL && next_mapping->level == level
      && next_mapping->base < (addr + (nr_bytes - 1)))
    {
#if (WITH_DEVICES)
      device_error (client, "memory map %d:0x%lx..0x%lx (%ld bytes) overlaps %d:0x%lx..0x%lx (%ld bytes)",
		    space,
		    (long) addr,
		    (long) (addr + nr_bytes - 1),
		    (long) nr_bytes,
		    next_mapping->space,
		    (long) next_mapping->base,
		    (long) next_mapping->bound,
		    (long) next_mapping->nr_bytes);
#endif
#if WITH_HW
      sim_hw_abort (sd, client, "memory map %d:0x%lx..0x%lx (%ld bytes) overlaps %d:0x%lx..0x%lx (%ld bytes)",
		    space,
		    (long) addr,
		    (long) (addr + (nr_bytes - 1)),
		    (long) nr_bytes,
		    next_mapping->space,
		    (long) next_mapping->base,
		    (long) next_mapping->bound,
		    (long) next_mapping->nr_bytes);
#endif
      sim_io_error (sd, "memory map %d:0x%lx..0x%lx (%ld bytes) overlaps %d:0x%lx..0x%lx (%ld bytes)",
		    space,
		    (long) addr,
		    (long) (addr + (nr_bytes - 1)),
		    (long) nr_bytes,
		    next_mapping->space,
		    (long) next_mapping->base,
		    (long) next_mapping->bound,
		    (long) next_mapping->nr_bytes);
    }

  /* create/insert the new mapping */
  *last_mapping = new_sim_core_mapping (sd,
					level,
					space, addr, nr_bytes, modulo,
					client, buffer, free_buffer);
  (*last_mapping)->next = next_mapping;
}