/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
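/* Note: the 0xfe000000 (insn) and 0xfe400000 (data) windows tested above
   appear to be the cache-as-RAM regions: 32KB per cache on the fr550
   (matching the ranges used in ram_access below) and 16KB per cache in the
   default case.  */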
/* Parse the WAYS[,SETS[,LINESIZE]] argument of a cache option and apply the
   resulting geometry to the insn or data cache of every cpu.  */
static void
parse_cache_option (SIM_DESC sd, char *arg, char *cache_name,
                    int is_data_cache)
{
  int i;
  address_word ways = 0, sets = 0, linesize = 0;

  if (arg != NULL)
    {
      char *chp = arg;
      /* Parse the arguments.  */
      chp = parse_size (chp, &ways);
      ways = check_pow2 (ways, "WAYS", cache_name, sd);
      if (*chp == ',')
        {
          chp = parse_size (chp + 1, &sets);
          sets = check_pow2 (sets, "SETS", cache_name, sd);
          if (*chp == ',')
            {
              chp = parse_size (chp + 1, &linesize);
              linesize = check_pow2 (linesize, "LINESIZE", cache_name, sd);
            }
        }
    }

  for (i = 0; i < MAX_NR_PROCESSORS; ++i)
    {
      SIM_CPU *current_cpu = STATE_CPU (sd, i);
      FRV_CACHE *cache = is_data_cache
        ? CPU_DATA_CACHE (current_cpu) : CPU_INSN_CACHE (current_cpu);
      cache->ways = ways;
      cache->sets = sets;
      cache->line_size = linesize;
      frv_cache_init (current_cpu, cache);
    }
}
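/* Example (hypothetical command line; the exact option spelling is implied
   by OPTION_FRV_DATA_CACHE/OPTION_FRV_INSN_CACHE and is not shown here):
   something like --data-cache=2,256,32 would configure every cpu's data
   cache as 2 ways x 256 sets x 32-byte lines (16KB in total).  Each of
   WAYS, SETS and LINESIZE must be a power of two, as enforced by
   check_pow2.  */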
/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* Walk every tag in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
/* Invalidate the cache line containing the given address.  Flush the data
   if requested.  Returns the number of cycles required to write the
   data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then
     flush it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
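/* Worked example (illustrative numbers): for the fr550 insn cache the window
   is 0xfe000000..0xfe008000, so way_size is 0x8000 / 4 = 0x2000 (the divisor
   suggests four ways of 8KB each).  With ICWE == 2, end becomes
   0xfe008000 - 2 * 0x2000 = 0xfe004000, so only addresses in
   [0xfe000000, 0xfe004000) are treated as RAM; the ways claimed by
   ICWE/DCWE are apparently removed from the top of the window.  */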
/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one-way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* Fall through.  */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}
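/* For example (hypothetical geometry): a cache configured as 4 ways x 64
   sets becomes 1 way x 256 sets when IHSR8.ICDM is set, so the total number
   of lines (sets * ways) is unchanged and the existing tag_storage can
   presumably be reused as-is.  */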
/* Write memory on behalf of a syscall.  Invalidate the caches first
   (flushing the data cache) so the write is coherent with any cached
   state.  */
static int
syscall_write_mem (host_callback *cb, struct cb_syscall *sc,
                   unsigned long taddr, const char *buf, int bytes)
{
  SIM_DESC sd = (SIM_DESC) sc->p1;
  SIM_CPU *cpu = (SIM_CPU *) sc->p2;

  frv_cache_invalidate_all (CPU_INSN_CACHE (cpu), 0);
  frv_cache_invalidate_all (CPU_DATA_CACHE (cpu), 1);

  return sim_core_write_buffer (sd, cpu, write_map, buf, taddr, bytes);
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();

  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Perform a hardware reset.  */
void
frv_hardware_reset (SIM_CPU *cpu)
{
  /* GR, FR and CPR registers are undefined at hardware reset.  */
  frv_initialize_spr (cpu);

  /* Reset the RSTR register (in memory).  */
  if (frv_cache_enabled (CPU_DATA_CACHE (cpu)))
    frvbf_mem_set_SI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_HARDWARE_RESET);
  else
    SETMEMSI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_HARDWARE_RESET);

  /* Reset the insn and data caches.  */
  frv_cache_invalidate_all (CPU_INSN_CACHE (cpu), 0/* no flush */);
  frv_cache_invalidate_all (CPU_DATA_CACHE (cpu), 0/* no flush */);
}
void
frv_sim_engine_halt_hook (SIM_DESC sd, SIM_CPU *current_cpu, sim_cia cia)
{
  int i;

  if (current_cpu != NULL)
    CIA_SET (current_cpu, cia);

  /* Invalidate the insn and data caches of all cpus.  */
  for (i = 0; i < MAX_NR_PROCESSORS; ++i)
    {
      current_cpu = STATE_CPU (sd, i);
      frv_cache_invalidate_all (CPU_INSN_CACHE (current_cpu), 0);
      frv_cache_invalidate_all (CPU_DATA_CACHE (current_cpu), 1);
    }

  frv_term (sd);
}
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;

  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or
         memory passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}
static SIM_RC
frv_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
                    char *arg, int is_command)
{
  switch (opt)
    {
    case 'p' :
      if (! WITH_PROFILE)
        sim_io_eprintf (sd, "Profiling not compiled in, `-p' ignored\n");
      else
        {
          unsigned mask = PROFILE_USEFUL_MASK;
          if (WITH_PROFILE_CACHE_P)
            mask |= (1 << PROFILE_CACHE_IDX);
          if (WITH_PROFILE_PARALLEL_P)
            mask |= (1 << PROFILE_PARALLEL_IDX);
          return set_profile_option_mask (sd, "profile", mask, arg);
        }
      break;

    case OPTION_FRV_DATA_CACHE:
      parse_cache_option (sd, arg, "data_cache", 1/*is_data_cache*/);
      return SIM_RC_OK;

    case OPTION_FRV_INSN_CACHE:
      parse_cache_option (sd, arg, "insn_cache", 0/*is_data_cache*/);
      return SIM_RC_OK;

    case OPTION_FRV_PROFILE_CACHE:
      if (WITH_PROFILE_CACHE_P)
        return sim_profile_set_option (sd, "-cache", PROFILE_CACHE_IDX, arg);
      else
        sim_io_eprintf (sd, "Cache profiling not compiled in, `--profile-cache' ignored\n");
      break;

    case OPTION_FRV_PROFILE_PARALLEL:
      if (WITH_PROFILE_PARALLEL_P)
        {
          unsigned mask
            = (1 << PROFILE_MODEL_IDX) | (1 << PROFILE_PARALLEL_IDX);
          return set_profile_option_mask (sd, "-parallel", mask, arg);
        }
      else
        sim_io_eprintf (sd, "Parallel profiling not compiled in, `--profile-parallel' ignored\n");
      break;

    case OPTION_FRV_TIMER:
      {
        char *chp = arg;
        address_word cycles, interrupt;

        chp = parse_size (chp, &cycles);
        if (chp == arg)
          {
            sim_io_eprintf (sd, "Cycle count required for --timer\n");
            return SIM_RC_FAIL;
          }
        if (*chp != ',')
          {
            sim_io_eprintf (sd, "Interrupt number required for --timer\n");
            return SIM_RC_FAIL;
          }
        chp = parse_size (chp + 1, &interrupt);
        if (interrupt < 1 || interrupt > 15)
          {
            sim_io_eprintf (sd, "Interrupt number for --timer must be greater than 0 and less than 16\n");
            return SIM_RC_FAIL;
          }
        frv_interrupt_state.timer.enabled = 1;
        frv_interrupt_state.timer.value = cycles;
        frv_interrupt_state.timer.current = 0;
        frv_interrupt_state.timer.interrupt
          = FRV_INTERRUPT_LEVEL_1 + interrupt - 1;
      }
      return SIM_RC_OK;

    case OPTION_FRV_MEMORY_LATENCY:
      {
        int i;
        char *chp = arg;
        address_word cycles;

        chp = parse_size (chp, &cycles);
        if (chp == arg)
          {
            sim_io_eprintf (sd, "Cycle count required for --memory-latency\n");
            return SIM_RC_FAIL;
          }
        for (i = 0; i < MAX_NR_PROCESSORS; ++i)
          {
            SIM_CPU *current_cpu = STATE_CPU (sd, i);
            FRV_CACHE *insn_cache = CPU_INSN_CACHE (current_cpu);
            FRV_CACHE *data_cache = CPU_DATA_CACHE (current_cpu);
            insn_cache->memory_latency = cycles;
            data_cache->memory_latency = cycles;
          }
      }
      return SIM_RC_OK;

    default:
      sim_io_eprintf (sd, "Unknown FRV option %d\n", opt);
      return SIM_RC_FAIL;
    }

  return SIM_RC_FAIL;
}
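/* Usage sketch (option spellings inferred from the error messages above):
   --timer=10000,3 requests a timer interrupt every 10000 cycles using
   external interrupt 3 (FRV_INTERRUPT_LEVEL_1 + 2), and --memory-latency=20
   sets a 20-cycle memory latency on both caches of every cpu.  */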
/* Initialize the frv simulator.  */
void
frv_initialize (SIM_CPU *current_cpu, SIM_DESC sd)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (current_cpu);
  FRV_CACHE *insn_cache = CPU_INSN_CACHE (current_cpu);
  FRV_CACHE *data_cache = CPU_DATA_CACHE (current_cpu);
  int insn_cache_enabled = CACHE_INITIALIZED (insn_cache);
  int data_cache_enabled = CACHE_INITIALIZED (data_cache);
  USI hsr0;

  /* Initialize the register control information first since some of the
     register values are used in further configuration.  */
  frv_register_control_init (current_cpu);

  /* We need to ensure that the caches are initialized even if they are not
     initially enabled (via the command line) because they can be enabled by
     software.  */
  if (! insn_cache_enabled)
    frv_cache_init (current_cpu, CPU_INSN_CACHE (current_cpu));
  if (! data_cache_enabled)
    frv_cache_init (current_cpu, CPU_DATA_CACHE (current_cpu));

  /* Set the default cpu frequency if it has not been set on the command
     line.  */
  if (PROFILE_CPU_FREQ (p) == 0)
    PROFILE_CPU_FREQ (p) = 266000000; /* 266MHz */

  /* Allocate one cache line of memory containing the address of the reset
     register.  Use the largest of the insn cache line size and the data
     cache line size.  */
  {
    int addr = RSTR_ADDRESS;
    void *aligned_buffer;
    int bytes;

    if (CPU_INSN_CACHE (current_cpu)->line_size
        > CPU_DATA_CACHE (current_cpu)->line_size)
      bytes = CPU_INSN_CACHE (current_cpu)->line_size;
    else
      bytes = CPU_DATA_CACHE (current_cpu)->line_size;

    /* 'bytes' is a power of 2.  Calculate the starting address of the
       cache line.  */
    addr &= ~(bytes - 1);
    aligned_buffer = zalloc (bytes); /* clear */
    sim_core_attach (sd, NULL, 0, access_read_write, 0, addr, bytes,
                     0, NULL, aligned_buffer);
  }

  PROFILE_INFO_CPU_CALLBACK (p) = frv_profile_info;
  ps->insn_fetch_address = -1;
  ps->branch_address = -1;

  cgen_init_accurate_fpu (current_cpu, CGEN_CPU_FPU (current_cpu),
                          frvbf_fpu_error);

  /* Now perform power-on reset.  */
  frv_power_on_reset (current_cpu);

  /* Make sure that HSR0.ICE and HSR0.DCE are set properly.  */
  hsr0 = GET_HSR0 ();
  if (insn_cache_enabled)
    SET_HSR0_ICE (hsr0);
  else
    CLEAR_HSR0_ICE (hsr0);
  if (data_cache_enabled)
    SET_HSR0_DCE (hsr0);
  else
    CLEAR_HSR0_DCE (hsr0);
  SET_HSR0 (hsr0);
}
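/* Alignment sketch for the RSTR block above (hypothetical numbers; the real
   RSTR_ADDRESS is defined elsewhere): if the larger line size is 64 bytes,
   ~(bytes - 1) is ~63, so an address ending in 0x1234 is masked down to a
   line starting at ...0x1200, and that full 64-byte line is attached to the
   core.  */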