/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
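/* Illustrative sketch (not part of the simulator): frv_cache_write above
   locates a byte within a cache line by masking the address with
   (line_size - 1).  The hypothetical helper below, added here only for
   clarity, shows the same power-of-two arithmetic used to split an address
   into a line-aligned base and an offset within the line.  */
static SI
example_line_base_and_offset (FRV_CACHE *cache, SI address, int *offset)
{
  SI line_mask = cache->line_size - 1;  /* line_size is a power of 2 */
  *offset = address & line_mask;        /* byte offset within the line */
  return address & ~line_mask;          /* address of the start of the line */
}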
UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}
static PCADDR
fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
           || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
           || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}
/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  PCADDR pc;
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  pc = CPU_PC_GET (current_cpu);
  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}
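/* Illustrative sketch (not part of the simulator): fill_line_from_memory
   above lazily binds a tag to its backing storage.  This hypothetical helper,
   added only as an example, shows the same index arithmetic that maps a tag's
   position in tag_storage to its line in data_storage.  */
static char *
example_line_storage (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  int line_index = tag - cache->tag_storage;  /* which line this tag covers */
  return cache->data_storage + line_index * cache->line_size;
}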
void
frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  USI hsr0;
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
  else
    sim_queue_mem_df_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}
/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
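/* Illustrative sketch (not part of the simulator): non_cache_access above
   repeats closed-interval range tests for each machine variant.  The
   hypothetical helper below expresses the same test and is shown only to
   make the inline comparisons easier to read.  */
static int
example_in_range (USI address, USI low, USI high)
{
  return address >= low && address <= high;  /* inclusive on both ends */
}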
static SI
fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address,
                               int align_mask)
{
  /* No alignment restrictions on fr550.  */
  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
        frv_queue_data_access_exception_interrupt (current_cpu);
    }

  return address;
}
static PCADDR
fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  address &= ~align_mask;
  if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or
         memory passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}
UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses a cache line boundary.  */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}
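/* Illustrative sketch (not part of the simulator): frvbf_read_mem_UHI above
   uses DATA_CROSSES_CACHE_LINE to detect an access that straddles two cache
   lines.  One plausible way to express that test is shown below; the actual
   macro is defined elsewhere in the simulator and may differ in detail.  */
static int
example_crosses_cache_line (FRV_CACHE *cache, SI address, int length)
{
  int offset = address & (cache->line_size - 1);  /* offset within the line */
  return offset + length > cache->line_size;      /* spills into next line?  */
}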
/* Check to see if the RSTR.HR or RSTR.SR bits have been set.  If so, handle
   the appropriate reset interrupt.  */
static int
check_reset (SIM_CPU *current_cpu, IADDR pc)
{
  int hsr0;
  int hr;
  int sr;
  SI rstr;
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  IADDR address = RSTR_ADDRESS;

  /* We don't want this to show up in the cache statistics, so read the
     cache passively.  */
  if (! frv_cache_read_passive_SI (cache, address, & rstr))
    rstr = sim_core_read_unaligned_4 (current_cpu, pc, read_map, address);

  hr = GET_RSTR_HR (rstr);
  sr = GET_RSTR_SR (rstr);
  if (! hr && ! sr)
    return 0; /* no reset */

  /* Reinitialize the machine state.  */
  if (hr)
    frv_hardware_reset (current_cpu);
  else
    frv_software_reset (current_cpu);

  /* Branch to the reset address.  */
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_SA (hsr0))
    SET_H_PC (0xff000000);
  else
    SET_H_PC (0);

  return 1; /* reset */
}
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a
             new access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}
/* Initialize the frv simulator.  */
void
frv_initialize (SIM_CPU *current_cpu, SIM_DESC sd)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (current_cpu);
  FRV_CACHE *insn_cache = CPU_INSN_CACHE (current_cpu);
  FRV_CACHE *data_cache = CPU_DATA_CACHE (current_cpu);
  int insn_cache_enabled = CACHE_INITIALIZED (insn_cache);
  int data_cache_enabled = CACHE_INITIALIZED (data_cache);
  USI hsr0;

  /* Initialize the register control information first since some of the
     register values are used in further configuration.  */
  frv_register_control_init (current_cpu);

  /* We need to ensure that the caches are initialized even if they are not
     initially enabled (via commandline) because they can be enabled by
     software.  */
  if (! insn_cache_enabled)
    frv_cache_init (current_cpu, CPU_INSN_CACHE (current_cpu));
  if (! data_cache_enabled)
    frv_cache_init (current_cpu, CPU_DATA_CACHE (current_cpu));

  /* Set the default cpu frequency if it has not been set on the command
     line.  */
  if (PROFILE_CPU_FREQ (p) == 0)
    PROFILE_CPU_FREQ (p) = 266000000; /* 266MHz */

  /* Allocate one cache line of memory containing the address of the reset
     register.  Use the largest of the insn cache line size and the data
     cache line size.  */
  {
    int addr = RSTR_ADDRESS;
    void *aligned_buffer;
    int bytes;

    if (CPU_INSN_CACHE (current_cpu)->line_size
        > CPU_DATA_CACHE (current_cpu)->line_size)
      bytes = CPU_INSN_CACHE (current_cpu)->line_size;
    else
      bytes = CPU_DATA_CACHE (current_cpu)->line_size;

    /* 'bytes' is a power of 2.  Calculate the starting address of the
       cache line.  */
    addr &= ~(bytes - 1);
    aligned_buffer = zalloc (bytes); /* clear */
    sim_core_attach (sd, NULL, 0, access_read_write, 0, addr, bytes,
                     0, NULL, aligned_buffer);
  }

  PROFILE_INFO_CPU_CALLBACK (p) = frv_profile_info;
  ps->insn_fetch_address = -1;
  ps->branch_address = -1;

  cgen_init_accurate_fpu (current_cpu, CGEN_CPU_FPU (current_cpu),
                          frvbf_fpu_error);

  /* Now perform power-on reset.  */
  frv_power_on_reset (current_cpu);

  /* Make sure that HSR0.ICE and HSR0.DCE are set properly.  */
  hsr0 = GET_HSR0 ();
  if (insn_cache_enabled)
    SET_HSR0_ICE (hsr0);
  else
    CLEAR_HSR0_ICE (hsr0);
  if (data_cache_enabled)
    SET_HSR0_DCE (hsr0);
  else
    CLEAR_HSR0_DCE (hsr0);
  SET_HSR0 (hsr0);
}