/* Handle a load request in PIPE.  A cache hit is satisfied immediately
   from the cache line; otherwise the request is parked in the WAR until
   the memory unit fetches the data.  */
static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SI addr = req->address;
  FRV_CACHE_TAG *tag;

  /* A conflict with a pending request means we must try again later.  */
  if (address_interference (cache, addr, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* With the cache enabled and a cacheable address, a hit can be returned
     to the caller right away via the return buffer.  */
  if (frv_cache_enabled (cache)
      && ! non_cache_access (cache, addr)
      && get_tag (cache, addr, &tag))
    {
      set_most_recently_used (cache, tag);
      copy_line_to_return_buffer (cache, pipe, tag, addr);
      set_return_buffer_reqno (cache, pipe, req->reqno);
      return;
    }

  /* Miss, or a non-cache access: the memory unit must fetch the data.
     Leave the request waiting in the WAR in the meantime.  */
  wait_in_WAR (cache, pipe, req);
}
/* Perform a software reset.  */
void
frv_software_reset (SIM_CPU *cpu)
{
  FRV_CACHE *dcache = CPU_DATA_CACHE (cpu);

  /* GR, FR and CPR registers are undefined at software reset.  */
  frv_reset_spr (cpu);

  /* Record the reset cause in the memory-mapped RSTR register, going
     through the data cache whenever it is enabled.  */
  if (frv_cache_enabled (dcache))
    frvbf_mem_set_SI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_SOFTWARE_RESET);
  else
    SETMEMSI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_SOFTWARE_RESET);
}
/* Perform a power on reset.  */
void
frv_power_on_reset (SIM_CPU *cpu)
{
  FRV_CACHE *dcache = CPU_DATA_CACHE (cpu);

  /* GR, FR and CPR registers are undefined at initialization time.  */
  frv_initialize_spr (cpu);

  /* Seed the memory-mapped RSTR register with its power-on value, going
     through the data cache whenever it is enabled.  */
  if (frv_cache_enabled (dcache))
    frvbf_mem_set_SI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_INITIAL_VALUE);
  else
    SETMEMSI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_INITIAL_VALUE);
}
/* Perform a hardware reset.  */
void
frv_hardware_reset (SIM_CPU *cpu)
{
  FRV_CACHE *dcache = CPU_DATA_CACHE (cpu);

  /* GR, FR and CPR registers are undefined at hardware reset.  */
  frv_initialize_spr (cpu);

  /* Record the reset cause in the memory-mapped RSTR register, going
     through the data cache whenever it is enabled.  */
  if (frv_cache_enabled (dcache))
    frvbf_mem_set_SI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_HARDWARE_RESET);
  else
    SETMEMSI (cpu, CPU_PC_GET (cpu), RSTR_ADDRESS, RSTR_HARDWARE_RESET);

  /* Both caches start out empty after a hardware reset: discard their
     contents without flushing dirty lines back to memory.  */
  frv_cache_invalidate_all (CPU_INSN_CACHE (cpu), 0/* no flush */);
  frv_cache_invalidate_all (dcache, 0/* no flush */);
}
/* Handle a request which has been waiting in the WAR: the memory unit
   has now fetched the data.  Install it in the cache when possible;
   otherwise deliver it through the return buffer directly.

   Fix: removed the local `char *buffer', which was declared but never
   used (it would trigger -Wunused-variable).  */
static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
	 miss have already been recorded, so save and restore the stats
	 before and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
	{
	  if (! req->u.WAR.preload)
	    {
	      /* Normal load: return the cached line to the caller.  */
	      copy_line_to_return_buffer (cache, pipe, tag, address);
	      set_return_buffer_reqno (cache, pipe, req->reqno);
	    }
	  else
	    {
	      /* Preload: nothing to return; lock the line if asked.  */
	      invalidate_return_buffer (cache, address);
	      if (req->u.WAR.lock)
		tag->locked = 1;
	    }
	  return;
	}
    }
  /* The data could not be installed in the cache (non-cache access, or
     all cache lines in the set were locked), so just copy the data to
     the return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}
/* Handle a store request in PIPE.  Depending on the cache mode the data
   is written to the cache line, to memory, or to both (write-through).  */
static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  /* NOTE(review): current_cpu looks unused below, but the GET_HSR0 macro
     presumably expands to a reference to it -- confirm before removing.  */
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
	{
	  /* We need to wait for the memory unit to fetch the data.
	     Store this request in the WAR and requeue the store request.  */
	  wait_in_WAR (cache, pipe, req);
	  pipeline_requeue_request (& cache->pipeline[pipe]);
	  /* Decrement the counts of accesses and hits because when the
	     requeued request is processed again, it will appear to be a new
	     access and a hit.  */
	  --cache->statistics.accesses;
	  --cache->statistics.hits;
	  return;
	}
      /* Hit (possibly after the WAR round-trip above): merge the store
	 data into the cached line and mark it dirty.  */
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back mode).  In write-through the line was
     just written above too, so it is no longer dirty.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
	tag->dirty = 0;
    }
}
/* Handle a preload request in PIPE.  Bring every cache line covered by
   [address, address + length) into the cache, optionally locking the
   lines that are already resident.  */
static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;
  SI first_line_addr;
  SI line_addr;
  int line_count;
  int offset;
  int length;
  int lock;
  int i;

  /* Preloading is meaningless when the access bypasses the cache.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least one line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Compute how many lines the request spans and where the first
     line begins.  */
  offset = address & (cache->line_size - 1);
  line_count = 1 + (offset + length - 1) / cache->line_size;
  first_line_addr = address & ~(cache->line_size - 1);

  /* Requeue the whole request if any covered line conflicts with a
     pending request.  */
  line_addr = first_line_addr;
  for (i = 0; i < line_count; ++i)
    {
      if (address_interference (cache, line_addr, req, pipe))
	{
	  pipeline_requeue_request (& cache->pipeline[pipe]);
	  return;
	}
      line_addr += cache->line_size;
    }

  /* Process each line: lock resident lines when requested; for lines not
     yet cached, wait for the memory unit to fetch them via the WAR.  */
  lock = req->u.preload.lock;
  line_addr = first_line_addr;
  for (i = 0; i < line_count; ++i)
    {
      if (get_tag (cache, line_addr, &tag))
	{
	  if (lock)
	    tag->locked = 1;
	}
      else
	wait_in_WAR (cache, pipe, req);
      line_addr += cache->line_size;
    }
}