Example #1
/* Invalidate the entire cache. Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  /* Walk every line in the cache.  */
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
	write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}
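
The invalidate-all routine above boils down to one pattern: walk every tag, write a line back only when it is both dirty and a flush was requested, then clear the valid and locked bits. Below is a minimal, self-contained sketch of that pattern using simplified stand-in types; toy_tag, toy_cache and toy_write_line are illustrations only, while the sim's real FRV_CACHE, FRV_CACHE_TAG and write_line_to_memory carry far more state (line data, LRU information, the per-pipeline return buffers).

#include <stdio.h>

/* Simplified stand-ins for the sim's FRV_CACHE_TAG / FRV_CACHE; the real
   structures also hold line data, LRU state and per-pipeline buffers.  */
typedef struct { int valid; int dirty; int locked; } toy_tag;
typedef struct { int sets; int ways; toy_tag *tag_storage; int writebacks; } toy_cache;

/* Hypothetical write-back hook standing in for write_line_to_memory.  */
static void
toy_write_line (toy_cache *cache, toy_tag *tag)
{
  ++cache->writebacks;
  tag->dirty = 0;
}

/* Same loop shape as frv_cache_invalidate_all: write back a line only if
   it is dirty and a flush was requested, then clear valid and locked.  */
static void
toy_invalidate_all (toy_cache *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  toy_tag *tag = cache->tag_storage;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      if (tag->valid && tag->dirty && flush)
        toy_write_line (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }
}

int
main (void)
{
  toy_tag tags[4] = { {1, 1, 0}, {1, 0, 1}, {0, 0, 0}, {1, 1, 0} };
  toy_cache cache = { 2, 2, tags, 0 };

  toy_invalidate_all (&cache, 1 /* flush */);
  printf ("lines written back: %d\n", cache.writebacks);  /* prints 2 */
  return 0;
}

Passing flush == 0 simply discards dirty data, which is exactly the difference between a plain invalidate and a flush-and-invalidate in the real routine.
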
Example #2
/* Invalidate the cache line containing the given address. Flush the
   data if requested.
   Returns the number of cycles required to write the data.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it. If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
	write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
	scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}
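
frv_cache_invalidate only touches the single line containing the given address, so invalidating a buffer that spans several lines means stepping through the range one line at a time. The sketch below shows just that address arithmetic; LINE_SIZE is an assumed constant and invalidate_line is a stub standing in for a per-line call such as frv_cache_invalidate (which would also need the cache pointer and flush flag).

#include <stdio.h>

/* Assumed line size for illustration; in the sim the value comes from
   the cache configuration.  */
#define LINE_SIZE 64

/* Stub standing in for a per-line invalidate such as frv_cache_invalidate.  */
static void
invalidate_line (unsigned long address)
{
  printf ("invalidate line at 0x%lx\n",
          address & ~(unsigned long) (LINE_SIZE - 1));
}

/* Invalidate every cache line touched by [start, start + length).  */
static void
invalidate_range (unsigned long start, unsigned long length)
{
  unsigned long mask = ~(unsigned long) (LINE_SIZE - 1);
  unsigned long first, last, addr;

  if (length == 0)
    return;
  first = start & mask;
  last = (start + length - 1) & mask;
  for (addr = first; ; addr += LINE_SIZE)
    {
      invalidate_line (addr);
      if (addr == last)
        break;
    }
}

int
main (void)
{
  /* A 100-byte buffer at 0x1030 spans three 64-byte lines.  */
  invalidate_range (0x1030, 100);
  return 0;
}

Masking both ends of the range down to a line boundary ensures that a partial line at either end is still invalidated.
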
Example #3
void
scache_flush (SIM_DESC sd)
{
  int c;

  for (c = 0; c < MAX_NR_PROCESSORS; ++c)
    {
      SIM_CPU *cpu = STATE_CPU (sd, c);
      scache_flush_cpu (cpu);
    }
}
Example #4
SCACHE *
scache_lookup_or_alloc (SIM_CPU *cpu, IADDR pc, int n, SCACHE **bufp)
{
  /* FIXME: hash computation is wrong, doesn't take into account
     NUM_HASH_CHAIN_ENTRIES.  A lot of the hash table will be unused!  */
  unsigned int slot = HASH_PC (pc) & (CPU_SCACHE_NUM_HASH_CHAINS (cpu) - 1);
  int i, max_i = CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu);
  SCACHE_MAP *scm;
  SCACHE *sc;

  scm = & CPU_SCACHE_HASH_TABLE (cpu) [slot];
  for (i = 0; i < max_i && scm->pc != UNUSED_ADDR; ++i, ++scm)
    {
      if (scm->pc == pc)
	{
	  PROFILE_COUNT_SCACHE_HIT (cpu);
	  return scm->sc;
	}
    }
  PROFILE_COUNT_SCACHE_MISS (cpu);

  /* The address we want isn't cached.  Bummer.
     If the hash chain we have for this address is full, throw out an entry
     to make room.  */

  if (i == max_i)
    {
      /* Rather than do something sophisticated like LRU, we just throw out
	 a semi-random entry.  Let someone else have the joy of saying how
	 wrong this is.  NEXT_FREE is the entry to throw out and cycles
	 through all possibilities.  */
      static int next_free = 0;

      scm = & CPU_SCACHE_HASH_TABLE (cpu) [slot];
      /* FIXME: This seems rather clumsy.  */
      for (i = 0; i < next_free; ++i, ++scm)
	continue;
      ++next_free;
      if (next_free == CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu))
	next_free = 0;
    }

  /* At this point SCM points to the hash table entry to use.
     Now make sure there's room in the cache.  */
  /* FIXME: Kinda weird to use a next_free adjusted scm when cache is
     flushed.  */

  {
    int elm_size = IMP_PROPS_SCACHE_ELM_SIZE (MACH_IMP_PROPS (CPU_MACH (cpu)));
    int elms_used = (((char *) CPU_SCACHE_NEXT_FREE (cpu)
		      - (char *) CPU_SCACHE_CACHE (cpu))
		     / elm_size);
    int elms_left = CPU_SCACHE_SIZE (cpu) - elms_used;

    if (elms_left < n)
      {
	PROFILE_COUNT_SCACHE_FULL_FLUSH (cpu);
	scache_flush_cpu (cpu);
      }
  }

  sc = CPU_SCACHE_NEXT_FREE (cpu);
  scm->pc = pc;
  scm->sc = sc;

  /* Miss: hand the caller the slot to fill; returning NULL tells it the
     entry still has to be decoded.  */
  *bufp = sc;
  return NULL;
}
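
The return contract of scache_lookup_or_alloc is easy to misread: on a hit it returns the cached SCACHE entry directly, while on a miss it returns NULL and leaves *bufp pointing at the slot the caller is expected to decode into. The toy program below mirrors only that contract; lookup_or_alloc, decode_into and the slot arrays are hypothetical stand-ins, and the eviction policy is a plain index computed from the pc rather than the semi-random next_free rotation used above.

#include <stdio.h>

/* Toy stand-in for SCACHE: records the pc an entry was decoded from.  */
typedef struct { unsigned long pc; int decoded; } toy_scache;

#define NUM_SLOTS 4
static toy_scache slots[NUM_SLOTS];
static unsigned long slot_pcs[NUM_SLOTS];
static int slot_used[NUM_SLOTS];

/* Hit: return the cached entry.  Miss: return NULL and point *bufp at the
   slot to fill, mirroring scache_lookup_or_alloc's contract.  */
static toy_scache *
lookup_or_alloc (unsigned long pc, toy_scache **bufp)
{
  int i;

  for (i = 0; i < NUM_SLOTS; ++i)
    if (slot_used[i] && slot_pcs[i] == pc)
      return &slots[i];

  i = (int) ((pc >> 2) % NUM_SLOTS);   /* pcs are word aligned in this toy */
  slot_pcs[i] = pc;
  slot_used[i] = 1;
  *bufp = &slots[i];
  return NULL;
}

/* Stand-in for the decoder that fills the slot on a miss.  */
static void
decode_into (toy_scache *sc, unsigned long pc)
{
  sc->pc = pc;
  sc->decoded = 1;
}

int
main (void)
{
  unsigned long pcs[] = { 0x100, 0x104, 0x100 };
  size_t i;

  for (i = 0; i < sizeof pcs / sizeof pcs[0]; ++i)
    {
      toy_scache *buf;
      toy_scache *sc = lookup_or_alloc (pcs[i], &buf);

      if (sc == NULL)
        {
          decode_into (buf, pcs[i]);   /* miss: fill the slot *bufp named */
          sc = buf;
          printf ("pc 0x%lx: miss, decoded into slot\n", pcs[i]);
        }
      else
        printf ("pc 0x%lx: hit cached entry\n", pcs[i]);
    }
  return 0;
}

Running it, the first two pcs miss and are decoded into their slots, and the repeated pc hits the entry cached on the first pass.
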