Example #1
static SIM_RC
sim_model_init (SIM_DESC sd)
{
  SIM_CPU *cpu;

  /* If both cpu model and state architecture are set, ensure they're
     compatible.  If only one is set, set the other.  If neither are set,
     use the default model.  STATE_ARCHITECTURE is the bfd_arch_info data
     for the selected "mach" (bfd terminology).  */

  /* Only check cpu 0.  STATE_ARCHITECTURE is for that one only.  */
  /* ??? At present this only supports homogeneous multiprocessors.  */
  cpu = STATE_CPU (sd, 0);

  if (! STATE_ARCHITECTURE (sd)
      && ! CPU_MACH (cpu))
    {
      /* Set the default model.  */
      const MODEL *model = sim_model_lookup (WITH_DEFAULT_MODEL);
      sim_model_set (sd, NULL, model);
    }

  if (STATE_ARCHITECTURE (sd)
      && CPU_MACH (cpu))
    {
      if (strcmp (STATE_ARCHITECTURE (sd)->printable_name,
		  MACH_BFD_NAME (CPU_MACH (cpu))) != 0)
	{
	  sim_io_eprintf (sd, "invalid model `%s' for `%s'\n",
			  MODEL_NAME (CPU_MODEL (cpu)),
			  STATE_ARCHITECTURE (sd)->printable_name);
	  return SIM_RC_FAIL;
	}
    }
  else if (STATE_ARCHITECTURE (sd))
    {
      /* Use the default model for the selected machine.
	 The default model is the first one in the list.  */
      const MACH *mach = sim_mach_lookup_bfd_name (STATE_ARCHITECTURE (sd)->printable_name);

      if (mach == NULL)
	{
	  sim_io_eprintf (sd, "unsupported machine `%s'\n",
			  STATE_ARCHITECTURE (sd)->printable_name);
	  return SIM_RC_FAIL;
	}
      sim_model_set (sd, NULL, MACH_MODELS (mach));
    }
  else
    {
      STATE_ARCHITECTURE (sd) = bfd_scan_arch (MACH_BFD_NAME (CPU_MACH (cpu)));
    }

  return SIM_RC_OK;
}
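For context, sim_model_init is not normally called directly; the common framework registers it as a module init hook so it runs when the simulator is opened. A minimal sketch of such an install routine, assuming the usual sim-module API (sim_module_add_init_fn); the _sketch name is hypothetical:

static SIM_RC
sim_model_install_sketch (SIM_DESC sd)
{
  /* Sketch only: have sim_model_init run as a module init hook.
     Assumes sim_module_add_init_fn from the common sim-module code.  */
  sim_module_add_init_fn (sd, sim_model_init);
  return SIM_RC_OK;
}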
Example #2
static void
prime_cpu (SIM_CPU *cpu, int max_insns)
{
  CPU_MAX_SLICE_INSNS (cpu) = max_insns;
  CPU_INSN_COUNT (cpu) = 0;

  /* Initialize the insn descriptor table.
     This has to be done after all initialization so we just defer it to
     here.  */

  if (MACH_PREPARE_RUN (CPU_MACH (cpu)))
    (* MACH_PREPARE_RUN (CPU_MACH (cpu))) (cpu);
}
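prime_cpu is typically invoked by the run loop just before control passes to the generated engine. A rough sketch of such a caller, assuming the usual cgen accessors CPU_FAST_ENGINE_FN / CPU_FULL_ENGINE_FN and the ENGINE_FN type; the function name is hypothetical:

static void
engine_run_sketch (SIM_DESC sd, int max_insns, int fast_p)
{
  int c;

  for (c = 0; c < MAX_NR_PROCESSORS; ++c)
    {
      SIM_CPU *cpu = STATE_CPU (sd, c);
      /* Assumed cgen accessors: the fast engine is semantics only, the
	 full engine includes tracing/profiling.  */
      ENGINE_FN *fn = fast_p ? CPU_FAST_ENGINE_FN (cpu)
			     : CPU_FULL_ENGINE_FN (cpu);

      prime_cpu (cpu, max_insns);
      (* fn) (cpu);
    }
}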
Example #3
void
scache_flush_cpu (SIM_CPU *cpu)
{
  int i, n;

  /* Don't bother if cache not in use.  */
  if (CPU_SCACHE_SIZE (cpu) == 0)
    return;

#if WITH_SCACHE_PBB
  /* It's important that this be reasonably fast as this can be done when
     the simulation is running.  */
  CPU_SCACHE_NEXT_FREE (cpu) = CPU_SCACHE_CACHE (cpu);
  n = CPU_SCACHE_NUM_HASH_CHAINS (cpu) * CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu);
  /* ??? Might be faster to just set the first entry, then update the
     "last entry" marker during allocation.  */
  for (i = 0; i < n; ++i)
    CPU_SCACHE_HASH_TABLE (cpu) [i] . pc = UNUSED_ADDR;
#else
  {
    int elm_size = IMP_PROPS_SCACHE_ELM_SIZE (MACH_IMP_PROPS (CPU_MACH (cpu)));
    SCACHE *sc;

    /* Technically, this may not be necessary, but it helps debugging.  */
    memset (CPU_SCACHE_CACHE (cpu), 0,
	    CPU_SCACHE_SIZE (cpu) * elm_size);

    for (i = 0, sc = CPU_SCACHE_CACHE (cpu); i < CPU_SCACHE_SIZE (cpu);
	 ++i, sc = (SCACHE *) ((char *) sc + elm_size))
      {
	sc->argbuf.addr = UNUSED_ADDR;
      }
  }
#endif
}
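scache_flush_cpu flushes a single processor's cache; the whole-simulator scache_flush called from Example #6 is, in all likelihood, just the loop below (a sketch, not copied from the source):

void
scache_flush (SIM_DESC sd)
{
  int c;

  /* Flush the scache of every processor.  */
  for (c = 0; c < MAX_NR_PROCESSORS; ++c)
    scache_flush_cpu (STATE_CPU (sd, c));
}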
Example #4
static void
model_set (sim_cpu *cpu, const MODEL *model)
{
  /* Record the selected mach and model on the cpu, then run the mach's
     per-cpu init hook followed by the model's.  */
  CPU_MACH (cpu) = MODEL_MACH (model);
  CPU_MODEL (cpu) = model;
  (* MACH_INIT_CPU (MODEL_MACH (model))) (cpu);
  (* MODEL_INIT (model)) (cpu);
}
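model_set is the per-cpu worker; the public entry point sim_model_set used in Example #1 presumably applies the model either to one cpu or, when cpu is NULL, to every cpu. A sketch under that assumption:

void
sim_model_set (SIM_DESC sd, sim_cpu *cpu, const MODEL *model)
{
  if (cpu == NULL)
    {
      int c;

      /* Apply the model to every existing cpu.  */
      for (c = 0; c < MAX_NR_PROCESSORS; ++c)
	if (STATE_CPU (sd, c))
	  model_set (STATE_CPU (sd, c), model);
    }
  else
    model_set (cpu, model);
}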
Example #5
/* Deliver a core signal.  In the operating environment this emulates an
   address exception: the old BPC is saved in BBPC, the PSW in BPSW, the
   PSW is cleared except for the SM bit, BPC is set to the faulting PC,
   and execution restarts at the address-exception vector.  Otherwise the
   default sim_core_signal handling is used.  */
void
m32r_core_signal (SIM_DESC sd, SIM_CPU *current_cpu, sim_cia cia,
		  unsigned int map, int nr_bytes, address_word addr,
		  transfer_type transfer, sim_core_signals sig)
{
  if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
    {
      m32rbf_h_cr_set (current_cpu, H_CR_BBPC,
                       m32rbf_h_cr_get (current_cpu, H_CR_BPC));
      if (MACH_NUM (CPU_MACH (current_cpu)) == MACH_M32R)
        {
          m32rbf_h_bpsw_set (current_cpu, m32rbf_h_psw_get (current_cpu));
          /* sm not changed */
          m32rbf_h_psw_set (current_cpu, m32rbf_h_psw_get (current_cpu) & 0x80);
        }
      else if (MACH_NUM (CPU_MACH (current_cpu)) == MACH_M32RX)
        {
          m32rxf_h_bpsw_set (current_cpu, m32rxf_h_psw_get (current_cpu));
          /* sm not changed */
          m32rxf_h_psw_set (current_cpu, m32rxf_h_psw_get (current_cpu) & 0x80);
        }
      else
        {
          m32r2f_h_bpsw_set (current_cpu, m32r2f_h_psw_get (current_cpu));
          /* sm not changed */
          m32r2f_h_psw_set (current_cpu, m32r2f_h_psw_get (current_cpu) & 0x80);
        }
      m32rbf_h_cr_set (current_cpu, H_CR_BPC, cia);

      sim_engine_restart (CPU_STATE (current_cpu), current_cpu, NULL,
                          EIT_ADDR_EXCP_ADDR);
    }
  else
    sim_core_signal (sd, current_cpu, cia, map, nr_bytes, addr,
                     transfer, sig);
}
Example #6
static SIM_RC
scache_init (SIM_DESC sd)
{
  int c;

  for (c = 0; c < MAX_NR_PROCESSORS; ++c)
    {
      SIM_CPU *cpu = STATE_CPU (sd, c);
      int elm_size = IMP_PROPS_SCACHE_ELM_SIZE (MACH_IMP_PROPS (CPU_MACH (cpu)));

      /* elm_size is 0 if the cpu doesn't have scache support.  */
      if (elm_size == 0)
	{
	  CPU_SCACHE_SIZE (cpu) = 0;
	  CPU_SCACHE_CACHE (cpu) = NULL;
	}
      else
	{
	  if (CPU_SCACHE_SIZE (cpu) == 0)
	    CPU_SCACHE_SIZE (cpu) = STATE_SCACHE_SIZE (sd);
	  CPU_SCACHE_CACHE (cpu) =
	    (SCACHE *) xmalloc (CPU_SCACHE_SIZE (cpu) * elm_size);
#if WITH_SCACHE_PBB
	  CPU_SCACHE_MAX_CHAIN_LENGTH (cpu) = MAX_CHAIN_LENGTH;
	  CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu) = MAX_HASH_CHAIN_LENGTH;
	  CPU_SCACHE_NUM_HASH_CHAINS (cpu) = max (MIN_HASH_CHAINS,
						  CPU_SCACHE_SIZE (cpu)
						  / SCACHE_HASH_RATIO);
	  CPU_SCACHE_HASH_TABLE (cpu) =
	    (SCACHE_MAP *) xmalloc (CPU_SCACHE_NUM_HASH_CHAINS (cpu)
				    * CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu)
				    * sizeof (SCACHE_MAP));
	  CPU_SCACHE_PBB_BEGIN (cpu) = (SCACHE *) zalloc (elm_size);
	  CPU_SCACHE_CHAIN_LENGTHS (cpu) =
	    (unsigned long *) zalloc ((CPU_SCACHE_MAX_CHAIN_LENGTH (cpu) + 1)
				      * sizeof (long));
#endif
	}
    }

  scache_flush (sd);

  return SIM_RC_OK;
}
Example #7
/* Look up PC in the hash table.  If an entry is found, return its cached
   block.  Otherwise return NULL and set *BUFP to room for N entries that
   the caller is expected to fill in.  */
SCACHE *
scache_lookup_or_alloc (SIM_CPU *cpu, IADDR pc, int n, SCACHE **bufp)
{
  /* FIXME: hash computation is wrong, doesn't take into account
     NUM_HASH_CHAIN_ENTRIES.  A lot of the hash table will be unused!  */
  unsigned int slot = HASH_PC (pc) & (CPU_SCACHE_NUM_HASH_CHAINS (cpu) - 1);
  int i, max_i = CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu);
  SCACHE_MAP *scm;
  SCACHE *sc;

  scm = & CPU_SCACHE_HASH_TABLE (cpu) [slot];
  for (i = 0; i < max_i && scm->pc != UNUSED_ADDR; ++i, ++scm)
    {
      if (scm->pc == pc)
	{
	  PROFILE_COUNT_SCACHE_HIT (cpu);
	  return scm->sc;
	}
    }
  PROFILE_COUNT_SCACHE_MISS (cpu);

  /* The address we want isn't cached.  Bummer.
     If the hash chain we have for this address is full, throw out an entry
     to make room.  */

  if (i == max_i)
    {
      /* Rather than do something sophisticated like LRU, we just throw out
	 a semi-random entry.  Let someone else have the joy of saying how
	 wrong this is.  NEXT_FREE is the entry to throw out and cycles
	 through all possibilities.  */
      static int next_free = 0;

      scm = & CPU_SCACHE_HASH_TABLE (cpu) [slot];
      /* FIXME: This seems rather clumsy.  */
      for (i = 0; i < next_free; ++i, ++scm)
	continue;
      ++next_free;
      if (next_free == CPU_SCACHE_NUM_HASH_CHAIN_ENTRIES (cpu))
	next_free = 0;
    }

  /* At this point SCM points to the hash table entry to use.
     Now make sure there's room in the cache.  */
  /* FIXME: Kinda weird to use a next_free adjusted scm when cache is
     flushed.  */

  {
    int elm_size = IMP_PROPS_SCACHE_ELM_SIZE (MACH_IMP_PROPS (CPU_MACH (cpu)));
    int elms_used = (((char *) CPU_SCACHE_NEXT_FREE (cpu)
		      - (char *) CPU_SCACHE_CACHE (cpu))
		     / elm_size);
    int elms_left = CPU_SCACHE_SIZE (cpu) - elms_used;

    if (elms_left < n)
      {
	PROFILE_COUNT_SCACHE_FULL_FLUSH (cpu);
	scache_flush_cpu (cpu);
      }
  }

  sc = CPU_SCACHE_NEXT_FREE (cpu);
  scm->pc = pc;
  scm->sc = sc;

  *bufp = sc;
  return NULL;
}
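A hedged sketch of how a caller might use the miss path (the real caller lives in the generated mloop): on a NULL return, the buffer handed back through bufp is filled in and CPU_SCACHE_NEXT_FREE is advanced by however many entries were consumed. extract_block and its returned count are stand-ins, not real functions:

static SCACHE *
lookup_block_sketch (SIM_CPU *cpu, IADDR pc, int max_insns)
{
  SCACHE *buf;
  SCACHE *sc = scache_lookup_or_alloc (cpu, pc, max_insns, &buf);

  if (sc == NULL)
    {
      int elm_size = IMP_PROPS_SCACHE_ELM_SIZE (MACH_IMP_PROPS (CPU_MACH (cpu)));
      /* extract_block is a hypothetical stand-in for the port's
	 extraction code; it fills BUF and returns the entry count.  */
      int count = extract_block (cpu, pc, max_insns, buf);

      /* Entries are elm_size bytes apart, so advance by bytes.  */
      CPU_SCACHE_NEXT_FREE (cpu) =
	(SCACHE *) ((char *) buf + count * elm_size);
      sc = buf;
    }
  return sc;
}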