Example #1
0
/*
 * Patches the last word of the prologue with a branch ("b") instruction
 * that jumps to the target address previously stored in that slot.
 *
 * A PC-relative branch is preferred, since it keeps custom minimal
 * prologues in read-only areas relocatable.  If the target is out of
 * relative range (PC +/- 32M), an absolute branch is used when the
 * target lies in the absolute-reachable window; otherwise the function
 * fails.
 *
 * Returns true on success, false if the target is unreachable.
 */
static bool ppc_exc_create_branch_op(
  unsigned vector,
  uint32_t *prologue,
  size_t prologue_size
)
{
  static const uintptr_t BRANCH_OP_CODE = 18 << 26;
/*  static const uintptr_t BRANCH_OP_LINK = 0x1; */
  static const uintptr_t BRANCH_OP_ABS = 0x2;
  static const uintptr_t BRANCH_OP_MSK = 0x3ffffff;
  size_t op_index = prologue_size / 4 - 1;
  uintptr_t op_address =
    (uintptr_t) ppc_exc_vector_address(vector) + 4 * op_index;

  /* The stored target may have BRANCH_OP_LINK set in its low bit */
  uintptr_t target = prologue [op_index];

  uintptr_t displacement = target - op_address;

  if ((displacement & ~BRANCH_OP_MSK) == 0) {
    /* Relative branch is reachable — the preferred, relocatable form */
    prologue [op_index] = BRANCH_OP_CODE | displacement;
    return true;
  }

  /* Target too far for relative branch (PC +/- 32M) */
  if (target >= 0xfe000001 || target < 0x01fffffd) {
    /* Target lies in the absolute-branch window, use an absolute branch */
    prologue [op_index] =
      BRANCH_OP_CODE | ((target | BRANCH_OP_ABS) & BRANCH_OP_MSK);
    return true;
  }

  return false;
}
Example #2
0
/*
 * Initializes PowerPC exception handling with the prologues placed
 * relative to the given vector base.
 *
 * Validates the run-time environment (exception categories available,
 * r13 holds _SDA_BASE_), sets up the interrupt stack, configures the
 * MSR bits used while running C exception handlers, installs BookE
 * IVPR/IVOR registers where applicable, copies a generated prologue to
 * every valid vector address, and finally sanity-checks that the stack
 * is mapped write-back cacheable.  Any unrecoverable problem ends in
 * ppc_exc_fatal_error().
 */
void ppc_exc_initialize_with_vector_base(
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size,
  void *vector_base
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  /* No category table for this CPU means we cannot build prologues */
  if (categories == NULL) {
    ppc_exc_fatal_error();
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * early init code put it there.
   */
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    ppc_exc_fatal_error();
  }

  ppc_exc_initialize_interrupt_stack(interrupt_stack_begin, interrupt_stack_size);

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

  /* BookE cores take their vectors through IVPR/IVOR registers */
  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke(vector_base);
  }

  /* Generate and install a prologue for every valid exception vector */
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector, vector_base);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      /* prologue_size is in/out: it returns the actual prologue length */
      sc = ppc_exc_make_prologue(
        vector,
        vector_base,
        category,
        prologue,
        &prologue_size
      );
      if (sc != RTEMS_SUCCESSFUL) {
        ppc_exc_fatal_error();
      }

      /* ppc_code_copy presumably also synchronizes the caches — verify */
      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS)
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely w/o the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-thru or caching-disabled.
     * The low-level code is not capable of dealing with this
     * ATM.
     */
    p = (p + 31U) & ~31U;  /* align up to a 32-byte cache-line boundary */
    __asm__ volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it thru here then things seem to be OK */
  }
Example #3
0
/*
 * Programs the BookE interrupt vector registers (IVPR plus the IVOR
 * set) so that all exception entries point into the area starting at
 * vector_base.  Cores with hard-wired IVORs (e200z0/z1) only get IVPR.
 */
static void ppc_exc_initialize_booke(void *vector_base)
{
  /* Interrupt vector prefix register */
  MTIVPR((uint32_t) vector_base);

  if (
    ppc_cpu_is_specific_e200(PPC_e200z0)
      || ppc_cpu_is_specific_e200(PPC_e200z1)
  ) {
    /*
     * These cores have hard wired IVOR registers.  An access will cause a
     * program exception.
     */
    return;
  }

  /* Interrupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
  /* IVOR32-35 exist only on e200/e500 (SPE / embedded FP / perfmon) */
  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
  }
  if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
  }
}