/* Set up default SCR values. */
static void el3_init(void)
{
	uint32_t scr;

	if (get_current_el() != EL3)
		return;

	scr = raw_read_scr_el3();
	/* Default to non-secure EL1 and EL0. */
	scr &= ~(SCR_NS_MASK);
	scr |= SCR_NS_ENABLE;
	/* Disable IRQ, FIQ, and external abort interrupt routing. */
	scr &= ~(SCR_IRQ_MASK | SCR_FIQ_MASK | SCR_EA_MASK);
	scr |= SCR_IRQ_DISABLE | SCR_FIQ_DISABLE | SCR_EA_DISABLE;
	/* Enable HVC */
	scr &= ~(SCR_HVC_MASK);
	scr |= SCR_HVC_ENABLE;
	/* Disable SMC */
	scr &= ~(SCR_SMC_MASK);
	scr |= SCR_SMC_DISABLE;
	/* Disable secure instruction fetches. */
	scr &= ~(SCR_SIF_MASK);
	scr |= SCR_SIF_DISABLE;
	/* All lower exception levels 64-bit by default. */
	scr &= ~(SCR_RW_MASK);
	scr |= SCR_LOWER_AARCH64;
	/* Disable secure EL1 access to secure timer. */
	scr &= ~(SCR_ST_MASK);
	scr |= SCR_ST_DISABLE;
	/* Don't trap on WFE or WFI instructions. */
	scr &= ~(SCR_TWI_MASK | SCR_TWE_MASK);
	scr |= SCR_TWI_DISABLE | SCR_TWE_DISABLE;
	raw_write_scr_el3(scr);
	isb();
}
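/*
 * For reference, a minimal sketch of how the SCR_* macros used above could
 * map onto the architectural SCR_EL3 bit layout (ARMv8-A). The bit
 * positions below follow the architecture; the macro definitions
 * themselves are an assumption for illustration, since the real ones live
 * in the platform headers. Kept under "#if 0" so this file still compiles
 * without clashing with the real definitions.
 */
#if 0	/* illustrative only, not this codebase's actual definitions */
#define SCR_NS_MASK		(1 << 0)	/* NS: EL0/EL1 are non-secure */
#define SCR_NS_ENABLE		(1 << 0)
#define SCR_IRQ_MASK		(1 << 1)	/* IRQ: route IRQs to EL3 when set */
#define SCR_IRQ_DISABLE		(0 << 1)
#define SCR_FIQ_MASK		(1 << 2)	/* FIQ: route FIQs to EL3 when set */
#define SCR_FIQ_DISABLE		(0 << 2)
#define SCR_EA_MASK		(1 << 3)	/* EA: route external aborts to EL3 */
#define SCR_EA_DISABLE		(0 << 3)
#define SCR_SMC_MASK		(1 << 7)	/* SMD: SMC instruction disable */
#define SCR_SMC_DISABLE		(1 << 7)
#define SCR_HVC_MASK		(1 << 8)	/* HCE: HVC instruction enable */
#define SCR_HVC_ENABLE		(1 << 8)
#define SCR_SIF_MASK		(1 << 9)	/* SIF: secure instruction fetch disable */
#define SCR_SIF_DISABLE		(1 << 9)
#define SCR_RW_MASK		(1 << 10)	/* RW: next lower EL is AArch64 */
#define SCR_LOWER_AARCH64	(1 << 10)
#define SCR_ST_MASK		(1 << 11)	/* ST: secure EL1 access to timer */
#define SCR_ST_DISABLE		(0 << 11)
#define SCR_TWI_MASK		(1 << 12)	/* TWI: trap WFI to EL3 when set */
#define SCR_TWI_DISABLE		(0 << 12)
#define SCR_TWE_MASK		(1 << 13)	/* TWE: trap WFE to EL3 when set */
#define SCR_TWE_DISABLE		(0 << 13)
#endif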
static void secmon_start(void *arg)
{
	uint32_t scr;
	secmon_entry_t entry;
	struct secmon_params *p;
	struct secmon_runit *r = arg;

	entry = r->entry;
	p = &r->params;

	/* Obtain secondary entry point for non-BSP CPUs. */
	if (!cpu_is_bsp())
		entry = secondary_entry_point(entry);

	printk(BIOS_DEBUG, "CPU%x entering secure monitor %p.\n",
	       cpu_info()->id, entry);

	/*
	 * We want to enforce the following policy:
	 * the NS bit is set for the lower ELs.
	 */
	scr = raw_read_scr_el3();
	scr |= SCR_NS;
	raw_write_scr_el3(scr);

	entry(p);
}
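/*
 * Illustrative sketch only: one plausible way the BSP could hand every CPU
 * off to secmon_start(). The runit container matches the fields
 * dereferenced above, but secmon_run() and run_on_all_cpus() are
 * hypothetical names for this example, not this codebase's exact API.
 */
#if 0	/* illustrative only */
static struct secmon_runit runit;

static void secmon_run(secmon_entry_t entry)
{
	runit.entry = entry;
	/* runit.params would be filled in with BSP/secondary actions here. */

	/* Hypothetical helper that invokes the callback on each online CPU. */
	run_on_all_cpus(secmon_start, &runit);
}
#endif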
void transition(struct exc_state *exc_state)
{
	uint32_t scr_mask;
	uint64_t hcr_mask;
	uint64_t sctlr;
	uint32_t current_el = get_current_el();
	struct elx_state *elx = &exc_state->elx;
	struct regs *regs = &exc_state->regs;
	uint8_t elx_el = get_el_from_spsr(elx->spsr);

	/*
	 * Policies enforced:
	 * 1. We support only elx --> (elx - 1) transitions.
	 * 2. We support transitions to AArch64 mode only.
	 *
	 * If either of these conditions does not hold, we need a proper way
	 * to update SCR/HCR before removing the checks below.
	 */
	if ((current_el - elx_el) != 1)
		die("ARM64 Error: Do not support transition\n");

	if (elx->spsr & SPSR_ERET_32)
		die("ARM64 Error: Do not support eret to Aarch32\n");
	else {
		scr_mask = SCR_LOWER_AARCH64;
		hcr_mask = HCR_LOWER_AARCH64;
	}

	/* SCR: Write to SCR if current EL is EL3 */
	if (current_el == EL3) {
		uint32_t scr = raw_read_scr_el3();
		scr |= scr_mask;
		raw_write_scr_el3(scr);
	}
	/* HCR: Write to HCR if current EL is EL2 */
	else if (current_el == EL2) {
		uint64_t hcr = raw_read_hcr_el2();
		hcr |= hcr_mask;
		raw_write_hcr_el2(hcr);
	}

	/* ELR/SPSR: Write entry point and processor state of program */
	raw_write_elr_current(elx->elr);
	raw_write_spsr_current(elx->spsr);

	/* SCTLR: Initialize EL with selected properties */
	sctlr = raw_read_sctlr(elx_el);
	sctlr &= SCTLR_MASK;
	raw_write_sctlr(sctlr, elx_el);

	/* SP_ELx: Initialize stack pointer */
	raw_write_sp_elx(elx->sp_elx, elx_el);

	/* Eret to the entry point */
	trans_switch(regs);
}
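/*
 * Illustrative sketch only: how an EL3 caller might use transition() to
 * eret into an AArch64 EL2 image. The elx field names come from the code
 * above; get_eret_el(), SPSR_USE_H, and el2_stack_top are assumptions made
 * up for this example.
 */
#if 0	/* illustrative only */
static void example_enter_el2(void (*payload_entry)(void), void *el2_stack_top)
{
	struct exc_state exc_state;

	memset(&exc_state, 0, sizeof(exc_state));

	/* Where eret lands. */
	exc_state.elx.elr = (uint64_t)payload_entry;
	/* Target state: AArch64 EL2 using SP_EL2; helper name hypothetical. */
	exc_state.elx.spsr = get_eret_el(EL2, SPSR_USE_H);
	/* Stack pointer the target EL starts with. */
	exc_state.elx.sp_elx = (uint64_t)el2_stack_top;

	transition(&exc_state);	/* does not return */
}
#endif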
/*
 * startup_save_cpu_data is used to save register values that need to be set
 * up when a CPU starts booting. This is used by secondary CPUs as well as by
 * the resume path to directly set up the MMU and other related registers.
 */
void startup_save_cpu_data(void)
{
	save_element(MAIR_INDEX, raw_read_mair_current());
	save_element(TCR_INDEX, raw_read_tcr_current());
	save_element(TTBR0_INDEX, raw_read_ttbr0_current());
	save_element(VBAR_INDEX, raw_read_vbar_current());

	if (get_current_el() == EL3)
		save_element(SCR_INDEX, raw_read_scr_el3());

	dcache_clean_by_mva(_arm64_startup_data,
			    NUM_ELEMENTS * PER_ELEMENT_SIZE_BYTES);
}
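/*
 * Illustrative sketch only: save_element() presumably stores one 64-bit
 * register value into a fixed slot of the shared _arm64_startup_data block,
 * which the secondary/resume startup code later reads back before enabling
 * the MMU. This body and the 8-byte element size are assumptions consistent
 * with the dcache_clean_by_mva() size computation above.
 */
#if 0	/* illustrative only */
static void save_element(size_t index, uint64_t value)
{
	uint64_t *data = (uint64_t *)_arm64_startup_data;

	data[index] = value;
}
#endif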