int ari_online_core(uint32_t ari_base, uint32_t core) { int cpu = read_mpidr() & MPIDR_CPU_MASK; int cluster = (read_mpidr() & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS; int impl = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK; /* construct the current CPU # */ cpu |= (cluster << 2); /* sanity check target core id */ if ((core >= MCE_CORE_ID_MAX) || (cpu == core)) { ERROR("%s: unsupported core id (%d)\n", __func__, core); return EINVAL; } /* * The Denver cluster has 2 CPUs only - 0, 1. */ if (impl == DENVER_IMPL && ((core == 2) || (core == 3))) { ERROR("%s: unknown core id (%d)\n", __func__, core); return EINVAL; } /* clean the previous response state */ ari_clobber_response(ari_base); return ari_request_wait(ari_base, 0, TEGRA_ARI_ONLINE_CORE, core, 0); }
/*
 * Compare the running CPU's MIDR against an expected value, looking
 * only at the implementer and primary part number fields (variant and
 * revision are masked out). Returns non-zero on a match.
 */
int midr_match(unsigned int cpu_midr)
{
	unsigned int mask = (MIDR_IMPL_MASK << MIDR_IMPL_SHIFT) |
			    (MIDR_PN_MASK << MIDR_PN_SHIFT);
	unsigned int current = (unsigned int)read_midr();

	return ((current & mask) == (cpu_midr & mask));
}
/*
 * PSCI power-domain-off handler for the calling CPU.
 *
 * On Denver cores the DCO must be disabled first; the core is then put
 * into the C7 state with an infinite sleep time via the MCE. Always
 * reports PSCI_E_SUCCESS.
 */
int tegra_soc_pwr_domain_off(const psci_power_state_t *target_state)
{
	int implementer = (read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;

	/* Disable Denver's DCO operations */
	if (implementer == DENVER_IMPL) {
		denver_disable_dco();
	}

	/* Turn off CPU */
	(void)mce_command_handler(MCE_CMD_ENTER_CSTATE, TEGRA_ARI_CORE_C7,
			MCE_CORE_SLEEP_TIME_INFINITE, 0);

	return PSCI_E_SUCCESS;
}
/*
 * Publish board identification data to the shared AXI memory locations
 * and set the ACPU boot flag words. The CPU's MIDR is stored so later
 * boot stages can identify the SoC; the board type/id values written
 * here are fixed for this platform.
 */
static void hikey_boardid_init(void)
{
	u_register_t midr = read_midr();

	mmio_write_32(MEMORY_AXI_CHIP_ADDR, midr);
	INFO("[BDID] [%x] midr: 0x%x\n", MEMORY_AXI_CHIP_ADDR,
	     (unsigned int)midr);

	mmio_write_32(MEMORY_AXI_BOARD_TYPE_ADDR, 0);
	mmio_write_32(MEMORY_AXI_BOARD_ID_ADDR, 0x2b);

	mmio_write_32(ACPU_ARM64_FLAGA, 0x1234);
	mmio_write_32(ACPU_ARM64_FLAGB, 0x5678);
}
/******************************************************************************* * Power down the current CPU cluster ******************************************************************************/ void tegra_fc_cluster_powerdn(uint32_t mpidr) { int cpu = mpidr & MPIDR_CPU_MASK; uint32_t val; VERBOSE("Entering cluster powerdn state...\n"); tegra_fc_cc4_ctrl(cpu, 0); /* hardware L2 flush is faster for A53 only */ tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL, read_midr() == CORTEX_A53_MIDR); /* power down the CPU cluster */ val = FLOWCTRL_TURNOFF_CPURAIL << FLOWCTRL_ENABLE_EXT; tegra_fc_prepare_suspend(cpu, val); }
/**
 * @brief Initialise the GIC register base addresses in _gic.
 *
 * Verifies via MIDR that the CPU is a Cortex-A15 (the only core this
 * GICv2 driver supports), then records the distributor / CPU-interface /
 * hyp / virtual interface bases in the driver state, derived from
 * @p va_base or, when that is NULL, from the PERIPHBASE reported by
 * gic_periphbase_pa().
 *
 * @param va_base Base address (physical) of the GIC; NULL selects the
 *                PERIPHBASE fallback.
 * @return HVMM_STATUS_SUCCESS on a Cortex-A15,
 *         HVMM_STATUS_UNSUPPORTED_FEATURE otherwise.
 */
static hvmm_status_t gic_init_baseaddr(uint32_t *va_base)
{
    /* MIDR[15:4], CRn:c0, Op1:0, CRm:c0, Op2:0 == 0xC0F (Cortex-A15) */
    /* Cortex-A15 C15 System Control, C15 Registers */
    /* Name: Op1, CRm, Op2 */
    uint32_t midr;
    hvmm_status_t result = HVMM_STATUS_UNKNOWN_ERROR;

    HVMM_TRACE_ENTER();

    midr = read_midr();
    uart_print("midr:");
    uart_print_hex32(midr);
    uart_print("\n\r");

    /*
     * Note:
     * We currently support GICv2 with Cortex-A15 only.
     * Other architectures with GICv2 support will be further
     * listed and added for support later
     */
    if ((midr & MIDR_MASK_PPN) == MIDR_PPN_CORTEXA15) {
        /* fall-back to periphbase addr from cbar */
        /*
         * NOTE(review): this rebinds only the local parameter; the
         * caller's pointer is untouched. The 64-bit PERIPHBASE is
         * truncated to 32 bits here - assumes the GIC sits below 4GB
         * (TODO confirm for all supported boards).
         */
        if (va_base == 0) {
            va_base = (uint32_t *)(uint32_t)(gic_periphbase_pa() & \
                    0x00000000FFFFFFFFULL);
        }
        _gic.baseaddr = (uint32_t) va_base;
        uart_print("cbar:");
        uart_print_hex32(_gic.baseaddr);
        uart_print("\n\r");
        /* Register frames live at fixed offsets from the GIC base */
        _gic.ba_gicd = (uint32_t *)(_gic.baseaddr + GIC_OFFSET_GICD);
        _gic.ba_gicc = (uint32_t *)(_gic.baseaddr + GIC_OFFSET_GICC);
        _gic.ba_gich = (uint32_t *)(_gic.baseaddr + GIC_OFFSET_GICH);
        _gic.ba_gicv = (uint32_t *)(_gic.baseaddr + GIC_OFFSET_GICV);
        _gic.ba_gicvi = (uint32_t *)(_gic.baseaddr + GIC_OFFSET_GICVI);
        result = HVMM_STATUS_SUCCESS;
    } else {
        uart_print("GICv2 Unsupported\n\r");
        uart_print("midr.ppn:");
        uart_print_hex32(midr & MIDR_MASK_PPN);
        uart_print("\n\r");
        result = HVMM_STATUS_UNSUPPORTED_FEATURE;
    }

    HVMM_TRACE_EXIT();
    return result;
}
static void gic_dump_registers(void) { uint32_t midr; HVMM_TRACE_ENTER(); midr = read_midr(); uart_print("midr:"); uart_print_hex32(midr); uart_print("\n\r"); if ((midr & MIDR_MASK_PPN) == MIDR_PPN_CORTEXA15) { uint32_t value; uart_print("cbar:"); uart_print_hex32(_gic.baseaddr); uart_print("\n\r"); uart_print("ba_gicd:"); uart_print_hex32((uint32_t)_gic.ba_gicd); uart_print("\n\r"); uart_print("ba_gicc:"); uart_print_hex32((uint32_t)_gic.ba_gicc); uart_print("\n\r"); uart_print("ba_gich:"); uart_print_hex32((uint32_t)_gic.ba_gich); uart_print("\n\r"); uart_print("ba_gicv:"); uart_print_hex32((uint32_t)_gic.ba_gicv); uart_print("\n\r"); uart_print("ba_gicvi:"); uart_print_hex32((uint32_t)_gic.ba_gicvi); uart_print("\n\r"); value = _gic.ba_gicd[GICD_CTLR]; uart_print("GICD_CTLR:"); uart_print_hex32(value); uart_print("\n\r"); value = _gic.ba_gicd[GICD_TYPER]; uart_print("GICD_TYPER:"); uart_print_hex32(value); uart_print("\n\r"); value = _gic.ba_gicd[GICD_IIDR]; uart_print("GICD_IIDR:"); uart_print_hex32(value); uart_print("\n\r"); } HVMM_TRACE_EXIT(); }
/*******************************************************************************
 * Perform any BL31 specific platform actions. Populate the BL33 and BL32 image
 * info.
 *
 * arg0: struct tegra_bl31_params * from BL2 (may be NULL for RESET_TO_BL31)
 * arg1: plat_params_from_bl2_t * from BL2 (may be NULL for RESET_TO_BL31)
 * arg2/arg3: unused here
 ******************************************************************************/
void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
				u_register_t arg2, u_register_t arg3)
{
	struct tegra_bl31_params *arg_from_bl2 = (struct tegra_bl31_params *) arg0;
	plat_params_from_bl2_t *plat_params = (plat_params_from_bl2_t *)arg1;
	image_info_t bl32_img_info = { {0} };
	uint64_t tzdram_start, tzdram_end, bl32_start, bl32_end;
	uint32_t console_clock;
	int32_t ret;

	/*
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Platforms
	 * might use custom ways to get arguments, so provide handlers which
	 * they can override.
	 */
	if (arg_from_bl2 == NULL) {
		arg_from_bl2 = plat_get_bl31_params();
	}
	if (plat_params == NULL) {
		plat_params = plat_get_bl31_plat_params();
	}

	/*
	 * Copy BL3-3, BL3-2 entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	assert(arg_from_bl2 != NULL);
	assert(arg_from_bl2->bl33_ep_info != NULL);
	bl33_image_ep_info = *arg_from_bl2->bl33_ep_info;

	/* BL32 is optional; keep its size/boot-args when an EP was passed */
	if (arg_from_bl2->bl32_ep_info != NULL) {
		bl32_image_ep_info = *arg_from_bl2->bl32_ep_info;
		bl32_mem_size = arg_from_bl2->bl32_ep_info->args.arg0;
		bl32_boot_params = arg_from_bl2->bl32_ep_info->args.arg2;
	}

	/*
	 * Parse platform specific parameters - TZDRAM aperture base and size
	 */
	assert(plat_params != NULL);
	plat_bl31_params_from_bl2.tzdram_base = plat_params->tzdram_base;
	plat_bl31_params_from_bl2.tzdram_size = plat_params->tzdram_size;
	plat_bl31_params_from_bl2.uart_id = plat_params->uart_id;
	plat_bl31_params_from_bl2.l2_ecc_parity_prot_dis =
		plat_params->l2_ecc_parity_prot_dis;

	/*
	 * It is very important that we run either from TZDRAM or TZSRAM base.
	 * Add an explicit check here.
	 */
	if ((plat_bl31_params_from_bl2.tzdram_base != (uint64_t)BL31_BASE) &&
	    (TEGRA_TZRAM_BASE != BL31_BASE)) {
		panic();
	}

	/*
	 * Reference clock used by the FPGAs is a lot slower.
	 */
	if (tegra_platform_is_fpga()) {
		console_clock = TEGRA_BOOT_UART_CLK_13_MHZ;
	} else {
		console_clock = TEGRA_BOOT_UART_CLK_408_MHZ;
	}

	/*
	 * Get the base address of the UART controller to be used for the
	 * console
	 */
	tegra_console_base = plat_get_console_from_id(plat_params->uart_id);

	if (tegra_console_base != 0U) {
		/*
		 * Configure the UART port to be used as the console
		 */
		(void)console_init(tegra_console_base, console_clock,
			     TEGRA_CONSOLE_BAUDRATE);
	}

	/*
	 * The previous bootloader passes the base address of the shared memory
	 * location to store the boot profiler logs. Sanity check the
	 * address and initilise the profiler library, if it looks ok.
	 */
	if (plat_params->boot_profiler_shmem_base != 0ULL) {

		/* Reject addresses that are not in non-secure memory */
		ret = bl31_check_ns_address(plat_params->boot_profiler_shmem_base,
				PROFILER_SIZE_BYTES);
		if (ret == (int32_t)0) {

			/* store the membase for the profiler lib */
			plat_bl31_params_from_bl2.boot_profiler_shmem_base =
				plat_params->boot_profiler_shmem_base;

			/* initialise the profiler library */
			boot_profiler_init(plat_params->boot_profiler_shmem_base,
					   TEGRA_TMRUS_BASE);
		}
	}

	/*
	 * Add timestamp for platform early setup entry.
	 */
	boot_profiler_add_record("[TF] early setup entry");

	/*
	 * Initialize delay timer
	 */
	tegra_delay_timer_init();

	/* Early platform setup for Tegra SoCs */
	plat_early_platform_setup();

	/*
	 * Do initial security configuration to allow DRAM/device access.
	 */
	tegra_memctrl_tzdram_setup(plat_bl31_params_from_bl2.tzdram_base,
			(uint32_t)plat_bl31_params_from_bl2.tzdram_size);

	/*
	 * The previous bootloader might not have placed the BL32 image
	 * inside the TZDRAM. We check the BL32 image info to find out
	 * the base/PC values and relocate the image if necessary.
	 */
	if (arg_from_bl2->bl32_image_info != NULL) {

		bl32_img_info = *arg_from_bl2->bl32_image_info;

		/* Relocate BL32 if it resides outside of the TZDRAM */
		tzdram_start = plat_bl31_params_from_bl2.tzdram_base;
		tzdram_end = plat_bl31_params_from_bl2.tzdram_base +
				plat_bl31_params_from_bl2.tzdram_size;
		bl32_start = bl32_img_info.image_base;
		bl32_end = bl32_img_info.image_base + bl32_img_info.image_size;

		/* image must be sane and its entry point inside TZDRAM */
		assert(tzdram_end > tzdram_start);
		assert(bl32_end > bl32_start);
		assert(bl32_image_ep_info.pc > tzdram_start);
		assert(bl32_image_ep_info.pc < tzdram_end);

		/* relocate BL32 */
		if ((bl32_start >= tzdram_end) || (bl32_end <= tzdram_start)) {

			INFO("Relocate BL32 to TZDRAM\n");

			(void)memcpy16((void *)(uintptr_t)bl32_image_ep_info.pc,
				 (void *)(uintptr_t)bl32_start,
				 bl32_img_info.image_size);

			/* clean up non-secure intermediate buffer */
			zeromem((void *)(uintptr_t)bl32_start,
				bl32_img_info.image_size);
		}
	}

	/*
	 * Add timestamp for platform early setup exit.
	 */
	boot_profiler_add_record("[TF] early setup exit");

	INFO("BL3-1: Boot CPU: %s Processor [%lx]\n",
	     (((read_midr() >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK)
	      == DENVER_IMPL) ? "Denver" : "ARM", read_mpidr());
}
/******************************************************************************* * Prepare the CPU system registers for first entry into secure or normal world * * If execution is requested to hyp mode, HSCTLR is initialized * If execution is requested to non-secure PL1, and the CPU supports * HYP mode then HYP mode is disabled by configuring all necessary HYP mode * registers. ******************************************************************************/ void cm_prepare_el3_exit(uint32_t security_state) { uint32_t sctlr, scr, hcptr; cpu_context_t *ctx = cm_get_context(security_state); assert(ctx); if (security_state == NON_SECURE) { scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR); if (scr & SCR_HCE_BIT) { /* Use SCTLR value to initialize HSCTLR */ sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR); sctlr |= HSCTLR_RES1; /* Temporarily set the NS bit to access HSCTLR */ write_scr(read_scr() | SCR_NS_BIT); /* * Make sure the write to SCR is complete so that * we can access HSCTLR */ isb(); write_hsctlr(sctlr); isb(); write_scr(read_scr() & ~SCR_NS_BIT); isb(); } else if (read_id_pfr1() & (ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) { /* * Set the NS bit to access NS copies of certain banked * registers */ write_scr(read_scr() | SCR_NS_BIT); isb(); /* PL2 present but unused, need to disable safely */ write_hcr(0); /* HSCTLR : can be ignored when bypassing */ /* HCPTR : disable all traps TCPAC, TTA, TCP */ hcptr = read_hcptr(); hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT); write_hcptr(hcptr); /* Enable EL1 access to timer */ write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT); /* Reset CNTVOFF_EL2 */ write64_cntvoff(0); /* Set VPIDR, VMPIDR to match MIDR, MPIDR */ write_vpidr(read_midr()); write_vmpidr(read_mpidr()); /* * Reset VTTBR. * Needed because cache maintenance operations depend on * the VMID even when non-secure EL1&0 stage 2 address * translation are disabled. 
*/ write64_vttbr(0); /* * Avoid unexpected debug traps in case where HDCR * is not completely reset by the hardware - set * HDCR.HPMN to PMCR.N and zero the remaining bits. * The HDCR.HPMN and PMCR.N fields are the same size * (5 bits) and HPMN is at offset zero within HDCR. */ write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT); /* * Reset CNTHP_CTL to disable the EL2 physical timer and * therefore prevent timer interrupts. */ write_cnthp_ctl(0); isb(); write_scr(read_scr() & ~SCR_NS_BIT); isb(); }
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 *
 * Note the barrier discipline: every SCR.NS flip is followed by an isb()
 * before dependent hyp-register accesses, and NS is cleared again before
 * returning so EL3 state is left unchanged.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		/* SCR as saved in this context decides whether hyp is used */
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			/* Restore EL3's view: clear NS again */
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/* Set the NS bit to access HCR, HCPTR, CNTHCTL, VPIDR, VMPIDR */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation are disabled.
			 */
			write64_vttbr(0);
			isb();

			/* Done with hyp registers - clear NS before returning */
			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}