void bl31_plat_arch_setup(void)
{
        const mmap_region_t bl_regions[] = {
                MAP_REGION_FLAT(BL31_BASE, BL31_END - BL31_BASE,
                                MT_MEMORY | MT_RW | MT_SECURE),
                MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
                                MT_CODE | MT_SECURE),
                /* Size must be END - BASE; END - END would map zero bytes. */
                MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE,
                                MT_RO_DATA | MT_SECURE),
                {0}
        };

        setup_page_tables(bl_regions, plat_k3_mmap);
        enable_mmu_el3(0);
}
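/*
 * A minimal sketch, not the canonical TF-A implementation, of what a helper
 * like setup_page_tables() typically does with the xlat_tables_v2 library:
 * register the image's own regions, then the platform map, then build the
 * translation tables. The _sketch name is illustrative only; the MMU itself
 * is enabled separately by the caller (enable_mmu_el3() above).
 */
static void setup_page_tables_sketch(const mmap_region_t *bl_regions,
                                     const mmap_region_t *plat_regions)
{
        mmap_add(bl_regions);   /* regions covering this BL image */
        mmap_add(plat_regions); /* platform device/memory regions */
        init_xlat_tables();     /* populate the page tables */
}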
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU.
 ******************************************************************************/
void tsp_plat_arch_setup(void)
{
        const mmap_region_t bl_regions[] = {
                MAP_REGION_FLAT(BL32_BASE, BL32_END - BL32_BASE,
                                MT_MEMORY | MT_RW | MT_SECURE),
                MAP_REGION_FLAT(BL_CODE_BASE, BL_CODE_END - BL_CODE_BASE,
                                MT_CODE | MT_SECURE),
                MAP_REGION_FLAT(BL_RO_DATA_BASE, BL_RO_DATA_END - BL_RO_DATA_BASE,
                                MT_RO_DATA | MT_SECURE),
                MAP_REGION_FLAT(BL_COHERENT_RAM_BASE,
                                BL_COHERENT_RAM_END - BL_COHERENT_RAM_BASE,
                                MT_DEVICE | MT_RW | MT_SECURE),
                {0}
        };

        setup_page_tables(bl_regions, plat_arm_get_mmap());
        enable_mmu_el1(0);
}
void bl31_plat_arch_setup(void)
{
        static const mmap_region_t secure_partition_mmap[] = {
#if ENABLE_SPM && SPM_MM
                MAP_REGION_FLAT(PLAT_SPM_BUF_BASE, PLAT_SPM_BUF_SIZE,
                                MT_RW_DATA | MT_SECURE),
                MAP_REGION_FLAT(PLAT_SQ_SP_PRIV_BASE, PLAT_SQ_SP_PRIV_SIZE,
                                MT_RW_DATA | MT_SECURE),
#endif
                {0},
        };

        sq_mmap_setup(BL31_BASE, BL31_SIZE, secure_partition_mmap);
        enable_mmu_el3(XLAT_TABLE_NC);

#if ENABLE_SPM && SPM_MM
        /* Copy the shim exception vectors from load to runtime address. */
        memcpy((void *)SPM_SHIM_EXCEPTIONS_START,
               (void *)SPM_SHIM_EXCEPTIONS_LMA,
               (uintptr_t)SPM_SHIM_EXCEPTIONS_END -
               (uintptr_t)SPM_SHIM_EXCEPTIONS_START);
#endif
}
void sp_map_memory_regions(sp_context_t *sp_ctx)
{
        /* This region contains the exception vectors used at S-EL1. */
        const mmap_region_t sel1_exception_vectors =
                MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
                                SPM_SHIM_EXCEPTIONS_SIZE,
                                MT_CODE | MT_SECURE | MT_PRIVILEGED);

        mmap_add_region_ctx(sp_ctx->xlat_ctx_handle,
                            &sel1_exception_vectors);

        /* Walk the resource description's list of memory regions. */
        struct sp_rd_sect_mem_region *rdmem;

        for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL;
             rdmem = rdmem->next) {
                map_rdmem(sp_ctx, rdmem);
        }

        init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
}
        PLATFORM_MAX_CPUS_PER_CLUSTER,
        /* No of CPU cores - cluster1 */
        PLATFORM_MAX_CPUS_PER_CLUSTER
};

/* Sets of MMIO ranges to map */
#define MMIO_RANGE_0_ADDR       0x50000000
#define MMIO_RANGE_1_ADDR       0x60000000
#define MMIO_RANGE_2_ADDR       0x70000000
#define MMIO_RANGE_SIZE         0x200000

/*
 * Table of regions to map using the MMU.
 */
static const mmap_region_t tegra_mmap[] = {
        MAP_REGION_FLAT(MMIO_RANGE_0_ADDR, MMIO_RANGE_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(MMIO_RANGE_1_ADDR, MMIO_RANGE_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(MMIO_RANGE_2_ADDR, MMIO_RANGE_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        {0}
};

/*******************************************************************************
 * Return the MMIO map; the common platform code uses it to set up the
 * page tables as per the platform memory map and initialize the MMU.
 ******************************************************************************/
const mmap_region_t *plat_get_mmio_map(void)
{
        /* MMIO space */
        return tegra_mmap;
}
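/*
 * Hypothetical caller sketch, for illustration only: how common platform
 * code might consume plat_get_mmio_map(). The tegra_mmu_setup_sketch()
 * name is an assumption, not a symbol from the Tegra port.
 */
static void tegra_mmu_setup_sketch(void)
{
        mmap_add(plat_get_mmio_map());  /* register the MMIO regions */
        init_xlat_tables();             /* build the translation tables */
        enable_mmu_el3(0);              /* switch the MMU on at EL3 */
}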
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <debug.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3368_def.h>
#include <soc.h>

static uint32_t plls_con[END_PLL_ID][4];

/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
        MAP_REGION_FLAT(CCI400_BASE, CCI400_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(GIC400_BASE, GIC400_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(STIME_BASE, STIME_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(SGRF_BASE, SGRF_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
                        MT_MEMORY | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(PMU_BASE, PMU_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(UART_DBG_BASE, UART_DBG_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(CRU_BASE, CRU_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(DDR_PCTL_BASE, DDR_PCTL_SIZE,
#include <mmio.h>
#include <platform.h>
#include <platform_def.h>
#include <plat_imx8.h>
#include <xlat_tables.h>
#include <tzc380.h>

IMPORT_SYM(uintptr_t, __COHERENT_RAM_START__, BL31_COHERENT_RAM_START);
IMPORT_SYM(uintptr_t, __COHERENT_RAM_END__, BL31_COHERENT_RAM_END);
IMPORT_SYM(uintptr_t, __RO_START__, BL31_RO_START);
IMPORT_SYM(uintptr_t, __RO_END__, BL31_RO_END);
IMPORT_SYM(uintptr_t, __RW_START__, BL31_RW_START);
IMPORT_SYM(uintptr_t, __RW_END__, BL31_RW_END);

static const mmap_region_t imx_mmap[] = {
        MAP_REGION_FLAT(GPV_BASE, GPV_SIZE,
                        MT_DEVICE | MT_RW),     /* GPV map */
        MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE,
                        MT_DEVICE | MT_RW),     /* AIPS map */
        MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE,
                        MT_DEVICE | MT_RW),     /* GIC map */
        {0},
};

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

/* get SPSR for BL33 entry */
static uint32_t get_spsr_for_bl33_entry(void)
{
        unsigned long el_status;
        unsigned long mode;
        uint32_t spsr;
#include <assert.h>
#include <debug.h>
#include <delay_timer.h>
#include <dfs.h>
#include <dram.h>
#include <m0_ctl.h>
#include <mmio.h>
#include <plat_private.h>
#include <platform_def.h>
#include <rk3399_def.h>
#include <secure.h>
#include <soc.h>

/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
        MAP_REGION_FLAT(DEV_RNG0_BASE, DEV_RNG0_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(PMUSRAM_BASE, PMUSRAM_SIZE,
                        MT_MEMORY | MT_RW | MT_SECURE),
        { 0 }
};

/* The RockChip power domain tree descriptor */
const unsigned char rockchip_power_domain_tree_desc[] = {
        /* No of root nodes */
        PLATFORM_SYSTEM_COUNT,
        /* No of children for the root node */
        PLATFORM_CLUSTER_COUNT,
        /* No of children for the first cluster node */
        PLATFORM_CLUSTER0_CORE_COUNT,
        /* No of children for the second cluster node */
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <debug.h>
#include <delay_timer.h>
#include <mmio.h>
#include <platform_def.h>
#include <plat_private.h>
#include <rk3399_def.h>
#include <soc.h>

/* Table of regions to map using the MMU. */
const mmap_region_t plat_rk_mmap[] = {
        MAP_REGION_FLAT(RK3399_DEV_RNG0_BASE, RK3399_DEV_RNG0_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        { 0 }
};

/* The RockChip power domain tree descriptor */
const unsigned char rockchip_power_domain_tree_desc[] = {
        /* No of root nodes */
        PLATFORM_SYSTEM_COUNT,
        /* No of children for the root node */
        PLATFORM_CLUSTER_COUNT,
        /* No of children for the first cluster node */
        PLATFORM_CLUSTER0_CORE_COUNT,
        /* No of children for the second cluster node */
        PLATFORM_CLUSTER1_CORE_COUNT
};
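/*
 * For context: TF-A's topology framework retrieves this descriptor through
 * the plat_get_power_domain_tree_desc() porting hook. A sketch of the common
 * pattern (not necessarily this port's exact code) simply returns the array
 * defined above.
 */
const unsigned char *plat_get_power_domain_tree_desc(void)
{
        return rockchip_power_domain_tree_desc;
}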
 * BL3-1 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

static const int cci_map[] = {
        PLAT_MT_CCI_CLUSTER0_SL_IFACE_IX,
        PLAT_MT_CCI_CLUSTER1_SL_IFACE_IX
};

static uint32_t cci_map_length = ARRAY_SIZE(cci_map);

/* Table of regions to map using the MMU. */
static const mmap_region_t plat_mmap[] = {
        /* for TF text, RO, RW */
        MAP_REGION_FLAT(MTK_DEV_RNG0_BASE, MTK_DEV_RNG0_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(MTK_DEV_RNG1_BASE, MTK_DEV_RNG1_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(RAM_CONSOLE_BASE & ~(PAGE_SIZE_MASK), RAM_CONSOLE_SIZE,
                        MT_DEVICE | MT_RW | MT_NS),
        { 0 }
};

/*******************************************************************************
 * Macro generating the code for the function setting up the pagetables as per
 * the platform memory map & initialize the mmu, for the given exception level
 ******************************************************************************/
#define DEFINE_CONFIGURE_MMU_EL(_el)                                    \
        void plat_configure_mmu_el ## _el(unsigned long total_base,     \
                                          unsigned long total_size,     \
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <debug.h>
#include <k3_console.h>
#include <k3_gicv3.h>
#include <platform_def.h>
#include <string.h>
#include <ti_sci.h>
#include <xlat_tables_v2.h>

/* Table of regions to map using the MMU */
const mmap_region_t plat_k3_mmap[] = {
        MAP_REGION_FLAT(SHARED_RAM_BASE, SHARED_RAM_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(K3_USART_BASE_ADDRESS, K3_USART_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(K3_GICD_BASE, K3_GICD_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(K3_GICR_BASE, K3_GICR_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(SEC_PROXY_RT_BASE, SEC_PROXY_RT_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(SEC_PROXY_SCFG_BASE, SEC_PROXY_SCFG_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(SEC_PROXY_DATA_BASE, SEC_PROXY_DATA_SIZE,
                        MT_DEVICE | MT_RW | MT_SECURE),
        { /* sentinel */ }
};

/*
 * Placeholder variables for maintaining information about the next image(s)
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
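/*
 * For context, a sketch of how most TF-A platforms hand these entry points
 * back to the BL31 common code; the exact body may differ in this port.
 */
entry_point_info_t *bl31_plat_get_next_image_ep_info(uint32_t type)
{
        entry_point_info_t *info = (type == NON_SECURE) ?
                        &bl33_image_ep_info : &bl32_image_ep_info;

        /* A zero PC means the corresponding image was never loaded. */
        return (info->pc != 0U) ? info : NULL;
}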
#include <common/debug.h>
#include <context.h>
#include <drivers/arm/tzc380.h>
#include <drivers/console.h>
#include <drivers/generic_delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/mmio.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <plat/common/platform.h>

#include <gpc.h>
#include <imx_uart.h>
#include <plat_imx8.h>

static const mmap_region_t imx_mmap[] = {
        MAP_REGION_FLAT(IMX_GIC_BASE, IMX_GIC_SIZE,
                        MT_DEVICE | MT_RW),
        MAP_REGION_FLAT(IMX_AIPS_BASE, IMX_AIPS_SIZE,
                        MT_DEVICE | MT_RW),     /* AIPS map */
        {0},
};

static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;

/* get SPSR for BL33 entry */
static uint32_t get_spsr_for_bl33_entry(void)
{
        unsigned long el_status;
        unsigned long mode;
        uint32_t spsr;

        /* figure out what mode we enter the non-secure world */
#include "uniphier.h" #define BL2_END (unsigned long)(&__BL2_END__) #define BL2_SIZE ((BL2_END) - (BL2_BASE)) static int uniphier_bl2_kick_scp; void bl2_el3_early_platform_setup(u_register_t x0, u_register_t x1, u_register_t x2, u_register_t x3) { uniphier_console_setup(); } static const struct mmap_region uniphier_bl2_mmap[] = { /* for BL31, BL32 */ MAP_REGION_FLAT(UNIPHIER_SEC_DRAM_BASE, UNIPHIER_SEC_DRAM_SIZE, MT_MEMORY | MT_RW | MT_SECURE), /* for SCP, BL33 */ MAP_REGION_FLAT(UNIPHIER_NS_DRAM_BASE, UNIPHIER_NS_DRAM_SIZE, MT_MEMORY | MT_RW | MT_NS), { .size = 0 }, }; void bl2_el3_plat_arch_setup(void) { unsigned int soc; int skip_scp = 0; int ret; uniphier_mmap_setup(BL2_BASE, BL2_SIZE, uniphier_bl2_mmap); enable_mmu_el3(0);
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
{
        VERBOSE("S-EL1/S-EL0 context setup start...\n");

        cpu_context_t *ctx = cm_get_context(SECURE);

        /* Make sure that we got a Secure context. */
        assert(ctx != NULL);

        /* Assert we are in Secure state. */
        assert((read_scr_el3() & SCR_NS_BIT) == 0);

        /* Disable MMU at EL1. */
        disable_mmu_icache_el1();

        /* Invalidate TLBs at EL1. */
        tlbivmalle1();

        /*
         * General-Purpose registers
         * -------------------------
         */

        /*
         * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
         *     The buffer will be mapped in the Secure EL1 translation regime
         *     with Normal IS WBWA attributes and RO data and Execute Never
         *     instruction access permissions.
         *
         * X1: Size of the buffer in bytes
         *
         * X2: cookie value (Implementation Defined)
         *
         * X3: cookie value (Implementation Defined)
         *
         * X4 to X30 = 0 (already done by cm_init_my_context())
         */
        write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
        write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
        write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
        write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);

        /*
         * SP_EL0: A non-zero value will indicate to the SP that the SPM has
         * initialized the stack pointer for the current CPU through
         * implementation defined means. The value will be 0 otherwise.
         */
        write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
                      PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);

        /*
         * Setup translation tables
         * ------------------------
         */

#if ENABLE_ASSERTIONS
        /*
         * Get max granularity supported by the platform. The TGRAN fields
         * live in ID_AA64MMFR0_EL1 (not ID_AA64PFR0_EL1).
         */
        u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

        int tgran64_supported =
                ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
                 ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
                 ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;

        int tgran16_supported =
                ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
                 ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
                 ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;

        int tgran4_supported =
                ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
                 ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
                 ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;

        uintptr_t max_granule_size;

        if (tgran64_supported) {
                max_granule_size = 64 * 1024;
        } else if (tgran16_supported) {
                max_granule_size = 16 * 1024;
        } else {
                assert(tgran4_supported);
                max_granule_size = 4 * 1024;
        }

        /* max_granule_size is in bytes; divide to print KiB. */
        VERBOSE("Max translation granule supported: %lu KiB\n",
                max_granule_size / 1024U);

        uintptr_t max_granule_size_mask = max_granule_size - 1;

        /* Base must be aligned to the max granularity */
        assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);

        /* Size must be a multiple of the max granularity */
        assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);
#endif /* ENABLE_ASSERTIONS */

        /* This region contains the exception vectors used at S-EL1. */
        const mmap_region_t sel1_exception_vectors =
                MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
                                SPM_SHIM_EXCEPTIONS_SIZE,
                                MT_CODE | MT_SECURE | MT_PRIVILEGED);
        mmap_add_region_ctx(&secure_partition_xlat_ctx,
                            &sel1_exception_vectors);

        mmap_add_ctx(&secure_partition_xlat_ctx,
                     plat_get_secure_partition_mmap(NULL));

        init_xlat_tables_ctx(&secure_partition_xlat_ctx);

        /*
         * MMU-related registers
         * ---------------------
         */

        /* Set attributes in the right indices of the MAIR */
        u_register_t mair_el1 =
                MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
                MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
                MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

        write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);

        /*
         * Setup TCR_EL1.
         */
        u_register_t tcr_ps_bits =
                tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);

        u_register_t tcr_el1 =
                /* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
                (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))        |
                /* Inner and outer WBWA, shareable. */
                TCR_SH_INNER_SHAREABLE                                  |
                TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA                   |
                /* Set the granularity to 4KB. */
                TCR_TG0_4K                                              |
                /* Limit Intermediate Physical Address Size. */
                tcr_ps_bits << TCR_EL1_IPS_SHIFT                        |
                /* Disable translations using TTBR1_EL1. */
                TCR_EPD1_BIT;
                /* The remaining fields related to TTBR1_EL1 are left as zero. */

        tcr_el1 &= ~(
                /* Enable translations using TTBR0_EL1 */
                TCR_EPD0_BIT
        );

        write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);

        /* Setup SCTLR_EL1 */
        u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx),
                                              CTX_SCTLR_EL1);

        sctlr_el1 |=
                /*SCTLR_EL1_RES1 |*/
                /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
                SCTLR_UCI_BIT                                           |
                /* RW regions at xlat regime EL1&0 are forced to be XN. */
                SCTLR_WXN_BIT                                           |
                /* Don't trap to EL1 execution of WFI or WFE at EL0. */
                SCTLR_NTWI_BIT | SCTLR_NTWE_BIT                         |
                /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
                SCTLR_UCT_BIT                                           |
                /* Don't trap to EL1 execution of DC ZVA at EL0. */
                SCTLR_DZE_BIT                                           |
                /* Enable SP Alignment check for EL0 */
                SCTLR_SA0_BIT                                           |
                /* Allow cacheable data and instr. accesses to normal memory. */
                SCTLR_C_BIT | SCTLR_I_BIT                               |
                /* Alignment fault checking enabled when at EL1 and EL0. */
                SCTLR_A_BIT                                             |
                /* Enable MMU. */
                SCTLR_M_BIT;

        sctlr_el1 &= ~(
                /* Explicit data accesses at EL0 are little-endian. */
                SCTLR_E0E_BIT |
                /* Accesses to DAIF from EL0 are trapped to EL1. */
                SCTLR_UMA_BIT
        );

        write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

        /* Point TTBR0_EL1 at the tables of the context created for the SP. */
        write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
                      (u_register_t)secure_partition_base_xlat_table);

        /*
         * Setup other system registers
         * ----------------------------
         */

        /* Shim Exception Vector Base Address */
        write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
                      SPM_SHIM_EXCEPTIONS_PTR);

        /*
         * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
         * TTA: Enable access to trace registers.
         * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
         */
        write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
                      CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));

        /*
         * Prepare information in buffer shared between EL3 and S-EL0
         * ----------------------------------------------------------
         */

        void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

        /* Copy the boot information into the shared buffer with the SP. */
        assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
               <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

        assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

        const secure_partition_boot_info_t *sp_boot_info =
                        plat_get_secure_partition_boot_info(NULL);

        assert(sp_boot_info != NULL);

        memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
               sizeof(secure_partition_boot_info_t));

        /* Pointer to the MP information from the platform port. */
        secure_partition_mp_info_t *sp_mp_info =
                ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

        assert(sp_mp_info != NULL);

        /*
         * Point the shared buffer MP information pointer to where the info
         * will be populated, just after the boot info.
         */
        ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
                (secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr +
                        sizeof(secure_partition_boot_info_t));

        /*
         * Update the shared buffer pointer to where the MP information for
         * the payload will be populated
         */
        shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

        /*
         * Copy the cpu information into the shared buffer area after the boot
         * information.
         */
        assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

        assert((uintptr_t)shared_buf_ptr
               <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
                       (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

        memcpy(shared_buf_ptr, (const void *) sp_mp_info,
               sp_boot_info->num_cpus * sizeof(*sp_mp_info));

        /*
         * Calculate the linear indices of cores in boot information for the
         * secure partition and flag the primary CPU
         */
        sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;

        for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
                u_register_t mpidr = sp_mp_info[index].mpidr;

                sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
                if (plat_my_core_pos() == sp_mp_info[index].linear_id)
                        sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
        }

        VERBOSE("S-EL1/S-EL0 context setup end.\n");
}
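/*
 * Hypothetical S-EL0 side sketch: how a Secure Partition could parse the
 * shared buffer laid out above (the boot info header immediately followed
 * by the per-CPU MP info array). sp_parse_boot_info() is an illustrative
 * name, not part of the SPM interface; the SP receives the buffer address
 * in X0 as documented in secure_partition_setup().
 */
static const secure_partition_mp_info_t *
sp_parse_boot_info(const void *shared_buf, unsigned int *num_cpus)
{
        const secure_partition_boot_info_t *info = shared_buf;

        *num_cpus = info->num_cpus;

        /* mp_info was rewritten by the SPM to point just after the header. */
        return info->mp_info;
}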