/* get SPSR for BL33 entry */
static uint32_t get_spsr_for_bl33_entry(void)
{
	unsigned long el_status;
	unsigned long mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	mode = (el_status) ? MODE_EL2 : MODE_EL1;

	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
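/*
 * For reference: with the standard AArch64 PSTATE layout (M[3:0] selecting
 * the target EL and stack pointer, D/A/I/F at bits [9:6]), the two values
 * the helper above can produce are:
 *
 *   SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) == 0x3C9 (EL2h)
 *   SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) == 0x3C5 (EL1h)
 *
 * i.e. AArch64 execution state, SP_ELx selected, and all of D, A, I and F
 * masked on entry to the non-secure world.
 */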
/*******************************************************************************
 * This function prepares the boot arguments for a 64-bit kernel entry
 ******************************************************************************/
static entry_point_info_t *bl31_plat_get_next_kernel64_ep_info(void)
{
	entry_point_info_t *next_image_info;
	unsigned long el_status;
	unsigned int mode;

	el_status = 0;
	mode = 0;

	/* Kernel image is always non-secure */
	next_image_info = &bl33_image_ep_info;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	if (el_status) {
		INFO("Kernel_EL2\n");
		mode = MODE_EL2;
	} else {
		INFO("Kernel_EL1\n");
		mode = MODE_EL1;
	}

	INFO("Kernel is 64Bit\n");
	next_image_info->spsr =
		SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	next_image_info->pc = get_kernel_info_pc();
	next_image_info->args.arg0 = get_kernel_info_r0();
	next_image_info->args.arg1 = get_kernel_info_r1();

	INFO("pc=0x%lx, r0=0x%lx, r1=0x%lx\n",
				 next_image_info->pc,
				 next_image_info->args.arg0,
				 next_image_info->args.arg1);


	SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);

	/* None of the images on this platform can have 0x0 as the entrypoint */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}
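/*
 * The EL2-presence probe above recurs verbatim in every example in this
 * listing. A minimal sketch of factoring it out, reusing only the macros
 * already used above (the helper name is illustrative, not part of any
 * TF-A API):
 */
static unsigned int get_ns_entry_mode(void)
{
	unsigned long el_status;

	/* A non-zero EL2 field in ID_AA64PFR0_EL1 means EL2 is implemented. */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	return el_status ? MODE_EL2 : MODE_EL1;
}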
Example #3
/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
uint32_t arm_get_spsr_for_bl33_entry(void)
{
	unsigned long el_status;
	unsigned int mode;
	uint32_t spsr;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	mode = (el_status) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	return spsr;
}
void bl2_plat_set_bl33_ep_info(image_info_t *image,
			       entry_point_info_t *bl33_ep_info)
{
	unsigned long el_status;
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	if (el_status)
		mode = MODE_EL2;
	else
		mode = MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	bl33_ep_info->spsr = SPSR_64(mode, MODE_SP_ELX,
				       DISABLE_ALL_EXCEPTIONS);
	SET_SECURITY_STATE(bl33_ep_info->h.attr, NON_SECURE);
}
/*******************************************************************************
 * The only thing to do in BL2 is to load further images and pass control to
 * BL31. The memory occupied by BL2 will be reclaimed by BL3_x stages. BL2 runs
 * entirely in S-EL1. Since the ARM standard C libraries are not PIC, printf
 * et al. are not available. We rely on assertions to signal error conditions.
 ******************************************************************************/
void bl2_main(void)
{
	meminfo *bl2_tzram_layout;
	bl31_args *bl2_to_bl31_args;
	unsigned long bl31_base, bl32_base = 0, bl33_base, el_status;
	unsigned int bl2_load, bl31_load, mode;

	/* Perform remaining generic architectural setup in S-El1 */
	bl2_arch_setup();

	/* Perform platform setup in BL2 */
	bl2_platform_setup();

	printf("BL2 %s\n\r", build_message);

	/* Find out how much free trusted ram remains after BL2 load */
	bl2_tzram_layout = bl2_plat_sec_mem_layout();

	/*
	 * Load BL31. BL1 tells BL2 whether it has been TOP or BOTTOM loaded.
	 * To avoid fragmentation of trusted SRAM memory, BL31 is always
	 * loaded opposite to BL2. This allows BL31 to reclaim BL2 memory
	 * while maintaining its free space in one contiguous chunk.
	 */
	bl2_load = bl2_tzram_layout->attr & LOAD_MASK;
	assert((bl2_load == TOP_LOAD) || (bl2_load == BOT_LOAD));
	bl31_load = (bl2_load == TOP_LOAD) ? BOT_LOAD : TOP_LOAD;
	bl31_base = load_image(bl2_tzram_layout, BL31_IMAGE_NAME,
	                       bl31_load, BL31_BASE);

	/* Panic if it has not been possible to load BL31 */
	if (bl31_base == 0) {
		ERROR("Failed to load BL3-1.\n");
		panic();
	}

	/*
	 * Get a pointer to the memory the platform has set aside to pass
	 * information to BL31.
	 */
	bl2_to_bl31_args = bl2_get_bl31_args_ptr();

	/*
	 * Load the BL32 image if there is one. It is up to the platform
	 * to specify where BL32 should be loaded if it exists. It
	 * could create space in the secure sram or point to a
	 * completely different memory. A zero size indicates that the
	 * platform does not want to load a BL32 image.
	 */
	if (bl2_to_bl31_args->bl32_meminfo.total_size)
		bl32_base = load_image(&bl2_to_bl31_args->bl32_meminfo,
				       BL32_IMAGE_NAME,
				       bl2_to_bl31_args->bl32_meminfo.attr &
				       LOAD_MASK,
				       BL32_BASE);

	/*
	 * Create a new layout of memory for BL31 as seen by BL2. This
	 * will gobble up all the BL2 memory.
	 */
	init_bl31_mem_layout(bl2_tzram_layout,
			     &bl2_to_bl31_args->bl31_meminfo,
			     bl31_load);

	/* Load the BL33 image in non-secure memory provided by the platform */
	bl33_base = load_image(&bl2_to_bl31_args->bl33_meminfo,
			       BL33_IMAGE_NAME,
			       BOT_LOAD,
			       plat_get_ns_image_entrypoint());
	/* Halt if we failed to load the normal world firmware. */
	if (bl33_base == 0) {
		ERROR("Failed to load BL3-3.\n");
		panic();
	}

	/*
	 * BL2 also needs to tell BL31 where the non-trusted software image
	 * is located.
	 */
	bl2_to_bl31_args->bl33_image_info.entrypoint = bl33_base;

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	if (el_status)
		mode = MODE_EL2;
	else
		mode = MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	bl2_to_bl31_args->bl33_image_info.spsr =
		make_spsr(mode, MODE_SP_ELX, MODE_RW_64);
	bl2_to_bl31_args->bl33_image_info.security_state = NON_SECURE;

	if (bl32_base) {
		/* Fill BL32 image info */
		bl2_to_bl31_args->bl32_image_info.entrypoint = bl32_base;
		bl2_to_bl31_args->bl32_image_info.security_state = SECURE;

		/*
		 * The Secure Payload Dispatcher service is responsible for
		 * setting the SPSR prior to entry into the BL32 image.
		 */
		bl2_to_bl31_args->bl32_image_info.spsr = 0;
	}

	/* Flush the entire BL31 args buffer */
	flush_dcache_range((unsigned long) bl2_to_bl31_args,
			   sizeof(*bl2_to_bl31_args));

	/*
	 * Run BL31 via an SMC to BL1. Information on how to pass control to
	 * the BL32 (if present) and BL33 software images will be passed to
	 * BL31 as an argument.
	 */
	run_image(bl31_base,
		  make_spsr(MODE_EL3, MODE_SP_ELX, MODE_RW_64),
		  SECURE,
		  (void *) bl2_to_bl31_args,
		  NULL);
}
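/*
 * To illustrate the TOP_LOAD/BOT_LOAD policy above: carving each image from
 * the opposite end of trusted SRAM keeps the remaining free memory in one
 * contiguous chunk, so BL31 can later reclaim the BL2 region. A toy sketch,
 * ignoring alignment; the type and helper names are illustrative, not the
 * TF-A API:
 */
struct toy_meminfo {
	unsigned long free_base;	/* base of the free region */
	unsigned long free_size;	/* size of the free region */
};

static unsigned long toy_place_image(struct toy_meminfo *m,
				     unsigned long size, int top_load)
{
	unsigned long base;

	if (size > m->free_size)
		return 0;	/* image does not fit */

	if (top_load) {
		/* Carve the image from the top of the free region. */
		base = m->free_base + m->free_size - size;
	} else {
		/* Carve the image from the bottom of the free region. */
		base = m->free_base;
		m->free_base += size;
	}
	m->free_size -= size;

	return base;
}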
Example #6
entry_point_info_t *bl31_plat_get_next_kernel_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;
	unsigned long el_status;
	unsigned int mode;

#if RESET_TO_BL31
	next_image_info = (type == NON_SECURE) ?
		&bl33_entrypoint_info :
		&bl32_entrypoint_info;

	mt_get_entry_point_info(type, next_image_info);

#else
	next_image_info = (type == NON_SECURE) ?
		bl2_to_bl31_params->bl33_ep_info :
		bl2_to_bl31_params->bl32_ep_info;
#endif

	/* Figure out what mode we enter the non-secure world in */
	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
	el_status &= ID_AA64PFR0_ELX_MASK;

	if (el_status)
		mode = MODE_EL2;
	else
		mode = MODE_EL1;

#if 0
	if (rw == 0) {
		printf("LK is AArch32\n");
		printf("LK start_addr=0x%x\n", bl33_ep_info->pc);
		mode = MODE32_svc;
		ee = 0;
		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		bl33_ep_info->spsr = SPSR_MODE32(mode, 0, ee, daif);

		/*
		 * Pass boot arguments to LK:
		 * ldr     w4, =pl_boot_argument
		 * ldr     w5, =BOOT_ARGUMENT_SIZE
		 */
		bl33_ep_info->args.arg4 = (unsigned long)(uintptr_t)&pl_boot_argument;
		bl33_ep_info->args.arg5 = (unsigned long)(uintptr_t)BOOT_ARGUMENT_SIZE;
	} else
#endif
	{
		printf("Kernel is 64Bit\n");
		next_image_info->spsr =
			SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
		next_image_info->pc = get_kernel_info_pc();
		next_image_info->args.arg0 = get_kernel_info_r0();
		next_image_info->args.arg1 = get_kernel_info_r1();

		printf("pc=0x%llx, r0=0x%llx, r1=0x%llx\n",
		       next_image_info->pc,
		       next_image_info->args.arg0,
		       next_image_info->args.arg1);
	}
	SET_SECURITY_STATE(next_image_info->h.attr, NON_SECURE);

	/* None of the images on this platform can have 0x0 as the entrypoint */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}
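/*
 * For the disabled AArch32 path in the function above: assuming TF-A's usual
 * AArch32 PSTATE layout (M[4:0] = 0b10011 for SVC mode, A/I/F at bits
 * [8:6]), SPSR_MODE32(MODE32_svc, 0, 0, DAIF_ABT_BIT | DAIF_IRQ_BIT |
 * DAIF_FIQ_BIT) evaluates to 0x1D3: SVC mode, A32 ISA, little-endian, with
 * asynchronous aborts, IRQs and FIQs masked.
 */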
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
{
	VERBOSE("S-EL1/S-EL0 context setup start...\n");

	cpu_context_t *ctx = cm_get_context(SECURE);

	/* Make sure that we got a Secure context. */
	assert(ctx != NULL);

	/* Assert we are in Secure state. */
	assert((read_scr_el3() & SCR_NS_BIT) == 0);

	/* Disable MMU at EL1. */
	disable_mmu_icache_el1();

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();

	/*
	 * General-Purpose registers
	 * -------------------------
	 */

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X30 = 0 (already done by cm_init_my_context())
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
			PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS

	/* Get max granularity supported by the platform. */

	u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

	int tgran64_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;

	int tgran16_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;

	int tgran4_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;

	uintptr_t max_granule_size;

	if (tgran64_supported) {
		max_granule_size = 64 * 1024;
	} else if (tgran16_supported) {
		max_granule_size = 16 * 1024;
	} else {
		assert(tgran4_supported);
		max_granule_size = 4 * 1024;
	}

	VERBOSE("Max translation granule supported: %lu KiB\n",
		max_granule_size);

	uintptr_t max_granule_size_mask = max_granule_size - 1;

	/* Base must be aligned to the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);

#endif /* ENABLE_ASSERTIONS */

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(&secure_partition_xlat_ctx,
			    &sel1_exception_vectors);

	mmap_add_ctx(&secure_partition_xlat_ctx,
		     plat_get_secure_partition_mmap(NULL));

	init_xlat_tables_ctx(&secure_partition_xlat_ctx);

	/*
	 * MMU-related registers
	 * ---------------------
	 */

	/* Set attributes in the right indices of the MAIR */
	u_register_t mair_el1 =
		MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
		MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
		MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);

	/* Setup TCR_EL1. */
	u_register_t tcr_ps_bits = tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);

	u_register_t tcr_el1 =
		/* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
		(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))		|
		/* Inner and outer WBWA, shareable. */
		TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA	|
		/* Set the granularity to 4KB. */
		TCR_TG0_4K							|
		/* Limit Intermediate Physical Address Size. */
		tcr_ps_bits << TCR_EL1_IPS_SHIFT				|
		/* Disable translations using TTBR1_EL1. */
		TCR_EPD1_BIT
		/* The remaining fields related to TTBR1_EL1 are left as zero. */
	;
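	/*
	 * Worked example: with PLAT_VIRT_ADDR_SPACE_SIZE = 1UL << 32 (4 GiB),
	 * __builtin_ctzl() returns 32, so T0SZ = 64 - 32 = 32 and the region
	 * addressed through TTBR0_EL1 is exactly 4 GiB.
	 */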

	tcr_el1 &= ~(
		/* Enable translations using TTBR0_EL1 */
		TCR_EPD0_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);

	sctlr_el1 |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT							|
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT							|
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT					|
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT							|
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT							|
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT							|
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT					|
		/* Alignment fault checking enabled when at EL1 and EL0. */
		SCTLR_A_BIT							|
		/* Enable MMU. */
		SCTLR_M_BIT
	;

	sctlr_el1 &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT							|
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

	/* Point TTBR0_EL1 at the tables of the context created for the SP. */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
			(u_register_t)secure_partition_base_xlat_table);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
			SPM_SHIM_EXCEPTIONS_PTR);

	/*
	 * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
			CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */

	void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

	assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

	const secure_partition_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

	assert(sp_boot_info != NULL);

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(secure_partition_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	secure_partition_mp_info_t *sp_mp_info =
		((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info will
	 * be populated, just after the boot info.
	 */
	((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
		(secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr
				+ sizeof(secure_partition_boot_info_t));
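	/*
	 * Resulting shared-buffer layout (illustrative):
	 *
	 *   PLAT_SPM_BUF_BASE -> +--------------------------------+
	 *                        | secure_partition_boot_info_t   |
	 *                        |  (mp_info now points at the    |
	 *                        |   array immediately below)     |
	 *                        +--------------------------------+
	 *                        | secure_partition_mp_info_t[]   |
	 *                        |  one entry per CPU             |
	 *                        +--------------------------------+
	 */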

	/*
	 * Update the shared buffer pointer to where the MP information for the
	 * payload will be populated
	 */
	shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the cpu information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

	assert((uintptr_t)shared_buf_ptr
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
		       (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
		sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of cores in boot information for the
	 * secure partition and flag the primary CPU
	 */
	sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}

	VERBOSE("S-EL1/S-EL0 context setup end.\n");
}
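/*
 * A hypothetical Secure-Partition-side view of the setup above, assuming the
 * SP entry point receives the registers programmed into the context (X0 =
 * shared buffer VA, X1 = buffer size, X2/X3 = cookies) and the same boot-info
 * layout. All names below are illustrative, not part of the SPM interface:
 */
void sp_entry(uintptr_t shared_buf, size_t shared_buf_size,
	      uint64_t cookie0, uint64_t cookie1)
{
	const secure_partition_boot_info_t *boot_info =
		(const secure_partition_boot_info_t *) shared_buf;

	/* mp_info was fixed up by the SPM to point just after boot_info. */
	const secure_partition_mp_info_t *mp_info = boot_info->mp_info;

	for (unsigned int i = 0; i < boot_info->num_cpus; i++) {
		if (mp_info[i].flags & MP_INFO_FLAG_PRIMARY_CPU) {
			/* ... primary-CPU-only initialisation would go here ... */
		}
	}
}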