/*******************************************************************************
 * This function invokes the PSCI library interface to initialize the
 * non-secure CPU context and copies the relevant CPU context register values
 * to the SMC context. These registers will be programmed during `smc_exit`.
 ******************************************************************************/
static void sp_min_prepare_next_image_entry(void)
{
	entry_point_info_t *next_image_info;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

	/* Program system registers to proceed to non-secure */
	next_image_info = sp_min_plat_get_bl33_ep_info();
	assert(next_image_info);
	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));

	INFO("SP_MIN: Preparing exit to normal world\n");

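	/*
	 * Initialize the non-secure cpu_context from the BL33 entrypoint
	 * information via the PSCI library.
	 */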
	psci_prepare_next_non_secure_ctx(next_image_info);
	smc_set_next_ctx(NON_SECURE);

	/* Copy r0, lr and spsr from cpu context to SMC context */
	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			smc_get_next_ctx());

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
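	/*
	 * Ensure the write to SCR has completed before accessing the
	 * Non-secure banked SCTLR.
	 */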
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);
	isb();

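	/* Switch back to the Secure view of the banked registers */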
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
/******************************************************************************
 * This function is invoked during warm boot. Invoke the PSCI library
 * warm boot entry point, which takes care of architectural and platform
 * setup/restore. Copy the relevant cpu_context register values to the SMC
 * context; these will be programmed during `smc_exit`.
 *****************************************************************************/
void sp_min_warm_boot(void)
{
	smc_ctx_t *next_smc_ctx;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

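	/*
	 * Architectural and platform setup/restore for this CPU is handled
	 * by the PSCI library warm boot entry point.
	 */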
	psci_warmboot_entrypoint();

	smc_set_next_ctx(NON_SECURE);

	next_smc_ctx = smc_get_next_ctx();
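	/* Start from a clean SMC context for this CPU */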
	zeromem(next_smc_ctx, sizeof(smc_ctx_t));

	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
			next_smc_ctx);

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
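	/*
	 * Ensure the write to SCR has completed before accessing the
	 * Non-secure banked SCTLR.
	 */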
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);
	isb();

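	/* Switch back to the Secure view of the banked registers */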
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info. The function returns a pointer to the initialized
 * context and sets this as the next context to return to.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
    unsigned int security_state;
    uint32_t scr, sctlr;
    regs_t *reg_ctx;

    assert(ctx);

    security_state = GET_SECURITY_STATE(ep->h.attr);

    /* Clear any residual register values from the context */
    memset(ctx, 0, sizeof(*ctx));

    reg_ctx = get_regs_ctx(ctx);

    /*
     * Base the context SCR on the current value, adjust for entry point
     * specific requirements
     */
    scr = read_scr();
    scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

    if (security_state != SECURE)
        scr |= SCR_NS_BIT;

    /*
     * Set up SCTLR for the Non-secure context.
     * The EE bit is taken from the entrypoint attributes.
     * The M, C and I bits must be zero (as required by the PSCI
     * specification).
     *
     * The target exception level is based on the spsr mode requested.
     * If execution is requested to hyp mode, HVC is enabled via SCR.HCE.
     *
     * Always compute the NS SCTLR value and save it in the cpu_context
     * - the HYP registers are set up by cm_prepare_el3_exit() as they
     * are not part of the stored cpu_context.
     *
     * TODO: In debug builds the spsr should be validated and checked
     * against the CPU support, security state, endianness and pc
     */
    if (security_state != SECURE) {
        sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
        sctlr |= SCTLR_RES1;
        write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
    }

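    /* Enable HVC via SCR.HCE if entry to hyp mode is requested */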
    if (GET_M32(ep->spsr) == MODE32_hyp)
        scr |= SCR_HCE_BIT;

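    /* Populate the SCR, entrypoint (lr) and spsr in the cpu_context */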
    write_ctx_reg(reg_ctx, CTX_SCR, scr);
    write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
    write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

    /*
     * Store the r0-r3 value from the entrypoint into the context
     * Use memcpy as we are in control of the layout of the structures
     */
    memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized.
 * If execution is requested to non-secure PL1 and the CPU supports hyp
 * mode, then hyp mode is disabled by configuring all necessary hyp mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx),
						 CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR : can be ignored when bypassing */

			/* HCPTR : disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case where HDCR
			 * is not completely reset by the hardware - set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the EL2 physical timer and
			 * therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}