Example #1
/*******************************************************************************
 * RockChip handler called when a power domain has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from.
 ******************************************************************************/
void rockchip_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	uint32_t lvl;
	plat_local_state_t lvl_state;
	int ret;

	assert(RK_CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE);

	for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		lvl_state = target_state->pwr_domain_state[lvl];
		ret = rockchip_soc_hlvl_pwr_dm_on_finish(lvl, lvl_state);
		if (ret == PSCI_E_NOT_SUPPORTED)
			break;
	}

	rockchip_soc_cores_pwr_dm_on_finish();

	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	/* Program the GIC per-cpu distributor or re-distributor interface */
	plat_rockchip_gic_pcpu_init();

	/* Enable the GIC cpu interface */
	plat_rockchip_gic_cpuif_enable();
}
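The loop above walks the higher power levels and stops as soon as one of them reports PSCI_E_NOT_SUPPORTED. Below is a minimal, standalone sketch of that dispatch pattern; the constants and the stub handler are simplified stand-ins for the real TF-A definitions, not the platform's actual code.

#include <stdio.h>

#define PSCI_E_SUCCESS		0
#define PSCI_E_NOT_SUPPORTED	-1
#define MPIDR_AFFLVL1		1U
#define PLAT_MAX_PWR_LVL	2U	/* assumed: cluster level 1, system level 2 */

/* Stub handler: pretend only the cluster level (1) is implemented. */
static int soc_hlvl_pwr_dm_on_finish(unsigned int lvl, int lvl_state)
{
	(void)lvl_state;
	if (lvl > 1U)
		return PSCI_E_NOT_SUPPORTED;
	printf("power-on finished for level %u\n", lvl);
	return PSCI_E_SUCCESS;
}

int main(void)
{
	/* Walk upward from the cluster level; stop at the first level the
	 * SoC layer does not handle, exactly as the finisher above does. */
	for (unsigned int lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		if (soc_hlvl_pwr_dm_on_finish(lvl, 0) == PSCI_E_NOT_SUPPORTED)
			break;
	}
	return 0;
}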
Example #2
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way. It also
 * initializes the MTK proprietary log buffer control fields.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	/* Enable non-secure access to CCI-400 registers */
	mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET, 0x1);

	plat_cci_init();
	plat_cci_enable();

	if (gteearg.atf_log_buf_size != 0) {
		INFO("mmap atf buffer : 0x%x, 0x%x\n\r",
			gteearg.atf_log_buf_start,
			gteearg.atf_log_buf_size);

		mmap_add_region(
			gteearg.atf_log_buf_start &
			~(PAGE_SIZE_2MB_MASK),
			gteearg.atf_log_buf_start &
			~(PAGE_SIZE_2MB_MASK),
			PAGE_SIZE_2MB,
			MT_DEVICE | MT_RW | MT_NS);

		INFO("mmap atf buffer (force 2MB aligned):0x%x, 0x%x\n",
			(gteearg.atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)),
		PAGE_SIZE_2MB);
	}
	/*
	 * Add TZRAM_BASE to the memory map, then give the RO and COHERENT
	 * regions their own, different attributes.
	 */
	plat_configure_mmu_el3(
		(TZRAM_BASE & ~(PAGE_SIZE_MASK)),
		(TZRAM_SIZE & ~(PAGE_SIZE_MASK)),
		(BL31_RO_BASE & ~(PAGE_SIZE_MASK)),
		BL31_RO_LIMIT,
		BL31_COHERENT_RAM_BASE,
		BL31_COHERENT_RAM_LIMIT);
	/* Initialize the ATF log buffer */
	if (gteearg.atf_log_buf_size != 0) {
		gteearg.atf_aee_debug_buf_size = ATF_AEE_BUFFER_SIZE;
		gteearg.atf_aee_debug_buf_start =
			gteearg.atf_log_buf_start +
			gteearg.atf_log_buf_size - ATF_AEE_BUFFER_SIZE;
		INFO("ATF log service is registered (0x%x, aee:0x%x)\n",
			gteearg.atf_log_buf_start,
			gteearg.atf_aee_debug_buf_start);
	} else {
		gteearg.atf_aee_debug_buf_size = 0;
		gteearg.atf_aee_debug_buf_start = 0;
	}

	/* Platform code run before bl31_main */
	/* Kept compatible with earlier chipsets */

	/* Output to both the ATF log buffer and the UART */
	INFO("BL3-1: %s\n", version_string);
	INFO("BL3-1: %s\n", build_message);

}
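The 2 MB masking above is what forces the log-buffer mapping onto a block-aligned base. Here is a standalone sketch of that arithmetic, with an assumed value for PAGE_SIZE_2MB and an invented buffer address; the real values come from the MTK platform headers and from gteearg.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE_2MB		0x200000U
#define PAGE_SIZE_2MB_MASK	(PAGE_SIZE_2MB - 1U)

int main(void)
{
	/* Hypothetical log buffer base; the real one is gteearg.atf_log_buf_start. */
	uint32_t buf_start = 0x43d2e000U;

	/* Clearing the low 21 bits rounds the base down to a 2 MB boundary,
	 * which is the address handed to mmap_add_region() above. */
	uint32_t map_base = buf_start & ~PAGE_SIZE_2MB_MASK;

	printf("buffer 0x%x -> mapping 0x%x, size 0x%x\n",
	       buf_start, map_base, PAGE_SIZE_2MB);
	return 0;
}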
Example #3
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	plat_cci_init();
	plat_cci_enable();
	plat_configure_mmu_el3(BL31_RO_BASE,
			       (BL31_COHERENT_RAM_LIMIT - BL31_RO_BASE),
			       BL31_RO_BASE,
			       BL31_RO_LIMIT,
			       BL31_COHERENT_RAM_BASE,
			       BL31_COHERENT_RAM_LIMIT);
}
Example #4
/*******************************************************************************
 * Perform the very early platform specific architectural setup here. At the
 * moment this only initializes the MMU in a quick and dirty way.
 ******************************************************************************/
void bl31_plat_arch_setup(void)
{
	plat_cci_init();
	plat_cci_enable();

	plat_configure_mmu_el3(BL_CODE_BASE,
			       BL_COHERENT_RAM_END - BL_CODE_BASE,
			       BL_CODE_BASE,
			       BL_CODE_END,
			       BL_COHERENT_RAM_BASE,
			       BL_COHERENT_RAM_END);
}
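All three variants above hand plat_configure_mmu_el3() the same shape of arguments: the base and size of the whole image region to map, followed by the read-only and coherent-RAM sub-ranges that get tighter attributes. The sketch below only illustrates that containment relationship with made-up addresses; the real values are linker-generated symbols (BL_CODE_BASE/END, BL_COHERENT_RAM_BASE/END and their BL31_* counterparts).

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main(void)
{
	/* Made-up stand-ins for the linker symbols used above. */
	uintptr_t code_base     = 0x10000000U;	/* BL_CODE_BASE */
	uintptr_t code_end      = 0x10020000U;	/* BL_CODE_END */
	uintptr_t coherent_base = 0x10030000U;	/* BL_COHERENT_RAM_BASE */
	uintptr_t coherent_end  = 0x10031000U;	/* BL_COHERENT_RAM_END */

	uintptr_t total_base = code_base;
	size_t total_size = coherent_end - code_base;

	/* The RO and coherent sub-ranges must fall inside the total region
	 * described by the first two arguments. */
	assert(code_end <= total_base + total_size);
	assert(coherent_base >= total_base);
	assert(coherent_end <= total_base + total_size);
	return 0;
}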
Example #5
/*******************************************************************************
 * RockChip handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void rockchip_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	uint32_t lvl;
	plat_local_state_t lvl_state;

	/* Nothing to be done on waking up from retention from CPU level */
	if (RK_CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (!rockchip_ops)
		goto comm_finish;

	if (RK_SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		if (rockchip_ops->sys_pwr_dm_resume)
			rockchip_ops->sys_pwr_dm_resume();
		goto comm_finish;
	}

	if (rockchip_ops->hlvl_pwr_dm_resume) {
		for (lvl = MPIDR_AFFLVL1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
			lvl_state = target_state->pwr_domain_state[lvl];
			rockchip_ops->hlvl_pwr_dm_resume(lvl, lvl_state);
		}
	}

	if (rockchip_ops->cores_pwr_dm_resume)
		rockchip_ops->cores_pwr_dm_resume();
	/*
	 * Program the GIC per-cpu distributor or re-distributor interface.
	 * For a system power domain operation, resuming the GIC has to be
	 * done in rockchip_ops->sys_pwr_dm_resume, according to how the
	 * system power mode is implemented.
	 */
	plat_rockchip_gic_cpuif_enable();

comm_finish:
	/* Perform the common cluster specific operations */
	if (RK_CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}
}
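The finisher above only dereferences rockchip_ops after NULL-checking each hook. Below is a minimal sketch of a callback table along those lines; the structure name and the exact set of fields are assumptions drawn from the calls made here, not the full Rockchip platform interface.

#include <stddef.h>
#include <stdint.h>

typedef uint8_t plat_local_state_t;	/* matches the PSCI local-state width */

struct rockchip_pm_ops_cb {
	int (*cores_pwr_dm_resume)(void);
	int (*hlvl_pwr_dm_resume)(uint32_t lvl, plat_local_state_t lvl_state);
	int (*sys_pwr_dm_resume)(void);
};

static int soc_cores_resume(void)
{
	/* SoC-specific core resume work would go here. */
	return 0;
}

/* A SoC port fills in only the hooks it supports; the finisher checks each
 * pointer before calling it, so the rest may stay NULL. */
static const struct rockchip_pm_ops_cb soc_pm_ops = {
	.cores_pwr_dm_resume = soc_cores_resume,
};

static const struct rockchip_pm_ops_cb *rockchip_ops = &soc_pm_ops;

int main(void)
{
	if (rockchip_ops->cores_pwr_dm_resume)
		rockchip_ops->cores_pwr_dm_resume();
	return 0;
}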
Example #6
/*******************************************************************************
 * FVP handler called when an affinity instance has just been powered on after
 * being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to set it up
 * correctly.
 ******************************************************************************/
int plat_affinst_on_finish(unsigned long mpidr,
			  unsigned int afflvl,
			  unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long ectlr;	/* CPUECTLR is a 64-bit register */

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return PSCI_E_SUCCESS;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/*
		 * This CPU might have woken up whilst the cluster was
		 * attempting to power down. In this case the FVP power
		 * controller will have a pending cluster power off request
		 * which needs to be cleared by writing to the PPONR register.
		 * This prevents the power controller from interpreting a
		 * subsequent entry of this cpu into a simple wfi as a power
		 * down request.
		 */
		// plat_pwrc_write_pponr(mpidr);

		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}
	
	/*
	 * Ignore the state passed for a cpu. It could only have
	 * been off if we are here.
	 */
	workaround_836870(mpidr);

	/*
	 * Turn on intra-cluster coherency if the MTK_platform flavour supports
	 * it.
	 */
	ectlr = read_cpuectlr();
	ectlr |= CPUECTLR_SMP_BIT;
	write_cpuectlr(ectlr);

	/*
	 * Clear PWKUPR.WEN bit to ensure interrupts do not interfere
	 * with a cpu power down unless the bit is set again
	 */
	// plat_pwrc_clr_wen(mpidr);

	/* Zero the jump address in the mailbox for this cpu */
	plat_program_mailbox(read_mpidr_el1(), 0);

	/* Enable the gic cpu interface */
	// arm_gic_cpuif_setup();
	gic_cpuif_setup(get_plat_config()->gicc_base);
	
	gic_pcpu_distif_setup(get_plat_config()->gicd_base);

	/* TODO: This setup is needed only after a cold boot */
	// arm_gic_pcpu_distif_setup();

	enable_ns_access_to_cpuectlr();

	return rc;
}
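One detail worth noting in the CPUECTLR read-modify-write above is the width of the temporary: CPUECTLR is a 64-bit register, so a 32-bit local would drop the upper half on write-back. The standalone sketch below shows the difference, using made-up register contents rather than a real system-register read.

#include <stdio.h>
#include <stdint.h>

#define CPUECTLR_SMP_BIT	(1ULL << 6)

int main(void)
{
	/* Hypothetical 64-bit CPUECTLR contents with bits set above bit 31. */
	uint64_t cpuectlr = 0x0000001b00000000ULL;

	/* 64-bit temporary: the upper bits survive the read-modify-write. */
	uint64_t ok = cpuectlr | CPUECTLR_SMP_BIT;

	/* 32-bit temporary: bits [63:32] are silently discarded. */
	uint32_t bad = (uint32_t)cpuectlr | (uint32_t)CPUECTLR_SMP_BIT;

	printf("64-bit temporary: 0x%016llx\n", (unsigned long long)ok);
	printf("32-bit temporary: 0x%016llx\n", (unsigned long long)bad);
	return 0;
}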