/******************************************************************************* * Perform the very early platform specific architectural setup here. At the * moment this is only intializes the mmu in a quick and dirty way. ******************************************************************************/ void bl31_plat_arch_setup() { #if RESET_TO_BL31 mt_cci_setup(); #endif /* Enable non-secure access to CCI-400 registers */ mmio_write_32(CCI400_BASE + CCI_SEC_ACCESS_OFFSET , 0x1); /* set secondary CPUs to AArch64 */ printf("###@@@ MP0_MISC_CONFIG3:0x%08x @@@###\n", mmio_read_32(MP0_MISC_CONFIG3)); mmio_write_32(MP0_MISC_CONFIG3, mmio_read_32(MP0_MISC_CONFIG3) | 0x0000E000); printf("###@@@ MP0_MISC_CONFIG3:0x%08x @@@###\n", mmio_read_32(MP0_MISC_CONFIG3)); { atf_arg_t_ptr teearg = (atf_arg_t_ptr)(uintptr_t)TEE_BOOT_INFO_ADDR; if(teearg->atf_log_buf_size !=0 ) { printf("mmap atf buffer : 0x%x, 0x%x\n\r", teearg->atf_log_buf_start, teearg->atf_log_buf_size); mmap_add_region((teearg->atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)), PAGE_SIZE_2MB, MT_DEVICE | MT_RW | MT_NS); printf("mmap atf buffer (force 2MB aligned): 0x%x, 0x%x\n\r", (teearg->atf_log_buf_start & ~(PAGE_SIZE_2MB_MASK)), PAGE_SIZE_2MB); } } // add TZRAM2_BASE to memory map mmap_add_region(TZRAM2_BASE, ((TZRAM2_SIZE & ~(PAGE_SIZE_MASK)) + PAGE_SIZE), MT_MEMORY | MT_RW | MT_SECURE); // add TZRAM_BASE to memory map // then set RO and COHERENT to different attribute mt_configure_mmu_el3(TZRAM_BASE, ((TZRAM_SIZE & ~(PAGE_SIZE_MASK)) + PAGE_SIZE), BL31_RO_BASE, BL31_RO_LIMIT, BL31_COHERENT_RAM_BASE, BL31_COHERENT_RAM_LIMIT); /* * Without this, access to CPUECTRL from NS EL1 * will cause trap into EL3 */ enable_ns_access_to_cpuectlr(); }
/*******************************************************************************
 * MTK_platform handler called when an affinity instance has just been powered on after
 * being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to setup it up
 * correctly.
 *
 * Returns PSCI_E_SUCCESS unconditionally; asserts on an unknown afflvl.
 ******************************************************************************/
int mt_affinst_on_finish(unsigned long mpidr, unsigned int afflvl, unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long linear_id, cpu_setup;
	mailbox_t *mt_mboxes;
	unsigned int gicd_base, gicc_base, ectlr;

	switch (afflvl) {
	case MPIDR_AFFLVL2:
		if (state == PSCI_STATE_OFF) {
			/* Disabled debug hook: branch-to-self spin loop. */
			// __asm__ __volatile__ ("1: b 1b \n\t");
		}
		/* Set up this CPU's banked view of the GIC distributor. */
		gicd_base = mt_get_cfgvar(CONFIG_GICD_ADDR);
		gic_pcpu_distif_setup(gicd_base);
		break;

	case MPIDR_AFFLVL1:
		/* Enable coherency if this cluster was off */
		if (state == PSCI_STATE_OFF) {
			enable_scu(mpidr);
			mt_cci_setup();
			trace_power_flow(mpidr, CLUSTER_UP);
		}
		break;

	case MPIDR_AFFLVL0:
		/*
		 * Ignore the state passed for a cpu. It could only have
		 * been off if we are here.
		 */
		workaround_836870(mpidr);

		/*
		 * Turn on intra-cluster coherency if the MTK_platform flavour supports
		 * it.
		 */
		cpu_setup = mt_get_cfgvar(CONFIG_CPU_SETUP);
		if (cpu_setup) {
			ectlr = read_cpuectlr();
			ectlr |= CPUECTLR_SMP_BIT;
			write_cpuectlr(ectlr);
		}

		/* Zero the jump address in the mailbox for this cpu */
		mt_mboxes = (mailbox_t *) (MBOX_OFF);
		linear_id = platform_get_core_pos(mpidr);
		mt_mboxes[linear_id].value = 0;
		/*
		 * NOTE(review): only sizeof(unsigned long) bytes are flushed,
		 * which assumes 'value' is the only field the resuming core
		 * reads from this mailbox slot — confirm against mailbox_t.
		 */
		flush_dcache_range((unsigned long) &mt_mboxes[linear_id],
				   sizeof(unsigned long));

		gicc_base = mt_get_cfgvar(CONFIG_GICC_ADDR);
		/* Enable the gic cpu interface */
		gic_cpuif_setup(gicc_base);
		//gic_cpu_restore();

#if 0 //fixme,
		/* Allow access to the System counter timer module */
		reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
		reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
		reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);

		reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
		mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
#endif

		/* Let NS EL1 access CPUECTLR without trapping into EL3. */
		enable_ns_access_to_cpuectlr();

		trace_power_flow(mpidr, CPU_UP);
		break;

	default:
		assert(0);
	}

	return rc;
}
/*******************************************************************************
 * FVP handler called when an affinity instance has just been powered on after
 * being turned off earlier. The level and mpidr determine the affinity
 * instance. The 'state' arg. allows the platform to decide whether the cluster
 * was turned off prior to wakeup and do what's necessary to setup it up
 * correctly.
 *
 * Returns PSCI_E_SUCCESS in all cases (including the early-out below).
 ******************************************************************************/
int plat_affinst_on_finish(unsigned long mpidr, unsigned int afflvl, unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned ectlr;

	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return PSCI_E_SUCCESS;

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		/*
		 * This CPU might have woken up whilst the cluster was
		 * attempting to power down. In this case the FVP power
		 * controller will have a pending cluster power off request
		 * which needs to be cleared by writing to the PPONR register.
		 * This prevents the power controller from interpreting a
		 * subsequent entry of this cpu into a simple wfi as a power
		 * down request.
		 */
		// plat_pwrc_write_pponr(mpidr);
		enable_scu(mpidr);

		/* Enable coherency if this cluster was off */
		plat_cci_enable();
	}

	/*
	 * Ignore the state passed for a cpu. It could only have
	 * been off if we are here.
	 */
	workaround_836870(mpidr);

	/*
	 * Turn on intra-cluster coherency if the MTK_platform flavour supports
	 * it. (Unconditional here, unlike mt_affinst_on_finish which gates on
	 * CONFIG_CPU_SETUP.)
	 */
	ectlr = read_cpuectlr();
	ectlr |= CPUECTLR_SMP_BIT;
	write_cpuectlr(ectlr);

	/*
	 * Clear PWKUPR.WEN bit to ensure interrupts do not interfere
	 * with a cpu power down unless the bit is set again
	 */
	// plat_pwrc_clr_wen(mpidr);

	/*
	 * Zero the jump address in the mailbox for this cpu.
	 * NOTE(review): uses read_mpidr_el1() (the current core) rather than
	 * the 'mpidr' parameter — presumably identical on this warm-boot
	 * path, but confirm.
	 */
	plat_program_mailbox(read_mpidr_el1(), 0);

	/* Enable the gic cpu interface */
	// arm_gic_cpuif_setup();
	gic_cpuif_setup(get_plat_config()->gicc_base);
	gic_pcpu_distif_setup(get_plat_config()->gicd_base);

	/* TODO: This setup is needed only after a cold boot */
	// arm_gic_pcpu_distif_setup();

	/* Let NS EL1 access CPUECTLR without trapping into EL3. */
	enable_ns_access_to_cpuectlr();

	return rc;
}