/*
 * __write_cpuxgpt - program one CPUXGPT register via secure-world write.
 * @reg_index: index of the target register in the CPUXGPT block.
 * @value:     value to store into the selected register.
 *
 * Uses the two-step index/data access pattern: the index register is
 * written first to select the target, then the control/data register is
 * written with the value.  With PSCI (ARM or MTK flavour) enabled the
 * writes go through the physical-address SMC helper; otherwise the
 * virtual-address SMC helper is used.
 *
 * NOTE(review): the index write must precede the value write — the
 * hardware latches the index first.  Confirm against the CPUXGPT
 * programming guide before reordering.
 */
static void __write_cpuxgpt(unsigned int reg_index, unsigned int value)
{
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
	/* DRV_WriteReg32(INDEX_BASE,reg_index); */
	/* DRV_WriteReg32(CTL_BASE,value); */
	mcusys_smc_write_phy(INDEX_BASE_PHY, reg_index);
	mcusys_smc_write_phy(CTL_BASE_PHY, value);
#else
	mcusys_smc_write(INDEX_BASE, reg_index);
	mcusys_smc_write(CTL_BASE, value);
#endif
}
/*
 * cpu_leave_lowpower - restore coherency state after a CPU exits low power.
 * @cpu: logical CPU number of the CPU coming back online.
 *
 * Mirror image of cpu_enter_lowpower().  If @cpu belongs to the second
 * cluster (CPUs 4-7) and every one of its cluster siblings is offline —
 * i.e. this wake-up powers the whole cluster back up — cluster-level
 * coherency is re-established first: ACINACTM is cleared on the MP1 AXI
 * config and CCI-400 snoop/DVM requests are re-enabled.  Only then does
 * the CPU rejoin SMP mode and re-enable its data cache.
 *
 * The ordering (coherency enable -> ACTLR.SMP set -> dcache enable) is
 * the inverse of the power-down sequence and must not be changed.
 */
static inline void cpu_leave_lowpower(unsigned int cpu)
{
	//HOTPLUG_INFO("cpu_leave_lowpower\n");

	/* True when @cpu is in the 4-7 cluster and all its siblings are
	 * offline: this CPU is the first of its cluster to come back. */
	if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 5) && (cpu_online(4) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 7) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(6) == 0))) {
		/* Enable CA15L snoop function */
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
		mcusys_smc_write_phy(virt_to_phys(MP1_AXI_CONFIG),
				     REG_READ(MP1_AXI_CONFIG) & ~ACINACTM);
#else //#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
		mcusys_smc_write(MP1_AXI_CONFIG, REG_READ(MP1_AXI_CONFIG) & ~ACINACTM);
#endif //#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)

		/* Enable snoop requests and DVM message requests */
		REG_WRITE(CCI400_SI3_SNOOP_CONTROL,
			  REG_READ(CCI400_SI3_SNOOP_CONTROL) | (SNOOP_REQ | DVM_MSG_REQ));
		/* Busy-wait until the CCI-400 acknowledges the state change. */
		while (REG_READ(CCI400_STATUS) & CHANGE_PENDING);
	}

	/* Set the ACTLR.SMP bit to 1 for SMP mode */
	__switch_to_smp();

	/* Enable dcache */
	__enable_dcache();
}
/*
 * __read_cpuxgpt - read one CPUXGPT register via the index/data pair.
 * @reg_index: index of the register to read.
 *
 * Selects the register by writing the index through the secure-world
 * helper (physical-address variant under PSCI), then returns the value
 * latched in the control/data register.
 */
static unsigned int __read_cpuxgpt(unsigned int reg_index)
{
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
	mcusys_smc_write_phy(INDEX_BASE_PHY, reg_index);
#else
	mcusys_smc_write(INDEX_BASE, reg_index);
#endif
	/* The data register now reflects the selected index. */
	return __raw_readl(CTL_BASE);
}
/*
 * mt_smp_prepare_cpus - arch hook run once before secondary CPUs boot.
 * @max_cpus: number of CPUs the kernel intends to bring up.
 *
 * In the non-PSCI bring-up path this marks CPUs present (non-DT kernels
 * only), forces all cores into AArch32, enables bootrom power-down mode,
 * and programs the secondary-startup entry point into the bootrom boot
 * address register.  Finally it initializes the SPM MTCMOS register map
 * used for per-CPU power control.
 *
 * Fix: the present-mask loop index was a signed `int` compared against
 * the unsigned @max_cpus (sign-compare); it is now `unsigned int`.
 */
void __init mt_smp_prepare_cpus(unsigned int max_cpus)
{
#if !defined (CONFIG_ARM_PSCI)
	/*
	 * 20140512 marc.huang
	 * 1. only need to get core count if !defined(CONFIG_OF)
	 * 2. only set possible cpumask in mt_smp_init_cpus() if !defined(CONFIG_OF)
	 * 3. only set present cpumask in mt_smp_prepare_cpus() if !defined(CONFIG_OF)
	 */
#if !defined(CONFIG_OF)
	unsigned int i;	/* unsigned: compared against unsigned max_cpus */

	/* Without a device tree, mark the first @max_cpus CPUs present. */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
#endif //#if !defined(CONFIG_OF)

#ifdef CONFIG_MTK_FPGA
	/* write the address of slave startup into the system-wide flags register */
	mt_reg_sync_writel(virt_to_phys(mt_secondary_startup), SLAVE_JUMP_REG);
#endif

	/* Set all cpus into AArch32 */
	mcusys_smc_write(MP0_MISC_CONFIG3, REG_READ(MP0_MISC_CONFIG3) & 0xFFFF0FFF);
//	mcusys_smc_write(MP1_MISC_CONFIG3, REG_READ(MP1_MISC_CONFIG3) & 0xFFFF0FFF);

//#ifndef CONFIG_MTK_FPGA
	/* enable bootrom power down mode */
	REG_WRITE(BOOTROM_SEC_CTRL, REG_READ(BOOTROM_SEC_CTRL) | SW_ROM_PD);
//#endif

	/* write the address of slave startup into boot address register for bootrom power down mode */
#if defined (MT_SMP_VIRTUAL_BOOT_ADDR)
	mt_reg_sync_writel(virt_to_phys(mt_smp_boot), BOOTROM_BOOT_ADDR);
#else
	mt_reg_sync_writel(virt_to_phys(mt_secondary_startup), BOOTROM_BOOT_ADDR);
#endif
#endif //#if !defined (CONFIG_ARM_PSCI)

	/* initial spm_mtcmos memory map */
	spm_mtcmos_cpu_init();
}
/*
 * mt_smp_init_cpus - early SMP init: discover cores, set the possible mask.
 *
 * On non-DT kernels, reads the core count from L2CTLR via
 * _mt_smp_get_core_count(), clamps it to NR_CPUS, and marks that many
 * CPUs possible.  Also records the number of secondary CPUs needing IRQ
 * bring-up in irq_total_secondary_cpus.
 *
 * Fix: `ncores` is `unsigned int` but was printed with "%d"; the format
 * specifier is now "%u" to match the argument type.
 */
void __init mt_smp_init_cpus(void)
{
#if 0
	/* Enable CA7 snoop function */
	mcusys_smc_write(MP0_AXI_CONFIG, REG_READ(MP0_AXI_CONFIG) & ~ACINACTM);

	/* Enable snoop requests and DVM message requests*/
	REG_WRITE(CCI400_SI4_SNOOP_CONTROL,
		  REG_READ(CCI400_SI4_SNOOP_CONTROL) | (SNOOP_REQ | DVM_MSG_REQ));
	while (REG_READ(CCI400_STATUS) & CHANGE_PENDING);
#endif

	pr_emerg("@@@###[AT] num_possible_cpus(): %u ###@@@\n", num_possible_cpus());
	pr_emerg("@@@###[AT] num_present_cpus(): %u ###@@@\n", num_present_cpus());

#if !defined(CONFIG_OF)
	{
		unsigned int i, ncores;

		ncores = _mt_smp_get_core_count();

		if (ncores > NR_CPUS) {
			/* %u: ncores is unsigned (was "%d", a format mismatch) */
			printk(KERN_WARNING "L2CTLR core count (%u) > NR_CPUS (%d)\n", ncores, NR_CPUS);
			printk(KERN_WARNING "set nr_cores to NR_CPUS (%d)\n", NR_CPUS);
			ncores = NR_CPUS;
		}

		for (i = 0; i < ncores; i++)
			set_cpu_possible(i, true);
	}
#endif //#if !defined(CONFIG_OF)

	/* Every possible CPU except the boot CPU needs IRQ bring-up. */
	irq_total_secondary_cpus = num_possible_cpus() - 1;

	//fix build error
	//set_smp_cross_call(irq_raise_softirq);

	//XXX: asssume only boot cpu power on and all non-boot cpus power off after preloader stage
	//if (ncores > 4)
	//	spm_mtcmos_ctrl_cpusys1_init_1st_bring_up(STA_POWER_ON);
	//else
	//	spm_mtcmos_ctrl_cpusys1_init_1st_bring_up(STA_POWER_DOWN);
}
/*
 * mt_set_pol_via_smc - write a polarity register through the secure world.
 * @addr:  MMIO address of the register to update.
 * @value: raw value to write.
 *
 * Thin wrapper so the write is performed via the SMC helper rather than
 * a direct MMIO store.  NOTE(review): "pol" presumably refers to an
 * interrupt-polarity register — confirm against the callers; this
 * function itself places no constraint on @addr.
 */
static void mt_set_pol_via_smc(void __iomem *addr, u32 value)
{
	mcusys_smc_write(addr, value);
}
/*
 * static function
 */

/*
 * cpu_enter_lowpower - take @cpu out of coherency before it powers down.
 * @cpu: logical CPU number of the CPU going offline.
 *
 * Two cases:
 *  - @cpu is the LAST online CPU of the second cluster (CPUs 4-7):
 *    flush L1 and L2, leave SMP mode, then tear down cluster coherency
 *    (disable CCI-400 snoop/DVM requests, assert ACINACTM on MP1 AXI).
 *  - otherwise: flush L1 but only CLEAN L2 (sibling CPUs still use it),
 *    then leave SMP mode.
 *
 * The statement order (dcache disable + flush -> ACTLR.SMP clear ->
 * isb/dsb -> snoop teardown) follows the ARM cluster power-down
 * sequence and must not be reordered.
 */
static inline void cpu_enter_lowpower(unsigned int cpu)
{
	//HOTPLUG_INFO("cpu_enter_lowpower\n");

	/* True when @cpu is in the 4-7 cluster and all its siblings are
	 * already offline: this CPU is the last one standing there. */
	if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 5) && (cpu_online(4) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(7) == 0))
	    || ((cpu == 7) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(6) == 0))) {
#if 0
		/* Clear the SCTLR C bit to prevent further data cache allocation */
		__disable_dcache();

		/* Clean and invalidate all data from the L1/L2 data cache */
		inner_dcache_flush_L1();
		//flush_cache_all();

		/* Execute a CLREX instruction */
		__asm__ __volatile__("clrex");

		/* Clean all data from the L2 data cache */
		inner_dcache_flush_L2();
#else
		/* Combined helper: disable D-cache and flush L1+L2 in one shot. */
		__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2();
#endif

		/* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
		__switch_to_amp();

		/* Execute an ISB instruction to ensure that all of the CP15 register changes from the previous steps have been committed */
		isb();

		/* Execute a DSB instruction to ensure that all cache, TLB and branch predictor maintenance operations issued by any processor in the multiprocessor device before the SMP bit was cleared have completed */
		dsb();

		/* Disable snoop requests and DVM message requests */
		REG_WRITE(CCI400_SI3_SNOOP_CONTROL,
			  REG_READ(CCI400_SI3_SNOOP_CONTROL) & ~(SNOOP_REQ | DVM_MSG_REQ));
		/* Busy-wait until the CCI-400 acknowledges the state change. */
		while (REG_READ(CCI400_STATUS) & CHANGE_PENDING);

		/* Disable CA15L snoop function */
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
		mcusys_smc_write_phy(virt_to_phys(MP1_AXI_CONFIG),
				     REG_READ(MP1_AXI_CONFIG) | ACINACTM);
#else //#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
		mcusys_smc_write(MP1_AXI_CONFIG, REG_READ(MP1_AXI_CONFIG) | ACINACTM);
#endif //#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
	} else {
#if 0
		/* Clear the SCTLR C bit to prevent further data cache allocation */
		__disable_dcache();

		/* Clean and invalidate all data from the L1 data cache */
		inner_dcache_flush_L1();	//Just flush the cache.
		//flush_cache_all();

		/* Clean all data from the L2 data cache */
		//__inner_clean_dcache_L2();
#else
		//FIXME: why __disable_dcache__inner_flush_dcache_L1 fail but 2 steps ok?
		//__disable_dcache__inner_flush_dcache_L1();
		/* Flush L1 but only clean L2: sibling cluster CPUs still share L2. */
		__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2();
#endif

		/* Execute a CLREX instruction */
		__asm__ __volatile__("clrex");

		/* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
		__switch_to_amp();
	}
}