/* * smp_inner_dcache_flush_all: Flush (clean + invalidate) the entire L1 data cache. * * This can be used ONLY by the M4U driver!! * Other drivers should NOT use this function at all!! * Others should use DMA-mapping APIs!! * * This is the smp version of inner_dcache_flush_all(). * It will use IPI to do flush on all CPUs. * Must not call this function with disabled interrupts or from a * hardware interrupt handler or from a bottom half handler. */ void smp_inner_dcache_flush_all(void) { if (in_interrupt()) { printk(KERN_ERR "Cannot invoke smp_inner_dcache_flush_all() in interrupt/softirq context\n"); return ; } get_online_cpus(); on_each_cpu(inner_dcache_flush_L1, NULL, true); inner_dcache_flush_L2(); put_online_cpus(); }
int config_L2(int size) { int cur_size = get_l2c_size(); if (size != SZ_256K && size != SZ_512K) { printk("inlvalid input size %x\n", size); return -1; } if (in_interrupt()) { printk(KERN_ERR "Cannot use %s in interrupt/softirq context\n", __func__); return -1; } if (size == cur_size) { printk("Config L2 size %x is equal to current L2 size %x\n", size, cur_size); return 0; } atomic_set(&L1_flush_done, 0); get_online_cpus(); //printk("[Config L2] Config L2 start, on line cpu = %d\n",num_online_cpus()); /* disable cache and flush L1 */ on_each_cpu((smp_call_func_t)atomic_flush, NULL, true); //while(atomic_read(&L1_flush_done) != num_online_cpus()); //printk("[Config L2] L1 flush done\n"); /* flush L2 */ inner_dcache_flush_L2(); //printk("[Config L2] L2 flush done\n"); /* change L2 size */ config_L2_size(size); //printk("[Config L2] Change L2 flush size done(size = %d)\n",size); /* enable cache */ atomic_set(&L1_flush_done, 0); on_each_cpu((smp_call_func_t)__enable_cache, NULL, true); //update cr_alignment for other kernel function usage cr_alignment = cr_alignment | (0x4); //C1_CBIT put_online_cpus(); printk("Config L2 size %x done\n", size); return 0; }
/*
 * Static helper.
 *
 * cpu_enter_lowpower - Prepare @cpu for power-down during CPU hotplug.
 *
 * Two cases, decided by whether @cpu is the LAST online CPU of the
 * big cluster (CPUs 4-7):
 *
 *  - Last CPU in cluster 1: flush L1 *and* L2, leave SMP coherency,
 *    then disable snoop/DVM traffic to the cluster at the CCI-400 so
 *    the whole cluster can be powered off.
 *  - Otherwise: flush (and for L2 only clean) the local caches and
 *    leave SMP coherency; the CCI port stays active for the siblings.
 *
 * NOTE(review): the order of operations below (cache disable+flush ->
 * AMP switch -> isb/dsb -> CCI snoop disable) follows the ARM cluster
 * power-down sequence and must not be reordered.
 */
static inline void cpu_enter_lowpower(unsigned int cpu)
{
	/* HOTPLUG_INFO("cpu_enter_lowpower\n"); */

	/* True iff @cpu is in the 4-7 cluster and all its siblings are offline. */
	if (((cpu == 4) && (cpu_online(5) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0)) ||
	    ((cpu == 5) && (cpu_online(4) == 0) && (cpu_online(6) == 0) && (cpu_online(7) == 0)) ||
	    ((cpu == 6) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(7) == 0)) ||
	    ((cpu == 7) && (cpu_online(4) == 0) && (cpu_online(5) == 0) && (cpu_online(6) == 0))) {
#if 0
		/* Clear the SCTLR C bit to prevent further data cache allocation */
		__disable_dcache();

		/* Clean and invalidate all data from the L1/L2 data cache */
		inner_dcache_flush_L1();
		/* flush_cache_all(); */

		/* Execute a CLREX instruction */
		__asm__ __volatile__("clrex");

		/* Clean all data from the L2 data cache */
		inner_dcache_flush_L2();
#else
		/* Combined: disable D-cache, flush L1, flush L2 in one step. */
		__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2();
#endif

		/* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
		__switch_to_amp();

		/* Execute an ISB instruction to ensure that all of the CP15 register
		   changes from the previous steps have been committed */
		isb();

		/* Execute a DSB instruction to ensure that all cache, TLB and branch
		   predictor maintenance operations issued by any processor in the
		   multiprocessor device before the SMP bit was cleared have completed */
		dsb();

		/* Disable snoop requests and DVM message requests */
		REG_WRITE(CCI400_SI3_SNOOP_CONTROL,
			  REG_READ(CCI400_SI3_SNOOP_CONTROL) & ~(SNOOP_REQ | DVM_MSG_REQ));
		/* Busy-wait until the CCI-400 has applied the snoop-control change. */
		while (REG_READ(CCI400_STATUS) & CHANGE_PENDING);

		/* Disable CA15L snoop function */
#if defined(CONFIG_ARM_PSCI) || defined(CONFIG_MTK_PSCI)
		/* With PSCI the register must be written via secure monitor call. */
		mcusys_smc_write_phy(virt_to_phys(MP1_AXI_CONFIG),
				     REG_READ(MP1_AXI_CONFIG) | ACINACTM);
#else				/* !(CONFIG_ARM_PSCI || CONFIG_MTK_PSCI) */
		mcusys_smc_write(MP1_AXI_CONFIG, REG_READ(MP1_AXI_CONFIG) | ACINACTM);
#endif				/* CONFIG_ARM_PSCI || CONFIG_MTK_PSCI */
	} else {
#if 0
		/* Clear the SCTLR C bit to prevent further data cache allocation */
		__disable_dcache();

		/* Clean and invalidate all data from the L1 data cache */
		inner_dcache_flush_L1();	/* Just flush the cache. */
		/* flush_cache_all(); */

		/* Clean all data from the L2 data cache */
		/* __inner_clean_dcache_L2(); */
#else
		/* FIXME: why __disable_dcache__inner_flush_dcache_L1 fail but 2 steps ok? */
		/* __disable_dcache__inner_flush_dcache_L1(); */
		/* L2 is only CLEANED here (not invalidated): sibling CPUs remain online. */
		__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2();
#endif
		/* Execute a CLREX instruction to clear any local exclusive monitor state. */
		__asm__ __volatile__("clrex");

		/* Switch the processor from SMP mode to AMP mode by clearing the ACTLR SMP bit */
		__switch_to_amp();
	}
}