/**
 * Called at the end of runtime initialization, using APPF's translation
 * tables and stack, by the same CPU that performed early initialization.
 *
 * Cleans the APPF code and translation tables out of the PL310 L2 cache
 * (if one is present and enabled) — this matters because the L2 will be
 * disabled during power down, and anything dirty in it would be lost.
 *
 * @param cluster  cluster descriptor; cluster->l2_address is the PL310
 *                 base (zero when there is no L2 controller)
 * @return APPF_OK always
 */
int appf_platform_late_init(struct appf_cluster *cluster)
{
    /* No L2 controller configured: nothing to clean. */
    if (!cluster->l2_address)
        return APPF_OK;

    /* Only clean when the controller is actually enabled. */
    if (is_enabled_pl310(cluster->l2_address))
        clean_pl310(cluster->l2_address);

    return APPF_OK;
}
/**
 * Saves all the context that will be lost when a CPU and cluster enter a
 * low power state, then (on the power-down path) cleans/disables the
 * caches and programs the SRAM power-control registers.
 *
 * Save order is deliberate: performance monitors go first so the saving
 * itself perturbs the counters as little as possible; banked registers go
 * last because that call doubles as the suspend/resume pivot (see below).
 *
 * @param cluster  cluster being powered down
 * @param pcpu     CPU whose per-core context is being saved
 */
static void platform_save_context(struct cpu_cluster *cluster, struct cpu *pcpu)
{
    struct cpu_context *context = &(pcpu->context);
    struct cpu_cluster_context *cluster_context = &(cluster->context);
#if MAX_CPUS != 1
    int cluster_down = 0;
#endif

    /* Save perf. monitors first, so we don't interfere too much with counts */
    save_performance_monitors(context->pmu_data);
    save_a9_timers(context->timer_data, cluster->scu_address);
    save_vfp(context->vfp_data);
    /* GIC CPU interface is per-core; the distributor's private (banked)
     * registers live behind the cluster's distributor base. */
    save_gic_interface(context->gic_interface_data, pcpu->ic_address);
    save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
    save_cp15(context->cp15_data);
    save_a9_other(context->other_data);
#if 0 //wschen 2011-07-28
    /* Debug-register save disabled by the author; left for reference. */
    save_a9_debug(context->debug_data);
#endif
#if MAX_CPUS != 1
    /* Cluster-level state only needs saving if the whole cluster goes at
     * least dormant (power_state >= STATUS_DORMANT). */
    if (cluster->power_state >= STATUS_DORMANT) {
        cluster_down = 1;
    }
#endif
#if MAX_CPUS != 1
    if (cluster_down) {
#endif
        /* Shared (cluster-wide) state: global timer and the shared part of
         * the GIC distributor. In a single-CPU build this runs
         * unconditionally. */
        save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
        save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
#if MAX_CPUS != 1
    }
#endif
    save_control_registers(context);
    save_mmu(context->mmu_data);
#if MAX_CPUS != 1
    if (cluster_down) {
#endif
        save_a9_scu(cluster_context->scu_data, cluster->scu_address);
        save_pl310(cluster_context->l2_data, cluster->l2_address);
#if MAX_CPUS != 1
    }
#endif
    /* Suspend/resume pivot: dormant_ret_flag is cleared here, then
     * save_banked_registers() captures the banked register file. On the
     * resume path execution presumably re-emerges from that call with
     * dormant_ret_flag set nonzero, skipping the power-down sequence
     * below — NOTE(review): confirm against save_banked_registers and the
     * warm-boot entry code. */
    dormant_ret_flag = 0;
    save_banked_registers(context->banked_registers);

    if (dormant_ret_flag == 0) {
        /* First pass (entering low power): flush L1 and L2 so nothing
         * dirty is lost when the caches are powered off. */
        clean_dcache_v7_l1();
        clean_pl310(cpu_cluster.l2_address);
        if (e1_chip) {
            /* E1 silicon: disable caches, then put the SRAM banks into
             * sleep (dormant) or full power-down via the system-control
             * power registers. Magic values are board-specific —
             * NOTE(review): meanings taken from the adjacent comments,
             * verify against the SoC register manual. */
            disable_pl310(cpu_cluster.l2_address);
            disable_cache_v7_l1();
            if (cpu_cluster.power_state == STATUS_DORMANT) {
                reg_write(SC_PWR_CON0, 0xE105); /* SRAM sleep */
            } else {
                reg_write(SC_PWR_CON0, 0xC12D); /* SRAM power down */
            }
            dsb();
            if (cpu_cluster.power_state == STATUS_DORMANT) {
                reg_write(SC_PWR_CON2, 0xE505); /* SRAM sleep */
            } else {
                reg_write(SC_PWR_CON2, 0xC52D); /* SRAM power down */
            }
            dsb();
        }
        return;
    } else {
        /* Second pass (resumed from dormant): context already restored,
         * nothing more to do here. */
        return;
    }
}
/**
 * Saves all the context that will be lost when a CPU and cluster enter a
 * low power state, then disables the data caches (and, when requested,
 * the L2) so the saved state survives the power transition.
 *
 * This function is called with cluster->context->lock held.
 *
 * @param cluster  cluster descriptor (SCU, GIC distributor, PL310 bases)
 * @param cpu      per-CPU descriptor whose context is saved
 * @param flags    APPF_SAVE_* bitmask selecting optional context groups
 *                 (PMU, timers, VFP, debug, L2)
 * @return APPF_OK
 */
int appf_platform_save_context(struct appf_cluster *cluster, struct appf_cpu *cpu, unsigned flags)
{
    appf_u32 saved_items = 0;           /* per-CPU groups actually saved */
    appf_u32 cluster_saved_items = 0;   /* cluster-wide groups actually saved */
    struct appf_cpu_context *context = cpu->context;
    struct appf_cluster_context *cluster_context = cluster->context;
    int cluster_down;

    /* Save perf. monitors first, so we don't interfere too much with counts */
    if (flags & APPF_SAVE_PMU) {
        save_performance_monitors(context->pmu_data);
        saved_items |= SAVED_PMU;
    }
    if (flags & APPF_SAVE_TIMERS) {
        save_a9_timers(context->timer_data, cluster->scu_address);
        saved_items |= SAVED_TIMERS;
    }
    if (flags & APPF_SAVE_VFP) {
        save_vfp(context->vfp_data);
        saved_items |= SAVED_VFP;
    }
    /* GIC state is saved only when the respective base addresses are
     * configured (zero means the block is absent on this platform). */
    if (cpu->ic_address)
        save_gic_interface(context->gic_interface_data, cpu->ic_address);
    if (cluster->ic_address)
        save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
    /* TODO: check return value and quit if nonzero! */
    save_banked_registers(context->banked_registers);
    save_cp15(context->cp15_data);
    save_a9_other(context->other_data);
    if (flags & APPF_SAVE_DEBUG) {
        save_a9_debug(context->debug_data);
        saved_items |= SAVED_DEBUG;
    }

    /* NOTE(review): 2 presumably corresponds to a dormant-or-deeper
     * cluster power state — confirm against the power_state definitions. */
    cluster_down = cluster->power_state >= 2;
    if (cluster_down) {
        /* Cluster-wide global timer / shared GIC distributor save is
         * disabled (commented out by the author); the block intentionally
         * does nothing here. */
        /*
        if ((flags & APPF_SAVE_TIMERS) && cluster->cpu_version >= 0x0100) {
            save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
            cluster_saved_items |= SAVED_GLOBAL_TIMER;
        }
        save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
        */
    }
    save_control_registers(context);
    save_mmu(context->mmu_data);
    context->saved_items = saved_items;

    /* The cluster_down guard was commented out, so this block now runs
     * unconditionally (the braces remain, scoping nothing). SCU and L2
     * state are saved regardless of whether the whole cluster goes down. */
//    if (cluster_down)
    {
        if (cluster->scu_address)
            save_a9_scu(cluster_context->scu_data, cluster->scu_address);
        if (flags & APPF_SAVE_L2) {
            save_pl310(cluster_context->l2_data, cluster->l2_address);
            cluster_saved_items |= SAVED_L2;
        }
        cluster_context->saved_items = cluster_saved_items;
    }

    /*
     * DISABLE DATA CACHES
     *
     * First, disable, then clean+invalidate the L1 cache.
     *
     * Note that if L1 was to be dormant and we were the last CPU, we would
     * only need to clean some key data out of L1 and clean+invalidate the
     * stack.
     */
    /* CP15 c7, c5, 0 with Rt=0 is ICIALLU: invalidate the entire
     * instruction cache (ARMv7-A). */
    asm volatile("mov r0,#0");
    asm volatile("mcr p15, 0, r0, c7, c5, 0");
    disable_clean_inv_dcache_v7_l1();

    /*
     * Next, disable cache coherency
     */
    if (cluster->scu_address) {
        /* Clear the SMP bit in ACTLR: take this CPU out of SCU coherency. */
        write_actlr(read_actlr() & ~A9_SMP_BIT);
    }

    /*
     * If the L2 cache is in use, there is still more to do.
     *
     * Note that if the L2 cache is not in use, we don't disable the MMU,
     * as clearing the C bit is good enough.
     */
    if (flags & APPF_SAVE_L2) {
        /*
         * Disable the MMU (and the L2 cache if necessary), then
         * clean+invalidate the stack in the L2. This all has to be done in
         * one assembler function as we can't use the C stack during these
         * operations.
         */
        disable_clean_inv_cache_pl310(cluster->l2_address,
                                      appf_platform_get_stack_pointer() - STACK_SIZE,
                                      STACK_SIZE, cluster_down);
        /*
         * We need to partially or fully clean the L2, because we will
         * enter reset with cacheing disabled
         */
        if (cluster_down) {
            /* Clean the whole thing */
            clean_pl310(cluster->l2_address);
        } else {
            /*
             * L2 staying on, so just clean everything this CPU will need
             * before the MMU is reenabled
             *
             * TODO: some of this data won't change after boottime init,
             * could be cleaned once during late_init
             */
            clean_range_pl310(cluster, sizeof(struct appf_cluster), cluster->l2_address);
            clean_range_pl310(cpu, sizeof(struct appf_cpu), cluster->l2_address);
            clean_range_pl310(context, sizeof(struct appf_cpu_context), cluster->l2_address);
            clean_range_pl310(context->mmu_data, MMU_DATA_SIZE, cluster->l2_address);
            clean_range_pl310(cluster_context, sizeof(struct appf_cluster_context), cluster->l2_address);
        }
    }
    return APPF_OK;
}