/**
 * This function saves all the context that will be lost
 * when a CPU and cluster enter a low power state.
 *
 * This function is called with cluster->context->lock held.
 */
int appf_platform_save_context(struct appf_cluster *cluster, struct appf_cpu *cpu, unsigned flags)
{
    appf_u32 saved_items = 0;
    appf_u32 cluster_saved_items = 0;
    struct appf_cpu_context *context = cpu->context;
    struct appf_cluster_context *cluster_context = cluster->context;
    int cluster_down;

    dbg_prints("save step 1\n");

    /* Save perf. monitors first, so we don't interfere too much with counts */
    if (flags & APPF_SAVE_PMU) {
        save_performance_monitors(context->pmu_data);
        saved_items |= SAVED_PMU;
    }

    dbg_prints("save step 2\n");
    if (flags & APPF_SAVE_TIMERS) {
        save_a9_timers(context->timer_data, cluster->scu_address);
        saved_items |= SAVED_TIMERS;
    }

    dbg_prints("save step 3\n");
    if (flags & APPF_SAVE_VFP) {
        save_vfp(context->vfp_data);
        saved_items |= SAVED_VFP;
    }

    dbg_prints("save step 4\n");
    if (cluster->ic_address) {
        save_gic_interface(context->gic_interface_data, cluster->ic_address);
        save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
        /* TODO: check return value and quit if nonzero! */
    }

    dbg_prints("save step 5\n");
    save_banked_registers(context->banked_registers);
    save_cp15(context->cp15_data);
    save_a9_other(context->other_data);

    if (flags & APPF_SAVE_DEBUG) {
        save_a9_debug(context->debug_data);
        saved_items |= SAVED_DEBUG;
    }

    dbg_prints("save step 6\n");
    cluster_down = cluster->power_state >= 2;

//  if (cluster_down)
    {
        if ((flags & APPF_SAVE_TIMERS) && cluster->cpu_version >= 0x0100) {
            save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
            cluster_saved_items |= SAVED_GLOBAL_TIMER;
        }
        save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
    }

    save_control_registers(context);
    save_mmu(context->mmu_data);
    context->saved_items = saved_items;

    dbg_prints("save step 7\n");
//  if (cluster_down)
    {
        if (cluster->scu_address)
            save_a9_scu(cluster_context->scu_data, cluster->scu_address);

        if (flags & APPF_SAVE_L2) {
            save_pl310(cluster_context->l2_data, cluster->l2_address);
            cluster_saved_items |= SAVED_L2;
        }
        cluster_context->saved_items = cluster_saved_items;
    }

    dbg_prints("save step 8\n");
    /*
     * DISABLE DATA CACHES
     *
     * First, disable, then clean+invalidate the L1 cache.
     *
     * Note that if L1 were to be dormant and we were the last CPU, we would
     * only need to clean some key data out of L1 and clean+invalidate the stack.
     */
    //asm volatile("mov r0, #0");
    //asm volatile("mcr p15, 0, r0, c7, c5, 0");
    //disable_clean_inv_dcache_v7_l1();
    //v7_flush_dcache_all();

    /*
     * Next, disable cache coherency.
     */
    if (cluster->scu_address) {
        write_actlr(read_actlr() & ~A9_SMP_BIT);
    }

    dbg_prints("save step 9\n");
    /*
     * If the L2 cache is in use, there is still more to do.
     *
     * Note that if the L2 cache is not in use, we don't disable the MMU,
     * as clearing the C bit is good enough.
     */
    if (flags & APPF_SAVE_L2) {
        /*
         * Disable the MMU (and the L2 cache if necessary), then clean+invalidate
         * the stack in the L2. This all has to be done in one assembler function,
         * as we can't use the C stack during these operations.
         */
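        /*
         * For illustration (an assumption, not necessarily this codebase's
         * actual implementation): the assembler helper below is expected to
         * do roughly the following, using only registers, since the C stack
         * is about to be cleaned out of the cache:
         *
         *     clear SCTLR.C (and SCTLR.M)        @ mrc/mcr p15, 0, rX, c1, c0, 0
         *     for each cache line in [sp - STACK_SIZE, sp):
         *         write the line's PA to the PL310 "clean+invalidate by PA"
         *         register (offset 0x7F0 in the generic L2C-310 map)
         *     poll Cache Sync (offset 0x730) until the L2 has drained
         *
         * The PL310 offsets are the standard L2C-310 ones and should be
         * checked against this platform's TRM.
         */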
        dbg_print("flags=", flags);
        disable_clean_inv_cache_pl310(cluster->l2_address,
                                      appf_platform_get_stack_pointer() - STACK_SIZE,
                                      STACK_SIZE, cluster_down);

        /*
         * We need to partially or fully clean the L2, because we will enter
         * reset with caching disabled.
         */
//      if (cluster_down)
        {
            /* Clean the whole thing */
            //clean_pl310(cluster->l2_address);
//          l2x0_flush_all();
        }
//      else
        {
            /*
             * L2 staying on, so just clean everything this CPU will need
             * before the MMU is re-enabled.
             *
             * TODO: some of this data won't change after boot-time init,
             * could be cleaned once during late_init.
             */
//          clean_range_pl310(cluster, sizeof(struct appf_cluster), cluster->l2_address);
//          clean_range_pl310(cpu, sizeof(struct appf_cpu), cluster->l2_address);
//          clean_range_pl310(context, sizeof(struct appf_cpu_context), cluster->l2_address);
//          clean_range_pl310(context->mmu_data, MMU_DATA_SIZE, cluster->l2_address);
//          clean_range_pl310(cluster_context, sizeof(struct appf_cluster_context), cluster->l2_address);
        }
    }

    dbg_prints("save step 10\n");
    return APPF_OK;
}
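/*
 * For reference, a minimal sketch of the ACTLR accessors used above to drop
 * this CPU out of coherency. These definitions are assumptions for
 * illustration (the real ones live elsewhere in this codebase); they assume
 * a GCC-style toolchain and a Cortex-A9, where the SMP bit is bit 6 of the
 * Auxiliary Control Register.
 */
#ifndef A9_SMP_BIT
#define A9_SMP_BIT (1 << 6)

static inline appf_u32 read_actlr(void)
{
    appf_u32 val;
    /* MRC p15, 0, <Rt>, c1, c0, 1: read the Auxiliary Control Register */
    asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
    return val;
}

static inline void write_actlr(appf_u32 val)
{
    /* MCR p15, 0, <Rt>, c1, c0, 1: write the Auxiliary Control Register */
    asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (val));
}
#endif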
/**
 * This function saves all the context that will be lost
 * when a CPU and cluster enter a low power state.
 */
static void platform_save_context(struct cpu_cluster *cluster, struct cpu *pcpu)
{
    struct cpu_context *context = &(pcpu->context);
    struct cpu_cluster_context *cluster_context = &(cluster->context);
#if MAX_CPUS != 1
    int cluster_down = 0;
#endif

    /* Save perf. monitors first, so we don't interfere too much with counts */
    save_performance_monitors(context->pmu_data);

    save_a9_timers(context->timer_data, cluster->scu_address);
    save_vfp(context->vfp_data);
    save_gic_interface(context->gic_interface_data, pcpu->ic_address);
    save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
    save_cp15(context->cp15_data);
    save_a9_other(context->other_data);

#if 0 //wschen 2011-07-28
    save_a9_debug(context->debug_data);
#endif

#if MAX_CPUS != 1
    if (cluster->power_state >= STATUS_DORMANT) {
        cluster_down = 1;
    }

    if (cluster_down) {
#endif
        save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
        save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
#if MAX_CPUS != 1
    }
#endif

    save_control_registers(context);
    save_mmu(context->mmu_data);

#if MAX_CPUS != 1
    if (cluster_down) {
#endif
        save_a9_scu(cluster_context->scu_data, cluster->scu_address);
        save_pl310(cluster_context->l2_data, cluster->l2_address);
#if MAX_CPUS != 1
    }
#endif

    dormant_ret_flag = 0;

    /*
     * The dormant_ret_flag check below implies that save_banked_registers()
     * returns twice, setjmp-style: once now on the suspend path (with
     * dormant_ret_flag still 0), and once more after wakeup, when the
     * restore path has set dormant_ret_flag nonzero.
     */
    save_banked_registers(context->banked_registers);

    if (dormant_ret_flag == 0) {
        clean_dcache_v7_l1();
        clean_pl310(cpu_cluster.l2_address);

        if (e1_chip) {
            disable_pl310(cpu_cluster.l2_address);
            disable_cache_v7_l1();

            if (cpu_cluster.power_state == STATUS_DORMANT) {
                reg_write(SC_PWR_CON0, 0xE105); /* SRAM sleep */
            } else {
                reg_write(SC_PWR_CON0, 0xC12D); /* SRAM power down */
            }
            dsb();

            if (cpu_cluster.power_state == STATUS_DORMANT) {
                reg_write(SC_PWR_CON2, 0xE505); /* SRAM sleep */
            } else {
                reg_write(SC_PWR_CON2, 0xC52D); /* SRAM power down */
            }
            dsb();
        }
    }
    /* Nonzero dormant_ret_flag: we are back from the low power state. */
    return;
}
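/*
 * For reference, plausible shapes for the low-level helpers used in the SRAM
 * power-control sequence above. These are assumptions for illustration; the
 * real reg_write()/dsb() and the SC_PWR_CON0/SC_PWR_CON2 addresses and magic
 * values come from this platform's own headers.
 */
#ifndef reg_write
/* Volatile MMIO write: typical definition for register poke macros */
#define reg_write(addr, val) (*(volatile unsigned int *)(addr) = (val))
#endif

#ifndef dsb
static inline void dsb(void)
{
    /* Data Synchronization Barrier: ensure the SC_PWR_CON writes above have
     * reached the power controller before proceeding toward the low power
     * state (ARMv7 "dsb" instruction). */
    asm volatile("dsb" ::: "memory");
}
#endif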