static void __save_processor_state(struct ns_banked_cp15_context *ctxt)
{
	/* save preempt state and disable it */
	preempt_disable();

	/* The 32-bit Generic Timer context */
	save_generic_timer(&saved_cp15_timer_ctx, 0x0);

	save_cp15(ctxt->cp15_misc_regs);
	save_control_registers(ctxt->cp15_ctrl_regs, 0x0);
	save_mmu(ctxt->cp15_mmu_regs);
	save_fault_status(&ctxt->ns_cp15_fault_regs);
}
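/*
 * For orientation: the helpers above are thin wrappers over CP15 MRC reads.
 * Below is a hypothetical sketch (NOT the actual implementation -- the struct
 * layout and register selection are assumptions) of what a v7/Cortex-A9
 * control-register save boils down to.
 */
struct cp15_ctrl_regs_sketch {
	unsigned sctlr;		/* System Control Register */
	unsigned actlr;		/* Auxiliary Control Register */
	unsigned cpacr;		/* Coprocessor Access Control Register */
};

static void save_control_registers_sketch(struct cp15_ctrl_regs_sketch *regs)
{
	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r"(regs->sctlr));	/* SCTLR */
	asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r"(regs->actlr));	/* ACTLR */
	asm volatile("mrc p15, 0, %0, c1, c0, 2" : "=r"(regs->cpacr));	/* CPACR */
}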
void mstar_save_context(void)
{
	platform_smp_boot_secondary_clr(1);
	mstar_save_int_mask();
	save_performance_monitors((appf_u32 *)performance_monitor_save);
	save_a9_timers((appf_u32 *)&a9_timer_save, PERI_ADDRESS(PERI_PHYS));
	save_a9_global_timer((appf_u32 *)a9_global_timer_save, PERI_ADDRESS(PERI_PHYS));
	save_gic_interface((appf_u32 *)gic_interface_save, (unsigned)_gic_cpu_base_addr, 1);
	save_gic_distributor_private((appf_u32 *)gic_distributor_private_save, (unsigned)_gic_dist_base_addr, 1);
	save_cp15((appf_u32 *)cp15_save);
	save_a9_other((appf_u32 *)a9_other_save, 1);
	save_gic_distributor_shared((appf_u32 *)gic_distributor_shared_save, (unsigned)_gic_dist_base_addr, 1);
	save_control_registers(control_data, 1);
	save_mmu(mmu_data);
	save_a9_scu((appf_u32 *)a9_scu_save, PERI_ADDRESS(PERI_PHYS));
	save_pl310((appf_u32 *)&pl310_context_save, L2_CACHE_ADDRESS(L2_CACHE_PHYS));
	sleep_save_neon_regs(&MStar_Suspend_Buffer[SLEEPSTATE_NEONREG / WORD_SIZE]);
}
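/*
 * The save order above is deliberate: the PMU is saved first so the
 * bookkeeping itself perturbs the counters as little as possible, and the
 * PL310/NEON state goes last, just before the CPU registers. As a minimal,
 * hypothetical sketch (the real save_performance_monitors() register list
 * and buffer layout are platform-defined), a v7 PMU save reads the
 * performance-monitor CP15 registers into the supplied buffer:
 */
static void save_pmu_sketch(appf_u32 *ptr)
{
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(ptr[0]));	/* PMCR */
	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(ptr[1]));	/* PMCNTENSET */
	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(ptr[2]));	/* PMCCNTR */
	asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(ptr[3]));	/* PMUSERENR */
}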
/**
 * This function saves all the context that will be lost
 * when a CPU and cluster enter a low power state.
 *
 * This function is called with cluster->context->lock held.
 */
int appf_platform_save_context(struct appf_cluster *cluster, struct appf_cpu *cpu, unsigned flags)
{
	appf_u32 saved_items = 0;
	appf_u32 cluster_saved_items = 0;
	struct appf_cpu_context *context = cpu->context;
	struct appf_cluster_context *cluster_context = cluster->context;
	int cluster_down;

	dbg_prints("save step 1\n");
	/* Save perf. monitors first, so we don't interfere too much with counts */
	if (flags & APPF_SAVE_PMU) {
		save_performance_monitors(context->pmu_data);
		saved_items |= SAVED_PMU;
	}

	dbg_prints("save step 2\n");
	if (flags & APPF_SAVE_TIMERS) {
		save_a9_timers(context->timer_data, cluster->scu_address);
		saved_items |= SAVED_TIMERS;
	}

	dbg_prints("save step 3\n");
	if (flags & APPF_SAVE_VFP) {
		save_vfp(context->vfp_data);
		saved_items |= SAVED_VFP;
	}

	dbg_prints("save step 4\n");
	if (cluster->ic_address) {
		save_gic_interface(context->gic_interface_data, cluster->ic_address);
		save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
		/* TODO: check return value and quit if nonzero! */
	}

	dbg_prints("save step 5\n");
	save_banked_registers(context->banked_registers);
	save_cp15(context->cp15_data);
	save_a9_other(context->other_data);

	if (flags & APPF_SAVE_DEBUG) {
		save_a9_debug(context->debug_data);
		saved_items |= SAVED_DEBUG;
	}

	dbg_prints("save step 6\n");
	cluster_down = cluster->power_state >= 2;

	//if (cluster_down)	/* condition disabled: cluster-wide state is always saved */
	{
		if ((flags & APPF_SAVE_TIMERS) && cluster->cpu_version >= 0x0100) {
			save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
			cluster_saved_items |= SAVED_GLOBAL_TIMER;
		}
		save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
	}

	save_control_registers(context);
	save_mmu(context->mmu_data);
	context->saved_items = saved_items;

	dbg_prints("save step 7\n");
	//if (cluster_down)	/* condition disabled: cluster-wide state is always saved */
	{
		if (cluster->scu_address)
			save_a9_scu(cluster_context->scu_data, cluster->scu_address);
		if (flags & APPF_SAVE_L2) {
			save_pl310(cluster_context->l2_data, cluster->l2_address);
			cluster_saved_items |= SAVED_L2;
		}
		cluster_context->saved_items = cluster_saved_items;
	}

	dbg_prints("save step 8\n");
	/*
	 * DISABLE DATA CACHES
	 *
	 * First, disable, then clean+invalidate the L1 cache.
	 *
	 * Note that if L1 were to be dormant and we were the last CPU, we would
	 * only need to clean some key data out of L1 and clean+invalidate the stack.
	 */
	//asm volatile("mov r0, #0");
	//asm volatile("mcr p15, 0, r0, c7, c5, 0");
	//disable_clean_inv_dcache_v7_l1();
	//v7_flush_dcache_all();

	/*
	 * Next, disable cache coherency.
	 */
	if (cluster->scu_address)
		write_actlr(read_actlr() & ~A9_SMP_BIT);

	dbg_prints("save step 9\n");
	/*
	 * If the L2 cache is in use, there is still more to do.
	 *
	 * Note that if the L2 cache is not in use, we don't disable the MMU,
	 * as clearing the C bit is good enough.
	 */
	if (flags & APPF_SAVE_L2) {
		/*
		 * Disable the MMU (and the L2 cache if necessary), then
		 * clean+invalidate the stack in the L2. This all has to be done
		 * in one assembler function, as we can't use the C stack during
		 * these operations.
		 */
		dbg_print("flags=", flags);
		disable_clean_inv_cache_pl310(cluster->l2_address,
					      appf_platform_get_stack_pointer() - STACK_SIZE,
					      STACK_SIZE, cluster_down);
		/*
		 * We need to partially or fully clean the L2, because we will
		 * enter reset with caching disabled.
		 */
		//if (cluster_down)
		{
			/* Clean the whole thing */
			//clean_pl310(cluster->l2_address);
			//l2x0_flush_all();
		}
		//else
		{
			/*
			 * L2 staying on, so just clean everything this CPU will need
			 * before the MMU is re-enabled.
			 *
			 * TODO: some of this data won't change after boot-time init,
			 * and could be cleaned once during late_init.
			 */
			//clean_range_pl310(cluster, sizeof(struct appf_cluster), cluster->l2_address);
			//clean_range_pl310(cpu, sizeof(struct appf_cpu), cluster->l2_address);
			//clean_range_pl310(context, sizeof(struct appf_cpu_context), cluster->l2_address);
			//clean_range_pl310(context->mmu_data, MMU_DATA_SIZE, cluster->l2_address);
			//clean_range_pl310(cluster_context, sizeof(struct appf_cluster_context), cluster->l2_address);
		}
	}

	dbg_prints("save step 10\n");
	return APPF_OK;
}
/*------------------------------------------------------------------------------
    Function: mstar_pm_enter

    Description:
        Actually enter the sleep state.
    Input: (The arguments were used by caller to input data.)
        state - suspend state (not used)
    Output: (The arguments were used by caller to receive data.)
        None.
    Return:
        0
    Remark:
        None.
-------------------------------------------------------------------------------*/
static int mstar_pm_enter(suspend_state_t state)
{
	void *pWakeup = 0;

	__asm__ volatile (
		"ldr r1, =MSTAR_WAKEUP_ENTRY\n"
		"str r1, %0"
		: "=m"(pWakeup) :: "r1"
	);

	if (pre_str_max_cnt != get_str_max_cnt()) {
		pre_str_max_cnt = get_str_max_cnt();
		mstr_cnt = 0;
	}
	mstr_cnt++;

	mstar_save_int_mask();
	save_performance_monitors((appf_u32 *)performance_monitor_save);
	save_a9_timers((appf_u32 *)&a9_timer_save, PERI_ADDRESS(PERI_PHYS));
	save_a9_global_timer((appf_u32 *)a9_global_timer_save, PERI_ADDRESS(PERI_PHYS));
	save_gic_interface((appf_u32 *)gic_interface_save, (unsigned)_gic_cpu_base_addr, 1);
	save_gic_distributor_private((appf_u32 *)gic_distributor_private_save, (unsigned)_gic_dist_base_addr, 1);
	save_cp15((appf_u32 *)cp15_save);	/* CSSELR */
	//save_a9_other((appf_u32 *)a9_other_save, 1);
	//save_v7_debug((appf_u32 *)&a9_dbg_data_save);
	save_gic_distributor_shared((appf_u32 *)gic_distributor_shared_save, (unsigned)_gic_dist_base_addr, 1);
	save_control_registers(control_data, 1);
	save_mmu(mmu_data);
	save_a9_scu((appf_u32 *)a9_scu_save, PERI_ADDRESS(PERI_PHYS));
	save_pl310((appf_u32 *)&pl310_context_save, L2_CACHE_ADDRESS(L2_CACHE_PHYS));
	sleep_save_neon_regs(&MStar_Suspend_Buffer[SLEEPSTATE_NEONREG / WORD_SIZE]);
	sleep_save_cpu_registers(MStar_Suspend_Buffer);
	sleep_set_wakeup_save_addr_phy(mstar_virt_to_phy((void *)WAKEUP_SAVE_ADDR), (void *)WAKEUP_SAVE_ADDR);
	sleep_prepare_last(mstar_virt_to_phy(pWakeup));
	write_actlr(read_actlr() & ~A9_SMP_BIT);

	SerPrintf("\nMStar STR waiting power off...\n");
	__asm__ volatile (
		"nop\n"
		::: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12"
	);
	/* Debug hook (disabled): spin here before notifying the PM.
	__asm__ volatile (
		"SUSPEND_WAIT:\n"
		"nop\n"
		"nop\n"
		"b SUSPEND_WAIT\n"
	);
	*/

	/* Pass a different password to the PM to do AC on/off */
	if (get_str_max_cnt() > 0 && mstr_cnt >= get_str_max_cnt()) {
		SerPrintf("Max Cnt Ac off...\n");
		mstar_str_notifypmmaxcnt_off();
	} else {
#ifdef CONFIG_MSTAR_STR_CRC
		if (get_str_crc()) {
			MDrv_MBX_NotifyPMtoCrcCheck(false);
			MDrv_MBX_write_kernel_info();
			MDrv_MBX_NotifyPMtoCrcCheck(true);
			while (!MDrv_MBX_recviceAck())
				;
		}
#endif
		MDrv_MBX_NotifyPMtoSetPowerOff();
	}

	/* Spin until power is actually removed */
	__asm__ volatile (
		"WAIT_SLEEP:\n"
		"nop\n"
		"nop\n"
		"b WAIT_SLEEP\n"
	);

	/* Execution resumes here after wakeup */
	__asm__ volatile (
		"MSTAR_WAKEUP_ENTRY:\n"
		"bl ensure_environment\n"
		"bl use_tmp_stack\n"
		"mov r0, #'K'\n"
		"bl __PUTCHAR\n"
		"ldr r1, =exit_addr\n"
		"sub r0, pc, #4\n"
		"b sleep_wakeup_first\n"		/* sleep_wakeup_first(); */
		"exit_addr:\n"
		"mov r0, #'L'\n"
		"bl PUTCHAR_VIRT\n"
		"ldr r0, =MStar_Suspend_Buffer\n"
		"bl sleep_restore_cpu_registers\n"	/* sleep_restore_cpu_registers(MStar_Suspend_Buffer); */
		::: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r12"
	);

	SerPrintf("\nMStar STR Resuming...\n");
	sleep_restore_neon_regs(&MStar_Suspend_Buffer[SLEEPSTATE_NEONREG / WORD_SIZE]);
	restore_a9_scu((appf_u32 *)a9_scu_save, PERI_ADDRESS(PERI_PHYS));
	restore_pl310((appf_u32 *)&pl310_context_save, L2_CACHE_ADDRESS(L2_CACHE_PHYS), 0);	/* 0 means power off */
	restore_mmu(mmu_data);
	restore_control_registers(control_data, 1);
	//restore_v7_debug((appf_u32 *)&a9_dbg_data_save);
	restore_gic_distributor_shared((appf_u32 *)gic_distributor_shared_save, (unsigned)_gic_dist_base_addr, 1);
	gic_distributor_set_enabled(TRUE, (unsigned)_gic_dist_base_addr);
	restore_gic_distributor_private((appf_u32 *)gic_distributor_private_save, (unsigned)_gic_dist_base_addr, 1);
	restore_gic_interface((appf_u32 *)gic_interface_save, (unsigned)_gic_cpu_base_addr, 1);
	//restore_a9_other((appf_u32 *)a9_other_save, 1);
	restore_cp15((appf_u32 *)cp15_save);
	restore_a9_timers((appf_u32 *)&a9_timer_save, PERI_ADDRESS(PERI_PHYS));
	restore_a9_global_timer((appf_u32 *)a9_global_timer_save, PERI_ADDRESS(PERI_PHYS));
	restore_performance_monitors((appf_u32 *)performance_monitor_save);
	mstar_restore_int_mask();
	sleep_clear_wakeup_save_addr_phy(mstar_virt_to_phy((void *)WAKEUP_SAVE_ADDR), (void *)WAKEUP_SAVE_ADDR);
	platform_smp_boot_secondary_init(1);
	mstar_sleep_cur_cpu_flush();

#if defined(CONFIG_MP_PLATFORM_ARM)
	{
		extern int __init init_irq_fiq_merge(void);
		init_irq_fiq_merge();
	}
#endif /* CONFIG_MP_PLATFORM_ARM */

	return 0;
}
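/*
 * mstar_pm_enter() plugs into the kernel suspend framework through a
 * platform_suspend_ops structure. A minimal registration sketch, assuming
 * only suspend-to-RAM is supported (the init-function name is hypothetical;
 * .enter is the function above):
 */
#include <linux/suspend.h>

static const struct platform_suspend_ops mstar_pm_ops = {
	.valid = suspend_valid_only_mem,	/* accept only PM_SUSPEND_MEM */
	.enter = mstar_pm_enter,
};

static int __init mstar_pm_init(void)
{
	suspend_set_ops(&mstar_pm_ops);
	return 0;
}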
/**
 * This function saves all the context that will be lost
 * when a CPU and cluster enter a low power state.
 */
static void platform_save_context(struct cpu_cluster *cluster, struct cpu *pcpu)
{
	struct cpu_context *context = &(pcpu->context);
	struct cpu_cluster_context *cluster_context = &(cluster->context);
#if MAX_CPUS != 1
	int cluster_down = 0;
#endif

	/* Save perf. monitors first, so we don't interfere too much with counts */
	save_performance_monitors(context->pmu_data);

	save_a9_timers(context->timer_data, cluster->scu_address);
	save_vfp(context->vfp_data);
	save_gic_interface(context->gic_interface_data, pcpu->ic_address);
	save_gic_distributor_private(context->gic_dist_private_data, cluster->ic_address);
	save_cp15(context->cp15_data);
	save_a9_other(context->other_data);
#if 0 /* wschen 2011-07-28 */
	save_a9_debug(context->debug_data);
#endif

#if MAX_CPUS != 1
	if (cluster->power_state >= STATUS_DORMANT)
		cluster_down = 1;

	if (cluster_down) {
#endif
		save_a9_global_timer(cluster_context->global_timer_data, cluster->scu_address);
		save_gic_distributor_shared(cluster_context->gic_dist_shared_data, cluster->ic_address);
#if MAX_CPUS != 1
	}
#endif

	save_control_registers(context);
	save_mmu(context->mmu_data);

#if MAX_CPUS != 1
	if (cluster_down) {
#endif
		save_a9_scu(cluster_context->scu_data, cluster->scu_address);
		save_pl310(cluster_context->l2_data, cluster->l2_address);
#if MAX_CPUS != 1
	}
#endif

	dormant_ret_flag = 0;
	/* save_banked_registers() returns a second time after wakeup, with
	 * dormant_ret_flag set nonzero by the resume code; see the sketch below. */
	save_banked_registers(context->banked_registers);

	if (dormant_ret_flag == 0) {
		/* Suspend path: flush caches, then put the SRAMs to sleep or power them down */
		clean_dcache_v7_l1();
		clean_pl310(cpu_cluster.l2_address);
		if (e1_chip) {
			disable_pl310(cpu_cluster.l2_address);
			disable_cache_v7_l1();
			if (cpu_cluster.power_state == STATUS_DORMANT)
				reg_write(SC_PWR_CON0, 0xE105);	/* SRAM sleep */
			else
				reg_write(SC_PWR_CON0, 0xC12D);	/* SRAM power down */
			dsb();
			if (cpu_cluster.power_state == STATUS_DORMANT)
				reg_write(SC_PWR_CON2, 0xE505);	/* SRAM sleep */
			else
				reg_write(SC_PWR_CON2, 0xC52D);	/* SRAM power down */
			dsb();
		}
		return;
	} else {
		/* Resume path: context has already been restored */
		return;
	}
}
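/*
 * The dormant_ret_flag dance above is the classic "function that returns
 * twice" pattern: save_banked_registers() returns once now on the suspend
 * path (dormant_ret_flag still 0), and again after wakeup once the resume
 * code has restored the banked registers and set the flag nonzero. A
 * standalone analogy in portable C, with setjmp/longjmp standing in for the
 * save/restore assembly (purely illustrative, not the platform code):
 */
#include <setjmp.h>
#include <stdio.h>

static jmp_buf resume_point;

int main(void)
{
	/* setjmp() returns 0 on the "suspend" path, and nonzero when the
	 * "wakeup" code longjmp()s back to it. */
	if (setjmp(resume_point) == 0) {
		printf("suspend path: powering down\n");
		longjmp(resume_point, 1);	/* stands in for the wakeup code */
	} else {
		printf("resume path: context already restored\n");
	}
	return 0;
}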