/*
 * mt_smp_secondary_init - early per-CPU setup run on a freshly booted
 * secondary core, before it joins the scheduler.
 * @cpu: logical index of the core being brought up.
 */
void __cpuinit mt_smp_secondary_init(unsigned int cpu)
{
#if 0
	struct wd_api *wd_api = NULL;	/* fix build error */

	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_cpu_hot_plug_on_notify(cpu);
#endif
	pr_debug("Slave cpu init\n");
	HOTPLUG_INFO("platform_secondary_init, cpu: %d\n", cpu);

	/* Bring up this core's GIC CPU interface. */
	mt_gic_secondary_init();

	/*
	 * Let the primary processor know we're out of the
	 * pen, then head off into the C entry point.
	 */
	write_pen_release(-1);

#if !defined(CONFIG_ARM_PSCI)	/* cannot enable in secure world */
	fiq_glue_resume();
#endif

	/* Synchronise with the boot thread. */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
/*
 * platform_cpu_die: shutdown a CPU
 * @cpu: logical index of the CPU going offline.
 *
 * Notifies the watchdog driver (so the dying core stops being kicked),
 * records a hotplug trace event when the scheduler tracers are enabled,
 * then enters the low-power shutdown sequence.
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	struct wd_api *wd_api = NULL;

	HOTPLUG_INFO("platform_cpu_die, cpu: %d\n", cpu);

	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_cpu_hot_plug_off_notify(cpu);

#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_cpu_hotplug(cpu, 0, per_cpu(last_event_ts, cpu));
	per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
#endif

	/* We're ready for shutdown now, so do it. */
	cpu_enter_lowpower(cpu);
	platform_do_lowpower(cpu, &spurious);

	/*
	 * Bring this CPU back into the world of cache
	 * coherency, and then restore interrupts.
	 */
	cpu_leave_lowpower(cpu);

	if (spurious)
		HOTPLUG_INFO("platform_do_lowpower, spurious wakeup call, cpu: %d, spurious: %d\n", cpu, spurious);
}
void arch_reset(char mode, const char *cmd) { char reboot = 0; int res = 0; struct wd_api *wd_api = NULL; res = get_wd_api(&wd_api); pr_notice("arch_reset: cmd = %s\n", cmd ? : "NULL"); if (cmd && !strcmp(cmd, "charger")) { /* do nothing */ } else if (cmd && !strcmp(cmd, "recovery")) { rtc_mark_recovery(); } else if (cmd && !strcmp(cmd, "bootloader")) { rtc_mark_fast(); } #ifdef CONFIG_MTK_KERNEL_POWER_OFF_CHARGING else if (cmd && !strcmp(cmd, "kpoc")) { rtc_mark_kpoc(); } #endif else { reboot = 1; } if (res) { pr_notice("arch_reset, get wd api error %d\n", res); } else { wd_api->wd_sw_reset(reboot); } }
/* Press key to enter kdb */ void aee_trigger_kdb(void) { int res = 0; struct wd_api *wd_api = NULL; res = get_wd_api(&wd_api); /* disable Watchdog HW, note it will not enable WDT again when kdb return */ if (res) { LOGE("aee_trigger_kdb, get wd api error\n"); } else { wd_api->wd_disable_all(); } #ifdef CONFIG_SCHED_DEBUG sysrq_sched_debug_show(); #endif LOGI("User trigger KDB\n"); mtk_set_kgdboc_var(); kgdb_breakpoint(); LOGI("Exit KDB\n"); #ifdef CONFIG_LOCAL_WDT /* enable local WDT */ if (res) { LOGD("aee_trigger_kdb, get wd api error\n"); } else { wd_api->wd_restart(WD_TYPE_NOLOCK); } #endif }
/*
 * platform_cpu_die: shutdown a CPU
 * @cpu: logical index of the CPU going offline.
 *
 * Tells the watchdog driver the core is leaving, then runs the
 * enter/do/leave low-power sequence for CPU shutdown.
 */
void platform_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	struct wd_api *wd_api = NULL;

	HOTPLUG_INFO("platform_cpu_die, cpu: %d\n", cpu);

	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_cpu_hot_plug_off_notify(cpu);

	/* We're ready for shutdown now, so do it. */
	cpu_enter_lowpower(cpu);
	platform_do_lowpower(cpu, &spurious);

	/*
	 * Bring this CPU back into the world of cache
	 * coherency, and then restore interrupts.
	 */
	cpu_leave_lowpower(cpu);

	if (spurious)
		HOTPLUG_INFO("platform_do_lowpower, spurious wakeup call, cpu: %d, spurious: %d\n", cpu, spurious);
}
void spm_module_init(void) { int r; unsigned long flags; struct wd_api *wd_api; spin_lock_irqsave(&spm_lock, flags); /* enable register control */ spm_write(SPM_POWERON_CONFIG_SET, (SPM_PROJECT_CODE << 16) | (1U << 0)); /* init power control register (select PCM clock to 26M) */ spm_write(SPM_POWER_ON_VAL0, 0); spm_write(SPM_POWER_ON_VAL1, 0x00015820); spm_write(SPM_PCM_PWR_IO_EN, 0); /* reset PCM */ spm_write(SPM_PCM_CON0, CON0_CFG_KEY | CON0_PCM_SW_RESET); spm_write(SPM_PCM_CON0, CON0_CFG_KEY); /* init PCM control register */ spm_write(SPM_PCM_CON0, CON0_CFG_KEY | CON0_IM_SLEEP_DVS); spm_write(SPM_PCM_CON1, CON1_CFG_KEY | CON1_SPM_SRAM_ISO_B | CON1_SPM_SRAM_SLP_B | CON1_IM_NONRP_EN | CON1_MIF_APBEN); spm_write(SPM_PCM_IM_PTR, 0); spm_write(SPM_PCM_IM_LEN, 0); /* SRCLKENA: POWER_ON_VAL1 (PWR_IO_EN[7]=0) or POWER_ON_VAL1|r7 (PWR_IO_EN[7]=1) */ /* CLKSQ: POWER_ON_VAL0 (PWR_IO_EN[0]=0) or r0 (PWR_IO_EN[0]=1) */ /* SRCLKENAI will trigger 26M-wake/sleep event */ spm_write(SPM_CLK_CON, CC_CXO32K_RM_EN_MD); spm_write(SPM_PCM_SRC_REQ, (1U << 1)); /* clean wakeup event raw status */ spm_write(SPM_SLEEP_WAKEUP_EVENT_MASK, 0xffffffff); /* clean ISR status */ spm_write(SPM_SLEEP_ISR_MASK, ISRM_ALL); spm_write(SPM_SLEEP_ISR_STATUS, ISRC_ALL); spm_write(SPM_PCM_SW_INT_CLEAR, PCM_SW_INT_ALL); spin_unlock_irqrestore(&spm_lock, flags); r = request_irq(MT_SPM_IRQ_ID, spm_irq_handler, IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, "mt-spm", NULL); if (r) { spm_error("FAILED TO REQUEST SPM IRQ (%d)\n", r); WARN_ON(1); } get_wd_api(&wd_api); if (wd_api->wd_spmwdt_mode_config && wd_api->wd_thermal_mode_config) { wd_api->wd_spmwdt_mode_config(WD_REQ_EN, WD_REQ_RST_MODE); wd_api->wd_thermal_mode_config(WD_REQ_EN, WD_REQ_RST_MODE); } else { spm_error("FAILED TO GET WD API\n"); WARN_ON(1); } spm_go_to_normal(); /* let PCM help to do thermal protection */ }
/*
 * mrdump_hw_enable - toggle the watchdog's DRAM-reserved (memory dump)
 * mode. No-op when the watchdog API is unavailable.
 */
static void mrdump_hw_enable(bool enabled)
{
	struct wd_api *wd_api = NULL;

	get_wd_api(&wd_api);
	if (wd_api)
		wd_api->wd_dram_reserved_mode(enabled);
}
/* Kick (restart) the watchdog during panic handling, lock-free. */
static void ipanic_kick_wdt(void)
{
	struct wd_api *wd_api = NULL;
	int res = get_wd_api(&wd_api);

	if (res == 0)
		wd_api->wd_restart(WD_TYPE_NOLOCK);
}
int pwr_loss_reset_thread(void *p) { signed long ret = 0; signed long l_val1 = 0; signed long l_val2 = 0; signed long l_count = 0; struct wd_api *wd_api = NULL; #ifdef PWR_LOSS_MT6573 volatile unsigned short *Reg1 = (unsigned short *)PWR_LOSS_WDT_MODE; volatile unsigned short *Reg2 = (unsigned short *)PWR_LOSS_WDT_LENGTH; volatile unsigned short *Reg3 = (unsigned short *)PWR_LOSS_WDT_RESTART; #endif #ifdef PWR_LOSS_DEBUG printk(KERN_NOTICE "%s Power Loss Test: sleep time = 100sec\n", TAG); #endif while (1){ printk(KERN_WARNING "%s Power Loss Test: wait for reset...!\n", TAG); set_current_state(TASK_UNINTERRUPTIBLE); ret = schedule_timeout(PWR_LOSS_SLEEP_TIME); down_read(&power_loss_info.rwsem); if(power_loss_info.wdt_reboot_support == WDT_REBOOT_OFF) { up_read(&power_loss_info.rwsem); printk(KERN_WARNING "%s Power Loss Test: wdt reboot pause...!\n", TAG); msleep(1000); continue; } up_read(&power_loss_info.rwsem); printk(KERN_ERR "%s Power Loss Test: ret = %d, do reset now...\n", TAG, ret); #ifdef PWR_LOSS_MT6575 #ifdef CONFIG_MTK_MTD_NAND #endif wdt_arch_reset(0xff); #elif defined PWR_LOSS_MT6582 get_wd_api(&wd_api); printk(KERN_ERR "%s Power Loss Test: ret = %d, do reset now...wd_api = 0x%x\n", TAG, ret,wd_api); if (wd_api) wd_api->wd_sw_reset(0Xff); #elif defined PWR_LOSS_MT6573 #ifdef CONFIG_MTK_MTD_NAND if(!mt6573_nandchip_Reset()){ printk(KERN_ERR "%s NAND_MVG mt6573_nandchip_Reset Failed!\n", TAG); } #endif /* reset by watch dog */ *Reg1 = 0x2200; *Reg2 = (0x3F<<5)|0x8; *Reg3 = 0x1971; *Reg1 = 0x2217; #endif while(1); } }
/*XXX Note: 2012/11/19 mtk_wdt_restart prototype is * different on 77 and 89 platform. the owner promise to modify it */ static void ipanic_kick_wdt(void) { int res=0; struct wd_api*wd_api = NULL; res = get_wd_api(&wd_api); if(res) { //aee_wdt_printf("ipanic_kick_wdt, get wd api error\n"); } else { wd_api->wd_restart(WD_TYPE_NOLOCK); } }
static void mrdump_reboot(void) { int res; struct wd_api *wd_api = NULL; res = get_wd_api(&wd_api); if (res) { pr_alert("arch_reset, get wd api error %d\n", res); while (1) cpu_relax(); } else { wd_api->wd_sw_reset(0); } }
/*
 * wdt_fiq - FIQ handler invoked when the HW watchdog fires.
 *
 * Latches the WDT status into the non-reset-persistent register so the
 * cause survives the reboot, dumps the kick/check bitmaps when the
 * kicker is configured, then hands off to the AEE FIQ info dump.
 *
 * Fix: get_wd_api() can fail and leave wd_api NULL; dereferencing it
 * unguarded inside a FIQ would wedge the handler before any state is
 * recorded.
 */
static void wdt_fiq(void *arg, void *regs, void *svc_sp)
{
	unsigned int wdt_mode_val;
	struct wd_api *wd_api = NULL;

	get_wd_api(&wd_api);
	wdt_mode_val = DRV_Reg32(MTK_WDT_STATUS);
	DRV_WriteReg32(MTK_WDT_NONRST_REG, wdt_mode_val);
#ifdef CONFIG_MTK_WD_KICKER
	if (wd_api)	/* fix: guard against failed get_wd_api() */
		aee_wdt_printf("\n kick=0x%08x,check=0x%08x \n",
			       wd_api->wd_get_kick_bit(), wd_api->wd_get_check_bit());
#endif

	aee_wdt_fiq_info(arg, regs, svc_sp);
}
/*
 * __cpu_up - boot a secondary CPU and wait for it to come online.
 * @cpu:  logical index of the CPU to start.
 * @idle: idle task whose stack the secondary core will run on.
 *
 * Returns 0 on success, negative errno on failure. If the core boots
 * but never reports online, the watchdog is used to reset the whole
 * system (a half-alive core is unrecoverable here).
 *
 * Fix: removed the unused local `i` (compiler warning, dead code).
 */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret, res;
	struct wd_api *wd_api = NULL;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
#if 1
			pr_crit("Trigger WDT RESET\n");
			res = get_wd_api(&wd_api);
			if (res)
				pr_crit("get wd api error !!\n");
			else
				wd_api->wd_sw_reset(3);	/* => this action will ask system to reboot */
#endif
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	return ret;
}
/* Hook to Linux PM */ void mtkpasr_phaseone_ops(void) { struct wd_api *wd_api = NULL; /* To restart wdt */ if (get_wd_api(&wd_api) == 0) { mtkpasr_log("PASR kicks WDT!\n"); wd_api->wd_restart(WD_TYPE_NORMAL); } IS_MTKPASR_ENABLED_NORV; /* It means no need to apply this op (Simply for paging or other periodic wakeups) */ if (is_mtkpasr_triggered()) { return; } MTKPASR_START_PROFILE(); /* It will go to MTKPASR stage */ current->flags |= PF_MTKPASR; /* Inform all other memory pools to release their memory */ try_to_shrink_slab(); /* It will leave MTKPASR stage */ current->flags &= ~PF_MTKPASR; #ifdef CONFIG_MTKPASR_MAFL if (mtkpasr_no_phaseone_ops()) goto no_phaseone; #endif mtkpasr_info("\n"); /* Drop cache - linux/mm.h */ drop_pagecache(); #ifdef CONFIG_MTKPASR_MAFL no_phaseone: #endif MTKPASR_END_PROFILE(); }
void last_kmsg_store_to_emmc(void) { int buff_size; struct wd_api*wd_api = NULL; get_wd_api(&wd_api); // if(num_online_cpus() > 1){ if(wd_api->wd_get_check_bit() > 1){ printk(KERN_ERR"ram_console: online cpu %d!\n",wd_api->wd_get_check_bit()); if(boot_finish == 0) return; } /* save log to emmc */ buff_size = ram_console_buffer->sz_buffer; card_dump_func_write((unsigned char *)ram_console_buffer, buff_size, EMMC_ADDR, DUMP_INTO_BOOT_CARD_IPANIC); pr_err("ram_console: save kernel log (0x%x) to emmc!\n", buff_size); }
int pwr_loss_reset_thread(void *p) { signed long ret = 0; signed long sleep_time = PWR_LOSS_SLEEP_TIME; struct wd_api *wd_api = NULL; #if PWR_LOSS_RANDOM_SW_RESET get_random_bytes(&sleep_time, sizeof(signed long)); if (sleep_time < 0) sleep_time &= 0x7fffffff; sleep_time %= PWR_LOSS_SLEEP_MAX_TIME; #ifdef PWR_LOSS_DEBUG pr_err("%s Power Loss Test: sleep time =%ld\n", TAG, sleep_time); #endif #endif while (1) { pr_err("%s Power Loss Test: wait for reset...!\n", TAG); set_current_state(TASK_UNINTERRUPTIBLE); ret = schedule_timeout(sleep_time); down_read(&power_loss_info.rwsem); if (power_loss_info.wdt_reboot_support == WDT_REBOOT_OFF) { up_read(&power_loss_info.rwsem); pr_err("%s Power Loss Test: wdt reboot pause...!\n", TAG); msleep(1000); continue; } up_read(&power_loss_info.rwsem); pr_err("%s Power Loss Test: ret = %ld, do reset now...\n", TAG, ret); get_wd_api(&wd_api); pr_err("%s Power Loss Test: ret = %ld, do reset now...wd_api = 0x%x\n", TAG, ret, (unsigned int)wd_api); if (wd_api) wd_api->wd_sw_reset(0Xff); while (1) ; } }
/*
 * __cpu_up - boot a secondary CPU and wait for it to come online.
 * @cpu:  logical index of the CPU to start.
 * @idle: idle task whose stack the secondary core will run on.
 *
 * Returns 0 on success, negative errno otherwise. A core that boots
 * but never reports online triggers a watchdog reset of the system.
 */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret, res;
	int i;
	struct wd_api *wd_api = NULL;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
#if 0	/* disabled diagnostic register dump */
			for (i = 0x0; i <= 4; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}
			for (i = 0x05; i <= 0x15; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}
			for (i = 0x20; i <= 0x45; i++) {
				REG_WRITE(0x10200404, ((REG_READ(0x10200404) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200408));
			}
			for (i = 0x0; i <= 4; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}
			for (i = 0x05; i <= 0x15; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}
			for (i = 0x20; i <= 0x45; i++) {
				REG_WRITE(0x10200504, ((REG_READ(0x10200504) & 0xffffff00) | i));
				pr_crit("Cluster0: Set 8'h%x : 0x%x\n", i, REG_READ(0x10200508));
			}
			pr_crit("MPx_AXI_CONFIG: REG 0x1020002c : 0x%x\n", REG_READ(0x1020002c));
			pr_crit("MPx_AXI_CONFIG: REG 0x1020022c : 0x%x\n", REG_READ(0x1020022c));
			pr_crit("ACLKEN_DIV: REG 0x10200640 : 0x%x\n", REG_READ(0x10200640));
			pr_crit("CCI: REG 0x10394000 : 0x%x\n", REG_READ(0x10394000));
			pr_crit("CCI: REG 0x10395000 : 0x%x\n", REG_READ(0x10395000));
#endif
			pr_crit("CPU%u: failed to come online\n", cpu);
#if 1
			pr_crit("Trigger WDT RESET\n");
			res = get_wd_api(&wd_api);
			if (res)
				pr_crit("get wd api error !!\n");
			else
				wd_api->wd_sw_reset(3);	/* => this action will ask system to reboot */
#endif
#if 0	/* disabled alternative: full PMIC reset */
			pr_crit("Trigger PMIC full reset.\n");
			if (check_pmic_wrap_init())
				mt_pwrap_hal_init();
			pmic_full_reset();
#endif
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
	return ret;
}
/*
 * wk_proc_cmd_write - proc write handler configuring the WDT kicker.
 *
 * Expects "<mode> <interval> <timeout> <debug_sleep> <enable>" from
 * userspace, validates the ranges, toggles the (local) WDT, and stores
 * the new kicker configuration under `lock`.
 *
 * Fixes:
 *  - copy_from_user() returns the number of bytes NOT copied (>= 0);
 *    the old "ret < 0" test could never detect a failed copy.
 *  - the sscanf result was ignored, so malformed input was parsed into
 *    uninitialized locals; now rejected explicitly.
 *  - the invalid-timeout message printed `kinterval` instead of `timeout`.
 *  - mode==2 dereferenced my_wd_api even when get_wd_api() had failed.
 */
static int wk_proc_cmd_write(struct file *file, const char *buf, size_t count, loff_t *data)
{
	int ret;
	int timeout;
	int mode;
	int kinterval;
	int en;	/* enable or disable ext wdt: 1 <-> enable, 0 <-> disable */
	struct wd_api *my_wd_api = NULL;

	ret = get_wd_api(&my_wd_api);
	if (ret)
		printk("get public api error in wd common driver %d", ret);

	if (count == 0)
		return -1;
	if (count > 255)
		count = 255;

	ret = copy_from_user(cmd_buf, buf, count);
	if (ret)	/* fix: copy_from_user() never returns < 0 */
		return -1;
	cmd_buf[count] = '\0';

	dbgmsg("Write %s\n", cmd_buf);
	/* fix: reject malformed input instead of using uninitialized values */
	if (sscanf(cmd_buf, "%d %d %d %d %d", &mode, &kinterval, &timeout, &debug_sleep, &en) != 5)
		return -1;

	printk("[WDK] mode=%d interval=%d timeout=%d enable =%d\n", mode, kinterval, timeout, en);

	if (timeout < kinterval) {
		errmsg("The interval(%d) value should be smaller than timeout value(%d)\n", kinterval, timeout);
		return -1;
	}
	if ((timeout < MIN_KICK_INTERVAL) || (timeout > MAX_KICK_INTERVAL)) {
		/* fix: report the offending timeout, not the interval */
		errmsg("The timeout(%d) is invalid (%d - %d)\n", timeout, MIN_KICK_INTERVAL, MAX_KICK_INTERVAL);
		return -1;
	}
	if ((kinterval < MIN_KICK_INTERVAL) || (kinterval > MAX_KICK_INTERVAL)) {
		errmsg("The interval(%d) is invalid (%d - %d)\n", kinterval, MIN_KICK_INTERVAL, MAX_KICK_INTERVAL);
		return -1;
	}
	if (!((mode == WDT_IRQ_ONLY_MODE) || (mode == WDT_HW_REBOOT_ONLY_MODE) || (mode == WDT_DUAL_MODE))) {
		errmsg("Tha watchdog kicker wdt mode is not correct %d\n", mode);
		return -1;
	}

	if (1 == en) {
		mtk_wdt_enable(WK_WDT_EN);
#ifdef CONFIG_LOCAL_WDT
		local_wdt_enable(WK_WDT_EN);
		printk("[WDK] enable local wdt \n");
#endif
		printk("[WDK] enable wdt \n");
	}
	if (0 == en) {
		mtk_wdt_enable(WK_WDT_DIS);
#ifdef CONFIG_LOCAL_WDT
		local_wdt_enable(WK_WDT_DIS);
		printk("[WDK] disable local wdt \n");
#endif
		printk("[WDK] disable wdt \n");
	}

	spin_lock(&lock);
	g_enable = en;
	g_kinterval = kinterval;
	g_wk_wdt_mode = mode;
	if (1 == mode) {
		/* irq mode only usefull to 75 */
		mtk_wdt_swsysret_config(0x20000000, 1);
		printk("[WDK] use irq mod \n");
	} else if (0 == mode) {
		/* reboot mode only usefull to 75 */
		mtk_wdt_swsysret_config(0x20000000, 0);
		printk("[WDK] use reboot mod \n");
	} else if (2 == mode) {
		if (my_wd_api)	/* fix: get_wd_api() may have failed above */
			my_wd_api->wd_set_mode(WDT_IRQ_ONLY_MODE);
	} else {
		printk("[WDK] mode err \n");
	}
	g_timeout = timeout;
	if (mode != 2)
		g_need_config = 1;
	spin_unlock(&lock);

	return count;
}
/*
 * cpu_pdn:
 *   true = CPU dormant
 *   false = CPU standby
 * pwrlevel:
 *   0 = AXI is off
 *   1 = AXI is 26M
 * pwake_time:
 *   >= 0 = specific wakeup period
 *
 * spm_go_to_sleep_dpidle - enter deep-idle via the SPM PCM.
 * Saves/restores the default dpidle timer and wake sources, notifies
 * the watchdog around the suspend window, masks IRQs (routing wakeups
 * through CIRQ), runs the PCM deep-idle firmware, and reports the wake
 * reason. NOTE(review): sequence is order-sensitive; kept 1:1.
 */
wake_reason_t spm_go_to_sleep_dpidle(u32 spm_flags, u32 spm_data)
{
	u32 sec = 0;
	u32 dpidle_timer_val = 0;
	u32 dpidle_wake_src = 0;
	int wd_ret;
	struct wake_status wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	struct wd_api *wd_api;
	static wake_reason_t last_wr = WR_NONE;
	struct pcm_desc *pcmdesc = __spm_dpidle.pcmdesc;
	struct pwr_ctrl *pwrctrl = __spm_dpidle.pwrctrl;

	/* backup original dpidle setting */
	dpidle_timer_val = pwrctrl->timer_val;
	dpidle_wake_src = pwrctrl->wake_src;

	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);

#if SPM_PWAKE_EN
	sec = spm_get_wake_period(-1 /* FIXME */, last_wr);
#endif
	pwrctrl->timer_val = sec * 32768;

	pwrctrl->wake_src = spm_get_sleep_wakesrc();

	wd_ret = get_wd_api(&wd_api);
	if (!wd_ret)
		wd_api->wd_suspend_notify();

	spin_lock_irqsave(&__spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(SPM_IRQ0_ID);
	mt_cirq_clone_gic();
	mt_cirq_enable();

	/* set PMIC WRAP table for deepidle power control */
	mt_cpufreq_set_pmic_phase(PMIC_WRAP_PHASE_DEEPIDLE);

	spm_crit2("sleep_deepidle, sec = %u, wakesrc = 0x%x [%u]\n",
		  sec, pwrctrl->wake_src, is_cpu_pdn(pwrctrl->pcm_flags));

	__spm_reset_and_init_pcm(pcmdesc);

	__spm_kick_im_to_fetch(pcmdesc);

	if (request_uart_to_sleep()) {
		last_wr = WR_UART_BUSY;
		goto RESTORE_IRQ;
	}

	__spm_init_pcm_register();

	__spm_init_event_vector(pcmdesc);

	__spm_set_power_control(pwrctrl);

	__spm_set_wakeup_event(pwrctrl);

	__spm_kick_pcm_to_run(pwrctrl);

	spm_dpidle_pre_process();

	spm_trigger_wfi_for_dpidle(pwrctrl);

	spm_dpidle_post_process();

	__spm_get_wakeup_status(&wakesta);

	__spm_clean_after_wakeup();

	request_uart_to_wakeup();

	last_wr = __spm_output_wake_reason(&wakesta, pcmdesc, true);

RESTORE_IRQ:
	/* set PMIC WRAP table for normal power control */
	mt_cpufreq_set_pmic_phase(PMIC_WRAP_PHASE_NORMAL);

	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&__spm_lock, flags);

	if (!wd_ret)
		wd_api->wd_resume_notify();

	/* restore original dpidle setting */
	pwrctrl->timer_val = dpidle_timer_val;
	pwrctrl->wake_src = dpidle_wake_src;

	return last_wr;
}
void aee_stop_nested_panic(struct pt_regs *regs) { struct thread_info *thread = current_thread_info(); int len = 0; int timeout = 1000000; int res = 0, cpu = 0; struct wd_api *wd_api = NULL; struct pt_regs *excp_regs = NULL; int prev_fiq_step = aee_rr_curr_fiq_step(); /* everytime enter nested_panic flow, add 8 */ static int step_base = -8; step_base = step_base < 48 ? step_base + 8 : 56; aee_rec_step_nested_panic(step_base); local_irq_disable(); preempt_disable(); aee_rec_step_nested_panic(step_base + 1); cpu = get_HW_cpuid(); aee_rec_step_nested_panic(step_base + 2); /*nested panic may happens more than once on many/single cpus */ if (atomic_read(&nested_panic_time) < 3) aee_nested_printf("\nCPU%dpanic%d@%d\n", cpu, nested_panic_time, prev_fiq_step); atomic_inc(&nested_panic_time); switch (atomic_read(&nested_panic_time)) { case 2: aee_print_regs(regs); aee_nested_printf("backtrace:"); aee_print_bt(regs); break; /* must guarantee Only one cpu can run here */ /* first check if thread valid */ case 1: if (virt_addr_valid(thread) && virt_addr_valid(thread->regs_on_excp)) { excp_regs = thread->regs_on_excp; } else { /* if thread invalid, which means wrong sp or thread_info corrupted, check global aee_excp_regs instead */ aee_nested_printf("invalid thread [%x], excp_regs [%x]\n", thread, aee_excp_regs); excp_regs = aee_excp_regs; } aee_nested_printf("Nested panic\n"); if (excp_regs) { aee_nested_printf("Previous\n"); aee_print_regs(excp_regs); } aee_nested_printf("Current\n"); aee_print_regs(regs); /*should not print stack info. 
this may overwhelms ram console used by fiq */ if (0 != in_fiq_handler()) { aee_nested_printf("in fiq hander\n"); } else { /*Dump first panic stack */ aee_nested_printf("Previous\n"); if (excp_regs) { len = aee_nested_save_stack(excp_regs); aee_nested_printf("\nbacktrace:"); aee_print_bt(excp_regs); } /*Dump second panic stack */ aee_nested_printf("Current\n"); if (virt_addr_valid(regs)) { len = aee_nested_save_stack(regs); aee_nested_printf("\nbacktrace:"); aee_print_bt(regs); } } aee_rec_step_nested_panic(step_base + 5); ipanic_recursive_ke(regs, excp_regs, cpu); aee_rec_step_nested_panic(step_base + 6); /* we donot want a FIQ after this, so disable hwt */ res = get_wd_api(&wd_api); if (res) { aee_nested_printf("get_wd_api error\n"); } else { wd_api->wd_aee_confirm_hwreboot(); } aee_rec_step_nested_panic(step_base + 7); break; default: break; } /* waiting for the WDT timeout */ while (1) { /* output to UART directly to avoid printk nested panic */ /* mt_fiq_printf("%s hang here%d\t", __func__, i++); */ while (timeout--) { udelay(1); } timeout = 1000000; } }
/*
 * spm_go_to_sleep - enter system suspend via the SPM PCM.
 *
 * Picks the suspend (or talking) low-power scenario, notifies the
 * watchdog around the suspend window, masks IRQs (wakeups routed via
 * CIRQ), runs the PCM suspend firmware, and returns the decoded wake
 * reason. NOTE(review): sequence is order-sensitive; kept 1:1.
 */
wake_reason_t spm_go_to_sleep(u32 spm_flags, u32 spm_data)
{
	u32 sec = 0;
	int wd_ret;
	struct wake_status wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	struct wd_api *wd_api;
	static wake_reason_t last_wr = WR_NONE;
	struct pcm_desc *pcmdesc = __spm_suspend.pcmdesc;
	struct pwr_ctrl *pwrctrl = __spm_suspend.pwrctrl;
	struct spm_lp_scen *lpscen;

	lpscen = spm_check_talking_get_lpscen(&__spm_suspend, &spm_flags);
	pcmdesc = lpscen->pcmdesc;
	pwrctrl = lpscen->pwrctrl;

	set_pwrctrl_pcm_flags(pwrctrl, spm_flags);
	set_pwrctrl_pcm_data(pwrctrl, spm_data);

#if SPM_PWAKE_EN
	sec = spm_get_wake_period(-1 /* FIXME */, last_wr);
#endif
	pwrctrl->timer_val = sec * 32768;

	wd_ret = get_wd_api(&wd_api);
	if (!wd_ret)
		wd_api->wd_suspend_notify();

	spm_suspend_pre_process(pwrctrl);

	spin_lock_irqsave(&__spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(MT_SPM_IRQ_ID);
	mt_cirq_clone_gic();
	mt_cirq_enable();

	spm_set_sysclk_settle();

	spm_crit2("sec = %u, wakesrc = 0x%x (%u)(%u)\n",
		  sec, pwrctrl->wake_src, is_cpu_pdn(pwrctrl->pcm_flags),
		  is_infra_pdn(pwrctrl->pcm_flags));

	if (request_uart_to_sleep()) {
		last_wr = WR_UART_BUSY;
		goto RESTORE_IRQ;
	}

	__spm_reset_and_init_pcm(pcmdesc);

	__spm_kick_im_to_fetch(pcmdesc);

	__spm_init_pcm_register();

	__spm_init_event_vector(pcmdesc);

	__spm_set_power_control(pwrctrl);

	__spm_set_wakeup_event(pwrctrl);

	spm_kick_pcm_to_run(pwrctrl);

#if 0
	if (1 == spm_snapshot_golden_setting) {
		snapshot_golden_setting(__FUNCTION__, __LINE__);
		spm_snapshot_golden_setting = 2;
	}
#endif

	spm_trigger_wfi_for_sleep(pwrctrl);

	__spm_get_wakeup_status(&wakesta);

	spm_clean_after_wakeup();

	request_uart_to_wakeup();

	last_wr = spm_output_wake_reason(&wakesta, pcmdesc);

RESTORE_IRQ:
	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&__spm_lock, flags);

	spm_suspend_post_process(pwrctrl);

	if (!wd_ret)
		wd_api->wd_resume_notify();

	return last_wr;
}
/*
 * cpu_pdn:
 *   true = CPU shutdown
 *   false = CPU standby
 * infra_pdn:
 *   true = INFRA/DDRPHY power down
 *   false = keep INFRA/DDRPHY power
 * pwake_time:
 *   >= 0 = specific wakeup period
 *
 * spm_go_to_sleep - enter system suspend via the SPM PCM (legacy
 * variant). Notifies the watchdog around the suspend window, masks
 * IRQs with wakeups routed through CIRQ, runs the PCM suspend
 * firmware, and returns the decoded wake reason.
 * NOTE(review): sequence is order-sensitive; kept 1:1.
 */
wake_reason_t spm_go_to_sleep(bool cpu_pdn, bool infra_pdn, int pwake_time)
{
	u32 sec = 0;
	int wd_ret;
	wake_status_t wakesta;
	unsigned long flags;
	struct mtk_irq_mask mask;
	struct wd_api *wd_api;
	static wake_reason_t last_wr = WR_NONE;
	const pcm_desc_t *pcmdesc = &pcm_suspend;
	const bool pcmwdt_en = true;

#if SPM_PWAKE_EN
	sec = spm_get_wake_period(pwake_time, last_wr);
#endif

	wd_ret = get_wd_api(&wd_api);
	if (!wd_ret)
		wd_api->wd_suspend_notify();

	spin_lock_irqsave(&spm_lock, flags);
	mt_irq_mask_all(&mask);
	mt_irq_unmask_for_sleep(MT_SPM_IRQ_ID);
	mt_cirq_clone_gic();
	mt_cirq_enable();

	spm_set_sysclk_settle();

	spm_crit2("sec = %u, wakesrc = 0x%x (%u)(%u)\n",
		  sec, spm_sleep_wakesrc, cpu_pdn, infra_pdn);

	spm_reset_and_init_pcm();

	spm_kick_im_to_fetch(pcmdesc);

	if (spm_request_uart_to_sleep()) {
		last_wr = WR_UART_BUSY;
		goto RESTORE_IRQ;
	}

	spm_init_pcm_register();

	spm_init_event_vector(pcmdesc);

	spm_set_pwrctl_for_sleep();

	spm_set_wakeup_event(sec * 32768, spm_sleep_wakesrc);

	spm_kick_pcm_to_run(cpu_pdn, infra_pdn, pcmwdt_en);

	spm_trigger_wfi_for_sleep(cpu_pdn, infra_pdn);

	spm_get_wakeup_status(&wakesta);

	spm_clean_after_wakeup(pcmwdt_en);

	last_wr = spm_output_wake_reason(&wakesta, false);

RESTORE_IRQ:
	mt_cirq_flush();
	mt_cirq_disable();
	mt_irq_mask_restore(&mask);
	spin_unlock_irqrestore(&spm_lock, flags);

	/* spm_go_to_normal(); */	/* included in pcm_suspend */

	if (!wd_ret)
		wd_api->wd_resume_notify();

	return last_wr;
}