static int hang_detect_thread(void *arg) { /* unsigned long flags; */ struct sched_param param = { .sched_priority = RTPM_PRIO_WDT}; LOGE("[Hang_Detect] hang_detect thread starts.\n"); sched_setscheduler(current, SCHED_FIFO, ¶m); while (1) { if ((1==hd_detect_enabled) && (FindTaskByName("system_server")!=-1)) { LOGE("[Hang_Detect] hang_detect thread counts down %d:%d.\n", hang_detect_counter, hd_timeout); if (hang_detect_counter<=0) { ShowStatus () ; } if (hang_detect_counter==0) { LOGE("[Hang_Detect] we should triger HWT ... \n") ; #ifdef CONFIG_MT_ENG_BUILD aee_kernel_warning("\nCRDISPATCH_KEY:SS Hang\n", "we triger HWT"); msleep (10*1000) ; #else aee_kernel_warning("\nCRDISPATCH_KEY:SS Hang\n", "we triger HWT"); msleep (10*1000) ; local_irq_disable () ; while (1); BUG () ; #endif } hang_detect_counter -- ; } else { /* incase of system_server restart, we give 2 mins more.(4*HD_INTER) */ if (1==hd_detect_enabled) { hang_detect_counter = hd_timeout + 4 ; hd_detect_enabled = 0 ; } LOGE("[Hang_Detect] hang_detect disabled.\n") ; } msleep((HD_INTER) * 1000); } return 0 ; }
/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0)) {
		sem->count--;
	} else {
#ifdef CONFIG_MT_LOCK_DEBUG
		/* same task taking the semaphore twice: likely a deadlock */
		if (sem->owner == current) {
			char aee_str[40];

			/* BUGFIX: pointer was printed with 0x%x (format/type
			 * mismatch, truncated on 64-bit); use %p. */
			printk("[Warning!Recursive Semaphore!][%d:%s], down_interruptible:%s(%p)\n",
			       current->pid, current->comm, sem->sem_name, sem);
			/* BUGFIX: bound the write; sprintf could overflow the
			 * 40-byte buffer. */
			snprintf(aee_str, sizeof(aee_str),
				 "Recursive SemLock:%s\n", current->comm);
			aee_kernel_warning(aee_str, "mtlock debugger\n");
			dump_stack();
		}
#endif
		result = __down_interruptible(sem);
	}
#ifdef CONFIG_MT_LOCK_DEBUG
	/* record the new owner while the count is exhausted */
	if (0 == sem->count)
		sem_set_owner(sem);
#endif
	spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
static int get_sys_all_cpu_freq_info(void) { int i; int cpu_total_dmips = 0; for (i=0 ; i<NO_CPU_CORES ; i++) { cpufreqs[i] = cpufreq_quick_get(i)/1000; // MHz cpu_total_dmips += cpufreqs[i]; } cpu_total_dmips /= 1000; // TODO: think a way to easy start and stop, and start for only once if (1 == check_dmips_limit) { if (cpu_total_dmips > mtktscpu_limited_dmips) { THRML_ERROR_LOG("cpu %d over limit %d\n", cpu_total_dmips, mtktscpu_limited_dmips); if (dmips_limit_warned == false) { aee_kernel_warning("thermal", "cpu %d over limit %d\n", cpu_total_dmips, mtktscpu_limited_dmips); dmips_limit_warned = true; } } } return 0; }
/*
 * mtlte_df_DL_release_buff - return DL buffer credit for queue @qno.
 * When per-queue skb freeing is enabled the skb is freed here and exactly
 * one credit is released; once the outstanding counter drops back under the
 * threshold, the "queue full" latch is cleared and UL processing is kicked
 * to resume DL reception.  Calling this before init raises an AEE warning.
 */
void mtlte_df_DL_release_buff(MTLTE_DF_RX_QUEUE_TYPE qno,
			      unsigned int buff_amount, struct sk_buff *skb)
{
	if (true == lte_df_core.fl_ctrl_enable[qno]) {
		if (true == lte_df_core.fl_ctrl_free_skb[qno]) {
			buff_amount = 1;
			dev_kfree_skb_any(skb);
		}
		/* BUGFIX: source had garbled "<e_df_core" (mojibake for
		 * &lte_df_core) in both atomic calls below. */
		atomic_sub(buff_amount, &lte_df_core.fl_ctrl_counter[qno]);
		if ((true == lte_df_core.fl_ctrl_full[qno])
		    && (atomic_read(&lte_df_core.fl_ctrl_counter[qno]) <=
			lte_df_core.fl_ctrl_threshold[qno])) {
			lte_df_core.fl_ctrl_full[qno] = false;
			/* kick proccess up to re-receive DL packet */
			mtlte_df_UL_kick_proccess();
		}
	} else {
		/* red screen - kernel api warning */
		char error_srt[64] = { 0 };

		/* bounded write; sprintf offered no protection */
		snprintf(error_srt, sizeof(error_srt),
			 "DL_release_buff is called without Init");
		aee_kernel_warning("[EEMCS] Use flow ctrl API without Init",
				   error_srt);
	}
}
/*
 * mtk_audit_hook - inspect an SELinux avc warning line; if the violating
 * scontext is on the MTK warning list, log it and (when enforcing) raise
 * an AEE kernel warning carrying the process name and the raw avc data.
 */
void mtk_audit_hook(char *data)
{
	char scontext[AEE_FILTER_LEN] = { '\0' };
	char *pname = scontext;
	char printBuf[AEE_FILTER_LEN] = { '\0' };
	int ret = 0;

	/* get scontext from avc warning */
	ret = mtk_get_scontext(data, scontext);
	if (!ret)
		return;

	/* check scontext is in warning list */
	ret = mtk_check_filter(scontext);
	if (ret >= 0) {
		pname = mtk_get_process(scontext);
		if (pname != 0) {
			printk("[selinux]Enforce: %d, In AEE Warning List scontext: %s\n",
			       selinux_enforcing, pname);
			/* BUGFIX: pname plus the fixed prefix can exceed
			 * AEE_FILTER_LEN; sprintf would overflow printBuf,
			 * snprintf truncates safely. */
			snprintf(printBuf, sizeof(printBuf),
				 "Selinux Enforce violation: %s ", pname);
#ifdef CONFIG_MTK_AEE_FEATURE
			if (selinux_enforcing)
				aee_kernel_warning(printBuf,
						   "\nCR_DISPATCH_PROCESSNAME:%s\n%s",
						   pname, data);
#endif
		}
	}
}
/*
 * spin_bug - report a spinlock debugging violation: dump the lock state
 * and raise an AEE kernel warning tagged with the current task and @msg.
 */
static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
	char aee_str[40];

	/* if (!debug_locks_off()) */
	/*         return; */

	spin_dump(lock, msg);
	/* BUGFIX: comm (up to 16 bytes) plus an arbitrary-length msg could
	 * overflow the 40-byte buffer with sprintf; snprintf truncates. */
	snprintf(aee_str, sizeof(aee_str), "Spinlock %s :%s\n",
		 current->comm, msg);
	aee_kernel_warning(aee_str, "spinlock debugger\n");
}
/*
 * proc_write_generate_kernel_notify - /proc write handler that turns a
 * user string of the form "<R|W|E>:<module>:<message>" into an AEE
 * reminding / warning / exception report.  Returns @count on success or a
 * negative errno on malformed input.
 */
static int proc_write_generate_kernel_notify(struct file *file,
					     const char __user *buf,
					     unsigned long count, void *data)
{
	char msg[164], *colon_ptr;

	if (count == 0)
		return -EINVAL;

	/* need at least "X:m:" plus room for the NUL terminator */
	if ((count < 5) || (count >= sizeof(msg))) {
		/* BUGFIX: sizeof() yields size_t; %d was a printk format
		 * mismatch — use %zu. */
		xlog_printk(ANDROID_LOG_WARN, AEK_LOG_TAG,
			    "aed: %s count sould be >= 5 and <= %zu bytes.\n",
			    __func__, sizeof(msg));
		return -EINVAL;
	}

	if (copy_from_user(msg, buf, count)) {
		xlog_printk(ANDROID_LOG_WARN, AEK_LOG_TAG,
			    "aed: %s unable to read message\n", __func__);
		return -EFAULT;
	}

	/* Be safe: count < sizeof(msg), so this index is in bounds */
	msg[count] = 0;

	if (msg[1] != ':')
		return -EINVAL;

	/* module name ends at the second ':' and may be at most 30 chars */
	colon_ptr = strchr(&msg[2], ':');
	if ((colon_ptr == NULL) || ((colon_ptr - msg) > 32)) {
		xlog_printk(ANDROID_LOG_WARN, AEK_LOG_TAG,
			    "aed: %s cannot find valid module name\n",
			    __func__);
		return -EINVAL;
	}
	*colon_ptr = 0;

	switch (msg[0]) {
	case 'R':
		aee_kernel_reminding(&msg[2], colon_ptr + 1);
		break;
	case 'W':
		aee_kernel_warning(&msg[2], colon_ptr + 1);
		break;
	case 'E':
		aee_kernel_exception(&msg[2], colon_ptr + 1);
		break;
	default:
		return -EINVAL;
	}

	return count;
}
/*
 * aee_kernel_wdt_kick_api - per-kick powerkey-hang monitor.
 * Called with the watchdog kick interval (seconds).  While the powerkey
 * monitor is armed (and only in NORMAL_BOOT with system_server present),
 * it counts kicks for up to 3 minutes: if Display and SurfaceFlinger have
 * not both kicked by then, it reports a UI hang (AEE warning on eng
 * builds, WDT_PWK_HANG_FORCE_HWT return code otherwise); if both kicked,
 * the monitor is simply disarmed.  Returns 0 when nothing is wrong.
 */
int aee_kernel_wdt_kick_api(int kinterval)
{
	int ret=0;

#ifdef CONFIG_MTK_AEE_POWERKEY_HANG_DETECT
	if (pwk_start_monitor && (get_boot_mode() == NORMAL_BOOT)
	    && (FindTaskByName("system_server") != -1)) {
		/* Only in normal_boot! */
		LOGE("Press powerkey!! g_boot_mode=%d,wdt_kick_status=0x%x,tickTimes=0x%x,g_kinterval=%d,RT[%lld]\n",get_boot_mode(),wdt_kick_status,hwt_kick_times,kinterval,sched_clock());
		hwt_kick_times++;
		if ((kinterval * hwt_kick_times > 180)) /* only monitor 3 min */
		{
			pwk_start_monitor=0;
			/* check all modules is ok~~~ */
			if ((wdt_kick_status & (WDT_SETBY_Display | WDT_SETBY_SF)) != (WDT_SETBY_Display | WDT_SETBY_SF)) {
#ifdef CONFIG_MT_ENG_BUILD
				ShowStatus();	/* catch task kernel bt */
				LOGE("[WDK] Powerkey Tick fail,kick_status 0x%08x,RT[%lld]\n ", wdt_kick_status, sched_clock());
				aee_kernel_warning("\nCRDISPATCH_KEY:UI Hang(Powerkey)\n", "Powerkey Monitor");
#else
				ShowStatus();	/* catch task kernel bt */
				LOGE("[WDK] Powerkey Tick fail,kick_status 0x%08x,RT[%lld]\n ", wdt_kick_status, sched_clock());
				ret = WDT_PWK_HANG_FORCE_HWT;	/* trigger HWT */
#endif
			}
		}
		/* both UI modules kicked in time: disarm the monitor */
		if ((wdt_kick_status & (WDT_SETBY_Display | WDT_SETBY_SF)) == (WDT_SETBY_Display | WDT_SETBY_SF)) {
			pwk_start_monitor=0;
			LOGE("[WDK] Powerkey Tick ok,kick_status 0x%08x,RT[%lld]\n ", wdt_kick_status, sched_clock());
		}
	}
#endif
	return ret;
}
/*
 * get_sys_all_cpu_freq_info - read each CPU's current frequency from its
 * sysfs cpufreq node, cache per-core values in cpufreqs[], and raise a
 * one-shot AEE warning when the summed figure exceeds the thermal DMIPS
 * limit.  Always returns 0.
 */
static int get_sys_all_cpu_freq_info(void)
{
	int nCPU_freq_temp, i;
	char szTempBuf[512];
	int cpu_total_dmips = 0;

	for(i=0 ; i<NUMBER_OF_CORE ; i++)
	{
		/* path fits easily in 512 bytes for single-digit core ids */
		sprintf(szTempBuf, "/sys/devices/system/cpu/cpu%01d/cpufreq/cpuinfo_cur_freq", i);
		/* NOTE(review): second argument's meaning (3) is defined by
		 * get_sys_cpu_freq_info elsewhere — presumably a read mode or
		 * retry count; confirm against its definition. */
		nCPU_freq_temp = get_sys_cpu_freq_info(szTempBuf, 3);
		if(nCPU_freq_temp > 0)
		{
			cpufreqs[i] = nCPU_freq_temp/1000;	/* kHz -> MHz */
			cpu_total_dmips += nCPU_freq_temp;
		}
		else
		{
			/* CPU is unplug now */
			/* NOTE(review): nCPU_freq_temp is <= 0 here, so *10
			 * stores a zero/negative sentinel in cpufreqs[i] —
			 * looks intentional as an "offline" marker, but the
			 * factor 10 is unexplained; verify with callers. */
			cpufreqs[i] = nCPU_freq_temp*10;
		}
	}
	cpu_total_dmips /= 1000;

	// TODO: think a way to easy start and stop, and start for only once
	if (1 == check_dmips_limit)
	{
		if (cpu_total_dmips > mtktscpu_limited_dmips)
		{
			THRML_ERROR_LOG("cpu %d over limit %d\n", cpu_total_dmips, mtktscpu_limited_dmips);
			/* warn through AEE only once per boot */
			if (dmips_limit_warned == false)
			{
				aee_kernel_warning("thermal", "cpu %d over limit %d\n", cpu_total_dmips, mtktscpu_limited_dmips);
				dmips_limit_warned = true;
			}
		}
	}
	return 0;
}
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) { irqreturn_t retval = IRQ_NONE; unsigned int random = 0, irq = desc->irq_data.irq; #ifdef CONFIG_MTPROF_IRQ_DURATION unsigned long long t1, t2, dur; #ifdef CONFIG_ISR_MONITOR char aee_str[40]; #endif #endif do { irqreturn_t res; trace_irq_handler_entry(irq, action); #ifdef CONFIG_MTPROF_IRQ_DURATION t1 = sched_clock(); res = action->handler(irq, action->dev_id); t2 = sched_clock(); dur = t2 - t1; action->duration += dur; action->count++; action->dur_max = max(dur,action->dur_max); action->dur_min = min(dur,action->dur_min); #ifdef CONFIG_MTPROF_CPUTIME if(mtsched_enabled == 1) { int isr_find = 0; struct mtk_isr_info *mtk_isr_point = current->se.mtk_isr; struct mtk_isr_info *mtk_isr_current = mtk_isr_point; char *isr_name = NULL; current->se.mtk_isr_time += dur; while(mtk_isr_point != NULL) { if(mtk_isr_point->isr_num == irq) { mtk_isr_point->isr_time += dur; mtk_isr_point->isr_count++; isr_find = 1; break; } mtk_isr_current = mtk_isr_point; mtk_isr_point = mtk_isr_point -> next; } if(isr_find == 0) { mtk_isr_point = kmalloc(sizeof(struct mtk_isr_info), GFP_ATOMIC); if(mtk_isr_point == NULL) { printk(KERN_ERR"cant' alloc mtk_isr_info mem!\n"); } else { mtk_isr_point->isr_num = irq; mtk_isr_point->isr_time = dur; mtk_isr_point->isr_count = 1; mtk_isr_point->next = NULL; if(mtk_isr_current == NULL) { current->se.mtk_isr = mtk_isr_point; } else { mtk_isr_current->next = mtk_isr_point; } isr_name = kmalloc(sizeof(action->name),GFP_ATOMIC); if(isr_name != NULL) { strcpy(isr_name, action->name); mtk_isr_point->isr_name = isr_name; } else { printk(KERN_ERR"cant' alloc isr_name mem!\n"); } current->se.mtk_isr_count++; } } } #endif #ifdef CONFIG_ISR_MONITOR if(unlikely(dur>TIME_3MS)){ if(in_white_list(irq)){ printk("[ISR Monitor] Warning! 
ISR%d:%s too long, %llu ns > 3 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2); }else if(dur>TIME_6MS){ sprintf( aee_str, "ISR#%d:%s too long>6ms\n", irq, action->name); aee_kernel_exception( aee_str,"isr_monitor\n"); printk("[ISR Monitor] Warning! ISR%d:%s too long, %llu ns > 10 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2); }else{ sprintf( aee_str, "ISR#%d:%s too long>3ms\n", irq, action->name); aee_kernel_warning( aee_str,"isr_monitor\n"); printk("[ISR Monitor] Warning! ISR%d:%s too long, %llu ns > 3 ms, t1:%llu, t2:%llu\n", irq, action->name, dur, t1, t2); } } #endif #else res = action->handler(irq, action->dev_id); #endif trace_irq_handler_exit(irq, action, res); if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pF enabled interrupts\n", irq, action->handler)) local_irq_disable(); switch (res) { case IRQ_WAKE_THREAD: /* * Catch drivers which return WAKE_THREAD but * did not set up a thread function */ if (unlikely(!action->thread_fn)) { warn_no_thread(irq, action); break; } irq_wake_thread(desc, action); /* Fall through to add to randomness */ case IRQ_HANDLED: random |= action->flags; break; default: break; } retval |= res; action = action->next; } while (action); if (random & IRQF_SAMPLE_RANDOM) add_interrupt_randomness(irq); if (!noirqdebug) note_interrupt(irq, desc, retval); return retval; }