/*
 * task_cputime_adjusted - report a single task's user/system time, scaled
 * against its total scheduler runtime so the split is monotonic.
 * @p:  task to sample
 * @ut: out - adjusted user time
 * @st: out - adjusted system time
 */
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	/* Seed with the scheduler's exact runtime; utime/stime filled below. */
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	/* prev_cputime keeps the adjusted values monotonically increasing. */
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	/* Sum utime/stime/runtime over the whole thread group, then scale. */
	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
/*
 * vtime_delta - nanoseconds elapsed since the task's last vtime snapshot.
 * Returns 0 if the clock appears to have gone backwards (e.g. unsynced
 * per-cpu clocks) rather than a huge unsigned wraparound value.
 */
static unsigned long long vtime_delta(struct task_struct *tsk)
{
	unsigned long long clock;

	clock = local_clock();
	if (clock < tsk->vtime_snap)
		return 0;

	return clock - tsk->vtime_snap;
}
/*
 * thread_group_cputimer - sample the shared thread-group cputimer,
 * starting it first if it is not already running.
 * @tsk:   any task in the group
 * @times: out - sampled group cputime
 *
 * NOTE(review): the running flag is intentionally read and written
 * without a lock; ordering relies on update_gt_cputime() tolerating
 * concurrent updates. Do not reorder these statements.
 */
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}
/*
 * thread_group_cputime_adjusted - report the group's raw user/system time.
 * In this configuration no scaling is applied; the summed values are
 * copied out directly.
 * @p:  any task in the thread group
 * @ut: out - group user time
 * @st: out - group system time
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime sum;

	thread_group_cputime(p, &sum);
	*st = sum.stime;
	*ut = sum.utime;
}
/* * Sample a process (thread group) clock for the given group_leader task. * Must be called with task sighand lock held for safe while_each_thread() * traversal. */ static int cpu_clock_sample_group(const clockid_t which_clock, struct task_struct *p, unsigned long long *sample) { struct task_cputime cputime; switch (CPUCLOCK_WHICH(which_clock)) { default: return -EINVAL; case CPUCLOCK_PROF: thread_group_cputime(p, &cputime); *sample = cputime_to_expires(cputime.utime + cputime.stime); break; case CPUCLOCK_VIRT: thread_group_cputime(p, &cputime); *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: thread_group_cputime(p, &cputime); *sample = cputime.sum_exec_runtime; break; } return 0; }
/*
 * task_cputime_adjusted - report one task's user/system time, run through
 * cputime_adjust() so the reported split stays monotonic over time.
 * @p:  task to sample
 * @ut: out - adjusted user time
 * @st: out - adjusted system time
 */
void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime raw = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &raw.utime, &raw.stime);
	cputime_adjust(&raw, &p->prev_cputime, ut, st);
}

/*
 * Must be called with siglock held.
 */
void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime raw;

	/* Sum over every thread in the group, then scale against history. */
	thread_group_cputime(p, &raw);
	cputime_adjust(&raw, &p->signal->prev_cputime, ut, st);
}
/* * Record the proc cpu statistic when petting watchdog */ void htc_watchdog_pet_cpu_record(void) { struct task_struct *p; ulong flags; struct task_cputime cputime; if (prev_proc_stat == NULL) return; spin_lock_irqsave(&lock, flags); get_all_cpu_stat(&old_cpu_stat); /* calculate the cpu time of each process */ for_each_process(p) { if (p->pid < MAX_PID) { thread_group_cputime(p, &cputime); prev_proc_stat[p->pid] = cputime.stime + cputime.utime; } } spin_unlock_irqrestore(&lock, flags); }
/*
 * thread_group_cputimer - sample the group cputimer, starting it on
 * first use (older spinlock-protected variant).
 * @tsk:   any task in the group
 * @times: out - sampled group cputime
 *
 * NOTE(review): cputimer->running is read without the lock as a fast
 * path; the slow path re-synchronizes the timer under cputimer->lock
 * before publishing running = 1. Both branches leave the lock held so
 * the sample below is consistent. Do not reorder.
 */
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		/* Never let the cached time go backwards versus the fresh sum. */
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}
void htc_kernel_top(void) { struct task_struct *p; int top_loading[NUM_BUSY_THREAD_CHECK], i; unsigned long user_time, system_time, io_time; unsigned long irq_time, idle_time, delta_time; ulong flags; struct task_cputime cputime; int dump_top_stack = 0; if (task_ptr_array == NULL || curr_proc_delta == NULL || prev_proc_stat == NULL) return; spin_lock_irqsave(&lock, flags); get_all_cpu_stat(&new_cpu_stat); /* calculate the cpu time of each process */ for_each_process(p) { thread_group_cputime(p, &cputime); if (p->pid < MAX_PID) { curr_proc_delta[p->pid] = (cputime.utime + cputime.stime) - (prev_proc_stat[p->pid]); task_ptr_array[p->pid] = p; } } /* sorting to get the top cpu consumers */ sorting(curr_proc_delta, top_loading); /* calculate the total delta time */ user_time = (unsigned long)((new_cpu_stat.user + new_cpu_stat.nice) - (old_cpu_stat.user + old_cpu_stat.nice)); system_time = (unsigned long)(new_cpu_stat.system - old_cpu_stat.system); io_time = (unsigned long)(new_cpu_stat.iowait - old_cpu_stat.iowait); irq_time = (unsigned long)((new_cpu_stat.irq + new_cpu_stat.softirq) - (old_cpu_stat.irq + old_cpu_stat.softirq)); idle_time = (unsigned long) ((new_cpu_stat.idle + new_cpu_stat.steal + new_cpu_stat.guest) - (old_cpu_stat.idle + old_cpu_stat.steal + old_cpu_stat.guest)); delta_time = user_time + system_time + io_time + irq_time + idle_time; /* * Check if we need to dump the call stack of top CPU consumers * If CPU usage keeps 100% for 90 secs */ if ((full_loading_counter >= 9) && (full_loading_counter % 3 == 0)) dump_top_stack = 1; /* print most time consuming processes */ printk("[K] CPU Usage\t\tPID\t\tName\n"); for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) { printk("[K] %lu%%\t\t%d\t\t%s\t\t\t%d\n", curr_proc_delta[top_loading[i]] * 100 / delta_time, top_loading[i], task_ptr_array[top_loading[i]]->comm, curr_proc_delta[top_loading[i]]); } /* check if dump busy thread stack */ if (dump_top_stack) { struct task_struct *t; for (i = 0 ; i < 
NUM_BUSY_THREAD_CHECK ; i++) { if (task_ptr_array[top_loading[i]] != NULL && task_ptr_array[top_loading[i]]->stime > 0) { t = task_ptr_array[top_loading[i]]; /* dump all the thread stack of this process */ do { printk("\n[K] ###pid:%d name:%s state:%lu ppid:%d stime:%lu utime:%lu\n", t->pid, t->comm, t->state, t->real_parent->pid, t->stime, t->utime); show_stack(t, t->stack); t = next_thread(t); } while (t != task_ptr_array[top_loading[i]]); } } } /* save old values */ for_each_process(p) { if (p->pid < MAX_PID) { thread_group_cputime(p, &cputime); prev_proc_stat[p->pid] = cputime.stime + cputime.utime; } } old_cpu_stat = new_cpu_stat; memset(curr_proc_delta, 0, sizeof(int) * MAX_PID); memset(task_ptr_array, 0, sizeof(int) * MAX_PID); spin_unlock_irqrestore(&lock, flags); }
void htc_kernel_top(void) { struct task_struct *p; int top_loading[NUM_BUSY_THREAD_CHECK], i; unsigned long user_time, system_time, io_time; unsigned long irq_time, idle_time, delta_time; ulong flags; struct task_cputime cputime; int dump_top_stack = 0; if (task_ptr_array == NULL || curr_proc_delta == NULL || prev_proc_stat == NULL) return; spin_lock_irqsave(&lock, flags); get_all_cpu_stat(&new_cpu_stat); for_each_process(p) { thread_group_cputime(p, &cputime); if (p->pid < MAX_PID) { curr_proc_delta[p->pid] = (cputime.utime + cputime.stime) - (prev_proc_stat[p->pid]); task_ptr_array[p->pid] = p; } } sorting(curr_proc_delta, top_loading); user_time = (unsigned long)((new_cpu_stat.cpustat[CPUTIME_USER] + new_cpu_stat.cpustat[CPUTIME_NICE]) - (old_cpu_stat.cpustat[CPUTIME_USER] + old_cpu_stat.cpustat[CPUTIME_NICE])); system_time = (unsigned long)(new_cpu_stat.cpustat[CPUTIME_SYSTEM] - old_cpu_stat.cpustat[CPUTIME_SYSTEM]); io_time = (unsigned long)(new_cpu_stat.cpustat[CPUTIME_IOWAIT] - old_cpu_stat.cpustat[CPUTIME_IOWAIT]); irq_time = (unsigned long)((new_cpu_stat.cpustat[CPUTIME_IRQ] + new_cpu_stat.cpustat[CPUTIME_SOFTIRQ]) - (old_cpu_stat.cpustat[CPUTIME_IRQ] + old_cpu_stat.cpustat[CPUTIME_SOFTIRQ])); idle_time = (unsigned long) ((new_cpu_stat.cpustat[CPUTIME_IDLE] + new_cpu_stat.cpustat[CPUTIME_STEAL] + new_cpu_stat.cpustat[CPUTIME_GUEST]) - (old_cpu_stat.cpustat[CPUTIME_IDLE] + old_cpu_stat.cpustat[CPUTIME_STEAL] + old_cpu_stat.cpustat[CPUTIME_GUEST])); delta_time = user_time + system_time + io_time + irq_time + idle_time; if ((full_loading_counter >= 9) && (full_loading_counter % 3 == 0)) dump_top_stack = 1; printk(KERN_INFO "[K] CPU Usage\t\tPID\t\tName\n"); for (i = 0 ; i < NUM_BUSY_THREAD_CHECK ; i++) { printk(KERN_INFO "[K] %8lu%%\t\t%d\t\t%s\t\t%d\n", curr_proc_delta[top_loading[i]] * 100 / delta_time, top_loading[i], task_ptr_array[top_loading[i]]->comm, curr_proc_delta[top_loading[i]]); } if (dump_top_stack) { struct task_struct *t; for (i = 0 ; i < 
NUM_BUSY_THREAD_CHECK ; i++) { if (task_ptr_array[top_loading[i]] != NULL && task_ptr_array[top_loading[i]]->stime > 0) { t = task_ptr_array[top_loading[i]]; do { printk(KERN_INFO "\n[K] ###pid:%d name:%s state:%lu ppid:%d stime:%lu utime:%lu\n", t->pid, t->comm, t->state, t->real_parent->pid, t->stime, t->utime); show_stack(t, t->stack); t = next_thread(t); } while (t != task_ptr_array[top_loading[i]]); } } } for_each_process(p) { if (p->pid < MAX_PID) { thread_group_cputime(p, &cputime); prev_proc_stat[p->pid] = cputime.stime + cputime.utime; } } old_cpu_stat = new_cpu_stat; memset(curr_proc_delta, 0, sizeof(int) * MAX_PID); memset(task_ptr_array, 0, sizeof(int) * MAX_PID); spin_unlock_irqrestore(&lock, flags); }
/* * When watchdog bark, print the cpu statistic */ void htc_watchdog_top_stat(void) { struct task_struct *p; int top_loading[NUM_BUSY_THREAD_CHECK], i; unsigned long user_time, system_time, io_time; unsigned long irq_time, idle_time, delta_time; ulong flags; struct task_cputime cputime; struct cpu_usage_stat new_cpu_stat; if (task_ptr_array == NULL || curr_proc_delta == NULL || prev_proc_stat == NULL) return; memset(curr_proc_delta, 0, sizeof(int) * MAX_PID); memset(task_ptr_array, 0, sizeof(int) * MAX_PID); printk(KERN_ERR"\n\n[K][%s] Start to dump:\n", __func__); spin_lock_irqsave(&lock, flags); get_all_cpu_stat(&new_cpu_stat); /* calculate the cpu time of each process */ for_each_process(p) { thread_group_cputime(p, &cputime); if (p->pid < MAX_PID) { curr_proc_delta[p->pid] = (cputime.utime + cputime.stime) - (prev_proc_stat[p->pid]); task_ptr_array[p->pid] = p; } } /* sorting to get the top cpu consumers */ sorting(curr_proc_delta, top_loading); /* calculate the total delta time */ user_time = (unsigned long)((new_cpu_stat.user + new_cpu_stat.nice) - (old_cpu_stat.user + old_cpu_stat.nice)); system_time = (unsigned long)(new_cpu_stat.system - old_cpu_stat.system); io_time = (unsigned long)(new_cpu_stat.iowait - old_cpu_stat.iowait); irq_time = (unsigned long)((new_cpu_stat.irq + new_cpu_stat.softirq) - (old_cpu_stat.irq + old_cpu_stat.softirq)); idle_time = (unsigned long) ((new_cpu_stat.idle + new_cpu_stat.steal + new_cpu_stat.guest) - (old_cpu_stat.idle + old_cpu_stat.steal + old_cpu_stat.guest)); delta_time = user_time + system_time + io_time + irq_time + idle_time; /* print most time consuming processes */ printk(KERN_ERR"[K] CPU\t\tPID\t\tName\n"); for (i = 0; i < NUM_BUSY_THREAD_CHECK; i++) { printk(KERN_ERR "[K] %lu%%\t\t%d\t\t%s\n", curr_proc_delta[top_loading[i]] * 100 / delta_time, top_loading[i], task_ptr_array[top_loading[i]]->comm); } spin_unlock_irqrestore(&lock, flags); printk(KERN_ERR "\n"); }