static ushort thread_group_cpu_share(struct task_struct *task) { struct task_cputime times; cputime_t num_load, div_load, total_time; ushort share; my_thread_group_cputime(task, ×); total_time = cputime_add(times.utime, times.stime); /* last_cputime == 0 means that the timer_function has been called for the first time and we have to collect info before doing any check. */ if (unlikely(last_cputime == 0)) { share = 0; printk(KERN_INFO "sendsig: timer initialization completed\n"); } else { /* Let's compute the share of cpu usage for the last WAIT_TIMEOUT seconds */ num_load = cputime_sub(total_time, last_cputime) * 100; div_load = jiffies_to_cputime(wait_timeout * HZ); share = (ushort)cputime_div(num_load, div_load); printk(KERN_DEBUG "sendsig: computed cpu share for process %d: %d\n", pid, share); } /* Update last_cputime */ last_cputime = total_time; return share; }
/*
 * Divide @time by @div, clamping the quotient so it is never below 1.
 *
 * Prevents signal-delivery starvation: without the clamp, a small
 * dividend would round down to 0 and no signal would ever be sent.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t quotient = cputime_div(time, div);

	return (quotient < 1) ? 1 : quotient;
}