Example 1
0
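/* Return the current value and interval of the interval timer selected by @which in *value. */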
int do_getitimer(int which, struct itimerval *value)
{
    struct task_struct *tsk = current;
    cputime_t cinterval, cval;

    switch (which) {
    case ITIMER_REAL:
        spin_lock_irq(&tsk->sighand->siglock);
        value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
        value->it_interval =
            ktime_to_timeval(tsk->signal->it_real_incr);
        spin_unlock_irq(&tsk->sighand->siglock);
        break;
    case ITIMER_VIRTUAL:
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_virt_expires;
        cinterval = tsk->signal->it_virt_incr;
        if (!cputime_eq(cval, cputime_zero)) {
            struct task_cputime cputime;
            cputime_t utime;

            thread_group_cputimer(tsk, &cputime);
            utime = cputime.utime;
            if (cputime_le(cval, utime)) { /* about to fire */
                cval = jiffies_to_cputime(1);
            } else {
                cval = cputime_sub(cval, utime);
            }
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        cputime_to_timeval(cval, &value->it_value);
        cputime_to_timeval(cinterval, &value->it_interval);
        break;
    case ITIMER_PROF:
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_prof_expires;
        cinterval = tsk->signal->it_prof_incr;
        if (!cputime_eq(cval, cputime_zero)) {
            struct task_cputime times;
            cputime_t ptime;

            thread_group_cputimer(tsk, &times);
            ptime = cputime_add(times.utime, times.stime);
            if (cputime_le(cval, ptime)) { /* about to fire */
                cval = jiffies_to_cputime(1);
            } else {
                cval = cputime_sub(cval, ptime);
            }
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        cputime_to_timeval(cval, &value->it_value);
        cputime_to_timeval(cinterval, &value->it_interval);
        break;
    default:
        return(-EINVAL);
    }
    return 0;
}
Example 2
0
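/* Finalize mtsched profiling info for task @p: record the end timestamp, exec runtime, ISR stats and utime/stime deltas. */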
void end_mtproc_info(struct task_struct *p)
{
	struct mt_proc_struct *mtproc = mt_proc_head;

	mutex_lock(&mt_cputime_lock);
	/* bail out if profiling is not enabled */
	if (!mtsched_enabled) {
		mutex_unlock(&mt_cputime_lock);
		return;
	}

	/* linear search of the proc_info list; may be slow for long lists */
	while (mtproc != NULL && p->pid != mtproc->pid)
		mtproc = mtproc->next;

	if (mtproc == NULL) {
		printk(KERN_WARNING "pid:%d can't be found in mtsched proc_info.\n", p->pid);
		mutex_unlock(&mt_cputime_lock);
		return;
	}
	mtproc->prof_end = sched_clock();
	/* update cputime */
	mtproc->cputime = p->se.sum_exec_runtime;
	mtproc->isr_time = p->se.mtk_isr_time;
	mtproc->isr_count = p->se.mtk_isr_count;
	mtproc->mtk_isr = p->se.mtk_isr;
	p->se.mtk_isr = NULL;
	mt_task_times(p,&mtproc->utime, &mtproc->stime);
	mtproc->utime = cputime_sub(mtproc->utime, mtproc->utime_init);
	mtproc->stime = cputime_sub(mtproc->stime, mtproc->stime_init);

	mutex_unlock(&mt_cputime_lock);
	return;
}
Example 3
0
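/* Credit the jiffies elapsed since the last update to the current frequency's time_in_state slot. */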
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);

	#ifdef CONFIG_NC_DEBUG
	printk(KERN_INFO "============== CPUFREQ STATS\n");
	printk(KERN_INFO "CPUFREQ STATS: cpu: %u\n", stat->cpu);
	printk(KERN_INFO "CPUFREQ STATS: total_trans: %u\n", stat->total_trans);
	printk(KERN_INFO "CPUFREQ STATS: last_time: %llu\n", stat->last_time);
	printk(KERN_INFO "CPUFREQ STATS: max_state: %u\n", stat->max_state);
	printk(KERN_INFO "CPUFREQ STATS: state_num: %u\n", stat->state_num);
	printk(KERN_INFO "CPUFREQ STATS: last_index: %u\n", stat->last_index);
	if (stat->time_in_state)
		printk(KERN_INFO "CPUFREQ STATS: time_in_state[last_index]: %llu\n",
		       (unsigned long long)cputime64_to_jiffies64(
				stat->time_in_state[stat->last_index]));
	printk(KERN_INFO "CPUFREQ STATS: *freq_table: %p\n", stat->freq_table);
	#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	printk(KERN_INFO "CPUFREQ STATS: *trans_table: %p\n", stat->trans_table);
	#endif
	printk(KERN_INFO "============= END CPUFREQ STATS\n");
	#endif

	return 0;
}
Example 4
0
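/* Read the remaining expiry time and interval of a CPU-time interval timer (virtual or profiling) under siglock. */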
static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   struct itimerval *const value)
{
	cputime_t cval, cinterval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero)) {
		struct task_cputime cputime;
		cputime_t t;

		thread_group_cputimer(tsk, &cputime);
		if (clock_id == CPUCLOCK_PROF)
			t = cputime_add(cputime.utime, cputime.stime);
		else
			/* CPUCLOCK_VIRT */
			t = cputime.utime;

		if (cputime_le(cval, t))
			/* about to fire */
			cval = cputime_one_jiffy;
		else
			cval = cputime_sub(cval, t);
	}

	spin_unlock_irq(&tsk->sighand->siglock);

	cputime_to_timeval(cval, &value->it_value);
	cputime_to_timeval(cinterval, &value->it_interval);
}
Example 5
0
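/* As above, but also accumulates the elapsed time into the per-cpu all_cpufreq_stats table. */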
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
		if (all_stat)
			all_stat->time_in_state[stat->last_index] +=
					cur_time - stat->last_time;
	}
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Example 6
0
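/* Compute the thread group's CPU usage over the last wait_timeout seconds, as a percentage. */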
static ushort thread_group_cpu_share(struct task_struct *task) 
{
  struct task_cputime times;
  cputime_t num_load, div_load, total_time;
  ushort share;

  my_thread_group_cputime(task, &times);  
  total_time = cputime_add(times.utime, times.stime);
  /*
    last_cputime == 0 means that the timer_function has been called
    for the first time and we have to collect info before doing any
    check.
  */
  if (unlikely(last_cputime == 0)) {
    share = 0;
    printk(KERN_INFO "sendsig: timer initialization completed\n");
  } else {
    /*
      Let's compute the share of cpu usage for the last WAIT_TIMEOUT
      seconds
    */
    num_load = cputime_sub(total_time, last_cputime) * 100;
    div_load = jiffies_to_cputime(wait_timeout * HZ);
    share = (ushort)cputime_div(num_load, div_load);
    
    printk(KERN_DEBUG "sendsig: computed cpu share for process %d: %d\n", 
	   pid, share);
  }
  /*
    Update last_cputime
  */
  last_cputime = total_time;

  return share;
}
Example 7
0
void update_busfreq_stat(struct busfreq_data *data, unsigned int index)
{
#ifdef BUSFREQ_DEBUG
	unsigned long long cur_time = get_jiffies_64();
	data->time_in_state[index] = cputime64_add(data->time_in_state[index], cputime_sub(cur_time, data->last_time));
	data->last_time = cur_time;
#endif
}
Example 8
0
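/* Subtract two cpu_time_count values: raw scheduler time for CPUCLOCK_SCHED, cputime otherwise. */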
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
Example 9
0
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));

	if (cpu == 0)
		cpu0_time_in_state[stat->last_index] =
			cputime64_add(cpu0_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
	else if (cpu == 1)
		cpu1_time_in_state[stat->last_index] =
			cputime64_add(cpu1_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
#ifdef CONFIG_QUAD_CORES_SOC_STAT
	else if (cpu == 2)
		cpu2_time_in_state[stat->last_index] =
			cputime64_add(cpu2_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
	else if (cpu == 3)
		cpu3_time_in_state[stat->last_index] =
			cputime64_add(cpu3_time_in_state[stat->last_index],
			cputime_sub(cur_time, stat->last_time));
#endif
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Example 10
0
static int
cpufreq_stats_update (unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = cpufreq_stats_table[cpu];
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Example 11
0
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
#if defined (CONFIG_MACH_SAMSUNG_P5)
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return -1;
	}
#endif

	if (stat->time_in_state && stat->last_index >= 0)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Example 12
0
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	if (cpu_is_offline(cpu))
		return 0;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}

	if (stat->time_in_state && stat->last_index >= 0)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
Example 13
0
/**
 * acct_update_integrals - update mm integral fields in task_struct
 * @tsk: task_struct for accounting
 */
void acct_update_integrals(struct task_struct *tsk)
{
	if (likely(tsk->mm)) {
		cputime_t time, dtime;
		struct timeval value;
		unsigned long flags;
		u64 delta;

		local_irq_save(flags);
		time = cputime_add(tsk->stime, tsk->utime);
		dtime = cputime_sub(time, tsk->acct_timexpd);
		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
		delta = value.tv_sec;
		delta = delta * USEC_PER_SEC + value.tv_usec;

		if (delta == 0)
			goto out;
		tsk->acct_timexpd = time;
		tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
		tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
	out:
		local_irq_restore(flags);
	}
}
Example 14
0
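/* Older do_getitimer() variant: walks every thread in the group under tasklist_lock and sums utime/stime directly instead of using thread_group_cputimer(). */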
int do_getitimer(int which, struct itimerval *value)
{
	struct task_struct *tsk = current;
	cputime_t cinterval, cval;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
		value->it_interval =
			ktime_to_timeval(tsk->signal->it_real_incr);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t utime = tsk->signal->utime;
			do {
				utime = cputime_add(utime, t->utime);
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, utime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, utime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	case ITIMER_PROF:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t ptime = cputime_add(tsk->signal->utime,
						      tsk->signal->stime);
			do {
				ptime = cputime_add(ptime,
						    cputime_add(t->utime,
								t->stime));
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, ptime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, ptime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	default:
		return(-EINVAL);
	}
	return 0;
}