Example #1
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		accumulate_thread_rusage(p, r, &utime, &stime);
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;
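			/* fall through: RUSAGE_BOTH also accumulates self usage */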

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				accumulate_thread_rusage(t, r, &utime, &stime);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
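The fields k_getrusage() fills map directly onto the userspace struct rusage; a minimal illustrative sketch (not part of the kernel source) querying them through the getrusage(2) wrapper:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage r;

	/* RUSAGE_SELF sums the whole thread group, as in the kernel code above */
	if (getrusage(RUSAGE_SELF, &r) != 0)
		return 1;
	printf("user %ld.%06lds, sys %ld.%06lds, minflt %ld, majflt %ld\n",
	       (long)r.ru_utime.tv_sec, (long)r.ru_utime.tv_usec,
	       (long)r.ru_stime.tv_sec, (long)r.ru_stime.tv_usec,
	       r.ru_minflt, r.ru_majflt);
	return 0;
}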
Example #2
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;

			if (who == RUSAGE_CHILDREN)
				break;
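			/* fall through: RUSAGE_BOTH also accumulates self usage */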

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
Example #3
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
				     cputime_t *utimep, cputime_t *stimep)
{
	*utimep = cputime_add(*utimep, t->utime);
	*stimep = cputime_add(*stimep, t->stime);
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}
Example #4
asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
{
    /*
     *	In the SMP world we might just be unlucky and have one of
     *	the times increment as we use it. Since the value is an
     *	atomically safe type this is just fine. Conceptually it's
     *	as if the syscall took an instant longer to occur.
     */
    if (tbuf) {
        struct compat_tms tmp;
        struct task_struct *tsk = current;
        struct task_struct *t;
        cputime_t utime, stime, cutime, cstime;

        read_lock(&tasklist_lock);
        utime = tsk->signal->utime;
        stime = tsk->signal->stime;
        t = tsk;
        do {
            utime = cputime_add(utime, t->utime);
            stime = cputime_add(stime, t->stime);
            t = next_thread(t);
        } while (t != tsk);

        /*
         * While we have tasklist_lock read-locked, no dying thread
         * can be updating current->signal->[us]time.  Instead,
         * we got their counts included in the live thread loop.
         * However, another thread can come in right now and
         * do a wait call that updates current->signal->c[us]time.
         * To make sure we always see that pair updated atomically,
         * we take the siglock around fetching them.
         */
        spin_lock_irq(&tsk->sighand->siglock);
        cutime = tsk->signal->cutime;
        cstime = tsk->signal->cstime;
        spin_unlock_irq(&tsk->sighand->siglock);
        read_unlock(&tasklist_lock);

        tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
        tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
        tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
        tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
        if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
            return -EFAULT;
    }
    return compat_jiffies_to_clock_t(jiffies);
}
Example #5
static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   struct itimerval *const value)
{
	cputime_t cval, cinterval;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero)) {
		struct task_cputime cputime;
		cputime_t t;

		thread_group_cputimer(tsk, &cputime);
		if (clock_id == CPUCLOCK_PROF)
			t = cputime_add(cputime.utime, cputime.stime);
		else
			/* CPUCLOCK_VIRT */
			t = cputime.utime;

		if (cputime_le(cval, t))
			/* about to fire */
			cval = cputime_one_jiffy;
		else
			cval = cputime_sub(cval, t);
	}

	spin_unlock_irq(&tsk->sighand->siglock);

	cputime_to_timeval(cval, &value->it_value);
	cputime_to_timeval(cinterval, &value->it_interval);
}
Example #6
static ushort thread_group_cpu_share(struct task_struct *task) 
{
  struct task_cputime times;
  cputime_t num_load, div_load, total_time;
  ushort share;

  my_thread_group_cputime(task, &times);  
  total_time = cputime_add(times.utime, times.stime);
  /*
    last_cputime == 0 means that the timer_function has been called
    for the first time and we have to collect info before doing any
    check.
  */
  if (unlikely(last_cputime == 0)) {
    share = 0;
    printk(KERN_INFO "sendsig: timer initialization completed\n");
  } else {
    /*
      Let's compute the share of cpu usage for the last WAIT_TIMEOUT
      seconds
    */
    num_load = cputime_sub(total_time, last_cputime) * 100;
    div_load = jiffies_to_cputime(wait_timeout * HZ);
    share = (ushort)cputime_div(num_load, div_load);
    
    printk(KERN_DEBUG "sendsig: computed cpu share for process %d: %d\n", 
	   pid, share);
  }
  /*
    Update last_cputime
  */
  last_cputime = total_time;

  return share;
}
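The share computation above reduces to window arithmetic: cputime consumed during the window, times 100, divided by the window length in the same units. A self-contained sketch with plain tick counts (the function and parameter names here are mine, not from the module):

#include <stdio.h>

/* Model of the num_load/div_load arithmetic in thread_group_cpu_share(),
 * with cputime_t replaced by plain tick counts for illustration. */
static unsigned int cpu_share_percent(unsigned long total_ticks,
				      unsigned long last_ticks,
				      unsigned long window_ticks)
{
	return (unsigned int)((total_ticks - last_ticks) * 100 / window_ticks);
}

int main(void)
{
	/* 250 of 1000 window ticks consumed -> 25% */
	printf("%u%%\n", cpu_share_percent(1250, 1000, 1000));
	return 0;
}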
Example #7
int do_getitimer(int which, struct itimerval *value)
{
    struct task_struct *tsk = current;
    cputime_t cinterval, cval;

    switch (which) {
    case ITIMER_REAL:
        spin_lock_irq(&tsk->sighand->siglock);
        value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
        value->it_interval =
            ktime_to_timeval(tsk->signal->it_real_incr);
        spin_unlock_irq(&tsk->sighand->siglock);
        break;
    case ITIMER_VIRTUAL:
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_virt_expires;
        cinterval = tsk->signal->it_virt_incr;
        if (!cputime_eq(cval, cputime_zero)) {
            struct task_cputime cputime;
            cputime_t utime;

            thread_group_cputimer(tsk, &cputime);
            utime = cputime.utime;
            if (cputime_le(cval, utime)) { /* about to fire */
                cval = jiffies_to_cputime(1);
            } else {
                cval = cputime_sub(cval, utime);
            }
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        cputime_to_timeval(cval, &value->it_value);
        cputime_to_timeval(cinterval, &value->it_interval);
        break;
    case ITIMER_PROF:
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_prof_expires;
        cinterval = tsk->signal->it_prof_incr;
        if (!cputime_eq(cval, cputime_zero)) {
            struct task_cputime times;
            cputime_t ptime;

            thread_group_cputimer(tsk, &times);
            ptime = cputime_add(times.utime, times.stime);
            if (cputime_le(cval, ptime)) { /* about to fire */
                cval = jiffies_to_cputime(1);
            } else {
                cval = cputime_sub(cval, ptime);
            }
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        cputime_to_timeval(cval, &value->it_value);
        cputime_to_timeval(cinterval, &value->it_interval);
        break;
    default:
        return(-EINVAL);
    }
    return 0;
}
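Seen from userspace, the ITIMER_PROF branch above backs the getitimer(2) call; a minimal sketch (illustration only) that arms a profiling timer with setitimer() and reads the remaining time back:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval set = {
		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
	};
	struct itimerval cur;

	if (setitimer(ITIMER_PROF, &set, NULL) != 0)
		return 1;
	/* do_getitimer() computes cur.it_value as expiry minus CPU time used */
	if (getitimer(ITIMER_PROF, &cur) != 0)
		return 1;
	printf("remaining %ld.%06lds, interval %ld.%06lds\n",
	       (long)cur.it_value.tv_sec, (long)cur.it_value.tv_usec,
	       (long)cur.it_interval.tv_sec, (long)cur.it_interval.tv_usec);
	return 0;
}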
Example #8
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
			        union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	}  else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
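For context, cpu_time_add() operates on the old two-member union from include/linux/posix-timers.h; the layout below is reconstructed from memory of that kernel era and should be treated as an assumption:

/* Assumed historical layout of the accumulator cpu_time_add() updates */
union cpu_time_count {
	cputime_t cpu;			/* CPUCLOCK_PROF and CPUCLOCK_VIRT */
	unsigned long long sched;	/* CPUCLOCK_SCHED, raw nanoseconds */
};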
Example #9
asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
Example #10
/**
 * thread_group_cputime - Sum the thread group time fields across all CPUs.
 *
 * @tsk:	The task we use to identify the thread group.
 * @times:	task_cputime structure in which we return the summed fields.
 *
 * Walk the list of CPUs to sum the per-CPU time fields in the thread group
 * time structure.
 */
void thread_group_cputime(
	struct task_struct *tsk,
	struct task_cputime *times)
{
	struct signal_struct *sig;
	int i;
	struct task_cputime *tot;

	sig = tsk->signal;
	if (unlikely(!sig) || !sig->cputime.totals) {
		times->utime = tsk->utime;
		times->stime = tsk->stime;
		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
		return;
	}
	times->stime = times->utime = cputime_zero;
	times->sum_exec_runtime = 0;
	for_each_possible_cpu(i) {
		tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
		times->utime = cputime_add(times->utime, tot->utime);
		times->stime = cputime_add(times->stime, tot->stime);
		times->sum_exec_runtime += tot->sum_exec_runtime;
	}
}
Example #11
static int uptime_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct timespec uptime;
	struct timespec idle;
	int len;
	cputime_t idletime = cputime_add(init_task.utime, init_task.stime);

	do_posix_clock_monotonic_gettime(&uptime);
	cputime_to_timespec(idletime, &idle);
	len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
			(unsigned long) uptime.tv_sec,
			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) idle.tv_sec,
			(idle.tv_nsec / (NSEC_PER_SEC / 100)));

	return proc_calc_metrics(page, start, off, count, eof, len);
}
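The handler formats two "seconds.centiseconds" pairs (uptime and idle time); a minimal userspace sketch (illustration only) reading them back from /proc/uptime:

#include <stdio.h>

int main(void)
{
	double uptime, idle;
	FILE *f = fopen("/proc/uptime", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lf %lf", &uptime, &idle) != 2) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("up %.2fs, idle %.2fs\n", uptime, idle);
	return 0;
}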
Example #12
void my_thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,28)
	struct task_struct *t;
	struct sighand_struct *sighand;

	*times = INIT_CPUTIME;

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (!sighand)
		goto out;

	sig = tsk->signal;

	t = tsk;
	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;

		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
out:
	rcu_read_unlock();
#else
	int i;
	struct task_cputime *tot;

	sig = tsk->signal;
	if (unlikely(!sig) || !sig->cputime.totals) {
		times->utime = tsk->utime;
		times->stime = tsk->stime;
		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
		return;
	}
	times->stime = times->utime = cputime_zero;
	times->sum_exec_runtime = 0;
	for_each_possible_cpu(i) {
		tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
		times->utime = cputime_add(times->utime, tot->utime);
		times->stime = cputime_add(times->stime, tot->stime);
		times->sum_exec_runtime += tot->sum_exec_runtime;
	}
#endif
}
Example #13
static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   const struct itimerval *const value,
			   struct itimerval *const ovalue)
{
	cputime_t cval, nval, cinterval, ninterval;
	s64 ns_ninterval, ns_nval;
	u32 error, incr_error;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	nval = timeval_to_cputime(&value->it_value);
	ns_nval = timeval_to_ns(&value->it_value);
	ninterval = timeval_to_cputime(&value->it_interval);
	ns_ninterval = timeval_to_ns(&value->it_interval);

	error = cputime_sub_ns(nval, ns_nval);
	incr_error = cputime_sub_ns(ninterval, ns_ninterval);

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero) ||
	    !cputime_eq(nval, cputime_zero)) {
		if (cputime_gt(nval, cputime_zero))
			nval = cputime_add(nval, cputime_one_jiffy);
		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
	}
	it->expires = nval;
	it->incr = ninterval;
	it->error = error;
	it->incr_error = incr_error;
	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);

	spin_unlock_irq(&tsk->sighand->siglock);

	if (ovalue) {
		cputime_to_timeval(cval, &ovalue->it_value);
		cputime_to_timeval(cinterval, &ovalue->it_interval);
	}
}
Example #14
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
    struct task_struct *tsk = current;
    struct hrtimer *timer;
    ktime_t expires;
    cputime_t cval, cinterval, nval, ninterval;

    /*
     * Validate the timevals in value.
     */
    if (!timeval_valid(&value->it_value) ||
            !timeval_valid(&value->it_interval))
        return -EINVAL;

    switch (which) {
    case ITIMER_REAL:
again:
        spin_lock_irq(&tsk->sighand->siglock);
        timer = &tsk->signal->real_timer;
        if (ovalue) {
            ovalue->it_value = itimer_get_remtime(timer);
            ovalue->it_interval
                = ktime_to_timeval(tsk->signal->it_real_incr);
        }
        /* We are sharing ->siglock with it_real_fn() */
        if (hrtimer_try_to_cancel(timer) < 0) {
            spin_unlock_irq(&tsk->sighand->siglock);
            hrtimer_wait_for_timer(&tsk->signal->real_timer);
            goto again;
        }
        expires = timeval_to_ktime(value->it_value);
        if (expires.tv64 != 0) {
            tsk->signal->it_real_incr =
                timeval_to_ktime(value->it_interval);
            hrtimer_start(timer, expires, HRTIMER_MODE_REL);
        } else
            tsk->signal->it_real_incr.tv64 = 0;

        spin_unlock_irq(&tsk->sighand->siglock);
        break;
    case ITIMER_VIRTUAL:
        nval = timeval_to_cputime(&value->it_value);
        ninterval = timeval_to_cputime(&value->it_interval);
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_virt_expires;
        cinterval = tsk->signal->it_virt_incr;
        if (!cputime_eq(cval, cputime_zero) ||
                !cputime_eq(nval, cputime_zero)) {
            if (cputime_gt(nval, cputime_zero))
                nval = cputime_add(nval,
                                   jiffies_to_cputime(1));
            set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
                                  &nval, &cval);
        }
        tsk->signal->it_virt_expires = nval;
        tsk->signal->it_virt_incr = ninterval;
        spin_unlock_irq(&tsk->sighand->siglock);
        if (ovalue) {
            cputime_to_timeval(cval, &ovalue->it_value);
            cputime_to_timeval(cinterval, &ovalue->it_interval);
        }
        break;
    case ITIMER_PROF:
        nval = timeval_to_cputime(&value->it_value);
        ninterval = timeval_to_cputime(&value->it_interval);
        spin_lock_irq(&tsk->sighand->siglock);
        cval = tsk->signal->it_prof_expires;
        cinterval = tsk->signal->it_prof_incr;
        if (!cputime_eq(cval, cputime_zero) ||
                !cputime_eq(nval, cputime_zero)) {
            if (cputime_gt(nval, cputime_zero))
                nval = cputime_add(nval,
                                   jiffies_to_cputime(1));
            set_process_cpu_timer(tsk, CPUCLOCK_PROF,
                                  &nval, &cval);
        }
        tsk->signal->it_prof_expires = nval;
        tsk->signal->it_prof_incr = ninterval;
        spin_unlock_irq(&tsk->sighand->siglock);
        if (ovalue) {
            cputime_to_timeval(cval, &ovalue->it_value);
            cputime_to_timeval(cinterval, &ovalue->it_interval);
        }
        break;
    default:
        return -EINVAL;
    }
    return 0;
}
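The timeval_valid() check at the top is observable from userspace: a tv_usec of 1000000 or more is out of range, so on kernels with this validation setitimer(2) fails with EINVAL. A minimal sketch:

#include <errno.h>
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval bad = {
		.it_value = { .tv_sec = 0, .tv_usec = 1000000 },	/* invalid */
	};

	if (setitimer(ITIMER_REAL, &bad, NULL) != 0 && errno == EINVAL)
		printf("rejected with EINVAL, as expected\n");
	return 0;
}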
Example #15
int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
{
	struct task_struct *tsk = current;
	struct hrtimer *timer;
	ktime_t expires;
	cputime_t cval, cinterval, nval, ninterval;

	/*
	 * Validate the timevals in value.
	 *
	 * Note: Although the spec requires that invalid values shall
	 * return -EINVAL, we just fixup the value and print a limited
	 * number of warnings in order not to break users of this
	 * historical misfeature.
	 *
	 * Scheduled for replacement in March 2007
	 */
	check_itimerval(value);

	switch (which) {
	case ITIMER_REAL:
again:
		spin_lock_irq(&tsk->sighand->siglock);
		timer = &tsk->signal->real_timer;
		if (ovalue) {
			ovalue->it_value = itimer_get_remtime(timer);
			ovalue->it_interval
				= ktime_to_timeval(tsk->signal->it_real_incr);
		}
		/* We are sharing ->siglock with it_real_fn() */
		if (hrtimer_try_to_cancel(timer) < 0) {
			spin_unlock_irq(&tsk->sighand->siglock);
			goto again;
		}
		tsk->signal->it_real_incr =
			timeval_to_ktime(value->it_interval);
		expires = timeval_to_ktime(value->it_value);
		if (expires.tv64 != 0)
			hrtimer_start(timer, expires, HRTIMER_REL);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
					      &nval, &cval);
		}
		tsk->signal->it_virt_expires = nval;
		tsk->signal->it_virt_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	case ITIMER_PROF:
		nval = timeval_to_cputime(&value->it_value);
		ninterval = timeval_to_cputime(&value->it_interval);
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero) ||
		    !cputime_eq(nval, cputime_zero)) {
			if (cputime_gt(nval, cputime_zero))
				nval = cputime_add(nval,
						   jiffies_to_cputime(1));
			set_process_cpu_timer(tsk, CPUCLOCK_PROF,
					      &nval, &cval);
		}
		tsk->signal->it_prof_expires = nval;
		tsk->signal->it_prof_incr = ninterval;
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		if (ovalue) {
			cputime_to_timeval(cval, &ovalue->it_value);
			cputime_to_timeval(cinterval, &ovalue->it_interval);
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
Example #16
int do_getitimer(int which, struct itimerval *value)
{
	struct task_struct *tsk = current;
	cputime_t cinterval, cval;

	switch (which) {
	case ITIMER_REAL:
		spin_lock_irq(&tsk->sighand->siglock);
		value->it_value = itimer_get_remtime(&tsk->signal->real_timer);
		value->it_interval =
			ktime_to_timeval(tsk->signal->it_real_incr);
		spin_unlock_irq(&tsk->sighand->siglock);
		break;
	case ITIMER_VIRTUAL:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_virt_expires;
		cinterval = tsk->signal->it_virt_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t utime = tsk->signal->utime;
			do {
				utime = cputime_add(utime, t->utime);
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, utime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, utime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	case ITIMER_PROF:
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		cval = tsk->signal->it_prof_expires;
		cinterval = tsk->signal->it_prof_incr;
		if (!cputime_eq(cval, cputime_zero)) {
			struct task_struct *t = tsk;
			cputime_t ptime = cputime_add(tsk->signal->utime,
						      tsk->signal->stime);
			do {
				ptime = cputime_add(ptime,
						    cputime_add(t->utime,
								t->stime));
				t = next_thread(t);
			} while (t != tsk);
			if (cputime_le(cval, ptime)) { /* about to fire */
				cval = jiffies_to_cputime(1);
			} else {
				cval = cputime_sub(cval, ptime);
			}
		}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
		cputime_to_timeval(cval, &value->it_value);
		cputime_to_timeval(cinterval, &value->it_interval);
		break;
	default:
		return(-EINVAL);
	}
	return 0;
}
Example #17
asmlinkage long sys_times(struct tms __user * tbuf)
{
    /*
     *	In the SMP world we might just be unlucky and have one of
     *	the times increment as we use it. Since the value is an
     *	atomically safe type this is just fine. Conceptually it's
     *	as if the syscall took an instant longer to occur.
     */
    if (tbuf) {
        struct tms tmp;
        cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
        if (thread_group_empty(current)) {
            /*
             * Single thread case without the use of any locks.
             *
             * We may race with release_task if two threads are
             * executing. However, release task first adds up the
             * counters (__exit_signal) before  removing the task
             * from the process tasklist (__unhash_process).
             * __exit_signal also acquires and releases the
             * siglock which results in the proper memory ordering
             * so that the list modifications are always visible
             * after the counters have been updated.
             *
             * If the counters have been updated by the second thread
             * but the thread has not yet been removed from the list
             * then the other branch will be executing which will
             * block on tasklist_lock until the exit handling of the
             * other task is finished.
             *
             * This also implies that the sighand->siglock cannot
             * be held by another processor. So we can also
             * skip acquiring that lock.
             */
            utime = cputime_add(current->signal->utime, current->utime);
            stime = cputime_add(current->signal->stime, current->stime);
            cutime = current->signal->cutime;
            cstime = current->signal->cstime;
        } else
#endif
        {

            /* Process with multiple threads */
            struct task_struct *tsk = current;
            struct task_struct *t;

            read_lock(&tasklist_lock);
            utime = tsk->signal->utime;
            stime = tsk->signal->stime;
            t = tsk;
            do {
                utime = cputime_add(utime, t->utime);
                stime = cputime_add(stime, t->stime);
                t = next_thread(t);
            } while (t != tsk);

            /*
             * While we have tasklist_lock read-locked, no dying thread
             * can be updating current->signal->[us]time.  Instead,
             * we got their counts included in the live thread loop.
             * However, another thread can come in right now and
             * do a wait call that updates current->signal->c[us]time.
             * To make sure we always see that pair updated atomically,
             * we take the siglock around fetching them.
             */
            spin_lock_irq(&tsk->sighand->siglock);
            cutime = tsk->signal->cutime;
            cstime = tsk->signal->cstime;
            spin_unlock_irq(&tsk->sighand->siglock);
            read_unlock(&tasklist_lock);
        }
        tmp.tms_utime = cputime_to_clock_t(utime);
        tmp.tms_stime = cputime_to_clock_t(stime);
        tmp.tms_cutime = cputime_to_clock_t(cutime);
        tmp.tms_cstime = cputime_to_clock_t(cstime);
        if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
            return -EFAULT;
    }
    return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
Example #18
void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))
		return;

	switch (who) {
		case RUSAGE_CHILDREN:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		case RUSAGE_SELF:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = stime = cputime_zero;
			goto sum_group;
		case RUSAGE_BOTH:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
		sum_group:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		default:
			BUG();
	}
}