Example #1
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);
	/*
	 * We can't avoid sched_move_task() after we changed signal->autogroup,
	 * this process can already run with task_group() == prev->tg or we can
	 * race with cgroup code which can read autogroup = prev under rq->lock.
	 * In the latter case for_each_thread() can not miss a migrating thread,
	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
	 * can't be removed from thread list, we hold ->siglock.
	 *
	 * If an exiting thread was already removed from thread list we rely on
	 * sched_autogroup_exit_task().
	 */
	for_each_thread(p, t)
		sched_move_task(t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}
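All of these examples revolve around the same helper pair. For orientation, here is a reference sketch of its contract, following the shape of the declarations in include/linux/sched/signal.h (treat it as a sketch, not a verbatim copy):

/*
 * lock_task_sighand() returns the locked sighand_struct, or NULL if
 * the task is already being reaped -- callers must check for NULL
 * unless they can otherwise guarantee the task is alive (hence the
 * BUG_ON in Example #1). unlock_task_sighand() is the trivial
 * counterpart.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *task,
					 unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}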
Example #2
SYSCALL_DEFINE2(smunch, int, pid, unsigned long, bit_pattern)
{
	struct task_struct *target;
	unsigned long flags;

	rcu_read_lock();
	target = find_task_by_vpid(pid);
	if (target)
		get_task_struct(target);	/* pin the task before dropping RCU */
	rcu_read_unlock();

	if (!target) {
		printk(KERN_ALERT "Target does not exist\n");
		return -ESRCH;
	}

	if (!lock_task_sighand(target, &flags)) {
		printk(KERN_ALERT "Could not acquire sighand lock\n");
		put_task_struct(target);
		return -ESRCH;
	}

	if (!thread_group_empty(target)) {
		printk(KERN_ALERT "Multi-threaded process\n");
		unlock_task_sighand(target, &flags);
		put_task_struct(target);
		return -EINVAL;
	}

	if ((target->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) &&
	    (bit_pattern & (1UL << (SIGKILL - 1)))) {
		printk(KERN_ALERT "Killing process with PID %d\n", pid);
		unlock_task_sighand(target, &flags);
		release_task(target);	/* reap the zombie immediately */
	} else {
		sigaddsetmask(&target->signal->shared_pending.signal, bit_pattern);
		set_tsk_thread_flag(target, TIF_SIGPENDING);	/* make the wakeup notice the pending signals */
		wake_up_process(target);
		unlock_task_sighand(target, &flags);
	}
	put_task_struct(target);
	return 0;
}
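smunch() is a course-assignment syscall, not a mainline one. A hypothetical userspace caller, assuming the syscall was wired into the table as __NR_smunch (the number below is a placeholder, it depends on the local syscall table):

/* Hypothetical caller for the smunch example above. */
#include <sys/syscall.h>
#include <unistd.h>
#include <signal.h>

#ifndef __NR_smunch
#define __NR_smunch 451		/* placeholder syscall number */
#endif

static long smunch(pid_t pid, unsigned long bit_pattern)
{
	return syscall(__NR_smunch, pid, bit_pattern);
}

/* e.g. deliver SIGKILL through the bit pattern:
 *	smunch(pid, 1UL << (SIGKILL - 1));
 */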
Example #3
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);

	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
		goto out;

	t = p;
	do {
		sched_move_task(t);
	} while_each_thread(p, t);

out:
	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}
Example #4
static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec *tp)
{
	int err = -EINVAL;
	unsigned long long rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		unsigned long flags;
		struct sighand_struct *sighand;

		/*
		 * while_each_thread() is not yet entirely RCU safe,
		 * keep locking the group while sampling process
		 * clock for now.
		 */
		sighand = lock_task_sighand(tsk, &flags);
		if (!sighand)
			return err;

		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);

		unlock_task_sighand(tsk, &flags);
	}

	if (!err)
		sample_to_timespec(which_clock, rtn, tp);

	return err;
}
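posix_cpu_clock_get_task() is the kernel half of reading a process CPU clock; the sighand lock keeps the thread group stable while the process clock is sampled. A minimal userspace program that exercises this path through the standard POSIX API:

/* Read a process's CPU clock from userspace; this lands in the
 * CPUCLOCK path shown above. Build: cc -o cpuclock cpuclock.c
 * (older glibc needs -lrt). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>

int main(int argc, char **argv)
{
	pid_t pid = (argc > 1) ? (pid_t)atol(argv[1]) : 0;	/* 0 = self */
	clockid_t cid;
	struct timespec ts;
	int err;

	err = clock_getcpuclockid(pid, &cid);
	if (err) {	/* returns an error number, not -1/errno */
		fprintf(stderr, "clock_getcpuclockid: %s\n", strerror(err));
		return 1;
	}
	if (clock_gettime(cid, &ts)) {
		perror("clock_gettime");
		return 1;
	}
	printf("CPU time: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}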
Example #5
/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}
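The TIMER_RETRY protocol described in the header comment is driven by the generic posix-timers code: the caller holds the timer's lock, and on TIMER_RETRY it must drop that lock and try again. A simplified sketch of that caller-side loop (modeled on the timer-deletion path in kernel/posix-timers.c, not the verbatim kernel source):

static void delete_cpu_timer(struct k_itimer *timer)
{
	unsigned long flags;

retry:
	spin_lock_irqsave(&timer->it_lock, flags);
	if (posix_cpu_timer_del(timer) == TIMER_RETRY) {
		/* Timer is mid-firing: drop the lock so the firing
		 * code can finish, then attempt the deletion again. */
		spin_unlock_irqrestore(&timer->it_lock, flags);
		goto retry;
	}
	spin_unlock_irqrestore(&timer->it_lock, flags);
}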
Example #6
int tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
{
	struct tty_audit_buf *buf = ERR_PTR(-EPERM);
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;

	if (tsk->signal->audit_tty) {
		buf = tsk->signal->tty_audit_buf;
		if (buf)
			atomic_inc(&buf->count);
	}
	unlock_task_sighand(tsk, &flags);

	/*
	 * Return 0 when signal->audit_tty set
	 * but tsk->signal->tty_audit_buf == NULL.
	 */
	if (!buf || IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&buf->mutex);
	tty_audit_buf_push(tsk, loginuid, sessionid, buf);
	mutex_unlock(&buf->mutex);

	tty_audit_buf_put(buf);
	return 0;
}
Example #7
/**
 * tty_audit_push_current -	Flush current's pending audit data
 *
 * Try to lock sighand and get a reference to the tty audit buffer if available.
 * Flush the buffer or return an appropriate error code.
 */
int tty_audit_push_current(void)
{
	struct tty_audit_buf *buf = ERR_PTR(-EPERM);
	struct task_struct *tsk = current;
	unsigned long flags;

	if (!lock_task_sighand(tsk, &flags))
		return -ESRCH;

	if (tsk->signal->audit_tty) {
		buf = tsk->signal->tty_audit_buf;
		if (buf)
			atomic_inc(&buf->count);
	}
	unlock_task_sighand(tsk, &flags);

	/*
	 * Return 0 when signal->audit_tty set
	 * but tsk->signal->tty_audit_buf == NULL.
	 */
	if (!buf || IS_ERR(buf))
		return PTR_ERR(buf);

	mutex_lock(&buf->mutex);
	tty_audit_buf_push(buf);
	mutex_unlock(&buf->mutex);

	tty_audit_buf_put(buf);
	return 0;
}
Example #8
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}
Example #9
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	rcu_read_lock();
	if (!lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return;
	}

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}

	unlock_task_sighand(p, &flags);
	rcu_read_unlock();

	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
Example #10
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);
	utime = stime = cputime_zero;

	if (who == RUSAGE_THREAD) {
		accumulate_thread_rusage(p, r, &utime, &stime);
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
		case RUSAGE_BOTH:
		case RUSAGE_CHILDREN:
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			r->ru_inblock = p->signal->cinblock;
			r->ru_oublock = p->signal->coublock;

			if (who == RUSAGE_CHILDREN)
				break;

		case RUSAGE_SELF:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			r->ru_inblock += p->signal->inblock;
			r->ru_oublock += p->signal->oublock;
			t = p;
			do {
				accumulate_thread_rusage(t, r, &utime, &stime);
				t = next_thread(t);
			} while (t != p);
			break;

		default:
			BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);
}
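Both k_getrusage() variants feed the getrusage(2) syscall: totals cached in the signal struct are combined with per-thread counters under the sighand lock. The userspace view:

/* Userspace counterpart of the k_getrusage() paths above. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) != 0) {
		perror("getrusage");
		return 1;
	}
	printf("user %ld.%06lds  sys %ld.%06lds  minflt %ld  majflt %ld\n",
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
	       ru.ru_minflt, ru.ru_majflt);
	return 0;
}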
Example #11
// Return the delivery count of a given signal for the current process
SYSCALL_DEFINE1(get_sigcounter, int, signumber)
{
	unsigned long flags;
	struct task_struct *p = current;

	if (signumber < 0 || signumber >= 64)	/* keep the array access in bounds */
		return -EINVAL;
	if (!lock_task_sighand(p, &flags))
		return -ESRCH;
	pr_alert("%d\n", p->sighand->sigcounter[signumber]);
	unlock_task_sighand(p, &flags);
	return 0;
}
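Examples #11, #13, and #15 presuppose a per-signal sigcounter array that does not exist in mainline. The sketch below spells out the kind of companion patch they assume; the field placement, hook point, and indexing convention are guesses:

/* Hypothetical companion patch for the sigcounter examples.
 * In include/linux/sched/signal.h, a per-signal tally would be
 * added to struct sighand_struct:
 *
 *	unsigned long	sigcounter[64];
 *
 * and bumped in the delivery path in kernel/signal.c, under
 * ->siglock, e.g. near the end of __send_signal():
 *
 *	t->sighand->sigcounter[sig - 1]++;
 *
 * Note the examples are inconsistent about indexing: Example #11
 * reads sigcounter[signumber] with the raw signal number, while
 * #13 and #15 clear slots 0..63.
 */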
Example #12
SYSCALL_DEFINE2(smunch, int, pid, unsigned long, bit_pattern)
{
	unsigned long flags;
	struct task_struct *task;
	int ret = 0;

	rcu_read_lock();
	task = pid_task(find_vpid(pid), PIDTYPE_PID);
	if (task)
		get_task_struct(task);	/* pin the task before dropping RCU */
	rcu_read_unlock();

	if (!task)	/* process not present */
		return -ESRCH;

	if (!lock_task_sighand(task, &flags)) {
		/* Lock not acquired: task is already dead or dying. */
		put_task_struct(task);
		return -ESRCH;
	}

	if (!thread_group_empty(task)) {
		printk(KERN_ALERT "Multi-threaded process, exiting without processing\n");
		ret = -EINVAL;
		goto return_path;
	}

	printk(KERN_ALERT "exit_state=0x%x, state=0x%lx\n",
	       task->exit_state, task->state);	/* info to user */
	if (task->state & TASK_UNINTERRUPTIBLE)
		printk(KERN_ALERT "Process is in uninterruptible wait (deep sleep)\n");

	if ((bit_pattern & (1UL << (SIGKILL - 1))) && (task->exit_state & EXIT_ZOMBIE)) {
		printk(KERN_ALERT "SIGKILL present while process is a zombie, releasing task\n");
		unlock_task_sighand(task, &flags);
		release_task(task);	/* detach_pid() is called from release_task() */
		put_task_struct(task);
		return 0;
	}

	/*
	 * If !SIGKILL, an ordinary process, or deep sleep: send all signals.
	 * It is the user's responsibility to note that the signals are
	 * handled in 1..64 order.
	 */
	task->signal->shared_pending.signal.sig[0] = bit_pattern;
	set_tsk_thread_flag(task, TIF_SIGPENDING);
	signal_wake_up(task, 1);

return_path:
	unlock_task_sighand(task, &flags);
	put_task_struct(task);
	return ret;
}
Example #13
// Reset the per-signal counters of a process
SYSCALL_DEFINE1(init_sigcounter, pid_t, pid)
{
	unsigned long flags;
	int i;
	struct task_struct *p;

	rcu_read_lock();
	p = pid_task(find_vpid(pid), PIDTYPE_PID);	/* needs the RCU read lock */
	if (!p || !lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	rcu_read_unlock();	/* ->siglock now pins the sighand */

	for (i = 0; i < 64; i++)
		p->sighand->sigcounter[i] = 0;
	unlock_task_sighand(p, &flags);
	return 0;
}
Example #14
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}
Example #15
int init_sigcounter(int pid)
{
	struct task_struct *p;
	struct sighand_struct *sighand;
	int i;
	unsigned long flags;

	rcu_read_lock();
	p = pid_task(find_vpid(pid), PIDTYPE_PID);	/* needs the RCU read lock */
	if (!p || !lock_task_sighand(p, &flags)) {
		rcu_read_unlock();
		return -ESRCH;
	}
	rcu_read_unlock();	/* ->siglock now pins the sighand */

	sighand = p->sighand;
	for (i = 0; i < 64; ++i)
		sighand->sigcounter[i] = 0;
	unlock_task_sighand(p, &flags);
	return 0;
}
Example #16
static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
{
	unsigned long flags;
	int error = -ESRCH;

	if (lock_task_sighand(child, &flags)) {
		error = -EINVAL;
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = *info;
			error = 0;
		}
		unlock_task_sighand(child, &flags);
	}
	return error;
}
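ptrace_setsiginfo() backs the PTRACE_SETSIGINFO request; last_siginfo is only non-NULL while the tracee sits in a signal stop, which is why the lock-protected NULL check yields -EINVAL otherwise. A minimal tracer that reaches this path (error handling trimmed for brevity):

/* PTRACE_GETSIGINFO/PTRACE_SETSIGINFO round trip; the SETSIGINFO
 * request ends up in ptrace_setsiginfo() above. */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, 0, 0);
		raise(SIGUSR1);			/* enter signal-delivery-stop */
		pause();
		return 0;
	}

	waitpid(pid, NULL, 0);			/* tracee stopped on SIGUSR1 */

	siginfo_t si;
	if (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) == 0) {
		si.si_code = SI_USER;		/* edit the queued siginfo */
		if (ptrace(PTRACE_SETSIGINFO, pid, 0, &si) != 0)
			perror("PTRACE_SETSIGINFO");
	}

	kill(pid, SIGKILL);			/* clean up */
	waitpid(pid, NULL, 0);
	return 0;
}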
Example #17
static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
		tsk->signal->group_exit_task = tsk;
		/* ignore all signals except SIGKILL, see prepare_signal() */
		tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	tsk->flags = PF_DUMPCORE;
	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
	/*
	 * We should find and kill all tasks which use this mm, and we should
	 * count them correctly into ->nr_threads. We don't take tasklist
	 * lock, but this is safe wrt:
	 *
	 * fork:
	 *	None of sub-threads can fork after zap_process(leader). All
	 *	processes which were created before this point should be
	 *	visible to zap_threads() because copy_process() adds the new
	 *	process to the tail of init_task.tasks list, and lock/unlock
	 *	of ->siglock provides a memory barrier.
	 *
	 * do_exit:
	 *	The caller holds mm->mmap_sem. This means that the task which
	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
	 *	its ->mm.
	 *
	 * de_thread:
	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
	 *	we must see either old or new leader, this does not matter.
	 *	However, it can change p->sighand, so lock_task_sighand(p)
	 *	must be used. Since p->mm != NULL and we hold ->mmap_sem
	 *	it can't fail.
	 *
	 *	Note also that "g" can be the old leader with ->mm == NULL
	 *	and already unhashed and thus removed from ->thread_group.
	 *	This is OK, __unhash_process()->list_del_rcu() does not
	 *	clear the ->next pointer, we will find the new leader via
	 *	next_thread().
	 */
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					p->signal->flags = SIGNAL_GROUP_EXIT;
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}
Example #18
static int task_get_unused_fd_flags(struct shfile_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	__set_open_fd(fd, fdt);
	if (flags & O_CLOEXEC)
		__set_close_on_exec(fd, fdt);
	else
		__clear_close_on_exec(fd, fdt);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}
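The snippet reads RLIMIT_NOFILE out of the target's signal struct under the sighand lock, since another thread may update the rlimits concurrently. The same soft limit is what userspace sees via getrlimit(2):

/* Userspace view of the RLIMIT_NOFILE soft limit consulted above. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	printf("soft %llu  hard %llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}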
Example #19
static int
ptrace_start(long pid, long request,
	     struct task_struct **childp,
	     struct utrace_attached_engine **enginep,
	     struct ptrace_state **statep)

{
	struct task_struct *child;
	struct utrace_attached_engine *engine;
	struct ptrace_state *state;
	int ret;

	NO_LOCKS;

	if (request == PTRACE_TRACEME)
		return ptrace_traceme();

	ret = -ESRCH;
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	pr_debug("ptrace pid %ld => %p\n", pid, child);
	if (!child)
		goto out;

	ret = -EPERM;
	if (pid == 1)		/* you may not mess with init */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	rcu_read_lock();
	engine = utrace_attach(child, UTRACE_ATTACH_MATCH_OPS,
			       &ptrace_utrace_ops, NULL);
	ret = -ESRCH;
	if (IS_ERR(engine) || engine == NULL)
		goto out_tsk_rcu;
	state = rcu_dereference(engine->data);
	if (state == NULL || state->parent != current)
		goto out_tsk_rcu;
	/*
	 * Traditional ptrace behavior demands that the target already be
	 * quiescent, but not dead.
	 */
	if (request != PTRACE_KILL
	    && !(engine->flags & UTRACE_ACTION_QUIESCE)) {
		/*
		 * If it's in job control stop, turn it into proper quiescence.
		 */
		struct sighand_struct *sighand;
		unsigned long flags;
		sighand = lock_task_sighand(child, &flags);
		if (likely(sighand != NULL)) {
			if (child->state == TASK_STOPPED)
				ret = 0;
			unlock_task_sighand(child, &flags);
		}
		if (ret == 0) {
			ret = ptrace_update(child, state,
					    UTRACE_ACTION_QUIESCE, 0);
			if (unlikely(ret == -EALREADY))
				ret = -ESRCH;
			if (unlikely(ret))
				BUG_ON(ret != -ESRCH);
		}

		if (ret) {
			pr_debug("%d not stopped (%lu)\n",
				 child->pid, child->state);
			goto out_tsk_rcu;
		}

		ret = -ESRCH;  /* Return value for exit_state bail-out.  */
	}

	atomic_inc(&state->refcnt);
	rcu_read_unlock();

	NO_LOCKS;

	/*
	 * We do this for all requests to match traditional ptrace behavior.
	 * If the machine state synchronization done at context switch time
	 * includes e.g. writing back to user memory, we want to make sure
	 * that has finished before a PTRACE_PEEKDATA can fetch the results.
	 * On most machines, only regset data is affected by context switch
	 * and calling utrace_regset later on will take care of that, so
	 * this is superfluous.
	 *
	 * To do this purely in utrace terms, we could do:
	 *  (void) utrace_regset(child, engine, utrace_native_view(child), 0);
	 */
	if (request != PTRACE_KILL) {
		wait_task_inactive(child);
		while (child->state != TASK_TRACED && child->state != TASK_STOPPED) {
			if (child->exit_state) {
				__ptrace_state_free(state);
				goto out_tsk;
			}

			task_lock(child);
			if (child->mm && child->mm->core_waiters) {
				task_unlock(child);
				__ptrace_state_free(state);
				goto out_tsk;
			}
			task_unlock(child);

			/*
			 * This is a dismal kludge, but it only comes up on ia64.
			 * It might be blocked inside regset->writeback() called
			 * from ptrace_report(), when it's on its way to quiescing
			 * in TASK_TRACED real soon now.  We actually need that
			 * writeback call to have finished, before a PTRACE_PEEKDATA
			 * here, for example.  So keep waiting until it's really there.
			 */
			yield();
			wait_task_inactive(child);
		}
	}
	wait_task_inactive(child);

	*childp = child;
	*enginep = engine;
	*statep = state;
	return -EIO;

out_tsk_rcu:
	rcu_read_unlock();
out_tsk:
	NO_LOCKS;
	put_task_struct(child);
out:
	return ret;
}
Example #20
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;
	unsigned long flags;
	int num_threads = 1;

	if (lock_task_sighand(p, &flags)) {
		num_threads = atomic_read(&p->signal->count);
		unlock_task_sighand(p, &flags);
	}

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);
	PN(se.avg_overlap);
	PN(se.avg_wakeup);
	PN(se.avg_running);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.wait_start);
	PN(se.sleep_start);
	PN(se.block_start);
	PN(se.sleep_max);
	PN(se.block_max);
	PN(se.exec_max);
	PN(se.slice_max);
	PN(se.wait_max);
	PN(se.wait_sum);
	P(se.wait_count);
	PN(se.iowait_sum);
	P(se.iowait_count);
	P(sched_info.bkl_count);
	P(se.nr_migrations);
	P(se.nr_migrations_cold);
	P(se.nr_failed_migrations_affine);
	P(se.nr_failed_migrations_running);
	P(se.nr_failed_migrations_hot);
	P(se.nr_forced_migrations);
	P(se.nr_wakeups);
	P(se.nr_wakeups_sync);
	P(se.nr_wakeups_migrate);
	P(se.nr_wakeups_local);
	P(se.nr_wakeups_remote);
	P(se.nr_wakeups_affine);
	P(se.nr_wakeups_affine_attempts);
	P(se.nr_wakeups_passive);
	P(se.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}
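proc_sched_show_task() renders /proc/&lt;pid&gt;/sched on kernels built with CONFIG_SCHED_DEBUG; the sighand lock is held only briefly, to read the thread count. A trivial reader:

/* Dump the output of the function above for the current process.
 * Requires a kernel built with CONFIG_SCHED_DEBUG. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/sched", "r");

	if (!f) {
		perror("/proc/self/sched");	/* likely no CONFIG_SCHED_DEBUG */
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}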