Пример #1
0
/*
 * Set @p's scheduling policy/priority without permission checks, on behalf
 * of the binder priority-inheritance path, and record the resulting
 * effective priority in the MT priority tracer.
 *
 * Returns 0 on success or the negative error from
 * sched_setscheduler_nocheck_core(); the tracer is only updated on success.
 */
int sched_setscheduler_nocheck_binder(struct task_struct *p, int policy,
				      const struct sched_param *param)
{
	int retval = sched_setscheduler_nocheck_core(p, policy, param);

	if (retval == 0) {
		int traced_prio;

		if (rt_policy(policy)) {
			/* Strip the MT "allow RT prio" marker bit before
			 * converting the user RT priority (1..MAX_RT_PRIO-1,
			 * higher = more urgent) into the kernel's inverted
			 * prio scale. */
			int rt_prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;

			traced_prio = MAX_RT_PRIO - 1 - rt_prio;
		} else {
			/* Non-RT: effective prio derives from nice level. */
			traced_prio = __normal_prio(p);
		}
		/* NOTE(review): literal 2 is presumably a PTS_* "binder"
		 * source id (cf. PTS_USER/PTS_KRNL used elsewhere) —
		 * confirm and use the named constant. */
		update_prio_tracer(task_pid_nr(p), traced_prio, policy, 2);
	}
	return retval;
}
/*
 * Apply a new nice value to @p on behalf of a user syscall and mirror the
 * resulting static priority (policy 0, i.e. normal) into the MT priority
 * tracer, tagged with the PTS_USER source.
 */
void set_user_nice_syscall(struct task_struct *p, long nice)
{
	set_user_nice_core(p, nice);

	/* Trace after the core update so the tracer sees the final value. */
	update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_USER);
}
Пример #3
0
/*
 * Apply a new nice value to @p on behalf of the binder priority path and
 * mirror the resulting static priority (policy 0, i.e. normal) into the
 * MT priority tracer.
 */
void set_user_nice_binder(struct task_struct *p, long nice)
{
	set_user_nice_core(p, nice);

	/* NOTE(review): literal 2 is presumably a PTS_* "binder" source id
	 * (cf. PTS_USER in set_user_nice_syscall) — confirm and use the
	 * named constant. */
	update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, 2);
}
Пример #4
0
/*
 * do_fork - main fork/vfork/clone entry point.
 *
 * Validates CLONE_NEWUSER restrictions, decides which ptrace event (if any)
 * to report, duplicates the current task via copy_process(), wires up vfork
 * completion when requested, wakes the child, and returns the child's pid
 * in the caller's pid namespace (or a negative errno on failure).
 *
 * Fixes vs. previous revision:
 *  - error printks truncated 64-bit values: clone_flags (unsigned long) was
 *    cast to unsigned int for %x, and the ERR_PTR value was cast to
 *    unsigned int — now printed with %lx and %p respectively;
 *  - error printks carry an explicit KERN_ERR log level.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & CLONE_NEWUSER) {
		/* A new user namespace cannot share the parent's thread group. */
		if (clone_flags & CLONE_THREAD) {
			printk(KERN_ERR "[%d:%s] fork fail at clone_thread, flags:0x%lx\n",
			       current->pid, current->comm, clone_flags);
			return -EINVAL;
		}
		/* hopefully this check will go away when userns support is
		 * complete
		 */
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
			!capable(CAP_SETGID)) {
			printk(KERN_ERR "[%d:%s] fork fail at capable not match, flags:0x%lx\n",
			       current->pid, current->comm, clone_flags);
			return -EPERM;
		}
	}
	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		trace_sched_process_fork(current, p);

		nr = task_pid_vnr(p);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			/* Hold a ref so the child can't vanish while we wait. */
			get_task_struct(p);
		}

#ifdef CONFIG_SCHEDSTATS
		/* mt scheduler profiling */
		save_mtproc_info(p, sched_clock());
		printk(KERN_DEBUG "[%d:%d:%s] fork [%d:%d:%s] flag=0x%lx\n", current->tgid, current->pid, current->comm, p->tgid, p->pid, p->comm, clone_flags);
#endif
		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event(trace, nr);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
		}
#ifdef CONFIG_MT_PRIO_TRACER
		create_prio_tracer(task_pid_nr(p));
		update_prio_tracer(task_pid_nr(p), p->prio, p->policy, PTS_KRNL);
#endif
	} else {
		nr = PTR_ERR(p);
		/* %p (not a truncating cast) for the ERR_PTR value. */
		printk(KERN_ERR "[%d:%s] fork fail:[%p, %d]\n",
		       current->pid, current->comm, p, (int)nr);
	}
	return nr;
}