Example #1
static inline void resched_task(task_t *p)
{
#ifdef CONFIG_SMP
	preempt_disable();
	if (/* condition elided in the source */ && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
#else
	set_tsk_need_resched(p);
#endif
}
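The condition elided in Example #1 is not preserved in the source. Purely as an illustration, here is a hypothetical reconstruction (not the original code) that builds the missing guard from the need-resched and polling checks used by the resched_task() in Example #24 below; set_tsk_need_resched(), tsk_is_polling(), task_cpu() and smp_send_reschedule() are the usual kernel helpers:

/* Hypothetical sketch, not the original function. */
static inline void resched_task_sketch(struct task_struct *p)
{
#ifdef CONFIG_SMP
	/* Set the flag locally, then IPI the remote CPU only if it is not
	 * already polling TIF_NEED_RESCHED in its idle loop. */
	preempt_disable();
	set_tsk_need_resched(p);
	if (!tsk_is_polling(p) && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
	preempt_enable();
#else
	/* On UP there is nobody to interrupt; marking the task is enough. */
	set_tsk_need_resched(p);
#endif
}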
Example #2
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
    struct trace_array_cpu *data;
    int cpu = smp_processor_id();
    unsigned long flags;
    long disabled;
    int pc;

    if (likely(!tracer_enabled))
        return;

    tracing_record_cmdline(p);
    tracing_record_cmdline(current);

    if ((wakeup_rt && !rt_task(p)) ||
            p->prio >= wakeup_prio ||
            p->prio >= current->prio)
        return;

    pc = preempt_count();
    disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
    if (unlikely(disabled != 1))
        goto out;

    /* interrupts should be off from try_to_wake_up */
    __raw_spin_lock(&wakeup_lock);

    /* check for races. */
    if (!tracer_enabled || p->prio >= wakeup_prio)
        goto out_locked;

    /* reset the trace */
    __wakeup_reset(wakeup_trace);

    wakeup_cpu = task_cpu(p);
    wakeup_current_cpu = wakeup_cpu;
    wakeup_prio = p->prio;

    wakeup_task = p;
    get_task_struct(wakeup_task);

    local_save_flags(flags);

    data = wakeup_trace->data[wakeup_cpu];
    data->preempt_timestamp = ftrace_now(cpu);
    tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

    /*
     * We must be careful in using CALLER_ADDR2. But since wake_up
     * is not called by an assembly function (whereas schedule is),
     * it should be safe to use it here.
     */
    trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
    __raw_spin_unlock(&wakeup_lock);
out:
    atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #3
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);

	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
Example #4
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", cpu_to_node(task_cpu(p)));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
Example #5
void print_rq_at_KE(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int locked;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	//read_lock_irqsave(&tasklist_lock, flags);
	locked = read_trylock_n_irqsave(&tasklist_lock, &flags, m, "print_rq_at_KE");

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	if (locked)
		read_unlock_irqrestore(&tasklist_lock, flags);
}
Example #6
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
Example #7
static void dump_tasks_info(void)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid	tgid total_vm	   rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		/* check unkillable tasks */
		if (is_global_init(p))
			continue;
		if (p->flags & PF_KTHREAD)
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			* This is a kthread or all of p's threads have already
			* detached their mm's.	There's no need to report
			* them; they can't be oom killed anyway.
			*/
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %3u	 %3d	     %5d %s\n",
		task->pid, task_uid(task), task->tgid,
		task->mm->total_vm, get_mm_rss(task->mm),
		task_cpu(task), task->signal->oom_adj,
		task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
}
Example #8
void sched_state_will_schedule(struct task_struct* tsk)
{
	/* Litmus hack: we only care about processor-local invocations of
	 * set_tsk_need_resched(). We can't reliably set the flag remotely
	 * since it might race with other updates to the scheduling state.  We
	 * can't rely on the runqueue lock protecting updates to the sched
	 * state since processors do not acquire the runqueue locks for all
	 * updates to the sched state (to avoid acquiring two runqueue locks at
	 * the same time). Further, if tsk is residing on a remote processor,
	 * then that processor doesn't actually know yet that it is going to
	 * reschedule; it still must receive an IPI (unless a local invocation
	 * races).
	 */
	if (likely(task_cpu(tsk) == smp_processor_id())) {
		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
			set_sched_state(PICKED_WRONG_TASK);
		else
			set_sched_state(WILL_SCHEDULE);
	} else
		/* Litmus tasks should never be subject to a remote
		 * set_tsk_need_resched(). */
		BUG_ON(is_realtime(tsk));
	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
		   __builtin_return_address(0));
}
Example #9
/*
 * RTCC idle handler, called when CPU is idle
 */
static int rtcc_idle_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	if (likely(!atomic_read(&krtccd_enabled)))
		return 0;

	if (likely(atomic_read(&need_to_reclaim) == 0))
		return 0;

	// To prevent RTCC from running too frequently
	if (likely(time_before(jiffies, prev_jiffy + rtcc_reclaim_jiffies)))
		return 0;

	if (unlikely(atomic_read(&kswapd_running) == 1))
		return 0;

	if (unlikely(idle_cpu(task_cpu(krtccd)) && this_cpu_loadx(3) == 0) || rtcc_boost_mode) {
		if (likely(atomic_read(&krtccd_running) == 0)) {
			atomic_set(&krtccd_running, 1);

			wake_up_process(krtccd);
			prev_jiffy = jiffies;
		}
	}

	return 0;
}
Example #10
static int
select_task_rq_other_rr(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	return task_cpu(p);
}
Example #11
void linsched_disable_migrations(void)
{
	int i;

	for (i = 0; i < curr_task_id; i++)
		set_cpus_allowed(__linsched_tasks[i],
				 cpumask_of_cpu(
					 task_cpu(__linsched_tasks[i])));
}
Example #12
notrace void probe_sched_wakeup(void *_data, struct task_struct *p, int success)
{
	struct marker *marker;
	struct serialize_int_int_short data;

	data.f1 = p->pid;
	data.f2 = task_cpu(p);
	data.f3 = p->state;

	marker = &GET_MARKER(kernel, sched_try_wakeup);
	ltt_specialized_trace(marker, marker->single.probe_private,
		&data, serialize_sizeof(data), sizeof(int));
}
Example #13
static void
probe_wakeup(struct rq *rq, struct task_struct *p, int success)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	__raw_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
	trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
		       CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	__raw_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
Example #14
static void dump_one_task_info(struct task_struct *tsk)
{
	char stat_array[3] = { 'R', 'S', 'D'};
	char stat_ch;

	stat_ch = tsk->state <= TASK_UNINTERRUPTIBLE ?
		stat_array[tsk->state] : '?';
	pr_info("%8d %8d %8d %16lld %c(%d) %4d    %p %s\n",
			tsk->pid, (int)(tsk->utime), (int)(tsk->stime),
			tsk->se.exec_start, stat_ch, (int)(tsk->state),
			task_cpu(tsk), tsk, tsk->comm);

	show_stack(tsk, NULL);
}
Example #15
static struct task_struct *pick_next_stealable_pjob_rtws(struct rtws_rq *rtws_rq)
{
    struct sched_rtws_entity *rtws_se;
    struct task_struct *p;
    struct rq *rq = rq_of_rtws_rq(rtws_rq);

    if (!has_stealable_pjobs(rtws_rq))
        return NULL;

    rtws_se = rb_entry(rtws_rq->leftmost_stealable_pjob, struct sched_rtws_entity, stealable_pjob_node);
    BUG_ON(!rtws_se);

    p = task_of_rtws_se(rtws_se);
    if (rq->cpu != task_cpu(p) || task_current(rq, p) || !p->se.on_rq || !rtws_task(p)) {
        printk(KERN_INFO "RTWS :: cannot steal this task\n");
        return NULL;
    }
    BUG_ON(rq->cpu != task_cpu(p));
    BUG_ON(task_current(rq, p));
    BUG_ON(!p->se.on_rq);
    BUG_ON(!rtws_task(p));

    return p;
}
Example #16
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
Example #17
/* Force a migration of task to the dest_cpu.
 * If migr is set, allow migrations after the forced migration... otherwise,
 * do not allow them. (We need to disable migrations so that the forced
 * migration takes place correctly.)
 * Returns old cpu of task.
 */
int linsched_force_migration(struct task_struct *task, int dest_cpu, int migr)
{
	int old_cpu = task_cpu(task);
	
	linsched_disable_migrations();
	set_cpus_allowed(task, cpumask_of_cpu(dest_cpu));
	linsched_change_cpu(old_cpu);
	schedule();
	linsched_change_cpu(dest_cpu);
	schedule();
	if (migr)
		linsched_enable_migrations();

	return old_cpu;
}
Example #18
void acct_update_power(struct task_struct *task, cputime_t cputime) {
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;

	if (!task)
		return;
	cpu_num = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!powerstats || !stats)
		return;

	curr = powerstats->curr[stats->last_index];
	task->cpu_power += curr * cputime_to_usecs(cputime);
}
Example #19
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;
	cputime_t utime, stime;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		task_cputime(p, &utime, &stime);
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (utime || stime))
			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
Example #20
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	if (likely(!wakeup_task))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (unlikely(disabled != 1))
		goto out;

	local_irq_save(flags);
	__raw_spin_lock(&wakeup_lock);

	if (unlikely(!wakeup_task))
		goto unlock;

	/*
	 * The task can't disappear because it needs to
	 * wake up first, and we have the wakeup_lock.
	 */
	if (task_cpu(wakeup_task) != cpu)
		goto unlock;

	trace_function(tr, ip, parent_ip, flags, pc);

 unlock:
	__raw_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);

 out:
	atomic_dec(&data->disabled);

	ftrace_preempt_enable(resched);
}
Example #21
static void dump_tasks(void)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  total_vm      rss cpu oom_adj  name\n");
	for_each_process(p) {
		task = find_lock_task_mm(p);
		if (!task) {
			continue;
		}

		pr_info("[%5d] %5d  %8lu %8lu %3u     %3d  %s\n",
				task->pid, task_uid(task),
				task->mm->total_vm, get_mm_rss(task->mm),
				task_cpu(task), task->signal->oom_adj, task->comm);
		task_unlock(task);
	}
}
Example #22
static void pfair_task_new(struct task_struct * t, int on_rq, int running)
{
	unsigned long 		flags;

	TRACE("pfair: task new %d state:%d\n", t->pid, t->state);

	raw_spin_lock_irqsave(&pfair_lock, flags);
	if (running)
		t->rt_param.scheduled_on = task_cpu(t);
	else
		t->rt_param.scheduled_on = NO_CPU;

	prepare_release(t, pfair_time + 1);
	tsk_pfair(t)->sporadic_release = 0;
	pfair_add_release(t);
	check_preempt(t);

	raw_spin_unlock_irqrestore(&pfair_lock, flags);
}
Example #23
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
#ifdef CONFIG_DEBUG_PRINTK
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
#else
			;
#endif
	}
	write_unlock_irq(&tasklist_lock);
}
Example #24
void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
Example #25
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}
Example #26
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
Example #27
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);
	for_each_process_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	read_unlock_irqrestore(&tasklist_lock, flags);
}
Example #28
void could_cswap(void)
{
	if (atomic_read(&s_reclaim.need_to_reclaim) == 0)
		return;

	if (time_before(jiffies, prev_jiffy + minimum_interval_time))
		return;

	if (atomic_read(&s_reclaim.lmk_running) == 1 || atomic_read(&kswapd_thread_on) == 1) 
		return;

	if (get_nr_swap_pages() < minimum_freeswap_pages)
		return;

	if (idle_cpu(task_cpu(s_reclaim.kcompcached)) && this_cpu_loadx(4) == 0) {
		if (atomic_read(&s_reclaim.kcompcached_running) == 0) {
			wake_up_process(s_reclaim.kcompcached);
			atomic_set(&s_reclaim.kcompcached_running, 1);
			prev_jiffy = jiffies;
		}
	}
}
Example #29
void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_update_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter))) {
		psi_group_change(group, cpu, clear, set);
		if (wake_clock && !delayed_work_pending(&group->clock_work))
			schedule_delayed_work(&group->clock_work, PSI_FREQ);
	}
}
Example #30
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with task_rq(p)->lock unlocked.
		 * Order the read so that we do not warn about a task
		 * which ran on this cpu in the past and has just been
		 * woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}