Example No. 1
/*
 * RTCC idle handler, called when CPU is idle
 */
static int rtcc_idle_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	if (likely(!atomic_read(&krtccd_enabled)))
		return 0;

	if (likely(atomic_read(&need_to_reclaim) == 0))
		return 0;

	// To prevent RTCC from running too frequently
	if (likely(time_before(jiffies, prev_jiffy + rtcc_reclaim_jiffies)))
		return 0;

	if (unlikely(atomic_read(&kswapd_running) == 1))
		return 0;

	if (unlikely(idle_cpu(task_cpu(krtccd)) && this_cpu_loadx(3) == 0) || rtcc_boost_mode) {
		if (likely(atomic_read(&krtccd_running) == 0)) {
			atomic_set(&krtccd_running, 1);

			wake_up_process(krtccd);
			prev_jiffy = jiffies;
		}
	}

	return 0;
}
Example No. 2
int main(void) {
	int i;

	/* TODO: run test_printk() */
	/*
	test_printk();
*/
	disable_interrupt();
	init_pcbs();
	setup_irq(8, irq0_handle);
	setup_irq(0x80, irq1_handle);
	setup_irq(9, keyboard_int);
	/*
	kthread_create(test1_a, stack1+KSTACK_SIZE);
	kthread_create(test1_b, stack2+KSTACK_SIZE);
	kthread_create(test1_c, stack3+KSTACK_SIZE);
	kthread_create(test_thread_a, stack1+KSTACK_SIZE);
	kthread_create(test_thread_b, stack2+KSTACK_SIZE);
	
	kthread_create(test_msg_c, stack1+KSTACK_SIZE);
	kthread_create(test_msg_d, stack2+KSTACK_SIZE);
	
	kthread_create(test_thread_in_thread, stack1+KSTACK_SIZE);
	*/
	UserThread_create(0x2000);
	kthread_create(tty_driver, stack2+KSTACK_SIZE);
	enable_interrupt();
	
	while (1) {
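		/* All remaining work is done by interrupt handlers and kernel threads; the boot thread just idles. */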
		idle_cpu();
	}
}
Example No. 3
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example No. 4
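/* Requires cpu_add_remove_lock to be held */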
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

	#ifdef CONFIG_HTC_ACPU_DEBUG
	{
		unsigned int status = 0;
		msm_proc_comm(PCOM_BACKUP_CPU_STATUS, (unsigned*)&cpu, (unsigned*) &status);
	}
	#endif
out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example No. 5
void panic(char *str) {
    /* TODO: you can modify this as you wish */
    disable_interrupt();
    printk("\n\n** Kernel panic: %s **\n", str);
    while (1) {
        idle_cpu();
    }
}
Example No. 6
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	__irq_enter();
#ifdef CONFIG_NO_HZ
	if (idle_cpu(smp_processor_id()))
		tick_nohz_update_jiffies();
#endif
}
/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}
Example No. 8
/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
#ifdef CONFIG_SCHED_BFS
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			return preferred_cpu;
	}
#else
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
#endif
	return this_cpu;
}
Example No. 9
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example No. 10
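/* Requires cpu_add_remove_lock to be held */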
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (skip_cpu_offline)
		return -EACCES;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
Example No. 11
void thread_b(void){
	int tmp;
	while(1){
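		/* Copy the counter, print and store the incremented value, then spin briefly before yielding the CPU. */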
		tmp = cnt+1;
		printk("\ndata%d \n", tmp);
		cnt = tmp;
		for (tmp = 0; tmp < 20; tmp++);
		idle_cpu();
	}
}
Example No. 12
static int rcu_debugfs_show(struct seq_file *m, void *unused)
{
	int cpu, q, s[2], msecs;

	raw_local_irq_disable();
	msecs = div_s64(sched_clock() - rcu_timestamp, NSEC_PER_MSEC);
	raw_local_irq_enable();

	seq_printf(m, "%14u: #batches seen\n",
		rcu_stats.nbatches);
	seq_printf(m, "%14u: #barriers seen\n",
		atomic_read(&rcu_stats.nbarriers));
	seq_printf(m, "%14llu: #callbacks invoked\n",
		rcu_stats.ninvoked);
	seq_printf(m, "%14u: #callbacks left to invoke\n",
		atomic_read(&rcu_stats.nleft));
	seq_printf(m, "%14u: #msecs since last end-of-batch\n",
		msecs);
	seq_printf(m, "%14u: #passes forced (0 is best)\n",
		rcu_stats.nforced);
	seq_printf(m, "\n");

	for_each_online_cpu(cpu)
		seq_printf(m, "%4d ", cpu);
	seq_printf(m, "  CPU\n");

	s[1] = s[0] = 0;
	for_each_online_cpu(cpu) {
		struct rcu_data *rd = &rcu_data[cpu];
		int w = ACCESS_ONCE(rd->which) & 1;
		seq_printf(m, "%c%c%c%d ",
			'-',
			idle_cpu(cpu) ? 'I' : '-',
			rd->wait ? 'W' : '-',
			w);
		s[w]++;
	}
	seq_printf(m, "  FLAGS\n");

	for (q = 0; q < 2; q++) {
		for_each_online_cpu(cpu) {
			struct rcu_data *rd = &rcu_data[cpu];
			struct rcu_list *l = &rd->cblist[q];
			seq_printf(m, "%4d ", l->count);
		}
		seq_printf(m, "  Q%d%c\n", q, " *"[s[q] > s[q^1]]);
	}
	seq_printf(m, "\nFLAGS:\n");
	seq_printf(m, "  I - cpu idle, 0|1 - Q0 or Q1 is current Q, other is previous Q,\n");
	seq_printf(m, "  W - cpu does not permit current batch to end (waiting),\n");
	seq_printf(m, "  * - marks the Q that is current for most CPUs.\n");

	return 0;
}
Example No. 13
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) &&
	     !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}
Example No. 14
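/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */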
void rcu_check_callbacks(int cpu, int user)
{
	if (user || 
	    (idle_cpu(cpu) && !in_softirq() && 
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}
Example No. 15
void test_server(void){
	struct message ms;

	while (1){
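		/* Block until a message arrives, log it, then pass it on to process 2. */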
		receive(&ms);
		printk("SERVER receive message (%d,%d,%d) from PID:%d", 
			ms.p1, ms.p2, ms.p3, ms.source);
		send(2, &ms);
		idle_cpu();
	}
}
Example No. 16
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (!rcu_needs_cpu(0))
		return;	/* RCU doesn't need anything to be done. */
	if (user ||
	    (idle_cpu(cpu) &&
	     !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		rcu_qsctr_inc(cpu);
	else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
}
Example No. 17
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}
Example No. 18
/*
 * Check if the conditions for ending the current batch are true. If
 * so then end it.
 *
 * Must be invoked periodically, and the periodic invocations must be
 * far enough apart in time for the previous batch to become quiescent.
 * This is a few tens of microseconds unless NMIs are involved; an NMI
 * stretches out the requirement by the duration of the NMI.
 *
 * "Quiescent" means the owning cpu is no longer appending callbacks
 * and has completed execution of a trailing write-memory-barrier insn.
 */
static void __rcu_delimit_batches(struct rcu_list *pending)
{
       struct rcu_data *rd;
       struct rcu_list *plist;
       int cpu, eob, prev;

       if (!rcu_scheduler_active)
               return;

       rcu_stats.nlast++;

       /* If an NMI occurred then the previous batch may not yet be
        * quiescent.  Let's wait till it is.
        */
       if (rcu_nmi_seen) {
               rcu_nmi_seen = 0;
               rcu_stats.nmis++;
               return;
       }

       /*
        * Find out if the current batch has ended
        * (end-of-batch).
        */
       eob = 1;
       for_each_online_cpu(cpu) {
               rd = &rcu_data[cpu];
               if (rd->wait) {
                       rd->wait = preempt_count_cpu(cpu) > idle_cpu(cpu);
                       if (rd->wait) {
                               eob = 0;
                               break;
                       }
               }
       }

       /*
        * Exit if batch has not ended.  But first, tickle all non-cooperating
        * CPUs if enough time has passed.
        */
       if (eob == 0) {
               if (rcu_wdog_ctr >= rcu_wdog_lim) {
                       rcu_wdog_ctr = 0;
                       rcu_stats.nforced++;
                       for_each_online_cpu(cpu) {
                               if (rcu_data[cpu].wait)
                                       force_cpu_resched(cpu);
                       }
               }
               rcu_wdog_ctr += rcu_hz_period_us;
               return;
       }
Example No. 19
static void showacpu(void *dummy)
{
	unsigned long flags;

	/* Idle CPUs have no interesting backtrace. */
	if (idle_cpu(smp_processor_id()))
		return;

	spin_lock_irqsave(&show_lock, flags);
	printk(KERN_INFO "CPU%d:\n", smp_processor_id());
	show_stack(NULL, NULL);
	spin_unlock_irqrestore(&show_lock, flags);
}
Example No. 20
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick();
#endif
	preempt_enable_no_resched();
}
Example No. 21
void test_sender(void){
	struct message ms;

	ms.p1 = 1;
	ms.p2 = 2;
	ms.p3 = 3;
	while (1){
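		/* Log the payload, hand it to SERVER via invoke(), bump p3 for the next round, then yield the CPU. */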
		printk("SENDER send message (%d,%d,%d) to SERVER\n",
			ms.p1, ms.p2, ms.p3);

		invoke(SERVER, &ms);
		ms.p3++;
		idle_cpu();
	}
}
Example No. 22
/*{{{  void ccsp_safe_pause_timeout (sched_t *sched)*/
void ccsp_safe_pause_timeout (sched_t *sched)
{
	unsigned int sync;
	Time now;

	#ifdef DEBUG_RTS
	fprintf(stderr, "USERPROC: ccsp_safe_pause_timeout() entered\n");
	#endif

	if (sched->tq_fptr == NULL) {
		return;
	} else if (Time_PastTimeout (sched)) {
		return;
	}

	now = Time_GetTime(sched);
	if (Time_AFTER (sched->tq_fptr->time, now)) {
		unsigned int usecs = Time_MINUS (sched->tq_fptr->time, now);

		if (usecs < min_sleep) {
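			/* Timeout is too close for an alarm: spin in short idle bursts until a sync flag appears or the timeout passes. */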
			while (!(sync = att_safe_swap (&(sched->sync), 0))) {
				int i = 10;
				
				while (i--) {
					idle_cpu ();
				}

				if (Time_PastTimeout (sched)) {
					break;
				}

				serialise ();
			}

			if (sync) {
				/* restore detected flags */
				att_safe_or (&(sched->sync), sync);
			}
		} else {
			ccsp_set_next_alarm (sched, usecs);
			ccsp_safe_pause (sched);
		}
	}

	#ifdef DEBUG_RTS
	fprintf(stderr, "USERPROC: ccsp_safe_pause_timeout() about to exit (return 0)\n");
	#endif
}
Example No. 23
/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	trace_hardirq_exit();
	sub_preempt_count(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_irq_exit();
#endif
	rcu_irq_exit();
}
void could_cswap(void)
{
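	/*
	 * Wake kcompcached only when reclaim is pending, the rate limit has
	 * elapsed, no other reclaimer is active, enough swap is free, and
	 * this CPU is idle with no recent load.
	 */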
	if (atomic_read(&s_reclaim.need_to_reclaim) == 0)
		return;

	if (time_before(jiffies, prev_jiffy + minimum_interval_time))
		return;

	if (atomic_read(&s_reclaim.lmk_running) == 1 || atomic_read(&kswapd_thread_on) == 1) 
		return;

	if (get_nr_swap_pages() < minimum_freeswap_pages)
		return;

	if (idle_cpu(task_cpu(s_reclaim.kcompcached)) && this_cpu_loadx(4) == 0) {
		if (atomic_read(&s_reclaim.kcompcached_running) == 0) {
			wake_up_process(s_reclaim.kcompcached);
			atomic_set(&s_reclaim.kcompcached_running, 1);
			prev_jiffy = jiffies;
		}
	}
}
Example No. 25
void
game_init(void) {
	/* Setting up a 100Hz timer */
	init_100hz_timer();

	/* Setting up the interrupt descriptor table, 
	   this procedure installs ``actual'' interrupt handlers */
	init_idt();

	/* Setting up the interrupt controller i8259,
	   the interrupt code will start at 32,
	   and i8259 will send end-of-interrupt signal automatically */
	init_intr();

	/* Setting up the game interrupt handlers.
	   the start_frame and key_stroke procedures are wrapped by
	   our interrupt handler to make interrupts transparent.
	   For implementation, you simply need to know that start_frame
	   is called for every timer interrupt, and so does key_stroke
	   for every keyboard interrupt (press or release). */
	set_timer_intr_handler(start_frame);
	set_keyboard_intr_handler(key_stroke);

	/* Calls the game logic.
	   All game logic related codes are in game.c */
	init_game();

	/* Just a ``sti'' instruction */
	enable_interrupt();

	/* The main loop now needs to do nothing but wait for
	   coming endless interrupts.
	   All game tasks are done in the interrupt handler. */
	while (1) {
		idle_cpu();
	}
}
Example No. 26
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};
	unsigned long timeout;
	unsigned long flags;
	struct task_struct *g, *p;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);
	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	timeout = jiffies + HZ;
	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu)) {
		msleep(1);
		if (time_after(jiffies, timeout)) {
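			/* Still not idle after a second: dump the tasks left on this CPU, then re-arm the timeout. */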
			printk("%s: CPU%d not idle after offline. Running tasks:\n", __func__, cpu);
			read_lock_irqsave(&tasklist_lock, flags);
			do_each_thread(g, p) {
				if (!p->se.on_rq || task_cpu(p) != cpu)
					continue;
				sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock_irqrestore(&tasklist_lock, flags);
			timeout = jiffies + HZ;
		}
	}

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* ic, extend hcube helper functionalities */
int __ref op_is_online_idle_core(unsigned int cpu)
{
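	/* Treat the core as an idle candidate only if it is idle and has no tasks waiting on I/O. */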
	int ret = idle_cpu(cpu) && !nr_iowait_cpu(cpu);
	return ret;
}
Example No. 28
/*
 * Check if the conditions for ending the current batch are true. If
 * so then end it.
 *
 * Must be invoked periodically, and the periodic invocations must be
 * far enough apart in time for the previous batch to become quiescent.
 * This is a few tens of microseconds unless NMIs are involved; an NMI
 * stretches out the requirement by the duration of the NMI.
 *
 * "Quiescent" means the owning cpu is no longer appending callbacks
 * and has completed execution of a trailing write-memory-barrier insn.
 */
static void __rcu_delimit_batches(struct rcu_list *pending)
{
	struct rcu_data *rd;
	int cpu, eob;
	u64 rcu_now;

	/* If an NMI occurred then the previous batch may not yet be
	 * quiescent.  Let's wait till it is.
	 */
	if (rcu_nmi_seen) {
		rcu_nmi_seen = 0;
		return;
	}

	if (!rcu_scheduler_active)
		return;

	/*
	 * Find out if the current batch has ended
	 * (end-of-batch).
	 */
	eob = 1;
	for_each_online_cpu(cpu) {
		rd = &rcu_data[cpu];
		if (rd->wait) {
			eob = 0;
			break;
		}
	}

	/*
	 * Force end-of-batch if too much time (n seconds) has
	 * gone by.  The forcing method is slightly questionable,
	 * hence the WARN_ON.
	 */
	rcu_now = sched_clock();
	if (!eob && !rcu_timestamp
	&& ((rcu_now - rcu_timestamp) > 3LL * NSEC_PER_SEC)) {
		rcu_stats.nforced++;
		WARN_ON_ONCE(1);
		eob = 1;
	}

	/*
	 * Just return if the current batch has not yet
	 * ended.  Also, keep track of just how long it
	 * has been since we've actually seen end-of-batch.
	 */

	if (!eob)
		return;

	rcu_timestamp = rcu_now;

	/*
	 * End the current RCU batch and start a new one.
	 */
	for_each_present_cpu(cpu) {
		rd = &rcu_data[cpu];
		rcu_end_batch(rd, pending);
		if (cpu_online(cpu)) /* wins race with offlining every time */
			rd->wait = preempt_count_cpu(cpu) > idle_cpu(cpu);
		else
			rd->wait = 0;
	}
	rcu_stats.nbatches++;
}
Example No. 29
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_org;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL))  {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}

	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
		migrate_enable();
		err = -EBUSY;
		goto restore_cpus;
	}

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	__cpu_unplug_wait(cpu);
	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	migrate_enable();
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
Example No. 30
File: cpu.c  Project: borkmann/kasan
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}