Example 1
void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	hotplug_lock();
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	hotplug_unlock();
}
Example 2
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	hotplug_lock();
	cpu_hotplug.refcount++;
	hotplug_unlock();
}
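Taken together, Examples 1 and 2 form the reader side of the hotplug lock: get_online_cpus() bumps the refcount before code touches CPU-online state, and put_online_cpus() drops it, waking a waiting writer when the count reaches zero. A minimal usage sketch of that pattern; poke_cpu() is a hypothetical helper used only for illustration:

/* Reader-side sketch: keep the set of online CPUs stable while walking it.
 * poke_cpu() is hypothetical and stands in for any per-CPU work. */
static void poke_all_online_cpus(void)
{
	int cpu;

	get_online_cpus();		/* refcount++, blocks a pending hot-unplug */
	for_each_online_cpu(cpu)	/* the online mask cannot change here */
		poke_cpu(cpu);
	put_online_cpus();		/* refcount--, wakes a sleeping writer at zero */
}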
Example 3
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero, and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		hotplug_lock();
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		hotplug_unlock();
		schedule();
	}
}
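The comment above implies the writer-side pairing: cpu_maps_update_begin() serializes writers, cpu_hotplug_begin() waits for the reader refcount to drop to zero (and leaves the lock held), and cpu_hotplug_done() (Example 7) releases it. A rough sketch of that sequence, where _cpu_down_work() is only a placeholder for the real teardown:

/* Writer-side sketch of a CPU-down path. _cpu_down_work() is a placeholder;
 * the real kernel code does considerably more here. */
static int sketch_cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();	/* only one writer at a time */
	cpu_hotplug_begin();		/* wait for refcount == 0, keep the lock */
	err = _cpu_down_work(cpu);	/* placeholder for the actual teardown */
	cpu_hotplug_done();		/* clear active_writer, drop the lock */
	cpu_maps_update_done();
	return err;
}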
Example 4
static void cpu_unplug_done(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	hp->unplug = NULL;
	/* Let all tasks know cpu unplug is finished before cleaning up */
	smp_wmb();

	if (hp->sync_tsk)
		kthread_stop(hp->sync_tsk);

	if (hp->grab_lock) {
		hotplug_unlock(hp);
		/* protected by cpu_hotplug.lock */
		hp->grab_lock = 0;
	}
	tell_sched_cpu_down_done(cpu);
}
Example 5
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
	struct hotplug_pcp *hp;

retry:
	hp = &__get_cpu_var(hotplug_pcp);

	if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
	    hp->unplug == current || (current->flags & PF_STOMPER)) {
		hp->refcount++;
		return;
	}
	preempt_enable();
	hotplug_lock();
	hotplug_unlock();
	preempt_disable();
	goto retry;
}
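On RT kernels, pin_current_cpu() is meant to be called from migrate_disable(), with unpin_current_cpu() assumed as the release counterpart that drops hp->refcount (it does not appear in these examples). A simplified sketch of the expected calling pattern:

/* Simplified sketch of how migrate_disable()/migrate_enable() are expected to
 * use the pin; the real functions also manage task state and cpus_allowed.
 * unpin_current_cpu() is assumed here as the counterpart that decrements
 * hp->refcount and wakes the unplug thread when it reaches zero. */
static void sketch_migrate_disable(void)
{
	preempt_disable();	/* satisfies the preempt_count == 1 requirement */
	pin_current_cpu();	/* this CPU can no longer be hot-unplugged */
	preempt_enable();
}

static void sketch_migrate_enable(void)
{
	preempt_disable();
	unpin_current_cpu();	/* assumed counterpart: refcount-- and wake-up */
	preempt_enable();
}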
Example 6
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
	struct hotplug_pcp *hp;
	int force = 0;

retry:
	hp = &__get_cpu_var(hotplug_pcp);

	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
	    hp->unplug == current || (current->flags & PF_STOMPER)) {
		hp->refcount++;
		return;
	}

	if (hp->grab_lock) {
		preempt_enable();
		hotplug_lock(hp);
		hotplug_unlock(hp);
	} else {
		preempt_enable();
		/*
		 * Try to push this task off of this CPU.
		 */
		if (!migrate_me()) {
			preempt_disable();
			hp = &__get_cpu_var(hotplug_pcp);
			if (!hp->grab_lock) {
				/*
				 * Just let it continue; it's already pinned
				 * or about to sleep.
				 */
				force = 1;
				goto retry;
			}
			preempt_enable();
		}
	}
	preempt_disable();
	goto retry;
}
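Compared with Example 5, this version only spins on the hotplug lock when the unplug path has explicitly requested it via hp->grab_lock. Otherwise it first tries migrate_me() to push the task off the CPU, and if migration fails while grab_lock is still clear, it forces the pin (force = 1) rather than blocking.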
Example 7
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	hotplug_unlock();
}