Example #1
void put_online_cpus(void)
{
	/* The active writer recurses without touching the refcount */
	if (cpu_hotplug.active_writer == current)
		return;
	hotplug_lock();
	/* Wake the writer once the last reader drops its reference */
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	hotplug_unlock();
}
Example #2
void get_online_cpus(void)
{
	might_sleep();
	/* The active writer recurses without touching the refcount */
	if (cpu_hotplug.active_writer == current)
		return;
	hotplug_lock();
	cpu_hotplug.refcount++;	/* Holds off cpu_hotplug_begin() */
	hotplug_unlock();
}
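For context, here is a minimal sketch of how a reader section would pair these two calls; cpu_callback_work() and do_something_on() are hypothetical names for illustration, not part of the examples above.

/*
 * Hypothetical reader: holds off hotplug writers while walking the
 * online mask. cpu_callback_work() and do_something_on() are
 * illustrative names only.
 */
static void cpu_callback_work(void)
{
	int cpu;

	get_online_cpus();		/* May sleep; bumps the refcount */
	for_each_online_cpu(cpu)
		do_something_on(cpu);	/* Hypothetical per-cpu work */
	put_online_cpus();		/* Wakes a waiting writer if last */
}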
Example #3
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to trigger in practice, since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		hotplug_lock();
		if (likely(!cpu_hotplug.refcount))
			break;	/* No readers left; return with the lock held */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		hotplug_unlock();
		schedule();	/* Woken by the last reader in put_online_cpus() */
	}
}
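Note that the loop above breaks while still holding the hotplug lock, so the writer returns with it held. A plausible counterpart, sketched here under that assumption, would clear active_writer and release the lock once the hotplug operation is done:

/* Sketch of the writer-side teardown, assuming the lock is still held */
static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	hotplug_unlock();
}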
Example #4
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
	struct hotplug_pcp *hp;

retry:
	hp = &__get_cpu_var(hotplug_pcp);

	if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
	    hp->unplug == current || (current->flags & PF_STOMPER)) {
		hp->refcount++;
		return;
	}
	preempt_enable();
	/*
	 * An unplug is in progress: block on the hotplug lock until
	 * it is released, then re-evaluate with preemption disabled.
	 */
	hotplug_lock();
	hotplug_unlock();
	preempt_disable();
	goto retry;
}
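The matching unpin operation is not shown here; a minimal sketch, assuming the hotplug_pcp fields used above, would drop the per-cpu refcount and wake the unplug thread when the last pinned section exits:

/* Sketch only; assumes the hotplug_pcp layout used in pin_current_cpu() */
void unpin_current_cpu(void)
{
	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);

	WARN_ON(hp->refcount <= 0);

	/* Wake the unplug thread once the last pinned section is gone */
	if (!--hp->refcount && hp->unplug && hp->unplug != current &&
	    !(current->flags & PF_STOMPER))
		wake_up_process(hp->unplug);
}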
Example #5
/**
 * pin_current_cpu - Prevent the current cpu from being unplugged
 *
 * Lightweight version of get_online_cpus() to prevent cpu from being
 * unplugged when code runs in a migration disabled region.
 *
 * Must be called with preemption disabled (preempt_count = 1)!
 */
void pin_current_cpu(void)
{
	struct hotplug_pcp *hp;
	int force = 0;

retry:
	hp = &__get_cpu_var(hotplug_pcp);

	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
	    hp->unplug == current || (current->flags & PF_STOMPER)) {
		hp->refcount++;
		return;
	}

	if (hp->grab_lock) {
		preempt_enable();
		hotplug_lock(hp);
		hotplug_unlock(hp);
	} else {
		preempt_enable();
		/*
		 * Try to push this task off of this CPU.
		 */
		if (!migrate_me()) {
			preempt_disable();
			hp = &__get_cpu_var(hotplug_pcp);
			if (!hp->grab_lock) {
				/*
				 * Just let it continue; it's already pinned
				 * or about to sleep.
				 */
				force = 1;
				goto retry;
			}
			preempt_enable();
		}
	}
	preempt_disable();
	goto retry;
}
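Since the docstring requires preempt_count = 1, the expected caller is a migrate_disable()-style helper. The sketch below shows that shape; the migrate_disable counter on task_struct is an assumption for illustration, not taken from the examples above.

/* Sketch of a caller; p->migrate_disable is an assumed field */
void migrate_disable(void)
{
	struct task_struct *p = current;

	preempt_disable();	/* Gives pin_current_cpu() preempt_count = 1 */
	pin_current_cpu();	/* May drop preemption internally and retry */
	p->migrate_disable = 1;	/* Assumed per-task flag */
	preempt_enable();
}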
Example #6
static void cpu_unplug_sync(unsigned int cpu)
{
	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);

	init_completion(&hp->synced);
	/* The completion needs to be initialized before setting grab_lock */
	smp_wmb();

	/* Grab the mutex before setting grab_lock */
	hotplug_lock(hp);
	hp->grab_lock = 1;

	/*
	 * The CPU notifiers have been completed.
	 * Wait for tasks to get out of pinned CPU sections and have new
	 * tasks block until the CPU is completely down.
	 */
	__cpu_unplug_sync(hp);

	/* All done with the sync thread */
	kthread_stop(hp->sync_tsk);
	hp->sync_tsk = NULL;
}
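__cpu_unplug_sync() itself is not shown; given the synced completion and the sync_tsk thread used above, a minimal sketch would wake the sync thread and wait for it to signal completion:

/* Sketch, assuming hp->sync_tsk completes hp->synced when it is done */
static void __cpu_unplug_sync(struct hotplug_pcp *hp)
{
	wake_up_process(hp->sync_tsk);
	wait_for_completion(&hp->synced);
}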