/*
 * Power down the core identified by @mpidr through the SPM hotplug
 * firmware path.
 *
 * The first caller after the SPM leaves hotplug mode reloads the
 * hotplug PCM program (wake all cores, clear the valid bits, load the
 * program, mark it ready); every caller then flags its own core as a
 * valid hotplug target in SPM_PCM_RESERVE.  The whole sequence runs
 * under the SPM lock.
 */
void spm_hotplug_off(unsigned long mpidr)
{
	unsigned long core = platform_get_core_pos(mpidr);

	spm_lock_get();
	if (is_hotplug_ready() == 0) {
		/* Bring every core out of MCDI before reprogramming the PCM. */
		spm_mcdi_wakeup_all_cores();
		/* Drop all stale per-core valid bits, then load the hotplug program. */
		mmio_clrbits_32(SPM_PCM_RESERVE, PCM_HOTPLUG_VALID_MASK);
		spm_go_to_hotplug();
		set_hotplug_ready();
	}
	/*
	 * Replace the valid field with this core's bit plus its shadow bit
	 * at PCM_HOTPLUG_VALID_SHIFT (presumably a confirm/valid pair the
	 * PCM firmware checks — verify against the SPM register manual).
	 */
	mmio_clrsetbits_32(SPM_PCM_RESERVE, PCM_HOTPLUG_VALID_MASK,
			   (1 << core) | (1 << (core + PCM_HOTPLUG_VALID_SHIFT)));
	spm_lock_release();
}
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() not an api which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	/* Advertise ourselves so readers know a writer is waiting. */
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		/* No readers left: exit holding the lock (released later by
		 * cpu_hotplug_done() — not visible here, confirm). */
		if (likely(!cpu_hotplug.refcount))
			break;
		/*
		 * Mark ourselves sleeping *before* dropping the lock so the
		 * last reader's wakeup cannot be lost between the unlock and
		 * schedule().
		 */
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	/*
	 * MTK CPU hotplug / idle integration: flag that a hotplug operation
	 * is in flight and wake all cores (presumably forcing them out of
	 * the MCDI low-power state so the operation can proceed — confirm
	 * against spm_mcdi_wakeup_all_cores()).
	 */
	atomic_inc(&is_in_hotplug);
	spm_mcdi_wakeup_all_cores();
}