/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
	cpumask_t cpumask;
	unsigned long *mask;

	preempt_disable();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);
	spin_lock(&flushcache_lock);
	mask = cpumask_bits(&cpumask);
	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
	_flush_cache_copyback_all();
	while (flushcache_cpumask)
		mb();
	spin_unlock(&flushcache_lock);
	preempt_enable();
}
/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_get_broadcast_mask()))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */
	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
	int cpu;

	alloc_bootmem_cpumask_var(&nohz_full_mask);
	if (cpulist_parse(str, nohz_full_mask) < 0) {
		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
		return 1;
	}

	cpu = smp_processor_id();
	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
		cpumask_clear_cpu(cpu, nohz_full_mask);
	}
	have_nohz_full_mask = true;
	return 1;
}
/**
 * smp_cache_interrupt - Handle IPI request to flush caches.
 *
 * Handle a request delivered by IPI to flush the current CPU's
 * caches.  The parameters are stored in smp_cache_*.
 */
void smp_cache_interrupt(void)
{
	unsigned long opr_mask = smp_cache_mask;

	switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
	case SMP_DCACHE_NOP:
		break;
	case SMP_DCACHE_INV:
		mn10300_local_dcache_inv();
		break;
	case SMP_DCACHE_INV_RANGE:
		mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
		break;
	case SMP_DCACHE_FLUSH:
		mn10300_local_dcache_flush();
		break;
	case SMP_DCACHE_FLUSH_RANGE:
		mn10300_local_dcache_flush_range(smp_cache_start, smp_cache_end);
		break;
	case SMP_DCACHE_FLUSH_INV:
		mn10300_local_dcache_flush_inv();
		break;
	case SMP_DCACHE_FLUSH_INV_RANGE:
		mn10300_local_dcache_flush_inv_range(smp_cache_start, smp_cache_end);
		break;
	}

	switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
	case SMP_ICACHE_NOP:
		break;
	case SMP_ICACHE_INV:
		mn10300_local_icache_inv();
		break;
	case SMP_ICACHE_INV_RANGE:
		mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
		break;
	}

	cpumask_clear_cpu(smp_processor_id(), &smp_cache_ipi_map);
}
/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags - we don't steal tags
 * unless enough percpu freelists have tags on them that it's possible more
 * than half the total tags could be stuck on remote percpu freelists.
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 */
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2;
	     cpus_have_tags--) {
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			memcpy(tags->freelist, remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		if (tags->nr_free)
			break;
	}
}
void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NMI_HANDLED;
	}

	return NMI_DONE;
}
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
        flush_area_local(va, flags);

    if ( !cpumask_subset(mask, cpumask_of(smp_processor_id())) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
        flush_va = va;
        flush_flags = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop(cpu);

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p)
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	read_unlock(&tasklist_lock);

	return 0;
}
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		if (regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
				cpu, instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		}
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
/* Shut down the current CPU */
void __cpu_disable(void)
{
    unsigned int cpu = get_processor_id();

    local_irq_disable();
    gic_disable_cpu();
    /* Allow any queued timer interrupts to get serviced */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* It's now safe to remove this processor from the online map */
    cpumask_clear_cpu(cpu, &cpu_online_map);

    if ( cpu_disable_scheduler(cpu) )
        BUG();
    smp_mb();

    /* Return to caller; eventually the IPI mechanism will unwind and the
     * scheduler will drop to the idle loop, which will call stop_cpu(). */
}
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}
/* On return cpumask will be altered to indicate CPUs changed.
 * CPUs with states changed will be set in the mask,
 * CPUs with status unchanged will be unset in the mask.
 */
static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
				      cpumask_var_t cpus)
{
	int cpu;
	int cpuret = 0;
	int ret = 0;

	if (cpumask_empty(cpus))
		return 0;

	for_each_cpu(cpu, cpus) {
		switch (state) {
		case DOWN:
			cpuret = cpu_down(cpu);
			break;
		case UP:
			cpuret = cpu_up(cpu);
			break;
		}
		if (cpuret) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
				 __func__,
				 ((state == UP) ? "up" : "down"),
				 cpu, cpuret);
			if (!ret)
				ret = cpuret;
			if (state == UP) {
				/* clear bits for unchanged cpus, return */
				cpumask_shift_right(cpus, cpus, cpu);
				cpumask_shift_left(cpus, cpus, cpu);
				break;
			} else {
				/* clear bit for unchanged cpu, continue */
				cpumask_clear_cpu(cpu, cpus);
			}
		}
	}

	return ret;
}
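/*
 * Aside (not part of the kernel source above): the cpumask_shift_right()
 * followed by cpumask_shift_left() pair in rtas_cpu_state_change_mask()
 * clears every bit below 'cpu' while leaving bit 'cpu' and all higher bits
 * untouched.  A minimal stand-alone sketch of the same idiom on a single
 * word-sized bitmask, assuming the mask fits in one unsigned long:
 */
#include <stdio.h>

/* Shift right then left by the same amount: bits 0..n-1 drop out. */
static unsigned long clear_bits_below(unsigned long mask, unsigned int n)
{
	return (mask >> n) << n;
}

int main(void)
{
	unsigned long mask = 0xffUL;	/* bits 0-7 set */

	/* Clear everything below bit 3; prints 0xf8. */
	printf("%#lx\n", clear_bits_below(mask, 3));
	return 0;
}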
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif
	/* We don't touch CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}
static int mc_timer_init(void)
{
	cpumask_t cpu;

	mc_timer_thread = kthread_create(kthread_worker_fn, &mc_timer_worker,
					 "mc_timer");
	if (IS_ERR(mc_timer_thread)) {
		mc_timer_thread = NULL;
		pr_err("%s: timer thread creation failed!", __func__);
		return -EFAULT;
	}

	wake_up_process(mc_timer_thread);

	cpumask_setall(&cpu);
	cpumask_clear_cpu(DEFAULT_BIG_CORE, &cpu);
	set_cpus_allowed(mc_timer_thread, cpu);

	hrtimer_init(&mc_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	mc_hrtimer.function = mc_hrtimer_func;

	return 0;
}
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long uninitialized_var(preferred_cpu);

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);
	free_cpumask_var(tmp);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
static void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
    unsigned int cpu = smp_processor_id();
    s_time_t expires = per_cpu(timer_deadline, cpu);

    __monitor((void *)&mwait_wakeup(cpu), 0, 0);
    smp_mb();

    /*
     * Timer deadline passing is the event on which we will be woken via
     * cpuidle_mwait_wakeup. So check it now that the location is armed.
     */
    if ( expires > NOW() || expires == 0 )
    {
        cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
        __mwait(eax, ecx);
        cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
    }

    if ( expires <= NOW() && expires > 0 )
        raise_softirq(TIMER_SOFTIRQ);
}
/*
 * This function will be called by secondary cpus or by kexec cpu
 * if soft-reset is activated to stop some CPUs.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	int msecs = 5;

	local_irq_save(flags);

	/* Wait 5ms if the kexec CPU is not entered yet. */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/*
			 * Either kdump image is not loaded or
			 * kdump process is not started - Probably xmon
			 * exited using 'x'(exit and recover) or
			 * kexec_should_crash() failed for all running tasks.
			 */
			cpumask_clear_cpu(cpu, &cpus_in_sr);
			local_irq_restore(flags);
			return;
		}

		mdelay(1);
		cpu_relax();
	}

	if (cpu == crashing_cpu) {
		/*
		 * Panic CPU will enter this func only via soft-reset.
		 * Wait until all secondary CPUs entered and
		 * then start kexec boot.
		 */
		crash_soft_reset_check(cpu);
		cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
		if (ppc_md.kexec_cpu_down)
			ppc_md.kexec_cpu_down(1, 0);
		machine_kexec(kexec_crash_image);
		/* NOTREACHED */
	}
	crash_ipi_callback(regs);
}
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}
static void salinfo_log_new_read(int cpu, struct salinfo_data *data)
{
	struct salinfo_data_saved *data_saved;
	unsigned long flags;
	int i;
	int saved_size = ARRAY_SIZE(data->data_saved);

	data->saved_num = 0;
	spin_lock_irqsave(&data_saved_lock, flags);
retry:
	for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
		if (data_saved->buffer && data_saved->cpu == cpu) {
			sal_log_record_header_t *rh = (sal_log_record_header_t *)(data_saved->buffer);

			data->log_size = data_saved->size;
			memcpy(data->log_buffer, rh, data->log_size);
			barrier();	/* id check must not be moved */
			if (rh->id == data_saved->id) {
				data->saved_num = i+1;
				break;
			}
			/* saved record changed by mca.c since interrupt, discard it */
			shift1_data_saved(data, i);
			goto retry;
		}
	}
	spin_unlock_irqrestore(&data_saved_lock, flags);

	if (!data->saved_num)
		work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
	if (!data->log_size) {
		data->state = STATE_NO_DATA;
		cpumask_clear_cpu(cpu, &data->cpu_event);
	} else {
		data->state = STATE_LOG_RECORD;
	}
}
void rq_attach_root(struct rq *rq, struct root_domain *rd)
{
	struct root_domain *old_rd = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	if (rq->rd) {
		old_rd = rq->rd;

		if (cpumask_test_cpu(rq->cpu, old_rd->online))
			set_rq_offline(rq);

		cpumask_clear_cpu(rq->cpu, old_rd->span);

		/*
		 * If we don't want to free the old_rd yet then
		 * set old_rd to NULL to skip the freeing later
		 * in this function:
		 */
		if (!atomic_dec_and_test(&old_rd->refcount))
			old_rd = NULL;
	}

	atomic_inc(&rd->refcount);
	rq->rd = rd;

	cpumask_set_cpu(rq->cpu, rd->span);
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	if (old_rd)
		call_rcu_sched(&old_rd->rcu, free_rootdomain);
}
/* This gets called just before system reboots */
void opal_flash_term_callback(void)
{
	struct cpumask mask;

	if (update_flash_data.status != FLASH_IMG_READY)
		return;

	pr_alert("FLASH: Flashing new firmware\n");
	pr_alert("FLASH: Image is %u bytes\n", image_data.size);
	pr_alert("FLASH: Performing flash and reboot/shutdown\n");
	pr_alert("FLASH: This will take several minutes. Do not power off!\n");

	/* Small delay to help getting the above message out */
	msleep(500);

	/* Return secondary CPUs to firmware */
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_call_function_many(&mask, flash_return_cpu, NULL, false);

	/* Hard disable interrupts */
	hard_irq_disable();
}
void arch_trigger_all_cpu_backtrace(bool include_self)
{
	int i;
	int cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
	put_cpu();
}
/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
{
	int old_idx;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	old_idx = cp->elements[cpu].idx;
	if (old_idx == IDX_INVALID) {
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
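/*
 * Aside (not from the kernel source): cpudl_heapify_up() itself is not shown
 * in this collection.  As a rough stand-alone sketch of what a sift-up on an
 * array-backed max-heap of (cpu, deadline) pairs can look like - an
 * illustration under assumed toy types, not the scheduler's implementation,
 * and it omits the elements[cpu].idx back-pointer map the real code maintains:
 */
#include <stdint.h>

struct toy_item {
	int cpu;
	uint64_t dl;
};

/* Move the element at 'idx' up until its parent's deadline is not smaller. */
static void toy_heapify_up(struct toy_item *heap, int idx)
{
	while (idx > 0) {
		int parent = (idx - 1) / 2;

		if (heap[parent].dl >= heap[idx].dl)
			break;

		/* Swap child and parent, then continue from the parent slot. */
		struct toy_item tmp = heap[parent];

		heap[parent] = heap[idx];
		heap[idx] = tmp;
		idx = parent;
	}
}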
int __cpu_disable(void)
{
	struct device_node *l2_cache;
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}

	l2_cache = cpu_to_l2cache(cpu);
	for_each_present_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);

		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_clear_cpu(cpu, cpu_core_mask(i));
			cpumask_clear_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	return 0;
}
static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);

	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}