/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us.
 */
static inline void leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
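/*
 * For context, a minimal sketch (assumed shape, not taken from this source)
 * of the per-cpu lazy-TLB bookkeeping that leave_mm() manipulates: state is
 * TLBSTATE_OK while the CPU actively uses active_mm's page tables, and
 * TLBSTATE_LAZY while it merely borrows them and may detach via leave_mm()
 * instead of servicing further flushes.
 */
struct tlb_state_sketch {
        struct mm_struct *active_mm;    /* mm whose page tables are loaded */
        int state;                      /* TLBSTATE_OK or TLBSTATE_LAZY */
};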
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
        int me = smp_processor_id();
        ia64_vector vector;
        unsigned long flags;

        for (vector = IA64_FIRST_DEVICE_VECTOR;
             vector < IA64_LAST_DEVICE_VECTOR; vector++) {
                int irq;
                struct irq_desc *desc;
                struct irq_cfg *cfg;

                irq = __get_cpu_var(vector_irq)[vector];
                if (irq < 0)
                        continue;

                desc = irq_to_desc(irq);
                cfg = irq_cfg + irq;
                raw_spin_lock(&desc->lock);
                if (!cfg->move_cleanup_count)
                        goto unlock;

                if (!cpu_isset(me, cfg->old_domain))
                        goto unlock;

                spin_lock_irqsave(&vector_lock, flags);
                __get_cpu_var(vector_irq)[vector] = -1;
                cpu_clear(me, vector_table[vector]);
                spin_unlock_irqrestore(&vector_lock, flags);
                cfg->move_cleanup_count--;
unlock:
                raw_spin_unlock(&desc->lock);
        }
        return IRQ_HANDLED;
}
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (current->active_mm)
                load_user_cs_desc(cpu, current->active_mm);

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */
        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
}
asmlinkage void smp_invalidate_interrupt(void)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
        /*
         * This was a BUG() but until someone can quote me the
         * line from the intel manual that guarantees an IPI to
         * multiple CPUs is retried _only_ on the erroring CPUs
         * it's staying as a return
         *
         * BUG();
         */
        if (flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        cpu_clear(cpu, flush_cpumask);
out:
        put_cpu_no_resched();
}
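/*
 * For reference, a minimal sketch (assumed, not taken from this source) of
 * the sender side the two handlers above pair with: the initiator publishes
 * flush_mm/flush_va, ORs the targets into flush_cpumask, sends the
 * invalidate IPI, and spins until every target has cleared its own bit.
 * The lock name and IPI vector here are illustrative.
 */
static void flush_tlb_others_sketch(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        spin_lock(&tlbstate_lock);      /* serializes the flush_* globals */
        flush_mm = mm;
        flush_va = va;
        cpus_or(flush_cpumask, cpumask, flush_cpumask);
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
        while (!cpus_empty(flush_cpumask))
                cpu_relax();            /* wait for every target to ack */
        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}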
/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct that contains va
 *               va   - virtual address to flush the TLB for
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

#ifdef DEBUG_SMP
        if (!mm)        /* check before mm is dereferenced below */
                BUG();
#endif

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(cpu_id, cpu_mask);

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map, tmp = current->cpus_allowed;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) &&
                            !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));

        set_cpus_allowed(current, tmp);
}
int jzsoc_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        // unsigned int status;

        if (cpu == 0)           /* FIXME */
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
        cpu_clear(cpu, cpu_callin_map);
        local_irq_disable();

        percpu_timer_stop();

        cpu_reim &= ~(1 << cpu);
        smp_disable_interrupt(cpu);
        smp_enable_interrupt(0);
        reset_irq_resp_fifo(cpu);

        return 0;
}
static void stop_this_cpu(void *unused)
{
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for (;;)
                cpu_relax();
}
static void remove_siblinginfo(int cpu)
{
        int last = 0;

        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
                cpu_clear(cpu, cpu_core_map[cpu]);
                cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
                return;
        }

        last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);

        /* remove it from all sibling maps */
        clear_cpu_sibling_map(cpu);
}
/*
 * Wait until all CPUs have entered via soft-reset.
 */
static void crash_soft_reset_check(int cpu)
{
        unsigned int ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */

        cpu_clear(cpu, cpus_in_sr);
        while (atomic_read(&enter_on_soft_reset) != ncpus)
                cpu_relax();
}
static int brcmstb_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == 0)
                return -EBUSY;

        printk(KERN_INFO "SMP: CPU%d is offline\n", cpu);

        cpu_clear(cpu, cpu_online_map);
        cpu_clear(cpu, cpu_callin_map);
        local_flush_tlb_all();
        local_flush_icache_range(0, ~0);

        return 0;
}
void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
        cpumask_t cpu_mask = mm->cpu_vm_mask;

        cpu_clear(smp_processor_id(), cpu_mask);
        if (!cpus_empty(cpu_mask))
                xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns),
                    (unsigned long) mm, insn_addr);
        local_flush_sig_insns(mm, insn_addr);
}
void cpu_execute(struct cpu *cpu)
{
        cpu->pc += 2;

        uint8_t vx = cpu->v[cpu->opcode.x];
        uint8_t vy = cpu->v[cpu->opcode.y];

        switch (cpu->opcode.op) {
        case 0x0:
                switch (cpu->opcode.n) {
                case 0x0: return cpu_clear(cpu);
                case 0xE: return cpu_jump(cpu, cpu->stack[--cpu->sp]);
                default:  return cpu_error(cpu);
                }
        case 0x1: return cpu_jump(cpu, cpu->opcode.addr);
        case 0x2: return cpu_call(cpu, cpu->opcode.addr);
        case 0x3: return cpu_skip(cpu, vx == cpu->opcode.kk);
        case 0x4: return cpu_skip(cpu, vx != cpu->opcode.kk);
        case 0x5: return cpu_skip(cpu, vx == vy);
        case 0x6: return cpu_assign_register(cpu, cpu->opcode.x, cpu->opcode.kk);
        case 0x7: return cpu_assign_register(cpu, cpu->opcode.x, vx + cpu->opcode.kk);
        case 0x8:
                switch (cpu->opcode.n) {
                case 0x0: return cpu_assign_register(cpu, cpu->opcode.x, vy);
                case 0x1: return cpu_assign_register(cpu, cpu->opcode.x, vx | vy);
                case 0x2: return cpu_assign_register(cpu, cpu->opcode.x, vx & vy);
                case 0x3: return cpu_assign_register(cpu, cpu->opcode.x, vx ^ vy);
                case 0x4: return cpu_add_carry(cpu, vx, vy);
                case 0x5: return cpu_subtract_borrow(cpu, vx, vy);
                case 0x6: return cpu_shift_right(cpu);
                case 0x7: return cpu_subtract_borrow(cpu, vy, vx);
                case 0xE: return cpu_shift_left(cpu);
                default:  return cpu_error(cpu);
                }
        case 0x9: return cpu_skip(cpu, vx != vy);
        case 0xA: return cpu_assign_i(cpu, cpu->opcode.addr);
        case 0xB: return cpu_jump(cpu, cpu->opcode.addr + cpu->v[0]);
        case 0xC: return cpu_random(cpu);
        case 0xD: return cpu_draw(cpu);
        case 0xE:
                switch (cpu->opcode.kk) {
                case 0x9E: return cpu_skip(cpu, SDL_GetKeyboardState(NULL)[key_map[vx]]);
                case 0xA1: return cpu_skip(cpu, !SDL_GetKeyboardState(NULL)[key_map[vx]]);
                default:   return cpu_error(cpu);
                }
        case 0xF:
                switch (cpu->opcode.kk) {
                case 0x07: return cpu_assign_register(cpu, cpu->opcode.x, cpu->delay_timer);
                case 0x0A: return cpu_wait_key_press(cpu);
                case 0x15: return cpu_assign_delay_timer(cpu, vx);
                case 0x18: return cpu_assign_sound_timer(cpu, vx);
                case 0x1E: return cpu_assign_i(cpu, cpu->i + vx);
                case 0x29: return cpu_assign_i(cpu, vx * 5);
                case 0x33: return cpu_store_bcd(cpu);
                case 0x55: return cpu_copy_to_memory(cpu);
                case 0x65: return cpu_copy_from_memory(cpu);
                default:   return cpu_error(cpu);
                }
        }
        return cpu_error(cpu);
}
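/*
 * The dispatcher above assumes the opcode has already been split into fixed
 * fields. A minimal sketch (assumed layout, not taken from this source) of
 * that decode step: CHIP-8 instructions are 16-bit big-endian words, and
 * every field is a nibble or byte slice of the same word.
 */
#include <stdint.h>

struct opcode_sketch {
        uint8_t  op;   /* bits 15..12: major opcode */
        uint8_t  x;    /* bits 11..8:  Vx register index */
        uint8_t  y;    /* bits 7..4:   Vy register index */
        uint8_t  n;    /* bits 3..0:   4-bit immediate */
        uint8_t  kk;   /* bits 7..0:   8-bit immediate */
        uint16_t addr; /* bits 11..0:  12-bit jump/call address */
};

static struct opcode_sketch decode_sketch(uint16_t word)
{
        struct opcode_sketch o;

        o.op   = (word >> 12) & 0xF;
        o.x    = (word >> 8)  & 0xF;
        o.y    = (word >> 4)  & 0xF;
        o.n    =  word        & 0xF;
        o.kk   =  word        & 0xFF;
        o.addr =  word        & 0xFFF;
        return o;
}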
static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
{
        u64 gr28, gr29, gr30, gr31;
        struct ia64_pal_retval result = {0, 0, 0, 0};
        struct cache_flush_args args = {0, 0, 0, 0};
        long psr;

        gr28 = gr29 = gr30 = gr31 = 0;
        kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);

        if (gr31 != 0)
                printk(KERN_ERR "vcpu:%p called cache_flush error!\n", vcpu);

        /* Always call Host Pal in int=1 */
        gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
        args.cache_type = gr29;
        args.operation = gr30;
        smp_call_function(remote_pal_cache_flush, (void *)&args, 1);
        if (args.status != 0)
                printk(KERN_ERR "pal_cache_flush error! status:0x%lx\n",
                                args.status);
        /*
         * Call Host PAL cache flush
         * Clear psr.ic when calling PAL_CACHE_FLUSH
         */
        local_irq_save(psr);
        result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
                                             &result.v0);
        local_irq_restore(psr);
        if (result.status != 0)
                printk(KERN_ERR "vcpu:%p crashed due to cache_flush err:%ld "
                                "in1:%lx,in2:%lx\n",
                                vcpu, result.status, gr29, gr30);

#if 0
        if (gr29 == PAL_CACHE_TYPE_COHERENT) {
                cpus_setall(vcpu->arch.cache_coherent_map);
                cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
                cpus_setall(cpu_cache_coherent_map);
                cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
        }
#endif
        return result;
}
static void cluster_send_IPI_allbutself(int vector)
{
        cpumask_t mask = cpu_online_map;

        cpu_clear(smp_processor_id(), mask);
        if (!cpus_empty(mask))
                cluster_send_IPI_mask(mask, vector);
}
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}
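/*
 * A minimal sketch (assumed, not taken from this source) of how a caller
 * typically reaches cpu_quiet(): the per-cpu path notices it has passed a
 * quiescent state for the current grace period and reports it exactly once,
 * under the ctrlblk lock that protects rcp->cpumask. Field names follow
 * classic RCU but are illustrative here.
 */
static void rcu_check_quiescent_state_sketch(struct rcu_ctrlblk *rcp,
                                             struct rcu_data *rdp)
{
        if (!rdp->qs_pending)
                return;         /* nothing to report this grace period */
        if (!rdp->passed_quiesc)
                return;         /* quiescent state not yet observed */
        rdp->qs_pending = 0;

        spin_lock(&rcp->lock);
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp); /* last cpu completes the batch */
        spin_unlock(&rcp->lock);
}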
static inline void play_dead(void)
{
        idle_task_exit();
        local_irq_disable();
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
}
/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
        int node;

        printk("Unmapping cpu %d from all nodes\n", cpu);
        for (node = 0; node < MAX_NUMNODES; node++)
                cpu_clear(cpu, node_2_cpu_mask[node]);
        cpu_2_node[cpu] = 0;
}
static void __cpuinit topology_remove_dev(unsigned int cpu)
{
        struct sys_device *sys_dev = get_cpu_sysdev(cpu);

        if (!cpu_isset(cpu, topology_dev_map))
                return;
        cpu_clear(cpu, topology_dev_map);
        sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
}
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_enable();     /* May need to service _machine_restart IPI */
        for (;;);               /* Wait if available. */
}
static int __devinit profile_cpu_callback(struct notifier_block *info,
                                          unsigned long action, void *__cpu)
{
        int node, cpu = (unsigned long)__cpu;
        struct page *page;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                node = cpu_to_node(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = alloc_pages_node(node,
                                        GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                        0);
                        if (!page)
                                return NOTIFY_BAD;
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = alloc_pages_node(node,
                                        GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
                                        0);
                        if (!page)
                                goto out_free;
                        per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
                }
                break;
out_free:
                page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                __free_page(page);
                return NOTIFY_BAD;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                cpu_set(cpu, prof_cpu_mask);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                cpu_clear(cpu, prof_cpu_mask);
                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
                break;
        }
        return NOTIFY_OK;
}
/*
 * Start the HZ tick on the current CPU.
 * Only cpu_idle may call this function.
 */
static void start_hz_timer(void)
{
        BUG_ON(!in_interrupt());

        if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
                return;
        account_ticks(get_clock());
        set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
}
void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
{
        int cpu;

        for_each_cpu_mask(cpu, mask)
                if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
                        cpu_clear(cpu, mask);

        smp_send_event_check_mask(&mask);
}
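/*
 * A hypothetical caller (not taken from this source): kick TIMER_SOFTIRQ on
 * every online CPU but this one. Because test_and_set_bit() above drops CPUs
 * whose pending bit was already set, the event-check IPI never goes to a CPU
 * that is going to run the softirq anyway.
 */
static void raise_timer_softirq_elsewhere_sketch(void)
{
        cpumask_t mask = cpu_online_map;

        cpu_clear(smp_processor_id(), mask);
        cpumask_raise_softirq(mask, TIMER_SOFTIRQ);
}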
/*
** Yoink this CPU from the runnable list...
**
*/
static void halt_processor(void)
{
        /* REVISIT : redirect I/O Interrupts to another CPU? */
        /* REVISIT : does PM *know* this CPU isn't available? */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for (;;)
                ;
}
void smp_stop_cpu(void)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        disable_local_APIC();
        local_irq_enable();
}
void smp_flush_cache_mm(struct mm_struct *mm)
{
        if (mm->context != NO_CONTEXT) {
                cpumask_t cpu_mask = mm->cpu_vm_mask;

                cpu_clear(smp_processor_id(), cpu_mask);
                if (!cpus_empty(cpu_mask))
                        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm),
                            (unsigned long) mm);
                local_flush_cache_mm(mm);
        }
}
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        for (;;) {
                if (cpu_wait)
                        (*cpu_wait)();  /* Wait if available. */
        }
}
void switch_ipi_to_APIC_timer(void *cpumask)
{
        cpumask_t mask = *(cpumask_t *)cpumask;
        int cpu = smp_processor_id();

        if (cpu_isset(cpu, mask) && cpu_isset(cpu, timer_bcast_ipi)) {
                cpu_clear(cpu, timer_bcast_ipi);
                enable_APIC_timer();
        }
}
int acpi_unmap_lsapic(int cpu)
{
        ia64_cpu_to_sapicid[cpu] = -1;
        cpu_clear(cpu, cpu_present_map);

#ifdef CONFIG_ACPI_NUMA
        /* NUMA specific cleanups */
#endif

        return 0;
}
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
        cpumask_t allbutself;
        unsigned int i, nr_cpus;
        int ret;

        BUG_ON(!local_irq_is_enabled());

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);
        nr_cpus = cpus_weight(allbutself);

        if ( nr_cpus == 0 )
        {
                BUG_ON(cpu != smp_processor_id());
                return (*fn)(data);
        }

        /*
         * We must not spin on this lock when it is held by another CPU: the
         * holder expects the other CPUs to enter softirq context, and waiting
         * for the lock here while it waits for us would deadlock.
         */
        if ( !spin_trylock(&stopmachine_lock) )
                return -EBUSY;

        stopmachine_data.fn = fn;
        stopmachine_data.fn_data = data;
        stopmachine_data.nr_cpus = nr_cpus;
        stopmachine_data.fn_cpu = cpu;
        atomic_set(&stopmachine_data.done, 0);
        stopmachine_data.state = STOPMACHINE_START;

        smp_wmb();

        for_each_cpu_mask ( i, allbutself )
                cpu_raise_softirq(i, STOPMACHINE_SOFTIRQ);

        stopmachine_set_state(STOPMACHINE_PREPARE);

        local_irq_disable();
        stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);

        if ( cpu == smp_processor_id() )
                stopmachine_data.fn_result = (*fn)(data);
        stopmachine_set_state(STOPMACHINE_INVOKE);
        ret = stopmachine_data.fn_result;

        stopmachine_set_state(STOPMACHINE_EXIT);
        local_irq_enable();

        spin_unlock(&stopmachine_lock);

        return ret;
}
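/*
 * A minimal sketch (assumed, not taken from this source) of the rendezvous
 * helper stop_machine_run() leans on: publish the next state, then wait
 * until every other CPU has bumped the done counter to acknowledge it.
 */
static void stopmachine_set_state_sketch(enum stopmachine_state state)
{
        atomic_set(&stopmachine_data.done, 0);
        smp_wmb();              /* counter reset must be visible before state */
        stopmachine_data.state = state;
        while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus )
                cpu_relax();
}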