/**
 * Sends a reschedule inter-processor interrupt to the target CPU.
 * This causes the target CPU to call schedule().
 *
 * If the target is the local CPU, no IPI is needed: just set the
 * need-resched flag directly and let the normal return path reschedule.
 */
void
arch_xcall_reschedule(id_t cpu)
{
	if (cpu == this_cpu)
		/* BUGFIX: was the mojibake "¤t->arch.flags"
		 * (HTML entity &curren; swallowed "&curr"); the intended
		 * argument is the address of current->arch.flags. */
		set_bit(TF_NEED_RESCHED_BIT, &current->arch.flags);
	else
		lapic_send_ipi(cpu, LWK_XCALL_RESCHEDULE_VECTOR);
}
/**
 * x86_64 specific code for carrying out inter-CPU function calls.
 * This function should not be called directly. Call xcall_function() instead.
 *
 * Arguments:
 *       [IN] cpu_mask: The target CPUs of the cross-call.
 *       [IN] func:     The function to execute on each target CPU.
 *       [IN] info:     Argument to pass to func().
 *       [IN] wait:     true = wait for cross-call to fully complete.
 *
 * Returns:
 *       Success: 0
 *       Failure: Error code
 */
int
arch_xcall_function(
	cpumask_t	cpu_mask,
	void		(*func)(void *info),
	void *		info,
	bool		wait
)
{
	struct xcall_data_struct data;
	unsigned int num_cpus;
	unsigned int cpu;

	/* Must be entered with IRQs enabled; a cross-call initiated with
	 * IRQs disabled could deadlock against an incoming cross-call. */
	BUG_ON(irqs_disabled());

	/* Count how many CPUs are being targeted */
	num_cpus = cpus_weight(cpu_mask);
	if (!num_cpus)
		return 0;  /* empty mask: trivially done */

	/* Fill in the xcall data structure on our stack.
	 * Targets count themselves in via 'started' (and 'finished' when
	 * wait is requested), which is how we know the stack frame may be
	 * safely torn down. */
	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	if (wait)
		atomic_set(&data.finished, 0);
	data.wait = wait;

	/* Spin with IRQs enabled so we can still service cross-calls that
	 * other CPUs direct at us while we contend for the lock. */
	while (!spin_trylock_irq(&xcall_data_lock))
		;
	/* IRQs are now disabled */

	/* Set the global xcall data pointer; the wmb() orders the publish
	 * before the IPIs below so targets never see a stale pointer. */
	xcall_data = &data;
	wmb();

	/* Send inter-processor interrupts to the target CPUs */
	for_each_cpu_mask(cpu, cpu_mask)
		lapic_send_ipi(cpu, XCALL_FUNCTION_VECTOR);

	/* Wait for initiation responses: every target has observed 'data',
	 * so the global pointer can be reused after we return. */
	while (atomic_read(&data.started) != num_cpus)
		cpu_relax();

	/* If requested, wait for completion responses */
	if (wait) {
		while (atomic_read(&data.finished) != num_cpus)
			cpu_relax();
	}

	spin_unlock_irq(&xcall_data_lock);

	return 0;
}
/**
 * Interrupts the physical CPU corresponding to the specified logical
 * guest cpu. If (vector == 0) the interrupt has no effect beyond kicking
 * the core out of guest context.
 *
 * NOTE:
 * This is dependent on the implementation of xcall_reschedule(). Currently
 * xcall_reschedule does not explicitly call schedule() on the destination
 * CPU but relies on the return to user space to handle it. Because palacios
 * is a kernel thread, schedule will not be called, which is correct. We
 * should have a default palacios IRQ that just handles the IPI and returns
 * immediately with no side effects.
 */
static void
palacios_interrupt_cpu(
	struct v3_vm_info *	vm,
	int			cpu_id,
	int			vector
)
{
	/* Never IPI ourselves */
	if (cpu_id == current->cpu_id)
		return;

	if (vector == 0) {
		/* Plain kick: reschedule IPI with no other side effects */
		xcall_reschedule(cpu_id);
	} else {
		lapic_send_ipi(cpu_id, vector);
	}
}
/*
 * Sends a TLB-flush IPI to every other CPU whose cached cr3 matches the
 * given page table, so stale translations are discarded.
 *
 * XXX naive: walks all logical CPUs linearly on every shootdown.
 */
static void
shootdown_tlb_all(pgd_t *pgdir)
{
	int id;

	for (id = 0; id < sysconf.lcpu_count; id++) {
		struct cpu *c = per_cpu_ptr(cpus, id);

		/* The local CPU flushes itself; skip it. */
		if (c->id == myid())
			continue;

		/* Only CPUs currently running on this page table care. */
		if (c->arch_data.tlb_cr3 != PADDR(pgdir))
			continue;

		/* NOTE(review): the struct cpu pointer is passed here, not a
		 * numeric CPU id as at other lapic_send_ipi() call sites —
		 * confirm this matches this codebase's lapic_send_ipi API. */
		lapic_send_ipi(c, T_TLBFLUSH);
	}
}
/**
 * Delivers the power-management interrupt vector to the given CPU
 * via a local-APIC IPI.
 */
static void
pmSendIPI(int cpu)
{
	lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}
/**
 * Sends a reschedule inter-processor interrupt to the target CPU.
 * This causes the target CPU to call schedule().
 *
 * The IPI is sent unconditionally — no special-casing of the local CPU.
 */
void
arch_xcall_reschedule(id_t cpu)
{
	lapic_send_ipi(cpu, XCALL_RESCHEDULE_VECTOR);
}