/*
 * Forward an inter-processor interrupt requested by the guest.
 *
 * The destination CPU must belong to the issuing cell; out-of-cell targets
 * are logged and dropped. INIT and SIPI requests are emulated via
 * x86_send_init_sipi, NMIs are currently ignored (see TODO), and all other
 * delivery modes are passed straight to the physical APIC.
 */
static void apic_send_ipi(unsigned int target_cpu_id, u32 orig_icr_hi,
			  u32 icr_lo)
{
	u32 delivery_mode = icr_lo & APIC_ICR_DLVR_MASK;

	/* refuse IPIs that point outside the caller's cell */
	if (!cell_owns_cpu(this_cell(), target_cpu_id)) {
		printk("WARNING: CPU %d specified IPI destination outside "
		       "cell boundaries, ICR.hi=%x\n",
		       this_cpu_id(), orig_icr_hi);
		return;
	}

	if (delivery_mode == APIC_ICR_DLVR_NMI) {
		/* TODO: must be sent via hypervisor */
		printk("Ignoring NMI IPI\n");
	} else if (delivery_mode == APIC_ICR_DLVR_INIT) {
		x86_send_init_sipi(target_cpu_id, X86_INIT, -1);
	} else if (delivery_mode == APIC_ICR_DLVR_SIPI) {
		x86_send_init_sipi(target_cpu_id, X86_SIPI,
				   icr_lo & APIC_ICR_VECTOR_MASK);
	} else {
		/* any remaining delivery mode goes to the hardware APIC */
		apic_ops.send_ipi(per_cpu(target_cpu_id)->apic_id, icr_lo);
	}
}
static enum mmio_result mmio_handle_subpage(void *arg, struct mmio_access *mmio) { const struct jailhouse_memory *mem = arg; u64 perm = mmio->is_write ? JAILHOUSE_MEM_WRITE : JAILHOUSE_MEM_READ; unsigned long page_virt = TEMPORARY_MAPPING_BASE + this_cpu_id() * PAGE_SIZE * NUM_TEMPORARY_PAGES; unsigned long page_phys = ((unsigned long)mem->phys_start + mmio->address) & PAGE_MASK; unsigned long virt_base; int err; /* check read/write access permissions */ if (!(mem->flags & perm)) goto invalid_access; /* width bit according to access size needs to be set */ if (!((mmio->size << JAILHOUSE_MEM_IO_WIDTH_SHIFT) & mem->flags)) goto invalid_access; /* naturally unaligned access needs to be allowed explicitly */ if (mmio->address & (mmio->size - 1) && !(mem->flags & JAILHOUSE_MEM_IO_UNALIGNED)) goto invalid_access; err = paging_create(&hv_paging_structs, page_phys, PAGE_SIZE, page_virt, PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE, PAGING_NON_COHERENT); if (err) goto invalid_access; /* * This virt_base gives the following effective virtual address in * mmio_perform_access: * * page_virt + (mem->phys_start & ~PAGE_MASK) + * (mmio->address & ~PAGE_MASK) * * Reason: mmio_perform_access does addr = base + mmio->address. */ virt_base = page_virt + (mem->phys_start & ~PAGE_MASK) - (mmio->address & PAGE_MASK); mmio_perform_access((void *)virt_base, mmio); return MMIO_HANDLED; invalid_access: panic_printk("FATAL: Invalid MMIO %s, address: %x, size: %x\n", mmio->is_write ? "write" : "read", mem->phys_start + mmio->address, mmio->size); return MMIO_ERROR; }
/*
 * Trigger a vCPU cache/TLB flush on every CPU of the given cell.
 *
 * The local CPU flushes directly via vcpu_tlb_flush(). Each remote CPU has
 * its flush_vcpu_caches flag set and is then kicked with an NMI IPI so it
 * performs the flush itself.
 */
void arch_flush_cell_vcpu_caches(struct cell *cell)
{
	unsigned int cpu;

	for_each_cpu(cpu, cell->cpu_set)
		if (cpu != this_cpu_id()) {
			per_cpu(cpu)->flush_vcpu_caches = true;
			/* make sure the value is written before we kick
			 * the remote core */
			memory_barrier();
			apic_send_nmi_ipi(per_cpu(cpu));
		} else {
			vcpu_tlb_flush();
		}
}