struct vcpu *__init alloc_dom0_vcpu0(struct domain *dom0)
{
    unsigned int max_vcpus = dom0_max_vcpus();

    dom0->node_affinity = dom0_nodes;
    dom0->auto_node_affinity = !dom0_nr_pxms;

    dom0->vcpu = xzalloc_array(struct vcpu *, max_vcpus);
    if ( !dom0->vcpu )
        return NULL;
    dom0->max_vcpus = max_vcpus;

    return dom0_setup_vcpu(dom0, 0,
                           cpumask_last(&dom0_cpus) /* so it wraps around to first pcpu */);
}
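The one subtle detail above is the cpumask_last() argument: the inline comment suggests dom0_setup_vcpu() treats it as the "previous" pCPU and advances past it, so seeding the search with the last CPU in dom0_cpus makes vCPU 0 land on the first pCPU of the mask. Below is a minimal, self-contained sketch of that wrap-around search, with the cpumask modelled as a plain 64-bit word; next_cpu_cycle() is a hypothetical stand-in for a cpumask_cycle()-style helper, not the Xen API itself.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for a cpumask_cycle()-style helper: return the
 * next set bit after 'prev', wrapping around to the lowest set bit.
 * Returns 64 if the mask is empty.
 */
static unsigned int next_cpu_cycle(unsigned int prev, uint64_t mask)
{
        for (unsigned int i = 1; i <= 64; i++) {
                unsigned int cpu = (prev + i) % 64;

                if (mask & (1ULL << cpu))
                        return cpu;
        }
        return 64;
}

int main(void)
{
        uint64_t dom0_cpus = 0xF0;      /* dom0 restricted to pCPUs 4-7 */
        unsigned int last = 7;          /* what cpumask_last(&dom0_cpus) would return */

        /* Seeding the search with the last pCPU wraps around to pCPU 4. */
        printf("vCPU0 -> pCPU%u\n", next_cpu_cycle(last, dom0_cpus));
        return 0;
}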
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
        int cur_cpu, vcpu;
        struct hv_send_ipi ipi_arg;
        int ret = 1;

        trace_hyperv_send_ipi_mask(mask, vector);

        if (cpumask_empty(mask))
                return true;

        if (!hv_hypercall_pg)
                return false;

        if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
                return false;

        /*
         * From the supplied CPU set we need to figure out if we can get away
         * with cheaper HVCALL_SEND_IPI hypercall. This is possible when the
         * highest VP number in the set is < 64. As VP numbers are usually in
         * ascending order and match Linux CPU ids, here is an optimization:
         * we check the VP number for the highest bit in the supplied set first
         * so we can quickly find out if using HVCALL_SEND_IPI_EX hypercall is
         * a must. We will also check all VP numbers when walking the supplied
         * CPU set to remain correct in all cases.
         */
        if (hv_cpu_number_to_vp_number(cpumask_last(mask)) >= 64)
                goto do_ex_hypercall;

        ipi_arg.vector = vector;
        ipi_arg.cpu_mask = 0;

        for_each_cpu(cur_cpu, mask) {
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
                if (vcpu == VP_INVAL)
                        return false;

                /*
                 * This particular version of the IPI hypercall can
                 * only target up to 64 CPUs.
                 */
                if (vcpu >= 64)
                        goto do_ex_hypercall;

                __set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
        }
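To make the cutoff in that loop concrete: the plain HVCALL_SEND_IPI request carries a single 64-bit mask, so it can only name VP numbers 0..63; any higher VP forces the sparse HVCALL_SEND_IPI_EX format. The user-space sketch below mimics just that selection; build_ipi_mask() and the sample VP arrays are illustrative stand-ins, not part of the Hyper-V ABI or the kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the decision made in __send_ipi_mask():
 * build the 64-bit target mask if every VP number fits below 64,
 * otherwise report that the _EX hypercall format is required.
 */
static bool build_ipi_mask(const unsigned int *vp, size_t n, uint64_t *mask)
{
        *mask = 0;
        for (size_t i = 0; i < n; i++) {
                if (vp[i] >= 64)
                        return false;           /* caller must use the _EX variant */
                *mask |= 1ULL << vp[i];
        }
        return true;                            /* plain HVCALL_SEND_IPI is enough */
}

int main(void)
{
        unsigned int small[] = { 0, 3, 17 };
        unsigned int large[] = { 2, 70 };
        uint64_t mask;

        printf("small set -> %s\n",
               build_ipi_mask(small, 3, &mask) ? "HVCALL_SEND_IPI" : "HVCALL_SEND_IPI_EX");
        printf("large set -> %s\n",
               build_ipi_mask(large, 2, &mask) ? "HVCALL_SEND_IPI" : "HVCALL_SEND_IPI_EX");
        return 0;
}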
static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                    const struct flush_tlb_info *info)
{
        int cpu, vcpu, gva_n, max_gvas;
        struct hv_tlb_flush **flush_pcpu;
        struct hv_tlb_flush *flush;
        u64 status = U64_MAX;
        unsigned long flags;

        trace_hyperv_mmu_flush_tlb_others(cpus, info);

        if (!hv_hypercall_pg)
                goto do_native;

        if (cpumask_empty(cpus))
                return;

        local_irq_save(flags);

        flush_pcpu = (struct hv_tlb_flush **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);

        flush = *flush_pcpu;

        if (unlikely(!flush)) {
                local_irq_restore(flags);
                goto do_native;
        }

        if (info->mm) {
                /*
                 * AddressSpace argument must match the CR3 with PCID bits
                 * stripped out.
                 */
                flush->address_space = virt_to_phys(info->mm->pgd);
                flush->address_space &= CR3_ADDR_MASK;
                flush->flags = 0;
        } else {
                flush->address_space = 0;
                flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
        }

        flush->processor_mask = 0;
        if (cpumask_equal(cpus, cpu_present_mask)) {
                flush->flags |= HV_FLUSH_ALL_PROCESSORS;
        } else {
                /*
                 * From the supplied CPU set we need to figure out if we can get
                 * away with cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
                 * hypercalls. This is possible when the highest VP number in
                 * the set is < 64. As VP numbers are usually in ascending order
                 * and match Linux CPU ids, here is an optimization: we check
                 * the VP number for the highest bit in the supplied set first
                 * so we can quickly find out if using *_EX hypercalls is a
                 * must. We will also check all VP numbers when walking the
                 * supplied CPU set to remain correct in all cases.
                 */
                if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
                        goto do_ex_hypercall;

                for_each_cpu(cpu, cpus) {
                        vcpu = hv_cpu_number_to_vp_number(cpu);
                        if (vcpu == VP_INVAL) {
                                local_irq_restore(flags);
                                goto do_native;
                        }

                        if (vcpu >= 64)
                                goto do_ex_hypercall;

                        __set_bit(vcpu, (unsigned long *)
                                  &flush->processor_mask);
                }
        }
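The address-space handling above deserves a concrete illustration: for a specific mm, Hyper-V expects the page-table root as it would appear in CR3, but with the PCID carried in the low 12 bits stripped off, which is what the CR3_ADDR_MASK operation accomplishes. The sketch below shows only that masking step, using a made-up CR3 value and a local EXAMPLE_CR3_ADDR_MASK constant rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Same idea as CR3_ADDR_MASK in the kernel: keep the page-aligned
 * page-table root, drop the PCID held in bits 0-11.  Both the mask
 * name and the CR3 value below are made up for illustration.
 */
#define EXAMPLE_CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ULL

int main(void)
{
        uint64_t cr3 = 0x000000012345A007ULL;   /* root 0x12345A000, PCID 0x007 */
        uint64_t address_space = cr3 & EXAMPLE_CR3_ADDR_MASK;

        printf("CR3           = %#llx\n", (unsigned long long)cr3);
        printf("address_space = %#llx\n", (unsigned long long)address_space);
        return 0;
}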