/*
 * Raise softirq @nr on every CPU in @mask and kick the ones that need it.
 *
 * A CPU whose pending bit for @nr was already set is dropped from the
 * local copy of @mask before the IPI is sent: it will observe the softirq
 * on its own, so interrupting it again would be redundant.
 *
 * NOTE(review): @mask is passed by value, so the caller's mask is never
 * modified; on large-NR_CPUS builds this copy is not free.
 */
void cpumask_raise_softirq(cpumask_t mask, unsigned int nr)
{
    int cpu;

    for_each_cpu_mask ( cpu, mask )
    {
        /* Old bit value non-zero => softirq already pending; no IPI needed. */
        if ( test_and_set_bit(nr, &softirq_pending(cpu)) )
            cpu_clear(cpu, mask);
    }

    smp_send_event_check_mask(&mask);
}
/*
 * Handle a Viridian (Hyper-V enlightenment) hypercall from an HVM guest.
 *
 * The hypercall ABI packs the input/output control words and parameter
 * GPAs into different registers depending on the guest's execution mode
 * (see the mode switch below).  Returns HVM_HCALL_completed in all cases;
 * the Viridian status code is reported back to the guest through the
 * output word written into the guest registers.
 */
int viridian_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    /* 8 => 64-bit guest mode, 4 => 32-bit; anything else is rejected below. */
    int mode = hvm_guest_x86_mode(curr);
    unsigned long input_params_gpa, output_params_gpa;
    uint16_t status = HV_STATUS_SUCCESS;

    /* Layout of the 64-bit hypercall input value (control word). */
    union hypercall_input {
        uint64_t raw;
        struct {
            uint16_t call_code;
            uint16_t fast:1;       /* Fast calls pass params in registers. */
            uint16_t rsvd1:15;
            uint16_t rep_count:12; /* Number of repetitions requested. */
            uint16_t rsvd2:4;
            uint16_t rep_start:12; /* First repetition to process. */
            uint16_t rsvd3:4;
        };
    } input;

    /* Layout of the 64-bit hypercall result value returned to the guest. */
    union hypercall_output {
        uint64_t raw;
        struct {
            uint16_t result;       /* HV_STATUS_* code. */
            uint16_t rsvd1;
            uint32_t rep_complete:12; /* Repetitions actually completed. */
            uint32_t rsvd2:20;
        };
    } output = { 0 };

    ASSERT(is_viridian_domain(currd));

    /* Unpack the call arguments according to the guest's register ABI. */
    switch ( mode )
    {
    case 8:
        /* 64-bit convention: rcx = control word, rdx/r8 = param GPAs. */
        input.raw = regs->rcx;
        input_params_gpa = regs->rdx;
        output_params_gpa = regs->r8;
        break;
    case 4:
        /* 32-bit convention: each 64-bit value is split across two GPRs. */
        input.raw = (regs->rdx << 32) | regs->_eax;
        input_params_gpa = (regs->rbx << 32) | regs->_ecx;
        output_params_gpa = (regs->rdi << 32) | regs->_esi;
        break;
    default:
        /* Unsupported guest mode: report the (still SUCCESS-initialised)
         * status without decoding a call code.  NOTE(review): falling out
         * here leaves status == HV_STATUS_SUCCESS — confirm this matches
         * the intended behaviour for e.g. real-mode callers. */
        goto out;
    }

    switch ( input.call_code )
    {
    case HvNotifyLongSpinWait:
        /*
         * See Microsoft Hypervisor Top Level Spec. section 18.5.1.
         * The guest is spinning on a lock; yield the pCPU so the lock
         * holder can make progress.
         */
        perfc_incr(mshv_call_long_wait);
        do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
        status = HV_STATUS_SUCCESS;
        break;

    case HvFlushVirtualAddressSpace:
    case HvFlushVirtualAddressList:
    {
        cpumask_t *pcpu_mask;
        struct vcpu *v;
        struct {
            uint64_t address_space;
            uint64_t flags;
            uint64_t vcpu_mask;    /* Bitmap of target vCPU ids. */
        } input_params;

        /*
         * See Microsoft Hypervisor Top Level Spec. sections 12.4.2
         * and 12.4.3.
         */
        perfc_incr(mshv_call_flush);

        /* These hypercalls should never use the fast-call convention. */
        status = HV_STATUS_INVALID_PARAMETER;
        if ( input.fast )
            break;

        /* Get input parameters (status still INVALID_PARAMETER on failure). */
        if ( hvm_copy_from_guest_phys(&input_params,
                                      input_params_gpa,
                                      sizeof(input_params)) != HVMCOPY_okay )
            break;

        /*
         * It is not clear from the spec. if we are supposed to
         * include current virtual CPU in the set or not in this case,
         * so err on the safe side.
         */
        if ( input_params.flags & HV_FLUSH_ALL_PROCESSORS )
            input_params.vcpu_mask = ~0ul;

        /* Per-pCPU scratch mask of physical CPUs that need an IPI. */
        pcpu_mask = &this_cpu(ipi_cpumask);
        cpumask_clear(pcpu_mask);

        /*
         * For each specified virtual CPU flush all ASIDs to invalidate
         * TLB entries the next time it is scheduled and then, if it
         * is currently running, add its physical CPU to a mask of
         * those which need to be interrupted to force a flush.
         */
        for_each_vcpu ( currd, v )
        {
            /* vcpu_mask is 64 bits wide; ids beyond it cannot be targeted. */
            if ( v->vcpu_id >= (sizeof(input_params.vcpu_mask) * 8) )
                break;

            if ( !(input_params.vcpu_mask & (1ul << v->vcpu_id)) )
                continue;

            hvm_asid_flush_vcpu(v);
            /* The current vCPU's flush takes effect on hypercall return. */
            if ( v != curr && v->is_running )
                __cpumask_set_cpu(v->processor, pcpu_mask);
        }

        /*
         * Since ASIDs have now been flushed it just remains to
         * force any CPUs currently running target vCPUs out of non-
         * root mode. It's possible that re-scheduling has taken place
         * so we may unnecessarily IPI some CPUs.
         */
        if ( !cpumask_empty(pcpu_mask) )
            smp_send_event_check_mask(pcpu_mask);

        /* Report every requested repetition as completed. */
        output.rep_complete = input.rep_count;

        status = HV_STATUS_SUCCESS;
        break;
    }

    default:
        status = HV_STATUS_INVALID_HYPERCALL_CODE;
        break;
    }

 out:
    /* Fold the status into the output word and hand it back per the ABI. */
    output.result = status;
    switch (mode) {
    case 8:
        regs->rax = output.raw;
        break;
    default:
        /* 32-bit (and fallthrough) convention: split across edx:eax. */
        regs->rdx = output.raw >> 32;
        regs->rax = (uint32_t)output.raw;
        break;
    }

    return HVM_HCALL_completed;
}