/*
 * Service a guest read of a Viridian (Hyper-V compatible) synthetic MSR.
 *
 * Returns 1 with *val filled in when the MSR is handled here, 0 when the
 * MSR is unknown or the domain has not enabled the Viridian interface
 * (the caller then falls back to its default handling).
 */
int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;

    /* Non-Viridian guests see none of these MSRs. */
    if ( !is_viridian_domain(currd) )
        return 0;

    switch ( idx )
    {
    case VIRIDIAN_MSR_GUEST_OS_ID:
        perfc_incr(mshv_rdmsr_osid);
        *val = currd->arch.hvm_domain.viridian.guest_os_id.raw;
        break;

    case VIRIDIAN_MSR_HYPERCALL:
        perfc_incr(mshv_rdmsr_hc_page);
        *val = currd->arch.hvm_domain.viridian.hypercall_gpa.raw;
        break;

    case VIRIDIAN_MSR_VP_INDEX:
        perfc_incr(mshv_rdmsr_vp_index);
        *val = curr->vcpu_id;
        break;

    case VIRIDIAN_MSR_TSC_FREQUENCY:
        perfc_incr(mshv_rdmsr_tsc_frequency);
        /* Guest TSC frequency, reported in Hz. */
        *val = (uint64_t)currd->arch.tsc_khz * 1000ull;
        break;

    case VIRIDIAN_MSR_APIC_FREQUENCY:
        perfc_incr(mshv_rdmsr_apic_frequency);
        /* APIC bus frequency in Hz, derived from the bus cycle period. */
        *val = 1000000000ull / APIC_BUS_CYCLE_NS;
        break;

    case VIRIDIAN_MSR_ICR:
    {
        struct vlapic *vlapic = vcpu_vlapic(curr);

        perfc_incr(mshv_rdmsr_icr);
        /* ICR2 (destination) forms the upper 32 bits of the value. */
        *val = ((uint64_t)vlapic_get_reg(vlapic, APIC_ICR2) << 32) |
               vlapic_get_reg(vlapic, APIC_ICR);
        break;
    }

    case VIRIDIAN_MSR_TPR:
        perfc_incr(mshv_rdmsr_tpr);
        *val = vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI);
        break;

    case VIRIDIAN_MSR_APIC_ASSIST:
        perfc_incr(mshv_rdmsr_apic_msr);
        *val = curr->arch.hvm_vcpu.viridian.apic_assist.raw;
        break;

    default:
        return 0;
    }

    return 1;
}
void vpmu_do_interrupt(struct cpu_user_regs *regs) { struct vcpu *v = current; struct vpmu_struct *vpmu = vcpu_vpmu(v); if ( vpmu->arch_vpmu_ops ) { struct vlapic *vlapic = vcpu_vlapic(v); u32 vlapic_lvtpc; if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) || !is_vlapic_lvtpc_enabled(vlapic) ) return; vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC); switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) ) { case APIC_MODE_FIXED: vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0); break; case APIC_MODE_NMI: v->nmi_pending = 1; break; } } }
/*
 * Service a guest read of a Viridian (Hyper-V compatible) synthetic MSR.
 *
 * Returns 1 with *val set on success, 0 for unknown MSRs or when the
 * domain has not enabled the Viridian interface.
 */
int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;

    /* Non-Viridian guests see none of these MSRs. */
    if ( !is_viridian_domain(currd) )
        return 0;

    switch ( idx )
    {
    case VIRIDIAN_MSR_GUEST_OS_ID:
        perfc_incr(mshv_rdmsr_osid);
        *val = currd->arch.hvm_domain.viridian.guest_os_id.raw;
        break;

    case VIRIDIAN_MSR_HYPERCALL:
        perfc_incr(mshv_rdmsr_hc_page);
        *val = currd->arch.hvm_domain.viridian.hypercall_gpa.raw;
        break;

    case VIRIDIAN_MSR_VP_INDEX:
        perfc_incr(mshv_rdmsr_vp_index);
        *val = curr->vcpu_id;
        break;

    case VIRIDIAN_MSR_ICR:
    {
        struct vlapic *vlapic = vcpu_vlapic(curr);

        perfc_incr(mshv_rdmsr_icr);
        /* ICR2 (destination) forms the upper 32 bits of the value. */
        *val = ((uint64_t)vlapic_get_reg(vlapic, APIC_ICR2) << 32) |
               vlapic_get_reg(vlapic, APIC_ICR);
        break;
    }

    case VIRIDIAN_MSR_TPR:
        perfc_incr(mshv_rdmsr_tpr);
        *val = vlapic_get_reg(vcpu_vlapic(curr), APIC_TASKPRI);
        break;

    default:
        return 0;
    }

    return 1;
}
void vpmu_do_interrupt(struct cpu_user_regs *regs) { struct vcpu *sampled = current, *sampling; struct vpmu_struct *vpmu; struct vlapic *vlapic; u32 vlapic_lvtpc; /* * dom0 will handle interrupt for special domains (e.g. idle domain) or, * in XENPMU_MODE_ALL, for everyone. */ if ( (vpmu_mode & XENPMU_MODE_ALL) || (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) ) { sampling = choose_hwdom_vcpu(); if ( !sampling ) return; } else sampling = sampled; vpmu = vcpu_vpmu(sampling); if ( !vpmu->arch_vpmu_ops ) return; /* PV(H) guest */ if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) ) { const struct cpu_user_regs *cur_regs; uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags; domid_t domid; if ( !vpmu->xenpmu_data ) return; if ( is_pvh_vcpu(sampling) && !(vpmu_mode & XENPMU_MODE_ALL) && !vpmu->arch_vpmu_ops->do_interrupt(regs) ) return; if ( vpmu_is_set(vpmu, VPMU_CACHED) ) return; /* PV guest will be reading PMU MSRs from xenpmu_data */ vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED); vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1); vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED); if ( has_hvm_container_vcpu(sampled) ) *flags = 0; else *flags = PMU_SAMPLE_PV; if ( sampled == sampling ) domid = DOMID_SELF; else domid = sampled->domain->domain_id; /* Store appropriate registers in xenpmu_data */ /* FIXME: 32-bit PVH should go here as well */ if ( is_pv_32bit_vcpu(sampling) ) { /* * 32-bit dom0 cannot process Xen's addresses (which are 64 bit) * and therefore we treat it the same way as a non-privileged * PV 32-bit domain. 
*/ struct compat_pmu_regs *cmp; cur_regs = guest_cpu_user_regs(); cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs; cmp->ip = cur_regs->rip; cmp->sp = cur_regs->rsp; cmp->flags = cur_regs->eflags; cmp->ss = cur_regs->ss; cmp->cs = cur_regs->cs; if ( (cmp->cs & 3) > 1 ) *flags |= PMU_SAMPLE_USER; } else { struct xen_pmu_regs *r = &vpmu->xenpmu_data->pmu.r.regs; if ( (vpmu_mode & XENPMU_MODE_SELF) ) cur_regs = guest_cpu_user_regs(); else if ( !guest_mode(regs) && is_hardware_domain(sampling->domain) ) { cur_regs = regs; domid = DOMID_XEN; } else cur_regs = guest_cpu_user_regs(); r->ip = cur_regs->rip; r->sp = cur_regs->rsp; r->flags = cur_regs->eflags; if ( !has_hvm_container_vcpu(sampled) ) { r->ss = cur_regs->ss; r->cs = cur_regs->cs; if ( !(sampled->arch.flags & TF_kernel_mode) ) *flags |= PMU_SAMPLE_USER; } else { struct segment_register seg; hvm_get_segment_register(sampled, x86_seg_cs, &seg); r->cs = seg.sel; hvm_get_segment_register(sampled, x86_seg_ss, &seg); r->ss = seg.sel; r->cpl = seg.attr.fields.dpl; if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ) *flags |= PMU_SAMPLE_REAL; } } vpmu->xenpmu_data->domain_id = domid; vpmu->xenpmu_data->vcpu_id = sampled->vcpu_id; if ( is_hardware_domain(sampling->domain) ) vpmu->xenpmu_data->pcpu_id = smp_processor_id(); else vpmu->xenpmu_data->pcpu_id = sampled->vcpu_id; vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED; apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc); *flags |= PMU_CACHED; vpmu_set(vpmu, VPMU_CACHED); send_guest_vcpu_virq(sampling, VIRQ_XENPMU); return; } /* HVM guests */ vlapic = vcpu_vlapic(sampling); /* We don't support (yet) HVM dom0 */ ASSERT(sampling == sampled); if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) || !is_vlapic_lvtpc_enabled(vlapic) ) return; vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC); switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) ) { case APIC_MODE_FIXED: vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0); break; case APIC_MODE_NMI: sampling->nmi_pending = 1; break; } }
/*
 * Service a guest read of a Viridian (Hyper-V compatible) synthetic MSR.
 *
 * Returns 1 with *val filled in when the MSR is recognised and permitted
 * by the domain's viridian feature mask, 0 otherwise (the caller then
 * falls back to its default handling for the read).
 */
int rdmsr_viridian_regs(uint32_t idx, uint64_t *val)
{
    struct vcpu *v = current;
    struct domain *d = v->domain;

    /* Only domains with the Viridian interface enabled see these MSRs. */
    if ( !is_viridian_domain(d) )
        return 0;

    switch ( idx )
    {
    case VIRIDIAN_MSR_GUEST_OS_ID:
        perfc_incr(mshv_rdmsr_osid);
        *val = d->arch.hvm_domain.viridian.guest_os_id.raw;
        break;

    case VIRIDIAN_MSR_HYPERCALL:
        perfc_incr(mshv_rdmsr_hc_page);
        *val = d->arch.hvm_domain.viridian.hypercall_gpa.raw;
        break;

    case VIRIDIAN_MSR_VP_INDEX:
        perfc_incr(mshv_rdmsr_vp_index);
        *val = v->vcpu_id;
        break;

    case VIRIDIAN_MSR_TSC_FREQUENCY:
        /* Frequency MSRs can be explicitly disabled for this domain. */
        if ( viridian_feature_mask(d) & HVMPV_no_freq )
            return 0;
        perfc_incr(mshv_rdmsr_tsc_frequency);
        /* Guest TSC frequency, reported in Hz. */
        *val = (uint64_t)d->arch.tsc_khz * 1000ull;
        break;

    case VIRIDIAN_MSR_APIC_FREQUENCY:
        if ( viridian_feature_mask(d) & HVMPV_no_freq )
            return 0;
        perfc_incr(mshv_rdmsr_apic_frequency);
        /* APIC bus frequency in Hz, derived from the bus cycle period. */
        *val = 1000000000ull / APIC_BUS_CYCLE_NS;
        break;

    case VIRIDIAN_MSR_ICR:
        perfc_incr(mshv_rdmsr_icr);
        /* ICR2 (destination) forms the upper 32 bits of the value. */
        *val = (((uint64_t)vlapic_get_reg(vcpu_vlapic(v), APIC_ICR2) << 32) |
                vlapic_get_reg(vcpu_vlapic(v), APIC_ICR));
        break;

    case VIRIDIAN_MSR_TPR:
        perfc_incr(mshv_rdmsr_tpr);
        *val = vlapic_get_reg(vcpu_vlapic(v), APIC_TASKPRI);
        break;

    case VIRIDIAN_MSR_APIC_ASSIST:
        perfc_incr(mshv_rdmsr_apic_msr);
        *val = v->arch.hvm_vcpu.viridian.apic_assist.msr.raw;
        break;

    case VIRIDIAN_MSR_REFERENCE_TSC:
        /* Only available when the reference-TSC feature is enabled. */
        if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
            return 0;
        perfc_incr(mshv_rdmsr_tsc_msr);
        *val = d->arch.hvm_domain.viridian.reference_tsc.raw;
        break;

    case VIRIDIAN_MSR_TIME_REF_COUNT:
    {
        struct viridian_time_ref_count *trc;

        trc = &d->arch.hvm_domain.viridian.time_ref_count;

        /* Only available when the time-ref-count feature is enabled. */
        if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
            return 0;

        /* Log only the guest's first read of this counter. */
        if ( !test_and_set_bit(_TRC_accessed, &trc->flags) )
            printk(XENLOG_G_INFO "d%d: VIRIDIAN MSR_TIME_REF_COUNT: accessed\n",
                   d->domain_id);

        perfc_incr(mshv_rdmsr_time_ref_count);
        /* Current reference count, adjusted by the saved/restored offset. */
        *val = raw_trc_val(d) + trc->off;
        break;
    }

    default:
        return 0;
    }

    return 1;
}