Example #1
static int amd_vpmu_save(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctx = vpmu->context;
    unsigned int i;

    /*
     * Stop the counters. If we came here via vpmu_save_force (i.e.
     * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
     */
    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
    {
        vpmu_set(vpmu, VPMU_FROZEN);

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], 0);

        return 0;
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return 0;

    context_save(v);

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
        amd_vpmu_unset_msr_bitmap(v);

    return 1;
}
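
The save path above is driven entirely by the VPMU state bits (VPMU_CONTEXT_SAVE, VPMU_CONTEXT_LOADED, VPMU_FROZEN, VPMU_RUNNING). As a minimal sketch of how such flag helpers can be expressed, assuming a flags field on struct vpmu_struct (illustrative only, not copied from the Xen headers):

/* Illustrative flag helpers; Xen provides its own equivalents. */
#define vpmu_set(vpmu, flag)     ((vpmu)->flags |= (flag))
#define vpmu_reset(vpmu, flag)   ((vpmu)->flags &= ~(flag))
#define vpmu_is_set(vpmu, flag)  (!!((vpmu)->flags & (flag)))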
Example #2
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                             uint64_t supported)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    ASSERT(!supported);

    /* For all counters, enable guest-only mode for HVM guests */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        !(is_guest_mode(msr_content)) )
    {
        set_guest_mode(msr_content);
    }

    /* Check whether a counter is being enabled while the vPMU is not yet running */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
            return 1;
        vpmu_set(vpmu, VPMU_RUNNING);
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;

        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_set_msr_bitmap(v);
    }

    /* Stop saving & restoring state when the guest stops the counters */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
        vpmu_reset(vpmu, VPMU_RUNNING);
        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_unset_msr_bitmap(v);
        release_pmu_ownship(PMU_OWNER_HVM);
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
        || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    /* Update vpmu context immediately */
    context_update(msr, msr_content);

    /* Write to hw counters */
    wrmsrl(msr, msr_content);
    return 1;
}
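
The write handler above leans on small predicates over the control MSR value. A sketch of what they can look like, assuming the AMD PerfEvtSel layout in which bit 22 is the counter enable (EN) bit and bit 40 requests guest-only counting; the constant and macro names are illustrative, not the exact Xen definitions:

#define PERF_CTL_ENABLE      (1ULL << 22)  /* EN: counter enable (assumed layout) */
#define PERF_CTL_GUEST_ONLY  (1ULL << 40)  /* count only while the guest runs (assumed layout) */

/* Does the value being written enable the counter? */
#define is_pmu_enabled(msr_content)  (!!((msr_content) & PERF_CTL_ENABLE))

/* Is guest-only counting already requested? */
#define is_guest_mode(msr_content)   (!!((msr_content) & PERF_CTL_GUEST_ONLY))

/* Force guest-only counting before the value reaches hardware. */
#define set_guest_mode(msr_content)  ((msr_content) |= PERF_CTL_GUEST_ONLY)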
Example #3
static void amd_vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    /* Revoke direct guest access to the PMU MSRs if it was granted. */
    if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
        amd_vpmu_unset_msr_bitmap(v);

    /* Free the saved counter/control context. */
    xfree(vpmu->context);
    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);

    /* If the counters were still running, give up PMU ownership. */
    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        vpmu_reset(vpmu, VPMU_RUNNING);
        release_pmu_ownship(PMU_OWNER_HVM);
    }
}
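
These handlers are reached through a per-vendor ops table rather than being called directly by the common vPMU code. A sketch of such wiring; the structure layout and field names here are assumptions for illustration, not the exact Xen declarations:

struct arch_vpmu_ops {
    int  (*do_wrmsr)(unsigned int msr, uint64_t msr_content,
                     uint64_t supported);
    int  (*arch_vpmu_save)(struct vcpu *v);
    void (*arch_vpmu_destroy)(struct vcpu *v);
};

static const struct arch_vpmu_ops amd_vpmu_ops = {
    .do_wrmsr          = amd_vpmu_do_wrmsr,
    .arch_vpmu_save    = amd_vpmu_save,
    .arch_vpmu_destroy = amd_vpmu_destroy,
};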