Example #1
File: vpmu.c Project: Fantu/Xen
void vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return;

    /*
     * Need to clear last_vcpu in case it points to v.
     * We can check here non-atomically whether it is 'v' since
     * last_vcpu can never become 'v' again at this point.
     * We will test it again in vpmu_clear_last() with interrupts
     * disabled to make sure we don't clear someone else.
     */
    if ( per_cpu(last_vcpu, vpmu->last_pcpu) == v )
        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                         vpmu_clear_last, v, 1);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
    {
        /* Unload VPMU first. This will stop counters */
        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                         vpmu_save_force, v, 1);
        vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
    }

    spin_lock(&vpmu_lock);
    if ( !is_hardware_domain(v->domain) )
        vpmu_count--;
    spin_unlock(&vpmu_lock);
}
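The comment above refers to vpmu_clear_last(), which is not among these examples. A minimal sketch of what such a callback could look like, assuming last_vcpu is the per-CPU variable used throughout this file (a hypothetical reconstruction, not the verbatim Xen implementation):

static void vpmu_clear_last(void *arg)
{
    /* Runs via on_selected_cpus() with interrupts disabled, so this
     * re-check against 'arg' cannot race with a concurrent reload. */
    if ( this_cpu(last_vcpu) == arg )
        this_cpu(last_vcpu) = NULL;
}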
Example #2
void vpmu_load(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    int pcpu = smp_processor_id();
    struct vcpu *prev = NULL;

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return;

    /* First time this VCPU is running here */
    if ( vpmu->last_pcpu != pcpu )
    {
        /*
     * Get the context from the last pcpu that we ran on. Note that if
     * another VCPU is running there it must have saved this VCPU's context
     * before starting to run (see below).
     * There should be no race since the remote pcpu will disable interrupts
     * before saving the context.
         */
        if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        {
            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
            on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                             vpmu_save_force, (void *)v, 1);
            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
        }
    }

    /* Prevent forced context save from remote CPU */
    local_irq_disable();

    prev = per_cpu(last_vcpu, pcpu);

    if ( prev != v && prev )
    {
        vpmu = vcpu_vpmu(prev);

        /* Someone ran here before us */
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
        vpmu_save_force(prev);
        vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);

        vpmu = vcpu_vpmu(v);
    }

    local_irq_enable();

    /* Only when PMU is counting, we load PMU context immediately. */
    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
        return;

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
    {
        apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
        /* Arch code needs to set VPMU_CONTEXT_LOADED */
        vpmu->arch_vpmu_ops->arch_vpmu_load(v);
    }
}
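vpmu_load() is meant to pair with vpmu_save() (Example #29) around a context switch. A hypothetical scheduler hook showing the pairing (sched_switch_vpmu is an illustrative name, not a Xen function):

static void sched_switch_vpmu(struct vcpu *prev, struct vcpu *next)
{
    vpmu_save(prev);   /* records prev as this pcpu's last_vcpu */
    vpmu_load(next);   /* may pull next's context off a remote pcpu */
}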
Example #3
File: vpmu.c Project: Fantu/Xen
static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    uint64_t mfn;
    void *xenpmu_data;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return;

    v = d->vcpu[params->vcpu];
    if ( v != current )
        vcpu_pause(v);

    vpmu = vcpu_vpmu(v);
    spin_lock(&vpmu->vpmu_lock);

    vpmu_destroy(v);
    xenpmu_data = vpmu->xenpmu_data;
    vpmu->xenpmu_data = NULL;

    spin_unlock(&vpmu->vpmu_lock);

    if ( xenpmu_data )
    {
        mfn = domain_page_map_to_mfn(xenpmu_data);
        ASSERT(mfn_valid(mfn));
        unmap_domain_page_global(xenpmu_data);
        put_page_and_type(mfn_to_page(mfn));
    }

    if ( v != current )
        vcpu_unpause(v);
}
Example #4
File: vpmu.c Project: Fantu/Xen
/* Dump some vpmu information to the console. Used in the keyhandler dump_domains(). */
void vpmu_dump(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_dump )
        vpmu->arch_vpmu_ops->arch_vpmu_dump(v);
}
Example #5
void vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( vpmu->arch_vpmu_ops )
    {
        struct vlapic *vlapic = vcpu_vlapic(v);
        u32 vlapic_lvtpc;

        if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) ||
             !is_vlapic_lvtpc_enabled(vlapic) )
            return;

        vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);

        switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) )
        {
        case APIC_MODE_FIXED:
            vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
            break;
        case APIC_MODE_NMI:
            v->nmi_pending = 1;
            break;
        }
    }
}
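GET_APIC_DELIVERY_MODE extracts the delivery-mode field of the LVT entry. Assuming the standard local APIC LVT layout, with the delivery mode in bits 10:8, an equivalent sketch would be (illustrative only; Xen defines the real macro in its APIC headers):

#define LVT_DELIVERY_MODE(lvt) (((lvt) >> 8) & 0x7)

APIC_MODE_FIXED and APIC_MODE_NMI thus select between injecting the guest's programmed vector and flagging a virtual NMI.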
Example #6
void vpmu_initialise(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t vendor = current_cpu_data.x86_vendor;

    if ( is_pvh_vcpu(v) )
        return;

    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        vpmu_destroy(v);
    vpmu_clear(vpmu);
    vpmu->context = NULL;

    switch ( vendor )
    {
    case X86_VENDOR_AMD:
        if ( svm_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
            opt_vpmu_enabled = 0;
        break;

    case X86_VENDOR_INTEL:
        if ( vmx_vpmu_initialise(v, opt_vpmu_enabled) != 0 )
            opt_vpmu_enabled = 0;
        break;

    default:
        printk("VPMU: Initialization failed. "
               "Unknown CPU vendor %d\n", vendor);
        opt_vpmu_enabled = 0;
        break;
    }
}
Example #7
static int amd_vpmu_save(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctx = vpmu->context;
    unsigned int i;

    /*
     * Stop the counters. If we came here via vpmu_save_force (i.e.
     * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
     */
    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
    {
        vpmu_set(vpmu, VPMU_FROZEN);

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], 0);

        return 0;
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return 0;

    context_save(v);

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
        amd_vpmu_unset_msr_bitmap(v);

    return 1;
}
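The VPMU_* helpers used throughout these examples are simple bit operations on a flags word. A minimal sketch, assuming a 'flags' member in struct vpmu_struct (consistent with how the examples use them; the exact Xen definitions may differ):

static inline void vpmu_set(struct vpmu_struct *vpmu, u32 mask)
{
    vpmu->flags |= mask;
}

static inline void vpmu_reset(struct vpmu_struct *vpmu, u32 mask)
{
    vpmu->flags &= ~mask;
}

static inline int vpmu_is_set(const struct vpmu_struct *vpmu, u32 mask)
{
    return !!(vpmu->flags & mask);
}

static inline int vpmu_are_all_set(const struct vpmu_struct *vpmu, u32 mask)
{
    return (vpmu->flags & mask) == mask;
}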
Example #8
static void context_update(unsigned int msr, u64 msr_content)
{
    unsigned int i;
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    if ( k7_counters_mirrored &&
        ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)) )
    {
        msr = get_fam15h_addr(msr);
    }

    for ( i = 0; i < num_counters; i++ )
    {
        if ( msr == ctrls[i] )
        {
            ctxt->ctrls[i] = msr_content;
            return;
        }
        else if ( msr == counters[i] )
        {
            ctxt->counters[i] = msr_content;
            return;
        }
    }
}
Example #9
int svm_vpmu_initialise(struct vcpu *v, unsigned int vpmu_flags)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t family = current_cpu_data.x86;
    int ret = 0;

    /* vpmu enabled? */
    if ( !vpmu_flags )
        return 0;

    switch ( family )
    {
    case 0x10:
    case 0x12:
    case 0x14:
    case 0x15:
    case 0x16:
        ret = amd_vpmu_initialise(v);
        if ( !ret )
            vpmu->arch_vpmu_ops = &amd_vpmu_ops;
        return ret;
    }

    printk("VPMU: Initialization failed. "
           "AMD processor family %d has not "
           "been supported\n", family);
    return -EINVAL;
}
Example #10
int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(current);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_rdmsr )
        return vpmu->arch_vpmu_ops->do_rdmsr(msr, msr_content);
    return 0;
}
Example #11
int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content, uint64_t supported)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(current);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_wrmsr )
        return vpmu->arch_vpmu_ops->do_wrmsr(msr, msr_content, supported);
    return 0;
}
Example #12
static void ppro_free_msr(struct vcpu *v)
{
	struct vpmu_struct *vpmu = vcpu_vpmu(v);

	if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )
		return;
	xfree(vpmu->context);
	vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
}
Example #13
File: vpmu.c Project: Fantu/Xen
void vpmu_do_cpuid(unsigned int input,
                   unsigned int *eax, unsigned int *ebx,
                   unsigned int *ecx, unsigned int *edx)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(current);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->do_cpuid )
        vpmu->arch_vpmu_ops->do_cpuid(input, eax, ebx, ecx, edx);
}
Example #14
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                             uint64_t supported)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    ASSERT(!supported);

    /* For all counters, enable guest-only mode for HVM guests */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        !(is_guest_mode(msr_content)) )
    {
        set_guest_mode(msr_content);
    }

    /* check if the first counter is enabled */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
            return 1;
        vpmu_set(vpmu, VPMU_RUNNING);
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;

        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_set_msr_bitmap(v);
    }

    /* Stop saving & restoring if the guest stops the first counter */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
        vpmu_reset(vpmu, VPMU_RUNNING);
        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_unset_msr_bitmap(v);
        release_pmu_ownship(PMU_OWNER_HVM);
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
        || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    /* Update vpmu context immediately */
    context_update(msr, msr_content);

    /* Write to hw counters */
    wrmsrl(msr, msr_content);
    return 1;
}
Example #15
static inline void context_save(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    /* No need to save controls -- they are saved in amd_vpmu_do_wrmsr */
    for ( i = 0; i < num_counters; i++ )
        rdmsrl(counters[i], ctxt->counters[i]);
}
Example #16
static void put_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    if ( !vpmu_available(v) )
        goto out;

    if ( !is_hardware_domain(v->domain) &&
         (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count--;
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);

 out:
    spin_unlock(&vpmu_lock);
}
Example #17
static inline void context_load(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    for ( i = 0; i < num_counters; i++ )
    {
        wrmsrl(counters[i], ctxt->counters[i]);
        wrmsrl(ctrls[i], ctxt->ctrls[i]);
    }
}
Example #18
static int ppro_check_ctrs(unsigned int const cpu,
                           struct op_msrs const * const msrs,
                           struct cpu_user_regs const * const regs)
{
	u64 val;
	int i;
	int ovf = 0;
	unsigned long eip = regs->eip;
	int mode = xenoprofile_get_mode(current, regs);
	struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context;

	for (i = 0 ; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			xenoprof_log_event(current, regs, eip, mode, i);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
			if ( is_passive(current->domain) && (mode != 2) &&
				vpmu_is_set(vcpu_vpmu(current),
                                            VPMU_PASSIVE_DOMAIN_ALLOCATED) )
			{
				if ( IS_ACTIVE(msrs_content[i].control) )
				{
					msrs_content[i].counter = val;
					if ( IS_ENABLE(msrs_content[i].control) )
						ovf = 2;
				}
			}
			if ( !ovf )
				ovf = 1;
		}
	}

	/* Only the P6-based Pentium M needs to re-unmask the APIC vector,
	 * but it doesn't hurt other P6 variants. */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	return ovf;
}
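CTR_OVERFLOWED tests the overflow condition of a P6-family counter. Since these counters sign-extend from bit 31 and count upward, overflow shows up as bit 31 going clear; a sketch consistent with that behaviour (the exact macro lives in the oprofile model code):

#define CTR_OVERFLOWED(n) (!((n) & (1ULL << 31)))

This is also why the reload value is written as -reset_value[i] above: the counter then overflows again after reset_value[i] events.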
Example #19
static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
{
	struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
	switch ( type )
	{
	case MSR_TYPE_ARCH_COUNTER:
		*msr_content = msrs[index].counter;
		break;
	case MSR_TYPE_ARCH_CTRL:
		*msr_content = msrs[index].control;
		break;
	}
}
Example #20
static void get_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    /*
     * Keep count of VPMUs in the system so that we won't try to change
     * vpmu_mode while a guest might be using one.
     * vpmu_mode can be safely updated while dom0's VPMUs are active and
     * so we don't need to include it in the count.
     */
    if ( !is_hardware_domain(v->domain) &&
        (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count++;
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);

    spin_unlock(&vpmu_lock);
}
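get_vpmu() and put_vpmu() (Example #16) are meant to be called symmetrically across a vCPU's lifetime so that vpmu_count stays balanced. A hypothetical call site (illustrative name, not a Xen function):

static void vcpu_pmu_setup_sketch(struct vcpu *v)
{
    get_vpmu(v);        /* sets VPMU_AVAILABLE, bumps vpmu_count */
    vpmu_initialise(v);
    /* ...later, teardown would call vpmu_destroy(v) then put_vpmu(v). */
}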
Example #21
static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    for ( i = 0; i < num_counters; i++ )
    {
        svm_intercept_msr(v, counters[i], MSR_INTERCEPT_RW);
        svm_intercept_msr(v, ctrls[i], MSR_INTERCEPT_RW);
    }

    ctxt->msr_bitmap_set = 0;
}
Example #22
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                uint64_t supported, bool_t is_write)
{
    struct vcpu *curr = current;
    struct vpmu_struct *vpmu;
    const struct arch_vpmu_ops *ops;
    int ret = 0;

    /*
     * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
     * profiling the whole system.
     */
    if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
         ((vpmu_mode & XENPMU_MODE_ALL) &&
          !is_hardware_domain(curr->domain)) )
        goto nop;

    vpmu = vcpu_vpmu(curr);
    ops = vpmu->arch_vpmu_ops;
    if ( !ops )
        goto nop;

    if ( is_write && ops->do_wrmsr )
        ret = ops->do_wrmsr(msr, *msr_content, supported);
    else if ( !is_write && ops->do_rdmsr )
        ret = ops->do_rdmsr(msr, msr_content);
    else
        goto nop;

    /*
     * We may have received a PMU interrupt while handling MSR access
     * and since do_wr/rdmsr may load VPMU context we should save
     * (and unload) it again.
     */
    if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
        vpmu_is_set(vpmu, VPMU_CACHED) )
    {
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
        ops->arch_vpmu_save(curr, 0);
        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
    }

    return ret;

 nop:
    if ( !is_write && (msr != MSR_IA32_MISC_ENABLE) )
        *msr_content = 0;

    return 0;
}
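A hypothetical caller, showing how an MSR intercept might route a guest rdmsr through vpmu_do_msr() (a sketch only; the real intercept paths live in the SVM/VMX code):

static int rdmsr_intercept_sketch(unsigned int msr, uint64_t *content)
{
    /* A nonzero return means the VPMU arch handler consumed the access. */
    if ( vpmu_do_msr(msr, content, 0, /* is_write */ 0) )
        return 1;

    return 0; /* fall through to the generic MSR emulation path */
}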
Example #23
File: vpmu.c Project: Fantu/Xen
static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    struct page_info *page;
    uint64_t gfn = params->val;

    if ( (vpmu_mode == XENPMU_MODE_OFF) ||
         ((vpmu_mode & XENPMU_MODE_ALL) && !is_hardware_domain(d)) )
        return -EINVAL;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return -EINVAL;

    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
    if ( !page )
        return -EINVAL;

    if ( !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        return -EINVAL;
    }

    v = d->vcpu[params->vcpu];
    vpmu = vcpu_vpmu(v);

    spin_lock(&vpmu->vpmu_lock);

    if ( v->arch.vpmu.xenpmu_data )
    {
        spin_unlock(&vpmu->vpmu_lock);
        put_page_and_type(page);
        return -EEXIST;
    }

    v->arch.vpmu.xenpmu_data = __map_domain_page_global(page);
    if ( !v->arch.vpmu.xenpmu_data )
    {
        spin_unlock(&vpmu->vpmu_lock);
        put_page_and_type(page);
        return -ENOMEM;
    }

    vpmu_initialise(v);

    spin_unlock(&vpmu->vpmu_lock);

    return 0;
}
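pvpmu_init() and pvpmu_finish() (Example #3) are reached from the XENPMU_* hypercall ops. A sketch of the dispatch, assuming the op numbering of the public PMU interface:

static long xenpmu_op_sketch(unsigned int op, xen_pmu_params_t *params)
{
    switch ( op )
    {
    case XENPMU_init:
        return pvpmu_init(current->domain, params);
    case XENPMU_finish:
        pvpmu_finish(current->domain, params);
        return 0;
    }

    return -EINVAL;
}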
Example #24
static int passive_domain_msr_op_checks(unsigned int msr, int *typep, int *indexp)
{
	struct vpmu_struct *vpmu = vcpu_vpmu(current);
	if ( model == NULL )
		return 0;
	if ( model->is_arch_pmu_msr == NULL )
		return 0;
	if ( !model->is_arch_pmu_msr(msr, typep, indexp) )
		return 0;

	if ( !vpmu_is_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED) )
		if ( !model->allocated_msr(current) )
			return 0;
	return 1;
}
Example #25
static void vpmu_save_force(void *arg)
{
    struct vcpu *v = (struct vcpu *)arg;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return;

    if ( vpmu->arch_vpmu_ops )
        (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v);

    vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);

    per_cpu(last_vcpu, smp_processor_id()) = NULL;
}
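vpmu_save_force() is the cross-CPU callback used by Examples #1 and #2: on_selected_cpus() runs it on the remote pcpu with interrupts disabled, which is what makes the non-atomic last_vcpu checks safe. The invocation pattern, as used in vpmu_load():

vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                 vpmu_save_force, v, 1 /* wait for completion */);
vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);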
Example #26
static void amd_vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
        amd_vpmu_unset_msr_bitmap(v);

    xfree(vpmu->context);
    vpmu_reset(vpmu, VPMU_CONTEXT_ALLOCATED);

    if ( vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        vpmu_reset(vpmu, VPMU_RUNNING);
        release_pmu_ownship(PMU_OWNER_HVM);
    }
}
Example #27
File: vpmu.c Project: Fantu/Xen
void vpmu_lvtpc_update(uint32_t val)
{
    struct vpmu_struct *vpmu;
    struct vcpu *curr = current;

    if ( likely(vpmu_mode == XENPMU_MODE_OFF) )
        return;

    vpmu = vcpu_vpmu(curr);

    vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | (val & APIC_LVT_MASKED);

    /* Postpone APIC updates for PV(H) guests if PMU interrupt is pending */
    if ( is_hvm_vcpu(curr) || !vpmu->xenpmu_data ||
         !vpmu_is_set(vpmu, VPMU_CACHED) )
        apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
}
Example #28
static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
        || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    rdmsrl(msr, *msr_content);

    return 1;
}
Example #29
File: vpmu.c Project: Fantu/Xen
void vpmu_save(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    int pcpu = smp_processor_id();

    if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_ALLOCATED | VPMU_CONTEXT_LOADED) )
        return;

    vpmu->last_pcpu = pcpu;
    per_cpu(last_vcpu, pcpu) = v;

    if ( vpmu->arch_vpmu_ops )
        if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0) )
            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);

    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}
Example #30
static int amd_vpmu_initialise(struct vcpu *v)
{
    struct amd_vpmu_context *ctxt;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t family = current_cpu_data.x86;

    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return 0;

    if ( counters == NULL )
    {
        switch ( family )
        {
        case 0x15:
            num_counters = F15H_NUM_COUNTERS;
            counters = AMD_F15H_COUNTERS;
            ctrls = AMD_F15H_CTRLS;
            k7_counters_mirrored = 1;
            break;
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x16:
        default:
            num_counters = F10H_NUM_COUNTERS;
            counters = AMD_F10H_COUNTERS;
            ctrls = AMD_F10H_CTRLS;
            k7_counters_mirrored = 0;
            break;
        }
    }

    ctxt = xzalloc(struct amd_vpmu_context);
    if ( !ctxt )
    {
        gdprintk(XENLOG_WARNING, "Insufficient memory for PMU; "
                 "PMU feature is unavailable on domain %d vcpu %d.\n",
                 v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vpmu->context = ctxt;
    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
    return 0;
}
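The counters/ctrls tables selected above map each logical counter index to a hardware MSR. A sketch of what the family-10h tables plausibly contain, assuming the legacy K7 performance MSRs (the family-15h tables would use the extended MSR_AMD_FAM15H_* range instead; treat the exact contents as an assumption):

static const u32 AMD_F10H_COUNTERS[] = {
    MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3
};

static const u32 AMD_F10H_CTRLS[] = {
    MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3
};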