Example #1
void vpmu_load(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    int pcpu = smp_processor_id();
    struct vcpu *prev = NULL;

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return;

    /* First time this VCPU is running here */
    if ( vpmu->last_pcpu != pcpu )
    {
        /*
         * Get the context from the last pcpu that we ran on. Note that if another
         * VCPU is running there it must have saved this VCPU's context before
         * starting to run (see below).
         * There should be no race since remote pcpu will disable interrupts
         * before saving the context.
         */
        if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        {
            vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
            on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                             vpmu_save_force, (void *)v, 1);
            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);
        }
    } 

    /* Prevent forced context save from remote CPU */
    local_irq_disable();

    prev = per_cpu(last_vcpu, pcpu);

    if ( prev != v && prev )
    {
        vpmu = vcpu_vpmu(prev);

        /* Someone ran here before us */
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
        vpmu_save_force(prev);
        vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);

        vpmu = vcpu_vpmu(v);
    }

    local_irq_enable();

    /* Only load the PMU context immediately when the PMU is actually counting. */
    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) )
        return;

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_load )
    {
        apic_write_around(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
        /* Arch code needs to set VPMU_CONTEXT_LOADED */
        vpmu->arch_vpmu_ops->arch_vpmu_load(v);
    }
}
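The vpmu_set/vpmu_reset/vpmu_is_set calls above are simple bit operations on vpmu->flags. The standalone sketch below models that state machine; the helper definitions mirror the style of Xen's vpmu.h, but the flag values and the demo main() are illustrative assumptions, not the real header.

#include <stdio.h>

/* Illustrative flag values only -- the real ones live in Xen's vpmu.h. */
#define VPMU_CONTEXT_ALLOCATED  (1u << 0)
#define VPMU_CONTEXT_LOADED     (1u << 1)
#define VPMU_RUNNING            (1u << 2)
#define VPMU_CONTEXT_SAVE       (1u << 3)
#define VPMU_FROZEN             (1u << 4)

struct vpmu_struct { unsigned int flags; };

/* Assumed to be plain bit operations, as in Xen. */
#define vpmu_set(vpmu, x)     ((vpmu)->flags |= (x))
#define vpmu_reset(vpmu, x)   ((vpmu)->flags &= ~(x))
#define vpmu_is_set(vpmu, x)  (!!((vpmu)->flags & (x)))

int main(void)
{
    struct vpmu_struct vpmu = { .flags = VPMU_CONTEXT_ALLOCATED |
                                         VPMU_CONTEXT_LOADED };

    /* Model the remote-save step in vpmu_load(): request a save, then
     * mark the context as no longer loaded on the old pcpu. */
    vpmu_set(&vpmu, VPMU_CONTEXT_SAVE);
    vpmu_reset(&vpmu, VPMU_CONTEXT_LOADED);

    printf("loaded=%d save=%d\n",
           vpmu_is_set(&vpmu, VPMU_CONTEXT_LOADED),
           vpmu_is_set(&vpmu, VPMU_CONTEXT_SAVE));
    return 0;
}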
Example #2
static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                             uint64_t supported)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    ASSERT(!supported);

    /* For all counters, enable guest-only mode for HVM guests */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        !(is_guest_mode(msr_content)) )
    {
        set_guest_mode(msr_content);
    }

    /* Start the vPMU when the guest enables a counter and the vPMU is not yet running */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
            return 1;
        vpmu_set(vpmu, VPMU_RUNNING);
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR;

        if ( !((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_set_msr_bitmap(v);
    }

    /* Stop saving & restoring if the guest disables the counter */
    if ( (get_pmu_reg_type(msr) == MSR_TYPE_CTRL) &&
        (is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
    {
        apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
        vpmu->hw_lapic_lvtpc = PMU_APIC_VECTOR | APIC_LVT_MASKED;
        vpmu_reset(vpmu, VPMU_RUNNING);
        if ( ((struct amd_vpmu_context *)vpmu->context)->msr_bitmap_set )
            amd_vpmu_unset_msr_bitmap(v);
        release_pmu_ownship(PMU_OWNER_HVM);
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
        || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    /* Update vpmu context immediately */
    context_update(msr, msr_content);

    /* Write to hw counters */
    wrmsrl(msr, msr_content);
    return 1;
}
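The is_pmu_enabled/is_guest_mode/set_guest_mode predicates used above test bits of the written PERF_CTL value. A minimal sketch, assuming the AMD PerfEvtSel layout (enable = bit 22, guest-only = bit 40); treat the bit positions, names, and the pointer-based set_guest_mode signature as assumptions rather than the exact Xen macros.

#include <stdint.h>
#include <stdbool.h>

/* Assumed AMD PerfEvtSel bit positions (En = bit 22, GuestOnly = bit 40). */
#define PERF_CTL_EN_BIT         22
#define PERF_CTL_GUESTONLY_BIT  40

bool is_pmu_enabled(uint64_t msr_content)
{
    return msr_content & (1ULL << PERF_CTL_EN_BIT);
}

bool is_guest_mode(uint64_t msr_content)
{
    return msr_content & (1ULL << PERF_CTL_GUESTONLY_BIT);
}

/* The real helper modifies its argument in place; a pointer makes that
 * explicit in this sketch. */
void set_guest_mode(uint64_t *msr_content)
{
    *msr_content |= 1ULL << PERF_CTL_GUESTONLY_BIT;
}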
Example #3
static int amd_vpmu_save(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctx = vpmu->context;
    unsigned int i;

    /*
     * Stop the counters. If we came here via vpmu_save_force (i.e.
     * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
     */
    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
    {
        vpmu_set(vpmu, VPMU_FROZEN);

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], 0);

        return 0;
    }

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return 0;

    context_save(v);

    if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && ctx->msr_bitmap_set )
        amd_vpmu_unset_msr_bitmap(v);

    return 1;
}
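context_save() is not among the snippets; presumably it just reads the hardware counter MSRs back into the software context, since the control values are already tracked at write time by amd_vpmu_do_wrmsr(). A sketch under that assumption:

/* Sketch only: assumes num_counters/counters[] are the module-level tables
 * selected in amd_vpmu_initialise(), and rdmsrl() reads an MSR into a u64. */
static void context_save(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    /* Control registers are captured at write time, so only the counter
     * values need to be read back here. */
    for ( i = 0; i < num_counters; i++ )
        rdmsrl(counters[i], ctxt->counters[i]);
}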
Example #4
File: vpmu.c Project: mirage/xen
static void get_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    /*
     * Keep count of VPMUs in the system so that we won't try to change
     * vpmu_mode while a guest might be using one.
     * vpmu_mode can be safely updated while dom0's VPMUs are active, so we
     * don't need to include them in the count.
     */
    if ( !is_hardware_domain(v->domain) &&
        (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count++;
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);

    spin_unlock(&vpmu_lock);
}
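The counterpart that drops this reference is not included in the snippets; presumably it mirrors get_vpmu() under the same lock, along the lines of the sketch below (the name and structure are assumptions inferred from the code above).

/* Sketch of the assumed counterpart: drop the count taken by get_vpmu(). */
static void put_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    if ( !vpmu_is_set(vcpu_vpmu(v), VPMU_AVAILABLE) )
        goto out;

    if ( !is_hardware_domain(v->domain) &&
         (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count--;
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);

 out:
    spin_unlock(&vpmu_lock);
}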
Example #5
File: vpmu.c Project: mirage/xen
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                uint64_t supported, bool_t is_write)
{
    struct vcpu *curr = current;
    struct vpmu_struct *vpmu;
    const struct arch_vpmu_ops *ops;
    int ret = 0;

    /*
     * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
     * profiling the whole system.
     */
    if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
         ((vpmu_mode & XENPMU_MODE_ALL) &&
          !is_hardware_domain(curr->domain)) )
         goto nop;

    vpmu = vcpu_vpmu(curr);
    ops = vpmu->arch_vpmu_ops;
    if ( !ops )
        goto nop;

    if ( is_write && ops->do_wrmsr )
        ret = ops->do_wrmsr(msr, *msr_content, supported);
    else if ( !is_write && ops->do_rdmsr )
        ret = ops->do_rdmsr(msr, msr_content);
    else
        goto nop;

    /*
     * We may have received a PMU interrupt while handling MSR access
     * and since do_wr/rdmsr may load VPMU context we should save
     * (and unload) it again.
     */
    if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
        vpmu_is_set(vpmu, VPMU_CACHED) )
    {
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
        ops->arch_vpmu_save(curr, 0);
        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
    }

    return ret;

 nop:
    if ( !is_write && (msr != MSR_IA32_MISC_ENABLE) )
        *msr_content = 0;

    return 0;
}
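Callers on the MSR-intercept paths typically reach vpmu_do_msr() through thin wrappers that fix the is_write flag. A plausible sketch of those wrappers (shown here as an illustration of the calling convention, not as the authoritative header):

/* Sketch: thin wrappers the rdmsr/wrmsr intercept paths might use. */
static inline int vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
                                uint64_t supported)
{
    return vpmu_do_msr(msr, &msr_content, supported, 1);
}

static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    return vpmu_do_msr(msr, msr_content, 0, 0);
}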
Example #6
static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
{
    struct vcpu *v = current;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)
        || vpmu_is_set(vpmu, VPMU_FROZEN) )
    {
        context_load(v);
        vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
        vpmu_reset(vpmu, VPMU_FROZEN);
    }

    rdmsrl(msr, *msr_content);

    return 1;
}
Example #7
File: vpmu.c Project: Fantu/Xen
static void vpmu_save_force(void *arg)
{
    struct vcpu *v = (struct vcpu *)arg;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
        return;

    vpmu_set(vpmu, VPMU_CONTEXT_SAVE);

    if ( vpmu->arch_vpmu_ops )
        (void)vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0);

    vpmu_reset(vpmu, VPMU_CONTEXT_SAVE);

    per_cpu(last_vcpu, smp_processor_id()) = NULL;
}
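vpmu_save_force() relies on per_cpu(last_vcpu, ...) and vpmu->last_pcpu having been recorded by the regular context-switch save path, which is not among the snippets. A sketch of what that path presumably does (structure inferred from vpmu_load() in Example #1; treat it as an assumption, not the exact function):

/* Sketch: the regular save path, assumed to record where the context was
 * left so vpmu_load() can later pull it from the right pcpu. */
void vpmu_save(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    int pcpu = smp_processor_id();

    if ( !(vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) &&
           vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED)) )
        return;

    vpmu->last_pcpu = pcpu;
    per_cpu(last_vcpu, pcpu) = v;

    if ( vpmu->arch_vpmu_ops )
        if ( vpmu->arch_vpmu_ops->arch_vpmu_save(v, 0) )
            vpmu_reset(vpmu, VPMU_CONTEXT_LOADED);

    /* Mask the PMU interrupt while no context is loaded on this pcpu. */
    apic_write(APIC_LVTPC, PMU_APIC_VECTOR | APIC_LVT_MASKED);
}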
Example #8
static int amd_vpmu_initialise(struct vcpu *v)
{
    struct amd_vpmu_context *ctxt;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t family = current_cpu_data.x86;

    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return 0;

    if ( counters == NULL )
    {
        switch ( family )
        {
        case 0x15:
            num_counters = F15H_NUM_COUNTERS;
            counters = AMD_F15H_COUNTERS;
            ctrls = AMD_F15H_CTRLS;
            k7_counters_mirrored = 1;
            break;
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x16:
        default:
            num_counters = F10H_NUM_COUNTERS;
            counters = AMD_F10H_COUNTERS;
            ctrls = AMD_F10H_CTRLS;
            k7_counters_mirrored = 0;
            break;
        }
    }

    ctxt = xzalloc(struct amd_vpmu_context);
    if ( !ctxt )
    {
        gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
                 "PMU feature is unavailable on domain %d vcpu %d.\n",
                 v->domain->domain_id, v->vcpu_id);
        return -ENOMEM;
    }

    vpmu->context = ctxt;
    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
    return 0;
}
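The counters/ctrls tables selected above are arrays of MSR addresses, one entry per counter/control pair. A sketch of what the family-0x10 tables presumably look like, using the legacy K7-style MSR names; treat the exact constants as assumptions:

/* Sketch: per-family MSR tables assumed by amd_vpmu_initialise().
 * Family 0x10 and earlier expose four K7-style counter/control pairs. */
#define F10H_NUM_COUNTERS 4

static const u32 AMD_F10H_COUNTERS[] = {
    MSR_K7_PERFCTR0,
    MSR_K7_PERFCTR1,
    MSR_K7_PERFCTR2,
    MSR_K7_PERFCTR3
};

static const u32 AMD_F10H_CTRLS[] = {
    MSR_K7_EVNTSEL0,
    MSR_K7_EVNTSEL1,
    MSR_K7_EVNTSEL2,
    MSR_K7_EVNTSEL3
};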
Example #9
static int ppro_allocate_msr(struct vcpu *v)
{
	struct vpmu_struct *vpmu = vcpu_vpmu(v);
	struct arch_msr_pair *msr_content;

	msr_content = xzalloc_array(struct arch_msr_pair, num_counters);
	if ( !msr_content )
		goto out;
	vpmu->context = (void *)msr_content;
	vpmu_clear(vpmu);
	vpmu_set(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
	return 1;
out:
	printk(XENLOG_G_WARNING "Insufficient memory for oprofile,"
	       " oprofile is unavailable on dom%d vcpu%d\n",
	       v->domain->domain_id, v->vcpu_id);
	return 0;
}
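The context allocated here is an array of counter/control pairs, one per hardware counter. struct arch_msr_pair is presumably just those two 64-bit values (a sketch of the assumed layout):

/* Sketch: assumed layout of the per-counter pair allocated above. */
struct arch_msr_pair {
    u64 counter;   /* last value of the performance counter MSR */
    u64 control;   /* last value of the matching event-select MSR */
};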
Example #10
static void amd_vpmu_load(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    vpmu_reset(vpmu, VPMU_FROZEN);

    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
    {
        unsigned int i;

        for ( i = 0; i < num_counters; i++ )
            wrmsrl(ctrls[i], ctxt->ctrls[i]);

        return;
    }

    vpmu_set(vpmu, VPMU_CONTEXT_LOADED);

    context_load(v);
}
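context_load() is the restore-side twin of context_save() in Example #3; presumably it writes both the saved counter values and the control registers back to hardware. A sketch under that assumption:

/* Sketch only: restore both counters and controls from the software
 * context; assumes the same module-level counters[]/ctrls[] tables. */
static void context_load(struct vcpu *v)
{
    unsigned int i;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    struct amd_vpmu_context *ctxt = vpmu->context;

    for ( i = 0; i < num_counters; i++ )
    {
        wrmsrl(counters[i], ctxt->counters[i]);
        wrmsrl(ctrls[i], ctxt->ctrls[i]);
    }
}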
Example #11
File: vpmu.c Project: Fantu/Xen
void vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    struct vcpu *sampled = current, *sampling;
    struct vpmu_struct *vpmu;
    struct vlapic *vlapic;
    u32 vlapic_lvtpc;

    /*
     * dom0 will handle the interrupt for special domains (e.g. the idle domain)
     * or, in XENPMU_MODE_ALL, for everyone.
     * in XENPMU_MODE_ALL, for everyone.
     */
    if ( (vpmu_mode & XENPMU_MODE_ALL) ||
         (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) )
    {
        sampling = choose_hwdom_vcpu();
        if ( !sampling )
            return;
    }
    else
        sampling = sampled;

    vpmu = vcpu_vpmu(sampling);
    if ( !vpmu->arch_vpmu_ops )
        return;

    /* PV(H) guest */
    if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) )
    {
        const struct cpu_user_regs *cur_regs;
        uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags;
        domid_t domid;

        if ( !vpmu->xenpmu_data )
            return;

        if ( is_pvh_vcpu(sampling) &&
             !(vpmu_mode & XENPMU_MODE_ALL) &&
             !vpmu->arch_vpmu_ops->do_interrupt(regs) )
            return;

        if ( vpmu_is_set(vpmu, VPMU_CACHED) )
            return;

        /* PV guest will be reading PMU MSRs from xenpmu_data */
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
        vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);

        if ( has_hvm_container_vcpu(sampled) )
            *flags = 0;
        else
            *flags = PMU_SAMPLE_PV;

        if ( sampled == sampling )
            domid = DOMID_SELF;
        else
            domid = sampled->domain->domain_id;

        /* Store appropriate registers in xenpmu_data */
        /* FIXME: 32-bit PVH should go here as well */
        if ( is_pv_32bit_vcpu(sampling) )
        {
            /*
             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
             * and therefore we treat it the same way as a non-privileged
             * PV 32-bit domain.
             */
            struct compat_pmu_regs *cmp;

            cur_regs = guest_cpu_user_regs();

            cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
            cmp->ip = cur_regs->rip;
            cmp->sp = cur_regs->rsp;
            cmp->flags = cur_regs->eflags;
            cmp->ss = cur_regs->ss;
            cmp->cs = cur_regs->cs;
            if ( (cmp->cs & 3) > 1 )
                *flags |= PMU_SAMPLE_USER;
        }
        else
        {
            struct xen_pmu_regs *r = &vpmu->xenpmu_data->pmu.r.regs;

            if ( (vpmu_mode & XENPMU_MODE_SELF) )
                cur_regs = guest_cpu_user_regs();
            else if ( !guest_mode(regs) &&
                      is_hardware_domain(sampling->domain) )
            {
                cur_regs = regs;
                domid = DOMID_XEN;
            }
            else
                cur_regs = guest_cpu_user_regs();

            r->ip = cur_regs->rip;
            r->sp = cur_regs->rsp;
            r->flags = cur_regs->eflags;

            if ( !has_hvm_container_vcpu(sampled) )
            {
                r->ss = cur_regs->ss;
                r->cs = cur_regs->cs;
                if ( !(sampled->arch.flags & TF_kernel_mode) )
                    *flags |= PMU_SAMPLE_USER;
            }
            else
            {
                struct segment_register seg;

                hvm_get_segment_register(sampled, x86_seg_cs, &seg);
                r->cs = seg.sel;
                hvm_get_segment_register(sampled, x86_seg_ss, &seg);
                r->ss = seg.sel;
                r->cpl = seg.attr.fields.dpl;
                if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
                    *flags |= PMU_SAMPLE_REAL;
            }
        }

        vpmu->xenpmu_data->domain_id = domid;
        vpmu->xenpmu_data->vcpu_id = sampled->vcpu_id;
        if ( is_hardware_domain(sampling->domain) )
            vpmu->xenpmu_data->pcpu_id = smp_processor_id();
        else
            vpmu->xenpmu_data->pcpu_id = sampled->vcpu_id;

        vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;
        apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
        *flags |= PMU_CACHED;
        vpmu_set(vpmu, VPMU_CACHED);

        send_guest_vcpu_virq(sampling, VIRQ_XENPMU);

        return;
    }

    /* HVM guests */
    vlapic = vcpu_vlapic(sampling);

    /* We don't support (yet) HVM dom0 */
    ASSERT(sampling == sampled);

    if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) ||
         !is_vlapic_lvtpc_enabled(vlapic) )
        return;

    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);

    switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) )
    {
    case APIC_MODE_FIXED:
        vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
        break;
    case APIC_MODE_NMI:
        sampling->nmi_pending = 1;
        break;
    }
}
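The sample handed to the guest is written into vpmu->xenpmu_data->pmu.r.regs. The fields filled in above suggest a register layout along the lines of the sketch below; this is an illustration inferred from the assignments, not the authoritative xen/include/public/pmu.h definition.

/* Sketch: assumed layout of the register block shared with the guest.
 * Field names follow the assignments in vpmu_do_interrupt() above. */
struct xen_pmu_regs {
    uint64_t ip;      /* instruction pointer of the sampled context */
    uint64_t sp;      /* stack pointer */
    uint64_t flags;   /* rflags */
    uint16_t cs;
    uint16_t ss;
    uint8_t  cpl;     /* privilege level, filled in for HVM samples */
    uint8_t  pad[3];
};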