Example #1
File: vtimer.c Project: 0day-ci/xen
int domain_vtimer_init(struct domain *d, struct xen_arch_domainconfig *config)
{
    d->arch.phys_timer_base.offset = NOW();
    d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);
    d->time_offset_seconds = ticks_to_ns(d->arch.virt_timer_base.offset - boot_count);
    do_div(d->time_offset_seconds, 1000000000);

    config->clock_frequency = timer_dt_clock_frequency;

    /* At this stage vgic_reserve_virq can't fail */
    if ( is_hardware_domain(d) )
    {
        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_SECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_NONSECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_VIRT_PPI)) )
            BUG();
    }
    else
    {
        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_S_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_NS_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_VIRT_PPI) )
            BUG();
    }

    return 0;
}
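A standalone sketch (not Xen code) of the conversion this function performs: ticks_to_ns() scales the tick delta to nanoseconds, and do_div() then divides the 64-bit value in place, leaving whole seconds (and returning the remainder). The counter frequency and tick delta below are made-up values, and the linear scaling is only a crude stand-in for ticks_to_ns().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t freq = 100000000ULL;                 /* assumed 100 MHz counter */
    uint64_t ticks = 12345678901ULL;              /* stand-in for offset - boot_count */
    uint64_t ns = ticks * (1000000000ULL / freq); /* crude ticks_to_ns() */
    uint64_t seconds = ns;

    /* do_div(seconds, 1000000000) divides in place and returns the
     * remainder; plain 64-bit division gives the same quotient. */
    seconds /= 1000000000ULL;

    printf("%llu ticks -> %llu s\n",
           (unsigned long long)ticks, (unsigned long long)seconds);
    return 0;
}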
Example #2
File: vgic.c Project: columbia/xen
int domain_vgic_init(struct domain *d)
{
    int i;

    d->arch.vgic.ctlr = 0;

    if ( is_hardware_domain(d) )
        d->arch.vgic.nr_spis = gic_number_lines() - 32;
    else
        d->arch.vgic.nr_spis = 0; /* We don't need SPIs for the guest */

    switch ( gic_hw_version() )
    {
#ifdef CONFIG_ARM_64
    case GIC_V3:
        if ( vgic_v3_init(d) )
            return -ENODEV;
        break;
#endif
    case GIC_V2:
        if ( vgic_v2_init(d) )
            return -ENODEV;
        break;
    default:
        return -ENODEV;
    }

    spin_lock_init(&d->arch.vgic.lock);

    d->arch.vgic.shared_irqs =
        xzalloc_array(struct vgic_irq_rank, DOMAIN_NR_RANKS(d));
    if ( d->arch.vgic.shared_irqs == NULL )
        return -ENOMEM;

    d->arch.vgic.pending_irqs =
        xzalloc_array(struct pending_irq, d->arch.vgic.nr_spis);
    if ( d->arch.vgic.pending_irqs == NULL )
        return -ENOMEM;

    for ( i = 0; i < d->arch.vgic.nr_spis; i++ )
    {
        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].inflight);
        INIT_LIST_HEAD(&d->arch.vgic.pending_irqs[i].lr_queue);
    }
    for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
        spin_lock_init(&d->arch.vgic.shared_irqs[i].lock);

    d->arch.vgic.handler->domain_init(d);

    d->arch.vgic.allocated_irqs =
        xzalloc_array(unsigned long, BITS_TO_LONGS(vgic_num_irqs(d)));
    if ( !d->arch.vgic.allocated_irqs )
        return -ENOMEM;

    /* vIRQ0-15 (SGIs) are reserved */
    for ( i = 0; i < NR_GIC_SGI; i++ )
        set_bit(i, d->arch.vgic.allocated_irqs);

    return 0;
}
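The function above allocates zeroed arrays and then still walks them to initialize each intrusive list head: zeroed memory is not a valid empty list, because an empty list head must point at itself. A minimal standalone sketch of that pattern (not Xen code; calloc stands in for xzalloc_array, and the struct names are illustrative):

#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

struct pending { struct list_head inflight, lr_queue; };

static struct pending *alloc_pending(unsigned int nr)
{
    struct pending *p = calloc(nr, sizeof(*p)); /* zeroed, like xzalloc_array */
    unsigned int i;

    if ( !p )
        return NULL;

    for ( i = 0; i < nr; i++ )
    {
        INIT_LIST_HEAD(&p[i].inflight); /* empty list = self-linked head */
        INIT_LIST_HEAD(&p[i].lr_queue);
    }

    return p;
}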
Example #3
int domain_vtimer_init(struct domain *d)
{
    d->arch.phys_timer_base.offset = NOW();
    d->arch.virt_timer_base.offset = READ_SYSREG64(CNTPCT_EL0);

    /* At this stage vgic_reserve_virq can't fail */
    if ( is_hardware_domain(d) )
    {
        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_SECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_PHYS_NONSECURE_PPI)) )
            BUG();

        if ( !vgic_reserve_virq(d, timer_get_irq(TIMER_VIRT_PPI)) )
            BUG();
    }
    else
    {
        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_S_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_PHYS_NS_PPI) )
            BUG();

        if ( !vgic_reserve_virq(d, GUEST_TIMER_VIRT_PPI) )
            BUG();
    }

    return 0;
}
Example #4
/*
 * Currently all CPUs rendezvous at the MCE softirq handler, so there is
 * no need to consider the paging p2m type.
 * Currently only HVM guests with EPT paging mode are supported.
 * XXX the following situations are missed:
 * PoD, foreign mapped, granted, shared
 */
int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
{
    mfn_t r_mfn;
    p2m_type_t pt;
    int rc;

    /* Always trust dom0's MCE handler will prevent future access */
    if ( is_hardware_domain(d) )
        return 0;

    if ( !mfn_valid(mfn_x(mfn)) )
        return -EINVAL;

    if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
        return -ENOSYS;

    rc = -1;
    r_mfn = get_gfn_query(d, gfn, &pt);
    if ( p2m_to_mask(pt) & P2M_UNMAP_TYPES )
    {
        ASSERT(mfn_x(r_mfn) == mfn_x(mfn));
        rc = p2m_change_type_one(d, gfn, pt, p2m_ram_broken);
    }
    put_gfn(d, gfn);

    return rc;
}
Example #5
File: vpmu.c Project: Fantu/Xen
void vpmu_destroy(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);

    if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return;

    /*
     * Need to clear last_vcpu in case it points to v.
     * We can check here non-atomically whether it is 'v' since
     * last_vcpu can never become 'v' again at this point.
     * We will test it again in vpmu_clear_last() with interrupts
     * disabled to make sure we don't clear someone else.
     */
    if ( per_cpu(last_vcpu, vpmu->last_pcpu) == v )
        on_selected_cpus(cpumask_of(vpmu->last_pcpu),
                         vpmu_clear_last, v, 1);

    if ( vpmu->arch_vpmu_ops && vpmu->arch_vpmu_ops->arch_vpmu_destroy )
    {
        /* Unload VPMU first. This will stop counters */
        on_selected_cpus(cpumask_of(vcpu_vpmu(v)->last_pcpu),
                         vpmu_save_force, v, 1);
        vpmu->arch_vpmu_ops->arch_vpmu_destroy(v);
    }

    spin_lock(&vpmu_lock);
    if ( !is_hardware_domain(v->domain) )
        vpmu_count--;
    spin_unlock(&vpmu_lock);
}
Example #6
int vcpu_vtimer_init(struct vcpu *v)
{
    struct vtimer *t = &v->arch.phys_timer;
    bool_t d0 = is_hardware_domain(v->domain);

    /*
     * Hardware domain uses the hardware interrupts, guests get the virtual
     * platform.
     */

    init_timer(&t->timer, phys_timer_expired, t, v->processor);
    t->ctl = 0;
    t->cval = NOW();
    t->irq = d0
        ? timer_get_irq(TIMER_PHYS_NONSECURE_PPI)
        : GUEST_TIMER_PHYS_NS_PPI;
    t->v = v;

    t = &v->arch.virt_timer;
    init_timer(&t->timer, virt_timer_expired, t, v->processor);
    t->ctl = 0;
    t->irq = d0
        ? timer_get_irq(TIMER_VIRT_PPI)
        : GUEST_TIMER_VIRT_PPI;
    t->v = v;

    v->arch.vtimer_initialized = 1;

    return 0;
}
Example #7
int arch_domain_create(struct domain *d, unsigned int domcr_flags)
{
    int rc;

    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

    if ( (rc = p2m_alloc_table(d)) != 0 )
        goto fail;

    if ( (rc = gicv_setup(d)) != 0 )
        goto fail;

    if ( (rc = domain_vgic_init(d)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d)) != 0 )
        goto fail;

    if ( d->domain_id )
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
    else
        d->arch.evtchn_irq = platform_dom0_evtchn_ppi();

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}
Example #8
File: vpmu.c Project: mirage/xen
static void put_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    if ( !vpmu_available(v) )
        goto out;

    if ( !is_hardware_domain(v->domain) &&
         (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count--;
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_reset(vcpu_vpmu(v), VPMU_AVAILABLE);

 out:
    spin_unlock(&vpmu_lock);
}
Example #9
File: vpmu.c Project: mirage/xen
static void get_vpmu(struct vcpu *v)
{
    spin_lock(&vpmu_lock);

    /*
     * Keep count of VPMUs in the system so that we won't try to change
     * vpmu_mode while a guest might be using one.
     * vpmu_mode can be safely updated while dom0's VPMUs are active and
     * so we don't need to include it in the count.
     */
    if ( !is_hardware_domain(v->domain) &&
         (vpmu_mode & (XENPMU_MODE_SELF | XENPMU_MODE_HV)) )
    {
        vpmu_count++;
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);
    }
    else if ( is_hardware_domain(v->domain) &&
              (vpmu_mode != XENPMU_MODE_OFF) )
        vpmu_set(vcpu_vpmu(v), VPMU_AVAILABLE);

    spin_unlock(&vpmu_lock);
}
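get_vpmu()/put_vpmu() implement a simple counted-use pattern: a global count, taken and dropped under one lock, lets a later mode switch refuse while any non-privileged VPMU is still active. A standalone sketch of the idea (not Xen code; pthread locking stands in for Xen's spinlocks):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int users;
static int mode;

static void get_user(void)
{
    pthread_mutex_lock(&lock);
    users++;                       /* like vpmu_count++ */
    pthread_mutex_unlock(&lock);
}

static void put_user(void)
{
    pthread_mutex_lock(&lock);
    users--;
    pthread_mutex_unlock(&lock);
}

static bool set_mode(int new_mode)
{
    bool ok = false;

    pthread_mutex_lock(&lock);
    if ( users == 0 )              /* refuse while anyone holds a reference */
    {
        mode = new_mode;
        ok = true;
    }
    pthread_mutex_unlock(&lock);

    return ok;
}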
Example #10
File: vpmu.c Project: Fantu/Xen
static int pvpmu_init(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    struct page_info *page;
    uint64_t gfn = params->val;

    if ( (vpmu_mode == XENPMU_MODE_OFF) ||
         ((vpmu_mode & XENPMU_MODE_ALL) && !is_hardware_domain(d)) )
        return -EINVAL;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return -EINVAL;

    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
    if ( !page )
        return -EINVAL;

    if ( !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        return -EINVAL;
    }

    v = d->vcpu[params->vcpu];
    vpmu = vcpu_vpmu(v);

    spin_lock(&vpmu->vpmu_lock);

    if ( v->arch.vpmu.xenpmu_data )
    {
        spin_unlock(&vpmu->vpmu_lock);
        put_page_and_type(page);
        return -EEXIST;
    }

    v->arch.vpmu.xenpmu_data = __map_domain_page_global(page);
    if ( !v->arch.vpmu.xenpmu_data )
    {
        spin_unlock(&vpmu->vpmu_lock);
        put_page_and_type(page);
        return -ENOMEM;
    }

    vpmu_initialise(v);

    spin_unlock(&vpmu->vpmu_lock);

    return 0;
}
Example #11
File: vpmu.c Project: mirage/xen
int vpmu_do_msr(unsigned int msr, uint64_t *msr_content,
                uint64_t supported, bool_t is_write)
{
    struct vcpu *curr = current;
    struct vpmu_struct *vpmu;
    const struct arch_vpmu_ops *ops;
    int ret = 0;

    /*
     * Hide the PMU MSRs if vpmu is not configured, or the hardware domain is
     * profiling the whole system.
     */
    if ( likely(vpmu_mode == XENPMU_MODE_OFF) ||
         ((vpmu_mode & XENPMU_MODE_ALL) &&
          !is_hardware_domain(curr->domain)) )
        goto nop;

    vpmu = vcpu_vpmu(curr);
    ops = vpmu->arch_vpmu_ops;
    if ( !ops )
        goto nop;

    if ( is_write && ops->do_wrmsr )
        ret = ops->do_wrmsr(msr, *msr_content, supported);
    else if ( !is_write && ops->do_rdmsr )
        ret = ops->do_rdmsr(msr, msr_content);
    else
        goto nop;

    /*
     * We may have received a PMU interrupt while handling MSR access
     * and since do_wr/rdmsr may load VPMU context we should save
     * (and unload) it again.
     */
    if ( !has_vlapic(curr->domain) && vpmu->xenpmu_data &&
         vpmu_is_set(vpmu, VPMU_CACHED) )
    {
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE);
        ops->arch_vpmu_save(curr, 0);
        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
    }

    return ret;

 nop:
    if ( !is_write && (msr != MSR_IA32_MISC_ENABLE) )
        *msr_content = 0;

    return 0;
}
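vpmu_do_msr() dispatches through an optional ops table: every hook pointer is checked, and a missing hook degrades to a no-op that reads back zero so the MSR stays hidden. A standalone sketch of that dispatch shape (not Xen code; struct and function names are illustrative):

#include <stdint.h>
#include <stdbool.h>

struct msr_ops {
    int (*do_wrmsr)(unsigned int msr, uint64_t val);
    int (*do_rdmsr)(unsigned int msr, uint64_t *val);
};

static int do_msr(const struct msr_ops *ops, unsigned int msr,
                  uint64_t *content, bool is_write)
{
    if ( ops && is_write && ops->do_wrmsr )
        return ops->do_wrmsr(msr, *content);
    if ( ops && !is_write && ops->do_rdmsr )
        return ops->do_rdmsr(msr, content);

    /* nop path: hide the register by reading back zero */
    if ( !is_write )
        *content = 0;

    return 0;
}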
Example #12
File: memory.c Project: blue236/xen
static unsigned int max_order(const struct domain *d)
{
    unsigned int order = domu_max_order;

#ifdef HAS_PASSTHROUGH
    if ( cache_flush_permitted(d) && order < ptdom_max_order )
        order = ptdom_max_order;
#endif

    if ( is_control_domain(d) && order < ctldom_max_order )
        order = ctldom_max_order;

    if ( is_hardware_domain(d) && order < hwdom_max_order )
        order = hwdom_max_order;

    return min(order, MAX_ORDER + 0U);
}
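max_order() raises a baseline allocation-order limit for each capability the domain holds and then clamps to the global maximum. The same shape as a standalone sketch (not Xen code; the limits are plain parameters here rather than command-line-settable globals):

#include <stdbool.h>

#define MAX_ORDER 20u

static unsigned int max_order(bool is_ctl, bool is_hw, unsigned int domu,
                              unsigned int ctl, unsigned int hw)
{
    unsigned int order = domu;          /* least-privileged baseline */

    if ( is_ctl && order < ctl )
        order = ctl;
    if ( is_hw && order < hw )
        order = hw;

    return order < MAX_ORDER ? order : MAX_ORDER; /* min(order, MAX_ORDER) */
}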
Example #13
int domain_vuart_init(struct domain *d)
{
    ASSERT( is_hardware_domain(d) );

    d->arch.vuart.info = serial_vuart_info(SERHND_DTUART);
    if ( !d->arch.vuart.info )
        return 0;

    spin_lock_init(&d->arch.vuart.lock);
    d->arch.vuart.idx = 0;

    d->arch.vuart.buf = xzalloc_array(char, VUART_BUF_SIZE);
    if ( !d->arch.vuart.buf )
        return -ENOMEM;

    register_mmio_handler(d, &vuart_mmio_handler,
                          d->arch.vuart.info->base_addr,
                          d->arch.vuart.info->size,
                          NULL);

    return 0;
}
Example #14
File: vpmu.c Project: Fantu/Xen
void vpmu_initialise(struct vcpu *v)
{
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t vendor = current_cpu_data.x86_vendor;
    int ret;
    bool_t is_priv_vpmu = is_hardware_domain(v->domain);

    BUILD_BUG_ON(sizeof(struct xen_pmu_intel_ctxt) > XENPMU_CTXT_PAD_SZ);
    BUILD_BUG_ON(sizeof(struct xen_pmu_amd_ctxt) > XENPMU_CTXT_PAD_SZ);
    BUILD_BUG_ON(sizeof(struct xen_pmu_regs) > XENPMU_REGS_PAD_SZ);
    BUILD_BUG_ON(sizeof(struct compat_pmu_regs) > XENPMU_REGS_PAD_SZ);

    ASSERT(!vpmu->flags && !vpmu->context);

    if ( !is_priv_vpmu )
    {
        /*
         * Count active VPMUs so that we won't try to change vpmu_mode while
         * they are in use.
         * vpmu_mode can be safely updated while dom0's VPMUs are active and
         * so we don't need to include it in the count.
         */
        spin_lock(&vpmu_lock);
        vpmu_count++;
        spin_unlock(&vpmu_lock);
    }

    switch ( vendor )
    {
    case X86_VENDOR_AMD:
        ret = svm_vpmu_initialise(v);
        break;

    case X86_VENDOR_INTEL:
        ret = vmx_vpmu_initialise(v);
        break;

    default:
        if ( vpmu_mode != XENPMU_MODE_OFF )
        {
            printk(XENLOG_G_WARNING "VPMU: Unknown CPU vendor %d. "
                   "Disabling VPMU\n", vendor);
            opt_vpmu_enabled = 0;
            vpmu_mode = XENPMU_MODE_OFF;
        }
        return; /* Don't bother restoring vpmu_count, VPMU is off forever */
    }

    if ( ret )
        printk(XENLOG_G_WARNING "VPMU: Initialization failed for %pv\n", v);

    /* Intel needs to initialize VPMU ops even if VPMU is not in use */
    if ( !is_priv_vpmu &&
         (ret || (vpmu_mode == XENPMU_MODE_OFF) ||
          (vpmu_mode == XENPMU_MODE_ALL)) )
    {
        spin_lock(&vpmu_lock);
        vpmu_count--;
        spin_unlock(&vpmu_lock);
    }
}
Example #15
File: vpmu.c Project: Fantu/Xen
void vpmu_do_interrupt(struct cpu_user_regs *regs)
{
    struct vcpu *sampled = current, *sampling;
    struct vpmu_struct *vpmu;
    struct vlapic *vlapic;
    u32 vlapic_lvtpc;

    /*
     * dom0 will handle interrupts for special domains (e.g. the idle
     * domain) or, in XENPMU_MODE_ALL, for everyone.
     */
    if ( (vpmu_mode & XENPMU_MODE_ALL) ||
         (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) )
    {
        sampling = choose_hwdom_vcpu();
        if ( !sampling )
            return;
    }
    else
        sampling = sampled;

    vpmu = vcpu_vpmu(sampling);
    if ( !vpmu->arch_vpmu_ops )
        return;

    /* PV(H) guest */
    if ( !is_hvm_vcpu(sampling) || (vpmu_mode & XENPMU_MODE_ALL) )
    {
        const struct cpu_user_regs *cur_regs;
        uint64_t *flags = &vpmu->xenpmu_data->pmu.pmu_flags;
        domid_t domid;

        if ( !vpmu->xenpmu_data )
            return;

        if ( is_pvh_vcpu(sampling) &&
             !(vpmu_mode & XENPMU_MODE_ALL) &&
             !vpmu->arch_vpmu_ops->do_interrupt(regs) )
            return;

        if ( vpmu_is_set(vpmu, VPMU_CACHED) )
            return;

        /* PV guest will be reading PMU MSRs from xenpmu_data */
        vpmu_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
        vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
        vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);

        if ( has_hvm_container_vcpu(sampled) )
            *flags = 0;
        else
            *flags = PMU_SAMPLE_PV;

        if ( sampled == sampling )
            domid = DOMID_SELF;
        else
            domid = sampled->domain->domain_id;

        /* Store appropriate registers in xenpmu_data */
        /* FIXME: 32-bit PVH should go here as well */
        if ( is_pv_32bit_vcpu(sampling) )
        {
            /*
             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
             * and therefore we treat it the same way as a non-privileged
             * PV 32-bit domain.
             */
            struct compat_pmu_regs *cmp;

            cur_regs = guest_cpu_user_regs();

            cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
            cmp->ip = cur_regs->rip;
            cmp->sp = cur_regs->rsp;
            cmp->flags = cur_regs->eflags;
            cmp->ss = cur_regs->ss;
            cmp->cs = cur_regs->cs;
            if ( (cmp->cs & 3) > 1 )
                *flags |= PMU_SAMPLE_USER;
        }
        else
        {
            struct xen_pmu_regs *r = &vpmu->xenpmu_data->pmu.r.regs;

            if ( (vpmu_mode & XENPMU_MODE_SELF) )
                cur_regs = guest_cpu_user_regs();
            else if ( !guest_mode(regs) &&
                      is_hardware_domain(sampling->domain) )
            {
                cur_regs = regs;
                domid = DOMID_XEN;
            }
            else
                cur_regs = guest_cpu_user_regs();

            r->ip = cur_regs->rip;
            r->sp = cur_regs->rsp;
            r->flags = cur_regs->eflags;

            if ( !has_hvm_container_vcpu(sampled) )
            {
                r->ss = cur_regs->ss;
                r->cs = cur_regs->cs;
                if ( !(sampled->arch.flags & TF_kernel_mode) )
                    *flags |= PMU_SAMPLE_USER;
            }
            else
            {
                struct segment_register seg;

                hvm_get_segment_register(sampled, x86_seg_cs, &seg);
                r->cs = seg.sel;
                hvm_get_segment_register(sampled, x86_seg_ss, &seg);
                r->ss = seg.sel;
                r->cpl = seg.attr.fields.dpl;
                if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
                    *flags |= PMU_SAMPLE_REAL;
            }
        }

        vpmu->xenpmu_data->domain_id = domid;
        vpmu->xenpmu_data->vcpu_id = sampled->vcpu_id;
        if ( is_hardware_domain(sampling->domain) )
            vpmu->xenpmu_data->pcpu_id = smp_processor_id();
        else
            vpmu->xenpmu_data->pcpu_id = sampled->vcpu_id;

        vpmu->hw_lapic_lvtpc |= APIC_LVT_MASKED;
        apic_write(APIC_LVTPC, vpmu->hw_lapic_lvtpc);
        *flags |= PMU_CACHED;
        vpmu_set(vpmu, VPMU_CACHED);

        send_guest_vcpu_virq(sampling, VIRQ_XENPMU);

        return;
    }

    /* HVM guests */
    vlapic = vcpu_vlapic(sampling);

    /* We don't support (yet) HVM dom0 */
    ASSERT(sampling == sampled);

    if ( !vpmu->arch_vpmu_ops->do_interrupt(regs) ||
         !is_vlapic_lvtpc_enabled(vlapic) )
        return;

    vlapic_lvtpc = vlapic_get_reg(vlapic, APIC_LVTPC);

    switch ( GET_APIC_DELIVERY_MODE(vlapic_lvtpc) )
    {
    case APIC_MODE_FIXED:
        vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
        break;
    case APIC_MODE_NMI:
        sampling->nmi_pending = 1;
        break;
    }
}
Example #16
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        if ( !msi->table_base )
            msi->entry_nr = 1;
        irq = *index;
        if ( irq == -1 )
    case MAP_PIRQ_TYPE_MULTI_MSI:
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
        {
            if ( msi->entry_nr <= 0 || msi->entry_nr > 32 )
                ret = -EDOM;
            else if ( msi->entry_nr != 1 && !iommu_intremap )
                ret = -EOPNOTSUPP;
            else
            {
                while ( msi->entry_nr & (msi->entry_nr - 1) )
                    msi->entry_nr += msi->entry_nr & -msi->entry_nr;
                pirq = get_free_pirqs(d, msi->entry_nr);
                if ( pirq < 0 )
                {
                    while ( (msi->entry_nr >>= 1) > 1 )
                        if ( get_free_pirqs(d, msi->entry_nr) > 0 )
                            break;
                    dprintk(XENLOG_G_ERR, "dom%d: no block of %d free pirqs\n",
                            d->domain_id, msi->entry_nr << 1);
                    ret = pirq;
                }
            }
            if ( ret < 0 )
                goto done;
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
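The MAP_PIRQ_TYPE_MULTI_MSI branch above rounds msi->entry_nr up to the next power of two by repeatedly adding the lowest set bit. A standalone sketch of that bit trick (not Xen code): n & (n - 1) is zero exactly when n is a power of two, and n & -n isolates the lowest set bit.

#include <stdio.h>

static unsigned int round_up_pow2(unsigned int n)
{
    while ( n & (n - 1) )   /* not yet a power of two */
        n += n & -n;        /* add the lowest set bit */
    return n;
}

int main(void)
{
    unsigned int v;

    for ( v = 1; v <= 9; v++ )
        printf("%u -> %u\n", v, round_up_pow2(v)); /* 5 -> 8, 9 -> 16, ... */
    return 0;
}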
Example #17
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, uint32_t ssidref)
{
    struct domain *d, **pd, *old_hwdom = NULL;
    enum { INIT_xsm = 1u<<0, INIT_watchdog = 1u<<1, INIT_rangeset = 1u<<2,
           INIT_evtchn = 1u<<3, INIT_gnttab = 1u<<4, INIT_arch = 1u<<5 };
    int err, init_status = 0;
    int poolid = CPUPOOLID_NONE;

    if ( (d = alloc_domain_struct()) == NULL )
        return ERR_PTR(-ENOMEM);

    d->domain_id = domid;

    lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");

    if ( (err = xsm_alloc_security_domain(d)) != 0 )
        goto fail;
    init_status |= INIT_xsm;

    watchdog_domain_init(d);
    init_status |= INIT_watchdog;

    atomic_set(&d->refcnt, 1);
    spin_lock_init_prof(d, domain_lock);
    spin_lock_init_prof(d, page_alloc_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_PAGE_LIST_HEAD(&d->page_list);
    INIT_PAGE_LIST_HEAD(&d->xenpage_list);

    spin_lock_init(&d->node_affinity_lock);
    d->node_affinity = NODE_MASK_ALL;
    d->auto_node_affinity = 1;

    spin_lock_init(&d->shutdown_lock);
    d->shutdown_code = -1;

    spin_lock_init(&d->pbuf_lock);

    err = -ENOMEM;
    if ( !zalloc_cpumask_var(&d->domain_dirty_cpumask) )
        goto fail;

    if ( domcr_flags & DOMCRF_hvm )
        d->guest_type = guest_type_hvm;
    else if ( domcr_flags & DOMCRF_pvh )
        d->guest_type = guest_type_pvh;

    if ( domid == 0 || domid == hardware_domid )
    {
        if ( hardware_domid < 0 || hardware_domid >= DOMID_FIRST_RESERVED )
            panic("The value of hardware_dom must be a valid domain ID");
        d->is_pinned = opt_dom0_vcpus_pin;
        d->disable_migrate = 1;
        old_hwdom = hardware_domain;
        hardware_domain = d;
    }

    rangeset_domain_initialise(d);
    init_status |= INIT_rangeset;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps   = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail;

    if ( domcr_flags & DOMCRF_dummy )
        return d;

    if ( !is_idle_domain(d) )
    {
        if ( (err = xsm_domain_create(XSM_HOOK, d, ssidref)) != 0 )
            goto fail;

        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( !is_hardware_domain(d) )
            d->nr_pirqs = nr_static_irqs + extra_domU_irqs;
        else
            d->nr_pirqs = nr_static_irqs + extra_dom0_irqs;
        if ( d->nr_pirqs > nr_irqs )
            d->nr_pirqs = nr_irqs;

        radix_tree_init(&d->pirq_tree);

        if ( (err = evtchn_init(d)) != 0 )
            goto fail;
        init_status |= INIT_evtchn;

        if ( (err = grant_table_create(d)) != 0 )
            goto fail;
        init_status |= INIT_gnttab;

        poolid = 0;

        err = -ENOMEM;
        d->mem_event = xzalloc(struct mem_event_per_domain);
        if ( !d->mem_event )
            goto fail;

        d->pbuf = xzalloc_array(char, DOMAIN_PBUF_SIZE);
        if ( !d->pbuf )
            goto fail;
    }

    if ( (err = arch_domain_create(d, domcr_flags)) != 0 )
        goto fail;
    init_status |= INIT_arch;

    if ( (err = cpupool_add_domain(d, poolid)) != 0 )
        goto fail;

    if ( (err = sched_init_domain(d)) != 0 )
        goto fail;

    if ( (err = late_hwdom_init(d)) != 0 )
        goto fail;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail:
    d->is_dying = DOMDYING_dead;
    if ( hardware_domain == d )
        hardware_domain = old_hwdom;
    atomic_set(&d->refcnt, DOMAIN_DESTROYED);
    xfree(d->mem_event);
    xfree(d->pbuf);
    if ( init_status & INIT_arch )
        arch_domain_destroy(d);
    if ( init_status & INIT_gnttab )
        grant_table_destroy(d);
    if ( init_status & INIT_evtchn )
    {
        evtchn_destroy(d);
        evtchn_destroy_final(d);
        radix_tree_destroy(&d->pirq_tree, free_pirq_struct);
    }
    if ( init_status & INIT_rangeset )
        rangeset_domain_destroy(d);
    if ( init_status & INIT_watchdog )
        watchdog_domain_destroy(d);
    if ( init_status & INIT_xsm )
        xsm_free_security_domain(d);
    free_cpumask_var(d->domain_dirty_cpumask);
    free_domain_struct(d);
    return ERR_PTR(err);
}
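domain_create() uses a staged-initialization bitmask: each completed step sets a bit in init_status, and the failure path tears down only the stages whose bits are set, newest first. A standalone sketch of the pattern (not Xen code; the stage hooks are hypothetical stand-ins for the evtchn/gnttab/arch setup above):

#include <errno.h>
#include <stdio.h>

enum { INIT_a = 1u << 0, INIT_b = 1u << 1 };

static int  setup_a(void)    { return 0; }
static int  setup_b(void)    { return -ENOMEM; } /* simulate a failure */
static void teardown_a(void) { puts("teardown_a"); }
static void teardown_b(void) { puts("teardown_b"); }

static int create(void)
{
    unsigned int init_status = 0;
    int err;

    if ( (err = setup_a()) != 0 )
        goto fail;
    init_status |= INIT_a;          /* stage a is now live */

    if ( (err = setup_b()) != 0 )
        goto fail;
    init_status |= INIT_b;          /* stage b is now live */

    return 0;

 fail:
    /* Unwind only what was set up, in reverse order. */
    if ( init_status & INIT_b )
        teardown_b();
    if ( init_status & INIT_a )
        teardown_a();
    return err;
}

int main(void)
{
    return create() ? 1 : 0;
}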
Example #18
int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                       struct xen_arch_domainconfig *config)
{
    int rc;
    uint8_t gic_version;

    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    ASSERT(config != NULL);
    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

    if ( (rc = domain_io_init(d)) != 0 )
        goto fail;

    if ( (rc = p2m_alloc_table(d)) != 0 )
        goto fail;

    /*
     * Currently the vGIC is emulating the same version of the
     * hardware GIC. Only the value XEN_DOMCTL_CONFIG_GIC_DEFAULT
     * is allowed. The DOMCTL will return the actual version of the
     * GIC.
     */
    rc = -EOPNOTSUPP;
    if ( config->gic_version != XEN_DOMCTL_CONFIG_GIC_DEFAULT )
        goto fail;

    switch ( gic_hw_version() )
    {
    case GIC_V3:
        gic_version = XEN_DOMCTL_CONFIG_GIC_V3;
        break;
    case GIC_V2:
        gic_version = XEN_DOMCTL_CONFIG_GIC_V2;
        break;
    default:
        BUG();
    }
    config->gic_version = gic_version;

    if ( (rc = gicv_setup(d)) != 0 )
        goto fail;

    if ( (rc = domain_vgic_init(d)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d)) != 0 )
        goto fail;

    /*
     * The hardware domain will get a PPI later in
     * arch/arm/domain_build.c depending on the
     * interrupt map of the hardware.
     */
    if ( !is_hardware_domain(d) )
    {
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
        /* At this stage vgic_reserve_virq should never fail */
        if ( !vgic_reserve_virq(d, GUEST_EVTCHN_PPI) )
            BUG();
    }

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}
Example #19
File: domain.c Project: Xilinx/xen
int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                       struct xen_arch_domainconfig *config)
{
    int rc, count = 0;

    BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
    d->arch.relmem = RELMEM_not_started;

    /* Idle domains do not need this setup */
    if ( is_idle_domain(d) )
        return 0;

    ASSERT(config != NULL);

    /* p2m_init relies on some value initialized by the IOMMU subsystem */
    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    if ( (rc = p2m_init(d)) != 0 )
        goto fail;

    rc = -ENOMEM;
    if ( (d->shared_info = alloc_xenheap_pages(0, 0)) == NULL )
        goto fail;

    /* Default the virtual ID to match the physical */
    d->arch.vpidr = boot_cpu_data.midr.bits;

    clear_page(d->shared_info);
    share_xen_page_with_guest(
        virt_to_page(d->shared_info), d, XENSHARE_writable);

    switch ( config->gic_version )
    {
    case XEN_DOMCTL_CONFIG_GIC_NATIVE:
        switch ( gic_hw_version() )
        {
        case GIC_V2:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V2;
            d->arch.vgic.version = GIC_V2;
            break;

        case GIC_V3:
            config->gic_version = XEN_DOMCTL_CONFIG_GIC_V3;
            d->arch.vgic.version = GIC_V3;
            break;

        default:
            BUG();
        }
        break;

    case XEN_DOMCTL_CONFIG_GIC_V2:
        d->arch.vgic.version = GIC_V2;
        break;

    case XEN_DOMCTL_CONFIG_GIC_V3:
        d->arch.vgic.version = GIC_V3;
        break;

    default:
        rc = -EOPNOTSUPP;
        goto fail;
    }

    if ( (rc = domain_vgic_register(d, &count)) != 0 )
        goto fail;

    if ( (rc = domain_io_init(d, count + MAX_IO_HANDLER)) != 0 )
        goto fail;

    if ( (rc = domain_vgic_init(d, config->nr_spis)) != 0 )
        goto fail;

    if ( (rc = domain_vtimer_init(d, config)) != 0 )
        goto fail;

    update_domain_wallclock_time(d);

    /*
     * The hardware domain will get a PPI later in
     * arch/arm/domain_build.c depending on the
     * interrupt map of the hardware.
     */
    if ( !is_hardware_domain(d) )
    {
        d->arch.evtchn_irq = GUEST_EVTCHN_PPI;
        /* At this stage vgic_reserve_virq should never fail */
        if ( !vgic_reserve_virq(d, GUEST_EVTCHN_PPI) )
            BUG();
    }

    /*
     * Virtual UART is only used by linux early printk and decompress code.
     * Only use it for the hardware domain because the linux kernel may not
     * support multi-platform.
     */
    if ( is_hardware_domain(d) && (rc = domain_vuart_init(d)) )
        goto fail;

    return 0;

fail:
    d->is_dying = DOMDYING_dead;
    arch_domain_destroy(d);

    return rc;
}
Example #20
int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
                     struct msi_info *msi)
{
    struct domain *d = current->domain;
    int pirq, irq, ret = 0;
    void *map_data = NULL;

    if ( domid == DOMID_SELF && is_hvm_domain(d) )
    {
        /*
         * Only makes sense for vector-based callback, else HVM-IRQ logic
         * calls back into itself and deadlocks on hvm_domain.irq_lock.
         */
        if ( !is_hvm_pv_evtchn_domain(d) )
            return -EINVAL;

        return physdev_hvm_map_pirq(d, type, index, pirq_p);
    }

    d = rcu_lock_domain_by_any_id(domid);
    if ( d == NULL )
        return -ESRCH;

    ret = xsm_map_domain_pirq(XSM_TARGET, d);
    if ( ret )
        goto free_domain;

    /* Verify or get irq. */
    switch ( type )
    {
    case MAP_PIRQ_TYPE_GSI:
        if ( *index < 0 || *index >= nr_irqs_gsi )
        {
            dprintk(XENLOG_G_ERR, "dom%d: map invalid irq %d\n",
                    d->domain_id, *index);
            ret = -EINVAL;
            goto free_domain;
        }

        irq = domain_pirq_to_irq(current->domain, *index);
        if ( irq <= 0 )
        {
            if ( is_hardware_domain(current->domain) )
                irq = *index;
            else
            {
                dprintk(XENLOG_G_ERR, "dom%d: map pirq with incorrect irq!\n",
                        d->domain_id);
                ret = -EINVAL;
                goto free_domain;
            }
        }
        break;

    case MAP_PIRQ_TYPE_MSI:
        irq = *index;
        if ( irq == -1 )
            irq = create_irq(NUMA_NO_NODE);

        if ( irq < nr_irqs_gsi || irq >= nr_irqs )
        {
            dprintk(XENLOG_G_ERR, "dom%d: can't create irq for msi!\n",
                    d->domain_id);
            ret = -EINVAL;
            goto free_domain;
        }

        msi->irq = irq;
        map_data = msi;
        break;

    default:
        dprintk(XENLOG_G_ERR, "dom%d: wrong map_pirq type %x\n",
                d->domain_id, type);
        ret = -EINVAL;
        goto free_domain;
    }

    spin_lock(&pcidevs_lock);
    /* Verify or get pirq. */
    spin_lock(&d->event_lock);
    pirq = domain_irq_to_pirq(d, irq);
    if ( *pirq_p < 0 )
    {
        if ( pirq )
        {
            dprintk(XENLOG_G_ERR, "dom%d: %d:%d already mapped to %d\n",
                    d->domain_id, *index, *pirq_p, pirq);
            if ( pirq < 0 )
            {
                ret = -EBUSY;
                goto done;
            }
        }
        else
        {
            pirq = get_free_pirq(d, type);
            if ( pirq < 0 )
            {
                dprintk(XENLOG_G_ERR, "dom%d: no free pirq\n", d->domain_id);
                ret = pirq;
                goto done;
            }
        }
    }
    else
    {
        if ( pirq && pirq != *pirq_p )
        {
            dprintk(XENLOG_G_ERR, "dom%d: pirq %d conflicts with irq %d\n",
                    d->domain_id, *index, *pirq_p);
            ret = -EEXIST;
            goto done;
        }
        else
            pirq = *pirq_p;
    }

    ret = map_domain_pirq(d, pirq, irq, type, map_data);
    if ( ret == 0 )
        *pirq_p = pirq;

 done:
    spin_unlock(&d->event_lock);
    spin_unlock(&pcidevs_lock);
    if ( (ret != 0) && (type == MAP_PIRQ_TYPE_MSI) && (*index == -1) )
        destroy_irq(irq);
 free_domain:
    rcu_unlock_domain(d);
    return ret;
}