Example #1
File: save.c Project: amodj/Utopia
int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
{
    uint32_t eax, ebx, ecx, edx;

    if ( hdr->magic != HVM_FILE_MAGIC )
    {
        gdprintk(XENLOG_ERR, 
                 "HVM restore: bad magic number %#"PRIx32"\n", hdr->magic);
        return -1;
    }

    if ( hdr->version != HVM_FILE_VERSION )
    {
        gdprintk(XENLOG_ERR, 
                 "HVM restore: unsupported version %u\n", hdr->version);
        return -1;
    }

    cpuid(1, &eax, &ebx, &ecx, &edx);
    /* TODO: need to define how big a difference is acceptable? */
    if ( hdr->cpuid != eax )
        gdprintk(XENLOG_WARNING, "HVM restore: saved CPUID (%#"PRIx32") "
               "does not match host (%#"PRIx32").\n", hdr->cpuid, eax);

    /* VGA state is not saved/restored, so we nobble the cache. */
    d->arch.hvm_domain.stdvga.cache = 0;

    return 0;
}
Example #2
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
    unsigned long mfn;

    mfn = gmfn_to_mfn(d, gmfn);
    if ( unlikely(!mfn_valid(mfn)) )
    {
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                d->domain_id, gmfn);
        return 0;
    }
            
    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

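    /*
     * Drop the pin-type and allocation references if this domain still holds
     * them, then remove the p2m mapping and release the reference taken by
     * get_page() above.
     */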
    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);
            
    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);

    return 1;
}
Example #3
int vmsi_deliver(struct domain *d, int pirq)
{
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
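    /* Decode destination, destination mode, delivery mode and trigger mode
     * from the guest-programmed MSI flags. */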
    uint8_t dest = (uint8_t)flags;
    uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
    uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE;
    uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GLFAGS_SHIFT_TRG_MODE;
    struct vlapic *target;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "msi: dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x\n",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    if ( !( hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI ) )
    {
        gdprintk(XENLOG_WARNING, "pirq %x not msi \n", pirq);
        return 0;
    }

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
    {
        target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
        if ( target != NULL )
            vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
        else
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "vector=%x delivery_mode=%x\n",
                        vector, dest_LowestPrio);
        break;
    }

    case dest_Fixed:
    case dest_ExtINT:
    {
        for_each_vcpu ( d, v )
            if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                   0, dest, dest_mode) )
                vmsi_inj_irq(d, vcpu_vlapic(v),
                             vector, trig_mode, delivery_mode);
        break;
    }

    case dest_SMI:
    case dest_NMI:
    case dest_INIT:
    case dest__reserved_2:
    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
    return 1;
}
Example #4
File: realmode.c Project: CPFL/xen
static void realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct vcpu *curr = current;
    int rc;

    perfc_incr(realmode_emulations);

    rc = hvm_emulate_one(hvmemul_ctxt);

    if ( rc == X86EMUL_UNHANDLEABLE )
    {
        gdprintk(XENLOG_ERR, "Failed to emulate insn.\n");
        goto fail;
    }

    if ( rc == X86EMUL_EXCEPTION )
    {
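        /*
         * If the emulator did not record the exception itself, pick up the
         * vector latched in VM_ENTRY_INTR_INFO and clear that field so the
         * event can be re-injected by the real-mode path below.
         */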
        if ( !hvmemul_ctxt->exn_pending )
        {
            unsigned long intr_info;

            __vmread(VM_ENTRY_INTR_INFO, &intr_info);
            __vmwrite(VM_ENTRY_INTR_INFO, 0);
            if ( !(intr_info & INTR_INFO_VALID_MASK) )
            {
                gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
                goto fail;
            }
            hvmemul_ctxt->trap.vector = (uint8_t)intr_info;
            hvmemul_ctxt->trap.insn_len = 0;
        }

        if ( unlikely(curr->domain->debugger_attached) &&
             ((hvmemul_ctxt->trap.vector == TRAP_debug) ||
              (hvmemul_ctxt->trap.vector == TRAP_int3)) )
        {
            domain_pause_for_debugger();
        }
        else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
        {
            gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
                     hvmemul_ctxt->trap.vector);
            goto fail;
        }
        else
        {
            realmode_deliver_exception(
                hvmemul_ctxt->trap.vector,
                hvmemul_ctxt->trap.insn_len,
                hvmemul_ctxt);
        }
    }

    return;

 fail:
    hvm_dump_emulation_state(XENLOG_G_ERR "Real-mode", hvmemul_ctxt);
    domain_crash(curr->domain);
}
Example #5
static void dump_apic_assist(const struct vcpu *v)
{
    gdprintk(XENLOG_INFO, "APIC_ASSIST[%d]:\n", v->vcpu_id);
    gdprintk(XENLOG_INFO, "\tenabled: %x\n",
            v->arch.hvm_vcpu.viridian.apic_assist.fields.enabled);
    gdprintk(XENLOG_INFO, "\tpfn: %lx\n",
            (unsigned long)v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn);
}
Example #6
static void dump_hypercall(const struct domain *d)
{
    gdprintk(XENLOG_INFO, "HYPERCALL:\n");
    gdprintk(XENLOG_INFO, "\tenabled: %x\n",
            d->arch.hvm_domain.viridian.hypercall_gpa.fields.enabled);
    gdprintk(XENLOG_INFO, "\tpfn: %lx\n",
            (unsigned long)d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn);
}
Example #7
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
#ifdef CONFIG_X86
    p2m_type_t p2mt;
#endif
    unsigned long mfn;

#ifdef CONFIG_X86
    mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(d), gmfn, &p2mt)); 
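    /* The gfn is paged out: drop the p2m mapping and tell the pager the
     * page can be discarded. */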
    if ( unlikely(p2m_is_paging(p2mt)) )
    {
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        p2m_mem_paging_drop_page(p2m_get_hostp2m(d), gmfn);
        return 1;
    }
#else
    mfn = gmfn_to_mfn(d, gmfn);
#endif
    if ( unlikely(!mfn_valid(mfn)) )
    {
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                d->domain_id, gmfn);
        return 0;
    }
            
    page = mfn_to_page(mfn);
#ifdef CONFIG_X86
    /* If gmfn is shared, just drop the guest reference (which may or may not
     * free the page) */
    if(p2m_is_shared(p2mt))
    {
        put_page_and_type(page);
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        return 1;
    }

#endif /* CONFIG_X86 */
    if ( unlikely(!get_page(page, d)) )
    {
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);
            
    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);

    return 1;
}
Example #8
int hvm_save(struct domain *d, hvm_domain_context_t *h)
{
    char *c;
    struct hvm_save_header hdr;
    struct hvm_save_end end;
    hvm_save_handler handler;
    uint16_t i;

    hdr.magic = HVM_FILE_MAGIC;
    hdr.version = HVM_FILE_VERSION;

    /* Save xen changeset */
    c = strrchr(xen_changeset(), ':');
    if ( c )
        hdr.changeset = simple_strtoll(c, NULL, 16);
    else 
        hdr.changeset = -1ULL; /* Unknown */

    arch_hvm_save(d, &hdr);

    if ( hvm_save_entry(HEADER, 0, h, &hdr) != 0 )
    {
        gdprintk(XENLOG_ERR, "HVM save: failed to write header\n");
        return -EFAULT;
    } 

    /* Save all available kinds of state */
    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
    {
        handler = hvm_sr_handlers[i].save;
        if ( handler != NULL ) 
        {
            gdprintk(XENLOG_INFO, "HVM save: %s\n",  hvm_sr_handlers[i].name);
            if ( handler(d, h) != 0 ) 
            {
                gdprintk(XENLOG_ERR, 
                         "HVM save: failed to save type %"PRIu16"\n", i);
                return -EFAULT;
            } 
        }
    }

    /* Save an end-of-file marker */
    if ( hvm_save_entry(END, 0, h, &end) != 0 )
    {
        /* Run out of data */
        gdprintk(XENLOG_ERR, "HVM save: no room for end marker.\n");
        return -EFAULT;
    }

    /* Save macros should not have let us overrun */
    ASSERT(h->cur <= h->size);
    return 0;
}
Example #9
int mem_event_check_ring(struct domain *d)
{
    struct vcpu *curr = current;
    int free_requests;
    int ring_full;

    if ( !d->mem_event.ring_page )
        return -1;

    mem_event_ring_lock(d);

    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
    if ( unlikely(free_requests < 2) )
    {
        gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
        WARN_ON(free_requests == 0);
    }
    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;

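    /*
     * A domain checking its own ring while it is nearly full pauses this
     * vCPU; it is unpaused once the ring has drained.
     */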
    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
    {
        set_bit(_VPF_mem_event, &curr->pause_flags);
        vcpu_sleep_nosync(curr);
    }

    mem_event_ring_unlock(d);

    return ring_full;
}
Example #10
static void initialize_apic_assist(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.fields.pfn;
    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    uint8_t *p;

    /*
     * We don't yet make use of the APIC assist page but by setting
     * the CPUID3A_MSR_APIC_ACCESS bit in CPUID leaf 40000003 we are duty
     * bound to support the MSR. We therefore do just enough to keep windows
     * happy.
     *
     * See http://msdn.microsoft.com/en-us/library/ff538657%28VS.85%29.aspx for
     * details of how Windows uses the page.
     */

    if ( !page || !get_page_type(page, PGT_writable_page) )
    {
        if ( page )
            put_page(page);
        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn,
                 page_to_mfn(page));
        return;
    }

    p = __map_domain_page(page);

    *(u32 *)p = 0;

    unmap_domain_page(p);

    put_page_and_type(page);
}
Example #11
File: viridian.c Project: djs55/xen
static void enable_hypercall_page(struct domain *d)
{
    unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    uint8_t *p;

    if ( !page || !get_page_type(page, PGT_writable_page) )
    {
        if ( page )
            put_page(page);
        gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
                 gmfn, page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
        return;
    }

    p = __map_domain_page(page);

    /*
     * We set the bit 31 in %eax (reserved field in the Viridian hypercall
     * calling convention) to differentiate Xen and Viridian hypercalls.
     */
    *(u8  *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */
    *(u32 *)(p + 1) = 0x80000000;
    *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
    *(u8  *)(p + 6) = 0x01;
    *(u8  *)(p + 7) = (cpu_has_vmx ? 0xc1 : 0xd9);
    *(u8  *)(p + 8) = 0xc3; /* ret */
    memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */

    unmap_domain_page(p);

    put_page_and_type(page);
}
Example #12
File: rtc.c Project: 0day-ci/xen
static int handle_rtc_io(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct RTCState *vrtc = vcpu_vrtc(current);

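    /* The emulated RTC only accepts single-byte port accesses. */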
    if ( bytes != 1 )
    {
        gdprintk(XENLOG_WARNING, "HVM_RTC bad access\n");
        *val = ~0;
        return X86EMUL_OKAY;
    }
    
    if ( dir == IOREQ_WRITE )
    {
        if ( rtc_ioport_write(vrtc, port, (uint8_t)*val) )
            return X86EMUL_OKAY;
    }
    else if ( vrtc->hw.cmos_index < RTC_CMOS_SIZE )
    {
        *val = rtc_ioport_read(vrtc, port);
        return X86EMUL_OKAY;
    }

    return X86EMUL_UNHANDLEABLE;
}
Example #13
static int
fpswa_get_domain_addr(struct vcpu *v, unsigned long gpaddr, size_t size,
		      void **virt, struct page_info **page, const char *name)
{
	int cross_page_boundary;

	if (gpaddr == 0) {
		*virt = 0;
		return 0;
	}

	cross_page_boundary = (((gpaddr & ~PAGE_MASK) + size) > PAGE_SIZE);
	if (unlikely(cross_page_boundary)) {
		/* this case isn't implemented */
		gdprintk(XENLOG_ERR,
			 "%s: fpswa hypercall is called with "
			 "page crossing argument %s 0x%lx\n",
			 __func__, name, gpaddr);
		return -ENOSYS;
	}

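	/*
	 * Translate the guest physical address and take a reference on the
	 * backing page; if get_page() races with a concurrent free, retry as
	 * long as the page still belongs to this domain.
	 */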
again:
	*virt = domain_mpa_to_imva(v->domain, gpaddr);
	*page = virt_to_page(*virt);
	if (get_page(*page, current->domain) == 0) {
		if (page_get_owner(*page) != current->domain) {
			*page = NULL;
			return -EFAULT;
		}
		goto again;
	}

	return 0;
}
Example #14
int
set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    int rc = 0;
    p2m_access_t a;
    p2m_type_t ot;
    mfn_t omfn;
    unsigned long pg_type;

    if ( !paging_mode_translate(p2m->domain) )
        return 0;

    gfn_lock(p2m, gfn, 0);
    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
    /* At the moment we only allow p2m change if gfn has already been made
     * sharable first */
    ASSERT(p2m_is_shared(ot));
    ASSERT(mfn_valid(omfn));
    /* Set the m2p entry to invalid only if there are no further type
     * refs to this page as shared */
    pg_type = read_atomic(&(mfn_to_page(omfn)->u.inuse.type_info));
    if ( (pg_type & PGT_count_mask) == 0
         || (pg_type & PGT_type_mask) != PGT_shared_page )
        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);

    P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared, p2m->default_access);
    gfn_unlock(p2m, gfn, 0);
    if ( 0 == rc )
        gdprintk(XENLOG_ERR,
            "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
    return rc;
}
Example #15
int
clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
{
    int rc = 0;
    mfn_t mfn;
    p2m_access_t a;
    p2m_type_t t;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    if ( !paging_mode_translate(d) )
        return 0;

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);

    /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
    if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
    {
        gdprintk(XENLOG_ERR,
            "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
        goto out;
    }
    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access);

out:
    gfn_unlock(p2m, gfn, 0);

    return rc;
}
Example #16
int
set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
    int rc = 0;
    p2m_access_t a;
    p2m_type_t ot;
    mfn_t omfn;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    if ( !paging_mode_translate(d) )
        return 0;

    gfn_lock(p2m, gfn, 0);
    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
    if ( p2m_is_grant(ot) )
    {
        p2m_unlock(p2m);
        domain_crash(d);
        return 0;
    }
    else if ( p2m_is_ram(ot) )
    {
        ASSERT(mfn_valid(omfn));
        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
    }

    P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
    gfn_unlock(p2m, gfn, 0);
    if ( 0 == rc )
        gdprintk(XENLOG_ERR,
            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
    return rc;
}
Example #17
static void enable_hypercall_page(void)
{
    struct domain *d = current->domain;
    unsigned long gmfn = d->arch.hvm_domain.viridian.hypercall_gpa.fields.pfn;
    unsigned long mfn = gmfn_to_mfn(d, gmfn);
    uint8_t *p;

    if ( !mfn_valid(mfn) ||
         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
    {
        gdprintk(XENLOG_WARNING, "Bad GMFN %lx (MFN %lx)\n", gmfn, mfn);
        return;
    }

    p = map_domain_page(mfn);

    /*
     * We set the bit 31 in %eax (reserved field in the Viridian hypercall
     * calling convention) to differentiate Xen and Viridian hypercalls.
     */
    *(u8  *)(p + 0) = 0x0d; /* orl $0x80000000, %eax */
    *(u32 *)(p + 1) = 0x80000000;
    *(u8  *)(p + 5) = 0x0f; /* vmcall/vmmcall */
    *(u8  *)(p + 6) = 0x01;
    *(u8  *)(p + 7) = ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                       ? 0xc1 : 0xd9);
    *(u8  *)(p + 8) = 0xc3; /* ret */
    memset(p + 9, 0xcc, PAGE_SIZE - 9); /* int3, int3, ... */

    unmap_domain_page(p);

    put_page_and_type(mfn_to_page(mfn));
}
Example #18
/* get_page() to prevent another vcpu freeing the page. */
static int
xencomm_get_page(unsigned long paddr, struct page_info **page)
{
    unsigned long maddr = paddr_to_maddr(paddr);
    if ( maddr == 0 )
        return -EFAULT;
        
    *page = maddr_to_page(maddr);
    if ( get_page(*page, current->domain) == 0 )
    {
        if ( page_get_owner(*page) != current->domain )
        {
            /*
             * This page might be a page granted by another domain, or
             * this page is freed with decrease reservation hypercall at
             * the same time.
             */
            gdprintk(XENLOG_WARNING,
                     "bad page is passed. paddr 0x%lx maddr 0x%lx\n",
                     paddr, maddr);
            return -EFAULT;
        }

        /* Try again. */
        cpu_relax();
        return -EAGAIN;
    }

    return 0;
}
Example #19
unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, unsigned long gva, uint32_t *pfec)
{
    gdprintk(XENLOG_ERR,
             "Guest paging level is greater than host paging level!\n");
    domain_crash(v->domain);
    return INVALID_GFN;
}
Example #20
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( (a->extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) ) 
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        mfn = page_to_mfn(page);
        guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

        if ( !paging_mode_translate(d) )
        {
            for ( j = 0; j < (1 << a->extent_order); j++ )
                set_gpfn_from_mfn(mfn + j, gpfn + j);

            /* Inform the domain of the new page's machine address. */ 
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}
Example #21
File: viridian.c Project: djs55/xen
static void initialize_apic_assist(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned long gmfn = v->arch.hvm_vcpu.viridian.apic_assist.msr.fields.pfn;
    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
    void *va;

    /*
     * See section 13.3.4.1 of the specification for details of this
     * enlightenment.
     */

    if ( !page )
        goto fail;

    if ( !get_page_type(page, PGT_writable_page) )
    {
        put_page(page);
        goto fail;
    }

    va = __map_domain_page_global(page);
    if ( !va )
    {
        put_page_and_type(page);
        goto fail;
    }

    *(uint32_t *)va = 0;

    if ( viridian_feature_mask(v->domain) & HVMPV_apic_assist )
    {
        /*
         * If we overwrite an existing address here then something has
         * gone wrong and a domain page will leak. Instead crash the
         * domain to make the problem obvious.
         */
        if ( v->arch.hvm_vcpu.viridian.apic_assist.va )
            domain_crash(d);

        v->arch.hvm_vcpu.viridian.apic_assist.va = va;
        return;
    }

    unmap_domain_page_global(va);
    put_page_and_type(page);
    return;

 fail:
    gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", gmfn,
             page ? page_to_mfn(page) : mfn_x(INVALID_MFN));
}
Example #22
static inline int
get_maptrack_handle(
    struct grant_table *lgt)
{
    int                   i;
    grant_handle_t        handle;
    struct grant_mapping *new_mt;
    unsigned int          new_mt_limit, nr_frames;

    if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
    {
        spin_lock(&lgt->lock);

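        /* Re-check under the lock: another vCPU may have freed a handle or
         * extended the maptrack table in the meantime. */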
        if ( unlikely((handle = __get_maptrack_handle(lgt)) == -1) )
        {
            nr_frames = nr_maptrack_frames(lgt);
            if ( nr_frames >= max_nr_maptrack_frames() )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            new_mt = alloc_xenheap_page();
            if ( new_mt == NULL )
            {
                spin_unlock(&lgt->lock);
                return -1;
            }

            clear_page(new_mt);

            new_mt_limit = lgt->maptrack_limit + MAPTRACK_PER_PAGE;

            for ( i = lgt->maptrack_limit; i < new_mt_limit; i++ )
            {
                new_mt[i % MAPTRACK_PER_PAGE].ref = i+1;
                new_mt[i % MAPTRACK_PER_PAGE].flags = 0;
            }

            lgt->maptrack[nr_frames] = new_mt;
            lgt->maptrack_limit      = new_mt_limit;

            gdprintk(XENLOG_INFO,
                     "Increased maptrack size to %u frames.\n", nr_frames + 1);
            handle = __get_maptrack_handle(lgt);
        }

        spin_unlock(&lgt->lock);
    }
    return handle;
}
Example #23
/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed.  As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting.  The vCPU will be automatically unpaused when the ring clears.
 */
void vm_event_put_request(struct domain *d,
                          struct vm_event_domain *ved,
                          vm_event_request_t *req)
{
    vm_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;

    if ( current->domain != d )
    {
        req->flags |= VM_EVENT_FLAG_FOREIGN;
#ifndef NDEBUG
        if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
            gdprintk(XENLOG_G_WARNING, "d%dv%d was not paused.\n",
                     d->domain_id, req->vcpu_id);
#endif
    }

    req->version = VM_EVENT_INTERFACE_VERSION;

    vm_event_ring_lock(ved);

    /* Due to the reservations, this step must succeed. */
    front_ring = &ved->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    vm_event_release_slot(d, ved);

    /* Give this vCPU a black eye if necessary, on the way out.
     * See the comments above wake_blocked() for more information
     * on how this mechanism works to avoid waiting. */
    avail_req = vm_event_ring_available(ved);
    if( current->domain == d && avail_req < d->max_vcpus )
        vm_event_mark_and_pause(current, ved);

    vm_event_ring_unlock(ved);

    notify_via_xen_event_channel(d, ved->xen_port);
}
Example #24
File: pmtimer.c Project: CPFL/xen
/* Handle port I/O to the PM1a_STS and PM1a_EN registers */
static int handle_evt_io(
    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
{
    struct vcpu *v = current;
    PMTState *s = &v->domain->arch.hvm_domain.pl_time.vpmt;
    uint32_t addr, data, byte;
    int i;

    addr = port -
        ((v->domain->arch.hvm_domain.params[
            HVM_PARAM_ACPI_IOPORTS_LOCATION] == 0) ?
         PM1a_STS_ADDR_V0 : PM1a_STS_ADDR_V1);

    spin_lock(&s->lock);

    if ( dir == IOREQ_WRITE )
    {
        /* Handle this I/O one byte at a time */
        for ( i = bytes, data = *val;
              i > 0;
              i--, addr++, data >>= 8 )
        {
            byte = data & 0xff;
            switch ( addr )
            {
                /* PM1a_STS register bits are write-to-clear */
            case 0 /* PM1a_STS_ADDR */:
                s->pm.pm1a_sts &= ~byte;
                break;
            case 1 /* PM1a_STS_ADDR + 1 */:
                s->pm.pm1a_sts &= ~(byte << 8);
                break;
            case 2 /* PM1a_EN_ADDR */:
                s->pm.pm1a_en = (s->pm.pm1a_en & 0xff00) | byte;
                break;
            case 3 /* PM1a_EN_ADDR + 1 */:
                s->pm.pm1a_en = (s->pm.pm1a_en & 0xff) | (byte << 8);
                break;
            default:
                gdprintk(XENLOG_WARNING, 
                         "Bad ACPI PM register write: %x bytes (%x) at %x\n", 
                         bytes, *val, port);
            }
        }
        /* Fix up the SCI state to match the new register state */
        pmt_update_sci(s);
    }
    else /* p->dir == IOREQ_READ */
    {
Example #25
static struct vmcs_struct *vmx_alloc_vmcs(void)
{
    struct vmcs_struct *vmcs;

    if ( (vmcs = alloc_xenheap_page()) == NULL )
    {
        gdprintk(XENLOG_WARNING, "Failed to allocate VMCS.\n");
        return NULL;
    }

    clear_page(vmcs);
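    /* Every VMCS region must begin with the processor's VMCS revision id. */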
    vmcs->vmcs_revision_id = vmcs_revision_id;

    return vmcs;
}
Example #26
static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    mcd_mem_inc_trap(a->domain, (a->nr_extents - a->nr_done));

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) ) 
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                    "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */ 
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
    mcd_mem_upt_trap(d);
}
Example #27
/*
 * Dom0 issues this hypercall in place of writing pm1a_cnt. Xen then
 * takes over the control and put the system into sleep state really.
 */
int acpi_enter_sleep(struct xenpf_enter_acpi_sleep *sleep)
{
    if ( sleep->flags & XENPF_ACPI_SLEEP_EXTENDED )
    {
        if ( !acpi_sinfo.sleep_control.address ||
             !acpi_sinfo.sleep_status.address )
            return -EPERM;

        if ( sleep->flags & ~XENPF_ACPI_SLEEP_EXTENDED )
            return -EINVAL;

        if ( sleep->val_a > ACPI_SLEEP_TYPE_MAX ||
             (sleep->val_b != ACPI_SLEEP_TYPE_INVALID &&
              sleep->val_b > ACPI_SLEEP_TYPE_MAX) )
            return -ERANGE;

        acpi_sinfo.sleep_type_a = sleep->val_a;
        acpi_sinfo.sleep_type_b = sleep->val_b;

        acpi_sinfo.sleep_extended = 1;
    }

    else if ( !acpi_sinfo.pm1a_cnt_blk.address )
        return -EPERM;

    /* Sanity check */
    else if ( sleep->val_b &&
              ((sleep->val_a ^ sleep->val_b) & ACPI_BITMASK_SLEEP_ENABLE) )
    {
        gdprintk(XENLOG_ERR, "Mismatched pm1a/pm1b setting.");
        return -EINVAL;
    }

    else if ( sleep->flags )
        return -EINVAL;

    else
    {
        acpi_sinfo.pm1a_cnt_val = sleep->val_a;
        acpi_sinfo.pm1b_cnt_val = sleep->val_b;
        acpi_sinfo.sleep_extended = 0;
    }

    acpi_sinfo.sleep_state = sleep->sleep_state;

    return continue_hypercall_on_cpu(0, enter_state_helper, &acpi_sinfo);
}
Example #28
static inline int hpet_check_access_length(
    unsigned long addr, unsigned long len)
{
    if ( (addr & (len - 1)) || (len > 8) )
    {
        /*
         * According to ICH9 specification, unaligned accesses may result
         * in unexpected behaviour or master abort, but should not crash/hang.
         * Hence we read all-ones, drop writes, and log a warning.
         */
        gdprintk(XENLOG_WARNING, "HPET: access across register boundary: "
                 "%lx %lx\n", addr, len);
        return -EINVAL;
    }

    return 0;
}
Example #29
static int amd_vpmu_initialise(struct vcpu *v)
{
    struct amd_vpmu_context *ctxt;
    struct vpmu_struct *vpmu = vcpu_vpmu(v);
    uint8_t family = current_cpu_data.x86;

    if ( vpmu_is_set(vpmu, VPMU_CONTEXT_ALLOCATED) )
        return 0;

    if ( counters == NULL )
    {
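        /* Select the performance counter and control MSR tables for this
         * CPU family (done once, on first use). */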
        switch ( family )
        {
        case 0x15:
            num_counters = F15H_NUM_COUNTERS;
            counters = AMD_F15H_COUNTERS;
            ctrls = AMD_F15H_CTRLS;
            k7_counters_mirrored = 1;
            break;
        case 0x10:
        case 0x12:
        case 0x14:
        case 0x16:
        default:
            num_counters = F10H_NUM_COUNTERS;
            counters = AMD_F10H_COUNTERS;
            ctrls = AMD_F10H_CTRLS;
            k7_counters_mirrored = 0;
            break;
        }
    }

    ctxt = xzalloc(struct amd_vpmu_context);
    if ( !ctxt )
    {
        gdprintk(XENLOG_WARNING, "Insufficient memory for PMU, "
            " PMU feature is unavailable on domain %d vcpu %d.\n",
            v->vcpu_id, v->domain->domain_id);
        return -ENOMEM;
    }

    vpmu->context = ctxt;
    vpmu_set(vpmu, VPMU_CONTEXT_ALLOCATED);
    return 0;
}
Example #30
static void dump_guest_os_id(const struct domain *d)
{
    gdprintk(XENLOG_INFO, "GUEST_OS_ID:\n");
    gdprintk(XENLOG_INFO, "\tvendor: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.vendor);
    gdprintk(XENLOG_INFO, "\tos: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.os);
    gdprintk(XENLOG_INFO, "\tmajor: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.major);
    gdprintk(XENLOG_INFO, "\tminor: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.minor);
    gdprintk(XENLOG_INFO, "\tsp: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.service_pack);
    gdprintk(XENLOG_INFO, "\tbuild: %x\n",
            d->arch.hvm_domain.viridian.guest_os_id.fields.build_number);
}