Example 1
int hap_disable_log_dirty(struct domain *d)
{
    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* Set l1e entries of the P2M table back to normal (read/write) mode. */
    p2m_change_type_global(d, p2m_ram_logdirty, p2m_ram_rw);
    return 0;
}
Example 2
/* HAP code to call when log_dirty is enabled.  Returns 0 if no problem is found. */
int hap_enable_log_dirty(struct domain *d)
{
    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    p2m_change_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
    flush_tlb_mask(&d->domain_dirty_cpumask);
    return 0;
}
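For context: neither of these callbacks is invoked directly. In the hap.c they come from, they are registered with the generic paging layer, which calls them when the toolstack toggles log-dirty mode (e.g. at the start of live migration). Below is a minimal sketch of that wiring, not a verbatim excerpt; hap_lock_init(), paging_log_dirty_init() and the hap_clean_dirty_bitmap() cleaner are assumed to exist as in the hap.c these examples were taken from.

/* Sketch only: register the HAP log-dirty hooks with the generic
 * paging layer.  hap_lock_init(), paging_log_dirty_init() and
 * hap_clean_dirty_bitmap() are assumptions from the surrounding tree. */
void hap_domain_init(struct domain *d)
{
    hap_lock_init(d);

    /* From here on, enabling/disabling log-dirty mode for this domain
     * dispatches to hap_enable_log_dirty()/hap_disable_log_dirty(). */
    paging_log_dirty_init(d, hap_enable_log_dirty,
                          hap_disable_log_dirty,
                          hap_clean_dirty_bitmap);
}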
Example 3
void hap_free_p2m_page(struct domain *d, struct page_info *pg)
{
    hap_lock(d);
    ASSERT(page_get_owner(pg) == d);
    /* Should have just the one ref we gave it in alloc_p2m_page() */
    if ( (pg->count_info & PGC_count_mask) != 1 )
        HAP_ERROR("Odd p2m page count c=%#x t=%"PRtype_info"\n",
                  pg->count_info, pg->u.inuse.type_info);
    pg->count_info = 0;
    /* Free should not decrement domain's total allocation, since
     * these pages were allocated without an owner. */
    page_set_owner(pg, NULL);
    free_domheap_page(pg);
    d->arch.paging.hap.p2m_pages--;
    ASSERT(d->arch.paging.hap.p2m_pages >= 0);
    hap_unlock(d);
}
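hap_free_p2m_page() and its allocation counterpart (Example 6) are likewise consumed through a callback interface: the p2m code takes the pair as function pointers and calls back into them whenever the P2M tree grows or shrinks. A sketch under that assumption, using the callback-style p2m_alloc_table() of the older Xen generation these examples appear to come from; the wrapper name hap_setup_p2m() is hypothetical.

/* Hypothetical wrapper, for illustration only.  p2m_alloc_table() is
 * assumed to store the two HAP hooks and call them for every P2M page
 * it needs to allocate or release. */
static int hap_setup_p2m(struct domain *d)
{
    return p2m_alloc_table(d, hap_alloc_p2m_page, hap_free_p2m_page);
}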
Example 4
static int hap_disable_vram_tracking(struct domain *d)
{
    int i;
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    if ( !dirty_vram )
        return -EINVAL;

    hap_lock(d);
    d->arch.paging.mode &= ~PG_log_dirty;
    hap_unlock(d);

    /* Set l1e entries of the P2M table back to normal (read/write) mode. */
    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_logdirty, p2m_ram_rw);

    flush_tlb_mask(&d->domain_dirty_cpumask);
    return 0;
}
Example 5
static int hap_enable_vram_tracking(struct domain *d)
{
    int i;
    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;

    if ( !dirty_vram )
        return -EINVAL;

    /* turn on PG_log_dirty bit in paging mode */
    hap_lock(d);
    d->arch.paging.mode |= PG_log_dirty;
    hap_unlock(d);

    /* set l1e entries of P2M table to be read-only. */
    for (i = dirty_vram->begin_pfn; i < dirty_vram->end_pfn; i++)
        p2m_change_type(p2m_get_hostp2m(d), i, p2m_ram_rw, p2m_ram_logdirty);

    flush_tlb_mask(&d->domain_dirty_cpumask);
    return 0;
}
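The VRAM variants mirror the whole-domain pair in Examples 1 and 2, but confine the p2m type change to the dirty_vram->begin_pfn .. end_pfn window, so only framebuffer writes are tracked. In the source they are installed through the same paging_log_dirty_init() hook; a sketch follows, assuming a hap_clean_vram_tracking() cleaner exists alongside the two functions above, as in the hap.c of this era.

/* Sketch: swap the domain's log-dirty hooks for the VRAM-only
 * variants.  hap_clean_vram_tracking() is an assumed third callback. */
static void hap_vram_tracking_init(struct domain *d)
{
    paging_log_dirty_init(d, hap_enable_vram_tracking,
                          hap_disable_vram_tracking,
                          hap_clean_vram_tracking);
}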
Example 6
static struct page_info *hap_alloc_p2m_page(struct domain *d)
{
    struct page_info *pg;

    hap_lock(d);
    pg = hap_alloc(d);

#if CONFIG_PAGING_LEVELS == 3
    /* In PAE mode the top-level P2M table must live below 4GB, because
     * h_cr3 is only 32 bits wide.  We use alloc_domheap_pages to
     * force this requirement, and exchange the guaranteed 32-bit-clean
     * page for the one we just hap_alloc()ed.  Guard against hap_alloc()
     * having failed before inspecting or freeing the page. */
    if ( pg != NULL && d->arch.paging.hap.p2m_pages == 0
         && mfn_x(page_to_mfn(pg)) >= (1UL << (32 - PAGE_SHIFT)) )
    {
        free_domheap_page(pg);
        pg = alloc_domheap_pages(NULL, 0, MEMF_bits(32));
        if ( likely(pg != NULL) )
        {
            void *p = hap_map_domain_page(page_to_mfn(pg));
            clear_page(p);
            hap_unmap_domain_page(p);
        }
    }
#endif

    if ( likely(pg != NULL) )
    {
        d->arch.paging.hap.total_pages--;
        d->arch.paging.hap.p2m_pages++;
        page_set_owner(pg, d);
        pg->count_info = 1;
    }

    hap_unlock(d);
    return pg;
}
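The 1UL << (32 - PAGE_SHIFT) test marks the first machine frame whose address no longer fits in 32 bits: with the usual PAGE_SHIFT of 12 that is MFN 0x100000, the frame starting exactly at 4GiB. A standalone arithmetic check (ordinary C, not Xen code):

/* Standalone illustration of the 4GiB MFN boundary used above.
 * With 4KiB pages (PAGE_SHIFT == 12), frames 0 .. 0xFFFFF lie below
 * 4GiB; MFN 0x100000 is the first frame needing more than 32 bits. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long boundary = 1UL << (32 - PAGE_SHIFT);

    printf("first MFN at/above 4GiB: %#lx\n", boundary);   /* 0x100000 */
    printf("its machine address:     %#llx\n",
           (unsigned long long)boundary << PAGE_SHIFT);    /* 0x100000000 */
    return 0;
}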