/*
 * Dump a 32-bit (compat mode) guest's stack to the Xen console.
 *
 * v:                 vCPU whose stack is to be shown.
 * regs:              guest register state; regs->_esp locates the stack top.
 * debug_stack_lines: maximum number of output lines (8 words per line).
 *
 * If @v is not the currently running vCPU and no vCPU of the domain has the
 * currently loaded page tables, the stack address is translated with an
 * explicit page walk and the dump is confined to that single page.
 */
void compat_show_guest_stack(struct vcpu *v, struct cpu_user_regs *regs, int debug_stack_lines)
{
    unsigned int i, *stack, addr, mask = STACK_SIZE;

    stack = (unsigned int *)(unsigned long)regs->_esp;
    printk("Guest stack trace from esp=%08lx:\n ", (unsigned long)stack);

    /* The stack pointer must pass the compat-mode guest access check. */
    if ( !__compat_access_ok(v->domain, stack, sizeof(*stack)) )
    {
        printk("Guest-inaccessible memory.\n");
        return;
    }

    if ( v != current )
    {
        struct vcpu *vcpu;
        unsigned long mfn;

        ASSERT(guest_kernel_mode(v, regs));

        /* Look for a vCPU whose page tables are the ones currently loaded. */
        mfn = read_cr3() >> PAGE_SHIFT;
        for_each_vcpu( v->domain, vcpu )
            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
                break;
        if ( !vcpu )
        {
            /*
             * No vCPU matches: translate the stack address by an explicit
             * page walk and restrict the dump to that one page.
             */
            stack = do_page_walk(v, (unsigned long)stack);
            if ( (unsigned long)stack < PAGE_SIZE )
            {
                printk("Inaccessible guest memory.\n");
                return;
            }
            mask = PAGE_SIZE;
        }
    }

    for ( i = 0; i < debug_stack_lines * 8; i++ )
    {
        /* Stop when the next word would cross a stack/page boundary. */
        if ( (((long)stack - 1) ^ ((long)(stack + 1) - 1)) & mask )
            break;
        if ( __get_user(addr, stack) )
        {
            if ( i != 0 )
                printk("\n ");
            printk("Fault while accessing guest memory.");
            /* Mark non-empty so the "Stack empty." message is suppressed. */
            i = 1;
            break;
        }
        /* Start a new output line every 8 words. */
        if ( (i != 0) && ((i % 8) == 0) )
            printk("\n ");
        printk(" %08x", addr);
        stack++;
    }
    if ( i == 0 )
        printk("Stack empty.");
    printk("\n");
}
// Allocate a new p2m table for a domain. // // The structure of the p2m table is that of a pagetable for xen (i.e. it is // controlled by CONFIG_PAGING_LEVELS). // // Returns 0 for success or -errno. // int p2m_alloc_table(struct p2m_domain *p2m) { struct page_info *p2m_top; struct domain *d = p2m->domain; p2m_lock(p2m); if ( !p2m_is_nestedp2m(p2m) && !page_list_empty(&d->page_list) ) { P2M_ERROR("dom %d already has memory allocated\n", d->domain_id); p2m_unlock(p2m); return -EINVAL; } if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 ) { P2M_ERROR("p2m already allocated for this domain\n"); p2m_unlock(p2m); return -EINVAL; } P2M_PRINTK("allocating p2m table\n"); p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table); if ( p2m_top == NULL ) { p2m_unlock(p2m); return -ENOMEM; } p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top)); if ( hap_enabled(d) ) iommu_share_p2m_table(d); P2M_PRINTK("populating p2m table\n"); /* Initialise physmap tables for slot zero. Other code assumes this. */ p2m->defer_nested_flush = 1; if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access) ) goto error; p2m->defer_nested_flush = 0; P2M_PRINTK("p2m table initialised (%u pages)\n", page_count); p2m_unlock(p2m); return 0; spin_unlock(&p2m->domain->page_alloc_lock); error: P2M_PRINTK("failed to initialize p2m table, gfn=%05lx, mfn=%" PRI_mfn "\n", gfn, mfn_x(mfn)); p2m_unlock(p2m); return -ENOMEM; }
/*
 * Audit a pagetable-based p2m for consistency with the M2P table.
 *
 * Walks every present entry of the domain's p2m (including 1GB and 2MB
 * superpages) and checks that the M2P maps each MFN back to the GFN the
 * p2m maps it from.  Populate-on-demand entries are counted and compared
 * against the PoD accounting in p2m->pod.entry_count.
 *
 * Returns the number of mismatches found (pmbad); in practice BUG() fires
 * on the first mismatch, so a non-zero return is only reachable if BUG()
 * is compiled out.
 *
 * Caller must hold both the p2m lock and the PoD lock.
 */
long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
    unsigned long entry_count = 0, pmbad = 0;
    unsigned long mfn, gfn, m2pfn;
    int test_linear;
    struct domain *d = p2m->domain;

    ASSERT(p2m_locked_by_me(p2m));
    ASSERT(pod_locked_by_me(p2m));

    test_linear = ( (d == current->domain)
                    && !pagetable_is_null(current->arch.monitor_table) );
    if ( test_linear )
        flush_tlb_local();

    /* Audit part one: walk the domain's p2m table, checking the entries. */
    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
    {
        l2_pgentry_t *l2e;
        l1_pgentry_t *l1e;
        int i1, i2;

#if CONFIG_PAGING_LEVELS == 4
        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;
        int i4, i3;
        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#else /* CONFIG_PAGING_LEVELS == 3 */
        l3_pgentry_t *l3e;
        int i3;
        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#endif

        gfn = 0;
#if CONFIG_PAGING_LEVELS >= 4
        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
        {
            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
            {
                /* Skip the whole GFN range covered by the absent L4e. */
                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                continue;
            }
            l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
#endif
            /* Only 8 L3 entries exist with 3 paging levels (PAE). */
            for ( i3 = 0;
                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
                  i3++ )
            {
                if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
                {
                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                    continue;
                }

                /* check for 1GB super page */
                if ( l3e_get_flags(l3e[i3]) & _PAGE_PSE )
                {
                    mfn = l3e_get_pfn(l3e[i3]);
                    ASSERT(mfn_valid(_mfn(mfn)));
                    /* we have to cover 512x512 4K pages */
                    for ( i2 = 0;
                          i2 < (L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES);
                          i2++)
                    {
                        m2pfn = get_gpfn_from_mfn(mfn+i2);
                        if ( m2pfn != (gfn + i2) )
                        {
                            pmbad++;
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n",
                                       gfn+i2, mfn+i2, m2pfn);
                            BUG();
                        }
                        /*
                         * NOTE(review): gfn is advanced by a full 1GB worth
                         * of pages on *every* iteration of this inner loop,
                         * not once after it.  Preserved as-is, but this
                         * looks suspicious -- confirm intended behavior.
                         */
                        gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }
                }

                l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
                for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                {
                    if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                    {
                        /* A non-present PoD superpage accounts for 512
                         * outstanding entries. */
                        if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE)
                             && ( p2m_flags_to_type(l2e_get_flags(l2e[i2]))
                                  == p2m_populate_on_demand ) )
                            entry_count+=SUPERPAGE_PAGES;
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    /* check for super page */
                    if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE )
                    {
                        mfn = l2e_get_pfn(l2e[i2]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
                        {
                            m2pfn = get_gpfn_from_mfn(mfn+i1);
                            /* Allow shared M2Ps */
                            if ( (m2pfn != (gfn + i1)) &&
                                 (m2pfn != SHARED_M2P_ENTRY) )
                            {
                                pmbad++;
                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                           " -> gfn %#lx\n",
                                           gfn+i1, mfn+i1, m2pfn);
                                BUG();
                            }
                        }
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));

                    for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                    {
                        p2m_type_t type;

                        type = p2m_flags_to_type(l1e_get_flags(l1e[i1]));
                        if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                        {
                            /* Count outstanding PoD entries. */
                            if ( type == p2m_populate_on_demand )
                                entry_count++;
                            continue;
                        }
                        mfn = l1e_get_pfn(l1e[i1]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        m2pfn = get_gpfn_from_mfn(mfn);
                        /* Direct-MMIO, grant and shared mappings may
                         * legitimately disagree with the M2P. */
                        if ( m2pfn != gfn &&
                             type != p2m_mmio_direct &&
                             !p2m_is_grant(type) &&
                             !p2m_is_shared(type) )
                        {
                            pmbad++;
                            printk("mismatch: gfn %#lx -> mfn %#lx"
                                   " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            BUG();
                        }
                    }
                    unmap_domain_page(l1e);
                }
                unmap_domain_page(l2e);
            }
#if CONFIG_PAGING_LEVELS >= 4
            unmap_domain_page(l3e);
        }
#endif

#if CONFIG_PAGING_LEVELS == 4
        unmap_domain_page(l4e);
#else /* CONFIG_PAGING_LEVELS == 3 */
        unmap_domain_page(l3e);
#endif
    }

    /* Audit part two: the PoD count observed must match the accounting. */
    if ( entry_count != p2m->pod.entry_count )
    {
        printk("%s: refcounted entry count %ld, audit count %lu!\n",
               __func__, p2m->pod.entry_count, entry_count);
        BUG();
    }

    return pmbad;
}
/* Walk the whole p2m table, changing any entries of the old type
 * to the new type.  This is used in hardware-assisted paging to
 * quickly enable or disable log-dirty tracking.
 *
 * p2m: the p2m whose entries are rewritten (caller holds the p2m lock).
 * ot:  old type -- only entries currently of this type are touched.
 * nt:  new type written in their place.
 *
 * Superpage (1GB/2MB) entries are retyped in place at their own level;
 * grant and direct-MMIO types must never be converted this way (BUG_ON).
 */
static void p2m_change_type_global(struct p2m_domain *p2m, p2m_type_t ot, p2m_type_t nt)
{
    unsigned long mfn, gfn, flags;
    l1_pgentry_t l1e_content;
    l1_pgentry_t *l1e;
    l2_pgentry_t *l2e;
    mfn_t l1mfn, l2mfn, l3mfn;
    unsigned long i1, i2, i3;
    l3_pgentry_t *l3e;
#if CONFIG_PAGING_LEVELS == 4
    l4_pgentry_t *l4e;
    unsigned long i4;
#endif /* CONFIG_PAGING_LEVELS == 4 */

    BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
    BUG_ON(ot != nt && (ot == p2m_mmio_direct || nt == p2m_mmio_direct));

    /* Nothing to do for non-translated domains or an unallocated p2m. */
    if ( !paging_mode_translate(p2m->domain) )
        return;

    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
        return;

    ASSERT(p2m_locked_by_me(p2m));

#if CONFIG_PAGING_LEVELS == 4
    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#else /* CONFIG_PAGING_LEVELS == 3 */
    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#endif

#if CONFIG_PAGING_LEVELS >= 4
    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
    {
        if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
        {
            continue;
        }
        l3mfn = _mfn(l4e_get_pfn(l4e[i4]));
        l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
#endif
        /* Only 8 L3 entries exist with 3 paging levels (PAE). */
        for ( i3 = 0;
              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
              i3++ )
        {
            if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
            {
                continue;
            }
            /* 1GB superpage: retype the L3 entry in place. */
            if ( (l3e_get_flags(l3e[i3]) & _PAGE_PSE) )
            {
                flags = l3e_get_flags(l3e[i3]);
                if ( p2m_flags_to_type(flags) != ot )
                    continue;
                mfn = l3e_get_pfn(l3e[i3]);
                /*
                 * NOTE(review): unlike the 2MB case below, the gfn comes
                 * from the M2P here and could in principle be
                 * SHARED_M2P_ENTRY -- confirm 1GB pages are never shared.
                 */
                gfn = get_gpfn_from_mfn(mfn);
                flags = p2m_type_to_flags(nt, _mfn(mfn));
                l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
                p2m->write_p2m_entry(p2m, gfn,
                                     (l1_pgentry_t *)&l3e[i3],
                                     l3mfn, l1e_content, 3);
                continue;
            }

            l2mfn = _mfn(l3e_get_pfn(l3e[i3]));
            l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
            {
                if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                {
                    continue;
                }

                /* 2MB superpage: retype the L2 entry in place. */
                if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE) )
                {
                    flags = l2e_get_flags(l2e[i2]);
                    if ( p2m_flags_to_type(flags) != ot )
                        continue;
                    mfn = l2e_get_pfn(l2e[i2]);
                    /* Do not use get_gpfn_from_mfn because it may return
                       SHARED_M2P_ENTRY */
                    gfn = (i2 + (i3
#if CONFIG_PAGING_LEVELS >= 4
                                 + (i4 * L3_PAGETABLE_ENTRIES)
#endif
                                )
                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
                    flags = p2m_type_to_flags(nt, _mfn(mfn));
                    l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
                    p2m->write_p2m_entry(p2m, gfn,
                                         (l1_pgentry_t *)&l2e[i2],
                                         l2mfn, l1e_content, 2);
                    continue;
                }

                l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
                l1e = map_domain_page(mfn_x(l1mfn));

                /*
                 * NOTE(review): gfn++ in the loop header increments a value
                 * that is unconditionally recomputed from the indices below
                 * before use; it appears to be harmless legacy.
                 */
                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                {
                    flags = l1e_get_flags(l1e[i1]);
                    if ( p2m_flags_to_type(flags) != ot )
                        continue;
                    mfn = l1e_get_pfn(l1e[i1]);
                    /* Reconstruct the gfn from the pagetable indices. */
                    gfn = i1 + (i2 + (i3
#if CONFIG_PAGING_LEVELS >= 4
                                      + (i4 * L3_PAGETABLE_ENTRIES)
#endif
                                     )
                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
                    /* create a new l1e entry with the new type */
                    flags = p2m_type_to_flags(nt, _mfn(mfn));
                    l1e_content = p2m_l1e_from_pfn(mfn, flags);
                    p2m->write_p2m_entry(p2m, gfn, &l1e[i1],
                                         l1mfn, l1e_content, 1);
                }
                unmap_domain_page(l1e);
            }
            unmap_domain_page(l2e);
        }
#if CONFIG_PAGING_LEVELS >= 4
        unmap_domain_page(l3e);
    }
#endif

#if CONFIG_PAGING_LEVELS == 4
    unmap_domain_page(l4e);
#else /* CONFIG_PAGING_LEVELS == 3 */
    unmap_domain_page(l3e);
#endif
}