/* get_page() to prevent another vcpu freeing the page. */
static int xencomm_get_page(unsigned long paddr, struct page_info **page)
{
    unsigned long maddr = paddr_to_maddr(paddr);

    if ( maddr == 0 )
        return -EFAULT;

    *page = maddr_to_page(maddr);
    if ( get_page(*page, current->domain) == 0 )
    {
        if ( page_get_owner(*page) != current->domain )
        {
            /*
             * This page might be a page granted by another domain, or
             * it might be being freed by a concurrent decrease_reservation
             * hypercall.
             */
            gdprintk(XENLOG_WARNING,
                     "bad page is passed. paddr 0x%lx maddr 0x%lx\n",
                     paddr, maddr);
            return -EFAULT;
        }

        /* Try again. */
        cpu_relax();
        return -EAGAIN;
    }

    return 0;
}
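/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * treats -EAGAIN as transient and retries, since the get_page() failure
 * can race with another vcpu briefly holding the last reference. The
 * reference taken by xencomm_get_page() must be dropped with put_page().
 */
static int example_with_page(unsigned long paddr)
{
    struct page_info *page;
    int ret;

    do {
        ret = xencomm_get_page(paddr, &page);
    } while ( ret == -EAGAIN );

    if ( ret )
        return ret;   /* -EFAULT: unmapped, foreign, or freed page */

    /* ... access the page contents here ... */

    put_page(page);   /* drop the reference taken by xencomm_get_page() */
    return 0;
}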
/* Expose npages machine-contiguous pages, owned by d, to the guest at gmaddr. */
static void xenoprof_shared_gmfn_with_guest(
    struct domain *d, unsigned long maddr, unsigned long gmaddr, int npages)
{
    int i;

    for ( i = 0; i < npages; i++, maddr += PAGE_SIZE, gmaddr += PAGE_SIZE )
    {
        BUG_ON(page_get_owner(maddr_to_page(maddr)) != d);
        xenoprof_shared_gmfn(d, gmaddr, maddr);
    }
}
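/*
 * Invocation sketch (hypothetical names): sharing a physically contiguous
 * nbuf-page xenoprof buffer with domain d. Both addresses advance by
 * PAGE_SIZE per iteration inside the helper, so only the start of each
 * range is passed; d must already own every machine page in the range.
 */
static void example_share_buffer(struct domain *d, struct page_info *first_pg,
                                 unsigned long gmaddr, int nbuf)
{
    xenoprof_shared_gmfn_with_guest(d, page_to_maddr(first_pg), gmaddr, nbuf);
}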
static u64 iommu_l2e_from_pfn(struct page_info *table, int level,
                              unsigned long io_pfn)
{
    unsigned long offset;
    void *pde = NULL;
    void *table_vaddr;
    u64 next_table_maddr = 0;

    BUG_ON( table == NULL || level == 0 );

    while ( level > 1 )
    {
        offset = io_pfn >> (PTE_PER_TABLE_SHIFT *
                            (level - IOMMU_PAGING_MODE_LEVEL_1));
        offset &= ~PTE_PER_TABLE_MASK;

        table_vaddr = map_domain_page(page_to_mfn(table));
        pde = table_vaddr + (offset * IOMMU_PAGE_TABLE_ENTRY_SIZE);
        next_table_maddr = amd_iommu_get_next_table_from_pte(pde);

        if ( !amd_iommu_is_pte_present(pde) )
        {
            if ( next_table_maddr == 0 )
            {
                /* Install a freshly allocated next-level table. */
                table = alloc_amd_iommu_pgtable();
                if ( table == NULL )
                {
                    unmap_domain_page(table_vaddr);
                    return 0;
                }
                next_table_maddr = page_to_maddr(table);
                amd_iommu_set_page_directory_entry(
                    (u32 *)pde, next_table_maddr, level - 1);
            }
            else
            {
                /* Non-present PTE with a next-table address set:
                 * should never reach here. */
                unmap_domain_page(table_vaddr);
                return 0;
            }
        }

        unmap_domain_page(table_vaddr);
        table = maddr_to_page(next_table_maddr);
        level--;
    }

    return next_table_maddr;
}
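/*
 * Index-extraction sketch (standalone; assumes the usual Xen values
 * PTE_PER_TABLE_SHIFT == 9 and IOMMU_PAGING_MODE_LEVEL_1 == 1): each
 * loop iteration above peels one 9-bit slice of io_pfn, so a level-3
 * walk indexes with bits [26:18], level 2 with bits [17:9], and the
 * remaining bits [8:0] index the level-1 table returned to the caller.
 */
static unsigned long example_table_index(unsigned long io_pfn, int level)
{
    unsigned long offset = io_pfn >> (9 * (level - 1));

    /* Equivalent to "offset &= ~PTE_PER_TABLE_MASK" with the values above. */
    return offset & ((1UL << 9) - 1);
}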
void free_pgtable_maddr(u64 maddr)
{
    if ( maddr != 0 )
        free_domheap_page(maddr_to_page(maddr));
}
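/*
 * Pairing sketch (hypothetical; alloc_pgtable_maddr() stands in for
 * whatever allocator produced the table): free_pgtable_maddr() accepts 0
 * as a no-op, so teardown paths can free unconditionally without first
 * checking whether the allocation ever succeeded.
 */
static int example_pgtable_lifecycle(void)
{
    u64 maddr = alloc_pgtable_maddr();   /* assumed to return 0 on failure */

    if ( maddr == 0 )
        return -ENOMEM;

    /* ... install and use the page table ... */

    free_pgtable_maddr(maddr);           /* safe even when maddr is 0 */
    return 0;
}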