/*
 * Convert a pgd from machine format back to its pseudo-physical value.
 * A zero (not-present) entry passes through unchanged; otherwise the
 * machine address is translated via the m2p table and the low bit is
 * set again on the result.
 */
unsigned long xen_pgd_val(pgd_t pgd)
{
	unsigned long val = pgd.pgd;

	if (val == 0)
		return 0;

	return machine_to_phys(XMADDR(val)).paddr | 1;
}
/*
 * Convert a pmd from machine format back to its pseudo-physical value.
 * Mirrors xen_pgd_val: zero entries pass through, non-zero entries are
 * run through the m2p translation with the low bit re-set.
 */
unsigned long long xen_pmd_val(pmd_t pmd)
{
	unsigned long long val = pmd.pmd;

	if (val == 0)
		return 0;

	return machine_to_phys(XMADDR(val)).paddr | 1;
}
/*
 * Translate an arbitrary kernel virtual address to a machine address by
 * walking the page tables for its mapping PTE.  BUGs if the address is
 * not mapped.
 */
xmaddr_t arbitrary_virt_to_machine(unsigned long address)
{
	pte_t *pte = lookup_address(address);
	/*
	 * FIX: the in-page offset is the LOW bits of the address, so mask
	 * with ~PAGE_MASK.  The old "address & PAGE_MASK" kept the frame
	 * bits instead, yielding a bogus machine address for any input
	 * that was not page-aligned.
	 */
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR((pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
/*
 * Recover the pseudo-physical value of a (non-PAE) pte.  Entries
 * without _PAGE_PRESENT are returned as-is; present entries have their
 * machine address translated through the m2p table.
 */
unsigned long xen_pte_val(pte_t pte)
{
	unsigned long val = pte.pte_low;

	if (!(val & _PAGE_PRESENT))
		return val;

	return machine_to_phys(XMADDR(val)).paddr;
}
/*
 * Recover the pseudo-physical value of a PAE pte.  An entry with a zero
 * low word is treated as empty and returns 0; otherwise the 64-bit
 * machine value is assembled from both halves, translated through the
 * m2p table, and the low bit is set again.
 */
unsigned long long xen_pte_val(pte_t pte)
{
	unsigned long long mach;

	if (!pte.pte_low)
		return 0;

	mach = ((unsigned long long)pte.pte_high << 32) | pte.pte_low;
	return machine_to_phys(XMADDR(mach)).paddr | 1;
}
xmaddr_t arbitrary_virt_to_machine(void *vaddr) { unsigned long address = (unsigned long)vaddr; unsigned int level; pte_t *pte; unsigned offset; if (virt_addr_valid(vaddr)) return virt_to_machine(vaddr); pte = lookup_address(address, &level); BUG_ON(pte == NULL); offset = address & ~PAGE_MASK; return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); }
/*
 * Translate an arbitrary kernel virtual address to a machine address.
 * BUGs (via BUG_ON) if the address has no mapping PTE.
 */
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */
	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	/* keep the in-page offset bits; the mfn supplies the frame */
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
/*
 * Map a DMA/bus (machine) address back to its pseudo-physical address
 * via the m2p table.
 */
static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	xmaddr_t maddr = XMADDR(baddr);

	return machine_to_phys(maddr).paddr;
}