/*
 * ioremap_prot - map a physical address range using caller-supplied
 * protection bits.
 *
 * @phys_addr: physical address of the region to map
 * @size:      length of the region in bytes
 * @prot_val:  raw page-protection value; only its cache-mode bits are
 *             used here (converted via pgprot2cachemode())
 *
 * Thin wrapper around __ioremap_caller(); the caller's return address is
 * recorded for diagnostics. Returns the ioremapped virtual address.
 */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	pgprot_t prot = __pgprot(prot_val);

	return __ioremap_caller(phys_addr, size, pgprot2cachemode(prot),
				__builtin_return_address(0));
}
/*
 * Map 'pfn' using protections 'prot'
 *
 * Returns the (atomic) kernel mapping of the page frame, cast to an
 * __iomem pointer for the caller.
 * NOTE(review): presumably this must be paired with the matching atomic
 * unmap by the caller — confirm against the users of this helper.
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate non-WB request to UC- just in
	 * case the caller set the PWT bit to prot directly without using
	 * pgprot_writecombine(). UC- translates to uncached if the MTRR
	 * is UC or WC. UC- gets the real intention, of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
static int ept_set_pfnmap_epte(struct vmx_vcpu *vcpu, int make_write, unsigned long gpa, unsigned long hva) { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; epte_t *epte, flags; unsigned long pfn; int ret; int cache_control; down_read(&mm->mmap_sem); vma = find_vma(mm, hva); if (!vma) { up_read(&mm->mmap_sem); return -EFAULT; } if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { up_read(&mm->mmap_sem); return -EFAULT; } ret = follow_pfn(vma, hva, &pfn); if (ret) { up_read(&mm->mmap_sem); return ret; } if (pgprot2cachemode(vma->vm_page_prot) == _PAGE_CACHE_MODE_WB) cache_control = EPTE_TYPE_WB; else if (pgprot2cachemode(vma->vm_page_prot) == _PAGE_CACHE_MODE_WC) cache_control = EPTE_TYPE_WC; else cache_control = EPTE_TYPE_UC; up_read(&mm->mmap_sem); /* NOTE: pfn's can not be huge pages, which is quite a relief here */ spin_lock(&vcpu->ept_lock); ret = ept_lookup_gpa(vcpu, (void *) gpa, 0, 1, &epte); if (ret) { spin_unlock(&vcpu->ept_lock); printk(KERN_ERR "ept: failed to lookup EPT entry\n"); return ret; } flags = __EPTE_READ | __EPTE_TYPE(cache_control) | __EPTE_IPAT | __EPTE_PFNMAP; if (make_write) flags |= __EPTE_WRITE; if (vcpu->ept_ad_enabled) { /* premark A/D to avoid extra memory references */ flags |= __EPTE_A; if (make_write) flags |= __EPTE_D; } if (epte_present(*epte)) ept_clear_epte(epte); *epte = epte_addr(pfn << PAGE_SHIFT) | flags; spin_unlock(&vcpu->ept_lock); return 0; }