/*
 * Unmapping vs dcache management:
 *
 * If a guest maps certain memory pages as uncached, all writes will
 * bypass the data cache and go directly to RAM.  However, the CPUs
 * can still speculate reads (not writes) and fill cache lines with
 * data.
 *
 * Those cache lines will be *clean* cache lines though, so a
 * clean+invalidate operation is equivalent to an invalidate
 * operation, because no cache lines are marked dirty.
 *
 * Those clean cache lines could be filled prior to an uncached write
 * by the guest, and the cache coherent IO subsystem would therefore
 * end up writing old data to disk.
 *
 * This is why right after unmapping a page/section and invalidating
 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
 * the IO subsystem will never hit in the cache.
 */
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			pte_t old_pte = *pte;

			kvm_set_pte(pte, __pte(0));
			kvm_tlb_flush_vmid_ipa(kvm, addr);

			/* No need to invalidate the cache for device mappings */
			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
				kvm_flush_dcache_pte(old_pte);

			put_page(virt_to_page(pte));
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}
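For context, the per-PTE flush called above amounts to a clean+invalidate to the point of coherency over the page the stale entry pointed to. Below is a minimal sketch of what such a helper might boil down to, assuming the arm64 kvm_flush_dcache_to_poc() primitive and a directly mapped (non-highmem) page; the in-tree implementation differs per architecture.

static void kvm_flush_dcache_pte(pte_t pte)
{
	/*
	 * Clean+invalidate the whole page backing the old mapping out to
	 * the point of coherency, so a cache coherent IO master can never
	 * hit a stale (clean) line for this range.
	 */
	struct page *page = pte_page(pte);

	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}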
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		if (pud_huge(*pud)) {
			/*
			 * If we are dealing with a huge pud, just clear it and
			 * move on.
			 */
			clear_pud_entry(kvm, pud, addr);
			addr = kvm_pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = kvm_pmd_addr_end(addr, end);
			continue;
		}

		if (!kvm_pmd_huge(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			clear_pte_entry(kvm, pte, addr);
			next = addr + PAGE_SIZE;
		}

		/*
		 * If the pmd entry is to be cleared, walk back up the ladder
		 */
		if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = kvm_pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = kvm_pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}
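For reference, unmap_range() is driven from the stage-2 teardown and MMU-notifier paths; a typical wrapper (a sketch, assuming kvm->arch.pgd holds the VM's stage-2 pgd) looks like:

static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	/* Tear down the stage-2 mappings covering [start, start + size). */
	unmap_range(kvm, kvm->arch.pgd, start, size);
}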
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
		       phys_addr_t addr, phys_addr_t end)
{
	phys_addr_t start_addr = addr;
	pte_t *pte, *start_pte;

	start_pte = pte = pte_offset_kernel(pmd, addr);
	do {
		if (!pte_none(*pte)) {
			kvm_set_pte(pte, __pte(0));
			put_page(virt_to_page(pte));
			kvm_tlb_flush_vmid_ipa(kvm, addr);
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (kvm_pte_table_empty(kvm, start_pte))
		clear_pmd_entry(kvm, pmd, start_addr);
}
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
			unsigned long long start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long long addr = start, end = start + size;
	u64 next;

	while (addr < end) {
		pgd = pgdp + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr = pud_addr_end(addr, end);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr = pmd_addr_end(addr, end);
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(kvm, pte, addr);
		next = addr + PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (page_empty(pte)) {
			clear_pmd_entry(kvm, pmd, addr);
			next = pmd_addr_end(addr, end);
			if (page_empty(pmd) && !page_empty(pud)) {
				clear_pud_entry(kvm, pud, addr);
				next = pud_addr_end(addr, end);
			}
		}

		addr = next;
	}
}