/*
 * Adjust a kernel-text protection value per the "ktext_nocache" policy:
 * by default the mapping is marked non-coherent (NC); when ktext_nocache
 * is set, it is instead marked to bypass L2 allocation.
 */
static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	return ktext_nocache ?
		hv_pte_set_no_alloc_l2(prot) : hv_pte_set_nc(prot);
}
static inline pgprot_t ktext_set_nocache(pgprot_t prot) { if (!ktext_nocache) prot = hv_pte_set_nc(prot); #if CHIP_HAS_NC_AND_NOALLOC_BITS() else prot = hv_pte_set_no_alloc_l2(prot); #endif return prot; }
/*
 * Update the home of a PTE if necessary (can also be used for a pgprot_t).
 *
 * @pte:  the PTE (or pgprot) value to rewrite.
 * @home: either one of the PAGE_HOME_xxx sentinels, or a cpu number,
 *        in which case the page is homed on that cpu's cache
 *        (see the default: arm of the switch below).
 *
 * Returns @pte with its caching mode (and, for a remote home, the
 * target-cpu lotar) set to implement the requested homing.
 */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
		/*
		 * NOTE: the "else" above binds to the whole guarded
		 * if/else chain below when CBOX home-map is available,
		 * and directly to the final set_mode() call otherwise.
		 */
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		/* Immutable pages are the one case that gets the NC bit. */
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		/* An explicit cpu number: home the page on that cpu's L3. */
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	/* Apply the global "noallocl2" boot-time policy, if set. */
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
int arch_vm_area_flags(struct mm_struct *mm, unsigned long flags, unsigned long vm_flags, pid_t *pid_ptr, pgprot_t *prot_ptr) { pgprot_t prot = __pgprot(0); pid_t pid = 0; #if CHIP_HAS_NC_AND_NOALLOC_BITS() if (flags & MAP_CACHE_NO_L1) prot = hv_pte_set_no_alloc_l1(prot); if (flags & MAP_CACHE_NO_L2) prot = hv_pte_set_no_alloc_l2(prot); #endif #if CHIP_HAS_CBOX_HOME_MAP() /* Certain types of mapping have standard hash-for-home defaults. */ if (!(flags & _MAP_CACHE_HOME)) { if ((flags & (MAP_GROWSDOWN | MAP_ANONYMOUS)) == (MAP_GROWSDOWN | MAP_ANONYMOUS)) flags |= ucache_flags(STACK); else if ((flags & MAP_ANONYMOUS) == MAP_ANONYMOUS) flags |= ucache_flags(HEAP); else if ((flags & (MAP_ANONYMOUS | MAP_PRIVATE)) == MAP_PRIVATE) flags |= (vm_flags & PROT_WRITE) ? ucache_flags(DATA) : ucache_flags(TEXT); } #endif /* * If the only request is for what the kernel does naturally, * remove it, to avoid unnecessary use of VM_DONTMERGE. */ if (flags & MAP_ANONYMOUS) { switch (flags & _MAP_CACHE_MKHOME(_MAP_CACHE_HOME_MASK)) { #if CHIP_HAS_CBOX_HOME_MAP() case MAP_CACHE_HOME_HASH: if (hash_default) flags &= ~MAP_CACHE_HOME_HASH; break; #endif case MAP_CACHE_HOME_SINGLE: if (!hash_default) flags &= ~MAP_CACHE_HOME_SINGLE; break; } } if (flags & _MAP_CACHE_HOME) prot = pte_set_forcecache(prot); if ((flags & _MAP_CACHE_MKHOME(_MAP_CACHE_HOME_MASK)) == MAP_CACHE_HOME_NONE) { /* * We special-case setting the home cache to "none". * If the user isn't indicating willingness to tolerate * incoherence, and is caching locally on the cpu, we * fail a writable mapping, or enforce a readonly mapping. */ if (!(flags & _MAP_CACHE_INCOHERENT) && (flags & MAP_CACHE_NO_LOCAL) != MAP_CACHE_NO_LOCAL) { if (vm_flags & VM_WRITE) return -EINVAL; } if ((flags & MAP_CACHE_NO_LOCAL) == MAP_CACHE_NO_LOCAL) prot = hv_pte_set_mode(prot, HV_PTE_MODE_UNCACHED); else prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3); } else if (flags & _MAP_CACHE_HOME) { /* Extract the cpu (or magic cookie). 
*/ int cpu = (flags >> _MAP_CACHE_HOME_SHIFT) & _MAP_CACHE_HOME_MASK; switch (cpu) { case _MAP_CACHE_HOME_SINGLE: /* * This is the default case; we set "anyhome" * and the OS will pick the cpu for us in pfn_pte() * by examining the page_home() of the page. */ prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_TILE_L3); prot = pte_set_anyhome(prot); break; #if CHIP_HAS_CBOX_HOME_MAP() case _MAP_CACHE_HOME_HASH: /* Mark this page for home-map hash caching. */ prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3); break; #endif case _MAP_CACHE_HOME_TASK: pid = current->pid; /*FALLTHROUGH*/ case _MAP_CACHE_HOME_HERE: cpu = smp_processor_id(); /*FALLTHROUGH*/ default: if (cpu < 0 || cpu >= nr_cpu_ids || !cpu_is_valid_lotar(cpu)) return -EINVAL; prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_TILE_L3); prot = set_remote_cache_cpu(prot, cpu); } }