/*
 * Undo kmap_high_l1_vipt(): drop one level of per-CPU nesting and, if an
 * outer user is still active, restore the fixmap entry it had saved.
 */
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

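/*
 * Usage sketch (not from the original source): the km_type-based API pairs
 * each kmap_atomic() with a kunmap_atomic() of the same type.  The helper
 * name, the KM_USER0 slot choice and the memcpy() below are illustrative
 * only.
 */
static void copy_from_highmem_page(struct page *page, void *dst)
{
	void *src = kmap_atomic(page, KM_USER0);	/* pagefaults disabled */

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src, KM_USER0);			/* must use the same type */
}
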
/*
 * Map a highmem page into the per-CPU KM_L1_CACHE fixmap slot for cache
 * maintenance, saving whatever PTE currently occupies the slot so that
 * kunmap_high_l1_vipt() can restore it for any outer, nested user.
 */
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	if (!in_interrupt())
		preempt_disable();

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}

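/*
 * Usage sketch (not from the original source): kmap_high_l1_vipt() and
 * kunmap_high_l1_vipt() bracket cache maintenance on a highmem page, with
 * the saved PTE handed back so a nested outer mapping can be restored.
 * The helper name and the dcache flush shown are illustrative only.
 */
static void flush_one_highmem_page(struct page *page)
{
	pte_t saved_pte;
	void *addr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(addr, PAGE_SIZE);	/* illustrative op */
	kunmap_high_l1_vipt(page, saved_pte);
}
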
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}

/* Install a PTE in fixmap slot 'idx' and flush the stale TLB entry. */
static inline void set_fixmap_pte(int idx, pte_t pte)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(vaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (IS_ENABLED(CONFIG_L4)) {
		l4x_unmap_sync_all();
		return;
	}

	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
	broadcast_tlb_a15_erratum();
}

/* Like kmap_atomic(), but maps a bare pfn rather than a struct page. */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

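/*
 * Usage sketch (not from the original source): kmap_atomic_pfn() is handy
 * when only a page frame number is known and no struct page is at hand.
 * The helper name, the KM_PTE0 slot choice and the read below are
 * illustrative only.
 */
static u32 peek_word_in_pfn(unsigned long pfn, unsigned int offset)
{
	u32 *p = kmap_atomic_pfn(pfn, KM_PTE0);
	u32 val = p[offset / sizeof(u32)];

	kunmap_atomic(p, KM_PTE0);
	return val;
}
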
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}

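/*
 * Usage sketch (not from the original source): with the stack-based API
 * there is no km_type argument; callers normally go through the
 * kmap_atomic()/kunmap_atomic() wrappers around __kmap_atomic() and
 * __kunmap_atomic().  The helper name and the memset() are illustrative
 * only.
 */
static void zero_highmem_page(struct page *page)
{
	void *addr = kmap_atomic(page);		/* pagefaults disabled */

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);			/* pops the slot, re-enables pagefaults */
}
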
static inline unsigned long l2_start_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Let's do our own fixmap stuff in a minimal way here.
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for it only for the
	 * TLB lookup to occur, hence no need to flush the untouched
	 * memory mapping. This is protected with the disabling of
	 * interrupts by the caller.
	 */
	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
	local_flush_tlb_kernel_page(vaddr);
	return vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}

/* IPI handler: flush the kernel TLB entry for ta_start on this CPU. */
static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}