struct tte *
tsb_tte_lookup(pmap_t pm, vm_offset_t va)
{
	struct tte *bucket;
	struct tte *tp;
	u_long sz;
	u_int i;

	if (pm == kernel_pmap) {
		PMAP_STATS_INC(tsb_nlookup_k);
		tp = tsb_kvtotte(va);
		if (tte_match(tp, va))
			return (tp);
	} else {
		PMAP_LOCK_ASSERT(pm, MA_OWNED);
		PMAP_STATS_INC(tsb_nlookup_u);
		for (sz = TS_MIN; sz <= TS_MAX; sz++) {
			bucket = tsb_vtobucket(pm, sz, va);
			for (i = 0; i < TSB_BUCKET_SIZE; i++) {
				tp = &bucket[i];
				if (tte_match(tp, va))
					return (tp);
			}
		}
	}
	return (NULL);
}

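/*
 * Illustrative sketch: a hypothetical helper showing how a caller might use
 * tsb_tte_lookup() on a user pmap.  The helper name and the choice of TD_REF
 * as the bit of interest are assumptions for the example; the locking
 * requirement and the NULL-on-miss contract come from tsb_tte_lookup() above.
 */
static __inline int
pmap_example_is_referenced(pmap_t pm, vm_offset_t va)
{
	struct tte *tp;
	int ref;

	ref = 0;
	PMAP_LOCK(pm);		/* Required for user pmaps (asserted above). */
	tp = tsb_tte_lookup(pm, va);
	if (tp != NULL)
		ref = (tp->tte_data & TD_REF) != 0;
	PMAP_UNLOCK(pm);
	return (ref);
}
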
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	register_t s;

	PMAP_STATS_INC(tlb_npage_demap);
	cookie = ipi_tlb_page_demap(pm, va);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	}
	intr_restore(s);
	ipi_wait(cookie);
}

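/*
 * Illustrative sketch: a hypothetical write-protect helper showing the usual
 * pairing of a TTE update with tlb_page_demap().  The helper itself is an
 * assumption; TD_SW and TD_W are assumed to be the software and hardware
 * writable bits as used elsewhere in the sparc64 pmap code.
 */
static __inline void
pmap_example_wprotect(pmap_t pm, struct tte *tp)
{
	vm_offset_t va;

	va = TTE_GET_VA(tp);
	atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
	/* Flush the now-stale entry from the local and remote TLBs. */
	tlb_page_demap(pm, va);
}
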
void
tlb_context_demap(struct pmap *pm)
{
	void *cookie;
	register_t s;

	/*
	 * It is important that we are not interrupted or preempted while
	 * doing the IPIs.  The interrupted CPU may hold locks, and since it
	 * will wait for the CPU that sent the IPI, this can lead to a
	 * deadlock when an interrupt comes in on that CPU and its handler
	 * tries to grab one of those locks.  This will only happen for spin
	 * locks, but these IPI types are delivered even if normal interrupts
	 * are disabled, so the lock critical section will not protect the
	 * target processor from entering the IPI handler with the lock held.
	 */
	PMAP_STATS_INC(tlb_ncontext_demap);
	cookie = ipi_tlb_context_demap(pm);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_context_demap: inactive pmap?"));
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	}
	intr_restore(s);
	ipi_wait(cookie);
}

/*
 * Traverse the TSB of a pmap, calling the callback function for every valid
 * tte entry whose virtual address lies between start and end.  If the
 * callback returns 0, tsb_foreach() terminates.
 * This is used by pmap_remove(), pmap_protect() and pmap_copy() as an
 * optimization when the number of pages in the given range is on the order
 * of the TSB size, so that scanning the whole TSB once is cheaper than
 * looking up each page individually.
 */
void
tsb_foreach(pmap_t pm1, pmap_t pm2, vm_offset_t start, vm_offset_t end,
    tsb_callback_t *callback)
{
	vm_offset_t va;
	struct tte *tp;
	struct tte *tsbp;
	uintptr_t i;
	uintptr_t n;

	PMAP_STATS_INC(tsb_nforeach);
	if (pm1 == kernel_pmap) {
		tsbp = tsb_kernel;
		n = tsb_kernel_size / sizeof(struct tte);
	} else {
		tsbp = pm1->pm_tsb;
		n = TSB_SIZE;
	}
	for (i = 0; i < n; i++) {
		tp = &tsbp[i];
		if ((tp->tte_data & TD_V) != 0) {
			va = TTE_GET_VA(tp);
			if (va >= start && va < end) {
				if (!callback(pm1, pm2, tp, va))
					break;
			}
		}
	}
}

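/*
 * Illustrative sketch: a hypothetical callback with the four-argument shape
 * that tsb_foreach() invokes (pm1, pm2, tp, va).  It only demonstrates the
 * contract described above: a non-zero return continues the walk, a zero
 * return stops it.  The counter and the names are assumptions.
 */
static u_long pmap_example_ntte;

static int
pmap_example_count_tte(pmap_t pm, pmap_t pm2, struct tte *tp, vm_offset_t va)
{

	pmap_example_ntte++;
	return (1);		/* Returning 0 would stop tsb_foreach(). */
}

/* Possible use: tsb_foreach(pm, NULL, start, end, pmap_example_count_tte); */
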
void
tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;
	void *cookie;
	u_long flags;
	register_t s;

	PMAP_STATS_INC(tlb_nrange_demap);
	cookie = ipi_tlb_range_demap(pm, start, end);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_range_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		for (va = start; va < end; va += PAGE_SIZE) {
			stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
			stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
			flush(KERNBASE);
		}
	}
	intr_restore(s);
	ipi_wait(cookie);
}

void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;

	PMAP_STATS_INC(uma_nsmall_free);
	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
}

void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	PMAP_STATS_INC(uma_nsmall_alloc);
	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				VM_WAIT;
		} else
			break;
	}
	pa = VM_PAGE_TO_PHYS(m);
	if (dcache_color_ignore == 0 && m->md.color != DCACHE_COLOR(pa)) {
		KASSERT(m->md.colors[0] == 0 && m->md.colors[1] == 0,
		    ("uma_small_alloc: free page %p still has mappings!", m));
		PMAP_STATS_INC(uma_nsmall_alloc_oc);
		m->md.color = DCACHE_COLOR(pa);
		dcache_page_inval(pa);
	}
	va = (void *)TLB_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		cpu_block_zero(va, PAGE_SIZE);
	return (va);
}

struct tte *
tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t va, u_long sz, u_long data)
{
	struct tte *bucket;
	struct tte *rtp;
	struct tte *tp;
	vm_offset_t ova;
	int b0;
	int i;

	if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
		CTR5(KTR_SPARE2,
		    "tsb_tte_enter: off colour va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
		    va, VM_PAGE_TO_PHYS(m), m->object,
		    m->object ? m->object->type : -1, m->pindex);
		if (pm == kernel_pmap)
			PMAP_STATS_INC(tsb_nenter_k_oc);
		else
			PMAP_STATS_INC(tsb_nenter_u_oc);
	}

	rw_assert(&tte_list_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	if (pm == kernel_pmap) {
		PMAP_STATS_INC(tsb_nenter_k);
		tp = tsb_kvtotte(va);
		KASSERT((tp->tte_data & TD_V) == 0,
		    ("tsb_tte_enter: replacing valid kernel mapping"));
		goto enter;
	}
	PMAP_STATS_INC(tsb_nenter_u);

	bucket = tsb_vtobucket(pm, sz, va);

	tp = NULL;
	rtp = NULL;
	b0 = rd(tick) & (TSB_BUCKET_SIZE - 1);
	i = b0;
	do {
		if ((bucket[i].tte_data & TD_V) == 0) {
			tp = &bucket[i];
			break;
		}
		if (tp == NULL) {
			if ((bucket[i].tte_data & TD_REF) == 0)
				tp = &bucket[i];
			else if (rtp == NULL)
				rtp = &bucket[i];
		}
	} while ((i = (i + 1) & (TSB_BUCKET_SIZE - 1)) != b0);

	if (tp == NULL)
		tp = rtp;
	if ((tp->tte_data & TD_V) != 0) {
		PMAP_STATS_INC(tsb_nrepl);
		ova = TTE_GET_VA(tp);
		pmap_remove_tte(pm, NULL, tp, ova);
		tlb_page_demap(pm, ova);
	}

enter:
	if ((m->flags & PG_FICTITIOUS) == 0) {
		data |= TD_CP;
		if ((m->oflags & VPO_UNMANAGED) == 0) {
			pm->pm_stats.resident_count++;
			data |= TD_PV;
		}
		if (pmap_cache_enter(m, va) != 0)
			data |= TD_CV;
		TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
	} else
		data |= TD_FAKE | TD_E;

	tp->tte_vpn = TV_VPN(va, sz);
	tp->tte_data = data;

	return (tp);
}

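/*
 * Illustrative sketch: a hypothetical caller assembling a minimal 8K TTE data
 * word and installing it with tsb_tte_enter().  The helper and its simplistic
 * protection handling are assumptions; TD_V, TD_8K, TD_PA(), TD_SW, TD_W and
 * TS_8K are assumed to be the usual sparc64 TTE definitions.
 */
static struct tte *
pmap_example_enter_8k(pmap_t pm, vm_page_t m, vm_offset_t va, int writable)
{
	u_long data;

	/* tsb_tte_enter() asserts both of these. */
	rw_assert(&tte_list_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	data = TD_V | TD_8K | TD_PA(VM_PAGE_TO_PHYS(m));
	if (writable)
		data |= TD_SW | TD_W;
	return (tsb_tte_enter(pm, m, va, TS_8K, data));
}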