/*
 * Find the TTE for virtual address "va" in the TSB of pmap "pm", or
 * return NULL if no matching entry exists.
 *
 * The kernel pmap has a direct virtual-to-TTE mapping; user pmaps are
 * searched one bucket per supported TSB size, scanning every slot in
 * each bucket.  Caller must hold the pmap lock for user pmaps.
 */
struct tte *
tsb_tte_lookup(pmap_t pm, vm_offset_t va)
{
	struct tte *entry;
	struct tte *line;
	u_long scale;
	u_int slot;

	if (pm == kernel_pmap) {
		/* Kernel TSB: a single direct lookup decides the result. */
		PMAP_STATS_INC(tsb_nlookup_k);
		entry = tsb_kvtotte(va);
		return (tte_match(entry, va) ? entry : NULL);
	}

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	PMAP_STATS_INC(tsb_nlookup_u);
	/* Probe the bucket for each TSB size until a match turns up. */
	for (scale = TS_MIN; scale <= TS_MAX; scale++) {
		line = tsb_vtobucket(pm, scale, va);
		for (slot = 0; slot < TSB_BUCKET_SIZE; slot++) {
			entry = &line[slot];
			if (tte_match(entry, va))
				return (entry);
		}
	}
	return (NULL);
}
/*
 * Translate the kernel virtual address "va" into an offset "*pa" within
 * the crash dump backing "kd".
 *
 * Directly-mapped addresses are converted arithmetically; all others are
 * resolved by reading the matching TTE out of the dumped kernel TSB.
 * Returns the number of bytes valid from "*pa" to the end of the page,
 * or 0 (with an error set on "kd") if the address cannot be translated.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
	struct tte entry;
	off_t entry_off;
	off_t phys;
	u_long page_index;
	u_long offset;
	int nbytes;

	offset = va & PAGE_MASK;
	if (va >= VM_MIN_DIRECT_ADDRESS) {
		/* Direct map: physical address falls out arithmetically. */
		phys = TLB_DIRECT_TO_PHYS(va) & ~PAGE_MASK;
	} else {
		/* Locate and validate the TTE for this page in the TSB. */
		page_index = btop(va);
		entry_off = kd->vmst->vm_tsb_off +
		    ((page_index & kd->vmst->vm_tsb_mask) << TTE_SHIFT);
		if (!_kvm_read_phys(kd, entry_off, &entry, sizeof(entry)))
			goto invalid;
		if (!tte_match(&entry, va))
			goto invalid;
		phys = TTE_GET_PA(&entry);
	}

	/* Map the physical page to its location inside the dump file. */
	nbytes = PAGE_SIZE - offset;
	phys = _kvm_find_off(kd->vmst, phys, nbytes);
	if (phys == KVM_OFF_NOTFOUND)
		goto invalid;
	*pa = phys + offset;
	return (nbytes);

invalid:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}