long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
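As a rough illustration of how a caller could use pmb_remap() and interpret its return value, here is a minimal sketch. The virtual/physical addresses, the 64MB size, and the choice of _PAGE_CACHABLE are assumptions made up for the example, not values taken from the kernel sources.

/*
 * Illustrative only: the virtual/physical addresses, size, and flags
 * below are assumptions, not values from the kernel sources.
 */
static long example_pmb_window(void)
{
	long mapped;

	/* Ask for a 64MB cached window over physical RAM. */
	mapped = pmb_remap(0xa4000000UL,	/* hypothetical virtual base  */
			   0x40000000UL,	/* hypothetical physical base */
			   0x04000000UL,	/* 64MB                       */
			   _PAGE_CACHABLE);
	if (mapped < 0)
		return mapped;		/* PTR_ERR() value or -EBUSY */

	/* pmb_remap() returns how many bytes it actually covered. */
	if (mapped < 0x04000000L)
		pr_warn("PMB: short mapping, %ld of 64MB covered\n", mapped);

	return 0;
}

Because 64MB is one of the supported PMB entry sizes, the greedy loop in pmb_remap() would satisfy this request with a single entry; an unaligned or odd-sized request would instead be covered by a chain of smaller entries linked through ->link.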
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry **p, *pmbe;

	/* Find the head entry for this virtual address on the global list */
	for (p = &pmb_list; (pmbe = *p); p = &pmbe->next)
		if (pmbe->vpn == addr)
			break;

	if (unlikely(!pmbe))
		return;

	WARN_ON(!test_bit(pmbe->entry, &pmb_map));

	/* Tear down the whole chain that pmb_remap() linked together */
	do {
		struct pmb_entry *pmblink = pmbe;

		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;
		pmb_free(pmblink);
	} while (pmbe);
}
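For reference, a minimal sketch of the struct pmb_entry fields that pmb_unmap() relies on. The layout is an assumption for illustration; the real structure in arch/sh also carries at least the physical page number and the flag bits.

/* Sketch only: fields as used by pmb_unmap(), not the full definition. */
struct pmb_entry {
	unsigned long vpn;		/* virtual page number, matched against addr */
	int entry;			/* slot index into the hardware PMB / pmb_map bitmap */
	struct pmb_entry *link;		/* next piece of the same pmb_remap() mapping */
	struct pmb_entry *next;		/* next entry on the global pmb_list */
	/* the real structure also holds the ppn and flags (assumption) */
};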
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;
		pmb_free(pmblink);
	} while (pmbe && --depth);
}
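The double-underscore prefix suggests this helper runs with the PMB table lock already held. A sketch of what a locked wrapper might look like, assuming a pmb_rwlock protecting the entry table; the lock name and the exact wrapper signature are assumptions here.

/* Sketch only: the lock and wrapper shown are assumptions. */
static DEFINE_RWLOCK(pmb_rwlock);

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	/* Serialize against concurrent PMB table updates. */
	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

Unlike the older pmb_unmap() loop above, the depth argument bounds how many linked entries are torn down, so a caller can release a partially built chain without walking past the entries it actually owns.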