long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
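The pmb_sizes[] table that this loop (and the resync code below) walks is not part of the listing. A minimal sketch of its shape, assuming the usual four PMB page sizes ordered largest first; the field names and SZ_* constants follow the mainline table, but treat the exact contents as illustrative:

	/*
	 * Sketch of the size table assumed by the loops above: entries run
	 * from largest to smallest, so the remap loop greedily picks the
	 * biggest page that still fits. 16 MiB (0x1000000) is the smallest
	 * supported PMB page, which is why the retry check uses that bound.
	 */
	static struct {
		unsigned long size;
		int flag;
	} pmb_sizes[] = {
		{ .size = SZ_512M, .flag = PMB_SZ_512M, },
		{ .size = SZ_128M, .flag = PMB_SZ_128M, },
		{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
		{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
	};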
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * set up the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}
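pmb_can_merge() is called above but not shown in this excerpt. A minimal sketch of the contiguity test it would need to perform, assuming struct pmb_entry carries the vpn, ppn, size, and flags fields used elsewhere in these listings (a reconstruction, not the verbatim helper):

	/*
	 * Sketch: two entries can be linked when the second starts exactly
	 * where the first ends, both virtually and physically, and both
	 * carry the same attribute flags.
	 */
	static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
	{
		return (b->vpn == (a->vpn + a->size)) &&
		       (b->ppn == (a->ppn + a->size)) &&
		       (b->flags == a->flags);
	}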
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		pmb_flags |= PMB_C;

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

	return err;
}
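With the reworked interface a caller passes a pgprot_t rather than raw flag bits. A hypothetical caller sketch, assuming suitably aligned vaddr/phys chosen by the caller and using the stock cached PAGE_KERNEL protection; the helper name and the 64 MiB request size are made up for illustration:

	/*
	 * Hypothetical usage: request a 64 MiB cached mapping and report
	 * partial coverage. pmb_remap() returns the number of bytes it
	 * actually managed to map, or a negative error code.
	 */
	static long example_pmb_map(unsigned long vaddr, unsigned long phys)
	{
		long mapped = pmb_remap(vaddr, phys, SZ_64M, PAGE_KERNEL);

		if (mapped < 0)
			return mapped;		/* allocation failed, nothing mapped */
		if (mapped < SZ_64M)
			pr_warn("PMB covered only %ld bytes of a 64 MiB request\n",
				mapped);

		return mapped;
	}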