static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();

	return ret;
}
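Both versions of set_pmb_entry() operate on the same descriptor. A minimal sketch of the fields they and the functions below touch, reconstructed from usage in this section rather than quoted from pmb.h, looks roughly like this:

/* Sketch only: field set inferred from the code in this section. */
struct pmb_entry {
	unsigned long	vpn;	/* virtual address the entry maps */
	unsigned long	ppn;	/* physical address it maps to */
	unsigned long	flags;	/* PMB_SZ_*, PMB_C, PMB_WT, PMB_UB, ... */
	unsigned long	size;	/* mapping size in bytes */

	spinlock_t	lock;

	int		entry;	/* hardware PMB slot, or PMB_NO_ENTRY */

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};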
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}
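pmb_merge() and pmb_remap() both lean on pmb_size_valid() and pmb_size_to_flags(), which walk the table of PMB-supported page sizes (16MB, 64MB, 128MB and 512MB). A sketch of what those helpers and the pmb_sizes table amount to is shown below; the table contents and ordering here are illustrative, the real definitions live elsewhere in pmb.c:

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

/* True if 'size' is one of the sizes the PMB hardware can map. */
static inline bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

/* Translate a byte size into the matching PMB_SZ_* flag bits. */
static inline unsigned long pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}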
static int __uses_jump_to_uncached pmb_init(void)
{
	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
	unsigned int entry, i;

	BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES));

	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
				      SLAB_PANIC, pmb_cache_ctor);

	jump_to_uncached();

	/*
	 * Ordering is important, P2 must be mapped in the PMB before we
	 * can set PMB.SE, and P1 must be mapped before we jump back to
	 * P1 space.
	 */
	for (entry = 0; entry < nr_entries; entry++) {
		struct pmb_entry *pmbe = pmb_init_map + entry;

		__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry);
	}

	ctrl_outl(0, PMB_IRMCR);

	/* PMB.SE and UB[7] */
	ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR);

	/* Flush out the TLB */
	i = ctrl_inl(MMUCR);
	i |= MMUCR_TI;
	ctrl_outl(i, MMUCR);

	back_to_cached();

	return 0;
}
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		pmb_flags |= PMB_C;

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

	return err;
}
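For context, pmb_remap() returns the number of bytes it managed to cover with PMB entries, or a negative error code, so a caller is expected to handle partial coverage. The sketch below shows a hypothetical caller; example_map_region() is illustrative only and not part of pmb.c, and it assumes a non-cacheable kernel pgprot such as PAGE_KERNEL_NOCACHE.

/* Hypothetical caller, for illustration only. */
static void __iomem *example_map_region(unsigned long virt,
					unsigned long phys,
					unsigned long size)
{
	long mapped;

	mapped = pmb_remap(virt, phys, size, PAGE_KERNEL_NOCACHE);
	if (mapped < 0)
		return NULL;

	/*
	 * Anything pmb_remap() could not cover (mapped < size) would
	 * have to be backed by ordinary page table entries instead.
	 */
	return (void __iomem *)virt;
}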