static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}
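For context, a minimal sketch of the bookkeeping this variant assumes: a fixed array of entries, a bitmap marking which slots are live, and an rwlock guarding both. Only the identifiers used by the function above are taken from it; the NR_PMB_ENTRIES value is an assumption for illustration.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative sketch; the entry count is assumed, not from this excerpt */
#define NR_PMB_ENTRIES	16

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);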
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp;
	unsigned long wanted;
	int pmb_flags, i;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		struct pmb_entry *pmbe;
		int ret;

		/* Skip entry sizes too large for the remaining region */
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
		if (IS_ERR(pmbe))
			return PTR_ERR(pmbe);

		ret = set_pmb_entry(pmbe);
		if (ret != 0) {
			pmb_free(pmbe);
			return -EBUSY;
		}

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;
	}

	/* At least one more smallest (16MB) entry still fits; retry from the top */
	if (size >= 0x1000000)
		goto again;

	return wanted - size;
}
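A hedged usage sketch follows: pmb_remap() returns the number of bytes it actually managed to map (wanted - size), or a negative errno, so an ioremap-style caller has to cope with partial coverage. The caller function and variable names here are hypothetical, not from this excerpt.

/*
 * Illustrative caller: cover as much of [phys, phys + size) through the
 * PMB as possible, then leave the remainder to be mapped by other means.
 * Every name other than pmb_remap() is hypothetical.
 */
static void example_map_region(unsigned long vaddr, unsigned long phys,
			       unsigned long size, unsigned long pgprot_flags)
{
	long mapped;

	mapped = pmb_remap(vaddr, phys, size, pgprot_flags);
	if (mapped > 0) {
		/* pmb_remap() may legitimately map less than requested */
		vaddr += mapped;
		phys  += mapped;
		size  -= mapped;
	}

	/* ... map the remaining 'size' bytes via page tables ... */
}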
An earlier variant of the same suspend hook walks a singly linked pmb_list under a spinlock instead of the bitmap-indexed array:

static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		spin_lock_irq(&pmb_list_lock);

		for (pmbe = pmb_list; pmbe; pmbe = pmbe->next)
			set_pmb_entry(pmbe);

		spin_unlock_irq(&pmb_list_lock);
	}

	prev_state = state;

	return 0;
}
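Either version of the hook only runs if it is wired into the driver model. A minimal sketch of that wiring, assuming the legacy sysdev API of the same kernel era (sysdev_driver_register() against cpu_sysdev_class); the init wrapper name is illustrative.

#include <linux/sysdev.h>

/* Sketch of the registration glue, assuming the legacy sysdev API */
static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);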