static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageNosave(page) && PageNosaveFree(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = (unsigned long)page_address(page);
	pbe->address = (unsigned long)safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return (void *)pbe->address;
}
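For context, a minimal sketch of how a restore loop could consume get_buffer(): walk the bitmap of original PFNs and copy each data page read from the image into whatever address get_buffer() returns. The restore_data_pages() and load_image_page() names below are hypothetical stand-ins, not the kernel's actual snapshot_write_next() path.

static int restore_data_pages(struct memory_bitmap *orig_bm,
			      struct chain_allocator *ca,
			      unsigned int nr_pages)
{
	while (nr_pages--) {
		/* Either the "original" page frame itself or a "safe" page
		 * that has been recorded in restore_pblist for the final
		 * relocation pass. */
		void *buf = get_buffer(orig_bm, ca);

		if (!buf)
			return -ENOMEM;
		/* load_image_page() is an assumed helper that reads the next
		 * data page of the image into 'buf'. */
		if (load_image_page(buf))
			return -EIO;
	}
	return 0;
}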
void swsusp_free(void)
{
	BUG_ON(PageNosave(virt_to_page(pagedir_save)));
	BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
	free_image_pages();
	free_pages((unsigned long)pagedir_save, pagedir_order);
}
/*
 * Returns true if given address/order collides with any orig_address
 */
static int __init does_collide_order(unsigned long addr, int order)
{
	int i;

	for (i = 0; i < (1 << order); i++)
		if (!PageNosaveFree(virt_to_page(addr + i * PAGE_SIZE)))
			return 1;
	return 0;
}
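A hedged sketch of the retry pattern does_collide_order() enables: allocate order-N blocks until one does not overlap any original page frame, parking rejected candidates so the allocator cannot hand them back, then freeing them. get_noncolliding_block() is illustrative only, not the historical relocation code.

static unsigned long __init get_noncolliding_block(int order)
{
	unsigned long addr, rejects = 0;

	do {
		addr = __get_free_pages(GFP_ATOMIC, order);
		if (!addr)
			break;
		if (!does_collide_order(addr, order))
			break;
		/* Collides with an original page frame: chain it through its
		 * first word and try again. */
		*(unsigned long *)addr = rejects;
		rejects = addr;
	} while (1);

	/* Release all rejected candidates. */
	while (rejects) {
		unsigned long next = *(unsigned long *)rejects;

		free_pages(rejects, order);
		rejects = next;
	}
	return addr;	/* 0 on allocation failure */
}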
static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	if (safe_needed)
		do {
			res = (void *)get_zeroed_page(gfp_mask);
			if (res && PageNosaveFree(virt_to_page(res)))
				/* This is for swsusp_free() */
				SetPageNosave(virt_to_page(res));
		} while (res && PageNosaveFree(virt_to_page(res)));
	else
		res = (void *)get_zeroed_page(gfp_mask);
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
static int save_highmem_zone(struct zone *zone)
{
	unsigned long zone_pfn;

	mark_free_pages(zone);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
		struct page *page;
		struct highmem_page *save;
		void *kaddr;
		unsigned long pfn = zone_pfn + zone->zone_start_pfn;

		if (!(pfn % 1000))
			printk(".");
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/*
		 * This condition results from rvmalloc() sans vmalloc_32()
		 * and architectural memory reservations. This should be
		 * corrected eventually when the cases giving rise to this
		 * are better understood.
		 */
		if (PageReserved(page)) {
			printk("highmem reserved page?!\n");
			continue;
		}
		BUG_ON(PageNosave(page));
		if (PageNosaveFree(page))
			continue;
		save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC);
		if (!save)
			return -ENOMEM;
		save->next = highmem_copy;
		save->page = page;
		save->data = (void *)get_zeroed_page(GFP_ATOMIC);
		if (!save->data) {
			kfree(save);
			return -ENOMEM;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(save->data, kaddr, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		highmem_copy = save;
	}
	return 0;
}
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long zone_pfn;

	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long)page_address(page));
				}
			}
	}
}
static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	if (PageNosave(page))
		return NULL;
	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;
	if (PageNosaveFree(page))
		return NULL;

	return page;
}
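saveable_page() pairs naturally with a counting pass over every page frame; below is a hedged sketch in the spirit of the kernel's count_data_pages(), not a verbatim copy of it.

static unsigned int count_saveable_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;	/* highmem is counted separately */
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}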
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
static int saveable(struct zone *zone, unsigned long *zone_pfn)
{
	unsigned long pfn = *zone_pfn + zone->zone_start_pfn;
	struct page *page;

	if (!pfn_valid(pfn))
		return 0;

	page = pfn_to_page(pfn);
	BUG_ON(PageReserved(page) && PageNosave(page));
	if (PageNosave(page))
		return 0;
	if (PageReserved(page) && pfn_is_nosave(pfn)) {
		pr_debug("[nosave pfn 0x%lx]", pfn);
		return 0;
	}
	if (PageNosaveFree(page))
		return 0;

	return 1;
}
void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((long)page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone)
		if (is_highmem(zone)) {
			mark_free_pages(zone);
			for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) {
				struct page *page;
				unsigned long pfn = zone_pfn + zone->zone_start_pfn;

				if (!pfn_valid(pfn))
					continue;
				page = pfn_to_page(pfn);
				if (PageReserved(page))
					continue;
				if (PageNosaveFree(page))
					continue;
				n++;
			}
		}
	return n;
}
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages;
	struct linked_page *sp_list, *lp;
	int error;

	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!PageNosaveFree(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		SetPageNosave(virt_to_page(lp));
		SetPageNosaveFree(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}