/*
 * copy_data_pages - copy every saveable lowmem page into the buffers of
 * the page backup entry (PBE) list @pblist.
 *
 * For each non-highmem zone the pages that hold the PBE list itself
 * (both the chain pages and the preallocated data buffers) are flagged
 * NosaveFree first, so that swsusp_free() can later recognize and
 * reclaim them.  Every page reported saveable() is then copied into the
 * next PBE's buffer and its original address is recorded.
 *
 * The two BUG_ON()s assert that the number of saveable pages found here
 * exactly matches the length of @pblist: the chain must neither run out
 * early nor have entries left over.
 */
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		/* NOTE(review): saveable() takes &zone_pfn, so it may advance
		 * the scan index itself — confirm against its definition. */
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address,
					(void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	/* All PBEs must have been consumed by now. */
	BUG_ON(pbe);
}
static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages) { int error; error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); if (error) goto Free; error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); if (error) goto Free; while (nr_pages-- > 0) { struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD); if (!page) goto Free; SetPageNosave(page); SetPageNosaveFree(page); memory_bm_set_bit(copy_bm, page_to_pfn(page)); } return 0; Free: swsusp_free(); return -ENOMEM; }
static int mark_unsafe_pages(struct memory_bitmap *bm) { struct zone *zone; unsigned long pfn, max_zone_pfn; /* Clear page flags */ for_each_zone (zone) { max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) if (pfn_valid(pfn)) ClearPageNosaveFree(pfn_to_page(pfn)); } /* Mark pages that correspond to the "original" pfns as "unsafe" */ memory_bm_position_reset(bm); do { pfn = memory_bm_next_pfn(bm); if (likely(pfn != BM_END_OF_MAP)) { if (likely(pfn_valid(pfn))) SetPageNosaveFree(pfn_to_page(pfn)); else return -EFAULT; } } while (pfn != BM_END_OF_MAP); allocated_unsafe_pages = 0; return 0; }
/*
 * alloc_image_page - allocate a zeroed page, optionally avoiding frames
 * marked NosaveFree ("unsafe" for image restore).
 *
 * With @safe_needed set, pages that land on unsafe frames are kept
 * allocated (flagged Nosave and counted in allocated_unsafe_pages) so
 * that swsusp_free() can reclaim them, and allocation is retried until
 * a safe frame is obtained or memory runs out.
 *
 * Returns the page's virtual address, or NULL on allocation failure.
 * The returned page is flagged Nosave and NosaveFree.
 */
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *page;

	if (!safe_needed) {
		page = (void *)get_zeroed_page(gfp_mask);
	} else {
		for (;;) {
			page = (void *)get_zeroed_page(gfp_mask);
			if (!page || !PageNosaveFree(virt_to_page(page)))
				break;
			/* Unsafe frame: keep it for swsusp_free() and retry. */
			SetPageNosave(virt_to_page(page));
			allocated_unsafe_pages++;
		}
	}
	if (page) {
		SetPageNosave(virt_to_page(page));
		SetPageNosaveFree(virt_to_page(page));
	}
	return page;
}
/*
 * alloc_image_page - allocate a zeroed page, optionally skipping frames
 * marked NosaveFree ("unsafe").
 *
 * With @safe_needed set, any page that lands on an unsafe frame is kept
 * allocated and flagged Nosave (so swsusp_free() can release it), and a
 * new page is requested until a safe one is found or allocation fails.
 *
 * Returns the page's virtual address (flagged Nosave and NosaveFree),
 * or NULL on allocation failure.
 */
static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *addr;

	addr = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed) {
		while (addr && PageNosaveFree(virt_to_page(addr))) {
			/* This is for swsusp_free() */
			SetPageNosave(virt_to_page(addr));
			addr = (void *)get_zeroed_page(gfp_mask);
		}
	}
	if (addr) {
		SetPageNosave(virt_to_page(addr));
		SetPageNosaveFree(virt_to_page(addr));
	}
	return addr;
}
/*
 * prepare_image - get ready to load the suspend image data.
 *
 * Marks the frames occupied by the image as unsafe, duplicates the
 * "original pfns" bitmap @bm into a freshly created @new_bm backed by
 * safe pages, and then preallocates memory for the image in two phases:
 * first a reserve of safe pages for chain_alloc() (linked through
 * sp_list), then the bulk of the image pages, of which only the safe
 * ones are linked into safe_pages_list.  Unsafe pages allocated along
 * the way stay flagged so swsusp_free() can reclaim them.
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * allocated so far is released via swsusp_free().
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages;
	struct linked_page *sp_list, *lp;
	int error;

	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;
	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;
	duplicate_memory_bitmap(new_bm, bm);
	/* Keep the unsafe pages flagged; only drop the old bitmap's memory. */
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!PageNosaveFree(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated (safe or not) so that
		 * swsusp_free() will find and release it. */
		SetPageNosave(virt_to_page(lp));
		SetPageNosaveFree(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/*
 * swsusp_pagedir_relocate - move pagedir_nosave off page frames that
 * collide with the saved image.
 *
 * Frames colliding with the image cannot hold the pagedir during
 * restore.  To find a non-colliding block this "eats" memory: it keeps
 * allocating blocks of pagedir_order, chaining the colliding ones into
 * a singly linked list stored inside the pages themselves (the first
 * word of each eaten block points to the previously eaten one), until a
 * block that does not collide turns up or memory runs out.  All eaten
 * blocks are freed again before returning.
 */
static int __init swsusp_pagedir_relocate(void)
{
	/*
	 * We have to avoid recursion (not to overflow kernel stack),
	 * and that's why code looks pretty cryptic
	 */
	suspend_pagedir_t *old_pagedir = pagedir_nosave;
	void **eaten_memory = NULL;
	void **c = eaten_memory, *m, *f;
	int ret = 0;
	struct zone *zone;
	int i;
	struct pbe *p;
	unsigned long zone_pfn;

	printk("Relocating pagedir ");

	/* Set page flags: start with every spanned frame flagged.
	 * NOTE(review): no pfn_valid() check here — presumably every pfn in
	 * the spanned range is valid on the targets this ran on; verify. */
	for_each_zone(zone) {
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
			SetPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
	}

	/* Clear orig address: frames holding image data must not be reused. */
	for (i = 0, p = pagedir_nosave; i < nr_copy_pages; i++, p++) {
		ClearPageNosaveFree(virt_to_page(p->orig_address));
	}

	if (!does_collide_order((unsigned long)old_pagedir, pagedir_order)) {
		printk("not necessary\n");
		return check_pagedir();
	}

	/* Eat colliding blocks until a non-colliding one is found. */
	while ((m = (void *)__get_free_pages(GFP_ATOMIC, pagedir_order)) != NULL) {
		if (!does_collide_order((unsigned long)m, pagedir_order))
			break;
		eaten_memory = m;
		printk(".");
		/* Link this block in front of the eaten list. */
		*eaten_memory = c;
		c = eaten_memory;
	}

	if (!m) {
		printk("out of memory\n");
		ret = -ENOMEM;
	} else {
		pagedir_nosave = memcpy(m, old_pagedir,
					PAGE_SIZE << pagedir_order);
	}

	/* Give back everything that was eaten along the way. */
	c = eaten_memory;
	while (c) {
		printk(":");
		f = c;
		c = *c;
		free_pages((unsigned long)f, pagedir_order);
	}
	if (ret)
		return ret;
	printk("|\n");
	return check_pagedir();
}