static suspend_pagedir_t *create_suspend_pagedir(int nr_copy_pages) { int i; suspend_pagedir_t *pagedir; struct pbe *p; struct page *page; pagedir_order = get_bitmask_order(SUSPEND_PD_PAGES(nr_copy_pages)); p = pagedir = (suspend_pagedir_t *)__get_free_pages(GFP_ATOMIC | __GFP_COLD, pagedir_order); if(!pagedir) return NULL; page = virt_to_page(pagedir); for(i=0; i < 1<<pagedir_order; i++) SetPageNosave(page++); while(nr_copy_pages--) { p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD); if(!p->address) { free_suspend_pagedir((unsigned long) pagedir); return NULL; } SetPageNosave(virt_to_page(p->address)); p->orig_address = 0; p++; } return pagedir; }
int allocate_checksum_pages(void) { int pages_needed = checksum_pages_needed(); if (!toi_checksum_ops.enabled) return 0; while (pages_allocated < pages_needed) { unsigned long *new_page = (unsigned long *) toi_get_zeroed_page(15, TOI_ATOMIC_GFP); if (!new_page) { printk(KERN_ERR "Unable to allocate checksum pages.\n"); return -ENOMEM; } SetPageNosave(virt_to_page(new_page)); (*new_page) = page_list; page_list = (unsigned long) new_page; pages_allocated++; } next_page = (unsigned long) page_list; checksum_index = 0; return 0; }
static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages) { int error; error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); if (error) goto Free; error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); if (error) goto Free; while (nr_pages-- > 0) { struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD); if (!page) goto Free; SetPageNosave(page); SetPageNosaveFree(page); memory_bm_set_bit(copy_bm, page_to_pfn(page)); } return 0; Free: swsusp_free(); return -ENOMEM; }
/*
 * Get a zeroed page for the image.
 *
 * When @safe_needed is set, pages that have PageNosaveFree set are
 * considered unsafe: each one is marked for later reclaim by
 * swsusp_free(), counted in allocated_unsafe_pages, and a replacement
 * is requested.  The page finally returned is marked both Nosave and
 * NosaveFree.  Returns NULL if no page can be obtained.
 */
static void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *page = (void *)get_zeroed_page(gfp_mask);

	if (safe_needed) {
		while (page && PageNosaveFree(virt_to_page(page))) {
			/* Unsafe page: mark it for swsusp_free(), retry. */
			SetPageNosave(virt_to_page(page));
			allocated_unsafe_pages++;
			page = (void *)get_zeroed_page(gfp_mask);
		}
	}
	if (page) {
		SetPageNosave(virt_to_page(page));
		SetPageNosaveFree(virt_to_page(page));
	}
	return page;
}
/*
 * Get a zeroed page for the image.
 *
 * When @safe_needed is set, pages with PageNosaveFree set are marked
 * Nosave (so swsusp_free() will reclaim them) and another page is
 * requested until a page without the flag -- or no page at all -- is
 * obtained.  The returned page is marked both Nosave and NosaveFree.
 * Returns NULL on allocation failure.
 */
static inline void *alloc_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *page;

	if (!safe_needed) {
		page = (void *)get_zeroed_page(gfp_mask);
	} else {
		for (;;) {
			page = (void *)get_zeroed_page(gfp_mask);
			if (!page || !PageNosaveFree(virt_to_page(page)))
				break;
			/* This is for swsusp_free() */
			SetPageNosave(virt_to_page(page));
		}
	}
	if (page) {
		SetPageNosave(virt_to_page(page));
		SetPageNosaveFree(virt_to_page(page));
	}
	return page;
}
/*
 * Allocate one zeroed data page for every page backup entry in
 * pagedir_save and mark each page as not to be saved.  Returns
 * -ENOMEM as soon as an allocation fails; pages allocated before the
 * failure are not freed here.
 *
 * NOTE(review): this definition appears truncated in this view -- the
 * end of the function (return value / closing brace) is not visible.
 */
static int alloc_image_pages(void)
{
	struct pbe * p;

	for_each_pbe(p, pagedir_save) {
		p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
		if (!p->address)
			return -ENOMEM;
		SetPageNosave(virt_to_page(p->address));
	}
/*
 * Allocate a zeroed page for each of the nr_copy_pages entries in
 * pagedir_save and mark those pages as not to be saved.
 *
 * Returns 0 on success or -ENOMEM if an allocation fails; pages
 * allocated before the failure are not freed here.
 */
static int alloc_image_pages(void)
{
	struct pbe *pbe = pagedir_save;
	int n;

	for (n = 0; n < nr_copy_pages; n++, pbe++) {
		pbe->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
		if (!pbe->address)
			return -ENOMEM;
		SetPageNosave(virt_to_page(pbe->address));
	}
	return 0;
}
static int alloc_image_pages(void) { struct pbe * p; int i; for (i = 0, p = pagedir_save; i < pmdisk_pages; i++, p++) { p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD); if(!p->address) goto Error; SetPageNosave(virt_to_page(p->address)); } return 0; Error: do { if (p->address) free_page(p->address); p->address = 0; } while (p-- > pagedir_save); return -ENOMEM; }
/*
 * prepare_image - set up structures for loading the hibernation image.
 *
 * Marks unsafe pages in @bm, creates a new bitmap @new_bm from safe
 * pages and copies @bm into it, then performs two allocation passes:
 * first a reserve of safe pages (to guarantee chain_alloc() in
 * get_buffer() can succeed later), then the preallocation of the image
 * pages themselves, collecting the safe ones into safe_pages_list.
 * The reserve is released again at the end.  Returns 0 on success or a
 * negative error code; on failure everything is released via
 * swsusp_free().
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages;
	struct linked_page *sp_list, *lp;
	int error;

	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;
	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;
	duplicate_memory_bitmap(new_bm, bm);
	/* The old bitmap's pages are kept; they are tracked elsewhere. */
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = alloc_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		/* Prepend to the reserve list. */
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!PageNosaveFree(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		SetPageNosave(virt_to_page(lp));
		SetPageNosaveFree(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;
 Free:
	swsusp_free();
	return error;
}