/*
 * copy_data_pages - copy every saveable lowmem page into the buffer
 * pages recorded in the pagedir_nosave PBE list.
 *
 * Highmem zones are skipped here (handled elsewhere).  Free pages are
 * marked first so that saveable() can filter them out.  The trailing
 * BUG_ON() verifies that exactly nr_copy_pages pages were copied.
 */
static void copy_data_pages(void)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe = pagedir_nosave;
	int to_copy = nr_copy_pages;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			/* saveable() takes a pointer, so it may adjust
			 * zone_pfn — TODO confirm against its definition. */
			if (saveable(zone, &zone_pfn)) {
				struct page *page;

				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				/*
				 * Cast to unsigned long: orig_address holds a
				 * kernel virtual address (cf. the pblist
				 * variant of this function); a plain (long)
				 * cast could sign-extend on some targets.
				 */
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address,
				       (void *)pbe->orig_address, PAGE_SIZE);
				pbe++;
				to_copy--;
			}
		}
	}
	BUG_ON(to_copy);
}
/*
 * set_highmem_pages_init - hand the pages of every highmem zone over to
 * the page allocator via add_highpages_with_active_regions().
 */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();

	for_each_zone(zone) {
		unsigned long start_pfn, end_pfn;
		int node;

		if (!is_highmem(zone))
			continue;

		start_pfn = zone->zone_start_pfn;
		end_pfn = start_pfn + zone->spanned_pages;
		node = zone_to_nid(zone);

		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
		       zone->name, node, start_pfn, end_pfn);
		add_highpages_with_active_regions(node, start_pfn, end_pfn);
	}
}
/*
 * set_highmem_pages_init - register highmem zone pages with the
 * allocator, then (XEN) initialize the highmem pages that lie beyond
 * the initial Xen allocation.
 */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long start_pfn, end_pfn, pfn;

		if (!is_highmem(zone))
			continue;

		start_pfn = zone->zone_start_pfn;
		end_pfn = start_pfn + zone->spanned_pages;
		nid = zone_to_nid(zone);

		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
		       zone->name, nid, start_pfn, end_pfn);
		add_highpages_with_active_regions(nid, start_pfn, end_pfn);

		/* XEN: init high-mem pages outside initial allocation. */
		pfn = start_pfn;
		if (pfn < xen_start_info->nr_pages)
			pfn = xen_start_info->nr_pages;
		for (; pfn < end_pfn; pfn++) {
			ClearPageReserved(pfn_to_page(pfn));
			init_page_count(pfn_to_page(pfn));
		}
	}
	totalram_pages += totalhigh_pages;
}
/*
 * copy_data_pages - copy every saveable lowmem page into the buffer
 * pages linked through the @pblist PBE chain.
 *
 * The final BUG_ON(pbe) checks the chain was consumed exactly: the
 * number of PBEs must equal the number of saveable pages.
 */
static void copy_data_pages(struct pbe *pblist)
{
	struct zone *zone;
	unsigned long zone_pfn;
	struct pbe *pbe, *p;

	pbe = pblist;
	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* This is necessary for swsusp_free() */
		/* NOTE(review): these two marking passes run once per lowmem
		 * zone; re-setting the same page flag is idempotent, so the
		 * repetition is harmless, just redundant. */
		for_each_pb_page (p, pblist)
			SetPageNosaveFree(virt_to_page(p));
		for_each_pbe (p, pblist)
			SetPageNosaveFree(virt_to_page(p->address));
		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
			/* saveable() takes a pointer, so it may adjust
			 * zone_pfn — TODO confirm against its definition. */
			if (saveable(zone, &zone_pfn)) {
				struct page *page;
				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
				/* Running out of PBEs here is a bug. */
				BUG_ON(!pbe);
				pbe->orig_address = (unsigned long)page_address(page);
				/* copy_page is not usable for copying task structs. */
				memcpy((void *)pbe->address,
				       (void *)pbe->orig_address, PAGE_SIZE);
				pbe = pbe->next;
			}
		}
	}
	BUG_ON(pbe);
}
/*
 * swsusp_shrink_memory - reclaim memory until the estimated suspend
 * image is expected to fit.
 *
 * Returns 0 on success, or -ENOMEM if shrink_all_memory() can make no
 * further progress while more free lowmem is still required.
 */
int swsusp_shrink_memory(void)
{
	long size, tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";	/* console progress-spinner glyphs */

	printk("Shrinking memory... ");
	do {
		/*
		 * Estimate of pages needed: twice the highmem page count
		 * plus a 2% margin (size/50), the data pages, the PBE
		 * index pages, and pages reserved for I/O.
		 */
		size = 2 * count_highmem_pages();
		size += size / 50 + count_data_pages();
		size += (size + PBES_PER_PAGE - 1) / PBES_PER_PAGE +
			PAGES_FOR_IO;
		/* tmp = pages still missing from free lowmem. */
		tmp = size;
		for_each_zone (zone)
			if (!is_highmem(zone))
				tmp -= zone->free_pages;
		if (tmp > 0) {
			/* Not enough free lowmem yet: reclaim one bite. */
			tmp = shrink_all_memory(SHRINK_BITE);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			/* Image still exceeds the target size: keep going. */
			tmp = shrink_all_memory(SHRINK_BITE);
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);	/* stop once a pass reclaims nothing new */
	printk("\bdone (%lu pages freed)\n", pages);

	return 0;
}
/*
 * copy_data_pages - mark every saveable lowmem page in @orig_bm, then
 * copy each of those pages into the page frames recorded in @copy_bm.
 *
 * The two bitmaps are walked in lockstep: for every set bit taken from
 * @orig_bm one frame is consumed from @copy_bm, so @copy_bm must hold
 * at least as many frames as @orig_bm has saveable pages.
 */
static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	/* Pass 1: record the PFNs of all saveable lowmem pages. */
	for_each_zone (zone) {
		unsigned long max_zone_pfn;

		if (is_highmem(zone))
			continue;
		/* presumably lets saveable_page() filter out free pages —
		 * TODO confirm against mark_free_pages()/saveable_page(). */
		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	/* Pass 2: rewind both bitmaps and copy page by page. */
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			struct page *page;
			void *src;

			page = pfn_to_page(pfn);
			src = page_address(page);
			/* Destination frame comes from copy_bm's cursor. */
			page = pfn_to_page(memory_bm_next_pfn(copy_bm));
			copy_data_page(page_address(page), src);
		}
	} while (pfn != BM_END_OF_MAP);
}
static void search_dead_task_and_mark(struct page* page, unsigned int nr_pages) { struct dead_task_struct* t; int high = is_highmem(page_zone(page)); unsigned long flags; spin_lock_irqsave(&dtask_list_lock, flags); #if 0 t = find_dtask_from_tree(pid); if( t ) { __write_mem_usage_to_dtask(t, nr_pages, high); if (t->mem_info.total_alloc_size <= 0) { delete_dtask_from_tree(t); goto out; } } #else for_each_dtask(t) { if( if_dtask_is_page_onwer (t, page)) { __write_mem_usage_to_dtask(t, nr_pages, high); if (t->mem_info.total_alloc_size == 0) { free_dtask(t); } break; } } #endif spin_unlock_irqrestore(&dtask_list_lock, flags); }
/*
 * swsusp_shrink_memory - reclaim memory until the suspend image is
 * expected to fit, tracking lowmem and highmem needs separately.
 *
 * Returns 0 on success, or -ENOMEM if __shrink_memory() can make no
 * further progress while more free memory is still required.
 */
int swsusp_shrink_memory(void)
{
	long tmp;
	struct zone *zone;
	unsigned long pages = 0;
	unsigned int i = 0;
	char *p = "-\\|/";	/* console progress-spinner glyphs */
	struct timeval start, stop;

	printk("Shrinking memory... ");
	do_gettimeofday(&start);
	do {
		long size, highmem_size;

		highmem_size = count_highmem_pages();
		size = count_data_pages() + PAGES_FOR_IO;
		/* tmp tracks the lowmem deficit; size the total need. */
		tmp = size;
		size += highmem_size;
		for_each_zone (zone)
			if (populated_zone(zone)) {
				if (is_highmem(zone)) {
					highmem_size -=
					    zone_page_state(zone, NR_FREE_PAGES);
				} else {
					/* Subtract free pages, but add back the
					 * lowmem reserve and the metadata pages
					 * the snapshot itself will need. */
					tmp -= zone_page_state(zone, NR_FREE_PAGES);
					tmp += zone->lowmem_reserve[ZONE_NORMAL];
					tmp += snapshot_additional_pages(zone);
				}
			}
		if (highmem_size < 0)
			highmem_size = 0;
		/* Any highmem deficit must be made up from lowmem. */
		tmp += highmem_size;
		if (tmp > 0) {
			tmp = __shrink_memory(tmp);
			if (!tmp)
				return -ENOMEM;
			pages += tmp;
		} else if (size > image_size / PAGE_SIZE) {
			/* Enough free memory, but the image still exceeds
			 * the configured target size. */
			tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
			pages += tmp;
		}
		printk("\b%c", p[i++%4]);
	} while (tmp > 0);	/* stop once a pass reclaims nothing new */
	do_gettimeofday(&stop);
	printk("\bdone (%lu pages freed)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Freed");

	return 0;
}
/*
 * save_highmem - save every highmem zone.
 *
 * Returns 0 on success, or the first nonzero error code reported by
 * save_highmem_zone().
 */
int save_highmem(void)
{
	struct zone *zone;
	int res;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone (zone) {
		if (!is_highmem(zone))
			continue;
		res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	return 0;
}
/*
 * count_data_pages - count the saveable pages in all lowmem zones.
 */
static unsigned count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn;
	unsigned int count = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		/* saveable() takes &pfn, so it may adjust the index —
		 * keep the pass-by-pointer form. */
		for (pfn = 0; pfn < zone->spanned_pages; ++pfn)
			count += saveable(zone, &pfn);
	}
	return count;
}
/*
 * save_highmem - drain per-CPU page lists, then save every highmem
 * zone.
 *
 * Returns 0 on success, or the first nonzero error code reported by
 * save_highmem_zone() (in which case the trailing newline is skipped).
 */
int save_highmem(void)
{
	struct zone *zone;
	int res;

	pr_debug("swsusp: Saving Highmem");
	drain_local_pages();
	for_each_zone (zone) {
		if (!is_highmem(zone))
			continue;
		res = save_highmem_zone(zone);
		if (res)
			return res;
	}
	printk("\n");
	return 0;
}
static int enough_free_mem(unsigned int nr_pages) { struct zone *zone; unsigned int free = 0, meta = 0; for_each_zone (zone) if (!is_highmem(zone)) { free += zone->free_pages; meta += snapshot_additional_pages(zone); } pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n", nr_pages, PAGES_FOR_IO, meta, free); return free > nr_pages + PAGES_FOR_IO + meta; }
/*
 * count_data_pages - count the saveable pages in all lowmem zones.
 */
unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, end_pfn;
	unsigned int count = 0;

	for_each_zone (zone) {
		if (is_highmem(zone))
			continue;
		mark_free_pages(zone);
		end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < end_pfn; pfn++)
			if (saveable_page(pfn))
				count++;
	}
	return count;
}
/*
 * save_highmem - save every highmem zone; a no-op returning 0 when the
 * kernel is built without CONFIG_HIGHMEM.
 *
 * Returns 0 on success, or the first nonzero error code reported by
 * save_highmem_zone().
 */
static int save_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	struct zone *zone;
	int res;

	pr_debug("swsusp: Saving Highmem\n");
	for_each_zone(zone) {
		if (!is_highmem(zone))
			continue;
		res = save_highmem_zone(zone);
		if (res)
			return res;
	}
#endif
	return 0;
}
void writeLog_alloc_info(unsigned int order, struct page *page) { struct task_struct *tsk = current; unsigned int nr_pages = (1 << order); int high = is_highmem(page_zone(page)); int i; #ifdef CONFIG_CPU_HAS_ASID if (unlikely(need_to_backup_asid(tsk))) task_backup_asid(tsk); #endif write_alloc_mem_usage( &tsk->leak_detector, nr_pages, high); for(i=0 ; i < nr_pages ; i++) { (page + i)->onwer_info.pid = tsk->pid; (page + i)->onwer_info.sid = tsk->lt_info.sid; (page + i)->onwer_info.group_leader_pid = tsk->group_leader->pid; (page + i)->onwer_info.magic = PAGE_MAGIC; } }
/*
 * set_highmem_pages_init - hand the pages of every highmem zone over to
 * the page allocator and fold them into totalram_pages.
 */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;

	for_each_zone(zone) {
		unsigned long start_pfn, end_pfn;
		int node;

		if (!is_highmem(zone))
			continue;

		start_pfn = zone->zone_start_pfn;
		end_pfn = start_pfn + zone->spanned_pages;
		node = zone_to_nid(zone);

		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
		       zone->name, node, start_pfn, end_pfn);
		add_highpages_with_active_regions(node, start_pfn, end_pfn);
	}
	totalram_pages += totalhigh_pages;
}
unsigned int count_highmem_pages(void) { struct zone *zone; unsigned long zone_pfn; unsigned int n = 0; for_each_zone (zone) if (is_highmem(zone)) { mark_free_pages(zone); for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) { struct page *page; unsigned long pfn = zone_pfn + zone->zone_start_pfn; if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); if (PageReserved(page)) continue; if (PageNosaveFree(page)) continue; n++; } } return n; }
/*
 * search_task_and_mark - charge freed pages back to the live task that
 * owns @page.
 *
 * Walks the process list (under tasklist_lock, IRQs off) looking for
 * the thread group whose leader pid matches the one stamped on the
 * page, then scans that group's threads for the actual owner and logs
 * the freed usage in its leak detector.
 *
 * NOTE(review): this function is TRUNCATED in the chunk under review —
 * the out_tasklist_loop: label targeted by the goto, the matching
 * read_unlock_irqrestore(), the return of task_found and the closing
 * brace are not visible here.  Do not modify without the full text.
 */
static int search_task_and_mark(struct page* page, unsigned int nr_pages)
{
	struct task_struct* p;
	struct task_struct* t;
	pid_t glpid = page->onwer_info.group_leader_pid;
	int high = is_highmem(page_zone(page));
	unsigned long flags;
	int task_found = 0;

	read_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		if(p->pid == glpid) {
			t = p;
			do {
				if( if_task_is_page_onwer(t, page )) {
					task_found = 1;
					write_free_mem_usage(&t->leak_detector,
							     nr_pages, high);
					goto out_tasklist_loop;
				}
			} while_each_thread(p, t);
			break;	/* leader found; no other group can match */
		}
	}
/*
 * memory_bm_create - allocate and initialize a memory bitmap covering
 * every populated lowmem zone.
 *
 * @bm:		bitmap object to populate.
 * @gfp_mask:	allocation flags for the backing pages.
 * @safe_needed: passed through to the chain allocator / page allocator.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via memory_bm_free().
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone (zone)
		if (populated_zone(zone) && !is_highmem(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone (zone) {
		unsigned long pfn;

		if (!populated_zone(zone) || is_highmem(zone))
			continue;
		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;
		/* nr now counts the PFNs left to cover in this zone. */
		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = alloc_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;
			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				/* Full-sized block. */
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

Free:
	/* Keep the chain reachable so memory_bm_free() can release it. */
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}