void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	struct page *isolated_page = NULL;
	unsigned int order;
	unsigned long page_idx, buddy_idx;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
		goto out;

	/*
	 * A free page with order greater than pageblock_order sitting on an
	 * isolated pageblock is prevented from merging because of the
	 * freepage counting problem, so a free buddy page may exist.
	 * move_freepages_block() doesn't handle merging, so we need another
	 * approach: isolating and then freeing the page will get them merged.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);

			if (pfn_valid_within(page_to_pfn(buddy)) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				kernel_map_pages(page, (1 << order), 1);
				set_page_refcounted(page);
				isolated_page = page;
			}
		}
	}

	/*
	 * If we isolated a free page with order greater than pageblock_order,
	 * there should be no other free pages in the range, so we can avoid
	 * the costly pageblock scan needed to move free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page)
		__free_pages(isolated_page, order);
}
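/*
 * Illustrative userspace sketch (not kernel code, and not part of the file
 * above): what the buddy index lookup in unset_migratetype_isolate() is
 * doing. In kernels of this vintage __find_buddy_index() amounts to
 * page_idx ^ (1 << order): the buddy of a block is the block whose index
 * differs only in bit 'order'. The constants below are made up purely for
 * the demonstration.
 */
#include <stdio.h>

static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1UL << order);	/* flip bit 'order' */
}

int main(void)
{
	unsigned long page_idx = 512;	/* hypothetical index within a MAX_ORDER block */
	unsigned int order = 9;		/* e.g. a pageblock_order-sized free block */
	unsigned long buddy_idx = find_buddy_index(page_idx, order);

	printf("page_idx=%lu order=%u -> buddy_idx=%lu (offset %+ld pages)\n",
	       page_idx, order, buddy_idx, (long)(buddy_idx - page_idx));
	return 0;
}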
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments the preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post-resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		/* Abort the cycle if either bitmap hands back an invalid pfn. */
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n",
				 __func__, i, dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage) :
			page_address(origpage);

		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage) :
			page_address(copypage);

		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);
		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}
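/*
 * Minimal userspace sketch (not kernel code) of the copy loop above: copy a
 * page-sized buffer one machine word at a time, from the last word down to
 * the first, exactly as the while (loop >= 0) loop does, and without calling
 * any helper that might have side effects. DEMO_PAGE_SIZE is assumed to be
 * 4096 purely for the demonstration.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096

static void copy_page_by_words(unsigned long *copyvirt, const unsigned long *origvirt)
{
	int loop = (DEMO_PAGE_SIZE / sizeof(unsigned long)) - 1;

	while (loop >= 0) {
		copyvirt[loop] = origvirt[loop];
		loop--;
	}
}

int main(void)
{
	static unsigned long src[DEMO_PAGE_SIZE / sizeof(unsigned long)];
	static unsigned long dst[DEMO_PAGE_SIZE / sizeof(unsigned long)];
	size_t i;

	for (i = 0; i < sizeof(src) / sizeof(src[0]); i++)
		src[i] = i * 2654435761UL;	/* arbitrary test pattern */

	copy_page_by_words(dst, src);
	assert(memcmp(dst, src, DEMO_PAGE_SIZE) == 0);
	printf("copied %zu words without using memcpy/copy_page\n",
	       sizeof(src) / sizeof(src[0]));
	return 0;
}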