/** * toi_copy_pageset1 - do the atomic copy of pageset1 * * Make the atomic copy of pageset1. We can't use copy_page (as we once did) * because we can't be sure what side effects it has. On my old Duron, with * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt * count at resume time 4 instead of 3. * * We don't want to call kmap_atomic unconditionally because it has the side * effect of incrementing the preempt count, which will leave it one too high * post resume (the page containing the preempt count will be copied after * its incremented. This is essentially the same problem. **/ void toi_copy_pageset1(void) { int i; unsigned long source_index, dest_index; memory_bm_position_reset(pageset1_map); memory_bm_position_reset(pageset1_copy_map); source_index = memory_bm_next_pfn(pageset1_map); dest_index = memory_bm_next_pfn(pageset1_copy_map); for (i = 0; i < pagedir1.size; i++) { unsigned long *origvirt, *copyvirt; struct page *origpage, *copypage; int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1, was_present1, was_present2; origpage = pfn_to_page(source_index); copypage = pfn_to_page(dest_index); origvirt = PageHighMem(origpage) ? kmap_atomic(origpage) : page_address(origpage); copyvirt = PageHighMem(copypage) ? kmap_atomic(copypage) : page_address(copypage); was_present1 = kernel_page_present(origpage); if (!was_present1) kernel_map_pages(origpage, 1, 1); was_present2 = kernel_page_present(copypage); if (!was_present2) kernel_map_pages(copypage, 1, 1); while (loop >= 0) { *(copyvirt + loop) = *(origvirt + loop); loop--; } if (!was_present1) kernel_map_pages(origpage, 1, 0); if (!was_present2) kernel_map_pages(copypage, 1, 0); if (PageHighMem(origpage)) kunmap_atomic(origvirt); if (PageHighMem(copypage)) kunmap_atomic(copyvirt); source_index = memory_bm_next_pfn(pageset1_map); dest_index = memory_bm_next_pfn(pageset1_copy_map); } }
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	/* Rewind both bitmaps so iteration starts at the first set PFN. */
	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map);
	dest_index = memory_bm_next_pfn(pageset1_copy_map);

	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		/* Copy one machine word at a time, from the last word of
		 * the page down to the first. */
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		/* Bail out (and abort the hibernation cycle) rather than
		 * dereference an invalid PFN from either bitmap. */
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n",
					__func__, i, dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		/* Only highmem pages need mapping; lowmem pages already
		 * have a permanent kernel virtual address. */
		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage) : page_address(origpage);

		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage) : page_address(copypage);

		/* With DEBUG_PAGEALLOC the page may be absent from the
		 * kernel page tables; map it for the copy and restore
		 * its prior state afterwards. */
		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);

		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);

		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		/* Advance both bitmap cursors to the next PFN pair. */
		source_index = memory_bm_next_pfn(pageset1_map);
		dest_index = memory_bm_next_pfn(pageset1_copy_map);
	}
}