/**
 * __toi_post_context_save - steps after saving the cpu context
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
    unsigned long old_ps1_size = pagedir1.size;

    check_checksums();

    free_checksum_pages();

    toi_recalculate_image_contents(1);

    extra_pd1_pages_used = pagedir1.size > old_ps1_size ?
        pagedir1.size - old_ps1_size : 0;

    if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
        printk(KERN_INFO "Pageset1 has grown by %lu pages. "
            "extra_pages_allowance is currently only %lu.\n",
            pagedir1.size - old_ps1_size,
            extra_pd1_pages_allowance);

        /*
         * Highlevel code will see this, clear the state and
         * retry if we haven't already done so twice.
         */
        if (any_to_free(1)) {
            set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
            return 1;
        }
        if (try_allocate_extra_memory()) {
            printk(KERN_INFO "Failed to allocate the extra memory"
                " needed. Restarting the process.\n");
            set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
            return 1;
        }
        printk(KERN_INFO "However it looks like there's enough"
            " free ram and storage to handle this, so"
            " continuing anyway.\n");
        /*
         * What if try_allocate_extra_memory above calls
         * toi_allocate_extra_pagedir_memory and it allocs a new
         * slab page via toi_kzalloc which should be in ps1? So...
         */
        toi_recalculate_image_contents(1);
    }

    if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
        !test_action_state(TOI_TEST_BIO))
        toi_copy_pageset1();

    return 0;
}
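The contract with the higher-level code is worth spelling out: when pageset1 outgrows the allowance and pages could still be freed, this function sets TOI_EXTRA_PAGES_ALLOW_TOO_SMALL and returns nonzero, and the caller clears state and retries, at most twice. A standalone sketch of that bounded-retry shape (names and numbers here are illustrative, not from the source):

#include <stdio.h>

#define MAX_RETRIES 2

/* Stand-in for the atomic-copy preparation: report whether the image
 * grew past the allowance on this attempt (here, it fails once). */
static int prepare_image(int attempt, unsigned long *allowance)
{
    if (attempt == 0) {
        *allowance += 100;    /* grow the allowance and ask for a retry */
        return 1;
    }
    return 0;
}

int main(void)
{
    unsigned long allowance = 500;
    int attempt;

    for (attempt = 0; attempt <= MAX_RETRIES; attempt++) {
        if (!prepare_image(attempt, &allowance)) {
            printf("succeeded with allowance %lu\n", allowance);
            return 0;
        }
        printf("attempt %d: allowance too small, retrying\n", attempt);
    }
    printf("giving up\n");
    return 1;
}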
/**
 * ui_nl_set_state - Update toi_action based on a message from userui.
 *
 * @n: The bit (1 << bit) to set.
 */
static void ui_nl_set_state(int n)
{
    /* Only let them change certain settings */
    static const u32 toi_action_mask =
        (1 << TOI_REBOOT) | (1 << TOI_PAUSE) |
        (1 << TOI_LOGALL) |
        (1 << TOI_SINGLESTEP) |
        (1 << TOI_PAUSE_NEAR_PAGESET_END);

    toi_bkd.toi_action = (toi_bkd.toi_action & (~toi_action_mask)) |
        (n & toi_action_mask);

    if (!test_action_state(TOI_PAUSE) &&
        !test_action_state(TOI_SINGLESTEP))
        wake_up_interruptible(&userui_wait_for_key);
}
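The masked read-modify-write above is the usual way to let untrusted input toggle only a whitelisted subset of flag bits: clear the maskable bits of the current value, then OR in the request filtered through the mask. The same pattern in self-contained form (flag names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag bits, standing in for the TOI_* action bits. */
#define FLAG_REBOOT (1u << 0)
#define FLAG_PAUSE  (1u << 1)
#define FLAG_SECRET (1u << 2)    /* not user-settable */

static uint32_t apply_masked(uint32_t current, uint32_t request)
{
    /* Only FLAG_REBOOT and FLAG_PAUSE may be changed by the caller. */
    const uint32_t mask = FLAG_REBOOT | FLAG_PAUSE;

    /* Keep non-maskable bits, take maskable bits from the request. */
    return (current & ~mask) | (request & mask);
}

int main(void)
{
    uint32_t state = FLAG_SECRET;

    /* The request tries to set FLAG_SECRET too; the mask drops it. */
    state = apply_masked(state, FLAG_PAUSE | FLAG_SECRET);
    printf("state = %#x\n", (unsigned)state); /* 0x6: SECRET kept, PAUSE set */
    return 0;
}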
/**
 * submit - submit BIO request
 * @writing: READ or WRITE.
 * @dev: The block device we're using.
 * @first_block: The first sector we're using.
 * @page: The page being used for I/O.
 * @free_group: If writing, the group that was used in allocating the page
 *	and which will be used in freeing the page from the completion
 *	routine.
 *
 * Based on Patrick Mochel's pmdisk code from long ago: "Straight from the
 * textbook - allocate and initialize the bio. If we're writing, make sure
 * the page is marked as dirty. Then submit it and carry on."
 *
 * If we're just testing the speed of our own code, we fake having done all
 * the hard work and call toi_end_bio immediately.
 **/
static int submit(int writing, struct block_device *dev, sector_t first_block,
        struct page *page, int free_group)
{
    struct bio *bio = NULL;
    int cur_outstanding_io, result;

    /*
     * Shouldn't throttle if reading - can deadlock in the single
     * threaded case as pages are only freed when we use the
     * readahead.
     */
    if (writing) {
        result = throttle_if_needed(MEMORY_ONLY | THROTTLE_WAIT);
        if (result)
            return result;
    }

    while (!bio) {
        bio = bio_alloc(TOI_ATOMIC_GFP, 1);
        if (!bio) {
            set_free_mem_throttle();
            do_bio_wait(1);
        }
    }

    bio->bi_bdev = dev;
    bio->bi_sector = first_block;
    bio->bi_private = (void *)((unsigned long)free_group);
    bio->bi_end_io = toi_end_bio;
    bio->bi_flags |= (1 << BIO_TOI);

    if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
        printk(KERN_DEBUG "ERROR: adding page to bio at %lld\n",
                (unsigned long long)first_block);
        bio_put(bio);
        return -EFAULT;
    }

    bio_get(bio);

    cur_outstanding_io = atomic_add_return(1, &toi_io_in_progress);
    if (writing) {
        if (cur_outstanding_io > max_outstanding_writes)
            max_outstanding_writes = cur_outstanding_io;
    } else {
        if (cur_outstanding_io > max_outstanding_reads)
            max_outstanding_reads = cur_outstanding_io;
    }

    /* Still read the header! */
    if (unlikely(test_action_state(TOI_TEST_BIO) && writing)) {
        /* Fake having done the hard work */
        set_bit(BIO_UPTODATE, &bio->bi_flags);
        toi_end_bio(bio, 0);
    } else
        submit_bio(writing | REQ_SYNC, bio);

    return 0;
}
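The while (!bio) loop encodes a "never fail, just wait" allocation policy: on each miss it tightens the free-memory throttle and waits on outstanding I/O, whose completions free pages, then retries. A userspace analogue of that shape (the helpers are illustrative stand-ins for set_free_mem_throttle() and do_bio_wait(), and malloc stands in for bio_alloc):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel throttle/wait helpers. */
static void lower_throttle(void) { printf("throttling new I/O\n"); }
static void wait_for_io(void)    { printf("waiting for I/O to free pages\n"); }

static void *alloc_or_wait(size_t size)
{
    void *p = NULL;

    while (!p) {
        p = malloc(size);
        if (!p) {
            /* Out of memory: slow producers down and let
             * completions release pages, then try again. */
            lower_throttle();
            wait_for_io();
        }
    }
    return p;
}

int main(void)
{
    void *p = alloc_or_wait(64);

    printf("got %p\n", p);
    free(p);
    return 0;
}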
void check_checksums(void)
{
    unsigned long pfn;
    int index = 0, cpu = smp_processor_id();
    char current_checksum[CHECKSUM_SIZE];
    struct cpu_context *ctx = &per_cpu(contexts, cpu);

    if (!toi_checksum_ops.enabled) {
        toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksumming disabled.");
        return;
    }

    next_page = (unsigned long) page_list;

    toi_num_resaved = 0;
    this_checksum = 0;

    toi_message(TOI_IO, TOI_VERBOSE, 0, "Verifying checksums.");
    memory_bm_position_reset(pageset2_map);
    for (pfn = memory_bm_next_pfn(pageset2_map); pfn != BM_END_OF_MAP;
            pfn = memory_bm_next_pfn(pageset2_map)) {
        int ret;
        char *pa;
        struct page *page = pfn_to_page(pfn);

        if (index % CHECKSUMS_PER_PAGE) {
            this_checksum += CHECKSUM_SIZE;
        } else {
            this_checksum = next_page + sizeof(void *);
            next_page = *((unsigned long *) next_page);
        }

        /* Done when IRQs disabled so must be atomic */
        pa = kmap_atomic(page);
        memcpy(ctx->buf, pa, PAGE_SIZE);
        kunmap_atomic(pa);
        ret = crypto_hash_digest(&ctx->desc, ctx->sg, PAGE_SIZE,
                current_checksum);

        if (ret) {
            printk(KERN_INFO "Digest failed. Returned %d.\n", ret);
            return;
        }

        if (memcmp(current_checksum, (char *) this_checksum,
                CHECKSUM_SIZE)) {
            toi_message(TOI_IO, TOI_VERBOSE, 0, "Resaving %lu.",
                    pfn);
            SetPageResave(pfn_to_page(pfn));
            toi_num_resaved++;
            if (test_action_state(TOI_ABORT_ON_RESAVE_NEEDED))
                set_abort_result(TOI_RESAVE_NEEDED);
        }

        index++;
    }
    toi_message(TOI_IO, TOI_VERBOSE, 0, "Checksum verification complete.");
}
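The next_page/this_checksum arithmetic above implies the shape of the checksum store: a chain of pages, each beginning with a pointer to the next page, followed by CHECKSUMS_PER_PAGE digests of CHECKSUM_SIZE bytes each. A standalone sketch of that inferred layout (the struct and helper are illustrative; the kernel code does the same walk on raw addresses):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE          4096
#define CHECKSUM_SIZE      16    /* e.g. an MD5-sized digest */
#define CHECKSUMS_PER_PAGE ((PAGE_SIZE - sizeof(void *)) / CHECKSUM_SIZE)

/* One page of checksum storage: a next pointer, then packed digests. */
struct checksum_page {
    struct checksum_page *next;
    char sums[CHECKSUMS_PER_PAGE][CHECKSUM_SIZE];
};

/* Return the slot for the index-th checksum, walking the chain. */
static char *checksum_slot(struct checksum_page *head, unsigned long index)
{
    while (index >= CHECKSUMS_PER_PAGE) {
        head = head->next;
        index -= CHECKSUMS_PER_PAGE;
    }
    return head->sums[index];
}

int main(void)
{
    struct checksum_page *a = calloc(1, sizeof(*a));
    struct checksum_page *b = calloc(1, sizeof(*b));

    a->next = b;
    memcpy(checksum_slot(a, CHECKSUMS_PER_PAGE), "digest-0", 9);
    printf("%s\n", checksum_slot(a, CHECKSUMS_PER_PAGE)); /* lands in page b */
    free(a);
    free(b);
    return 0;
}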
/**
 * ui_nl_set_state - Update toi_action based on a message from userui.
 *
 * @n: The bit (1 << bit) to set.
 */
static void ui_nl_set_state(int n)
{
    /* Only let them change certain settings */
    static const u32 toi_action_mask =
        (1 << TOI_REBOOT) | (1 << TOI_PAUSE) |
        (1 << TOI_LOGALL) |
        (1 << TOI_SINGLESTEP) |
        (1 << TOI_PAUSE_NEAR_PAGESET_END);
    unsigned long new_action;

    new_action = (toi_bkd.toi_action & (~toi_action_mask)) |
        (n & toi_action_mask);

    printk(KERN_DEBUG "n is %x. Action flags being changed from %lx "
        "to %lx.\n", n, toi_bkd.toi_action, new_action);

    toi_bkd.toi_action = new_action;

    if (!test_action_state(TOI_PAUSE) &&
        !test_action_state(TOI_SINGLESTEP))
        wake_up_interruptible(&userui_wait_for_key);
}
static void free_update_stats(int fail_num, int size)
{
    BUG_ON(fail_num >= TOI_ALLOC_PATHS);

    atomic_inc(&toi_free_count[fail_num]);
    if (unlikely(atomic_read(&toi_free_count[fail_num]) >
            atomic_read(&toi_alloc_count[fail_num])))
        dump_stack();
    if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
        mutex_lock(&toi_alloc_mutex);
        cur_allocd -= size;
        toi_cur_allocd[fail_num]--;
        mutex_unlock(&toi_alloc_mutex);
    }
}
/**
 * toi_end_atomic - post atomic copy/restore routines
 * @stage: What step to start at.
 * @suspend_time: Whether we're suspending or resuming.
 * @error: Whether we're recovering from an error.
 **/
void toi_end_atomic(int stage, int suspend_time, int error)
{
    pm_message_t msg = suspend_time ?
        (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE;

    /*
     * Deliberate fall-through: entering at @stage runs that step's
     * teardown and every one after it.
     */
    switch (stage) {
    case ATOMIC_ALL_STEPS:
        if (!suspend_time) {
            events_check_enabled = false;
            platform_leave(1);
        }
        /* fall through */
    case ATOMIC_STEP_SYSCORE_RESUME:
        syscore_resume();
        /* fall through */
    case ATOMIC_STEP_IRQS:
        local_irq_enable();
        /* fall through */
    case ATOMIC_STEP_CPU_HOTPLUG:
        if (test_action_state(TOI_LATE_CPU_HOTPLUG))
            enable_nonboot_cpus();
        /* fall through */
    case ATOMIC_STEP_PLATFORM_FINISH:
        if (!suspend_time && (error & 2))
            platform_restore_cleanup(1);
        else
            platform_finish(1);
        dpm_resume_start(msg);
        /* fall through */
    case ATOMIC_STEP_DEVICE_RESUME:
        if (suspend_time && (error & 2))
            platform_recover(1);
        dpm_resume(msg);
        if (error || !toi_in_suspend())
            pm_restore_gfp_mask();
        ftrace_start();
        resume_console();
        /* fall through */
    case ATOMIC_STEP_DPM_COMPLETE:
        dpm_complete(msg);
        /* fall through */
    case ATOMIC_STEP_PLATFORM_END:
        platform_end(1);

        toi_prepare_status(DONT_CLEAR_BAR, "Post atomic.");
    }
}
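The entry points passed to toi_end_atomic work because the switch falls through: a failed setup only needs to name the first thing that must be undone, and everything after it unwinds automatically. The same ladder in miniature (stage names illustrative):

#include <stdio.h>

enum stage { STEP_ALL, STEP_C, STEP_B, STEP_A };

/* Entering at a given stage undoes that step and everything after it. */
static void unwind(enum stage from)
{
    switch (from) {
    case STEP_ALL:
        /* fall through */
    case STEP_C:
        printf("undo C\n");
        /* fall through */
    case STEP_B:
        printf("undo B\n");
        /* fall through */
    case STEP_A:
        printf("undo A\n");
    }
}

int main(void)
{
    /* Setup order was A, B, C. C failed, so only B and A need
     * undoing: enter the ladder at STEP_B. */
    unwind(STEP_B);
    return 0;
}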
static void alloc_update_stats(int fail_num, void *result, int size)
{
    if (!result) {
        atomic_inc(&toi_fail_count[fail_num]);
        return;
    }

    atomic_inc(&toi_alloc_count[fail_num]);
    if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
        mutex_lock(&toi_alloc_mutex);
        toi_cur_allocd[fail_num]++;
        cur_allocd += size;
        if (unlikely(cur_allocd > max_allocd)) {
            int i;

            for (i = 0; i < TOI_ALLOC_PATHS; i++)
                toi_max_allocd[i] = toi_cur_allocd[i];
            max_allocd = cur_allocd;
        }
        mutex_unlock(&toi_alloc_mutex);
    }
}
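Taken together, alloc_update_stats and free_update_stats give two things per allocation site: a sanity check (a free count exceeding its alloc count trips dump_stack) and a high-water mark, where each new peak of the running total snapshots every site's current count. A compact userspace analogue:

#include <stdio.h>

#define PATHS 3

static long alloc_count[PATHS], free_count[PATHS];
static long cur_allocd, max_allocd, cur_by_path[PATHS], max_by_path[PATHS];

static void note_alloc(int path, long size)
{
    int i;

    alloc_count[path]++;
    cur_by_path[path]++;
    cur_allocd += size;
    if (cur_allocd > max_allocd) {
        /* New peak: snapshot every path's current count. */
        for (i = 0; i < PATHS; i++)
            max_by_path[i] = cur_by_path[i];
        max_allocd = cur_allocd;
    }
}

static void note_free(int path, long size)
{
    free_count[path]++;
    if (free_count[path] > alloc_count[path])
        fprintf(stderr, "path %d: more frees than allocs!\n", path);
    cur_allocd -= size;
    cur_by_path[path]--;
}

int main(void)
{
    note_alloc(0, 4096);
    note_alloc(1, 4096);
    note_free(0, 4096);
    printf("peak %ld bytes; path 0 held %ld, path 1 held %ld at peak\n",
           max_allocd, max_by_path[0], max_by_path[1]);
    return 0;
}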
static void __toi_power_down(int method)
{
    int error;

    toi_cond_pause(1, test_action_state(TOI_REBOOT) ?
        "Ready to reboot." : "Powering down.");

    if (test_result_state(TOI_ABORTED))
        goto out;

    if (test_action_state(TOI_REBOOT))
        kernel_restart(NULL);

    switch (method) {
    case 0:
        break;
    case 3:
        /*
         * Re-read the overwritten part of pageset2 to make post-resume
         * faster.
         */
        if (read_pageset2(1))
            panic("Attempt to reload pagedir 2 failed. "
                "Try rebooting.");

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (!error) {
            pm_restore_gfp_mask();
            error = suspend_devices_and_enter(PM_SUSPEND_MEM);
            pm_restrict_gfp_mask();
            if (!error)
                did_suspend_to_both = 1;
        }
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();

        /*
         * jonathan.jmchen: FIXME: Create API to add another wakeup
         * source to power down, if system is idle after xxx
         * (e.g. 5 min) without user interaction!!
         */

        /* Success - we're now post-resume-from-ram */
        if (did_suspend_to_both)
            return;

        /* Failed to suspend to ram - do normal power off */
        break;
    case 4:
        /*
         * If succeeds, doesn't return. If fails, do a simple
         * powerdown.
         */
        hibernation_platform_enter();
        break;
    case 5:
        /* Historic entry only now */
        break;
    }

    if (method && method != 5)
        toi_cond_pause(1,
            "Falling back to alternate power off method.");

    if (test_result_state(TOI_ABORTED))
        goto out;

    kernel_power_off();
    kernel_halt();
    toi_cond_pause(1, "Powerdown failed.");
    while (1)
        cpu_relax();

out:
    if (read_pageset2(1))
        panic("Attempt to reload pagedir 2 failed. Try rebooting.");
    return;
}
/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state: The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time: Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
    if (suspend_time) {
        if (platform_begin(1)) {
            set_abort_result(TOI_PLATFORM_PREP_FAILED);
            toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
            return 1;
        }

        if (dpm_prepare(PMSG_FREEZE)) {
            set_abort_result(TOI_DPM_PREPARE_FAILED);
            dpm_complete(PMSG_RECOVER);
            toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
            return 1;
        }
    }

    suspend_console();
    ftrace_stop();
    pm_restrict_gfp_mask();

    if (suspend_time) {
        if (dpm_suspend(state)) {
            set_abort_result(TOI_DPM_SUSPEND_FAILED);
            toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
            return 1;
        }
    } else {
        if (dpm_suspend_start(state)) {
            set_abort_result(TOI_DPM_SUSPEND_FAILED);
            toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
            return 1;
        }
    }

    /* At this point, dpm_suspend_start() has been called, but *not*
     * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
     * Otherwise, drivers for some devices (e.g. interrupt controllers)
     * become desynchronized with the actual state of the hardware
     * at resume time, and evil weirdness ensues.
     */
    if (dpm_suspend_end(state)) {
        set_abort_result(TOI_DEVICE_REFUSED);
        toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
        return 1;
    }

    if (suspend_time) {
        if (platform_pre_snapshot(1))
            set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
    } else {
        if (platform_pre_restore(1))
            set_abort_result(TOI_PRE_RESTORE_FAILED);
    }

    if (test_result_state(TOI_ABORTED)) {
        toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
        return 1;
    }

    if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
        if (disable_nonboot_cpus()) {
            set_abort_result(TOI_CPU_HOTPLUG_FAILED);
            toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 1);
            return 1;
        }
    }

    local_irq_disable();

    if (syscore_suspend()) {
        set_abort_result(TOI_SYSCORE_REFUSED);
        toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
        return 1;
    }

    if (suspend_time && pm_wakeup_pending()) {
        set_abort_result(TOI_WAKEUP_EVENT);
        toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
        return 1;
    }

    return 0;
}
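Every failure path above calls toi_end_atomic itself before returning, so ownership of the unwind never transfers to the caller on error; only a successful return obliges the caller to end the atomic section later. A standalone sketch of that convention (go_atomic/end_atomic here are illustrative stand-ins, not the kernel functions):

#include <stdio.h>

/* On failure, go_atomic() unwinds everything itself and the caller
 * must NOT call end_atomic(); on success the caller owns the teardown. */
static int go_atomic(int fail_at)
{
    if (fail_at == 1) {
        printf("step 1 failed: unwinding internally\n");
        return 1;    /* already cleaned up */
    }
    printf("atomic state entered\n");
    return 0;
}

static void end_atomic(void)
{
    printf("atomic state left\n");
}

static int do_cycle(int fail_at)
{
    if (go_atomic(fail_at))
        return 1;    /* no end_atomic(): go_atomic cleaned up */

    printf("...atomic copy happens here...\n");

    end_atomic();
    return 0;
}

int main(void)
{
    do_cycle(1);    /* failure path */
    do_cycle(0);    /* success path */
    return 0;
}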
/**
 * hibernate - Carry out system hibernation, including saving the image.
 */
int hibernate(void)
{
    int error;

    hib_log("entering hibernate()\n");

    if (test_action_state(TOI_REPLACE_SWSUSP)) {
        error = try_tuxonice_hibernate();
        return error;
    }

    lock_system_sleep();

    /* The snapshot device should not be opened while we're running */
    if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
        error = -EBUSY;
        goto Unlock;
    }

    pm_prepare_console();
    error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
    if (error)
        goto Exit;

    /* Allocate memory management structures */
    error = create_basic_memory_bitmaps();
    if (error)
        goto Exit;

    printk(KERN_INFO "PM: Syncing filesystems ... ");
    sys_sync();
    printk("done.\n");

    error = freeze_processes();
    if (error)
        goto Free_bitmaps;

    error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
    if (error || freezer_test_done)
        goto Thaw;

    if (in_suspend) {
        unsigned int flags = 0;

        if (hibernation_mode == HIBERNATION_PLATFORM)
            flags |= SF_PLATFORM_MODE;
        if (nocompress)
            flags |= SF_NOCOMPRESS_MODE;
        else
            flags |= SF_CRC32_MODE;

        pr_debug("PM: writing image.\n");
        error = swsusp_write(flags);
        swsusp_free();
        if (!error)
            power_down();
        in_suspend = 0;
        pm_restore_gfp_mask();
    } else {
        pr_debug("PM: Image restored successfully.\n");
    }

 Thaw:
    thaw_processes();

    /* Don't bother checking whether freezer_test_done is true */
    freezer_test_done = false;

 Free_bitmaps:
    free_basic_memory_bitmaps();
 Exit:
    pm_notifier_call_chain(PM_POST_HIBERNATION);
    pm_restore_console();
    atomic_inc(&snapshot_device_available);
 Unlock:
    unlock_system_sleep();
    return error;
}
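The Thaw/Free_bitmaps/Exit/Unlock labels follow the kernel's goto-unwind idiom: each successfully acquired resource adds a label, and an error jumps to the label that releases everything acquired so far, in reverse order. In miniature:

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
    char *a, *b;
    int err = -1;

    a = malloc(16);
    if (!a)
        goto out;

    b = malloc(16);
    if (!b)
        goto free_a;    /* only a needs releasing */

    printf("both resources held; doing the work\n");
    err = 0;

    free(b);
free_a:
    free(a);
out:
    return err;
}

int main(void)
{
    return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
}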
/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state: The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time: Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
    if (suspend_time) {
        if (platform_begin(1)) {
            set_abort_result(TOI_PLATFORM_PREP_FAILED);
            toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
            hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
                __LINE__, suspend_time, state.event);
            return 1;
        }

        if (dpm_prepare(PMSG_FREEZE)) {
            set_abort_result(TOI_DPM_PREPARE_FAILED);
            dpm_complete(PMSG_RECOVER);
            toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
            hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
                __LINE__, suspend_time, state.event);
            return 1;
        }
    }

    suspend_console();
    ftrace_stop();
    pm_restrict_gfp_mask();

    if (suspend_time) {
#if 0 /* FIXME: jonathan.jmchen: trick code here to let dpm_suspend
       * succeed, NEED to find out the root cause!! */
        if (events_check_enabled) {
            hib_log("play trick here set events_check_enabled(%d) = false!!\n",
                events_check_enabled);
            events_check_enabled = false;
        }
#endif
        if (dpm_suspend(state)) {
            set_abort_result(TOI_DPM_SUSPEND_FAILED);
            toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
            hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                __LINE__, suspend_time, state.event, toi_result);
            return 1;
        }
    } else {
        if (dpm_suspend_start(state)) {
            set_abort_result(TOI_DPM_SUSPEND_FAILED);
            toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
            hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                __LINE__, suspend_time, state.event, toi_result);
            return 1;
        }
    }

    /* At this point, dpm_suspend_start() has been called, but *not*
     * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
     * Otherwise, drivers for some devices (e.g. interrupt controllers)
     * become desynchronized with the actual state of the hardware
     * at resume time, and evil weirdness ensues.
     */
    if (dpm_suspend_end(state)) {
        set_abort_result(TOI_DEVICE_REFUSED);
        toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
            __LINE__, suspend_time, state.event, toi_result);
        return 1;
    }

    if (suspend_time) {
        if (platform_pre_snapshot(1))
            set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
    } else {
        if (platform_pre_restore(1))
            set_abort_result(TOI_PRE_RESTORE_FAILED);
    }

    if (test_result_state(TOI_ABORTED)) {
        toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
            __LINE__, suspend_time, state.event, toi_result);
        return 1;
    }

    if (test_action_state(TOI_LATE_CPU_HOTPLUG)) {
        if (disable_nonboot_cpus()) {
            set_abort_result(TOI_CPU_HOTPLUG_FAILED);
            toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 1);
            hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
                __LINE__, suspend_time, state.event, toi_result);
            return 1;
        }
    }

    local_irq_disable();

    if (syscore_suspend()) {
        set_abort_result(TOI_SYSCORE_REFUSED);
        toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
            __LINE__, suspend_time, state.event, toi_result);
        return 1;
    }

    if (suspend_time && pm_wakeup_pending()) {
        set_abort_result(TOI_WAKEUP_EVENT);
        toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
        hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
            __LINE__, suspend_time, state.event, toi_result);
        return 1;
    }

    hib_log("SUCCEEDED @line:%d suspend(%d) pm_state(%d)\n",
        __LINE__, suspend_time, state.event);
    return 0;
}
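A detail worth keeping straight in the hib_log format strings: the # flag in %#lx already supplies the 0x prefix, so writing a literal 0x in front of %#lx prints a doubled marker. A quick check:

#include <stdio.h>

int main(void)
{
    unsigned long v = 0x2aul;

    printf("%#lx\n", v);     /* 0x2a   - '#' supplies the prefix   */
    printf("0x%lx\n", v);    /* 0x2a   - explicit prefix, no '#'   */
    printf("0x%#lx\n", v);   /* 0x0x2a - doubled prefix            */
    return 0;
}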