/*
 * replica_check_poolset_health -- check if a given poolset can be considered
 * as healthy, and store the status in a helping structure
 */
int
replica_check_poolset_health(struct pool_set *set,
        struct poolset_health_status **set_hsp, unsigned flags)
{
        if (replica_create_poolset_health_status(set, set_hsp)) {
                LOG(1, "Creating poolset health status failed");
                return -1;
        }

        struct poolset_health_status *set_hs = *set_hsp;

        /* check if part files exist, and if not - create them, and open them */
        check_and_open_poolset_part_files(set, set_hs, flags);

        /* map all headers */
        map_all_unbroken_headers(set, set_hs);

        /* check if checksums are correct for parts in all replicas */
        check_checksums(set, set_hs);

        /* check if uuids in parts across each replica are consistent */
        if (check_replicas_consistency(set, set_hs)) {
                LOG(1, "Replica consistency check failed");
                goto err;
        }

        /* check poolset_uuid values between replicas */
        if (check_poolset_uuids(set, set_hs)) {
                LOG(1, "Poolset uuids check failed");
                goto err;
        }

        /* check if uuids for adjacent replicas are consistent */
        if (check_uuids_between_replicas(set, set_hs)) {
                LOG(1, "Replica uuids check failed");
                goto err;
        }

        if (check_store_all_sizes(set, set_hs)) {
                LOG(1, "Reading pool sizes failed");
                goto err;
        }

        unmap_all_headers(set);
        util_poolset_fdclose(set);
        return 0;

err:
        unmap_all_headers(set);
        util_poolset_fdclose(set);
        replica_free_poolset_health_status(set_hs);
        return -1;
}
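/*
 * Usage sketch (not from the source): a minimal, hypothetical caller for the
 * health check above. On success the function allocates the status structure
 * through set_hsp, so the caller owns it and must free it.
 * replica_is_poolset_healthy() is assumed to be the PMDK helper that reports
 * whether every replica passed all checks; check_poolset() is an illustrative
 * name introduced here.
 */
static int
check_poolset(struct pool_set *set, unsigned flags)
{
        struct poolset_health_status *set_hs = NULL;

        /* allocates set_hs on success; cleans up after itself on failure */
        if (replica_check_poolset_health(set, &set_hs, flags))
                return -1;

        /* assumed helper: nonzero when the whole poolset is healthy */
        int healthy = replica_is_poolset_healthy(set_hs);

        replica_free_poolset_health_status(set_hs);
        return healthy ? 0 : -1;
}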
/**
 * __toi_post_context_save - steps after saving the cpu context
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
        unsigned long old_ps1_size = pagedir1.size;

        check_checksums();

        free_checksum_pages();

        toi_recalculate_image_contents(1);

        extra_pd1_pages_used = pagedir1.size > old_ps1_size ?
                pagedir1.size - old_ps1_size : 0;

        if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
                printk(KERN_INFO "Pageset1 has grown by %lu pages. "
                        "extra_pages_allowance is currently only %lu.\n",
                        pagedir1.size - old_ps1_size,
                        extra_pd1_pages_allowance);

                /*
                 * Highlevel code will see this, clear the state and
                 * retry if we haven't already done so twice.
                 */
                if (any_to_free(1)) {
                        set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
                        return 1;
                }
                if (try_allocate_extra_memory()) {
                        printk(KERN_INFO "Failed to allocate the extra memory"
                                " needed. Restarting the process.\n");
                        set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
                        return 1;
                }
                printk(KERN_INFO "However, it looks like there's enough"
                        " free ram and storage to handle this, so"
                        " continuing anyway.\n");
                /*
                 * What if try_allocate_extra_memory above calls
                 * toi_allocate_extra_pagedir_memory and it allocs a new
                 * slab page via toi_kzalloc which should be in ps1? So...
                 */
                toi_recalculate_image_contents(1);
        }

        if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
            !test_action_state(TOI_TEST_BIO))
                toi_copy_pageset1();

        return 0;
}
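/*
 * Illustration only (not TuxOnIce code): the retry contract implied by the
 * comments above. A nonzero return together with a set abort result tells
 * the high-level code to clear its state and try again a bounded number of
 * times. Every name below is a hypothetical stand-in for the real call chain.
 */
#define MAX_COPY_RETRIES 2

extern int do_post_context_save(void);  /* stands in for __toi_post_context_save */
extern void clear_hibernation_state(void);

static int run_atomic_copy(void)
{
        int attempt;

        for (attempt = 0; attempt <= MAX_COPY_RETRIES; attempt++) {
                if (!do_post_context_save())
                        return 0;       /* pageset1 copied successfully */
                clear_hibernation_state();      /* drop state before retrying */
        }
        return -1;      /* allowance still too small after retries */
}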
/*
 * replica_check_poolset_health -- check if a given poolset can be considered
 * as healthy, and store the status in a helping structure
 */
int
replica_check_poolset_health(struct pool_set *set,
        struct poolset_health_status **set_hsp, unsigned flags)
{
        LOG(3, "set %p, set_hsp %p, flags %u", set, set_hsp, flags);

        if (replica_create_poolset_health_status(set, set_hsp)) {
                LOG(1, "creating poolset health status failed");
                return -1;
        }

        struct poolset_health_status *set_hs = *set_hsp;

        /* check if part files exist, and if not - create them, and open them */
        check_and_open_poolset_part_files(set, set_hs, flags);

        /* map all headers */
        map_all_unbroken_headers(set, set_hs);

        /* check if checksums are correct for parts in all replicas */
        check_checksums(set, set_hs);

        /* check if option flags are consistent */
        if (check_options(set, set_hs)) {
                LOG(1, "flags check failed");
                goto err;
        }

        if (check_shutdown_state(set, set_hs)) {
                LOG(1, "replica shutdown_state check failed");
                goto err;
        }

        /* check if uuids in parts across each replica are consistent */
        if (check_replicas_consistency(set, set_hs)) {
                LOG(1, "replica consistency check failed");
                goto err;
        }

        /* check poolset_uuid values between replicas */
        if (check_poolset_uuids(set, set_hs)) {
                LOG(1, "poolset uuids check failed");
                goto err;
        }

        /* check if uuids for adjacent replicas are consistent */
        if (check_uuids_between_replicas(set, set_hs)) {
                LOG(1, "replica uuids check failed");
                goto err;
        }

        /* check if healthy replicas make up another poolset */
        if (check_replica_cycles(set, set_hs)) {
                LOG(1, "replica cycles check failed");
                goto err;
        }

        /* check if replicas are large enough */
        if (check_replica_sizes(set, set_hs)) {
                LOG(1, "replica sizes check failed");
                goto err;
        }

        if (check_store_all_sizes(set, set_hs)) {
                LOG(1, "reading pool sizes failed");
                goto err;
        }

        unmap_all_headers(set);
        util_poolset_fdclose_always(set);
        return 0;

err:
        errno = EINVAL;
        unmap_all_headers(set);
        util_poolset_fdclose_always(set);
        replica_free_poolset_health_status(set_hs);
        return -1;
}
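/*
 * Usage sketch (not from the source): once the extended health check above
 * succeeds, a repair path can walk the replicas and single out the broken
 * ones. replica_is_replica_healthy() and the nreplicas field are assumed to
 * match PMDK's replica module; log_broken_replicas() is a name invented for
 * this illustration.
 */
static void
log_broken_replicas(struct poolset_health_status *set_hs)
{
        for (unsigned r = 0; r < set_hs->nreplicas; r++) {
                /* assumed helper: nonzero when replica r passed all checks */
                if (!replica_is_replica_healthy(r, set_hs))
                        LOG(1, "replica %u requires synchronization", r);
        }
}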