/*
 * demand attach fs
 * save all fileserver state
 */
int
fs_stateSave(void)
{
    int ret = 0, verified = 1;
    struct fs_dump_state state;

    /* save and restore need to be atomic wrt other host package operations */
    H_LOCK;

    ViceLog(0, ("fs_stateSave: commencing fileserver state dump\n"));

    if (fs_stateAlloc(&state)) {
        ViceLog(0, ("fs_stateSave: memory allocation failed; dump aborted\n"));
        ret = 1;
        goto done;
    }

    /* XXX
     * on busy servers, these checks will inevitably fail since stuff drops H_LOCK
     * all over the place (with structs left in inconsistent states) while RPCs to
     * clients happen (grumble, grumble, the host package needs to be rewritten...)
     *
     * the current hack is to force the background threads that deal with host and
     * callback state offline early in the shutdown process, do VShutdown, come
     * back and wait for those threads to die, THEN do the state dump
     *
     * BUT, this still has one flaw -- what do we do about rx worker threads that
     * are blocked in the host package making an RPC call to a cm???
     *
     * currently we try to detect if a host struct is in an inconsistent state
     * when we go to save it to disk, and just skip the hosts that we think may
     * be inconsistent (see h_isBusy_r in host.c). This has the problem of causing
     * more InitCallBackState's when we come back up, but the number of hosts in
     * such a state should be small. In the future, we could try to lock hosts
     * (with some deadline so we don't wait forever) before serializing, but at
     * least for now it does not seem worth the trouble.
     */
    if (fs_state.options.fs_state_verify_before_save) {
        ViceLog(0, ("fs_stateSave: performing internal consistency checks before proceeding with state dump\n"));

        if (h_stateVerify(&state)) {
            ViceLog(0, ("fs_stateSave: error: host table consistency checks failed; state dump will not be marked clean\n"));
            verified = 0;
            ret = 1;
        }

        if (cb_stateVerify(&state)) {
            ViceLog(0, ("fs_stateSave: error: callback table consistency checks failed; state dump will not be marked clean\n"));
            verified = 0;
            ret = 1;
        }

        /* if a consistency check asserted the bail flag, reset it */
        state.bail = 0;

        ViceLog(0, ("fs_stateSave: proceeding with dump\n"));
    }

    if (fs_stateCreateDump(&state)) {
        ViceLog(0, ("fs_stateSave: error: dump create failed\n"));
        ret = 1;
        goto done;
    }

    if (h_stateSave(&state)) {
        ViceLog(0, ("fs_stateSave: error: host state dump failed\n"));
        ret = 1;
        goto done;
    }

    if (cb_stateSave(&state)) {
        ViceLog(0, ("fs_stateSave: error: callback state dump failed\n"));
        ret = 1;
        goto done;
    }

    if (!verified) {
        state.bail = 1;
    }

    if (fs_stateCommitDump(&state)) {
        ViceLog(0, ("fs_stateSave: error: dump commit failed\n"));
        ret = 1;
        goto done;
    }

    if (verified) {
        ViceLog(0, ("fs_stateSave: fileserver state dump completed successfully\n"));
    } else {
        ViceLog(0, ("fs_stateSave: fileserver state dump completed, but not marked clean.\n"));
        ViceLog(0, ("fs_stateSave: please save a copy of '%s' for use by technical support\n",
                    state.fn));
    }

 done:
    if (fs_stateFileOpen(&state))
        fs_stateCloseDump(&state);
    fs_stateFree(&state);
    H_UNLOCK;
    return ret;
}
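
/*
 * Illustrative sketch only, not part of the original file: the skip-busy-hosts
 * strategy described in the XXX comment above, i.e. detecting a host struct
 * that may be mid-update (because H_LOCK was dropped for an RPC) and refusing
 * to serialize it.  The real predicate is h_isBusy_r in host.c, which also
 * checks whether the per-host lock is held; the flag names used here
 * (HWHO_INPROGRESS, ALTADDR) come from the OpenAFS host package headers, but
 * treat this simplified function as a hypothetical approximation rather than
 * the actual implementation.
 */
static int
example_hostLooksBusy_r(struct host *host)
{
    /* an RPC to the cache manager is in flight; H_LOCK was dropped and the
     * host struct may be in an inconsistent state, so don't serialize it */
    if (host->hostFlags & HWHO_INPROGRESS)
        return 1;

    /* the host's addresses have not been validated yet; skip it as well */
    if (!(host->hostFlags & ALTADDR))
        return 1;

    /* safe to serialize; a skipped host simply gets a fresh
     * InitCallBackState after the fileserver restarts */
    return 0;
}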