int refresh_clvmd() { int num_responses; char args[1]; // No args really. lvm_response_t *response; int saved_errno; int status; int i; status = _cluster_request(CLVMD_CMD_REFRESH, "*", args, 0, &response, &num_responses); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { fprintf(stderr, "clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { fprintf(stderr, "Error resetting node %s: %s", response[i].node, response[i].response[0] ? response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; }
/*
 * Query every cluster node for the lock mode it currently holds on
 * 'resource' and report the strongest mode seen via *mode.
 *
 * *mode is set to LCK_NULL if no node reports a lock; nodes that are
 * down (EHOSTDOWN) or return an empty response are skipped.  The
 * caller's errno is preserved across the whole call.
 *
 * Returns the status of the underlying _cluster_request().
 *
 * NOTE(review): the matching #if/#ifdef for the #endif below lies
 * outside this view — presumably an alternate signature for an
 * internal-locking build; confirm against the full file.
 */
int query_resource(const char *resource, int *mode)
#endif
{
	int i, status, len, num_responses, saved_errno;
	const char *node = "";	/* empty node string = query all nodes */
	char *args;
	lvm_response_t *response = NULL;

	saved_errno = errno;
	/* args layout: [0] = command flags, [1] = lock flags, [2..] = name + NUL */
	len = strlen(resource) + 3;
	args = alloca(len);
	strcpy(args + 2, resource);

	args[0] = 0;
	args[1] = LCK_CLUSTER_VG;

	status = _cluster_request(CLVMD_CMD_LOCK_QUERY, node, args, len,
				  &response, &num_responses);
	*mode = LCK_NULL;
	for (i = 0; i < num_responses; i++) {
		if (response[i].status == EHOSTDOWN)
			continue;

		if (!response[i].response[0])
			continue;

		/*
		 * All nodes should use CR, or exactly one node should
		 * hold EX.  (PR is obsolete.)  If two nodes report
		 * different locks, something is broken - just return
		 * the more important mode.
		 */
		if (decode_lock_type(response[i].response) > *mode)
			*mode = decode_lock_type(response[i].response);

		log_debug("Lock held for %s, node %s : %s", resource,
			  response[i].node, response[i].response);
	}

	_cluster_free_request(response, num_responses);
	errno = saved_errno;

	return status;
}
int debug_clvmd(int level, int clusterwide) { int num_responses; char args[1]; const char *nodes; lvm_response_t *response; int saved_errno; int status; int i; args[0] = level; if (clusterwide) nodes = "*"; else nodes = "."; status = _cluster_request(CLVMD_CMD_SET_DEBUG, nodes, args, 1, &response, &num_responses); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { fprintf(stderr, "clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { fprintf(stderr, "Error setting debug on node %s: %s", response[i].node, response[i].response[0] ? response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; }
static int _lock_for_cluster(struct cmd_context *cmd, unsigned char clvmd_cmd, uint32_t flags, const char *name) { int status; int i; char *args; const char *node = ""; int len; int dmeventd_mode; int saved_errno = errno; lvm_response_t *response = NULL; int num_responses; assert(name); len = strlen(name) + 3; args = alloca(len); strcpy(args + 2, name); /* Maskoff lock flags */ args[0] = flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK | LCK_NONBLOCK | LCK_HOLD); args[1] = flags & (LCK_LOCAL | LCK_CLUSTER_VG); if (mirror_in_sync()) args[1] |= LCK_MIRROR_NOSYNC_MODE; /* * Must handle tri-state return from dmeventd_monitor_mode. * But DMEVENTD_MONITOR_IGNORE is not propagated across the cluster. */ dmeventd_mode = dmeventd_monitor_mode(); if (dmeventd_mode != DMEVENTD_MONITOR_IGNORE && dmeventd_mode) args[1] |= LCK_DMEVENTD_MONITOR_MODE; if (cmd->partial_activation) args[1] |= LCK_PARTIAL_MODE; /* * VG locks are just that: locks, and have no side effects * so we only need to do them on the local node because all * locks are cluster-wide. * Also, if the lock is exclusive it makes no sense to try to * acquire it on all nodes, so just do that on the local node too. * One exception, is that P_ locks /do/ get distributed across * the cluster because they might have side-effects. */ if (strncmp(name, "P_", 2) && (clvmd_cmd == CLVMD_CMD_LOCK_VG || (flags & LCK_TYPE_MASK) == LCK_EXCL || (flags & LCK_LOCAL) || !(flags & LCK_CLUSTER_VG))) node = "."; status = _cluster_request(clvmd_cmd, node, args, len, &response, &num_responses); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { log_error("clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { log_error("Error locking on node %s: %s", response[i].node, response[i].response[0] ? 
response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; }
static int _lock_for_cluster(struct cmd_context *cmd, unsigned char clvmd_cmd, uint32_t flags, const char *name) { int status; int i; char *args; const char *node = ""; int len; int dmeventd_mode; int saved_errno; lvm_response_t *response = NULL; int num_responses; assert(name); len = strlen(name) + 3; args = alloca(len); strcpy(args + 2, name); /* args[0] holds bottom 8 bits except LCK_LOCAL (0x40). */ args[0] = flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK | LCK_NONBLOCK | LCK_HOLD | LCK_CLUSTER_VG); args[1] = 0; if (flags & LCK_ORIGIN_ONLY) args[1] |= LCK_ORIGIN_ONLY_MODE; if (flags & LCK_REVERT) args[1] |= LCK_REVERT_MODE; if (mirror_in_sync()) args[1] |= LCK_MIRROR_NOSYNC_MODE; if (test_mode()) args[1] |= LCK_TEST_MODE; /* * We propagate dmeventd_monitor_mode() to clvmd faithfully, since * dmeventd monitoring is tied to activation which happens inside clvmd * when locking_type = 3. */ dmeventd_mode = dmeventd_monitor_mode(); if (dmeventd_mode == DMEVENTD_MONITOR_IGNORE) args[1] |= LCK_DMEVENTD_MONITOR_IGNORE; if (dmeventd_mode) args[1] |= LCK_DMEVENTD_MONITOR_MODE; if (cmd->partial_activation) args[1] |= LCK_PARTIAL_MODE; /* * VG locks are just that: locks, and have no side effects * so we only need to do them on the local node because all * locks are cluster-wide. * * P_ locks /do/ get distributed across the cluster because they might * have side-effects. * * SYNC_NAMES and VG_BACKUP use the VG name directly without prefix. 
*/ if (clvmd_cmd == CLVMD_CMD_SYNC_NAMES) { if (flags & LCK_LOCAL) node = NODE_LOCAL; } else if (clvmd_cmd != CLVMD_CMD_VG_BACKUP) { if (strncmp(name, "P_", 2) && (clvmd_cmd == CLVMD_CMD_LOCK_VG || (flags & LCK_LOCAL) || !(flags & LCK_CLUSTER_VG))) node = NODE_LOCAL; else if (flags & LCK_REMOTE) node = NODE_REMOTE; } status = _cluster_request(clvmd_cmd, node, args, len, &response, &num_responses); /* If any nodes were down then display them and return an error */ for (i = 0; i < num_responses; i++) { if (response[i].status == EHOSTDOWN) { log_error("clvmd not running on node %s", response[i].node); status = 0; errno = response[i].status; } else if (response[i].status) { log_error("Error locking on node %s: %s", response[i].node, response[i].response[0] ? response[i].response : strerror(response[i].status)); status = 0; errno = response[i].status; } } saved_errno = errno; _cluster_free_request(response, num_responses); errno = saved_errno; return status; }