/* Return pipe FDs & child PID if successful */
int fork_new_instance(int nodeid, int *vq_sock, pid_t *childpid)
{
	int pipes[2];
	pid_t pid;

	if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK, 0, pipes)) {
		return -1;
	}
	parent_socket = pipes[0];

	switch ((pid = fork())) {
	case -1:
		perror("fork failed");
		return -1;
	case 0:
		/* child process - continue below */
		break;
	default:
		/* parent process */
		*vq_sock = pipes[1];
		*childpid = pid;
		return 0;
	}

	our_nodeid = nodeid;
	poll_loop = qb_loop_create();

	if (icmap_get_uint32("quorum.device.timeout", &qdevice_timeout) != CS_OK) {
		qdevice_timeout = VOTEQUORUM_QDEVICE_DEFAULT_TIMEOUT;
	}

	set_local_node_pos(&corosync_api);
	load_quorum_instance(&corosync_api);

	qb_loop_poll_add(poll_loop, QB_LOOP_MED, parent_socket, POLLIN, NULL, parent_pipe_read_fn);

	/* Start it up! */
	initial_sync(nodeid);

	qb_loop_run(poll_loop);

	return 0;
}
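/*
 * Hypothetical caller sketch (not part of the original source): it shows how a
 * parent might use fork_new_instance() and distinguish the two return paths,
 * since the child only returns after its qb loop exits and never fills in
 * *childpid. The helper name and error handling are assumptions for
 * illustration; it needs <stdlib.h> for exit().
 */
static int example_spawn_instance(int nodeid)
{
	int vq_sock = -1;
	pid_t child = 0;

	if (fork_new_instance(nodeid, &vq_sock, &child) == -1) {
		return -1;
	}
	if (child == 0) {
		/* Child path: we only get here once the child's poll loop has finished */
		exit(0);
	}
	/* Parent path: vq_sock is our end of the SOCK_SEQPACKET pair */
	return vq_sock;
}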
/*
 * This is different from the one in totemconfig.c in that we already
 * know the 'local' node ID, so we can just search for that.
 * It needs to be here rather than at main config read time because it is
 * (obviously) going to be different for each instance.
 */
static void set_local_node_pos(struct corosync_api_v1 *api)
{
	icmap_iter_t iter;
	uint32_t node_pos;
	char name_str[ICMAP_KEYNAME_MAXLEN];
	uint32_t nodeid;
	const char *iter_key;
	int res;
	int found = 0;

	iter = icmap_iter_init("nodelist.node.");
	while ((iter_key = icmap_iter_next(iter, NULL, NULL)) != NULL) {
		res = sscanf(iter_key, "nodelist.node.%u.%s", &node_pos, name_str);
		if (res != 2) {
			continue;
		}
		if (strcmp(name_str, "nodeid")) {
			continue;
		}
		res = icmap_get_uint32(iter_key, &nodeid);
		if (res == CS_OK) {
			if (nodeid == our_nodeid) {
				found = 1;
				res = icmap_set_uint32("nodelist.local_node_pos", node_pos);
			}
		}
	}
	icmap_iter_finalize(iter);

	if (!found) {
		/*
		 * This probably indicates a dynamically-added node:
		 * set the pos to zero and use the votes of the
		 * first node in corosync.conf
		 */
		res = icmap_set_uint32("nodelist.local_node_pos", 0);
	}
}
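/*
 * For illustration only (not in the original source): set_local_node_pos()
 * walks icmap keys of the form "nodelist.node.<pos>.nodeid", which a
 * corosync.conf nodelist such as the following would produce:
 *
 *	nodelist {
 *		node {
 *			ring0_addr: 192.168.1.1
 *			nodeid: 1
 *		}
 *		node {
 *			ring0_addr: 192.168.1.2
 *			nodeid: 2
 *		}
 *	}
 *
 * i.e. keys nodelist.node.0.nodeid = 1 and nodelist.node.1.nodeid = 2; the
 * position whose nodeid matches our_nodeid is stored back as
 * nodelist.local_node_pos. The addresses and IDs are example values.
 */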
static unsigned int service_unlink_and_exit (
	struct corosync_api_v1 *corosync_api,
	const char *service_name,
	unsigned int service_ver)
{
	unsigned short service_id;
	char *name_sufix;
	int res;
	const char *iter_key_name;
	icmap_iter_t iter;
	char key_name[ICMAP_KEYNAME_MAXLEN];
	unsigned int found_service_ver;
	char *found_service_name;
	int service_found;

	name_sufix = strrchr (service_name, '_');
	if (name_sufix)
		name_sufix++;
	else
		name_sufix = (char *)service_name;

	service_found = 0;
	found_service_name = NULL;
	iter = icmap_iter_init("internal_configuration.service.");
	while ((iter_key_name = icmap_iter_next(iter, NULL, NULL)) != NULL) {
		res = sscanf(iter_key_name, "internal_configuration.service.%hu.%s", &service_id, key_name);
		if (res != 2) {
			continue;
		}

		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%hu.name", service_id);
		free(found_service_name);
		if (icmap_get_string(key_name, &found_service_name) != CS_OK) {
			continue;
		}

		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.ver", service_id);
		if (icmap_get_uint32(key_name, &found_service_ver) != CS_OK) {
			continue;
		}

		if (service_ver == found_service_ver && strcmp(found_service_name, service_name) == 0) {
			free(found_service_name);
			service_found = 1;
			break;
		}
	}

	icmap_iter_finalize(iter);

	if (service_found && service_id < SERVICES_COUNT_MAX
	    && corosync_service[service_id] != NULL) {
		if (corosync_service[service_id]->exec_exit_fn) {
			res = corosync_service[service_id]->exec_exit_fn ();
			if (res == -1) {
				return (-1);
			}
		}

		log_printf(LOGSYS_LEVEL_NOTICE,
			"Service engine unloaded: %s",
			corosync_service[service_id]->name);

		corosync_service[service_id] = NULL;

		cs_ipcs_service_destroy (service_id);

		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.handle", service_id);
		icmap_delete(key_name);

		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.name", service_id);
		icmap_delete(key_name);

		snprintf(key_name, ICMAP_KEYNAME_MAXLEN, "internal_configuration.service.%u.ver", service_id);
		icmap_delete(key_name);
	}

	return (0);
}
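/*
 * Illustration (not in the original source): the loop above matches services
 * against icmap entries laid out roughly as
 *
 *	internal_configuration.service.0.name   = "<service engine name>"
 *	internal_configuration.service.0.ver    = 0
 *	internal_configuration.service.0.handle = <loader handle>
 *
 * and, once the matching service engine is unloaded, the .handle, .name and
 * .ver keys for that service_id are deleted. The layout shown here is an
 * assumption inferred from the key names used in the code.
 */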
static void totem_volatile_config_read (struct totem_config *totem_config)
{
	char *str;

	icmap_get_uint32("totem.token", &totem_config->token_timeout);
	icmap_get_uint32("totem.token_retransmit", &totem_config->token_retransmit_timeout);
	icmap_get_uint32("totem.hold", &totem_config->token_hold_timeout);
	icmap_get_uint32("totem.token_retransmits_before_loss_const", &totem_config->token_retransmits_before_loss_const);
	icmap_get_uint32("totem.join", &totem_config->join_timeout);
	icmap_get_uint32("totem.send_join", &totem_config->send_join_timeout);
	icmap_get_uint32("totem.consensus", &totem_config->consensus_timeout);
	icmap_get_uint32("totem.merge", &totem_config->merge_timeout);
	icmap_get_uint32("totem.downcheck", &totem_config->downcheck_timeout);
	icmap_get_uint32("totem.fail_recv_const", &totem_config->fail_to_recv_const);
	icmap_get_uint32("totem.seqno_unchanged_const", &totem_config->seqno_unchanged_const);
	icmap_get_uint32("totem.rrp_token_expired_timeout", &totem_config->rrp_token_expired_timeout);
	icmap_get_uint32("totem.rrp_problem_count_timeout", &totem_config->rrp_problem_count_timeout);
	icmap_get_uint32("totem.rrp_problem_count_threshold", &totem_config->rrp_problem_count_threshold);
	icmap_get_uint32("totem.rrp_problem_count_mcast_threshold", &totem_config->rrp_problem_count_mcast_threshold);
	icmap_get_uint32("totem.rrp_autorecovery_check_timeout", &totem_config->rrp_autorecovery_check_timeout);
	icmap_get_uint32("totem.heartbeat_failures_allowed", &totem_config->heartbeat_failures_allowed);
	icmap_get_uint32("totem.max_network_delay", &totem_config->max_network_delay);
	icmap_get_uint32("totem.window_size", &totem_config->window_size);
	icmap_get_uint32("totem.max_messages", &totem_config->max_messages);
	icmap_get_uint32("totem.miss_count_const", &totem_config->miss_count_const);

	if (icmap_get_string("totem.vsftype", &str) == CS_OK) {
		totem_config->vsf_type = str;
	}
}
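/*
 * For illustration only (not in the original source): the keys above come from
 * the totem{} block of corosync.conf, e.g. a fragment such as
 *
 *	totem {
 *		token: 3000
 *		token_retransmits_before_loss_const: 10
 *		join: 50
 *		consensus: 3600
 *		max_messages: 17
 *	}
 *
 * is loaded into icmap as totem.token, totem.join, and so on. The numeric
 * values are only examples; these settings are "volatile" in the sense that
 * they may be re-read when the configuration changes.
 */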
extern int totem_config_read (
	struct totem_config *totem_config,
	const char **error_string,
	uint64_t *warnings)
{
	int res = 0;
	char *str;
	unsigned int ringnumber = 0;
	int member_count = 0;
	icmap_iter_t iter, member_iter;
	const char *iter_key;
	const char *member_iter_key;
	char ringnumber_key[ICMAP_KEYNAME_MAXLEN];
	char tmp_key[ICMAP_KEYNAME_MAXLEN];
	uint8_t u8;
	uint16_t u16;
	char *cluster_name = NULL;
	int i;
	int local_node_pos;
	int nodeid_set;

	*warnings = 0;

	memset (totem_config, 0, sizeof (struct totem_config));
	totem_config->interfaces = malloc (sizeof (struct totem_interface) * INTERFACE_MAX);
	if (totem_config->interfaces == 0) {
		*error_string = "Out of memory trying to allocate ethernet interface storage area";
		return -1;
	}

	memset (totem_config->interfaces, 0,
		sizeof (struct totem_interface) * INTERFACE_MAX);

	strcpy (totem_config->rrp_mode, "none");

	icmap_get_uint32("totem.version", (uint32_t *)&totem_config->version);

	totem_get_crypto(totem_config);

	if (icmap_get_string("totem.rrp_mode", &str) == CS_OK) {
		strcpy (totem_config->rrp_mode, str);
		free(str);
	}

	icmap_get_uint32("totem.nodeid", &totem_config->node_id);

	totem_config->clear_node_high_bit = 0;
	if (icmap_get_string("totem.clear_node_high_bit", &str) == CS_OK) {
		if (strcmp (str, "yes") == 0) {
			totem_config->clear_node_high_bit = 1;
		}
		free(str);
	}

	icmap_get_uint32("totem.threads", &totem_config->threads);

	icmap_get_uint32("totem.netmtu", &totem_config->net_mtu);

	icmap_get_string("totem.cluster_name", &cluster_name);

	/*
	 * Get things that might change in the future
	 */
	totem_volatile_config_read(totem_config);

	if (icmap_get_string("totem.interface.0.bindnetaddr", &str) != CS_OK) {
		/*
		 * We were not able to find the ring 0 bindnet address.
		 * Try to use the nodelist information instead.
		 */
		config_convert_nodelist_to_interface(totem_config);
	} else {
		free(str);
	}

	iter = icmap_iter_init("totem.interface.");
	while ((iter_key = icmap_iter_next(iter, NULL, NULL)) != NULL) {
		res = sscanf(iter_key, "totem.interface.%[^.].%s", ringnumber_key, tmp_key);
		if (res != 2) {
			continue;
		}

		if (strcmp(tmp_key, "bindnetaddr") != 0) {
			continue;
		}

		member_count = 0;

		ringnumber = atoi(ringnumber_key);

		/*
		 * Get the bind net address
		 */
		if (icmap_get_string(iter_key, &str) == CS_OK) {
			res = totemip_parse (&totem_config->interfaces[ringnumber].bindnet, str,
						totem_config->interfaces[ringnumber].mcast_addr.family);
			free(str);
		}

		/*
		 * Get interface multicast address
		 */
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastaddr", ringnumber);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			res = totemip_parse (&totem_config->interfaces[ringnumber].mcast_addr, str, 0);
			free(str);
		} else {
			/*
			 * User did not specify an address -> autogenerate one from the
			 * cluster_name key (if available)
			 */
			res = get_cluster_mcast_addr (cluster_name,
					&totem_config->interfaces[ringnumber].bindnet,
					ringnumber,
					&totem_config->interfaces[ringnumber].mcast_addr);
		}

		totem_config->broadcast_use = 0;
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.broadcast", ringnumber);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			if (strcmp (str, "yes") == 0) {
				totem_config->broadcast_use = 1;
				totemip_parse (
					&totem_config->interfaces[ringnumber].mcast_addr,
					"255.255.255.255", 0);
			}
			free(str);
		}

		/*
		 * Get mcast port
		 */
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastport", ringnumber);
		if (icmap_get_uint16(tmp_key, &totem_config->interfaces[ringnumber].ip_port) != CS_OK) {
			if (totem_config->broadcast_use) {
				totem_config->interfaces[ringnumber].ip_port = DEFAULT_PORT + (2 * ringnumber);
			} else {
				totem_config->interfaces[ringnumber].ip_port = DEFAULT_PORT;
			}
		}

		/*
		 * Get the TTL
		 */
		totem_config->interfaces[ringnumber].ttl = 1;

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.ttl", ringnumber);
		if (icmap_get_uint8(tmp_key, &u8) == CS_OK) {
			totem_config->interfaces[ringnumber].ttl = u8;
		}

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.member.", ringnumber);
		member_iter = icmap_iter_init(tmp_key);
		while ((member_iter_key = icmap_iter_next(member_iter, NULL, NULL)) != NULL) {
			if (member_count == 0) {
				if (icmap_get_string("nodelist.node.0.ring0_addr", &str) == CS_OK) {
					free(str);
					*warnings |= TOTEM_CONFIG_WARNING_MEMBERS_IGNORED;
					break;
				} else {
					*warnings |= TOTEM_CONFIG_WARNING_MEMBERS_DEPRECATED;
				}
			}

			if (icmap_get_string(member_iter_key, &str) == CS_OK) {
				res = totemip_parse (&totem_config->interfaces[ringnumber].member_list[member_count++],
						str, 0);
			}
		}
		icmap_iter_finalize(member_iter);

		totem_config->interfaces[ringnumber].member_count = member_count;
		totem_config->interface_count++;
	}
	icmap_iter_finalize(iter);

	/*
	 * Store automatically generated items back to icmap
	 */
	for (i = 0; i < totem_config->interface_count; i++) {
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastaddr", i);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			free(str);
		} else {
			str = (char *)totemip_print(&totem_config->interfaces[i].mcast_addr);
			icmap_set_string(tmp_key, str);
		}

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastport", i);
		if (icmap_get_uint16(tmp_key, &u16) != CS_OK) {
			icmap_set_uint16(tmp_key, totem_config->interfaces[i].ip_port);
		}
	}

	totem_config->transport_number = TOTEM_TRANSPORT_UDP;
	if (icmap_get_string("totem.transport", &str) == CS_OK) {
		if (strcmp (str, "udpu") == 0) {
			totem_config->transport_number = TOTEM_TRANSPORT_UDPU;
		}

		if (strcmp (str, "iba") == 0) {
			totem_config->transport_number = TOTEM_TRANSPORT_RDMA;
		}
		free(str);
	}

	free(cluster_name);

	/*
	 * Check existence of nodelist
	 */
	if (icmap_get_string("nodelist.node.0.ring0_addr", &str) == CS_OK) {
		free(str);
		/*
		 * find local node
		 */
		local_node_pos = find_local_node_in_nodelist(totem_config);
		if (local_node_pos != -1) {
			icmap_set_uint32("nodelist.local_node_pos", local_node_pos);

			snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "nodelist.node.%u.nodeid", local_node_pos);

			nodeid_set = (totem_config->node_id != 0);
			if (icmap_get_uint32(tmp_key, &totem_config->node_id) == CS_OK && nodeid_set) {
				*warnings |= TOTEM_CONFIG_WARNING_TOTEM_NODEID_IGNORED;
			}

			/*
			 * Make the local node's ring0_addr read-only, so we can be sure that
			 * the local node never changes. If rebinding to another IP is ever
			 * supported, this must be changed and handled properly!
			 */
			snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "nodelist.node.%u.ring0_addr", local_node_pos);
			icmap_set_ro_access(tmp_key, 0, 1);
			icmap_set_ro_access("nodelist.local_node_pos", 0, 1);
		}

		put_nodelist_members_to_config(totem_config);
	}

	add_totem_config_notification(totem_config);

	return 0;
}
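/*
 * For illustration only (not in the original source): the totem.interface.*
 * keys walked above correspond to interface{} sub-blocks of corosync.conf,
 * for example
 *
 *	totem {
 *		transport: udpu
 *		interface {
 *			ringnumber: 0
 *			bindnetaddr: 192.168.1.0
 *			mcastport: 5405
 *			ttl: 1
 *		}
 *	}
 *
 * which yields icmap keys such as totem.interface.0.bindnetaddr and
 * totem.interface.0.mcastport. The addresses and port are example values; if
 * no mcastaddr is given, one is autogenerated from cluster_name as in the
 * code above.
 */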
static void totemknet_refresh_config(
	int32_t event,
	const char *key_name,
	struct icmap_notify_value new_val,
	struct icmap_notify_value old_val,
	void *user_data)
{
	uint8_t reloading;
	uint32_t value;
	uint32_t link_no;
	size_t num_nodes;
	uint16_t host_ids[KNET_MAX_HOST];
	int i;
	int err;
	char path[ICMAP_KEYNAME_MAXLEN];
	struct totemknet_instance *instance = (struct totemknet_instance *)user_data;

	ENTER();

	/*
	 * If a full reload is in progress then don't do anything until it has
	 * finished, so that the whole configuration can be applied atomically.
	 */
	if (icmap_get_uint8("config.totemconfig_reload_in_progress", &reloading) == CS_OK && reloading) {
		return;
	}

	if (icmap_get_uint32("totem.knet_pmtud_interval", &value) == CS_OK) {
		instance->totem_config->knet_pmtud_interval = value;
		knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_pmtud_interval now %d", value);
		err = knet_handle_pmtud_setfreq(instance->knet_handle, instance->totem_config->knet_pmtud_interval);
		if (err) {
			KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_WARNING, "knet_handle_pmtud_setfreq failed");
		}
	}

	/* Get link parameters */
	for (i = 0; i <= instance->num_links; i++) {
		sprintf(path, "totem.interface.%d.knet_link_priority", i);
		if (icmap_get_uint32(path, &value) == CS_OK) {
			instance->totem_config->interfaces[i].knet_link_priority = value;
			knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_link_priority on link %d now %d", i, value);
		}

		sprintf(path, "totem.interface.%d.knet_ping_interval", i);
		if (icmap_get_uint32(path, &value) == CS_OK) {
			instance->totem_config->interfaces[i].knet_ping_interval = value;
			knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_ping_interval on link %d now %d", i, value);
		}

		sprintf(path, "totem.interface.%d.knet_ping_timeout", i);
		if (icmap_get_uint32(path, &value) == CS_OK) {
			instance->totem_config->interfaces[i].knet_ping_timeout = value;
			knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_ping_timeout on link %d now %d", i, value);
		}

		sprintf(path, "totem.interface.%d.knet_ping_precision", i);
		if (icmap_get_uint32(path, &value) == CS_OK) {
			instance->totem_config->interfaces[i].knet_ping_precision = value;
			knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_ping_precision on link %d now %d", i, value);
		}

		sprintf(path, "totem.interface.%d.knet_pong_count", i);
		if (icmap_get_uint32(path, &value) == CS_OK) {
			instance->totem_config->interfaces[i].knet_pong_count = value;
			knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet_pong_count on link %d now %d", i, value);
		}
	}

	/* Configure link parameters for each node */
	err = knet_host_get_host_list(instance->knet_handle, host_ids, &num_nodes);
	if (err != 0) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_host_get_host_list failed");
	}

	for (i = 0; i < num_nodes; i++) {
		for (link_no = 0; link_no < instance->num_links; link_no++) {

			err = knet_link_set_ping_timers(instance->knet_handle, host_ids[i], link_no,
					instance->totem_config->interfaces[link_no].knet_ping_interval,
					instance->totem_config->interfaces[link_no].knet_ping_timeout,
					instance->totem_config->interfaces[link_no].knet_ping_precision);
			if (err) {
				KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_ping_timers for node %d link %d failed", host_ids[i], link_no);
			}

			err = knet_link_set_pong_count(instance->knet_handle, host_ids[i], link_no,
					instance->totem_config->interfaces[link_no].knet_pong_count);
			if (err) {
				KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_pong_count for node %d link %d failed", host_ids[i], link_no);
			}

			err = knet_link_set_priority(instance->knet_handle, host_ids[i], link_no,
					instance->totem_config->interfaces[link_no].knet_link_priority);
			if (err) {
				KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_priority for node %d link %d failed", host_ids[i], link_no);
			}
		}
	}

	LEAVE();
}
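/*
 * Sketch (not part of the original source) of how a callback with the
 * icmap_notify_fn_t signature above might be registered so it fires on any
 * change under the "totem." prefix. It assumes the icmap_track_add() API and
 * ICMAP_TRACK_* flags from corosync's icmap.h; the helper name is
 * hypothetical.
 */
static void example_add_knet_config_notification(struct totemknet_instance *instance)
{
	icmap_track_t track = NULL;

	/* Track add/delete/modify events for every key starting with "totem." */
	if (icmap_track_add("totem.",
		ICMAP_TRACK_ADD | ICMAP_TRACK_DELETE | ICMAP_TRACK_MODIFY | ICMAP_TRACK_PREFIX,
		totemknet_refresh_config, instance, &track) != CS_OK) {
		knet_log_printf(LOGSYS_LEVEL_ERROR, "Unable to add totem config tracker");
	}
}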