Example #1
0
static void corosync_ring_id_store (
	const struct memb_ring_id *memb_ring_id,
	const struct totem_ip_address *addr)
{
	char filename[PATH_MAX];
	int fd;
	int res;

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		get_run_dir(), totemip_print (addr));

	fd = open (filename, O_WRONLY, 0700);
	if (fd == -1) {
		fd = open (filename, O_CREAT|O_RDWR, 0700);
	}
	if (fd == -1) {
		LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR,
			"Couldn't store new ring id %llx to stable storage",
			memb_ring_id->seq);

		corosync_exit_error (COROSYNC_DONE_STORE_RINGID);
	}
	log_printf (LOGSYS_LEVEL_DEBUG,
		"Storing new sequence id for ring %llx", memb_ring_id->seq);
	res = write (fd, &memb_ring_id->seq, sizeof(memb_ring_id->seq));
	close (fd);
	if (res != sizeof(memb_ring_id->seq)) {
		LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR,
			"Couldn't store new ring id %llx to stable storage",
			memb_ring_id->seq);

		corosync_exit_error (COROSYNC_DONE_STORE_RINGID);
	}
}
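A minimal standalone sketch of the same persist-to-disk pattern, assuming nothing beyond POSIX open/write/close; the path and helper name are hypothetical, not part of corosync:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Persist an 8-byte sequence number: open the file for write,
 * fall back to creating it, then write the raw value -- mirroring
 * corosync_ring_id_store above. */
static int store_seq (const char *path, uint64_t seq)
{
	int fd = open (path, O_WRONLY, 0700);

	if (fd == -1) {
		fd = open (path, O_CREAT|O_RDWR, 0700);
	}
	if (fd == -1) {
		perror ("open");
		return -1;
	}
	if (write (fd, &seq, sizeof(seq)) != (ssize_t)sizeof(seq)) {
		perror ("write");
		close (fd);
		return -1;
	}
	close (fd);
	return 0;
}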
/* The function below is required in the config_ch function
 * because totempg_ifaces_print returns the IP address
 * corresponding to the nodeid in a different format.
 * Hence we have this function. */
char *get_node_ip (unsigned int nodeid)
{
    static char iface_string[256 * INTERFACE_MAX];
    char one_iface[64];
    struct totem_ip_address interfaces[INTERFACE_MAX];
    char **status;
    unsigned int iface_count;
    int res;

    iface_string[0] = '\0';

    res = totempg_ifaces_get (nodeid, interfaces, &status, &iface_count);
    if (res == -1) {
        clLog(ERROR,OPN,AIS,
                "totempg_ifaces_get failed");
        return ("no interface found for nodeid");
    }
    assert(iface_count <= INTERFACE_MAX);

    /* Please note that in the openais.conf file, as of now, we specify
     * ONLY ONE interface. The protocol works across multiple interfaces
     * as well, but we are limiting it to work on only one interface.
     */
    sprintf (one_iface, "%s",totemip_print (&interfaces[0]));
    strncat (iface_string, one_iface, sizeof(iface_string)-1);

    return (iface_string);
}
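Because get_node_ip returns a pointer to a static buffer, the value is overwritten on the next call and is not thread-safe. A hedged usage sketch (the caller-side buffer is illustrative, matching how gms_exec_message_handler copies the result in Example #7):

    char node_ip[256 * INTERFACE_MAX] = "";

    /* Copy the string out of get_node_ip's static buffer before
     * anything can call it again. */
    strncpy (node_ip, get_node_ip (nodeid), sizeof(node_ip) - 1);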
void clGmsWrapperUpdateMyIP()
{
    /* This function extracts the latest "this_ip" information
     * about the IP address that this node has bound to
     * and updates a GMS-specific local structure.
     * In the config change functions we update this node's
     * details with the new IP in case there was a network
     * up/down and the node has bound to a new IP. */
    sprintf ((char *)myAddress.value, "%s",totemip_print (this_ip));
    myAddress.length = strlen ((char *)myAddress.value);
#ifndef OPENAIS_TIPC
    if (this_ip->family == AF_INET) {
        myAddress.family = CL_GMS_AF_INET;
    } else {
        if (this_ip->family == AF_INET6) {
            myAddress.family = CL_GMS_AF_INET6;
        } else {
            assert (0);
        }
    }
#else
    myAddress.family = AF_TIPC;
#endif

    /* Also update the global gms data of thisnode with new IP */
    gmsGlobalInfo.config.thisNodeInfo.nodeIpAddress = myAddress;
       
    clLog(DBG,OPN,AIS, "My Local IP address = %s",myAddress.value);
}
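A condensed sketch of the same address-family mapping, assuming the CL_GMS_AF_* constants from the surrounding code; a switch states the three outcomes more directly than the nested ifs:

    switch (this_ip->family) {
    case AF_INET:
        myAddress.family = CL_GMS_AF_INET;
        break;
    case AF_INET6:
        myAddress.family = CL_GMS_AF_INET6;
        break;
    default:
        assert (0); /* unsupported address family */
    }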
Example #4
0
static void corosync_ring_id_create_or_load (
	struct memb_ring_id *memb_ring_id,
	const struct totem_ip_address *addr)
{
	int fd;
	int res = 0;
	char filename[PATH_MAX];

	snprintf (filename, sizeof(filename), "%s/ringid_%s",
		get_run_dir(), totemip_print (addr));
	fd = open (filename, O_RDONLY, 0700);
	/*
	 * If file can be opened and read, read the ring id
	 */
	if (fd != -1) {
		res = read (fd, &memb_ring_id->seq, sizeof (uint64_t));
		close (fd);
	}
	/*
	 * If file could not be opened or read, create a new ring id
	 */
	if ((fd == -1) || (res != sizeof (uint64_t))) {
		memb_ring_id->seq = 0;
		umask(0);
		fd = open (filename, O_CREAT|O_RDWR, 0700);
		if (fd != -1) {
			res = write (fd, &memb_ring_id->seq, sizeof (uint64_t));
			close (fd);
			if (res == -1) {
				LOGSYS_PERROR (errno, LOGSYS_LEVEL_ERROR,
					"Couldn't write ringid file '%s'", filename);

				corosync_exit_error (COROSYNC_DONE_STORE_RINGID);
			}
		} else {
			LOGSYS_PERROR (errno, LOGSYS_LEVEL_ERROR,
				"Couldn't create ringid file '%s'", filename);

			corosync_exit_error (COROSYNC_DONE_STORE_RINGID);
		}
	}

	totemip_copy(&memb_ring_id->rep, addr);
	assert (!totemip_zero_check(&memb_ring_id->rep));
}
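As the read/write calls imply, the ringid file holds a single raw uint64_t sequence number in host byte order. A minimal sketch of a standalone reader under that assumption (the helper name is hypothetical):

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Load the 8-byte ring sequence from a ringid file.
 * Returns 0 on success, -1 if the file is missing or short. */
static int load_seq (const char *path, uint64_t *seq)
{
	int fd = open (path, O_RDONLY);
	ssize_t res;

	if (fd == -1) {
		return -1;
	}
	res = read (fd, seq, sizeof(*seq));
	close (fd);
	return (res == (ssize_t)sizeof(*seq)) ? 0 : -1;
}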
/*
 * Library Interface Implementation
 */
static void message_handler_req_lib_cfg_ringstatusget (
	void *conn,
	void *msg)
{
	struct res_lib_cfg_ringstatusget res_lib_cfg_ringstatusget;
	struct totem_ip_address interfaces[INTERFACE_MAX];
	unsigned int iface_count;
	char **status;
	char *totem_ip_string;
	unsigned int i;

	ENTER("");

	res_lib_cfg_ringstatusget.header.id = MESSAGE_RES_CFG_RINGSTATUSGET;
	res_lib_cfg_ringstatusget.header.size = sizeof (struct res_lib_cfg_ringstatusget);
	res_lib_cfg_ringstatusget.header.error = SA_AIS_OK;

	totempg_ifaces_get (
		totempg_my_nodeid_get(),
		interfaces,
		&status,
		&iface_count);

	res_lib_cfg_ringstatusget.interface_count = iface_count;

	for (i = 0; i < iface_count; i++) {
		totem_ip_string = (char *)totemip_print (&interfaces[i]);
		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_status[i],
			status[i]);
		strcpy ((char *)&res_lib_cfg_ringstatusget.interface_name[i],
			totem_ip_string);
	}
	openais_response_send (
		conn,
		&res_lib_cfg_ringstatusget,
		sizeof (struct res_lib_cfg_ringstatusget));

	LEAVE("");
}
Example #6
0
extern int totem_config_read (
	struct totem_config *totem_config,
	const char **error_string,
	uint64_t *warnings)
{
	int res = 0;
	char *str;
	unsigned int ringnumber = 0;
	int member_count = 0;
	icmap_iter_t iter, member_iter;
	const char *iter_key;
	const char *member_iter_key;
	char ringnumber_key[ICMAP_KEYNAME_MAXLEN];
	char tmp_key[ICMAP_KEYNAME_MAXLEN];
	uint8_t u8;
	uint16_t u16;
	char *cluster_name = NULL;
	int i;
	int local_node_pos;
	int nodeid_set;

	*warnings = 0;

	memset (totem_config, 0, sizeof (struct totem_config));
	totem_config->interfaces = malloc (sizeof (struct totem_interface) * INTERFACE_MAX);
	if (totem_config->interfaces == 0) {
		*error_string = "Out of memory trying to allocate ethernet interface storage area";
		return -1;
	}

	memset (totem_config->interfaces, 0,
		sizeof (struct totem_interface) * INTERFACE_MAX);

	strcpy (totem_config->rrp_mode, "none");

	icmap_get_uint32("totem.version", (uint32_t *)&totem_config->version);

	totem_get_crypto(totem_config);

	if (icmap_get_string("totem.rrp_mode", &str) == CS_OK) {
		strcpy (totem_config->rrp_mode, str);
		free(str);
	}

	icmap_get_uint32("totem.nodeid", &totem_config->node_id);

	totem_config->clear_node_high_bit = 0;
	if (icmap_get_string("totem.clear_node_high_bit", &str) == CS_OK) {
		if (strcmp (str, "yes") == 0) {
			totem_config->clear_node_high_bit = 1;
		}
		free(str);
	}

	icmap_get_uint32("totem.threads", &totem_config->threads);

	icmap_get_uint32("totem.netmtu", &totem_config->net_mtu);

	icmap_get_string("totem.cluster_name", &cluster_name);

	/*
	 * Get things that might change in the future
	 */
	totem_volatile_config_read(totem_config);

	if (icmap_get_string("totem.interface.0.bindnetaddr", &str) != CS_OK) {
		/*
		 * We were not able to find the ring 0 bindnet addr. Try to use
		 * nodelist information instead.
		 */
		config_convert_nodelist_to_interface(totem_config);
	} else {
		free(str);
	}

	iter = icmap_iter_init("totem.interface.");
	while ((iter_key = icmap_iter_next(iter, NULL, NULL)) != NULL) {
		res = sscanf(iter_key, "totem.interface.%[^.].%s", ringnumber_key, tmp_key);
		if (res != 2) {
			continue;
		}

		if (strcmp(tmp_key, "bindnetaddr") != 0) {
			continue;
		}

		member_count = 0;

		ringnumber = atoi(ringnumber_key);
		/*
		 * Get the bind net address
		 */
		if (icmap_get_string(iter_key, &str) == CS_OK) {
			res = totemip_parse (&totem_config->interfaces[ringnumber].bindnet, str,
						     totem_config->interfaces[ringnumber].mcast_addr.family);
			free(str);
		}

		/*
		 * Get interface multicast address
		 */
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastaddr", ringnumber);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			res = totemip_parse (&totem_config->interfaces[ringnumber].mcast_addr, str, 0);
			free(str);
		} else {
			/*
			 * User did not specify an address -> autogenerate one from the
			 * cluster_name key (if available)
			 */
			res = get_cluster_mcast_addr (cluster_name,
					&totem_config->interfaces[ringnumber].bindnet,
					ringnumber,
					&totem_config->interfaces[ringnumber].mcast_addr);
		}

		totem_config->broadcast_use = 0;
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.broadcast", ringnumber);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			if (strcmp (str, "yes") == 0) {
				totem_config->broadcast_use = 1;
				totemip_parse (
					&totem_config->interfaces[ringnumber].mcast_addr,
					"255.255.255.255", 0);
			}
			free(str);
		}

		/*
		 * Get mcast port
		 */
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastport", ringnumber);
		if (icmap_get_uint16(tmp_key, &totem_config->interfaces[ringnumber].ip_port) != CS_OK) {
			if (totem_config->broadcast_use) {
				totem_config->interfaces[ringnumber].ip_port = DEFAULT_PORT + (2 * ringnumber);
			} else {
				totem_config->interfaces[ringnumber].ip_port = DEFAULT_PORT;
			}
		}

		/*
		 * Get the TTL
		 */
		totem_config->interfaces[ringnumber].ttl = 1;

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.ttl", ringnumber);

		if (icmap_get_uint8(tmp_key, &u8) == CS_OK) {
			totem_config->interfaces[ringnumber].ttl = u8;
		}

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.member.", ringnumber);
		member_iter = icmap_iter_init(tmp_key);
		while ((member_iter_key = icmap_iter_next(member_iter, NULL, NULL)) != NULL) {
			if (member_count == 0) {
				if (icmap_get_string("nodelist.node.0.ring0_addr", &str) == CS_OK) {
					free(str);
					*warnings |= TOTEM_CONFIG_WARNING_MEMBERS_IGNORED;
					break;
				} else {
					*warnings |= TOTEM_CONFIG_WARNING_MEMBERS_DEPRECATED;
				}
			}

			if (icmap_get_string(member_iter_key, &str) == CS_OK) {
				res = totemip_parse (&totem_config->interfaces[ringnumber].member_list[member_count++],
						str, 0);
			}
		}
		icmap_iter_finalize(member_iter);

		totem_config->interfaces[ringnumber].member_count = member_count;
		totem_config->interface_count++;
	}
	icmap_iter_finalize(iter);

	/*
	 * Store automatically generated items back to icmap
	 */
	for (i = 0; i < totem_config->interface_count; i++) {
		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastaddr", i);
		if (icmap_get_string(tmp_key, &str) == CS_OK) {
			free(str);
		} else {
			str = (char *)totemip_print(&totem_config->interfaces[i].mcast_addr);
			icmap_set_string(tmp_key, str);
		}

		snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "totem.interface.%u.mcastport", i);
		if (icmap_get_uint16(tmp_key, &u16) != CS_OK) {
			icmap_set_uint16(tmp_key, totem_config->interfaces[i].ip_port);
		}
	}

	totem_config->transport_number = TOTEM_TRANSPORT_UDP;
	if (icmap_get_string("totem.transport", &str) == CS_OK) {
		if (strcmp (str, "udpu") == 0) {
			totem_config->transport_number = TOTEM_TRANSPORT_UDPU;
		}

		if (strcmp (str, "iba") == 0) {
			totem_config->transport_number = TOTEM_TRANSPORT_RDMA;
		}
		free(str);
	}

	free(cluster_name);

	/*
	 * Check existence of nodelist
	 */
	if (icmap_get_string("nodelist.node.0.ring0_addr", &str) == CS_OK) {
		free(str);
		/*
		 * find local node
		 */
		local_node_pos = find_local_node_in_nodelist(totem_config);
		if (local_node_pos != -1) {
			icmap_set_uint32("nodelist.local_node_pos", local_node_pos);

			snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "nodelist.node.%u.nodeid", local_node_pos);

			nodeid_set = (totem_config->node_id != 0);
			if (icmap_get_uint32(tmp_key, &totem_config->node_id) == CS_OK && nodeid_set) {
				*warnings |= TOTEM_CONFIG_WARNING_TOTEM_NODEID_IGNORED;
			}

			/*
			 * Make localnode ring0_addr read only, so we can be sure that local
			 * node never changes. If rebinding to other IP would be in future
			 * supported, this must be changed and handled properly!
			 */
			snprintf(tmp_key, ICMAP_KEYNAME_MAXLEN, "nodelist.node.%u.ring0_addr", local_node_pos);
			icmap_set_ro_access(tmp_key, 0, 1);
			icmap_set_ro_access("nodelist.local_node_pos", 0, 1);
		}

		put_nodelist_members_to_config(totem_config);
	}

	add_totem_config_notification(totem_config);

	return 0;
}
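The icmap traversal above follows one pattern throughout: icmap_iter_init on a key prefix, icmap_iter_next until NULL, then icmap_iter_finalize, freeing every string icmap hands back. A hedged sketch of that pattern in isolation, reusing only the icmap calls already seen in this function (icmap declarations come from the corosync headers):

#include <stdio.h>
#include <stdlib.h>

/* Sketch: print every string value stored under a key prefix.
 * Keys holding non-string values are simply skipped. */
static void dump_prefix (const char *prefix)
{
	icmap_iter_t iter;
	const char *iter_key;
	char *str;

	iter = icmap_iter_init(prefix);
	while ((iter_key = icmap_iter_next(iter, NULL, NULL)) != NULL) {
		if (icmap_get_string(iter_key, &str) == CS_OK) {
			printf("%s = %s\n", iter_key, str);
			free(str);
		}
	}
	icmap_iter_finalize(iter);
}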
Example #7
0
int totemknet_member_add (
	void *knet_context,
	const struct totem_ip_address *local,
	const struct totem_ip_address *member,
	int link_no)
{
	struct totemknet_instance *instance = (struct totemknet_instance *)knet_context;
	int err;
	int port = instance->ip_port[link_no];
	struct sockaddr_storage remote_ss;
	struct sockaddr_storage local_ss;
	int addrlen;

	if (member->nodeid == instance->our_nodeid) {
		return 0; /* Don't add ourselves, we send loopback messages directly */
	}

	/* Keep track of the number of links */
	if (link_no > instance->num_links) {
		instance->num_links = link_no;
	}

	knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet: member_add: %d (%s), link=%d", member->nodeid, totemip_print(member), link_no);
	knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet:      local: %d (%s)", local->nodeid, totemip_print(local));
	if (link_no == 0) {
		if (knet_host_add(instance->knet_handle, member->nodeid)) {
			KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_host_add");
			return -1;
		}

		if (knet_host_set_policy(instance->knet_handle, member->nodeid, instance->link_mode)) {
			KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_set_policy failed");
			return -1;
		}
	}

	/* Casts to remove const */
	totemip_totemip_to_sockaddr_convert((struct totem_ip_address *)member, port+link_no, &remote_ss, &addrlen);
	totemip_totemip_to_sockaddr_convert((struct totem_ip_address *)local, port+link_no, &local_ss, &addrlen);
	err = knet_link_set_config(instance->knet_handle, member->nodeid, link_no, &local_ss, &remote_ss);
	if (err) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_config failed");
		return -1;
	}

	knet_log_printf (LOGSYS_LEVEL_DEBUG, "knet: member_add: Setting link prio to %d",
		    instance->totem_config->interfaces[link_no].knet_link_priority);

	err = knet_link_set_priority(instance->knet_handle, member->nodeid, link_no,
			       instance->totem_config->interfaces[link_no].knet_link_priority);
	if (err) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_priority for nodeid %d, link %d failed", member->nodeid, link_no);
	}

	err = knet_link_set_ping_timers(instance->knet_handle, member->nodeid, link_no,
				  instance->totem_config->interfaces[link_no].knet_ping_interval,
				  instance->totem_config->interfaces[link_no].knet_ping_timeout,
				  instance->totem_config->interfaces[link_no].knet_ping_precision);
	if (err) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_ping_timers for nodeid %d, link %d failed", member->nodeid, link_no);
	}
	err = knet_link_set_pong_count(instance->knet_handle, member->nodeid, link_no,
				       instance->totem_config->interfaces[link_no].knet_pong_count);
	if (err) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_pong_count for nodeid %d, link %d failed", member->nodeid, link_no);
	}

	err = knet_link_set_enable(instance->knet_handle, member->nodeid, link_no, 1);
	if (err) {
		KNET_LOGSYS_PERROR(errno, LOGSYS_LEVEL_ERROR, "knet_link_set_enable for nodeid %d, link %d failed", member->nodeid, link_no);
		return -1;
	}

	return (0);
}
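A hypothetical driver for the function above: register every remote member on every configured link, relying on totemknet_member_add to skip the local node and to log its own failures. The node_list, local_addrs, and count names are illustrative, not totemknet API:

	int link, i;

	for (link = 0; link < configured_link_count; link++) {
		for (i = 0; i < member_count; i++) {
			(void)totemknet_member_add (knet_context,
				&local_addrs[link],
				&node_list[i].addrs[link],
				link);
		}
	}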
static void gms_exec_message_handler (
                                      void *message,
                                      unsigned int nodeid)
{
    mar_req_header_t              header = {0};
    struct VDECL(req_exec_gms_nodejoin) req_exec_gms_nodejoin = {{0}};
    ClGmsViewNodeT               *node = NULL;
    ClRcT                         rc = CL_OK;
    ClGmsClusterMemberT           thisGmsClusterNode = {0};
    char                          nodeIp[256 * INTERFACE_MAX] = "";
    int                           isLocalMsg = 0;
    int                           verCode = 0;
    ClBufferHandleT               bufferHandle = NULL;

    /* Get the ip address string for the given nodeId */
    strncpy(nodeIp, get_node_ip(nodeid), (256 * INTERFACE_MAX)-1);
    if (strcmp(nodeIp, totemip_print(this_ip)) == 0)
    {
        isLocalMsg = 1;
    }

    /* Unmarshall the incoming message */
    rc = clBufferCreate(&bufferHandle);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "Failed to create buffer while unmarshalling the received message. rc 0x%x",rc);
        return;
    }

    memcpy(&header, message, sizeof(mar_req_header_t));

    rc = clBufferNBytesWrite(bufferHandle, (ClUint8T *)message+sizeof(mar_req_header_t), header.size-sizeof(mar_req_header_t));
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                   "Failed to retrieve data from buffer. rc 0x%x",rc);
        goto out_delete;
    }

    rc = unmarshallReqExecGmsNodeJoin(bufferHandle, &req_exec_gms_nodejoin);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,"Failed to unmarshall the data. rc 0x%x",rc);
        goto out_delete;
    }

    verCode = CL_VERSION_CODE(req_exec_gms_nodejoin.version.releaseCode, 
                              req_exec_gms_nodejoin.version.majorVersion,
                              req_exec_gms_nodejoin.version.minorVersion);
    clLog(DBG,OPN,AIS,
          "Received a %d message from version [%d.%d.%d].",req_exec_gms_nodejoin.gmsMessageType,
          req_exec_gms_nodejoin.version.releaseCode, req_exec_gms_nodejoin.version.majorVersion, 
          req_exec_gms_nodejoin.version.minorVersion);
    /* Verify version */
    if (verCode > CL_VERSION_CODE(curVer.releaseCode, curVer.majorVersion, curVer.minorVersion)) {
        /* We received a message from a higher version and don't know
         * how to decode it, so we are discarding it. */
        clLog(NOTICE,OPN,AIS,
              "Version mismatch detected. Discarding the message ");
        goto out_delete;
    }

    // message type & message data
    clLog(DBG,OPN,AIS,"message type %d from groupId %d!\n", req_exec_gms_nodejoin.gmsMessageType, req_exec_gms_nodejoin.gmsGroupId);

    /* This message is from the same version, so process it */
    switch (req_exec_gms_nodejoin.gmsMessageType)
    {
    case CL_GMS_CLUSTER_JOIN_MSG:
        {
            ClUint32T minVersion = CL_VERSION_CODE(5, 0, 0);
            clLog(DBG,OPN,AIS,
                  "Received multicast message for cluster join from ioc node [%#x:%#x]",
                  req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress,
                  req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId);
            clNodeCacheMinVersionGet(NULL, &minVersion);
            if(minVersion >= CL_VERSION_CODE(5, 0, 0) && gAspNativeLeaderElection)
            {
                clLog(DBG, OPN, AIS,
                      "Skipping multicast join since node cache view is used to form the cluster ring");
                goto out_delete;
            }
            node = (ClGmsViewNodeT *) clHeapAllocate(sizeof(ClGmsViewNodeT));
            if (node == NULL)
            {
                clLog (ERROR,OPN,AIS, "clHeapAllocate failed");
                goto out_delete;
            }
            else {
                rc = clVersionVerify(
                                     &(gmsGlobalInfo.config.versionsSupported),
                                     &(req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion)
                                     );
                ringVersion.releaseCode =
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.releaseCode;
                ringVersion.majorVersion=
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.majorVersion;
                ringVersion.minorVersion=
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode.gmsVersion.minorVersion;
                if(rc != CL_OK)
                {
                    ringVersionCheckPassed = CL_FALSE;
                    /* copy the ring version */
                    clGmsCsLeave( &joinCs );
                    clLog (ERROR,OPN,AIS,
                           "Server Version Mismatch detected for this join message");
                    break;
                }

                _clGmsGetThisNodeInfo(&thisGmsClusterNode);
                if( thisGmsClusterNode.nodeId !=
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId)
                {
                    /* TODO This will never happen... */
                    clGmsCsLeave( &joinCs );
                }

                node->viewMember.clusterMember =
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode;
                /* If this is local join, then update the IP address */
                if (thisGmsClusterNode.nodeId ==
                    req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId)
                {
                    memcpy(&node->viewMember.clusterMember.nodeIpAddress,
                           &myAddress, sizeof(ClGmsNodeAddressT));
                }

                rc = _clGmsEngineClusterJoin(req_exec_gms_nodejoin.gmsGroupId,
                                             req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId,
                                             node);
            }
        }
        break;
    case CL_GMS_CLUSTER_EJECT_MSG:
        clLog (DBG,OPN,AIS,
               "Received cluster eject multicast message from ioc node [%#x:%#x]",
               req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress,
               req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId);
        /* Inform the member about the eject by invoking the ejection
         * callback registered, with the reason UNKNOWN. */
        /* The logic below is the same for a leave as well, so we just
         * fall through to the next case. */
        _clGmsGetThisNodeInfo(&thisGmsClusterNode);
        if( req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId ==
            thisGmsClusterNode.nodeId)
        {
            rc = _clGmsCallClusterMemberEjectCallBack(
                                                      req_exec_gms_nodejoin.ejectReason);
            if( rc != CL_OK )
            {
                clLog(ERROR,OPN,AIS,"_clGmsCallEjectCallBack failed with"
                      "rc:0x%x",rc);
            }
        }
    case CL_GMS_CLUSTER_LEAVE_MSG:
        clLog(DBG,OPN,AIS,
              "Received cluster leave multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId);
        rc = _clGmsEngineClusterLeave(req_exec_gms_nodejoin.gmsGroupId,
                                      req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeId);
        break;
    case CL_GMS_GROUP_CREATE_MSG:
        clLog(DBG,OPN,AIS,
              "Received group create multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId);

        rc = _clGmsEngineGroupCreate(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupName,
                                     req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupParams,
                                     req_exec_gms_nodejoin.contextHandle, isLocalMsg);
        break;
    case CL_GMS_GROUP_DESTROY_MSG:
        clLog(DBG,OPN,AIS,
              "Received group destroy multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId);

        rc = _clGmsEngineGroupDestroy(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId,
                                      req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupName,
                                      req_exec_gms_nodejoin.contextHandle, isLocalMsg);
        break;
    case CL_GMS_GROUP_JOIN_MSG:
        clLog(DBG,OPN,AIS,
              "Received group join multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId);

        node = (ClGmsViewNodeT *) clHeapAllocate(sizeof(ClGmsViewNodeT));
        if (!node)
        {
            log_printf (LOG_LEVEL_NOTICE, "clHeapAllocate failed");
            goto out_delete;
        }
        else {
            /* FIXME: Need to verify version */
            memcpy(&node->viewMember.groupMember,&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode,
                   sizeof(ClGmsGroupMemberT));
            memcpy(&node->viewMember.groupData, &req_exec_gms_nodejoin.specificMessage.groupMessage.groupData,
                   sizeof(ClGmsGroupInfoT));
            rc = _clGmsEngineGroupJoin(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId,
                                       node, req_exec_gms_nodejoin.contextHandle, isLocalMsg);
        }
        break;
    case CL_GMS_GROUP_LEAVE_MSG:
        clLog(DBG,OPN,AIS,
              "Received group leave multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberAddress.iocPhyAddress.portId);

        rc = _clGmsEngineGroupLeave(req_exec_gms_nodejoin.specificMessage.groupMessage.groupData.groupId,
                                    req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberId,
                                    req_exec_gms_nodejoin.contextHandle, isLocalMsg);
        break;
    case CL_GMS_COMP_DEATH:
        clLog(DBG,OPN,AIS,
              "Received comp death multicast message");
        rc = _clGmsRemoveMemberOnCompDeath(req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode.memberId);
        break;
    case CL_GMS_LEADER_ELECT_MSG:
        clLog(DBG,OPN,AIS,
              "Received leader elect multicast message from ioc node [%#x:%#x]",
              req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.nodeAddress,
              req_exec_gms_nodejoin.specificMessage.gmsClusterNode.nodeAddress.iocPhyAddress.portId);
        rc = _clGmsEnginePreferredLeaderElect(req_exec_gms_nodejoin.specificMessage.gmsClusterNode, 
                                              req_exec_gms_nodejoin.contextHandle,
                                              isLocalMsg);
        break;
    case CL_GMS_SYNC_MESSAGE:
        clLog(DBG,OPN,AIS,
              "Received gms synch multicast message");
        rc = _clGmsEngineGroupInfoSync((ClGmsGroupSyncNotificationT *)(req_exec_gms_nodejoin.dataPtr));
        clHeapFree(((ClGmsGroupSyncNotificationT *)req_exec_gms_nodejoin.dataPtr)->groupInfoList);
        clHeapFree(((ClGmsGroupSyncNotificationT *)req_exec_gms_nodejoin.dataPtr)->groupMemberList);
        clHeapFree(req_exec_gms_nodejoin.dataPtr);
        break;

    case CL_GMS_GROUP_MCAST_MSG:
        _clGmsEngineMcastMessageHandler(
                                        &(req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.gmsGroupNode),
                                        &(req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.groupData),
                                        req_exec_gms_nodejoin.specificMessage.mcastMessage.userDataSize,
                                        req_exec_gms_nodejoin.dataPtr);
        break;
    default:
        clLogMultiline(ERROR,OPN,AIS,
                       "Openais GMS wrapper received Message wih invalid [MsgType=%x]. \n"
                       "This could be because of multicast port clashes.",
                       req_exec_gms_nodejoin.gmsMessageType);
        goto out_delete;
    }
    clLog(TRACE,OPN,AIS,
          "Processed the received message. Returning");
    out_delete:
    clBufferDelete(&bufferHandle);
}
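The version gate in the handler above works because CL_VERSION_CODE packs release, major, and minor into a single integer that compares in version order. A sketch of the idea, assuming the usual shift-and-or layout (the real macro lives in the Clovis headers):

/* Illustrative definition only. */
#define VERSION_CODE(rel, maj, min) \
        (((rel) << 16) | ((maj) << 8) | (min))

/* A 5.1.0 message exceeds a 5.0.0 receiver's code
 * (0x050100 > 0x050000), so the receiver discards it. */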