/*
 * Allocates and zero-fills the GMS master database: an array of
 * `numOfGroups` ClGmsDbT entries, returned through *gmsDb.
 *
 * Returns CL_OK on success, CL_ERR_NULL_POINTER if gmsDb is NULL,
 * CL_ERR_INVALID_PARAMETER if the requested size would overflow,
 * and CL_ERR_NO_MEMORY if the allocation fails.
 * Ownership of the array passes to the caller.
 */
ClRcT _clGmsDbOpen(
            CL_IN      const    ClUint64T   numOfGroups,
            CL_INOUT   ClGmsDbT** const     gmsDb)
{
    if (gmsDb == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    /* Guard the size computation: sizeof(ClGmsDbT) * numOfGroups must not
     * wrap around, or we would allocate a tiny buffer and overrun it below. */
    if (numOfGroups > ((ClUint64T)-1) / sizeof(ClGmsDbT))
    {
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);
    }

    *gmsDb = (ClGmsDbT *) clHeapAllocate(sizeof(ClGmsDbT)
                                        * numOfGroups);
    if (*gmsDb == NULL)
    {
        return CL_GMS_RC(CL_ERR_NO_MEMORY);
    }

    memset(*gmsDb, 0, sizeof(ClGmsDbT) * numOfGroups);

    clLog(DBG,GEN,DB,
            "Created GMS master database successfully");

    return CL_OK;
}
/*
 * Parses `argv_str` as a non-negative base-10 integer into *num.
 *
 * Returns CL_OK on success; CL_ERR_NULL_POINTER if either pointer is NULL;
 * CL_ERR_INVALID_PARAMETER on range error, negative value, empty string,
 * or trailing non-numeric characters.  *num is set to 0 on any failure.
 */
static ClRcT  _clGmsCliGetNumeric(
            CL_IN    const   ClCharT* const argv_str,
            CL_OUT   ClUint64T* const num)
{
    ClInt64T cnum = 0;
    ClCharT *end  = NULL;

    /* Both the input string and the output slot must be valid */
    if ((num == NULL) || (argv_str == NULL))
        return CL_GMS_RC(CL_ERR_NULL_POINTER);

    *num = 0;
    errno = 0;

    /* Keep the signed result signed so the negativity check below works;
     * the original cast to ClUint64T defeated that intent. */
    cnum = (ClInt64T)strtoll(argv_str, &end, 10);

    if ((errno == EINVAL) || (errno == ERANGE))
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);

    /* Reject empty input and trailing garbage (e.g. "12x") */
    if ((end == argv_str) || (*end != '\0'))
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);

    if (cnum < 0)
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);

    *num = (ClUint64T) cnum;

    return CL_OK;
}
/*-----------------------------------------------------------------------------
 * Cluster Leader Elect API
 *---------------------------------------------------------------------------*/
/*
 * Synchronously asks the GMS server to run a cluster leader election,
 * optionally proposing `preferredLeader`.  On success the elected leader,
 * deputy, and whether leadership changed are written to the out parameters.
 *
 * The handle is checked out for the duration of the call and is always
 * checked back in, on every path.
 */
ClRcT clGmsClusterLeaderElect(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      preferredLeader,
    CL_INOUT    ClGmsNodeIdT                     *leader,
    CL_INOUT    ClGmsNodeIdT                     *deputy,
    CL_INOUT    ClBoolT                          *leadershipChanged)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterLeaderElectRequestT      req = {0};
    ClGmsClusterLeaderElectResponseT    *res = NULL;

    if ((leader == NULL) || (deputy == NULL) || (leadershipChanged == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: the handle was checked out above; check it back in
         * before failing, otherwise the checkout reference leaks. */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(LEA,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    clGmsMutexLock( gms_instance_ptr->response_mutex);
    req.gmsHandle = gmsHandle;
    req.preferredLeaderNode = preferredLeader;

    /* res is allocated by the RMD layer only on success; we own and free it */
    rc = cl_gms_cluster_leader_elect_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }

    rc = res->rc;
    *leader = res->leader;
    *deputy = res->deputy;
    *leadershipChanged = res->leadershipChanged;

    clHeapFree((void*)res);

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(LEA,NA,
                   "\nclHandleCheckin failed");
    }

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Member Get Async API
 *---------------------------------------------------------------------------*/
/*
 * Requests information about cluster member `nodeId` asynchronously: the
 * result is delivered later through the registered
 * clGmsClusterMemberGetCallback with the caller's `invocation` tag.
 *
 * Returns CL_ERR_NO_CALLBACK if no callback was registered at init time.
 * The handle checkout is always balanced with a checkin.
 */
ClRcT clGmsClusterMemberGetAsync(
    CL_IN const ClGmsHandleT   gmsHandle,
    CL_IN const ClInvocationT  invocation,
    CL_IN const ClGmsNodeIdT   nodeId)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterMemberGetAsyncRequestT   req = {0};
    ClGmsClusterMemberGetAsyncResponseT *res = NULL;

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }
    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: must still check the handle back in on this path */
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_checkin;
    }

    if (gms_instance_ptr->callbacks.clGmsClusterMemberGetCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin;
    }

    req.gmsHandle  = gmsHandle;
    req.nodeId     = nodeId;
    req.invocation = invocation;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));
    if (rc != CL_OK)
    {
        goto error_checkin;
    }

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    rc = cl_gms_cluster_member_get_async_rmd(&req, 0 /* use def. timeout */,
                                             &res);

    /* BUG FIX: on success the RMD layer allocates a response even for the
     * async call; the original code leaked it. */
    if ((rc == CL_OK) && (res != NULL))
    {
        clHeapFree((void*)res);
    }

    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

error_checkin:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                    "\nclHandleCheckin failed");
    }

    return rc;
}
/*-----------------------------------------------------------------------------
 * Finalize API
 *---------------------------------------------------------------------------*/
/*
 * Finalizes a GMS client handle.  Marks the instance as finalizing under
 * the response mutex (so a concurrent finalize fails with
 * CL_ERR_INVALID_HANDLE), deletes the mutex, destroys the handle, and
 * releases the checkout reference.
 */
ClRcT clGmsFinalize(
    CL_IN const ClGmsHandleT gmsHandle)
{
    struct gms_instance *gms_instance_ptr = NULL;
    ClRcT rc = CL_OK;

    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void **)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
        {
            clLogError(GEN,DB,
                       "\nclHandleCheckin Error");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    rc = clGmsMutexLock(gms_instance_ptr->response_mutex);
    if (rc != CL_OK)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
        {
            clLogError(GEN,DB,
                       "\nclHandleCheckin Error");
        }
        return rc;
    }

    /*
     * Another thread has already started finalizing
     */
    if (gms_instance_ptr->finalize) {
        clGmsMutexUnlock(gms_instance_ptr->response_mutex);
        if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
        {
            clLogError(GEN,DB,
                       "\nclHandleCheckin Error");
        }
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    gms_instance_ptr->finalize = 1;

    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    clGmsMutexDelete(gms_instance_ptr->response_mutex);

    /* NOTE(review): handle is destroyed before the final checkin; this is
     * the original ordering and relies on clHandle deferring destruction
     * until the last checkin — confirm against the clHandle API contract. */
    if ((clHandleDestroy(gmsHandleDb, gmsHandle)) != CL_OK)
    {
        clLogError(GEN,NA,
                   "\nclHandleDestroy Error");
    }

    if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
    {
        clLogError(GEN,NA,
                   "\nclHandleCheckin Error");
    }

    return CL_GMS_RC(rc);
}
/*----------------------------------------------------------------------------
 *  Cluster Track Callback Handler
 *---------------------------------------------------------------------------*/
/*
 * Dispatches a cluster-track notification to the user's registered
 * clGmsClusterTrackCallback.  `res` is heap-allocated by the caller and
 * owned by this handler: it (and its embedded notification array) is
 * freed on every path.  The user callback must copy any data it wants
 * to retain.
 */
ClRcT clGmsClusterTrackCallbackHandler(
    CL_IN   ClGmsClusterTrackCallbackDataT* const res)
{
    ClRcT rc = CL_OK;
    struct gms_instance *gms_instance_ptr = NULL;
    ClGmsHandleT gmsHandle = CL_GMS_INVALID_HANDLE;

    CL_ASSERT(res != NULL);
    clLog(INFO,NA,NA,"received cluster track callback");

    gmsHandle = res->gmsHandle;
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        goto error_free_res;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: checkout succeeded, so route through the checkin label
         * instead of skipping it and leaking the handle reference. */
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_checkin_free_res;
    }

    if (gms_instance_ptr->callbacks.clGmsClusterTrackCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin_free_res;
    }

    /*
     * Calling the user's callback function with the data.  The user cannot
     * free the data we provide.  If it needs to retain it, it has to copy
     * it out from what we provide here.
     */
    (*gms_instance_ptr->callbacks.clGmsClusterTrackCallback)
            (gmsHandle, &res->buffer, res->numberOfMembers, res->rc);

error_checkin_free_res:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

error_free_res:
    /* We own res and its notification array on all paths */
    if (res->buffer.notification != NULL)
    {
        clHeapFree((void*)res->buffer.notification);
    }
    clHeapFree((void*)res);
    return rc;
}
/*----------------------------------------------------------------------------
 *  Cluster Member Eject Callback Handler
 *---------------------------------------------------------------------------*/
/*
 * Dispatches a member-eject notification to the user's registered
 * clGmsMemberEjectCallback, passing only the eject reason.  `res` is
 * heap-allocated by the caller and is freed here on every path.
 */
ClRcT clGmsClusterMemberEjectCallbackHandler(
    CL_IN   ClGmsClusterMemberEjectCallbackDataT* const res)
{
    ClRcT rc = CL_OK;
    struct gms_instance *gms_instance_ptr = NULL;
    ClGmsHandleT gmsHandle = CL_GMS_INVALID_HANDLE;

    CL_ASSERT(res != NULL);

    gmsHandle = res->gmsHandle;
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        goto error_free_res;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: checkout succeeded, so route through the checkin label
         * instead of skipping it and leaking the handle reference. */
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_checkin_free_res;
    }

    if (gms_instance_ptr->
                cluster_manage_callbacks.clGmsMemberEjectCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin_free_res;
    }

    /*
     * Calling the user's callback function with the data.  The user cannot
     * free the data we provide.  If it needs to retain it, it has to copy
     * it out from what we provide here.
     */
    (*gms_instance_ptr->cluster_manage_callbacks.clGmsMemberEjectCallback)
                               (res->reason);

error_checkin_free_res:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

error_free_res:
    clHeapFree((void*)res);

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Track Stop API
 *---------------------------------------------------------------------------*/
/*
 * Stops delivery of cluster-track callbacks for this handle/EO address.
 * Synchronous: the server's status is returned from the response.
 * The handle checkout is always balanced with a checkin.
 */
ClRcT clGmsClusterTrackStop(
    CL_IN const ClGmsHandleT gmsHandle)
{
    ClRcT                           rc = CL_OK;
    struct gms_instance            *gms_instance_ptr = NULL;
    ClGmsClusterTrackStopRequestT   req = {0};
    ClGmsClusterTrackStopResponseT *res = NULL;

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    req.gmsHandle = gmsHandle;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));

    CL_ASSERT(rc == CL_OK); /* Should really never happen */

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    rc = cl_gms_cluster_track_stop_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_exit;
    }

    rc = res->rc;

    clHeapFree((void*)res);

error_exit:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

    return CL_GMS_RC(rc);
}
/*-----------------------------------------------------------------------------
 * Cluster Leave Async API
 *---------------------------------------------------------------------------*/
/*
 * Asks the GMS server to remove `nodeId` from the cluster without waiting
 * for the view change to complete.  The instance's cluster-manage callbacks
 * are cleared first, since no further manage events are expected after a
 * leave is initiated.  The handle checkout is always balanced.
 */
ClRcT clGmsClusterLeaveAsync(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      nodeId)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterLeaveRequestT            req = {0};
    ClGmsClusterLeaveResponseT          *res = NULL;

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    memset(&(gms_instance_ptr->cluster_manage_callbacks), 0,
           sizeof(ClGmsClusterManageCallbacksT));

    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    req.sync      = CL_FALSE;

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    rc = cl_gms_cluster_leave_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }

    rc = res->rc;

    clHeapFree((void*)res);

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

    return rc;
}
/*
 * Informs the GMS server that the component identified by `compId` has
 * come up.  Synchronous: the server's status (or the RMD transport error)
 * is returned, wrapped with CL_GMS_RC.
 */
ClRcT clGmsCompUpNotify (
        CL_IN  ClUint32T         compId)
{
    ClGmsCompUpNotifyRequestT      request  = {0};
    ClGmsCompUpNotifyResponseT    *response = NULL;
    ClRcT                          status   = CL_OK;

    CL_GMS_SET_CLIENT_VERSION( request );
    request.compId = compId;

    status = cl_gms_comp_up_notify_rmd(&request, 0, &response);
    if (status != CL_OK)
    {
        /* The RMD layer allocates a response only on success */
        return CL_GMS_RC(status);
    }
    CL_ASSERT(response != NULL);

    /* Propagate the server-side result and release the response buffer */
    status = response->rc;
    clHeapFree((void*)response);

    return CL_GMS_RC(status);
}
/*-----------------------------------------------------------------------------
 * Cluster Member Eject API
 *---------------------------------------------------------------------------*/
/*
 * Synchronously asks the GMS server to eject `nodeId` from the cluster
 * with the given `reason`.  The server's status is propagated from the
 * response.  The handle checkout is always balanced with a checkin.
 */
ClRcT clGmsClusterMemberEject(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      nodeId,
    CL_IN const ClGmsMemberEjectReasonT           reason)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterMemberEjectRequestT      req = {0};
    ClGmsClusterMemberEjectResponseT    *res = NULL;

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    clGmsMutexLock( gms_instance_ptr->response_mutex);

    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    req.reason    = reason;

    rc = cl_gms_cluster_member_eject_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }

    rc = res->rc;

    clHeapFree((void*)res);

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Track API
 *---------------------------------------------------------------------------*/
/*
 * Registers for cluster membership tracking and/or fetches the current
 * cluster view.
 *
 * trackFlags must be a combination of CL_GMS_TRACK_CURRENT,
 * CL_GMS_TRACK_CHANGES and CL_GMS_TRACK_CHANGES_ONLY (the last two are
 * mutually exclusive).  If CL_GMS_TRACK_CURRENT is set and a
 * notificationBuffer is supplied, the current view is returned
 * synchronously: either into the caller's array (numberOfItems must then
 * be non-zero) or into a newly allocated array whose ownership passes to
 * the caller.  All other combinations require a registered
 * clGmsClusterTrackCallback and are delivered asynchronously.
 */
ClRcT clGmsClusterTrack(
    CL_IN    const ClGmsHandleT               gmsHandle,
    CL_IN    const ClUint8T                   trackFlags,
    CL_INOUT ClGmsClusterNotificationBufferT* const notificationBuffer)
{    
    ClRcT                       rc = CL_OK;
    struct gms_instance        *gms_instance_ptr = NULL;
    ClGmsClusterTrackRequestT   req = {0};
    ClGmsClusterTrackResponseT *res = NULL;
    const ClUint8T validFlag = CL_GMS_TRACK_CURRENT | CL_GMS_TRACK_CHANGES |
                            CL_GMS_TRACK_CHANGES_ONLY;
    ClBoolT shouldFreeNotification = CL_TRUE;

    clLog(TRACE,CLM,NA,"clGmsClusterTrack API is invoked");
    /* Reject any bit outside the valid flag mask */
    if (((trackFlags | validFlag) ^ validFlag) != 0) {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }

    CL_GMS_SET_CLIENT_VERSION( req );

    if (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) && /* If current view is requested */
        (notificationBuffer != NULL) &&        /* Buffer is provided */
        (notificationBuffer->notification != NULL) && /* Caller provides array */
        (notificationBuffer->numberOfItems == 0)) /* then size must be given */
    {
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);
    }

    if (trackFlags == 0) /* at least one flag should be specified */
    {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }

    if (((trackFlags & CL_GMS_TRACK_CHANGES) == CL_GMS_TRACK_CHANGES) &&
        ((trackFlags & CL_GMS_TRACK_CHANGES_ONLY) == CL_GMS_TRACK_CHANGES_ONLY)) /* mutually exclusive flags */
    {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }

    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: checkout succeeded, so route through the checkin label
         * instead of returning and leaking the handle reference. */
        rc = CL_GMS_RC(CL_ERR_NULL_POINTER);
        goto error_checkin;
    }

    /* If not a sync call, then clGmsClusterTrackCallbackHandler must be given */
    if (((trackFlags & (CL_GMS_TRACK_CHANGES|CL_GMS_TRACK_CHANGES_ONLY)) != 0) ||
        (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) &&
         (notificationBuffer == NULL)))
    {
        if (gms_instance_ptr->callbacks.clGmsClusterTrackCallback == NULL)
        {
            rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
            goto error_checkin;
        }
    }

    req.gmsHandle  = gmsHandle;
    req.trackFlags = trackFlags;
    req.sync       = CL_FALSE;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));

    CL_ASSERT(rc == CL_OK); /* Should really never happen */

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    if (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) &&
        (notificationBuffer != NULL)) /* Sync response requested */
    {
        /*
         * We need to call the extended track() request which returns with
         * a notification buffer allocated by the XDR layer.
         */
        clLogMultiline(TRACE,CLM,NA,
                "Sending RMD to GMS server for Cluster track with"
                " track flags CL_GMS_TRACK_CURRENT");
        req.sync = CL_TRUE;
        rc = cl_gms_cluster_track_rmd(&req, 0 /* use def. timeout */, &res);
        clLog(TRACE,CLM,NA,"Returned from cluster track RMD");
        if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
        {
            switch (CL_GET_ERROR_CODE(rc))
            {
                case CL_ERR_TIMEOUT:    rc = CL_GMS_RC(CL_ERR_TIMEOUT); break;
                case CL_ERR_TRY_AGAIN:  rc = CL_GMS_RC(CL_ERR_TRY_AGAIN); break;
                default:                rc = CL_GMS_RC(CL_ERR_UNSPECIFIED);
            }
            /* FIXME: Need to get back to this! Based on consensus among
             *  engineers.
             */
            goto error_unlock_checkin;
        }

        if (res->rc != CL_OK) /* If other side indicated error, we need
                               * to free the buffer.
                               */
        {
            rc = res->rc;
            goto error_exit;
        }

        /* All fine, need to copy buffer */
        if (notificationBuffer->notification == NULL) /* we provide array */
        {
            memcpy(notificationBuffer, &res->buffer,
                   sizeof(*notificationBuffer)); /* This takes care of array */
            shouldFreeNotification = CL_FALSE; /* ownership moved to caller */
        }
        else
        { /* caller provided array with fixed given size; we need to copy if
           * there is enough space.
           */
            if (notificationBuffer->numberOfItems >=
                res->buffer.numberOfItems)
            {
                /* Copy array, as much as we can */
                memcpy((void*)notificationBuffer->notification,
                       (void*)res->buffer.notification,
                       res->buffer.numberOfItems *
                          sizeof(ClGmsClusterNotificationT));
            }
            /*
             * Instead of copying the rest of the fields in buffer one-by-one,
             * we do a trick: relink the above array and then copy the entire
             * struct over.  This will keep working even if the buffer struct
             * grows in the future; without change here.
             */
            clHeapFree((void*)res->buffer.notification);
            res->buffer.notification = notificationBuffer->notification;
            memcpy((void*)notificationBuffer, (void*)&res->buffer,
                   sizeof(*notificationBuffer));
            shouldFreeNotification = CL_FALSE; /* now points at caller's array */
        }
    }
    else
    {
        /* No sync response requested, so we call the simple rmd call */
        clLog(TRACE,CLM,NA, "Sending Async RMD to GMS server for cluster track"); 
        rc = cl_gms_cluster_track_rmd(&req, 0 /* use def. timeout */, &res);
        clLog(TRACE,CLM,NA, "Cluster track RMD returned");
        if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
        {
            goto error_unlock_checkin;
        }

        rc = res->rc;
    }     

error_exit:
    /* res is non-NULL on every path that reaches this label */
    if(shouldFreeNotification == CL_TRUE )
    {
        if (res->buffer.notification != NULL)
          clHeapFree((void*)res->buffer.notification);
    }
    clHeapFree((void*)res);

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

error_checkin:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLog(ERROR,CLM,NA,"clHandleCheckin Failed");
    }

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Join Async API
 *---------------------------------------------------------------------------*/
/*
 * Asynchronously joins `nodeId`/`nodeName` to the cluster with the given
 * leadership credentials.  The cluster-manage callbacks (eject callback
 * is mandatory) are recorded in the instance before the request is sent;
 * completion is reported through those callbacks.
 * The handle checkout is always balanced with a checkin.
 */
ClRcT clGmsClusterJoinAsync(
    CL_IN const ClGmsHandleT                        gmsHandle,
    CL_IN const ClGmsClusterManageCallbacksT* const clusterManageCallbacks,
    CL_IN const ClGmsLeadershipCredentialsT         credentials,
    CL_IN const ClGmsNodeIdT                        nodeId,
    CL_IN const SaNameT*                      const nodeName)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterJoinRequestT             req = {0};
    ClGmsClusterJoinResponseT           *res = NULL;

    CL_GMS_SET_CLIENT_VERSION( req );
    if ((nodeName == (const void*)NULL) ||
        (clusterManageCallbacks == (const void*)NULL) ||
        (clusterManageCallbacks->clGmsMemberEjectCallback == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    memcpy(&(gms_instance_ptr->cluster_manage_callbacks),
           clusterManageCallbacks,
           sizeof(ClGmsClusterManageCallbacksT));

    req.gmsHandle   = gmsHandle;
    req.credentials = credentials;
    req.nodeId      = nodeId;
    memcpy(&req.nodeName,nodeName, sizeof(SaNameT));
    req.sync        = CL_FALSE;

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    rc = cl_gms_cluster_join_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }

    rc = res->rc;

    clHeapFree((void*)res);

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Join API
 *---------------------------------------------------------------------------*/
/*
 * Synchronously joins `nodeId`/`nodeName` to the cluster, waiting up to
 * `timeout` nanoseconds (converted to milliseconds for the RMD layer).
 * The cluster-manage callbacks (eject callback is mandatory) are recorded
 * in the instance before the request is sent.
 * The handle checkout is always balanced with a checkin.
 */
ClRcT clGmsClusterJoin(
    CL_IN const ClGmsHandleT                        gmsHandle,
    CL_IN const ClGmsClusterManageCallbacksT* const clusterManageCallbacks,
    CL_IN const ClGmsLeadershipCredentialsT         credentials,
    CL_IN const ClTimeT                             timeout,
    CL_IN const ClGmsNodeIdT                        nodeId,
    CL_IN const SaNameT*                      const nodeName)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterJoinRequestT             req = {0};
    ClGmsClusterJoinResponseT           *res = NULL;

    clLog(INFO,CLM,NA, "clGmsClusterJoin API is being invoked");
    CL_GMS_SET_CLIENT_VERSION( req );

    if ((nodeName == (const void*)NULL) ||
        (clusterManageCallbacks == (const void*)NULL) ||
        (clusterManageCallbacks->clGmsMemberEjectCallback == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        /* BUG FIX: balance the successful checkout before failing */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    memcpy(&(gms_instance_ptr->cluster_manage_callbacks),
           clusterManageCallbacks,
           sizeof(ClGmsClusterManageCallbacksT));

    req.gmsHandle   = gmsHandle;
    req.credentials = credentials;
    req.nodeId      = nodeId;
    memcpy(&req.nodeName,nodeName, sizeof(SaNameT));
    req.sync        = CL_TRUE;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    if (clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId)) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclEoMyEoIocPortGet failed");
    }

    clGmsMutexLock(gms_instance_ptr->response_mutex);

    clLog(TRACE,CLM,NA, "Sending RMD to GMS server for cluster join");
    rc = cl_gms_cluster_join_rmd(&req, (ClUint32T)(timeout/NS_IN_MS), &res);
    clLog(TRACE,CLM,NA, "clGmsClusterJoin RMD returned");

    /* BUG FIX: propagate the server-side result like every other sync API
     * does; the original discarded res->rc and returned only the RMD
     * transport status. */
    if ((rc == CL_OK) && (res != NULL))
    {
        rc = res->rc;
    }

    if( res ) 
        clHeapFree((void*)res);

    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }
    return rc;
}
/*
 * CLI handler: adds a node to a view database.
 *
 * Expected arguments (argc must be 8):
 *   argv[1] view/group id    argv[2] node/member id   argv[3] name
 *   argv[4] IOC node address argv[5] IOC port         argv[6] credentials
 *   argv[7] initial view number
 *
 * Input-validation failures report a usage/error string through `ret`
 * and return CL_OK (the CLI command itself succeeded; the input is wrong).
 */
static ClRcT   gmsCliAddViewNode(
            CL_IN   ClUint32T argc, 
            CL_IN   ClCharT** argv, 
            CL_OUT  ClCharT** ret)
{ 
    ClRcT           rc = CL_OK;
    ClGmsViewNodeT  *viewNode = NULL;
    ClUint64T       num = 0;
    ClGmsGroupIdT   groupId = CL_GMS_INVALID_GROUP_ID;
    ClGmsMemberIdT  memberId = 0;
    ClIocAddressT   iocAddress = {{0}};
    ClGmsLeadershipCredentialsT credentials = CL_GMS_INELIGIBLE_CREDENTIALS;
    ClUint64T       viewNumber = 0; 

    if (argc != 8)
    {
        _clGmsCliMakeError(ret, VIEW_NODE_ADD_USAGE);

        return CL_OK;
    }

    rc = _clGmsCliGetNumeric(argv[1], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid View Id passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    groupId = (ClGmsGroupIdT) num;

    rc = _clGmsCliGetNumeric(argv[2], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid Node Id passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    memberId = (ClGmsMemberIdT) num;

    rc = _clGmsCliGetNumeric(argv[4], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid IOC address passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    iocAddress.iocPhyAddress.nodeAddress = (ClIocNodeAddressT) num; 

    rc = _clGmsCliGetNumeric(argv[5], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid port Id passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    iocAddress.iocPhyAddress.portId = (ClIocPortT) num;

    /* FIXME: Assuming credentials is some kind of integer */
    rc = _clGmsCliGetNumeric(argv[6], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid credentials passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    credentials = (ClGmsLeadershipCredentialsT) num;

    /* BUG FIX: the view number is the 8th argument; the original parsed
     * argv[6] (credentials) a second time and argv[7] was never used. */
    rc = _clGmsCliGetNumeric(argv[7], &num);
    if (rc != CL_OK) 
    {
        _clGmsCliMakeError(ret, "Invalid view number passed\n\0");
        return CL_OK;   /* CLI command itself succeeded but input
                         * is wrong. So return OK. */
    }
    viewNumber = num;

    viewNode = (ClGmsViewNodeT *)clHeapAllocate(sizeof(ClGmsViewNodeT));
    if (!viewNode)  return CL_GMS_RC(CL_ERR_NO_MEMORY);

    /* BUG FIX: was sizeof(viewNode) — the size of the pointer — which
     * zeroed only 4/8 bytes of the structure. */
    memset(viewNode, 0, sizeof(*viewNode));

    if (groupId != CL_GMS_CLUSTER_ID)
    {
        viewNode->viewMember.groupMember.memberId = memberId;
        viewNode->viewMember.groupMember.initialViewNumber = 
                                                    viewNumber;
        viewNode->viewMember.groupMember.memberAddress = iocAddress;

        /* BUG FIX: bound the copy by the destination, not the source;
         * the struct was zeroed above so the result is NUL-terminated. */
        strncpy(viewNode->viewMember.groupMember.memberName.value,
                argv[3],
                sizeof(viewNode->viewMember.groupMember.memberName.value) - 1);
        viewNode->viewMember.groupMember.memberName.length =
                strlen(viewNode->viewMember.groupMember.memberName.value);
        viewNode->viewMember.groupMember.credential = credentials;

        rc = _clGmsViewAddNode(groupId, memberId, viewNode);
    }
    else
    {
        viewNode->viewMember.clusterMember.nodeId = memberId;
        viewNode->viewMember.clusterMember.initialViewNumber = 
                                                        viewNumber;
        viewNode->viewMember.clusterMember.nodeAddress = iocAddress;

        strncpy(viewNode->viewMember.clusterMember.nodeName.value,
                argv[3],
                sizeof(viewNode->viewMember.clusterMember.nodeName.value) - 1);
        viewNode->viewMember.clusterMember.nodeName.length =
                strlen(viewNode->viewMember.clusterMember.nodeName.value);
        viewNode->viewMember.clusterMember.credential = credentials;

        rc = _clGmsViewAddNode(groupId, memberId, viewNode);
    }

    return rc;
}
/*  FIXME:
 */
/*
 * Claims the first inactive slot in the `gmsDb` array (sized by
 * gmsGlobalInfo.config.noOfGroups), creates its three hash tables
 * (current view, join/left view, track) and its view/track mutexes,
 * marks the slot active, and returns a pointer to it via *gmsElement.
 *
 * Returns CL_ERR_OUT_OF_RANGE when no free slot exists, or the hashtable
 * creation error.
 *
 * NOTE(review): if the second or third clCntHashtblCreate fails, the
 * tables created earlier for this slot are not torn down — confirm
 * whether a cleanup pass is needed here.
 */
ClRcT   _clGmsDbCreate(
           CL_IN        ClGmsDbT*  const      gmsDb,
           CL_OUT       ClGmsDbT** const      gmsElement)
{
    ClRcT       rc = CL_OK;
    ClUint32T   i = 0;
    ClUint64T   cntIndex = 0;

    if ((gmsDb == NULL) || (gmsElement == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    /* Find the first slot not yet in use */
    for(i = 0; i < gmsGlobalInfo.config.noOfGroups; i++)
    {
        if (gmsDb[i].view.isActive == CL_FALSE)
        {
            cntIndex = i;
            break;
        }
    }
    if (i == gmsGlobalInfo.config.noOfGroups)
    {
        return CL_ERR_OUT_OF_RANGE;
    }

    /* Current view database. Holds cluster and groups info */
    rc = clCntHashtblCreate(
                      viewDbParams.htbleParams.gmsNumOfBuckets, 
                      viewDbParams.htbleParams.gmsHashKeyCompareCallback, 
                      viewDbParams.htbleParams.gmsHashCallback,
                      viewDbParams.htbleParams.gmsHashDeleteCallback,
                      viewDbParams.htbleParams.gmsHashDestroyCallback,
                      CL_CNT_UNIQUE_KEY, 
                      &gmsDb[cntIndex].htbl[CL_GMS_CURRENT_VIEW]);
    if (rc != CL_OK)
    {
        return rc;
    }

    /* Cluster joined/left view list. Used for tracking */
    rc = clCntHashtblCreate(
                      viewDbParams.htbleParams.gmsNumOfBuckets, 
                      viewDbParams.htbleParams.gmsHashKeyCompareCallback, 
                      viewDbParams.htbleParams.gmsHashCallback,
                      viewDbParams.htbleParams.gmsHashDeleteCallback,
                      viewDbParams.htbleParams.gmsHashDestroyCallback,
                      CL_CNT_UNIQUE_KEY, 
                      &gmsDb[cntIndex].htbl[CL_GMS_JOIN_LEFT_VIEW]);
    if (rc != CL_OK)
    {
        return rc;
    }

    /* Track hash table create */
    rc = clCntHashtblCreate(
                viewDbParams.trackDbParams->htbleParams.gmsNumOfBuckets, 
                viewDbParams.trackDbParams->htbleParams.
                                                 gmsHashKeyCompareCallback, 
                viewDbParams.trackDbParams->htbleParams.gmsHashCallback,
                viewDbParams.trackDbParams->htbleParams.
                                                     gmsHashDeleteCallback,
                viewDbParams.trackDbParams->htbleParams.
                                                        gmsHashDestroyCallback,
                CL_CNT_UNIQUE_KEY, 
                &gmsDb[cntIndex].htbl[CL_GMS_TRACK]);
    if (rc != CL_OK)
    {
        return rc;
    }

    clGmsMutexCreate(&gmsDb[cntIndex].viewMutex);
    clGmsMutexCreate(&gmsDb[cntIndex].trackMutex);

    gmsDb[cntIndex].view.isActive = CL_TRUE;

    clLog(DBG,GEN,DB,
            "Created View and Track DB for GroupId [%lld]",cntIndex);

    /* (Dropped the original's stray post-increment of the local cntIndex;
     * it had no observable effect.) */
    *gmsElement = &gmsDb[cntIndex];

    return rc;
}
/*-----------------------------------------------------------------------------
 * Cluster Member Get API
 *---------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
 * Cluster Member Get API
 *---------------------------------------------------------------------------*/
/*
 * Fetches the cluster member record for nodeId from the GMS server via RMD
 * and copies it into *clusterMember.
 *
 * Parameters:
 *   gmsHandle     [in]  handle obtained from clGmsInitialize().
 *   nodeId        [in]  node whose member record is requested.
 *   timeout       [in]  call timeout; divided by NS_IN_MS before the RMD,
 *                       so presumably expressed in nanoseconds — TODO confirm.
 *   clusterMember [out] filled with the member record on success.
 *
 * Returns CL_OK on success, or a CL_GMS_RC()-wrapped error code.
 */
ClRcT clGmsClusterMemberGet(
    CL_IN  const  ClGmsHandleT         gmsHandle,
    CL_IN  const  ClGmsNodeIdT         nodeId, 
    CL_IN  const  ClTimeT              timeout,
    CL_OUT ClGmsClusterMemberT* const clusterMember)
{
    ClRcT                           rc = CL_OK;
    struct gms_instance            *gms_instance_ptr= NULL;
    ClGmsClusterMemberGetRequestT   req = {0};
    ClGmsClusterMemberGetResponseT *res= NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    if (clusterMember == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
     
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }
    if (gms_instance_ptr == NULL)
    {
        /* FIX: the checkout above succeeded, so the handle must be checked
         * back in before bailing out; the original returned here without a
         * checkin and leaked the handle reference. */
        if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
        {
            clLogError(CLM,NA,
                       "\nclHandleCheckin failed");
        }
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    
    /* Serialize RMD request/response handling for this instance. */
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_member_get_rmd(&req, (ClUint32T)(timeout/NS_IN_MS), &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    /* The RMD itself succeeded; the server-side status is in res->rc. */
    rc = res->rc;
    if (rc != CL_OK)
    {
        goto error_exit;
    }
    
    memcpy((void*)clusterMember, (void*)&res->member,
           sizeof(ClGmsClusterMemberT));

error_exit:
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "\nclHandleCheckin failed");
    }
    
    return CL_GMS_RC(rc);
}
/*-----------------------------------------------------------------------------
 * Initialize API
 *---------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------
 * Initialize API
 *---------------------------------------------------------------------------*/
/*
 * Creates a new GMS client instance: verifies the requested version against
 * the library's version database, allocates a handle, creates the per-instance
 * response mutex, performs version negotiation with the server over RMD, and
 * registers the user's callbacks.
 *
 * Parameters:
 *   gmsHandle    [out]   receives the new client handle on success; set to
 *                        CL_HANDLE_INVALID_VALUE on failure.
 *   gmsCallbacks [in]    optional user callbacks; may be NULL (all cleared).
 *   version      [inout] requested version; clVersionVerify may rewrite it
 *                        with the closest supported version on mismatch.
 *
 * Returns CL_OK on success or a CL_GMS_RC()-wrapped error code.
 *
 * FIX: removed the large #if 0 blocks of dead error-handling code that had
 * been superseded by the CL_ASSERT calls — behavior is unchanged.
 */
ClRcT clGmsInitialize(
    CL_OUT   ClGmsHandleT* const    gmsHandle,
    CL_IN    const ClGmsCallbacksT* const gmsCallbacks,
    CL_INOUT ClVersionT*   const      version)
{
    struct gms_instance *gms_instance_ptr = NULL;
    ClRcT rc = CL_OK;
    ClGmsClientInitRequestT req = {{0}};
    ClGmsClientInitResponseT *res = NULL;

    /* Step 0: Check readiness of library */
    rc = check_lib_init();
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_NOT_INITIALIZED);
    }

    /* Step 1: Checking inputs.  NULL arguments are a caller bug, hence
     * asserts rather than runtime error returns. */
    CL_ASSERT(gmsHandle != NULL);
    CL_ASSERT(version != NULL);

    *gmsHandle = CL_HANDLE_INVALID_VALUE;

    /* Step 2: Verifying version match */
    rc = clVersionVerify (&version_database, version);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_VERSION_MISMATCH); 
    }

    /* Step 3: Obtain unique handle */
    rc = clHandleCreate(gmsHandleDb, sizeof(struct gms_instance), gmsHandle);
    CL_ASSERT(rc == CL_OK);
    clLogInfo("GMS","CLT","GMS client handle is [%llX]",*gmsHandle);

    rc = clHandleCheckout(gmsHandleDb, *gmsHandle, (void **)&gms_instance_ptr);
    CL_ASSERT(rc == CL_OK);
    CL_ASSERT(gms_instance_ptr != NULL);

    rc = clGmsMutexCreate(&gms_instance_ptr->response_mutex);
    CL_ASSERT(rc == CL_OK);

    /* Step 4: Negotiate version with the server */
    req.clientVersion.releaseCode = version->releaseCode;
    req.clientVersion.majorVersion= version->majorVersion;
    req.clientVersion.minorVersion= version->minorVersion;

    rc = cl_gms_clientlib_initialize_rmd(&req, 0x0 ,&res );
    if(rc != CL_OK )
    {
        /* Undo everything acquired so far: mutex, checkout, then the
         * handle itself via error_destroy. */
        clLogError(GEN,NA,"cl_gms_clientlib_initialize_rmd failed with rc:0x%x ",rc);
        clGmsMutexDelete(gms_instance_ptr->response_mutex);
        gms_instance_ptr->response_mutex = 0;
        clHandleCheckin(gmsHandleDb, *gmsHandle);
        rc = CL_GMS_RC(rc);
        goto error_destroy;
    }
    
    /* Step 5: Initialize instance entry */
    if (gmsCallbacks) 
    {
        memcpy(&gms_instance_ptr->callbacks, gmsCallbacks, sizeof(ClGmsCallbacksT));
    } 
    else 
    {
        memset(&gms_instance_ptr->callbacks, 0, sizeof(ClGmsCallbacksT));
    }

    memset(&gms_instance_ptr->cluster_notification_buffer, 0, sizeof(ClGmsClusterNotificationBufferT));
    memset(&gms_instance_ptr->group_notification_buffer, 0, sizeof(ClGmsGroupNotificationBufferT));

    /* Step 6: Decrement handle use count and return */
    if ((clHandleCheckin(gmsHandleDb, *gmsHandle)) != CL_OK)
    {
        clLogError(GEN,DB, "\nclHandleCheckin failed");
    }
    clHeapFree(res);
    return CL_OK;

    error_destroy:
    clHandleDestroy(gmsHandleDb, *gmsHandle);
    *gmsHandle = CL_HANDLE_INVALID_VALUE;
    return rc;
}
/* Placeholder EO health-check callback: GMS does not implement a health
 * check, so this always reports CL_ERR_NOT_IMPLEMENTED. */
static ClRcT dummy_health_check(ClEoSchedFeedBackT* schFeedback)
{
    ClRcT status = CL_GMS_RC(CL_ERR_NOT_IMPLEMENTED);
    return status;
}
/*
 * Multicasts a GMS protocol message (cluster/group join/leave/eject, comp
 * death, leader elect, sync, or group mcast) to the cluster over totem.
 *
 * The request is built in req_exec_gms_nodejoin, marshalled into a flat
 * buffer, prefixed with a mar_req_header_t carrying the service id and total
 * size (needed by the unmarshaller), and sent via
 * totempg_groups_mcast_joined().
 *
 * NOTE(review): the return value mixes two domains — 0 / totem result codes
 * on the send path, but ClRcT error codes on early failures (buffer create,
 * invalid msgType).  Callers apparently treat non-zero as failure; confirm.
 *
 * NOTE(review): curVer is not declared here — presumably a file-scope
 * variable shared with other senders; verify it is safe without locking.
 */
int clGmsSendMsg(ClGmsViewMemberT       *memberNodeInfo,
                 ClGmsGroupIdT           groupId, 
                 ClGmsMessageTypeT       msgType,
                 ClGmsMemberEjectReasonT ejectReason,
                 ClUint32T               dataSize,
                 ClPtrT                  dataPtr)
{
    mar_req_header_t        header = {0};
    struct VDECL(req_exec_gms_nodejoin) req_exec_gms_nodejoin = {{0}};
    struct iovec    req_exec_gms_iovec = {0};
    int             result = -1;
    ClRcT           rc = CL_OK;
    ClUint32T       clusterVersion = 0;
    ClBufferHandleT bufferHandle = 0;
    ClUint8T        *message = NULL;
    ClUint32T       length = 0;
    ClPtrT          temp = NULL;

    /* On clusters at version >= 5.0.0 with native leader election the node
     * cache forms the view, so no multicast is needed at all. */
    rc = clNodeCacheMinVersionGet(NULL, &clusterVersion);
    if(clusterVersion >= CL_VERSION_CODE(5, 0, 0) 
       && 
       clAspNativeLeaderElection())
    {
        clLog(DBG, OPN, AIS, 
              "Skipped sending msgtype [%d] since node cache is used to form the cluster view",
              msgType);
        return 0;
    }

    /* rc is deliberately examined only after the version gate above:
     * clusterVersion stays 0 on failure, so the gate is skipped and we
     * fall back to the compile-time version below. */
    if (rc != CL_OK)
    {
        clLog(ERROR,OPN,AIS,
                "Error while getting version from the version cache. rc 0x%x",rc);

        curVer.releaseCode = CL_RELEASE_VERSION;
        curVer.majorVersion = CL_MAJOR_VERSION;
        curVer.minorVersion = CL_MINOR_VERSION;
    } 
    else 
    {
        curVer.releaseCode = CL_VERSION_RELEASE(clusterVersion);
        curVer.majorVersion = CL_VERSION_MAJOR(clusterVersion);
        curVer.minorVersion = CL_VERSION_MINOR(clusterVersion);
    }

    /* Get the version and send it */
    req_exec_gms_nodejoin.version.releaseCode = curVer.releaseCode;
    req_exec_gms_nodejoin.version.majorVersion = curVer.majorVersion;
    req_exec_gms_nodejoin.version.minorVersion = curVer.minorVersion;

    /* For now we send message without caring about version. Later on
     * we need to change it accordingly */
    
    /* Populate the type-specific part of the request from memberNodeInfo. */
    switch(msgType)
    {
        case CL_GMS_CLUSTER_JOIN_MSG:
        case CL_GMS_CLUSTER_LEAVE_MSG:
        case CL_GMS_CLUSTER_EJECT_MSG:
            clLog(DBG,OPN,AIS,
                    "Sending cluster %s multicast message",
                    msgType == CL_GMS_CLUSTER_JOIN_MSG ? "join":
                    msgType == CL_GMS_CLUSTER_LEAVE_MSG ? "leave" : "eject");
            req_exec_gms_nodejoin.ejectReason = ejectReason;
            memcpy (&req_exec_gms_nodejoin.specificMessage.gmsClusterNode, &memberNodeInfo->clusterMember,
                    sizeof (ClGmsClusterMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;
        case CL_GMS_GROUP_CREATE_MSG:
        case CL_GMS_GROUP_DESTROY_MSG:
        case CL_GMS_GROUP_JOIN_MSG:
        case CL_GMS_GROUP_LEAVE_MSG:
            clLog(DBG,OPN,AIS,
                    "Sending group %s multicast message",
                    msgType == CL_GMS_GROUP_CREATE_MSG ? "create" : 
                    msgType == CL_GMS_GROUP_DESTROY_MSG ? "destroy" :
                    msgType == CL_GMS_GROUP_JOIN_MSG ? "join" : "leave");
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode, 
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.groupData, 
                    &memberNodeInfo->groupData,
                    sizeof(ClGmsGroupInfoT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;
        case CL_GMS_COMP_DEATH:
            clLog(DBG,OPN,AIS,
                    "Sending comp death multicast message");
            memcpy (&req_exec_gms_nodejoin.specificMessage.groupMessage.gmsGroupNode, 
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;
        case CL_GMS_LEADER_ELECT_MSG:
            clLog(DBG,OPN,AIS,
                    "Sending leader elect multicast message");
            memcpy (&req_exec_gms_nodejoin.specificMessage.gmsClusterNode, &memberNodeInfo->clusterMember,
                    sizeof (ClGmsClusterMemberT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            break;
        case CL_GMS_SYNC_MESSAGE:
            clLog(DBG,OPN,AIS,
                    "Sending gms synch multicast message");
            req_exec_gms_nodejoin.dataPtr = dataPtr;
            break;
        case CL_GMS_GROUP_MCAST_MSG:
            memcpy (&req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.gmsGroupNode, 
                    &memberNodeInfo->groupMember,
                    sizeof (ClGmsGroupMemberT));
            memcpy (&req_exec_gms_nodejoin.specificMessage.mcastMessage.groupInfo.groupData,
                    &memberNodeInfo->groupData,
                    sizeof(ClGmsGroupInfoT));
            req_exec_gms_nodejoin.contextHandle = memberNodeInfo->contextHandle;
            req_exec_gms_nodejoin.specificMessage.mcastMessage.userDataSize = dataSize;
            req_exec_gms_nodejoin.dataPtr = dataPtr;
            break;
        default:
            clLog(DBG,OPN,AIS,
                    "Requested wrong message to be multicasted. Message type %d",
                    msgType);
            return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);
    }

    req_exec_gms_nodejoin.gmsMessageType = msgType;
    req_exec_gms_nodejoin.gmsGroupId = groupId;

    /* Create a buffer handle and marshall the eliments */
    rc = clBufferCreate(&bufferHandle);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                "Failed to create buffer while sending message on totem. rc 0x%x",rc);
        return rc;
    }

    rc = marshallReqExecGmsNodeJoin(&req_exec_gms_nodejoin,bufferHandle);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                "Failed to marshall the data while sending message on totem. rc 0x%x",rc);
        goto buffer_delete_return;
    }

    rc = clBufferLengthGet(bufferHandle, &length);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                "Failed to get buffer length. rc 0x%x",rc);
        goto buffer_delete_return;
    }

    /* clBufferFlatten allocates 'message'; freed in the cleanup tail. */
    rc = clBufferFlatten(bufferHandle, &message);
    if (rc != CL_OK)
    {
        clLogError(OPN,AIS,
                "clBufferFlatten failed with rc 0x%x",rc);
        goto buffer_delete_return;
    }

    header.id = SERVICE_ID_MAKE (GMS_SERVICE, MESSAGE_REQ_EXEC_GMS_NODEJOIN);
    header.size = length + sizeof(mar_req_header_t);

    /* We need to prepend the total message length in the beginning of the
     * message so that we can find the length while unmarshalling */
    temp = clHeapAllocate(header.size);
    if (temp == NULL)
    {
        clLogError(OPN,AIS, 
                "Failed to allocate memory while sending the message");
        goto buffer_delete_return;
    }

    memcpy(temp,&header, sizeof(mar_req_header_t));
    memcpy(temp+sizeof(mar_req_header_t), message, length);

    req_exec_gms_iovec.iov_base = temp;
    req_exec_gms_iovec.iov_len = length + sizeof(mar_req_header_t);

    /* TOTEMPG_AGREED: all nodes deliver this message in the same order. */
    result = totempg_groups_mcast_joined (openais_group_handle, &req_exec_gms_iovec, 1, TOTEMPG_AGREED);

    clLog(DBG,OPN,AIS,
            "Done with sending multicast message of type %d",msgType);

/* Shared cleanup: frees whatever was allocated before the failure (or after
 * a successful send) and deletes the marshalling buffer. */
buffer_delete_return:
    if (message != NULL)
        clHeapFree(message);

    if (temp != NULL)
        clHeapFree(temp);

    clBufferDelete(&bufferHandle);
    return result;
}