/*-----------------------------------------------------------------------------
 * Finalize API
 *---------------------------------------------------------------------------*/
ClRcT clGmsFinalize(
    CL_IN const ClGmsHandleT gmsHandle)
{
    struct gms_instance *gms_instance_ptr = NULL;
    ClRcT rc = CL_OK;

    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void **)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    rc = clGmsMutexLock(gms_instance_ptr->response_mutex);
    if(rc != CL_OK)
    {
        return rc;
    }

    /*
     * Another thread has already started finalizing
     */
    if (gms_instance_ptr->finalize)
    {
        clGmsMutexUnlock(gms_instance_ptr->response_mutex);
        if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
        {
            clLogError(GEN,DB,
                       "clHandleCheckin Error");
        }
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    gms_instance_ptr->finalize = 1;

    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    clGmsMutexDelete(gms_instance_ptr->response_mutex);

    if ((clHandleDestroy(gmsHandleDb, gmsHandle)) != CL_OK)
    {
        clLogError(GEN,NA,
                   "clHandleDestroy Error");
    }

    if ((clHandleCheckin(gmsHandleDb, gmsHandle)) != CL_OK)
    {
        clLogError(GEN,NA,
                   "clHandleCheckin Error");
    }

    return CL_GMS_RC(rc);
}
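/*
 * Usage sketch (illustrative only; assumes a handle previously obtained
 * from the GMS initialize call, and that no other thread is still using it):
 *
 *     rc = clGmsFinalize(gmsHandle);
 *     // CL_ERR_INVALID_HANDLE means the handle was unknown or another
 *     // thread already finalized it.
 */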
/*-----------------------------------------------------------------------------
 * Cluster Leader Elect API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterLeaderElect(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      preferredLeader,
    CL_INOUT    ClGmsNodeIdT                     *leader,
    CL_INOUT    ClGmsNodeIdT                     *deputy,
    CL_INOUT    ClBoolT                          *leadershipChanged)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterLeaderElectRequestT      req = {0};
    ClGmsClusterLeaderElectResponseT    *res = NULL;
    
    if ((leader == NULL) || (deputy == NULL) || (leadershipChanged == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    req.gmsHandle = gmsHandle;
    req.preferredLeaderNode = preferredLeader;
    
    rc = cl_gms_cluster_leader_elect_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    rc = res->rc;
    *leader = res->leader;
    *deputy = res->deputy;
    *leadershipChanged = res->leadershipChanged;
    
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(LEA,NA,
                   "clHandleCheckin failed");
    }

    return rc;
}
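/*
 * Usage sketch (illustrative only; all three output pointers must be
 * non-NULL or CL_ERR_NULL_POINTER is returned):
 *
 *     ClGmsNodeIdT leader = 0, deputy = 0;
 *     ClBoolT      leadershipChanged = CL_FALSE;
 *     rc = clGmsClusterLeaderElect(gmsHandle, preferredNodeId,
 *                                  &leader, &deputy, &leadershipChanged);
 */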
/*-----------------------------------------------------------------------------
 * Cluster Member Get Async API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterMemberGetAsync(
    CL_IN const ClGmsHandleT   gmsHandle,
    CL_IN const ClInvocationT  invocation,
    CL_IN const ClGmsNodeIdT   nodeId)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterMemberGetAsyncRequestT   req = {0};
    ClGmsClusterMemberGetAsyncResponseT *res = NULL;
        
    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }
    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    if (gms_instance_ptr->callbacks.clGmsClusterMemberGetCallback == NULL)
    {
        rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
        goto error_checkin;
    }
    
    req.gmsHandle  = gmsHandle;
    req.nodeId     = nodeId;
    req.invocation = invocation;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));
    if (rc != CL_OK)
    {
        goto error_checkin;
    }
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_member_get_async_rmd(&req, 0 /* use def. timeout */,
                                             &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }

    rc = res->rc;
    clHeapFree((void*)res); /* free the response on success, as the other calls do */

error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
error_checkin:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }
    
    return rc;
}
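/*
 * Usage sketch (illustrative only; a clGmsClusterMemberGetCallback must
 * have been registered when the handle was initialized, otherwise
 * CL_ERR_NO_CALLBACK is returned; the invocation value is echoed back in
 * that callback so the caller can match request and response):
 *
 *     rc = clGmsClusterMemberGetAsync(gmsHandle, myInvocation, nodeId);
 */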
/*-----------------------------------------------------------------------------
 * Cluster Leave Async API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterLeaveAsync(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      nodeId)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterLeaveRequestT            req = {0};
    ClGmsClusterLeaveResponseT          *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }
    
    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    memset(&(gms_instance_ptr->cluster_manage_callbacks), 0,
           sizeof(ClGmsClusterManageCallbacksT));
    
    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    req.sync      = CL_FALSE;

    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_leave_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    rc = res->rc;
    
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }

    return rc;
}
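/*
 * Usage sketch (illustrative only; note that this call clears the
 * registered cluster-manage callbacks before issuing the leave request,
 * so no further manage callbacks are delivered on this handle):
 *
 *     rc = clGmsClusterLeaveAsync(gmsHandle, localNodeId);
 */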
/*-----------------------------------------------------------------------------
 * Cluster Track Stop API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterTrackStop(
    CL_IN const ClGmsHandleT gmsHandle)
{
    ClRcT                           rc = CL_OK;
    struct gms_instance            *gms_instance_ptr = NULL;
    ClGmsClusterTrackStopRequestT   req = {0};
    ClGmsClusterTrackStopResponseT *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }
    
    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    req.gmsHandle = gmsHandle;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));
    
    CL_ASSERT(rc == CL_OK); /* Should really never happen */
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_track_stop_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_exit;
    }
    
    rc = res->rc;
    
    clHeapFree((void*)res);

error_exit:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);

    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }

    return CL_GMS_RC(rc);
}
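/*
 * Usage sketch (illustrative only; cancels a subscription previously
 * established with clGmsClusterTrack on this handle):
 *
 *     rc = clGmsClusterTrackStop(gmsHandle);
 */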
/*-----------------------------------------------------------------------------
 * Cluster Member Eject API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterMemberEject(
    CL_IN const ClGmsHandleT                      gmsHandle,
    CL_IN const ClGmsNodeIdT                      nodeId,
    CL_IN const ClGmsMemberEjectReasonT           reason)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterMemberEjectRequestT      req = {0};
    ClGmsClusterMemberEjectResponseT    *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    req.reason    = reason;
    
    rc = cl_gms_cluster_member_eject_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    rc = res->rc;
    
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }

    return rc;
}
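/*
 * Usage sketch (illustrative only; reason is one of the
 * ClGmsMemberEjectReasonT values defined by the API):
 *
 *     rc = clGmsClusterMemberEject(gmsHandle, nodeId, reason);
 */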
/*-----------------------------------------------------------------------------
 * Cluster Member Get API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterMemberGet(
    CL_IN  const  ClGmsHandleT         gmsHandle,
    CL_IN  const  ClGmsNodeIdT         nodeId, 
    CL_IN  const  ClTimeT              timeout,
    CL_OUT ClGmsClusterMemberT* const clusterMember)
{
    ClRcT                           rc = CL_OK;
    struct gms_instance            *gms_instance_ptr = NULL;
    ClGmsClusterMemberGetRequestT   req = {0};
    ClGmsClusterMemberGetResponseT *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    if (clusterMember == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
     
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }
    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    
    req.gmsHandle = gmsHandle;
    req.nodeId    = nodeId;
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_member_get_rmd(&req, (ClUint32T)(timeout/NS_IN_MS), &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    rc = res->rc;
    if (rc != CL_OK)
    {
        goto error_exit;
    }
    
    memcpy((void*)clusterMember, (void*)&res->member,
           sizeof(ClGmsClusterMemberT));

error_exit:
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }
    
    return CL_GMS_RC(rc);
}
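/*
 * Usage sketch (illustrative only; the timeout is given in nanoseconds and
 * is converted to milliseconds for the RMD, so NS_IN_MS is a convenient
 * unit):
 *
 *     ClGmsClusterMemberT member = {0};
 *     rc = clGmsClusterMemberGet(gmsHandle, nodeId,
 *                                (ClTimeT)5000 * NS_IN_MS, &member);
 */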
/*-----------------------------------------------------------------------------
 * Cluster Track API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterTrack(
    CL_IN    const ClGmsHandleT               gmsHandle,
    CL_IN    const ClUint8T                   trackFlags,
    CL_INOUT ClGmsClusterNotificationBufferT* const notificationBuffer)
{    
    ClRcT                       rc = CL_OK;
    struct gms_instance        *gms_instance_ptr = NULL;
    ClGmsClusterTrackRequestT   req = {0};
    ClGmsClusterTrackResponseT *res = NULL;
    const ClUint8T validFlag = CL_GMS_TRACK_CURRENT | CL_GMS_TRACK_CHANGES |
                            CL_GMS_TRACK_CHANGES_ONLY;
    ClBoolT shouldFreeNotification = CL_TRUE;

    clLog(TRACE,CLM,NA,"clGmsClusterTrack API is invoked");
    if (((trackFlags | validFlag) ^ validFlag) != 0) /* any flag outside the valid set? */
    {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }

    CL_GMS_SET_CLIENT_VERSION( req );
    
    if (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) && /* If current view is requested */
        (notificationBuffer != NULL) &&        /* Buffer is provided */
        (notificationBuffer->notification != NULL) && /* Caller provides array */
        (notificationBuffer->numberOfItems == 0)) /* then size must be given */
    {
        return CL_GMS_RC(CL_ERR_INVALID_PARAMETER);
    }
    
    if (trackFlags == 0) /* at least one flag should be specified */
    {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }
    
    if (((trackFlags & CL_GMS_TRACK_CHANGES) == CL_GMS_TRACK_CHANGES) &&
        ((trackFlags & CL_GMS_TRACK_CHANGES_ONLY) == CL_GMS_TRACK_CHANGES_ONLY)) /* mutually exclusive flags */
    {
        return CL_GMS_RC(CL_ERR_BAD_FLAG);
    }
    
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return CL_GMS_RC(CL_ERR_INVALID_HANDLE);
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
    
    /* If not a sync call, then clGmsClusterTrackCallbackHandler must be given */
    if (((trackFlags & (CL_GMS_TRACK_CHANGES|CL_GMS_TRACK_CHANGES_ONLY)) != 0) ||
        (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) &&
         (notificationBuffer == NULL)))
    {
        if (gms_instance_ptr->callbacks.clGmsClusterTrackCallback == NULL)
        {
            rc = CL_GMS_RC(CL_ERR_NO_CALLBACK);
            goto error_checkin;
        }
    }
    
    req.gmsHandle  = gmsHandle;
    req.trackFlags = trackFlags;
    req.sync       = CL_FALSE;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    rc = clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId));
    
    CL_ASSERT(rc == CL_OK); /* Should really never happen */
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    if (((trackFlags & CL_GMS_TRACK_CURRENT) == CL_GMS_TRACK_CURRENT) &&
        (notificationBuffer != NULL)) /* Sync response requested */
    {
        /*
         * We need to call the extended track() request which returns with
         * a notification buffer allocated by the XDR layer.
         */
        clLogMultiline(TRACE,CLM,NA,
                "Sending RMD to GMS server for Cluster track with"
                " track flags CL_GMS_TRACK_CURRENT");
        req.sync = CL_TRUE;
        rc = cl_gms_cluster_track_rmd(&req, 0 /* use def. timeout */, &res);
        clLog(TRACE,CLM,NA,"Returned from cluster track RMD");
        if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
        {
            switch (CL_GET_ERROR_CODE(rc))
            {
                case CL_ERR_TIMEOUT:    rc = CL_GMS_RC(CL_ERR_TIMEOUT); break;
                case CL_ERR_TRY_AGAIN:  rc = CL_GMS_RC(CL_ERR_TRY_AGAIN); break;
                default:                rc = CL_GMS_RC(CL_ERR_UNSPECIFIED);
            }
            /* FIXME: Need to get back to this! Based on consensus among
             *  engineers.
             */
            goto error_unlock_checkin;
        }

        if (res->rc != CL_OK) /* If other side indicated error, we need
                               * to free the buffer.
                               */
        {
            rc = res->rc;
            goto error_exit;
        }

        /* All fine, need to copy buffer */
        if (notificationBuffer->notification == NULL) /* we provide array */
        {
            memcpy(notificationBuffer, &res->buffer,
                   sizeof(*notificationBuffer)); /* This takes care of array */
            shouldFreeNotification = CL_FALSE;
        }
        else
        { /* caller provided array with fixed given size; we need to copy if
           * there is enough space.
           */
            if (notificationBuffer->numberOfItems >=
                res->buffer.numberOfItems)
            {
                /* Copy array, as much as we can */
                memcpy((void*)notificationBuffer->notification,
                       (void*)res->buffer.notification,
                       res->buffer.numberOfItems *
                          sizeof(ClGmsClusterNotificationT));
            }
            /*
             * Instead of copying the rest of the fields in the buffer
             * one-by-one, we use a trick: relink the above array and then
             * copy the entire struct over.  This keeps working even if the
             * buffer struct grows in the future, without any change here.
             */
            clHeapFree((void*)res->buffer.notification);
            res->buffer.notification = notificationBuffer->notification;
            memcpy((void*)notificationBuffer, (void*)&res->buffer,
                   sizeof(*notificationBuffer));
            shouldFreeNotification = CL_FALSE;
        }
    }
    else
    {
        clLog(TRACE,CLM,NA, "Sending Async RMD to GMS server for cluster track"); 
        /* No sync response requested, so we call the simple rmd call */
        rc = cl_gms_cluster_track_rmd(&req, 0 /* use def. timeout */, &res);
        clLog(TRACE,CLM,NA, "Cluster track RMD returned");
        if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
        {
            goto error_unlock_checkin;
        }
        
        rc = res->rc;
    }     

error_exit:
    if (shouldFreeNotification == CL_TRUE)
    {
        if (res->buffer.notification != NULL)
        {
            clHeapFree((void*)res->buffer.notification);
        }
    }
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
error_checkin:
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLog(ERROR,CLM,NA,"clHandleCheckin Failed");
    }
    
    return rc;
}
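/*
 * Usage sketch (illustrative only; CL_GMS_TRACK_CURRENT with a buffer
 * gives a synchronous snapshot, while CL_GMS_TRACK_CHANGES or
 * CL_GMS_TRACK_CHANGES_ONLY deliver updates through the track callback
 * registered at initialize time; if the caller leaves buf.notification
 * NULL, the library allocates the array and the caller frees it with
 * clHeapFree):
 *
 *     ClGmsClusterNotificationBufferT buf = {0};
 *     rc = clGmsClusterTrack(gmsHandle, CL_GMS_TRACK_CURRENT, &buf);
 *     if (rc == CL_OK)
 *     {
 *         // consume buf.notification[0 .. buf.numberOfItems-1]
 *         clHeapFree((void*)buf.notification);
 *     }
 */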
/*-----------------------------------------------------------------------------
 * Cluster Join Async API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterJoinAsync(
    CL_IN const ClGmsHandleT                        gmsHandle,
    CL_IN const ClGmsClusterManageCallbacksT* const clusterManageCallbacks,
    CL_IN const ClGmsLeadershipCredentialsT         credentials,
    CL_IN const ClGmsNodeIdT                        nodeId,
    CL_IN const SaNameT*                      const nodeName)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterJoinRequestT             req = {0};
    ClGmsClusterJoinResponseT           *res = NULL;
    
    CL_GMS_SET_CLIENT_VERSION( req );
    if ((nodeName == NULL) ||
        (clusterManageCallbacks == NULL) ||
        (clusterManageCallbacks->clGmsMemberEjectCallback == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
    
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }

    
    memcpy(&(gms_instance_ptr->cluster_manage_callbacks),
           clusterManageCallbacks,
           sizeof(ClGmsClusterManageCallbacksT));
    
    req.gmsHandle   = gmsHandle;
    req.credentials = credentials;
    req.nodeId      = nodeId;
    memcpy(&req.nodeName,nodeName, sizeof(SaNameT));
    req.sync        = CL_FALSE;
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    rc = cl_gms_cluster_join_rmd(&req, 0 /* use def. timeout */, &res);
    if ((rc != CL_OK) || (res == NULL)) /* If there was an error, res isn't allocated */
    {
        goto error_unlock_checkin;
    }
    
    rc = res->rc;
    
    clHeapFree((void*)res);
    
error_unlock_checkin:
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }

    return rc;
}
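/*
 * Usage sketch (illustrative only; clGmsMemberEjectCallback is mandatory
 * in the callback table, and the join result is reported through the
 * cluster-manage callbacks rather than the return code; myEjectCb below
 * stands for a caller-supplied handler):
 *
 *     ClGmsClusterManageCallbacksT cb = { .clGmsMemberEjectCallback = myEjectCb };
 *     rc = clGmsClusterJoinAsync(gmsHandle, &cb, credentials,
 *                                localNodeId, &localNodeName);
 */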
/*-----------------------------------------------------------------------------
 * Cluster Join API
 *---------------------------------------------------------------------------*/
ClRcT clGmsClusterJoin(
    CL_IN const ClGmsHandleT                        gmsHandle,
    CL_IN const ClGmsClusterManageCallbacksT* const clusterManageCallbacks,
    CL_IN const ClGmsLeadershipCredentialsT         credentials,
    CL_IN const ClTimeT                             timeout,
    CL_IN const ClGmsNodeIdT                        nodeId,
    CL_IN const SaNameT*                      const nodeName)
{
    ClRcT                                rc = CL_OK;
    struct gms_instance                 *gms_instance_ptr = NULL;
    ClGmsClusterJoinRequestT             req = {0};
    ClGmsClusterJoinResponseT           *res = NULL;
    
    clLog(INFO,CLM,NA, "clGmsClusterJoin API is being invoked");
    CL_GMS_SET_CLIENT_VERSION( req );

    if ((nodeName == NULL) ||
        (clusterManageCallbacks == NULL) ||
        (clusterManageCallbacks->clGmsMemberEjectCallback == NULL))
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
    
    rc = clHandleCheckout(gmsHandleDb, gmsHandle, (void**)&gms_instance_ptr);
    if (rc != CL_OK)
    {
        return rc;
    }

    if (gms_instance_ptr == NULL)
    {
        return CL_GMS_RC(CL_ERR_NULL_POINTER);
    }
    
    memcpy(&(gms_instance_ptr->cluster_manage_callbacks),
           clusterManageCallbacks,
           sizeof(ClGmsClusterManageCallbacksT));
    
    req.gmsHandle   = gmsHandle;
    req.credentials = credentials;
    req.nodeId      = nodeId;
    memcpy(&req.nodeName,nodeName, sizeof(SaNameT));
    req.sync        = CL_TRUE;
    req.address.iocPhyAddress.nodeAddress = clIocLocalAddressGet();
    if (clEoMyEoIocPortGet(&(req.address.iocPhyAddress.portId)) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clEoMyEoIocPortGet failed");
    }
    
    clGmsMutexLock(gms_instance_ptr->response_mutex);
    
    clLog(TRACE,CLM,NA, "Sending RMD to GMS server for cluster join");
    rc = cl_gms_cluster_join_rmd(&req, (ClUint32T)(timeout/NS_IN_MS), &res);
    clLog(TRACE,CLM,NA, "clGmsClusterJoin RMD returned");

    if (res != NULL)
    {
        if (rc == CL_OK)
        {
            rc = res->rc; /* propagate the server-side result, as the other calls do */
        }
        clHeapFree((void*)res);
    }
    
    clGmsMutexUnlock(gms_instance_ptr->response_mutex);
    if (clHandleCheckin(gmsHandleDb, gmsHandle) != CL_OK)
    {
        clLogError(CLM,NA,
                   "clHandleCheckin failed");
    }
    return rc;
}
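/*
 * Usage sketch (illustrative only; the synchronous variant blocks for up
 * to the given timeout, expressed in nanoseconds, and requires the same
 * mandatory eject callback as the async variant):
 *
 *     rc = clGmsClusterJoin(gmsHandle, &cb, credentials,
 *                           (ClTimeT)10000 * NS_IN_MS, localNodeId,
 *                           &localNodeName);
 */
/*-----------------------------------------------------------------------------
 * CLI: Group Info
 *---------------------------------------------------------------------------*/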
static ClRcT gmsCliGetGroupInfo (
                    CL_IN   ClUint32T argc,
                    CL_IN   ClCharT** argv,
                    CL_OUT  ClCharT** ret)
{
    ClNameT          groupName      = {0};
    ClGmsGroupIdT    groupId        = 0;
    ClGmsGroupInfoT  groupInfo      = {{0}};
    ClCharT          timeBuffer[256]= {0};
    ClTimeT          ti             = 0;
    ClRcT            rc             = CL_OK;
    ClGmsDbT        *thisViewDb     = NULL;


    /* Allocate maximum possible */ 
    *ret = clHeapAllocate(3000);
    if (*ret == NULL)
    {
        clLog(ERROR,GEN,NA,
                "Memory allocation failed");
        return CL_ERR_NO_MEMORY;
    }

    memset(*ret,0,3000);

    if (argc != 2)
    {
        _clGmsCliMakeError( ret, GROUP_INFO_USAGE );
        return CL_OK;
    }

    groupName.length = strlen(argv[1]);
    if ((groupName.length == 0) || (groupName.length >= sizeof(groupName.value)))
    {
        _clGmsCliMakeError(ret, "Invalid group name provided");
        return CL_OK;
    }

    strncpy(groupName.value,argv[1],groupName.length);

    /* Take the lock on the database */
    clGmsMutexLock(gmsGlobalInfo.nameIdMutex);

    rc = _clGmsNameIdDbFind(&gmsGlobalInfo.groupNameIdDb, &groupName, &groupId);

    clGmsMutexUnlock(gmsGlobalInfo.nameIdMutex);

    if (rc != CL_OK)
    {
        _clGmsCliMakeError(ret, "Given Group does not exist");
        return rc;
    }

    rc = _clGmsViewDbFind(groupId,&thisViewDb);
    if (rc != CL_OK)
    {
        return rc;
    }

    clGmsMutexLock(thisViewDb->viewMutex);
    memcpy(&groupInfo,&thisViewDb->groupInfo,sizeof(ClGmsGroupInfoT));
    clGmsMutexUnlock(thisViewDb->viewMutex);

    _clGmsCliPrint(ret, "------------------------------------------------\n");
    _clGmsCliPrint(ret, "Group Info for      : %s\n",argv[1]);
    _clGmsCliPrint(ret, "------------------------------------------------\n");

    _clGmsCliPrint(ret, "Group ID            : %d\n",
            groupInfo.groupId);

    _clGmsCliPrint(ret, "Is IOC Group        : %s\n",
            groupInfo.groupParams.isIocGroup == CL_TRUE ? "Yes" : "No");

    _clGmsCliPrint(ret, "No Of Members       : %d\n",
            groupInfo.noOfMembers);

    _clGmsCliPrint(ret, "Marked for Delete   : %s\n",
            groupInfo.setForDelete == CL_TRUE ? "Yes" : "No");

    _clGmsCliPrint(ret, "IOC MCast Address   : 0x%llx\n",
            groupInfo.iocMulticastAddr);

    ti = groupInfo.creationTimestamp/CL_GMS_NANO_SEC;
    _clGmsCliPrint(ret, "Creation Time Stamp : %s",
            ctime_r((const time_t*)&ti,timeBuffer));

    ti = groupInfo.lastChangeTimestamp/CL_GMS_NANO_SEC;
    _clGmsCliPrint(ret, "Last Changed Time   : %s\n",
            ctime_r((const time_t*)&ti,timeBuffer));

    return CL_OK;
}
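/*-----------------------------------------------------------------------------
 * CLI: All Groups Info
 *---------------------------------------------------------------------------*/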
static ClRcT gmsCliGroupsInfoListGet (
                    CL_IN   ClUint32T argc,
                    CL_IN   ClCharT** argv,
                    CL_OUT  ClCharT** ret)
{
    ClGmsGroupInfoT *groupInfoList  = NULL;
    ClUint32T        index          = 0;
    ClUint32T        numberOfGroups = 0;
    ClCharT          name[256]      = "";
    ClCharT          timeBuffer[256]= {0};
    ClTimeT          ti             = 0;
    ClInt32T maxBytes = 0;
    ClInt32T curBytes = 0;

    if (argc > 1)
    {
        _clGmsCliMakeError(ret, "Usage: allGroupsInfo\r");
        return CL_OK;
    }

    *ret = NULL;
    
    /* Take the lock on the database */
    clGmsMutexLock(gmsGlobalInfo.dbMutex);
    for (index = 0; index < gmsGlobalInfo.config.noOfGroups; index++)
    {
        if ((gmsGlobalInfo.db[index].view.isActive == CL_TRUE) &&
                (gmsGlobalInfo.db[index].viewType == CL_GMS_GROUP))
        {
            numberOfGroups++;
            ClGmsGroupInfoT *newList = realloc(groupInfoList,
                                    sizeof(ClGmsGroupInfoT)*numberOfGroups);
            if (newList == NULL)
            {
                clGmsMutexUnlock(gmsGlobalInfo.dbMutex);
                free(groupInfoList); /* realloc failure leaves the old block alive */
                return CL_ERR_NO_MEMORY;
            }
            groupInfoList = newList;
            memcpy(&groupInfoList[numberOfGroups-1],&gmsGlobalInfo.db[index].groupInfo, 
                    sizeof(ClGmsGroupInfoT));
        }
    }
    clGmsMutexUnlock(gmsGlobalInfo.dbMutex);

    clDebugPrintExtended(ret, &maxBytes, &curBytes, 
                         "-------------------------------------------------------------------------\n");
    clDebugPrintExtended(ret, &maxBytes, &curBytes,
                         "Total No Of Groups : %d\n",numberOfGroups);
    clDebugPrintExtended(ret, &maxBytes, &curBytes,
                         "-------------------------------------------------------------------------\n");
    if (numberOfGroups == 0)
    {
        goto done_ret;
    }

    clDebugPrintExtended(ret, &maxBytes, &curBytes,
                         "GroupName     GId  noOfMembers  setForDelete  IocMCAddr       creationTime\n");
    clDebugPrintExtended(ret, &maxBytes, &curBytes,
                         "-------------------------------------------------------------------------\n");
    for (index = 0; index < numberOfGroups; index++)
    {
        getNameString(&groupInfoList[index].groupName, name);
        ti = groupInfoList[index].creationTimestamp/CL_GMS_NANO_SEC;
        clDebugPrintExtended(ret, &maxBytes, &curBytes,
                             "%-13s %3d  %11d  %12s  %16llx %s",
                             name, groupInfoList[index].groupId, groupInfoList[index].noOfMembers,
                             groupInfoList[index].setForDelete == CL_TRUE ? "Yes": "No",
                             groupInfoList[index].iocMulticastAddr,
                             ctime_r((const time_t*)&ti,timeBuffer));
    }

done_ret:
    free(groupInfoList);
    return CL_OK;
}
/*
 * This is a modified service to send the sync message before leader
 * election takes place.
 */
static void gms_sync_init (void)
{
    ClRcT                           rc = CL_OK;
    ClGmsGroupSyncNotificationT     syncNotification = {0};
    ClUint32T                       noOfItems = 0;
    void                           *buf = NULL;
    ClGmsDbT                       *thisViewDb = NULL;
    ClUint32T                       i = 0;
    ClInt32T                        result = 0;

    clLog(TRACE,OPN,AIS, "sync_init function is called for ClovisGMS service");

    /*
     * Invoke sync here so that a newly joined node always has the latest
     * GMS group information.
     */

    /* Only the leader sends out the sync info (group 0 => default cluster group) */
    if (gmsGlobalInfo.config.thisNodeInfo.isCurrentLeader)
    {
        clLog(INFO,GROUPS,NA, "I am leader of the cluster. So sending Groups Sync message for the new node.");

        /* Send SYNC message with entire group database */
        syncNotification.noOfGroups = 0;
        syncNotification.noOfMembers = 0;

        clGmsMutexLock(gmsGlobalInfo.dbMutex);
        clLog(TRACE,CLM,NA, "Acquired mutex. Now gathering groups info");

        /*
         * Send updates only for the other groups; the nodes of the default
         * cluster group update each other directly.
         */
        for (i=1; i < gmsGlobalInfo.config.noOfGroups; i++)
        {
            if ((gmsGlobalInfo.db[i].view.isActive == CL_TRUE) &&
                (gmsGlobalInfo.db[i].viewType == CL_GMS_GROUP || (gmsGlobalInfo.db[i].viewType == CL_GMS_CLUSTER)) )
            {
                ClUint32T j = 0;
                /* These two conditions indicate that the group exists and
                 * is active.
                 */
                thisViewDb = &gmsGlobalInfo.db[i];

                /* Get the group Info for thisViewDb */
                ClGmsGroupInfoT *newGroupList = (ClGmsGroupInfoT*)realloc(syncNotification.groupInfoList,
                                                                          sizeof(ClGmsGroupInfoT)*(syncNotification.noOfGroups+1));
                if (newGroupList == NULL)
                {
                    clLog(ERROR,CLM,NA, "Could not allocate memory while gathering group information, synch failed!");
                    clGmsMutexUnlock(gmsGlobalInfo.dbMutex);
                    free(syncNotification.groupInfoList);
                    free(syncNotification.groupMemberList);
                    return;
                }
                syncNotification.groupInfoList = newGroupList;

                memcpy(&syncNotification.groupInfoList[syncNotification.noOfGroups], &(thisViewDb->groupInfo),sizeof(ClGmsGroupInfoT));
                syncNotification.noOfGroups++;

                /* Get the list of members of this group */
                rc = _clGmsViewGetCurrentViewNotification(thisViewDb, &buf, &noOfItems);
                if (rc != CL_OK)
                {
                    clLog(ERROR,CLM,NA, "_clGmsViewGetCurrentViewNotification failed while sending SYNC message. rc = 0x%x\n",rc);
                    clGmsMutexUnlock(gmsGlobalInfo.dbMutex);
                    free(syncNotification.groupInfoList);
                    free(syncNotification.groupMemberList);
                    return;
                }

                clLog(TRACE,CLM,NA, "group info for group [%d]; with members [%d]", thisViewDb->groupInfo.groupId, noOfItems);

                if ((noOfItems == 0) || (buf == NULL))
                {
                    if (buf != NULL)
                    {
                        clHeapFree(buf); /* avoid leaking an empty notification buffer */
                    }
                    buf = NULL;
                    noOfItems = 0;
                    continue;
                }

                ClGmsViewNodeT *newMemberList = (ClGmsViewNodeT*)realloc(syncNotification.groupMemberList,
                                                                         sizeof(ClGmsViewNodeT)*(noOfItems+syncNotification.noOfMembers));
                if (newMemberList == NULL)
                {
                    clLog(ERROR,CLM,NA, "Could not allocate memory while gathering group information");
                    clGmsMutexUnlock(gmsGlobalInfo.dbMutex);
                    clHeapFree(buf);
                    free(syncNotification.groupInfoList);
                    free(syncNotification.groupMemberList);
                    return;
                }
                syncNotification.groupMemberList = newMemberList;

                memset(&syncNotification.groupMemberList[syncNotification.noOfMembers], 0, sizeof(ClGmsViewNodeT)*noOfItems);
                for (j = 0; j < noOfItems; j++)
                {
                    memcpy(&syncNotification.groupMemberList[syncNotification.noOfMembers].viewMember.groupMember,
                           &( ( (ClGmsGroupNotificationT*)buf )[j].groupMember ),
                           sizeof(ClGmsGroupMemberT) );
                    syncNotification.groupMemberList[syncNotification.noOfMembers].viewMember.groupData.groupId =
                        thisViewDb->groupInfo.groupId;
                    clLog(DBG,GEN,NA, "Sync group Id [%d]", thisViewDb->groupInfo.groupId);
                    syncNotification.noOfMembers++;
                }

                clHeapFree(buf);
                buf = NULL;
                noOfItems = 0;
            }
        }

        clGmsMutexUnlock(gmsGlobalInfo.dbMutex);
        clLog(DEBUG,CLM,NA, "Gathered group information for [%d] groups with [%d] members. Now sending it over multicast", 
              syncNotification.noOfGroups, syncNotification.noOfMembers);

        /* Send the multicast message */
        if(syncNotification.noOfGroups > 0)
        {
            result = clGmsSendMsg(NULL, 0x0, CL_GMS_SYNC_MESSAGE, 0, 0, (void *)&syncNotification);
            if (result < 0)
            {
                clLog(ERROR,GROUPS,NA, "Openais Sync Message Send failed");
            }

            clLog(TRACE,CLM,NA, "Group information is sent over multicast");

            /* Free the pointers used to gather the group member info.
             * Since the memory was allocated with realloc, release it
             * with plain free().
             */
            free(syncNotification.groupInfoList);
            if (syncNotification.noOfMembers > 0)
            {
                free(syncNotification.groupMemberList);
            }
        }
    }
    else
    {
        clLog(CRITICAL,CLM,NA, "This node is not the leader, so expecting the sync message from the leader");
    }

    return;
}