/*
 * Drives the "move queues off this node" state machine used while the
 * message service finalize is blocked.  Operates on globals
 * (gMsgMoveStatus, gMsgNumOfOpenQs, gQMoveDestNode, gFinBlockCond);
 * presumably the caller holds the lock protecting them -- TODO confirm
 * at the call sites.
 *
 * status == MSG_MOVE_FIN_BLOCKED: choose the next cluster node and start
 *   moving closed queues there.  In a single-node cluster the move is
 *   trivially complete and the finalize waiter is woken immediately.
 * status == MSG_MOVE_DONE: one open queue has closed; when the count
 *   reaches zero the move is complete and the waiter is woken.
 *
 * Always returns CL_OK; internal failures are only logged.
 */
ClRcT clMsgFinBlockStatusSet(ClMsgMoveStatusT status)
{
    ClRcT rc;
    ClIocNodeAddressT node;


    switch(status)
    {
        case MSG_MOVE_FIN_BLOCKED :

            /* Already in the blocked state; nothing more to start. */
            if(gMsgMoveStatus == MSG_MOVE_FIN_BLOCKED)
            {
                goto out;
            }

            rc = clMsgNextNodeGet(gLocalAddress, &node);
            if(rc != CL_OK)
            {
                clLogError("QUE", "FAI", "Failed to get the next node's address. error code [0x%x].", rc);
                goto error_out;
            }
            else if(node == gLocalAddress)
            {
                /* Single-node cluster: no destination to move queues to,
                 * so the move completes immediately. */
                clLogDebug("QUE", "FAI", "Only one/this node is present in the cluster.");
                gMsgMoveStatus = MSG_MOVE_DONE;
                clOsalCondSignal(gFinBlockCond);
                goto error_out;
            }

            gQMoveDestNode = node;
            gMsgMoveStatus = MSG_MOVE_FIN_BLOCKED;

            /* Find closed queues and move */
            clMsgFailoverQueuesMove(gQMoveDestNode, &gMsgNumOfOpenQs);


error_out:
out:
            /* Open queues remain: wait for each of them to close; each
             * close re-enters this function with MSG_MOVE_DONE. */
            if(gMsgNumOfOpenQs != 0)
                break;

            /* fallthrough -- no open queues left, finish the move now */
        case MSG_MOVE_DONE:
            if(gMsgNumOfOpenQs != 0)
                gMsgNumOfOpenQs--;
            if(gMsgNumOfOpenQs == 0)
            {
                /* Last open queue is gone; wake the finalize waiter. */
                gMsgMoveStatus = MSG_MOVE_DONE;
                clOsalCondSignal(gFinBlockCond);
            }
            break;

        default:
            break;
    }

    return CL_OK;
}
/* Esempio n. 2 ("Example no. 2") -- stray non-code extraction artifact,
 * kept as a comment so the file remains compilable. */
/*
 * IOC notification handler for the message service.  Takes a reference on
 * the service (gClMsgSvcRefCnt) so finalize cannot complete mid-cleanup,
 * dispatches node/component departure cleanup, then drops the reference
 * and signals the finalize condition variable.
 */
static void clMsgNotificationReceiveCallback(ClIocNotificationIdT event, ClPtrT pArg, ClIocAddressT *pAddr)
{
    /* Bail out if the message server has already been finalized. */
    clOsalMutexLock(&gClMsgFinalizeLock);
    if(!gClMsgInit)
    {
        clOsalMutexUnlock(&gClMsgFinalizeLock);
        return;
    }
    gClMsgSvcRefCnt++;
    clOsalMutexUnlock(&gClMsgFinalizeLock);

    /* A node is considered gone when its msg-server port died, or the
     * node left / its link went down. */
    if((event == CL_IOC_COMP_DEATH_NOTIFICATION &&
        pAddr->iocPhyAddress.portId == CL_IOC_MSG_PORT)
       || event == CL_IOC_NODE_LEAVE_NOTIFICATION
       || event == CL_IOC_NODE_LINK_DOWN_NOTIFICATION)
    {
        clMsgNodeLeftCleanup(pAddr);
    }
    else if(event == CL_IOC_COMP_DEATH_NOTIFICATION &&
            pAddr->iocPhyAddress.portId > CL_IOC_RESERVED_PORTS)
    {
        /* A non-reserved (application) component died on some node. */
        clMsgCompLeftCleanup(pAddr);
    }

    /* Drop the reference and wake a finalize waiting on the refcount. */
    clOsalMutexLock(&gClMsgFinalizeLock);
    gClMsgSvcRefCnt--;
    clOsalCondSignal(&gClMsgFinalizeCond);
    clOsalMutexUnlock(&gClMsgFinalizeLock);
}
/*
 * GMS cluster-track callback: logs the membership snapshot, lets
 * cpmHandleGroupInformation() process the new view, then signals
 * cpmGmsCondVar so any thread waiting for GMS data can proceed.
 *
 * Fixes over the previous version:
 *  - The result of the second clOsalMutexLock() is checked BEFORE the
 *    shared flag trackCallbackInProgress is cleared, so shared state is
 *    never mutated when the lock was not actually acquired.
 *  - If clOsalCondSignal() fails, the mutex is released before the error
 *    macro jumps to `failure`; previously that path leaked the lock.
 */
void cpmClusterTrackCallBack(ClGmsHandleT handle, const ClGmsClusterNotificationBufferT *clusterNotificationBuffer, ClUint32T nMembers, ClRcT rc)
{
    clLogMultiline(CL_LOG_SEV_DEBUG, CPM_LOG_AREA_CPM, CPM_LOG_CTX_CPM_GMS,
                   "Received cluster track callback from GMS on node [%s] -- \n"
                   "Leader : [%d] \n"
                   "Deputy : [%d] (-1 -> No deputy) \n"
                   "Leader changed ? [%s] \n"
                   "Number of nodes in callback : [%d]",
                   gpClCpm->pCpmLocalInfo->nodeName,
                   clusterNotificationBuffer->leader,
                   clusterNotificationBuffer->deputy,
                   clusterNotificationBuffer->leadershipChanged ? "Yes" : "No",
                   clusterNotificationBuffer->numberOfItems);

    /* Mark the callback in progress while the group view is handled. */
    clOsalMutexLock(&gpClCpm->cpmGmsMutex);
    gpClCpm->trackCallbackInProgress = CL_TRUE;
    clOsalMutexUnlock(&gpClCpm->cpmGmsMutex);
    cpmHandleGroupInformation(clusterNotificationBuffer);

    rc = clOsalMutexLock(&gpClCpm->cpmGmsMutex);
    CL_CPM_CHECK_1(CL_LOG_SEV_ERROR, CL_CPM_LOG_1_OSAL_MUTEX_LOCK_ERR, rc, rc,
                   CL_LOG_HANDLE_APP);
    gpClCpm->trackCallbackInProgress = CL_FALSE;
    rc = clOsalCondSignal(&gpClCpm->cpmGmsCondVar);
    if (CL_OK != rc)
    {
        /* Release the mutex before the error macro bails out below,
         * otherwise the lock would be leaked on this path. */
        clOsalMutexUnlock(&gpClCpm->cpmGmsMutex);
    }
    CL_CPM_CHECK_1(CL_LOG_SEV_ERROR, CL_CPM_LOG_1_OSAL_COND_SIGNAL_ERR, rc, rc,
                   CL_LOG_HANDLE_APP);
    rc = clOsalMutexUnlock(&gpClCpm->cpmGmsMutex);
    CL_CPM_CHECK_1(CL_LOG_SEV_ERROR, CL_CPM_LOG_1_OSAL_MUTEX_UNLOCK_ERR, rc, rc,
                   CL_LOG_HANDLE_APP);
failure:
    return;
}
/*
 * Appends a trigger to the recovery work list under the list mutex and
 * wakes the recovery thread waiting on the list condition variable.
 * Always returns CL_OK.
 */
static __inline__ ClRcT clAmsEntityTriggerRecoveryListAdd(ClAmsEntityTriggerT *pEntityTrigger)
{
    clOsalMutexLock(&gClAmsEntityTriggerRecoveryCtrl.list.mutex);

    clListAddTail(&pEntityTrigger->list, &gClAmsEntityTriggerRecoveryCtrl.list.list);
    gClAmsEntityTriggerRecoveryCtrl.list.numElements++;

    /* New work is available -- notify the recovery thread. */
    clOsalCondSignal(&gClAmsEntityTriggerRecoveryCtrl.list.cond);
    clOsalMutexUnlock(&gClAmsEntityTriggerRecoveryCtrl.list.mutex);

    return CL_OK;
}
/*
 * Stops the recovery thread if it is running: clears the `running` flag
 * under the list mutex, wakes the thread so it observes the flag, and
 * joins it.  A no-op when the thread was never started.  Returns CL_OK.
 */
static ClRcT clAmsEntityTriggerRecoveryThreadDelete(void)
{
    if(!gClAmsEntityTriggerRecoveryCtrl.running
       ||
       !gClAmsEntityTriggerRecoveryCtrl.task)
    {
        return CL_OK;
    }

    clOsalMutexLock(&gClAmsEntityTriggerRecoveryCtrl.list.mutex);
    gClAmsEntityTriggerRecoveryCtrl.running = CL_FALSE;
    /* Wake the thread so it notices the shutdown request. */
    clOsalCondSignal(&gClAmsEntityTriggerRecoveryCtrl.list.cond);
    clOsalMutexUnlock(&gClAmsEntityTriggerRecoveryCtrl.list.mutex);

    clOsalTaskJoin(gClAmsEntityTriggerRecoveryCtrl.task);
    return CL_OK;
}
/* Esempio n. 6 ("Example no. 6") -- stray non-code extraction artifact,
 * kept as a comment so the file remains compilable. */
/*
 * Container delete callback for RMD send records.  For async records it
 * cancels any pending timer and frees the record; for sync records it
 * signals then destroys the sync condition variable (the sync caller owns
 * freeing the record).  The flag check exists because the sync path also
 * runs through this code (fix for Bug 3748).
 */
static void sendHashDeleteCallBack(ClCntKeyHandleT userKey,
                                   ClCntDataHandleT userData)
{
    ClRmdRecordSendT *record = (ClRmdRecordSendT *) userData;

    CL_FUNC_ENTER();

    if (!userData)
    {
        CL_FUNC_EXIT();
        return;
    }

    if (record->flags & CL_RMD_CALL_ASYNC)
    {
        /* Async record: cancel the outstanding timer, then release it. */
        if (record->recType.asyncRec.timerID)
        {
            clTimerDeleteAsync(&record->recType.asyncRec.timerID);
        }
        clHeapFree(record);
    }
    else
    {
        /* Sync record: wake any thread still in clOsalCondWait() before
         * destroying the condition variable. */
        clOsalCondSignal(&record->recType.syncRec.syncCond);
        clOsalCondDestroy(&record->recType.syncRec.syncCond);
    }

    CL_FUNC_EXIT();
}
/*
 * Finalizes the log utility library: tells the flusher thread to stop
 * (clears gUtilLibInitialized and signals gLogCond), joins it, optionally
 * finalizes the log library proper, and destroys the mutex/condvar.
 *
 * @param logLibInit  CL_TRUE when clLogFinalize(1) should also be called.
 * @return CL_OK, or the mutex-lock error code if the lock cannot be taken.
 */
ClRcT
clLogUtilLibFinalize(ClBoolT  logLibInit)
{
    ClRcT  rc = clOsalMutexLock(&gLogMutex);
    if( CL_OK != rc )
    {
        return rc;
    }

    /* Mark the lib as down and wake the flusher so it can exit. */
    gUtilLibInitialized = CL_FALSE;
    clLogDebugFilterFinalize();
    clOsalCondSignal(&gLogCond);
    clOsalMutexUnlock(&gLogMutex);

    /* Wait until the flusher thread has drained and exited. */
    clOsalTaskJoin(taskId);

    /*
     * Once all pending records have been flushed, the log library itself
     * can be finalized, if the caller asked for it.
     */
    if( CL_TRUE == logLibInit )
    {
        clLogFinalize(1);
    }

    /*
     * Only the synchronization objects are destroyed here; other
     * initialized libraries are finalized by the main EO function.
     */
    clOsalCondDestroy(&gLogCond);
    clOsalMutexDestroy(&gLogMutex);
    return CL_OK;
}
/*
 * This function will be called either from GMS track callback or 
 * IOC notification, it receives the master address & deputy and 
 * process the same
 */
/*
 * This function will be called either from GMS track callback or
 * IOC notification, it receives the master address & deputy and
 * process the same.
 *
 * On a master change it re-registers the checkpoint transparency-layer
 * (TL) entry, refreshes the IOC callbacks, wakes any thread waiting for
 * the master address, and notifies the previous master (if reachable) of
 * the new leader/deputy pair.  Returns the last rc produced on the path
 * taken; most intermediate failures are not propagated -- NOTE(review):
 * rc is overwritten several times below, verify callers only need the
 * final update status.
 */
ClRcT
clCkptMasterAddressUpdate(ClIocNodeAddressT  leader, 
                          ClIocNodeAddressT  deputy)
{
    ClIocTLInfoT tlInfo    = {0};
    SaNameT      name      = {0};
    ClRcT        rc        = CL_OK;
    ClBoolT      updateReq = CL_FALSE;

    /*
     * Check whether master or deputy address has changed.
     */
    if(gCkptSvr->masterInfo.masterAddr != leader)
    {
        /*
         * Master address changed.
         */
        updateReq = CL_TRUE;
        gCkptSvr->masterInfo.prevMasterAddr = gCkptSvr->masterInfo.masterAddr;    
        gCkptSvr->masterInfo.masterAddr     = leader;    
        
        /*
         * Deregister the old TL entry.
         */
        if(gCkptSvr->masterInfo.compId != CL_CKPT_UNINIT_VALUE)
        {
            rc = clIocTransparencyDeregister(gCkptSvr->masterInfo.compId);
        }
        else
        {
            /* First update: learn our own component id from CPM so the
             * TL entry can be registered below. */
            clCpmComponentNameGet(gCkptSvr->amfHdl, &name);
            clCpmComponentIdGet(gCkptSvr->amfHdl, &name, 
                                &gCkptSvr->compId);
            gCkptSvr->masterInfo.compId = gCkptSvr->compId;
        }

        /*
         * Update the TL.  Only the node that IS the new master registers
         * the active logical address.
         */
        if(gCkptSvr->masterInfo.masterAddr == clIocLocalAddressGet())
        {
            ckptOwnLogicalAddressGet(&tlInfo.logicalAddr);
            tlInfo.compId                   = gCkptSvr->compId;
            gCkptSvr->masterInfo.compId     = gCkptSvr->compId;
            tlInfo.contextType              = CL_IOC_TL_GLOBAL_SCOPE;
            tlInfo.physicalAddr.nodeAddress = clIocLocalAddressGet();
            tlInfo.physicalAddr.portId      = CL_IOC_CKPT_PORT;
            tlInfo.haState                  = CL_IOC_TL_ACTIVE;
            rc = clIocTransparencyRegister(&tlInfo);
        }
        /*
         * update the ioc notify callbacks for the new master address 
         * Once address update is over, then we have uninstall the registered
         * callback and reregistered to new master address
         */
        clCkptIocCallbackUpdate();
    }
    
    if(gCkptSvr->masterInfo.deputyAddr != deputy)
    {
        /*
         * Deputy address has changed.
         */
        updateReq = CL_TRUE;
        gCkptSvr->masterInfo.deputyAddr = deputy ;    
    }

    /*
     * Signal the receipt of master and deputy addresses. 
     */
    clOsalMutexLock(gCkptSvr->mutexVar);
    if(gCkptSvr->condVarWaiting == CL_TRUE)
    {
        gCkptSvr->condVarWaiting = CL_FALSE;
        clOsalCondSignal(gCkptSvr->condVar);
    }
    clOsalMutexUnlock(gCkptSvr->mutexVar);

    /* 
     * Update the old master(if existing) with the new leader addresses.
     * Skipped when there was no previous master (-1 / uninitialized).
     */
    if((updateReq == CL_TRUE) && 
       (((ClInt32T) gCkptSvr->masterInfo.prevMasterAddr != -1) &&
       (gCkptSvr->masterInfo.prevMasterAddr != CL_CKPT_UNINIT_ADDR)))
    {
        rc = ckptIdlHandleUpdate(gCkptSvr->masterInfo.prevMasterAddr,
                                 gCkptSvr->ckptIdlHdl,0);
        /* Fire-and-forget async notification to the previous master. */
        rc = VDECL_VER(clCkptLeaderAddrUpdateClientAsync, 4, 0, 0)(gCkptSvr->ckptIdlHdl,
                                    gCkptSvr->masterInfo.masterAddr,
                                    gCkptSvr->masterInfo.deputyAddr,
                                    NULL,0);
    }
    clLogNotice("ADDR", "UPDATE", "CKPT master [%d], deputy [%d]",
                gCkptSvr->masterInfo.masterAddr, gCkptSvr->masterInfo.deputyAddr);
    return rc;
}
/*
 * Writes one log record either directly (when the log stream is up and no
 * flush is pending) or into the global deferred ring buffer gLogMsgArray,
 * waking the flusher thread (gLogCond) when enough records accumulate.
 *
 * @param handle/severity/serviceId/msgId/pMsgHeader  record metadata.
 * @param deferred  force the deferred path (always true for the server).
 * @param pFmtStr/vaargs  printf-style record body.
 * @return CL_OK, or a mutex error code on lock/unlock failure.
 *
 * NOTE(review): the static locals and the readIdx/writeIdx/overWriteFlag
 * ring state are shared; callers rely on gLogMutex for exclusion except
 * on the pre-init path, which is assumed single-threaded.
 */
static ClRcT
logVWriteDeferred(ClHandleT       handle,
                  ClLogSeverityT  severity,
                  ClUint16T       serviceId,
                  ClUint16T       msgId,
                  ClCharT         *pMsgHeader,
                  ClBoolT         deferred,
                  ClCharT         *pFmtStr,
                  va_list         vaargs)
{
    ClRcT    rc     = CL_OK;
    ClBoolT signalFlusher = CL_FALSE;
    static ClBoolT deferredFlag = CL_FALSE;   /* a deferred write has happened */
    static ClBoolT flushPending = CL_TRUE;    /* buffered records await flush */
    ClBoolT initialRecord = CL_FALSE;
    ClBoolT unlock = CL_TRUE;                 /* do we own gLogMutex? */

    if( gUtilLibInitialized == CL_FALSE )
    {
        /*
         * Since log util lib/EO itself is not initialized,
         * we can safely assume being single threaded at this point
         * and just save the log record for a later flush.
         */
        initialRecord = CL_TRUE;
        goto out_store;
    }

    /* Take the mutex */
    rc = clOsalMutexLock(&gLogMutex);
    if(CL_GET_ERROR_CODE(rc) == CL_ERR_INUSE)
    {
        /*
         * Same thread trying to lock as a potential log write loop.
         * We avoid taking the lock here.
         */
        unlock = CL_FALSE;
        rc = CL_OK;
    }

    if( CL_OK != rc )
    {
        fprintf(stderr, "failed to get the lock [0x%x]", rc);
        goto failure;
    }

    /* The log server always defers its own writes. */
    if(gClLogServer)
        deferred = CL_TRUE;

    /*
     * If log is up and there are no pending flushes, write directly
     * to avoid garbled logs because of per client deferred writes
     * We go the deferred way for log server writes whenever they are enabled/fired
     */
    if(!deferred && CL_LOG_HANDLE_APP != CL_HANDLE_INVALID_VALUE)
    {
        if(flushPending && unlock)
        {
            /* Drain older buffered records first so output stays ordered. */
            clLogFlushRecords();
            flushPending = CL_FALSE;
        }
        if(unlock)
            clOsalMutexUnlock(&gLogMutex);
        return clLogVWriteAsyncWithHeader(handle, severity, serviceId, msgId, pMsgHeader, vaargs);
    }

    if(!unlock)
    {
        /*
         * Skip the recursive deferred log here.
         */
        return CL_OK;
    }

    /* Access the index: store the record into the ring buffer slot. */
    out_store:
    gLogMsgArray[writeIdx].handle    = handle;
    gLogMsgArray[writeIdx].severity  = severity;
    gLogMsgArray[writeIdx].serviceId = serviceId;
    gLogMsgArray[writeIdx].msgId     = msgId;
    gLogMsgArray[writeIdx].msgHeader[0] = 0;
    if(pMsgHeader)
    {
        memset(gLogMsgArray[writeIdx].msgHeader, 0, sizeof(gLogMsgArray[writeIdx].msgHeader));
        strncpy(gLogMsgArray[writeIdx].msgHeader, pMsgHeader,
                sizeof(gLogMsgArray[writeIdx].msgHeader)-1);
    }
    vsnprintf(gLogMsgArray[writeIdx].msg, CL_LOG_MAX_MSG_LEN, pFmtStr, vaargs);
    ++writeIdx;
    writeIdx = writeIdx % CL_LOG_MAX_NUM_MSGS;
    if( overWriteFlag ) 
    {
        /* Ring is full: the oldest unread record is being overwritten. */
        ++readIdx;
        readIdx %= CL_LOG_MAX_NUM_MSGS;
    }
    if( (readIdx == writeIdx) && (overWriteFlag == 0) )
    {
        overWriteFlag = 1;
    }
    
    /* Pre-init path holds no lock and has no flusher to signal. */
    if(initialRecord)
        return CL_OK;

    if(deferred) 
    {
        if(!deferredFlag)
            deferredFlag = CL_TRUE;
        if(!flushPending)
            flushPending = CL_TRUE;
    }

    /* Wake the flusher when the ring wrapped or a batch is ready. */
    if(deferredFlag 
       && 
       (signalFlusher || overWriteFlag != 0 || (writeIdx % CL_LOG_FLUSH_FREQ == 0)) )
    {
        clOsalCondSignal(&gLogCond);
    }
    
    if(unlock)
    {
        rc = clOsalMutexUnlock(&gLogMutex);
        if( CL_OK != rc )
        {
            goto failure;
        }
    }

    return CL_OK;

    failure:
    return rc;
}