/*
 * CLI helper: appends one formatted line per job of every active
 * transaction known to this agent into the caller-supplied buffer.
 *
 * \param outMsg  Buffer handle the formatted rows are marshalled into.
 * \return CL_OK on success (including an empty transaction map),
 *         otherwise the container error code.
 */
ClRcT _clTxnAgentAppendActiveJob(CL_IN ClBufferHandleT outMsg)
{
    ClRcT rc = CL_OK;
    ClTxnDefnT *pTxnDefn;
    ClCntNodeHandleT currTxnNode;
    ClCharT pOutMsg[1024];

    CL_FUNC_ENTER();

    rc = clCntFirstNodeGet(clTxnAgntCfg->activeTxnMap, &currTxnNode);
    while (rc == CL_OK)
    {
        ClCntNodeHandleT nodeTxn;
        ClCntNodeHandleT currJobNode;
        ClTxnAppJobDefnT *pNewTxnJob;

        /* Retrieve the transaction definition stored at this node. */
        rc = clCntNodeUserDataGet(clTxnAgntCfg->activeTxnMap, currTxnNode,
                                  (ClCntDataHandleT *)&pTxnDefn);
        if (CL_OK != rc)
        {
            clLogError("AGT", "CLI", "User data get failed with error [0x%x]", rc);
            return rc;
        }

        rc = clCntFirstNodeGet(pTxnDefn->jobList, &currJobNode);
        while (rc == CL_OK)
        {
            ClCntNodeHandleT nodeJob;

            rc = clCntNodeUserDataGet(pTxnDefn->jobList, currJobNode,
                                      (ClCntDataHandleT *)&pNewTxnJob);
            /* FIX: this return code was previously ignored; on failure
             * pNewTxnJob would have been dereferenced uninitialized below. */
            if (CL_OK != rc)
            {
                clLogError("AGT", "CLI", "User data get failed with error [0x%x]", rc);
                return rc;
            }

            pOutMsg[0] = '\0';
            /* FIX: bounded snprintf (was sprintf) protects the 1KB scratch
             * buffer; the row format is unchanged. */
            snprintf(pOutMsg, sizeof(pOutMsg), " 0x%x:0x%x\t| 0x%x\t| %30s \n",
                     pTxnDefn->serverTxnId.txnMgrNodeAddress,
                     pTxnDefn->serverTxnId.txnId,
                     pNewTxnJob->jobId.jobId,
                     gCliTxnStatusStr[pNewTxnJob->currentState]);
            /* NOTE(review): assumes currentState is always a valid index into
             * gCliTxnStatusStr -- confirm against the job state enum. */
            /* FIX: pass the array itself (decays to element pointer) instead
             * of &pOutMsg, which is a pointer-to-array of the wrong type. */
            clXdrMarshallArrayClUint8T((ClUint8T *)pOutMsg,
                                       strlen(pOutMsg), outMsg, 0);

            nodeJob = currJobNode;
            rc = clCntNextNodeGet(pTxnDefn->jobList, nodeJob, &currJobNode);
        }
        nodeTxn = currTxnNode;
        rc = clCntNextNodeGet(clTxnAgntCfg->activeTxnMap, nodeTxn, &currTxnNode);
    }
    /* Running off the end of either container is the normal exit path. */
    if (CL_GET_ERROR_CODE(rc) == CL_ERR_NOT_EXIST)
        rc = CL_OK;

    CL_FUNC_EXIT();
    return (rc);
}
/*
 * Empties a message queue: deletes every node from each of the queue's
 * per-priority containers.  Freeing of the queued payloads is left to the
 * delete callbacks registered on the containers.
 */
void clMsgQueueEmpty(ClMsgQueueInfoT *pQInfo)
{
    ClRcT rc;
    ClCntNodeHandleT nodeHandle = NULL, nextNodeHandle = NULL;
    ClUint32T i;
    ClRcT retCode;

    for(i = 0; i < CL_MSG_QUEUE_PRIORITIES; i++)
    {
        rc = clCntFirstNodeGet(pQInfo->pPriorityContainer[i], &nodeHandle);
        /* CL_ERR_NOT_EXIST: this priority level holds no messages. */
        if(CL_GET_ERROR_CODE(rc) == CL_ERR_NOT_EXIST)
            continue;
        /* NOTE(review): a first-node failure other than CL_ERR_NOT_EXIST
         * still enters the loop with nodeHandle possibly NULL -- presumably
         * cannot happen for a valid queue; verify. */
        do
        {
            /* Fetch the successor before deleting the current node so the
             * iteration survives the deletion. */
            rc = clCntNextNodeGet(pQInfo->pPriorityContainer[i], nodeHandle, &nextNodeHandle);
            if(CL_GET_ERROR_CODE(rc) != CL_ERR_NOT_EXIST && rc != CL_OK)
            {
                clLogError("QUE", "EMT", "Failed to get the next node from container. error code [0x%x].", rc);
                break;
            }
            /* Deletion failure is logged but does not stop the drain. */
            retCode = clCntNodeDelete(pQInfo->pPriorityContainer[i], nodeHandle);
            if(retCode != CL_OK)
                clLogError("QUE", "EMT", "Failed to delete a node from container. error code [0x%x].", retCode);
            nodeHandle = nextNodeHandle;
        /* CL_ERR_NOT_EXIST from the next-node lookup means the node just
         * deleted was the last one in this container. */
        }while(CL_GET_ERROR_CODE(rc) != CL_ERR_NOT_EXIST);
    }
}
/*
 * CLI helper: appends one formatted line per registered component service
 * (with its 1PC/2PC capability) into the caller-supplied buffer.
 *
 * \param outMsg  Buffer handle the formatted rows are marshalled into.
 * \return CL_OK on success (including an empty service map),
 *         otherwise the container error code.
 */
ClRcT _clTxnAgentAppendServices(CL_IN ClBufferHandleT outMsg)
{
    ClRcT rc = CL_OK;
    ClTxnAgentCompServiceInfoT *pCompInstance;
    ClCntNodeHandleT currNode;
    ClCharT pOutMsg[1024];

    CL_FUNC_ENTER();

    rc = clCntFirstNodeGet(clTxnAgntCfg->compServiceMap, &currNode);
    while (rc == CL_OK)
    {
        ClCntNodeHandleT node;

        /* Retrieve the service descriptor stored at this node. */
        rc = clCntNodeUserDataGet(clTxnAgntCfg->compServiceMap, currNode,
                                  (ClCntDataHandleT *)&pCompInstance);
        if (CL_OK != rc)
        {
            clLogError("AGT", "CLI", "User data get failed with error [0x%x]", rc);
            return rc;
        }

        /* A capability that is neither 2PC nor 1PC produces an empty row
         * (zero bytes marshalled), matching the original behavior. */
        pOutMsg[0] = '\0';
        /* FIX: bounded snprintf (was sprintf) protects the scratch buffer. */
        if (pCompInstance->serviceCapability == CL_TXN_AGENT_SERVICE_2PC)
        {
            snprintf(pOutMsg, sizeof(pOutMsg), " %6d\t| 2PC Capable \n",
                     pCompInstance->serviceType);
        }
        else if (pCompInstance->serviceCapability == CL_TXN_AGENT_SERVICE_1PC)
        {
            snprintf(pOutMsg, sizeof(pOutMsg), " %6d\t| 1PC Capable \n",
                     pCompInstance->serviceType);
        }
        /* FIX: pass the array itself (decays to element pointer) instead of
         * &pOutMsg, which is a pointer-to-array of the wrong type. */
        clXdrMarshallArrayClUint8T((ClUint8T *)pOutMsg,
                                   strlen(pOutMsg), outMsg, 0);

        node = currNode;
        rc = clCntNextNodeGet(clTxnAgntCfg->compServiceMap, node, &currNode);
    }
    /* Running off the end of the container is the normal exit path. */
    if (CL_GET_ERROR_CODE(rc) == CL_ERR_NOT_EXIST)
        rc = CL_OK;

    CL_FUNC_EXIT();
    return (rc);
}
/* Cleanup Functions */ void clCorMoIdToNodeNameTablesCleanUp(void) { ClCntNodeHandleT nodeH = 0; ClCntNodeHandleT nextNodeH = 0; clCntFirstNodeGet(nodeNameToMoIdTableHandle, &nodeH); while(nodeH) { clCntNextNodeGet(nodeNameToMoIdTableHandle, nodeH, &nextNodeH); clCntNodeDelete(nodeNameToMoIdTableHandle, nodeH); nodeH = nextNodeH; } nodeH = 0; clCntFirstNodeGet(moIdToNodeNameTableHandle, &nodeH); while(nodeH) { clCntNextNodeGet(moIdToNodeNameTableHandle, nodeH, &nextNodeH); clCntNodeDelete(moIdToNodeNameTableHandle, nodeH); nodeH = nextNodeH; } }
/*
 * Looks up the CPM/L (per-node) entry whose registered nodeId matches the
 * given id.  Only meaningful on a global CPM, which owns the cluster-wide
 * node table.
 *
 * On success returns CL_OK with *cpmL pointing at the matching entry;
 * returns CL_ERR_DOESNT_EXIST when no registered node matches.  On
 * container errors the CL_CPM_CHECK_* macros log and (presumably) jump to
 * the `failure` label -- verify against the macro definitions -- which
 * NULLs *cpmL and returns the container error code.
 */
ClUint32T cpmNodeFindByNodeId(ClUint32T nodeId, ClCpmLT **cpmL)
{
    ClRcT rc = CL_OK;
    ClCntNodeHandleT cpmNode = 0;
    ClUint32T cpmLCount = 0;
    ClCpmLT *tempCpmL = NULL;
    ClUint32T found = 0;

    cpmLCount = gpClCpm->noOfCpm;
    if (gpClCpm->pCpmConfig->cpmType == CL_CPM_GLOBAL && cpmLCount != 0)
    {
        rc = clCntFirstNodeGet(gpClCpm->cpmTable, &cpmNode);
        CL_CPM_CHECK_2(CL_LOG_SEV_ERROR, CL_CPM_LOG_2_CNT_FIRST_NODE_GET_ERR,
                       "CPM-L", rc, rc, CL_LOG_HANDLE_APP);
        /* NOTE(review): iteration is bounded by noOfCpm rather than by the
         * container itself -- assumes the count and table stay in sync. */
        while (cpmLCount)
        {
            rc = clCntNodeUserDataGet(gpClCpm->cpmTable, cpmNode,
                                      (ClCntDataHandleT *) &tempCpmL);
            CL_CPM_CHECK_1(CL_LOG_SEV_ERROR,
                           CL_CPM_LOG_1_CNT_NODE_USR_DATA_GET_ERR, rc, rc,
                           CL_LOG_HANDLE_APP);
            /* Entries without local info (node configured but never
             * registered) cannot match a runtime nodeId and are skipped. */
            if (tempCpmL->pCpmLocalInfo)
            {
                if ((ClUint32T) tempCpmL->pCpmLocalInfo->nodeId == nodeId)
                {
                    *cpmL = tempCpmL;
                    found = 1;
                    break;
                }
            }
            cpmLCount--;
            if (cpmLCount)
            {
                rc = clCntNextNodeGet(gpClCpm->cpmTable, cpmNode, &cpmNode);
                CL_CPM_CHECK_2(CL_LOG_SEV_ERROR,
                               CL_CPM_LOG_2_CNT_NEXT_NODE_GET_ERR, "CPM-L",
                               rc, rc, CL_LOG_HANDLE_APP);
            }
        }
    }
    if(found == 1)
        return CL_OK;
    else
        return CL_CPM_RC(CL_ERR_DOESNT_EXIST);

failure:
    *cpmL = NULL;
    return rc;
}
/*
 * Deletes every node of the given container, leaving the container itself
 * valid but empty.
 *
 * Returns CL_OK on success (an already-empty container counts as success);
 * CL_ERR_INVALID_HANDLE for a bad container handle; otherwise the first
 * error reported while fetching or deleting a node.
 */
ClRcT clCntAllNodesDelete(ClCntHandleT containerHandle)
{
    CclContainer_t *pCnt = NULL;
    ClCntNodeHandleT node = CL_HANDLE_INVALID_VALUE;
    ClCntNodeHandleT successor = CL_HANDLE_INVALID_VALUE;
    ClRcT rc = CL_OK;

    pCnt = (CclContainer_t *) containerHandle;
    nullChkRet(pCnt);
    if(pCnt->validContainer != CONTAINER_ID)
    {
        returnCntError(CL_ERR_INVALID_HANDLE, "Passed container handle is invalid");
    }

    rc = clCntFirstNodeGet(containerHandle, &node);
    if(rc != CL_OK)
    {
        /* An empty container simply means there is nothing to delete. */
        return (CL_GET_ERROR_CODE(rc) == CL_ERR_NOT_EXIST) ? CL_OK : rc;
    }

    /* Grab each node's successor before deleting it, so the walk survives
     * the deletion; a failed successor lookup terminates the walk after
     * the current node. */
    while(node)
    {
        if(clCntNextNodeGet(containerHandle, node, &successor) != CL_OK)
        {
            successor = 0;
        }
        rc = clCntNodeDelete(containerHandle, node);
        if(rc != CL_OK)
        {
            return rc;
        }
        node = successor;
    }
    return CL_OK;
}
/*
 * Fetches the first entry of the GMS database table selected by `type`.
 *
 * On success *gmsOpaque holds the container's internal node handle (an
 * iteration cursor for _clGmsDbGetNext-style calls) and *data the stored
 * user data.  An empty table is not an error: *data is set to NULL and
 * CL_OK is returned.
 */
ClRcT _clGmsDbGetFirst(
    CL_IN    const ClGmsDbT* const     gmsDb,
    CL_IN    const ClGmsDbTypeT       type,
    CL_OUT   ClCntNodeHandleT** const  gmsOpaque,
    CL_INOUT void** const             data)
{
    ClRcT ret = CL_OK;

    /* Guard every out/in pointer before touching the table. */
    if ((gmsDb == (const void *)NULL) || (data == NULL) || (gmsOpaque == NULL))
    {
        return CL_ERR_NULL_POINTER;
    }

    ret = clCntFirstNodeGet((ClCntHandleT) gmsDb->htbl[type],
                            (ClCntNodeHandleT*) gmsOpaque);
    if (ret != CL_OK)
    {
        /* Empty table: report "no data" via *data rather than an error. */
        if (ret == CL_CNT_RC(CL_ERR_NOT_EXIST))
        {
            *data = NULL;
            return CL_OK;
        }
        return ret;
    }

    /* Resolve the node handle into the stored user data. */
    ret = clCntNodeUserDataGet((ClCntHandleT) gmsDb->htbl[type],
                               (ClCntNodeHandleT)*gmsOpaque,
                               (ClCntDataHandleT*)(data));
    if (ret == CL_CNT_RC(CL_ERR_NOT_EXIST))
    {
        *data = NULL;
        return CL_OK;
    }
    return ret;
}
/*
 * Core container walk: visits every node and invokes the user callback with
 * the node's key and data.  A node is visited only if it carries no RBE
 * expression or its expression evaluates true against userDataArg.
 *
 * failSafe == CL_TRUE  : callback errors are ignored and the walk continues.
 * failSafe == CL_FALSE : the first callback error aborts the walk and is
 *                        returned to the caller.
 *
 * The successor is fetched before the callback runs, so a callback may
 * delete the node it is visiting without breaking the iteration.
 */
static ClRcT __clCntWalk(ClCntHandleT containerHandle,
                         ClCntWalkCallbackT fpUserWalkCallback,
                         ClCntArgHandleT userDataArg,
                         ClInt32T dataLength,
                         ClBoolT failSafe)
{
    CclContainer_t *pContainer = NULL;
    ClCntNodeHandleT containerNode = CL_HANDLE_INVALID_VALUE;
    ClCntNodeHandleT nextContainerNode = CL_HANDLE_INVALID_VALUE;
    ClCntDataHandleT userData = CL_HANDLE_INVALID_VALUE;
    ClCntKeyHandleT userKey = CL_HANDLE_INVALID_VALUE;
    BaseLinkedListNode_t *pTemp = NULL;
    ClRcT errorCode = CL_OK;

    nullChkRet(fpUserWalkCallback);
    pContainer = (CclContainer_t *) containerHandle;
    nullChkRet(pContainer);
    if(pContainer->validContainer != CONTAINER_ID)
    {
        returnCntError(CL_ERR_INVALID_HANDLE, "Passed container handle is invalid");
    }

    errorCode = clCntFirstNodeGet (containerHandle, &containerNode);
    if(errorCode != CL_OK)
    {
        if((CL_GET_ERROR_CODE(errorCode)) == CL_ERR_NOT_EXIST)
        {
            /* If the container is empty it means we dont have to walk
             * anymore. Hence return OK. */
            return (CL_OK);
        }
        return(errorCode);
    }

    while (containerNode)
    {
        /* The node handle doubles as the linked-list node pointer; used
         * below only to read the node's optional RBE expression. */
        pTemp = (BaseLinkedListNode_t*) containerNode;
        if(clCntNextNodeGet(containerHandle, containerNode, &nextContainerNode) != CL_OK)
        {
            nextContainerNode = 0;
        }
        if ((pTemp->pRbeExpression == NULL) ||
            (clRuleExprEvaluate(pTemp->pRbeExpression, (ClUint32T*) userDataArg, dataLength)))
        {
            /* NOTE(review): the return codes of the key/data lookups are
             * overwritten without being checked; the callback may receive
             * stale userKey/userData if either lookup fails -- verify. */
            errorCode = clCntNodeUserKeyGet (containerHandle, containerNode, &userKey);
            errorCode = clCntNodeUserDataGet (containerHandle, containerNode, &userData);
            errorCode = fpUserWalkCallback(userKey, userData, userDataArg,dataLength);
            if(!failSafe && (CL_OK != errorCode))
            {
                return (errorCode);
            }
        }
        containerNode = nextContainerNode;
    }
    return(CL_OK);
}
/*
 * CLI "clusterList" handler: renders a table
 * (nodeName | status | iocAddress | iocPort) of every node held in
 * gpClCpm->cpmTable and returns it as one heap-allocated, NUL-terminated
 * string in *retStr (ownership passes to the caller).
 *
 * On any buffer/container error the CL_CPM_CHECK macro logs and jumps to
 * the `failure` label, which releases the buffer and returns the error.
 */
ClRcT _cpmClusterConfigList(ClInt32T argc, ClCharT **retStr)
{
    ClCpmLT *cpmL = NULL;
    ClRcT rc = CL_OK;
    ClCntNodeHandleT hNode = 0;
    ClUint32T cpmLCount = 0;
    ClCharT tempStr[256];
    ClCharT *tmpStr = NULL;
    ClBufferHandleT message;

    rc = clBufferCreate(&message);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to create message \n"), rc);

    /* Wrong argument count: the reply is just a usage string. */
    if (argc != ONE_ARGUMENT)
    {
        sprintf(tempStr, "Usage: clusterList");
        rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
        CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to write message %x\n", rc), rc);
        goto done;
    }

    /* Table header. */
    sprintf(tempStr, "%s\n", "nodeName | status | iocAddress | iocPort ");
    rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
    sprintf(tempStr, "%s\n", "-----------------------------------------------------------------------");
    rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

    /*
     * The CPM/G's own entry is also stored in the cpmTable list, so the
     * local node needs no separate row (the disabled block below used to
     * print it).
     */
#if 0
    if (gpClCpm->pCpmLocalInfo->status == CL_CPM_EO_DEAD)
        sprintf(tempStr, "%10s | DEAD | %8d | 0x%x\n",
                gpClCpm->pCpmLocalInfo->nodeName,
                gpClCpm->pCpmLocalInfo->cpmAddress.nodeAddress,
                gpClCpm->pCpmLocalInfo->cpmAddress.portId);
    else
        sprintf(tempStr, "%10s | ALIVE | %8d | 0x%x\n",
                gpClCpm->pCpmLocalInfo->nodeName,
                gpClCpm->pCpmLocalInfo->cpmAddress.nodeAddress,
                gpClCpm->pCpmLocalInfo->cpmAddress.portId);

    rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
#endif
    /*_cpmListAllSU();*/

    /* Walk every CPM/L entry and emit one row per node. */
    rc = clCntFirstNodeGet(gpClCpm->cpmTable, &hNode);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to get first cpmTable Node %x\n", rc), rc);
    rc = clCntNodeUserDataGet(gpClCpm->cpmTable, hNode, (ClCntDataHandleT *) &cpmL);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to get container Node data %x\n", rc), rc);

    /* NOTE(review): iteration is bounded by noOfCpm rather than by the
     * container itself -- assumes the count and table stay in sync. */
    cpmLCount = gpClCpm->noOfCpm;
    while (cpmLCount)
    {
        if (cpmL->pCpmLocalInfo != NULL)
        {
            if (cpmL->pCpmLocalInfo->status == CL_CPM_EO_DEAD)
            {
                sprintf(tempStr, "%10s | DEAD | %8d | 0x%x\n", cpmL->nodeName,
                        cpmL->pCpmLocalInfo->cpmAddress.nodeAddress,
                        cpmL->pCpmLocalInfo->cpmAddress.portId);
            }
            else
            {
                sprintf(tempStr, "%10s | ALIVE | %8d | 0x%x\n", cpmL->nodeName,
                        cpmL->pCpmLocalInfo->cpmAddress.nodeAddress,
                        cpmL->pCpmLocalInfo->cpmAddress.portId);
            }
            rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
        }
        else
        {
            /* Node known from configuration only (never registered): name
             * column only, no status/address. */
            sprintf(tempStr, "%10s \n", cpmL->nodeName);
            rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
        }
#if 0
        rc = clCntFirstNodeGet(cpmL->suTable, &hSU);
        CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to get first su Node in cpmL %x\n", rc), rc);
        if (cpmL->pCpmLocalInfo != NULL)
            clOsalPrintf("%10s | %8d | %8d | 0x%8x| 0x%8x \n",
                         cpmL->pCpmLocalInfo->nodeName,
                         cpmL->pCpmLocalInfo->status,
                         cpmL->pCpmLocalInfo->nodeId,
                         cpmL->pCpmLocalInfo->cpmAddress.nodeAddress,
                         cpmL->pCpmLocalInfo->cpmAddress.portId);
        else
            clOsalPrintf("%10s \n", cpmL->nodeName);
        suCount = cpmL->noOfsu;
        while (suCount)
        {
            rc = clCntNodeUserDataGet(cpmL->suTable, hSU, (ClCntDataHandleT *) &su);
            CL_CPM_CHECK(CL_DEBUG_ERROR,
                         ("Unable to get first container su Node data %x\n", rc), rc);
            clOsalPrintf("\t %10s |%15s |%11s |%16s \n", su->suName,
                         _cpmPresenceStateNameGet(su->suPresenceState),
                         _cpmOperStateNameGet(su->suOperState),
                         _cpmReadinessStateNameGet(su->suReadinessState));
            tempCompRef = su->suCompList;
            while (tempCompRef != NULL)
            {
                clOsalPrintf("-----------------------------------------------------------------------\n");
                clOsalPrintf("\t\t%10s %14d |%15s |%11s |%16s \n",
                             tempCompRef->ref->compConfig->compName,
                             tempCompRef->ref->compRestartCount,
                             _cpmPresenceStateNameGet(tempCompRef->ref->compPresenceState),
                             _cpmOperStateNameGet(tempCompRef->ref->compOperState),
                             _cpmReadinessStateNameGet(tempCompRef->ref->compReadinessState));
                tempCompRef = tempCompRef->pNext;
            }
            suCount--;
            if (suCount)
            {
                rc = clCntNextNodeGet(cpmL->suTable, hSU, &hSU);
                CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get Node Data \n"), rc);
            }
        }
#endif
        /* Row separator. */
        sprintf(tempStr, "%s", "-----------------------------------------------------------------------\n");
        rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr));
        CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

        cpmLCount--;
        if (cpmLCount)
        {
            rc = clCntNextNodeGet(gpClCpm->cpmTable, hNode, &hNode);
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get Node Data \n"), rc);
            rc = clCntNodeUserDataGet(gpClCpm->cpmTable, hNode, (ClCntDataHandleT *) &cpmL);
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to get container Node data %d\n", rc), rc);
        }
    }

    /*
     * Bug 4986: the NUL termination lives below the done: label so that the
     * usage-string path is terminated too.
     */
done:
    sprintf(tempStr, "%s", "\0");
    rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, 1);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

    rc = clBufferFlatten(message, (ClUint8T **) &tmpStr);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to flatten message \n"), rc);
    *retStr = tmpStr;

    clBufferDelete(&message);
    return rc;

failure:
    clBufferDelete(&message);
    return rc;
}
/*
 * CLI "compList" handler: renders a table of every component registered
 * with this CPM (skipping the CPM's own component) together with each
 * component's execution objects, and returns it as one heap-allocated,
 * NUL-terminated string in *retStr (ownership passes to the caller).
 *
 * On any buffer/container error the CL_CPM_CHECK macro logs and jumps to
 * the `failure` label, which releases the buffer and returns the error.
 */
ClRcT _clCpmComponentListAll(ClInt32T argc, ClCharT **retStr)
{
    ClCntNodeHandleT hNode = 0;
    ClCpmComponentT *comp = NULL;
    ClUint32T rc = CL_OK, count;
    ClCpmEOListNodeT *eoList = NULL;
    ClCharT state[10] = "\0";
    ClCharT status[10] = "\0";
    ClCharT tempStr[256];
    ClCharT *tmpStr = NULL;
    ClBufferHandleT message;
    ClCharT cpmCompName[CL_MAX_NAME_LENGTH] = {0};

    rc = clBufferCreate(&message);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to create message %x\n", rc), rc);

    /* Wrong argument count: the reply is just a usage string. */
    if (argc != ONE_ARGUMENT)
    {
        rc = clBufferNBytesWrite(message, (ClUint8T *) STR_AND_SIZE("Usage: compList"));
        CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to write message %x\n", rc), rc);
        rc = CL_CPM_RC(CL_ERR_INVALID_PARAMETER);
        goto done;
    }

    /* NOTE(review): iteration is bounded by noOfComponent rather than by
     * the container itself -- assumes the count and table stay in sync. */
    count = gpClCpm->noOfComponent;
    /* Name of the CPM's own component on this node; used to skip it. */
    snprintf(cpmCompName, CL_MAX_NAME_LENGTH-1, "%s_%s", CL_CPM_COMPONENT_NAME,
             gpClCpm->pCpmLocalInfo->nodeName);

    rc = clCntFirstNodeGet(gpClCpm->compTable, &hNode);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get First component \n"), rc);

    rc = clBufferNBytesWrite(message, (ClUint8T *)
        STR_AND_SIZE("################### List Of Components ########################\n"));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

    rc = clCntNodeUserDataGet(gpClCpm->compTable, hNode,(ClCntDataHandleT *) &comp);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get Node Data \n"), rc);

    /* Table headers: one line for components, one for their EOs. */
    rc = clBufferNBytesWrite(message,
        STR_AND_SIZE(" CompName | compId | eoPort | PID | RestartCount | PresenceState\n"));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
    rc = clBufferNBytesWrite(message,
        STR_AND_SIZE("\t\t ID | Port | Name | Health |Recv Threads \n"));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
    rc = clBufferNBytesWrite(message,
        STR_AND_SIZE("========================================================================================\n"));
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

    while (count)
    {
        if (strcmp(comp->compConfig->compName, cpmCompName)!=0) /* Skip if it is the CPM component */
        {
            eoList = comp->eoHandle;
            int len = sprintf(tempStr, "%30s| 0x%x | 0x%x |%8d |%14d |%15s\n",
                              comp->compConfig->compName, comp->compId, comp->eoPort,
                              comp->processId, comp->compRestartCount,
                              _cpmPresenceStateNameGet(comp->compPresenceState));
            /* Non-SA-aware component without a live EO: emit a placeholder
             * EO row so the table stays aligned. */
            if (comp->compConfig->compProperty != CL_AMS_COMP_PROPERTY_SA_AWARE)
            {
                if(!eoList || !eoList->eoptr)
                {
                    len += snprintf(tempStr + len, sizeof(tempStr) - len,
                                    "\t\t 0x%x | 0x%x |%10s |%10s |%04d \n",
                                    0, 0, "-", "-", 0);
                }
            }
            rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr,len);
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

            /* One row per execution object of this component. */
            while (eoList != NULL && eoList->eoptr != NULL)
            {
                compMgrStateStatusGet(eoList->status, eoList->eoptr->state,
                                      status, sizeof(status), state, sizeof(state));
                /* The receive thread is extra when the EO uses a dedicated
                 * thread for receives. */
                int noOfThreads =
                    (eoList->eoptr->appType == CL_EO_USE_THREAD_FOR_RECV) ?
                    eoList->eoptr->noOfThreads + 1: eoList->eoptr->noOfThreads;
                /* NOTE(review): this `len` intentionally shadows the outer
                 * one; each EO row is written immediately below. */
                int len = sprintf(tempStr, "\t\t 0x%llx | 0x%x |%10s |%10s |%04d \n",
                                  eoList->eoptr->eoID, eoList->eoptr->eoPort,
                                  eoList->eoptr->name, status, noOfThreads);
                rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, len);
                CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
                eoList = eoList->pNext;
            }
            rc = clBufferNBytesWrite(message,
                STR_AND_SIZE("-----------------------------------------------------------------------------------------\n"));
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);
        }
        count--;
        if (count)
        {
            rc = clCntNextNodeGet(gpClCpm->compTable, hNode, &hNode);
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to Get Node Data %x\n", rc), rc);
            rc = clCntNodeUserDataGet(gpClCpm->compTable, hNode,
                                      (ClCntDataHandleT *) &comp);
            CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to Get Node user Data %x\n", rc), rc);
        }
    }

    /*
     * Bug 4986: the NUL termination lives below the done: label so that the
     * usage-string path is terminated too.
     */
done:
    rc = clBufferNBytesWrite(message, (ClUint8T *) "\0", 1);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc);

    rc = clBufferFlatten(message, (ClUint8T **) &tmpStr);
    CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to flatten the message \n"), rc);
    *retStr = tmpStr;

    clBufferDelete(&message);
    return rc;

failure:
    clBufferDelete(&message);
    return rc;
}
ClRcT clCpmExecutionObjectListShow(ClInt32T argc, ClIocNodeAddressT compAddr, ClUint32T flag, ClEoIdT eoId, ClCharT **retStr) { /* * ClCpmEOListNodeT* ptr = gpClCpm->eoList; */ ClCpmEOListNodeT *ptr = NULL; ClCharT name[32] = "\0"; ClCharT state[10] = "\0"; ClCharT status[10] = "\0"; ClUint32T compCount = 0; ClCntNodeHandleT hNode = 0; ClCpmComponentT *comp = NULL; ClRcT rc = CL_OK; ClCharT tempStr[256]; ClCharT *tmpStr = NULL; ClBufferHandleT message = 0; rc = clBufferCreate(&message); CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to create message %x\n", rc), rc); if (argc != ONE_ARGUMENT) { sprintf(tempStr, "Usage: EOShow"); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr)); CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to write message %x\n", rc), rc); goto done; } sprintf(tempStr, "\n ID | Port | Name | Health | EO State | Recv Threads "); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr)); CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to write message %x\n", rc), rc); sprintf(tempStr, "\n ===================================================================== "); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr)); CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to write message %x\n", rc), rc); /* * take the semaphore */ if ((rc = clOsalMutexLock(gpClCpm->eoListMutex)) != CL_OK) CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Could not get Lock successfully------\n")); rc = clCntFirstNodeGet(gpClCpm->compTable, &hNode); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("Unable to Get First component \n"), rc); rc = clCntNodeUserDataGet(gpClCpm->compTable, hNode, (ClCntDataHandleT *) &comp); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("Unable to Get Node Data \n"), rc); compCount = gpClCpm->noOfComponent; while (compCount != 0) { ptr = comp->eoHandle; if (flag == 0) { while (ptr != NULL && ptr->eoptr != NULL) { strcpy(name, ptr->eoptr->name); /* * Obtain the state and status in string format */ compMgrStateStatusGet(ptr->status, ptr->eoptr->state, status, 
sizeof(status), state, sizeof(state)); if (ptr->eoptr->appType == CL_EO_USE_THREAD_FOR_RECV) sprintf(tempStr, "\n 0x%llx| 0x%x | %10s | %10s | %10s | %04d ", ptr->eoptr->eoID, ptr->eoptr->eoPort, name, status, state, (ptr->eoptr->noOfThreads + 1)); else sprintf(tempStr, "\n 0x%llx| 0x%x | %10s | %10s | %10s | %04d ", ptr->eoptr->eoID, ptr->eoptr->eoPort, name, status, state, ptr->eoptr->noOfThreads); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr)); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc); ptr = ptr->pNext; } } else { while (ptr != NULL && ptr->eoptr != NULL) { if (ptr->eoptr->eoID == eoId) { /* * obtain the state and status in string format */ compMgrStateStatusGet(ptr->status, ptr->eoptr->state, status, sizeof(status), state, sizeof(state)); strcpy(name, ptr->eoptr->name); if (ptr->eoptr->appType == CL_EO_USE_THREAD_FOR_RECV) sprintf(tempStr, "\n 0x%llx| 0x%x | %10s | %10s | %10s | %04d | ", ptr->eoptr->eoID, ptr->eoptr->eoPort, name, status, state, ptr->eoptr->noOfThreads + 1); else sprintf(tempStr, "\n 0x%llx| 0x%x | %10s | %10s | %10s | %04d | ", ptr->eoptr->eoID, ptr->eoptr->eoPort, name, status, state, ptr->eoptr->noOfThreads); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, strlen(tempStr)); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc); break; } ptr = ptr->pNext; } #if 0 if (ptr == NULL) CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("EOID not found\n")); #endif } compCount--; if (compCount) { rc = clCntNextNodeGet(gpClCpm->compTable, hNode, &hNode); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get Node Data \n"), rc); rc = clCntNodeUserDataGet(gpClCpm->compTable, hNode, (ClCntDataHandleT *) &comp); CL_CPM_LOCK_CHECK(CL_DEBUG_ERROR, ("\n Unable to Get Node Data \n"), rc); } } /* * Release the semaphore */ rc = clOsalMutexUnlock(gpClCpm->eoListMutex); CL_CPM_CHECK(CL_DEBUG_ERROR, ("COMP_MGR: Could not UnLock successfully------\n"), rc); /* * Bug 4986 : * Moved the code to NULL 
terminate the string * below the done: label, so that the usage string * written to the buffer is also NULL terminated. */ done: /* * NULL terminate the string */ sprintf(tempStr, "%s", "\0"); rc = clBufferNBytesWrite(message, (ClUint8T *) tempStr, 1); CL_CPM_CHECK(CL_DEBUG_ERROR, ("\n Unable to write message \n"), rc); /* * Construct the return buffer */ rc = clBufferFlatten(message, (ClUint8T **) &tmpStr); CL_CPM_CHECK(CL_DEBUG_ERROR, ("Unable to flatten the message \n"), rc); *retStr = tmpStr; clBufferDelete(&message); return (CL_OK); withlock: /* * Release the semaphore */ rc = clOsalMutexUnlock(gpClCpm->eoListMutex); CL_CPM_CHECK(CL_DEBUG_ERROR, ("COMP_MGR: Could not UnLock successfully------\n"), rc); failure: clBufferDelete(&message); return rc; }
/*
 * Removes every pending invocation belonging to the named component from
 * the CPM invocation table, freeing the invocation entries (and their
 * copied payloads) as it goes.  Runs under the invocation mutex.
 *
 * \param compName  Component whose invocations are to be cleared.
 * \return CL_OK on success; CL_ERR_NULL_POINTER for a NULL name; otherwise
 *         the first container error encountered.
 */
ClRcT cpmInvocationClearCompInvocation(SaNameT *compName)
{
    ClRcT rc = CL_OK;
    ClCntNodeHandleT nodeHandle, nextNodeHandle;
    ClCpmInvocationT *invocationData = NULL;
    void *data = NULL;

    /* Check the input parameter (macro jumps to failure on NULL). */
    if (!compName)
    {
        CL_CPM_CHECK(CL_LOG_SEV_ERROR, ("Invalid parameter passed \n"),
                     CL_CPM_RC(CL_ERR_NULL_POINTER));
    }

    clOsalMutexLock(gpClCpm->invocationMutex);

    rc = clCntFirstNodeGet(gpClCpm->invocationTable, &nodeHandle);
    if (rc != CL_OK)
        goto withLock;

    while (nodeHandle)
    {
        rc = clCntNodeUserDataGet(gpClCpm->invocationTable, nodeHandle,
                                  (ClCntDataHandleT *) &invocationData);
        if (rc != CL_OK)
            goto withLock;

        /* Fetch the successor before a possible deletion of this node so
         * the iteration survives; the rc is examined after the match
         * handling below (NOT_EXIST == we just processed the last node). */
        rc = clCntNextNodeGet(gpClCpm->invocationTable, nodeHandle, &nextNodeHandle);

        if((data = invocationData->data))
        {
            ClUint32T matched = 0;
            /* The payload layout differs by origin: AMS invocations carry a
             * ClAmsInvocationT, CPM invocations a ClCpmComponentT. */
            if ((invocationData->flags & CL_CPM_INVOCATION_AMS))
            {
                matched = !strncmp((const ClCharT *) (((ClAmsInvocationT*) data)->compName.value),
                                   (const ClCharT *) compName->value,
                                   ((ClAmsInvocationT*) data)->compName.length);
            }
            else if ((invocationData->flags & CL_CPM_INVOCATION_CPM))
            {
                matched = !strncmp((const ClCharT *) (((ClCpmComponentT*) data)->compConfig->compName),
                                   (const ClCharT *) compName->value,
                                   compName->length);
            }
            if(matched)
            {
                clLogDebug("INVOCATION", "CLEAR",
                           "Clearing invocation for component [%.*s] "
                           "invocation [%#llx]",
                           compName->length, compName->value,
                           invocationData->invocation);
                if (clCntNodeDelete(gpClCpm->invocationTable, nodeHandle) != CL_OK)
                    goto withLock;
                /* The payload is only owned by the entry when it was copied
                 * in at registration time. */
                if( (invocationData->flags & CL_CPM_INVOCATION_DATA_COPIED) )
                    clHeapFree(data);
                clHeapFree(invocationData);
            }
        }
        if (CL_GET_ERROR_CODE(rc) == CL_ERR_NOT_EXIST)
            break;
        if (rc != CL_OK)
            goto withLock;

        nodeHandle = nextNodeHandle;
    }

    clOsalMutexUnlock(gpClCpm->invocationMutex);
    return CL_OK;

/* Both labels release the mutex; `failure` is the CL_CPM_CHECK target. */
withLock:
failure:
    clOsalMutexUnlock(gpClCpm->invocationMutex);
    return rc;
}
/*
 * Dumps the system-controller view of the cluster as XML into `fp`:
 * a <cpm> element containing one <node> entry (name, id, ha_state) for each
 * node that is a system controller (same nodeType as the local node, or the
 * active/deputy master).  Runs under the cpmTable mutex; only a global CPM
 * with a non-empty table produces node entries.
 */
ClRcT cpmPrintDBXML(FILE *fp)
{
    ClRcT rc = CL_OK;
    ClCntNodeHandleT cpmNode = 0;
    ClUint32T cpmLCount = 0;
    ClCpmLT *cpmL = NULL;

    fprintf(fp,"<cpm>\n");

    /* Walk through the cpm table to find and display SCs. */
    clOsalMutexLock(gpClCpm->cpmTableMutex);
    cpmLCount = gpClCpm->noOfCpm;
    if (gpClCpm->pCpmConfig->cpmType == CL_CPM_GLOBAL && cpmLCount != 0)
    {
        rc = clCntFirstNodeGet(gpClCpm->cpmTable, &cpmNode);
        if(rc != CL_OK)
        {
            clLogError(CPM_LOG_AREA_DB,CPM_LOG_CTX_DB_XML,CL_CPM_LOG_2_CNT_FIRST_NODE_GET_ERR,
                       "CPM-L", rc);
            clLogWrite((CL_LOG_HANDLE_APP), (CL_LOG_SEV_DEBUG), NULL,
                       CL_CPM_LOG_2_CNT_FIRST_NODE_GET_ERR, ("CPM-L"), (rc));
            goto out_unlock;
        }
        /* NOTE(review): iteration is bounded by noOfCpm rather than by the
         * container itself -- assumes the count and table stay in sync. */
        while (cpmLCount)
        {
            rc = clCntNodeUserDataGet(gpClCpm->cpmTable, cpmNode,
                                      (ClCntDataHandleT *) &cpmL);
            if(rc != CL_OK)
            {
                clLogError(CPM_LOG_AREA_DB,CPM_LOG_CTX_DB_XML,CL_CPM_LOG_1_CNT_NODE_USR_DATA_GET_ERR,
                           rc);
                clLogWrite((CL_LOG_HANDLE_APP), (CL_LOG_SEV_DEBUG), NULL,
                           CL_CPM_LOG_1_CNT_NODE_USR_DATA_GET_ERR, rc);
                goto out_unlock;
            }
            if (cpmL->pCpmLocalInfo)
            {
                /* Emit the node only if it is a system controller: same
                 * node type as the local (SC) node, or currently the
                 * active or deputy master. */
                if(!strcmp((const ClCharT *)cpmL->nodeType.value,
                           (const ClCharT *)gpClCpm->pCpmLocalInfo->nodeType.value)
                   ||
                   (ClUint32T) cpmL->pCpmLocalInfo->nodeId == gpClCpm->activeMasterNodeId
                   ||
                   (ClUint32T) cpmL->pCpmLocalInfo->nodeId == gpClCpm->deputyNodeId)
                {
                    fprintf(fp,"<node value=\"%s\">\n",cpmL->pCpmLocalInfo->nodeName);
                    fprintf(fp,"<id value=\"%d\"/>\n",cpmL->pCpmLocalInfo->nodeId);
                    /* Anything that is not the active master is reported
                     * as standby. */
                    fprintf(fp,"<ha_state value=\"%s\"/>\n",
                            (ClUint32T) cpmL->pCpmLocalInfo->nodeId == gpClCpm->activeMasterNodeId ?
                            "active" : "standby");
                    fprintf(fp,"</node>\n");
                }
            }
            cpmLCount--;
            if (cpmLCount)
            {
                rc = clCntNextNodeGet(gpClCpm->cpmTable, cpmNode, &cpmNode);
                if(rc != CL_OK)
                {
                    clLogError(CPM_LOG_AREA_DB,CPM_LOG_CTX_DB_XML,CL_CPM_LOG_2_CNT_NEXT_NODE_GET_ERR,
                               "CPM-L", rc);
                    clLogWrite((CL_LOG_HANDLE_APP), (CL_LOG_SEV_DEBUG), NULL,
                               CL_CPM_LOG_2_CNT_NEXT_NODE_GET_ERR, ("CPM-L"), (rc));
                    goto out_unlock;
                }
            }
        }
    }
    rc = CL_OK;

out_unlock:
    clOsalMutexUnlock(gpClCpm->cpmTableMutex);
    /* The closing tag is written even on the error paths so the emitted
     * XML stays well-formed. */
    fprintf(fp,"</cpm>\n");
    return rc;
}
/*
 * Peer-down handling for one checkpoint: the node `actAddr` has left, so
 * retire it as the checkpoint's active replica and elect a replacement.
 *
 * Steps: remove actAddr from the replica list; remember it as the previous
 * active; pick the new active (first remaining replica, or UNINIT for a
 * collocated checkpoint / empty list); publish an active-replica-change
 * event when this node is (or stands in for) the master; and migrate the
 * master handle between the peers' handle lists.
 *
 * On error the CKPT_ERR_CHECK macros log and (presumably) jump to
 * exitOnError -- verify against the macro definition.  The master DB entry
 * is always checked back in before returning.
 */
ClRcT _clCkpMastertReplicaAddressUpdate(ClHandleT mastHdl,
                                        ClIocNodeAddressT actAddr)
{
    ClRcT rc = CL_OK;
    ClCntNodeHandleT nodeHdl = 0;
    ClCntDataHandleT dataHdl = 0;
    CkptMasterDBEntryT *pMasterDBEntry = NULL;
    ClCkptClientUpdInfoT eventInfo = {0};
    ClEventIdT eventId = 0;

    clLogDebug(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
               "Changing the active address of ckpt handle [%#llX]", mastHdl);

    /* Retrieve the information associated with the master hdl. */
    if( CL_OK != (rc = clHandleCheckout(gCkptSvr->masterInfo.masterDBHdl,
                                        mastHdl, (void **) &pMasterDBEntry)))
    {
        clLogError(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                   "Master db entry doesn't have this handle [%#llX]", mastHdl);
        return rc;
    }

    /*
     * Delete the entry of the node that went down from the checkpoint's
     * replica list.
     */
    if (pMasterDBEntry->replicaList)
        clCntAllNodesForKeyDelete(pMasterDBEntry->replicaList, (ClPtrT)(ClWordT)actAddr);
    else
    {
        clLogWarning(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                     "Replicalist for %s is empty",pMasterDBEntry->name.value);
    }

    /* Store the node's address as prev active address. */
    pMasterDBEntry->prevActiveRepAddr = actAddr;

    /*
     * Select the new active address.  For a COLLOCATED checkpoint there is
     * no election: the active goes to UNINIT until a client re-activates.
     */
    if (CL_CKPT_IS_COLLOCATED(pMasterDBEntry->attrib.creationFlags))
    {
        if(pMasterDBEntry->activeRepAddr == actAddr)
        {
            pMasterDBEntry->activeRepAddr = CL_CKPT_UNINIT_ADDR;
        }
        else
        {
            if(pMasterDBEntry->activeRepAddr != CL_CKPT_UNINIT_ADDR)
            {
                /*
                 * If we have traces of the master handle in our peer list
                 * missed by the active replica set, then remove it here.
                 */
                clLogNotice(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                            "Active replica is [%d]."
                            "Removing master ckpt handle [%#llX] from last active [%d]",
                            pMasterDBEntry->activeRepAddr, mastHdl, actAddr);
                _ckptPeerListMasterHdlAdd(mastHdl, actAddr, CL_CKPT_UNINIT_ADDR);
            }
            /* Some other node is still active: nothing further to do. */
            goto exitOnError;
        }
    }
    else
    {
        /* Non-collocated: promote the first surviving replica, if any. */
        rc = clCntFirstNodeGet(pMasterDBEntry->replicaList, &nodeHdl);
        if(CL_ERR_INVALID_HANDLE == CL_GET_ERROR_CODE(rc) || nodeHdl == 0)
        {
            /* No replicas left: the checkpoint has no active address now. */
            rc = CL_OK;
            pMasterDBEntry->activeRepAddr = CL_CKPT_UNINIT_ADDR;
        }
        CKPT_ERR_CHECK(CL_CKPT_SVR,CL_LOG_SEV_ERROR,
                       ("clCkptActiveReplicaAddrGet failed rc[0x %x]\n",rc), rc);
        if( nodeHdl != 0 )
        {
            /* The replica list stores the node address as the node data. */
            rc = clCntNodeUserDataGet(pMasterDBEntry->replicaList, nodeHdl, &dataHdl);
            CKPT_ERR_CHECK(CL_CKPT_SVR,CL_LOG_SEV_ERROR,
                           ("clCkptActiveReplicaAddrGet failed rc[0x %x]\n",rc), rc);
            pMasterDBEntry->activeRepAddr = (ClIocNodeAddressT)(ClWordT)dataHdl;
        }
    }

    /*
     * Inform the clients about the change in active replica: done by the
     * master, or by a controller standing in when the master itself was
     * the departed node.
     */
    if((gCkptSvr->masterInfo.masterAddr == gCkptSvr->localAddr) ||
       (clCpmIsSCCapable() &&
        (pMasterDBEntry->prevActiveRepAddr == gCkptSvr->masterInfo.masterAddr)))
    {
        /* Event payload fields travel in network byte order. */
        eventInfo.eventType = htonl(CL_CKPT_ACTIVE_REP_CHG_EVENT);
        eventInfo.actAddr = htonl(pMasterDBEntry->activeRepAddr);
        saNameCopy(&eventInfo.name, &pMasterDBEntry->name);
        eventInfo.name.length = htons(pMasterDBEntry->name.length);
        clLogNotice(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                    "Changing the address from [%d] to [%d] for checkpoint [%.*s]",
                    actAddr, pMasterDBEntry->activeRepAddr,
                    pMasterDBEntry->name.length, pMasterDBEntry->name.value);
        rc = clEventPublish(gCkptSvr->clntUpdEvtHdl, (const void*)&eventInfo,
                            sizeof(ClCkptClientUpdInfoT), &eventId);
    }

    /*
     * Delete the masterHdl from the old address's peerlist and add it to
     * the new address's peerlist.
     */
    if(!CL_CKPT_IS_COLLOCATED(pMasterDBEntry->attrib.creationFlags))
    {
        _ckptPeerListMasterHdlAdd(mastHdl, actAddr, pMasterDBEntry->activeRepAddr);
    }

exitOnError:
    {
        /* Checkin the updated stuff. */
        clHandleCheckin(gCkptSvr->masterInfo.masterDBHdl, mastHdl);
        return rc;
    }
}
/*
 * Master-side peer bookkeeping (caller already holds the master lock).
 *
 * flag == CL_CKPT_SERVER_UP : a peer checkpoint server was welcomed; make
 *   sure it has a peer-list entry marked available (resetting its replica
 *   accounting when it is a remote peer).
 *
 * Any other flag is a down notification (CL_CKPT_COMP_DOWN,
 * CL_CKPT_SVR_DOWN, CL_CKPT_NODE_DOWN) for the node `localAddr`:
 *   - server/node down: re-elect the active replica for every checkpoint
 *     this peer was active for (its mastHdlList);
 *   - comp/node down: close the client handles opened from that slot
 *     (for comp down, only those on the departed port);
 *   - svr down: mark the peer unavailable;
 *   - node/svr down: drop the peer-list entry and rebalance replicas if we
 *     are the master.
 */
ClRcT clCkptMasterPeerUpdateNoLock(ClIocPortT portId, ClUint32T flag,
                                   ClIocNodeAddressT localAddr,
                                   ClUint8T credential)
{
    ClRcT rc = CL_OK;
    CkptPeerInfoT *pPeerInfo = NULL;
    CkptNodeListInfoT *pPeerInfoDH = NULL;
    ClCntNodeHandleT nodeHdl = 0;
    ClCntNodeHandleT tempHdl = 0;
    ClHandleT *pMasterHandle = NULL;

    /* Check whether node/component is coming up or going down. */
    if(flag == CL_CKPT_SERVER_UP)
    {
        /* Checkpoint server up scenario. */
        clLogDebug(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_ANNOUNCE,
                   "Received welcome message from master, updating the peerlist for [%d]",
                   localAddr);
        /* Reset the replica list for the peer being welcomed without
         * knowing whether the peer is available or not. */
        if(localAddr != gCkptSvr->localAddr)
        {
            clLogNotice("PEER", "UPDATE",
                        "Resetting the replica list for the peer [%#x] being welcomed",
                        localAddr);
            clCkptMasterReplicaListUpdateNoLock(localAddr);
        }
        /*
         * Add an entry to the peer list if not existing.  Mark the node as
         * "available" i.e. available for checkpoint operations like
         * storing replicas etc..
         */
        rc = clCntDataForKeyGet( gCkptSvr->masterInfo.peerList,
                                 (ClPtrT)(ClWordT)localAddr,
                                 (ClCntDataHandleT *)&pPeerInfo);
        if( rc == CL_OK && pPeerInfo != NULL)
        {
            CL_ASSERT(pPeerInfo->ckptList != 0);
            pPeerInfo->credential = credential;
            pPeerInfo->available = CL_CKPT_NODE_AVAIL;
            if(localAddr != gCkptSvr->localAddr)
            {
                pPeerInfo->replicaCount = 0;
            }
        }
        else
        {
            if( CL_OK !=( rc = _ckptMasterPeerListInfoCreate(localAddr, credential,0)))
            {
                return rc;
            }
        }
    }
    else
    {
        /* Node/component down scenario. */
        clLogDebug(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                   "Updating the peerAddr [%d] for down notification", localAddr);
        /* Find the corresponding entry from the peer list; an unknown peer
         * is silently ignored (rc forced to CL_OK). */
        if( CL_OK != (rc = clCntDataForKeyGet(gCkptSvr->masterInfo.peerList,
                                              (ClCntKeyHandleT)(ClWordT)localAddr,
                                              (ClCntDataHandleT *) &pPeerInfo)))
        {
            rc = CL_OK;
            goto exitOnError;
        }

        if( flag != CL_CKPT_COMP_DOWN)
        {
            clLogDebug(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                       "Either ckpt server or node down, "
                       "changing active address");
            /* NOTE(review): the first-node return code is ignored; on
             * failure nodeHdl stays 0 and the loop is simply skipped. */
            clCntFirstNodeGet(pPeerInfo->mastHdlList,&nodeHdl);
            tempHdl = 0;
            /* For every checkpoint this peer was the active replica of
             * (its master-handle list), elect a new active replica. */
            while(nodeHdl != 0)
            {
                rc = clCntNodeUserKeyGet(pPeerInfo->mastHdlList,nodeHdl,
                                         (ClCntKeyHandleT *)&pMasterHandle);
                if( CL_OK != rc )
                {
                    clLogError(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                               "Not able get the data for node handle rc[0x %x]",
                               rc);
                    goto exitOnError;
                }
                /* Fetch the successor first: the address update below may
                 * remove the current node from this list. */
                rc = clCntNextNodeGet(pPeerInfo->mastHdlList, nodeHdl, &tempHdl);
                /* Update the active address and inform the clients. */
                if( CL_OK != (rc = _clCkpMastertReplicaAddressUpdate(*pMasterHandle,
                                                                     localAddr)))
                {
                    return rc;
                }
                nodeHdl = tempHdl;
                tempHdl = 0;
            }
        }

        if (flag != CL_CKPT_SVR_DOWN)
        {
            /*
             * Component down/node down case.  For component down close the
             * associated client handles; for node down close all client
             * handles.  Delete the ckpt handles from the client handle
             * list.
             */
            clLogDebug(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                       "Closing the opened handles from this slot id [%d]...",
                       localAddr);
            /* NOTE(review): return codes of this first/next-node pair are
             * ignored as well; an empty list skips the loop. */
            clCntFirstNodeGet(pPeerInfo->ckptList,&nodeHdl);
            while(nodeHdl != 0)
            {
                rc = clCntNodeUserDataGet(pPeerInfo->ckptList,nodeHdl,
                                          (ClCntDataHandleT *)&pPeerInfoDH);
                if( CL_OK != rc )
                {
                    clLogError(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                               "Not able get the data for node handle rc[0x %x]",
                               rc);
                    goto exitOnError;
                }
                clCntNextNodeGet(pPeerInfo->ckptList,nodeHdl,&tempHdl);
                if ( (flag == CL_CKPT_COMP_DOWN && pPeerInfoDH->appPortNum == portId)
                     || (flag == CL_CKPT_NODE_DOWN) )
                {
                    /*
                     * Close the checkpoint hdl but dont delete the entry
                     * from the masterHdl list.  Only the master performs
                     * the close.
                     */
                    if(gCkptSvr->masterInfo.masterAddr == gCkptSvr->localAddr)
                    {
                        clLogInfo(CL_CKPT_AREA_MAS_DEP, CL_CKPT_CTX_PEER_DOWN,
                                  "Closing the handle [%#llX]...",
                                  pPeerInfoDH->clientHdl);
                        _clCkptMasterCloseNoLock(pPeerInfoDH->clientHdl, localAddr,
                                                 !CL_CKPT_MASTER_HDL);
                    }
                }
                nodeHdl = tempHdl;
                tempHdl = 0;
            }
        }
        else if (flag == CL_CKPT_SVR_DOWN)
        {
            /* NOTE(review): this condition is redundant -- inside the else
             * of `flag != CL_CKPT_SVR_DOWN` it is always true. */
            /* Mark the availability of checkpoint server as UNAVAILABLE. */
            if(pPeerInfo->credential == CL_CKPT_CREDENTIAL_POSITIVE)
                gCkptSvr->masterInfo.availPeerCount--;
            pPeerInfo->available = CL_CKPT_NODE_UNAVAIL;
        }

        if(flag == CL_CKPT_NODE_DOWN || flag == CL_CKPT_SVR_DOWN)
        {
            /* Node down case: delete the entry from master's peer list. */
            rc = clCntAllNodesForKeyDelete(gCkptSvr->masterInfo.peerList,
                                           (ClPtrT)(ClWordT)localAddr);
            CKPT_ERR_CHECK(CL_CKPT_SVR,CL_LOG_SEV_ERROR,
                           (" MasterPeerUpdate failed rc[0x %x]\n",rc), rc);
        }

        if( flag != CL_CKPT_COMP_DOWN)
        {
            /*
             * Find other nodes to store the replicas of checkpoints for
             * whom this node was storing the replicas.
             */
            if(gCkptSvr->masterInfo.masterAddr == gCkptSvr->localAddr)
            {
                _ckptCheckpointLoadBalancing();
            }
        }
    }

exitOnError:
    {
        return rc;
    }
}