/**
 * Event handling function.
 *
 * Handles the event at the current state. If the event is not handled
 * at the current state, it is passed to the parent state and onward
 * until 'top' is reached. If no state up to the top handles it, this
 * method returns an error; otherwise the transition is executed. A
 * transition involves running the exit methods from the bottom state
 * up to the LCA, executing the transition object's function, and then
 * running the entry methods from the LCA down to the next state.
 *
 * @param smThis Instance Object
 * @param msg Event that needs to be handled
 *
 * @returns
 *  CL_OK on successful transition <br/>
 *  CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance/msg handle <br/>
 *  SM_ERR_EXIT_FAILED if the exit handler returned failure <br/>
 *  CL_SM_RC(CL_ERR_INVALID_STATE) if the event cannot be handled at the current state
 *
 */
ClRcT clHsmInstanceOnEvent(ClSmInstancePtrT smThis, ClSmEventPtrT msg)
{
    ClSmStatePtrT curr;
    ClSmTransitionPtrT tO = 0;
    ClRcT ret = CL_OK;

    CL_FUNC_ENTER();
    CL_ASSERT(smThis);
    CL_ASSERT(msg);

    if(!smThis || !smThis->sm || !smThis->current || !msg)
    {
        ret = CL_SM_RC(CL_ERR_NULL_POINTER);
        CL_FUNC_EXIT();
        return ret;
    }

    for(curr = smThis->current; curr && !tO; )
    {
        /* check if the event is in the event handler table */
        if(msg->eventId < (ClSmEventIdT)curr->maxEventTransitions && msg->eventId >= 0)
        {
            tO = curr->eventTransitionTable[msg->eventId].transition;
            break;
        }
        curr = curr->parent;
    }

    if(curr && tO)
    {
#ifdef DEBUG
        clLogTrace(HSM_LOG_AREA, HSM_LOG_CTX_EVENT, "StateMachine [%s] OnEvent [%d] in State [%d:%s]",
                   smThis->name, msg->eventId, curr->type, curr->name);
#else
        clLogTrace(HSM_LOG_AREA, HSM_LOG_CTX_EVENT, "OnEvent %d in state %d",
                   msg->eventId, curr->type);
#endif
        IGNORE_RETURN(_transition(smThis, tO, smThis->current, tO->nextState, msg));
    }
    else
    {
        ret = CL_SM_RC(CL_ERR_INVALID_STATE);
    }

    CL_FUNC_EXIT();
    return ret;
}
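/*
 * Illustrative sketch (not part of the original module): dispatching an event
 * to a hierarchical state machine instance through clHsmInstanceOnEvent()
 * above. It assumes ClSmEventT is the structure behind ClSmEventPtrT and that
 * eventId is the only field the caller must set here; everything else in the
 * event is zeroed.
 */
static ClRcT exampleHsmDispatch(ClSmInstancePtrT instance, ClSmEventIdT eventId)
{
    ClSmEventT event;
    ClRcT rc;

    memset(&event, 0, sizeof(event));
    event.eventId = eventId;

    rc = clHsmInstanceOnEvent(instance, &event);
    if(CL_GET_ERROR_CODE(rc) == CL_ERR_INVALID_STATE)
    {
        /* no state from the current one up to 'top' handles this event */
    }
    return rc;
}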
ClRcT clMsgCallClientsTrackCallback(ClIocPhysicalAddressT compAddr, ClNameT *pGroupName, SaMsgHandleT appHandle, SaMsgQueueGroupNotificationBufferT *pData) { ClRcT rc; ClIdlHandleObjT idlObj = {0}; ClIdlHandleT idlHandle = 0; memcpy(&idlObj, &gIdlUcastObj, sizeof(idlObj)); idlObj.address.address.iocAddress.iocPhyAddress = compAddr; rc = clIdlHandleInitialize(&idlObj, &idlHandle); if(rc != CL_OK) { clLogError("IDL", "TRCb", "Failed to initialize the IDL handle. error code [0x%x].", rc); goto error_out; } clLogTrace("IDL", "TRCb", "Calling track callback for client [0x%x:0x%x].", compAddr.nodeAddress, compAddr.portId); rc = VDECL_VER(clMsgClientsTrackCallbackClientAsync, 4, 0, 0)(idlHandle, appHandle, pGroupName, pData, NULL, NULL); if(rc != CL_OK) { clLogError("IDL", "TRCb", "Failed to make an Async RMD to client. error code [0x%x].", rc); } clIdlHandleFinalize(idlHandle); error_out: return rc; }
ClRcT clMsgMessageGet_Idl( ClIocPhysicalAddressT destNode, ClNameT *pQName, SaTimeT timeout) { ClRcT rc; ClIdlHandleObjT idlObj = {0}; ClIdlHandleT idlHandle = 0; memcpy(&idlObj, &gIdlUcastObj, sizeof(idlObj)); idlObj.address.address.iocAddress.iocPhyAddress = destNode; rc = clIdlHandleInitialize(&idlObj, &idlHandle); if(rc != CL_OK) { clLogError("IDL", "GET", "Failed to initialize the IDL handle. error code [0x%x].", rc); goto error_out; } clLogTrace("IDL", "GET", "Get message request for queue [%.*s] on node [%d].", pQName->length, pQName->value, destNode.nodeAddress); rc = VDECL_VER(clMsgMessageGetClientSync, 4, 0, 0)(idlHandle, pQName, timeout); if(rc != CL_OK) { clLogError("IDL", "GET", "Queue [%.*s] message get failed on node [%d]. error code [0x%x].", pQName->length, pQName->value, destNode.nodeAddress, rc); } clIdlHandleFinalize(idlHandle); error_out: return rc; }
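/*
 * Illustrative sketch (hypothetical helper, not in the original source): the
 * IDL wrappers above and below all follow the same pattern -- clone the global
 * gIdlUcastObj, retarget its IOC physical address, initialize a handle, call a
 * versioned stub, and finalize. A caller-supplied callback would keep that
 * boilerplate in one place; the helper name and callback typedef are
 * assumptions for illustration.
 */
typedef ClRcT (*MsgIdlInvokeCbT)(ClIdlHandleT idlHandle, void *cookie);

static ClRcT msgIdlUnicastInvoke(ClIocPhysicalAddressT destAddr,
                                 MsgIdlInvokeCbT invoke, void *cookie)
{
    ClRcT rc;
    ClIdlHandleObjT idlObj = {0};
    ClIdlHandleT idlHandle = 0;

    memcpy(&idlObj, &gIdlUcastObj, sizeof(idlObj));
    idlObj.address.address.iocAddress.iocPhyAddress = destAddr;  /* retarget */

    rc = clIdlHandleInitialize(&idlObj, &idlHandle);
    if(rc != CL_OK)
        return rc;

    rc = invoke(idlHandle, cookie);   /* the versioned stub call goes here */

    clIdlHandleFinalize(idlHandle);
    return rc;
}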
/**
 * Continue the extended state machine instance.
 *
 * This API is to be called when the state machine is paused and events
 * are only being queued, not processed. It puts the state machine
 * instance back into regular processing mode.
 *
 * @param smThis State Machine Object
 *
 * @returns
 *  CL_OK on success <br/>
 *  SM_ERR_NOT_PAUSED if the instance is not paused <br/>
 *  CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/>
 *
 * @see #clEsmInstancePause
 */
ClRcT clEsmInstanceContinue(ClExSmInstancePtrT smThis)
{
    ClRcT ret = CL_OK;

    CL_FUNC_ENTER();
    CL_ASSERT(smThis);

    if(smThis && smThis->fsm)
    {
        if(!(ESM_IS_PAUSED(smThis)))
        {
            ret = SM_ERR_NOT_PAUSED;
            CL_FUNC_EXIT();
            return ret;
        }
#ifdef DEBUG
        clLogTrace(ESM_LOG_AREA, CL_LOG_CONTEXT_UNSPECIFIED, "Continue [%s]", smThis->fsm->name);
#endif
        ESM_CONTINUE(smThis);
    }
    else
    {
        ret = CL_SM_RC(CL_ERR_NULL_POINTER);
    }

    CL_FUNC_EXIT();
    return ret;
}
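/*
 * Illustrative sketch: pairing this API with clEsmInstancePause() (referenced
 * by the @see tag above but not shown in this excerpt; its signature is
 * assumed here). While paused, events added via clEsmInstanceEventAdd() are
 * only queued -- or dropped if the drop-on-pause option is set -- and
 * clEsmInstanceContinue() resumes normal processing.
 */
static ClRcT examplePauseWindow(ClExSmInstancePtrT esm)
{
    ClRcT rc = clEsmInstancePause(esm);    /* assumed to take the instance and return ClRcT */
    if(rc != CL_OK) return rc;

    /* ... events arriving here are only queued, not processed ... */

    return clEsmInstanceContinue(esm);     /* SM_ERR_NOT_PAUSED if it was not paused */
}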
ClRcT clMsgQueueStatusGet_4_0_0( SaNameT *pQName, SaMsgQueueStatusT *pQueueStatus) { ClRcT rc = CL_OK; ClMsgQueueRecordT *pQEntry; SaMsgQueueHandleT qHandle; CL_MSG_INIT_CHECK(rc); if( rc != CL_OK) { goto error_out; } CL_OSAL_MUTEX_LOCK(&gClQueueDbLock); if(clMsgQNameEntryExists(pQName, &pQEntry) == CL_FALSE) { CL_OSAL_MUTEX_UNLOCK(&gClQueueDbLock); rc = CL_MSG_RC(CL_ERR_DOESNT_EXIST); clLogError("QUE", "STAT", "Queue [%.*s] does not exist. error code [0x%x].", pQName->length, pQName->value, rc); goto error_out; } qHandle = pQEntry->qHandle; CL_OSAL_MUTEX_UNLOCK(&gClQueueDbLock); rc = clMsgQueueStatusGet(qHandle, pQueueStatus); if(rc != CL_OK) clLogTrace("QUE", "STAT", "Failed to get the status of the queue [%.*s]. error code [0x%x].", pQName->length, pQName->value, rc); error_out: return rc; }
ClRcT clMsgQueueAllocateThroughIdl( ClIocPhysicalAddressT destNode, ClNameT *pQName, SaMsgQueueOpenFlagsT openFlags, SaMsgQueueCreationAttributesT *pCreationAttrs, SaMsgQueueHandleT *pQHandle) { ClRcT rc; ClIdlHandleObjT idlObj = {0}; ClIdlHandleT idlHandle = 0; memcpy(&idlObj, &gIdlUcastObj, sizeof(idlObj)); idlObj.address.address.iocAddress.iocPhyAddress = destNode; rc = clIdlHandleInitialize(&idlObj, &idlHandle); if(rc != CL_OK) { clLogError("IDL", "ALOC", "Failed to initialize the IDL handle. error code [0x%x].", rc); goto error_out; } clLogTrace("IDL", "ALOC", "Allocate-request for queue [%.*s] on node [%d].", pQName->length, pQName->value, destNode.nodeAddress); rc = VDECL_VER(clMsgQueueAllocateClientSync, 4, 0, 0)(idlHandle, pQName, openFlags, pCreationAttrs, pQHandle); if(rc != CL_OK) { clLogError("IDL", "ALOC", "Queue [%.*s] allocation failed on node [%d]. error code [0x%x].", pQName->length, pQName->value, destNode.nodeAddress, rc); } clIdlHandleFinalize(idlHandle); error_out: return rc; }
/* * Merge the difference vector with the current data set */ static ClUint8T *__differenceVectorMerge(ClUint8T *lastData, ClSizeT lastDataSize, ClDifferenceVectorT *vector, ClOffsetT offset, ClSizeT dataSize) { ClUint8T *mergeSpace = NULL; ClUint32T i; ClUint32T sectionBlocks; ClOffsetT startOffset; ClSizeT sectionSize; sectionSize = offset + dataSize; sectionBlocks = ( (sectionSize + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK ) >> CL_MD5_BLOCK_SHIFT; startOffset = offset & CL_MD5_BLOCK_MASK; /* * Merge the data into the section. * Cannot reallocate as the allocation span can be more than the size of the section considering we reuse * the merge space if the current section size is already bigger than the specified section size. */ if(sectionSize > lastDataSize) { mergeSpace = (ClUint8T*) clHeapCalloc(1, sectionSize); CL_ASSERT(mergeSpace != NULL); if(lastData) memcpy(mergeSpace, lastData, lastDataSize); } else mergeSpace = lastData; /*reuse the old section allocation and overwrite the data*/ /* * Now apply the difference. */ for(i = 0; i < vector->numDataVectors; ++i) { ClUint32T block = vector->dataVectors[i].dataBlock; ClSizeT size = vector->dataVectors[i].dataSize; ClUint8T *pData = vector->dataVectors[i].dataBase; CL_ASSERT(block < sectionBlocks); /* validation of the specified block size*/ CL_ASSERT((block << CL_MD5_BLOCK_SHIFT) + size <= sectionSize); clLogTrace("DIFF", "MD5-MERGE", "Copy new block [%d], size [%lld]", block, size); memcpy(mergeSpace + (block << CL_MD5_BLOCK_SHIFT) + startOffset, pData, size); if(startOffset) startOffset = 0; /*reset startOffset*/ dataSize -= size; } CL_ASSERT((ClInt64T)dataSize >= 0); clLogTrace("DIFF", "MD5-MERGE", "Merged [%lld] bytes from old block at offset [%lld]", dataSize, offset); return mergeSpace; }
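/*
 * Illustrative sketch (standalone restatement): the block arithmetic above
 * rounds a byte count up to whole MD5 blocks and reduces a byte offset to its
 * position inside a block. It assumes, as the code above does, that
 * CL_MD5_BLOCK_MASK == CL_MD5_BLOCK_SIZE - 1 and CL_MD5_BLOCK_SHIFT ==
 * log2(CL_MD5_BLOCK_SIZE) for a power-of-two block size.
 */
static ClUint32T exampleBlocksForSize(ClSizeT bytes)
{
    /* round up: if the block size were 4096, 4096 bytes -> 1 block, 4097 bytes -> 2 blocks */
    return (ClUint32T)(((bytes + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK) >> CL_MD5_BLOCK_SHIFT);
}

static ClOffsetT exampleOffsetWithinBlock(ClOffsetT offset)
{
    /* byte position of 'offset' inside its containing MD5 block */
    return offset & CL_MD5_BLOCK_MASK;
}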
/**
 * Add an event to the event queue.
 *
 * API to add an event to the state machine instance queue. The event
 * properties are copied (a new queue item is created and the contents
 * of the passed event are copied into it), but the payload is only
 * referenced, not copied.
 *
 * @param smThis Extended State machine Instance handle
 * @param msg Event information
 *
 * @returns
 *  CL_OK on success <br/>
 *  CL_SM_RC(CL_ERR_NO_MEMORY) on memory allocation FAILURE <br/>
 *  SM_ERR_LOCKED if the instance could not be locked <br/>
 *  CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/>
 *
 * @see #clEsmInstanceProcessEvent
 * @see #clEsmInstanceProcessEvents
 *
 */
ClRcT clEsmInstanceEventAdd(ClExSmInstancePtrT smThis, ClSmEventPtrT msg)
{
    ClRcT ret = CL_OK;

    CL_FUNC_ENTER();
    CL_ASSERT(smThis);
    CL_ASSERT(msg);

    if(smThis && msg)
    {
        ClSmQueueItemPtrT item;

        item = (ClSmQueueItemPtrT) mALLOC(sizeof(ClSmQueueItemT));
        if(!item)
        {
            ret = CL_SM_RC(CL_ERR_NO_MEMORY);
        }
        else
        {
            if(ESM_LOCK(smThis) != CL_OK)
            {
                ret = SM_ERR_LOCKED;
                mFREE(item);
                CL_FUNC_EXIT();
                return ret;
            }
            item->event = *msg;
            if(ESM_IS_PAUSED(smThis) && ESM_IS_DROP_ON_PAUSE(smThis))
            {
                /* instance is paused with drop-on-pause set: drop the event */
                ret = CL_OK;
                mFREE(item);
                ESM_UNLOCK(smThis);
                CL_FUNC_EXIT();
                return ret;
            }
            ret = SMQ_ENQUEUE(smThis->q, item);
            clLogTrace(ESM_LOG_AREA, ESM_LOG_CTX_EVENT, "Event %d added => ret [%d]",
                       item->event.eventId, ret);
            ESM_UNLOCK(smThis);
        }
    }
    else
    {
        ret = CL_SM_RC(CL_ERR_NULL_POINTER);
    }

    CL_FUNC_EXIT();
    return ret;
}
ClRcT clHalLibInitialize() { ClRcT rc= CL_OK ; CL_FUNC_ENTER(); if (CL_TRUE == halInitDone) { clLogCritical(CL_LOG_AREA_UNSPECIFIED,CL_LOG_CONTEXT_UNSPECIFIED,"\n clHalLibInitialize Called Again \n"); CL_FUNC_EXIT(); return (CL_HAL_SET_RC(CL_ERR_INVALID_STATE)); } #ifdef DEBUG rc= dbgAddComponent(COMP_PREFIX, COMP_NAME, COMP_DEBUG_VAR_PTR); if (CL_OK != rc) { clLogError(CL_LOG_AREA_UNSPECIFIED,CL_LOG_CONTEXT_UNSPECIFIED,"dbgAddComponent Failed \n "); CL_FUNC_EXIT(); return rc; } #endif memset(&halDevObjTable,0, sizeof(HalDeviceObjTableT)); halDevObjTable.pphalDeviceObj=(HalDeviceObjectT **)clHeapAllocate((halConfig. halNumDevObject)*sizeof(HalDeviceObjectT *)); if (NULL == halDevObjTable.pphalDeviceObj) { clLogCritical(CL_LOG_AREA_UNSPECIFIED,CL_LOG_CONTEXT_UNSPECIFIED,"\n clHalLibInitialize Error no memory HAL\n"); CL_FUNC_EXIT(); return(CL_HAL_SET_RC(CL_ERR_NO_MEMORY)); } memset(halDevObjTable.pphalDeviceObj,0, ((halConfig. halNumDevObject)*sizeof(HalDeviceObjectT *))); halInitDone = CL_TRUE; /* Create device object(s) from the Configuration Info */ rc = halDevObjTableCreate (); if (rc != CL_OK) { clLogCritical(CL_LOG_AREA_UNSPECIFIED,CL_LOG_CONTEXT_UNSPECIFIED,"\n halDevObjTableCreate Failed"); CL_FUNC_EXIT(); return rc ; } clLogTrace(CL_LOG_AREA_UNSPECIFIED,CL_LOG_CONTEXT_UNSPECIFIED,"\nclHalLibInitialize CL_OK\n"); CL_FUNC_EXIT(); return (CL_OK) ; }
static ClRcT clMsgNextNodeGet(ClIocNodeAddressT node, ClIocNodeAddressT *pNextNode) { ClRcT rc = CL_OK; ClIocNodeAddressT i; ClStatusT status; for(i = node - 1; i >= CL_IOC_MIN_NODE_ADDRESS ; i--) { rc = clCpmNodeStatusGet(i, &status); if(rc != CL_OK) { clLogError("NOD", "STA", "Failed to get node status for node [0x%x]. error code [0x%x].", i, rc); continue; } else if(status == CL_STATUS_DOWN) { continue; } *pNextNode = i; goto out; } for(i = node + 1; i <= CL_IOC_MAX_NODE_ADDRESS ; i++) { rc = clCpmNodeStatusGet(i, &status); if(rc != CL_OK) { clLogError("NOD", "STA", "Failed to get node status for node [0x%x]. error code [0x%x].", i, rc); continue; } else if(status == CL_STATUS_DOWN) { continue; } *pNextNode = i; goto out; } *pNextNode = node; clLogTrace("NOD", "STA", "This node is going down. Message queue failover node is [0x%x].", node); out: return rc; }
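/*
 * Illustrative sketch (hypothetical wrapper, not in the original source):
 * choosing a failover target for this node's message queues with
 * clMsgNextNodeGet() above. The function scans downward from node-1 and then
 * upward from node+1 for the first node that is not reported down, and falls
 * back to the node itself if none is found.
 */
static ClIocNodeAddressT exampleChooseFailoverNode(void)
{
    ClIocNodeAddressT self = clIocLocalAddressGet();
    ClIocNodeAddressT target = self;

    (void)clMsgNextNodeGet(self, &target);
    return target;
}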
ClRcT clRmdReceiveAsyncReply(ClEoExecutionObjT *pThis, ClBufferHandleT rmdRecvMsg, ClUint8T priority, ClUint8T protoType, ClUint32T length, ClIocPhysicalAddressT srcAddr) { ClRmdPktT msg = {{ {0} }}; ClRcT rc = CL_OK; ClRmdAckSendContextT sendContext = {0}; ClUint32T size = 0; clRmdDumpPkt("received async reply", rmdRecvMsg); RMD_DBG4((" RMD receive Async Reply\n")); rc = clBufferReadOffsetSet(rmdRecvMsg, 0, CL_BUFFER_SEEK_SET); rc = clRmdUnmarshallRmdHdr(rmdRecvMsg, &msg.ClRmdHdr, &size); if(rc != CL_OK) { clBufferDelete(&rmdRecvMsg); RMD_DBG3((" %s: Bad Message, rc 0x%x", __FUNCTION__, rc)); return rc; } CL_RMD_VERSION_VERIFY(msg.ClRmdHdr, rc); if (rc != CL_OK) { clBufferDelete(&rmdRecvMsg); return CL_OK; } rc = rmdHandleAsyncReply(pThis, &msg, size, &srcAddr, priority, protoType, rmdRecvMsg); sendContext.srcAddr = srcAddr; sendContext.priority = priority; sendContext.ClRmdHdr = msg.ClRmdHdr; rc = rmdAckSend(pThis,&sendContext); if(rc != CL_OK) { clLogTrace("ACK","ASYN","Error in rmdAckSend for async. rc = 0x%x.\n",rc); } #if RMD_FILTER_CLEANUP clRMDCheckAndCallCommPortCleanup(pThis); #endif return CL_OK; }
/* * Creates a new handle database. User is responsible to cleanup and free * database. */ ClRcT clHandleDatabaseCreate( void (*destructor)(void*), ClHandleDatabaseHandleT *databaseHandle) { ClHdlDatabaseT *hdbp = NULL; nullChkRet(databaseHandle); hdbp = (ClHdlDatabaseT*) clHeapCalloc(1, sizeof(ClHdlDatabaseT)); if (NULL == hdbp) { clLogError(CL_HDL_AREA, CL_HDL_CTX_DBCREATE, "Memory allocation failed"); return CL_HANDLE_RC(CL_ERR_NO_MEMORY); } (void)pthread_mutex_init(&hdbp->mutex, NULL); /* This always returns 0 */ if (destructor != NULL) { hdbp->handle_instance_destructor = destructor; } hdbp->pValidDb = (void *) CL_HDL_VALID_DB; hdbp->id = handleDbId++; /* * Database handle is obtained from memory address here. This is OK, * since (1) handle type is larger or same size as address, (2) the * use of handle is limited to one process. */ *databaseHandle = hdbp; #if 0 clLogTrace(CL_HDL_AREA, CL_HDL_CTX_DBCREATE, "Database [%p] has been created", (ClPtrT) hdbp); #endif clDbgResourceNotify(clDbgHandleGroupResource, clDbgAllocate, 0, hdbp, ("Handle database %p allocated", (ClPtrT) hdbp)); return CL_OK; }
/**
 * Process all pending events.
 *
 * Handles all events pending in the event queue. If an error occurs
 * during processing, this API returns with that error code even if
 * more events remain in the queue.
 *
 * @param smThis Instance Object handle
 *
 * @returns
 *  CL_OK on success <br/>
 *  SM_ERR_NO_EVENT if there are no events in the q <br/>
 *  SM_ERR_LOCKED if the instance is locked <br/>
 *  SM_ERR_PAUSED if the instance is paused <br/>
 *  CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/>
 *
 * @see #clEsmInstanceProcessEvent
 */
ClRcT clEsmInstanceProcessEvents(ClExSmInstancePtrT smThis)
{
    ClRcT ret = CL_OK;
    int k = 0;

    CL_FUNC_ENTER();
    CL_ASSERT(smThis);

    if(smThis)
    {
        while(ret == CL_OK)
        {
            ret = clEsmInstanceProcessEvent(smThis);
            if(ret == CL_OK) k++;
        }
        /* if at least one event was processed and the queue is now empty,
         * report success */
        if(k > 0 && ret == SM_ERR_NO_EVENT)
        {
            ret = CL_OK;
        }
        clLogTrace(ESM_LOG_AREA, ESM_LOG_CTX_EVENT, "Processed %d events", k);
    }
    else
    {
        ret = CL_SM_RC(CL_ERR_NULL_POINTER);
    }

    CL_FUNC_EXIT();
    return ret;
}
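/*
 * Illustrative sketch: a typical produce-then-drain flow with the extended
 * state machine -- queue events with clEsmInstanceEventAdd() and later drain
 * them with clEsmInstanceProcessEvents(). It assumes ClSmEventT is the
 * structure behind ClSmEventPtrT and that eventId is the only field a caller
 * needs to set here.
 */
static ClRcT exampleEsmQueueAndDrain(ClExSmInstancePtrT esm,
                                     ClSmEventIdT firstEvent,
                                     ClSmEventIdT secondEvent)
{
    ClSmEventT event;
    ClRcT rc;

    memset(&event, 0, sizeof(event));

    event.eventId = firstEvent;
    rc = clEsmInstanceEventAdd(esm, &event);   /* copied into a queue item */
    if(rc != CL_OK) return rc;

    event.eventId = secondEvent;
    rc = clEsmInstanceEventAdd(esm, &event);
    if(rc != CL_OK) return rc;

    /* returns CL_OK if at least one queued event was processed */
    return clEsmInstanceProcessEvents(esm);
}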
static ClUint32T __differenceVectorGet(ClDifferenceBlockT *block, ClUint8T *data, ClOffsetT offset, ClSizeT dataSize, ClDifferenceVectorT *differenceVector) { ClUint32T i; ClMD5T *md5CurList = block->md5List; ClMD5T *md5List ; ClUint32T md5CurBlocks = block->md5Blocks; ClUint32T md5Blocks = 0; ClUint32T dataBlocks, sectionBlocks ; ClSizeT sectionSize; ClSizeT vectorSize = 0; ClSizeT lastDataSize = dataSize; ClUint8T *pLastData = NULL; ClInt32T lastMatch = -1; ClBoolT doMD5 = CL_FALSE; ClUint32T startBlock, endBlock; ClOffsetT startOffset, nonZeroOffset = 0; sectionSize = offset + dataSize; /* * First align to md5 block size */ dataBlocks = ((dataSize + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK) >> CL_MD5_BLOCK_SHIFT; sectionBlocks = ((sectionSize + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK) >> CL_MD5_BLOCK_SHIFT; startOffset = offset & CL_MD5_BLOCK_MASK; startBlock = ( offset & ~CL_MD5_BLOCK_MASK ) >> CL_MD5_BLOCK_SHIFT; /* align the start block*/ endBlock = sectionBlocks; md5Blocks = CL_MAX(sectionBlocks, md5CurBlocks); md5List = (ClMD5T *) clHeapCalloc(md5Blocks, sizeof(*md5List)); CL_ASSERT(md5List != NULL); if(md5CurList) memcpy(md5List, md5CurList, md5CurBlocks); else { md5CurList = md5List; md5CurBlocks = md5Blocks; } /* * If the specified vector blocks don't match the data blocks. * refill. Reset the md5 for offsetted writes considering its cheaper to * just recompute the md5 for this block on a subsequent write to this block */ if(differenceVector && differenceVector->md5Blocks && differenceVector->md5Blocks == dataBlocks) { memcpy(md5List + startBlock, differenceVector->md5List, sizeof(*md5List) * differenceVector->md5Blocks); /* *If data vector already specified, then just update the md5 list and exit. */ if(differenceVector->numDataVectors) { clLogTrace("DIFF", "MD5", "Difference vector already specified with md5 list of size [%d] " "with [%d] difference data vectors", dataBlocks, differenceVector->numDataVectors); goto out_set; } } else doMD5 = CL_TRUE; data += offset; pLastData = data; /* * If we are going to allocate datavectors, free the existing set to be overridden * with a fresh set. */ if(differenceVector && differenceVector->dataVectors) { clHeapFree(differenceVector->dataVectors); differenceVector->dataVectors = NULL; differenceVector->numDataVectors = 0; } nonZeroOffset |= startOffset; for(i = startBlock; i < endBlock; ++i) { ClSizeT c = CL_MIN(CL_MD5_BLOCK_SIZE - startOffset, dataSize); nonZeroOffset &= startOffset; if(doMD5) { if(!startOffset) clMD5Compute(data, c, md5List[i].md5sum); else memset(&md5List[i], 0, sizeof(md5List[i])); } dataSize -= c; data += c; if(startOffset) startOffset = 0; /* * Just gather the new md5 list if there is no vector to be accumulated */ if(!differenceVector) continue; /* * Just gather md5s if we hit the limit for the current data size or if * we didnt have an md5 to start with */ if(md5List == md5CurList) { if(lastMatch < 0) lastMatch = i; continue; } if(i < md5CurBlocks) { /* * Always store offsetted blocks in the difference vector. whose md5 wasnt computed. */ if(!nonZeroOffset && memcmp(md5List[i].md5sum, md5CurList[i].md5sum, sizeof(md5List[i].md5sum)) == 0) { /* * Blocks are the same. Skip the add for this block. 
*/ clLogTrace("DIFF", "MD5", "Skipping copying block [%d] to replica", i); continue; } } else { if(lastMatch < 0) { lastMatch = i; pLastData = data - c; lastDataSize = dataSize + c; } continue; } clLogTrace("DIFF", "MD5", "Copying block [%d] to replica of size [%lld]", i, c); if(!(differenceVector->numDataVectors & 7 ) ) { differenceVector->dataVectors = (ClDataVectorT*) clHeapRealloc(differenceVector->dataVectors, sizeof(*differenceVector->dataVectors) * (differenceVector->numDataVectors + 8)); CL_ASSERT(differenceVector->dataVectors != NULL); memset(differenceVector->dataVectors + differenceVector->numDataVectors, 0, sizeof(*differenceVector->dataVectors) * 8); } differenceVector->dataVectors[differenceVector->numDataVectors].dataBlock = i; /* block mismatched */ differenceVector->dataVectors[differenceVector->numDataVectors].dataBase = data - c; differenceVector->dataVectors[differenceVector->numDataVectors].dataSize = c; ++differenceVector->numDataVectors; vectorSize += c; } CL_ASSERT(dataSize == 0); if(lastMatch >= 0 && differenceVector) /* impossible but coverity killer : Who knows! */ { if(!(differenceVector->numDataVectors & 7)) { differenceVector->dataVectors = (ClDataVectorT*) clHeapRealloc(differenceVector->dataVectors, sizeof(*differenceVector->dataVectors) * (differenceVector->numDataVectors + 8)); CL_ASSERT(differenceVector->dataVectors != NULL); memset(differenceVector->dataVectors + differenceVector->numDataVectors, 0, sizeof(*differenceVector->dataVectors) * 8); } clLogTrace("DIFF", "MD5", "Copying block [%d] to replica of size [%lld]", lastMatch, lastDataSize); differenceVector->dataVectors[differenceVector->numDataVectors].dataBlock = lastMatch; differenceVector->dataVectors[differenceVector->numDataVectors].dataBase = pLastData; differenceVector->dataVectors[differenceVector->numDataVectors].dataSize = lastDataSize; ++differenceVector->numDataVectors; vectorSize += lastDataSize; } if(differenceVector) { clLogTrace("DIFF", "MD5", "Vector has [%lld] bytes to be written. Skipped [%lld] bytes.", vectorSize, sectionSize - vectorSize); } out_set: block->md5List = md5List; block->md5Blocks = md5Blocks; if(doMD5 && differenceVector) { clLogTrace("DIFF", "MD5", "Copying md5 list preloaded with [%d] blocks to the difference vector " "with [%d] data difference vectors", dataBlocks, differenceVector->numDataVectors); if(differenceVector->md5List) clHeapFree(differenceVector->md5List); differenceVector->md5List = (ClMD5T*) clHeapCalloc(dataBlocks, sizeof(*differenceVector->md5List)); CL_ASSERT(differenceVector->md5List != NULL); memcpy(differenceVector->md5List, md5List + startBlock, sizeof(*differenceVector->md5List) * dataBlocks); differenceVector->md5Blocks = dataBlocks; } if(md5CurList != md5List) clHeapFree(md5CurList); return sectionBlocks; }
static ClRcT clLogFlusherRecordsGetMcast(ClLogSvrStreamDataT *pStreamData, ClUint32T nRecords, ClLogFlushRecordT *pFlushRecord) { ClRcT rc = CL_OK; ClLogStreamHeaderT *pHeader = pStreamData->pStreamHeader; ClUint8T *pRecords = pStreamData->pStreamRecords; ClUint32T startIdx = 0; ClUint32T buffLen = 0; ClIocNodeAddressT localAddr = 0; ClUint8T *pBuffer = NULL; ClUint32T firstBatch = 0; ClBoolT doMulticast = CL_FALSE; ClUint32T secondBatch = 0; if ((CL_LOG_STREAM_HEADER_STRUCT_ID != pHeader->struct_id) || (CL_LOG_STREAM_HEADER_UPDATE_COMPLETE != pHeader->update_status)) {/* Stream Header is corrupted so reset Header parameters */ clLogStreamHeaderReset(pHeader); } if(pFlushRecord->multicast < 0 ) { doMulticast = ( (0 < (pStreamData->ackersCount + pStreamData->nonAckersCount)) && (pHeader->streamMcastAddr.iocMulticastAddress != 0) )? CL_TRUE: CL_FALSE; if( (pStreamData->ackersCount + pStreamData->nonAckersCount) == 1 && (pStreamData->fileOwnerAddr == clIocLocalAddressGet()) ) { doMulticast = CL_FALSE; } pFlushRecord->multicast = doMulticast; pFlushRecord->mcastAddr = pHeader->streamMcastAddr; pFlushRecord->ackersCount = pStreamData->ackersCount; } else { doMulticast = (pFlushRecord->multicast ? CL_TRUE : CL_FALSE) ; } localAddr = clIocLocalAddressGet(); if((!doMulticast) && (pStreamData->fileOwnerAddr != localAddr)) { /*Nobody is interested in these records and they are not for me then skip them */ /* clLogDebug("SVR", "FLU", "Nobody is Interested in These records, So skipping them");*/ return rc; } startIdx = pHeader->startAck % pHeader->maxRecordCount; if(nRecords > pHeader->maxRecordCount) nRecords = pHeader->maxRecordCount; CL_ASSERT(pHeader->recordSize < 4*1024); // Sanity check the log record size buffLen = nRecords * pHeader->recordSize; clLogTrace(CL_LOG_AREA_SVR, "FLU", "startIdx: %u maxRec: %u nRecords: %u startIdx: %d recordIdx: %d", startIdx, pHeader->maxRecordCount, nRecords, pHeader->startAck, pHeader->recordIdx); /* FirstBatch is from startIdx towards maxRecordCount and SecondBatch is from 0 to startIdx * SecondBatch is only valid if number of records are greater than (maxRecordCount - startIdx) */ if ( (startIdx + nRecords) <= pHeader->maxRecordCount ) { firstBatch = nRecords; secondBatch = 0; } else { firstBatch = pHeader->maxRecordCount - startIdx; secondBatch = nRecords + startIdx - pHeader->maxRecordCount; } /* Computed firstBatch and secondBatch number of records, now verify and flush them */ pBuffer = pRecords + (startIdx * pHeader->recordSize); pFlushRecord->pBuffs = (ClLogFlushBufferT*) clHeapRealloc(pFlushRecord->pBuffs, (pFlushRecord->numBufs+1)*sizeof(*pFlushRecord->pBuffs)); CL_ASSERT(pFlushRecord->pBuffs != NULL); memset(pFlushRecord->pBuffs+pFlushRecord->numBufs, 0, sizeof(*pFlushRecord->pBuffs)); pFlushRecord->pBuffs[pFlushRecord->numBufs].pRecord = (ClUint8T*) clHeapCalloc(sizeof(ClUint8T), buffLen); CL_ASSERT(pFlushRecord->pBuffs[pFlushRecord->numBufs].pRecord != NULL); pFlushRecord->pBuffs[pFlushRecord->numBufs].numRecords = 0; if (firstBatch) { clLogVerifyAndFlushRecords(pBuffer, pHeader, pFlushRecord, firstBatch); } if (secondBatch) { pBuffer = pRecords; clLogVerifyAndFlushRecords(pBuffer, pHeader, pFlushRecord, secondBatch); } pFlushRecord->numBufs++; CL_LOG_DEBUG_TRACE(("Exit")); return rc; }
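/*
 * Illustrative sketch (standalone restatement, hypothetical helper name):
 * log records live in a circular buffer of maxRecordCount slots, so a run of
 * nRecords starting at startIdx may wrap around the end. This helper shows
 * the same firstBatch/secondBatch split computed above.
 */
static void exampleSplitRingRun(ClUint32T startIdx, ClUint32T nRecords,
                                ClUint32T maxRecordCount,
                                ClUint32T *firstBatch, ClUint32T *secondBatch)
{
    if((startIdx + nRecords) <= maxRecordCount)
    {
        *firstBatch = nRecords;                              /* no wrap */
        *secondBatch = 0;
    }
    else
    {
        *firstBatch = maxRecordCount - startIdx;             /* run to the end of the buffer */
        *secondBatch = nRecords + startIdx - maxRecordCount; /* wrapped part from index 0 */
    }
}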
ClRcT clLogFlusherCookieHandleDestroy(ClHandleT hFlusher, ClBoolT timerExpired) { ClRcT rc = CL_OK; ClLogFlushCookieT *pFlushCookie = NULL; ClLogSvrEoDataT *pSvrEoEntry = NULL; CL_LOG_DEBUG_TRACE(("Enter")); /* * FIXME: * Unable to flush this set of records but will this be true in * future also, DON'T know * Also need to reset the startAck otherwise it will not enter * the cond_wait */ rc = clLogSvrEoEntryGet(&pSvrEoEntry, NULL); if( CL_OK != rc ) { return rc; } rc = clHandleValidate(pSvrEoEntry->hFlusherDB, hFlusher); if( CL_OK != rc ) { return rc;/*Flusher handle has already been destroyed*/ } rc = clHandleCheckout(pSvrEoEntry->hFlusherDB, hFlusher, (void **) &pFlushCookie); if( (CL_TRUE == timerExpired) && (CL_OK != rc) ) { clLogTrace("LOG", "FLS", "Timer has already destroyed the handle"); return CL_OK; } if( CL_OK != rc ) { clLogError("LOG", "FLS", "Flusher handle checkout failed : " "rc[0x %x]", rc); return rc; } if( CL_FALSE == timerExpired ) { clLogWarning("LOG", "FLS", "Didn't get ack for %d records", pFlushCookie->numRecords); } CL_LOG_CLEANUP(clTimerDelete(&pFlushCookie->hTimer), CL_OK); rc = clHandleCheckin(pSvrEoEntry->hFlusherDB, hFlusher); if( CL_OK != rc ) { clLogError("LOG", "FLS", "clHandleCheckin(): rc[0x %x]", rc); } CL_LOG_CLEANUP(clHandleDestroy(pSvrEoEntry->hFlusherDB, hFlusher), CL_OK); CL_LOG_DEBUG_TRACE(("Exit")); return rc; }
/** * Creates a new Extended State machine Instance. * * This API creates a new Extended State macine Instance of given * state machine type. The extended state machine shall include * all the regular state machine instance functionalities, plus * additional event queue, history, and lock capabilities. * * @param sm State machine type * @param instance [out] newly created extended state machine instance * * @returns * CL_OK on CL_OK <br/> * CL_SM_RC(CL_ERR_NO_MEMORY) on memory allocation FAILURE <br/> * CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null sm / instance <br/> * * @see #clEsmInstanceDelete */ ClRcT clEsmInstanceCreate(ClSmTemplatePtrT sm, ClExSmInstancePtrT* instance ) { ClRcT ret = CL_OK; CL_FUNC_ENTER(); CL_ASSERT(instance); CL_ASSERT(sm); clLogTrace(ESM_LOG_AREA,ESM_LOG_CTX_CREATE,"Create Extended State Machine Instance"); if(sm && instance) { /* allocate the instance space */ *instance = (ClExSmInstancePtrT) mALLOC(sizeof(ClExSmInstanceT)); if(*instance!=0) { memset(*instance, 0, sizeof(ClExSmInstanceT)); /* call sm create here */ ret = clSmInstanceCreate(sm, &(*instance)->fsm); if(ret == CL_OK) { ret = clOsalMutexCreate(&(*instance)->lock); if (CL_OK != ret) { clSmInstanceDelete((*instance)->fsm); mFREE(*instance); ret = SM_ERR_NO_SEMA; } else { /* create queue and init */ ret = SMQ_CREATE((*instance)->q); if(ret == CL_OK) { /* init log buffer */ ESM_LOG_INIT((*instance)->log, ESM_LOG_ENTRIES); } if(!(*instance)->log.buffer || ret != CL_OK) { /* delete the instance */ ret = clSmInstanceDelete((*instance)->fsm); /* delete the mutex */ clOsalMutexDelete((*instance)->lock); /* check if q init succeeded */ if(ret == CL_OK) { /* delete the queue */ clQueueDelete(&((*instance)->q)); } /* free the instance */ mFREE(*instance); ret = CL_SM_RC(CL_ERR_NO_MEMORY); } } } } else { ret = CL_SM_RC(CL_ERR_NO_MEMORY); } } else { ret = CL_SM_RC(CL_ERR_NULL_POINTER); } CL_FUNC_EXIT(); return ret; }
ClRcT clHandleCheckin( ClHandleDatabaseHandleT databaseHandle, ClHandleT handle) { ClRcT rc = CL_OK; void *instance = NULL; ClHdlDatabaseT *hdbp = (ClHdlDatabaseT*) databaseHandle; ClRcT ec = CL_OK; ClInt32T refcount = 0; hdlDbValidityChk(hdbp); /* sometimes people want to create the same handle across multiple nodes hdlValidityChk(handle,hdbp); */ handle = CL_HDL_IDX(handle); /* once we've verified it, we only care about the index */ /* * Decrementing handle to ensure the non-zero handle interface. */ if (CL_HANDLE_INVALID_VALUE == handle--) { clLogError(CL_HDL_AREA, CL_HDL_CTX_CHECKIN, "Passed handle [%p:%#llX] is invalid", (ClPtrT) hdbp, handle); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); /* 0 no longer allowed */ } ec = pthread_mutex_lock(&hdbp->mutex); if (ec != 0) { int err = errno; clDbgCodeError(CL_HANDLE_RC(CL_ERR_MUTEX_ERROR), ("Handle database mutex lock failed error: %s (%d)", strerror(err), err) ); return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); } if (handle >= (ClHandleT)hdbp->n_handles) { pthread_mutex_unlock( &hdbp->mutex); clLogError(CL_HDL_AREA, CL_HDL_CTX_CHECKIN,"Passed handle [%p:%#llX] is invalid handle", (ClPtrT) hdbp, handle); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); } refcount = hdbp->handles[handle].ref_count; if( (--refcount <= 0) && (hdbp->handles[handle].state != HANDLE_STATE_PENDINGREMOVAL) ) { pthread_mutex_unlock( &hdbp->mutex); clLogError(CL_HDL_AREA, CL_HDL_CTX_CHECKIN, "There is no balance between checkout, checkin for handle [%p:%#llX]", (ClPtrT) hdbp, (handle + 1)); return CL_HANDLE_RC(CL_ERR_INVALID_STATE); } CL_ASSERT(hdbp->handles[handle].ref_count > 0); // unsigned compare (CID 196 on #1780) hdbp->handles[handle].ref_count -= 1; if (hdbp->handles[handle].ref_count == 0) { instance = (hdbp->handles[handle].instance); if (hdbp->handle_instance_destructor != NULL) { hdbp->handle_instance_destructor(instance); } if (hdbp->handles[handle].flags & HANDLE_ALLOC_FLAG) /* Clean up the handle if we allocated it */ clHeapFree(instance); memset(&hdbp->handles[handle], 0, /* This also makes entry EMPTY */ sizeof(ClHdlEntryT)); CL_ASSERT(hdbp->n_handles_used > 0); // unsigned compare (CID 196 on #1780) hdbp->n_handles_used--; } ec = pthread_mutex_unlock(&hdbp->mutex); if (ec != 0) { int err = errno; clDbgCodeError(CL_HANDLE_RC(CL_ERR_MUTEX_ERROR), ("Handle database mutex unlock failed error: %s (%d)", strerror(err), err) ); return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } /* This check to avoid recursive call from LogClient */ if( refcount > 0 ) { #if 0 clLogTrace(CL_HDL_AREA, CL_HDL_CTX_CHECKIN, "Checkin for handle [%p:%#llX]", (ClPtrT) hdbp, (handle + 1)); #endif } return rc; }
ClRcT clMsgSendMessage_idl(ClMsgMessageSendTypeT sendType, ClIocPhysicalAddressT compAddr, ClNameT *pName, ClMsgMessageIovecT *pMessage, SaTimeT sendTime, ClHandleT senderHandle, SaTimeT timeout, ClBoolT isSync, SaMsgAckFlagsT ackFlag, MsgCltSrvClMsgMessageReceivedAsyncCallbackT fpAsyncCallback, void *cookie) { ClRcT rc = CL_OK; ClIdlHandleT idlHandle = 0; ClIdlHandleObjT idlObj = {0}; if(compAddr.nodeAddress == CL_IOC_BROADCAST_ADDRESS) { rc = VDECL_VER(clMsgMessageReceivedClientAsync, 4, 0, 0)(gIdlBcastHandle, sendType, pName, pMessage, sendTime, senderHandle, timeout, NULL, NULL); if(rc != CL_OK) clLogError("IDL", "BCAST", "Failed to broadcast a message. error code [0x%x].", rc); return rc; } memcpy(&idlObj, &gIdlUcastObj, sizeof(idlObj)); idlObj.address.address.iocAddress.iocPhyAddress = compAddr; if (pMessage->priority == SA_MSG_MESSAGE_HIGHEST_PRIORITY) { idlObj.options.priority = CL_IOC_HIGH_PRIORITY; } rc = clIdlHandleInitialize(&idlObj, &idlHandle); if(rc != CL_OK) { clLogError("IDL", "SND", "Failed to initialize the IDL handle. error code [0x%x].", rc); goto error_out; } clLogTrace("IDL", "SND", "Sending a message to component [0x%x,0x%x].", compAddr.nodeAddress, compAddr.portId); if (isSync == CL_TRUE) { rc = VDECL_VER(clMsgMessageReceivedClientSync, 4, 0, 0)(idlHandle, sendType, pName, pMessage, sendTime, senderHandle, timeout); if(rc != CL_OK) clLogError("IDL", "SND", "Failed to send a message to component [0x%x,0x%x]. error code [0x%x].", compAddr.nodeAddress, compAddr.portId, rc); } else { if(ackFlag == SA_MSG_MESSAGE_DELIVERED_ACK) { rc = VDECL_VER(clMsgMessageReceivedClientAsync, 4, 0, 0)(idlHandle, sendType, pName, pMessage, sendTime, senderHandle, timeout, fpAsyncCallback, cookie); if(rc != CL_OK) clLogError("IDL", "SND", "Failed to send a message to component [0x%x,0x%x]. error code [0x%x].", compAddr.nodeAddress, compAddr.portId, rc); } else { rc = VDECL_VER(clMsgMessageReceivedClientAsync, 4, 0, 0)(idlHandle, sendType, pName, pMessage, sendTime, senderHandle, timeout, NULL, NULL); if(rc != CL_OK) clLogError("IDL", "SND", "Failed to send a message to component [0x%x,0x%x]. error code [0x%x].", compAddr.nodeAddress, compAddr.portId, rc); } } clIdlHandleFinalize(idlHandle); error_out: return rc; }
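/*
 * Illustrative sketch (hypothetical wrapper): a fire-and-forget unicast send
 * through clMsgSendMessage_idl() above. isSync == CL_FALSE with an ackFlag
 * that does not include SA_MSG_MESSAGE_DELIVERED_ACK selects the async stub
 * with no callback; sendTime and senderHandle are left at zero here purely
 * for illustration.
 */
static ClRcT exampleAsyncSend(ClMsgMessageSendTypeT sendType,
                              ClIocPhysicalAddressT dest, ClNameT *pQueueName,
                              ClMsgMessageIovecT *pMessage, SaTimeT timeout)
{
    return clMsgSendMessage_idl(sendType, dest, pQueueName, pMessage,
                                0 /* sendTime */, 0 /* senderHandle */,
                                timeout, CL_FALSE /* isSync */,
                                0 /* ackFlag */, NULL /* callback */, NULL /* cookie */);
}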
ClRcT clHandleDestroy ( ClHandleDatabaseHandleT databaseHandle, ClHandleT handle) { ClHdlDatabaseT *hdbp = (ClHdlDatabaseT*) databaseHandle; ClRcT ec = CL_OK; hdlDbValidityChk(hdbp); handle = CL_HDL_IDX(handle); /* once we've verified it, we only care about the index */ /* * Decrementing handle to ensure the non-zero handle interface. */ if (CL_HANDLE_INVALID_VALUE == handle--) { clLogError("HDL", CL_LOG_CONTEXT_UNSPECIFIED, "Passed handle [%p:%#llX] is invalid", (ClPtrT) hdbp, handle); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); /* 0 no longer allowed */ } /* Verify this particular handle has been already created */ if( (NULL == hdbp->handles) || (0 == hdbp->n_handles_used) ) { clLogError("HDL", CL_LOG_CONTEXT_UNSPECIFIED, "Invalid attempt to delete the non exiting handle [%p:%#llX]", (ClPtrT) hdbp, handle); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); } ec = pthread_mutex_lock (&hdbp->mutex); if (ec != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); } if (handle >= (ClHandleT)hdbp->n_handles) { ec = pthread_mutex_unlock (&hdbp->mutex); if (ec != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } clLogError("HDL", CL_LOG_CONTEXT_UNSPECIFIED, "Passed handle [%p:%#llX] has not been created", (ClPtrT) hdbp, handle); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); } clDbgResourceNotify(clDbgHandleResource, clDbgRelease, hdbp, handle+1, ("Handle [%p:%#llX] (state: %d, ref: %d) released", (ClPtrT)hdbp, handle+1,hdbp->handles[handle].state,hdbp->handles[handle].ref_count)); if (HANDLE_STATE_USED == hdbp->handles[handle].state) { hdbp->handles[handle].state = HANDLE_STATE_PENDINGREMOVAL; ec = pthread_mutex_unlock (&hdbp->mutex); if (ec != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } /* * Adding 1 to handle to ensure the non-zero handle interface. */ ec = clHandleCheckin (databaseHandle, handle+1); return ec; } else if (HANDLE_STATE_EMPTY == hdbp->handles[handle].state) { ec = CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); } else if (HANDLE_STATE_PENDINGREMOVAL == hdbp->handles[handle].state) { ec = pthread_mutex_unlock( &hdbp->mutex); if( ec != 0 ) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } clLogWarning(CL_HDL_AREA, CL_HDL_CTX_DESTROY, "Destroy has been called for this handle [%p:%#llX]" "returning CL_OK", (ClPtrT) hdbp, (handle + 1)); return CL_OK; } else { clDbgCodeError(CL_ERR_INVALID_HANDLE, ("Passed handle [%p:%#llX] doesn't have any proper state," "corrupted code", (ClPtrT) hdbp, (handle + 1))); /* * Invalid state - this musn't happen! */ } if(pthread_mutex_unlock (&hdbp->mutex) != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } #if 0 clLogTrace(CL_HDL_AREA, CL_HDL_CTX_DESTROY, "Handle [%p:%#llX] has been deleted successfully", (ClPtrT) hdbp, (handle + 1)); #endif return ec; }
ClRcT clHandleAdd (ClHandleDatabaseHandleT databaseHandle, void* instance, ClIocPhysicalAddressT *compAddr, ClHandleT* handle_out) { ClHandleT handle = 0; ClHdlEntryT *new_handles = NULL; ClBoolT found = CL_FALSE; ClRcT rc = CL_OK; ClHdlDatabaseT *hdbp = (ClHdlDatabaseT*) databaseHandle; nullChkRet(handle_out); hdlDbValidityChk(hdbp); rc = pthread_mutex_lock (&hdbp->mutex); if (rc != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); } for (handle = 0; handle < hdbp->n_handles; handle++) { if (hdbp->handles[handle].state == HANDLE_STATE_EMPTY) { found = 1; break; } } if (found == 0) { new_handles = (ClHdlEntryT *) realloc ( hdbp->handles, sizeof (ClHdlEntryT) * (hdbp->n_handles + CL_HDL_NUM_HDLS_BUNCH)); if (new_handles == NULL) { rc = pthread_mutex_unlock (&hdbp->mutex); if (rc != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be very bad */ } return CL_HANDLE_RC(CL_ERR_NO_MEMORY); } memset(&new_handles[hdbp->n_handles], '\0', sizeof(ClHdlEntryT) * CL_HDL_NUM_HDLS_BUNCH); hdbp->n_handles += CL_HDL_NUM_HDLS_BUNCH; hdbp->handles = new_handles; } hdbp->handles[handle].state = HANDLE_STATE_USED; hdbp->handles[handle].instance = instance; hdbp->handles[handle].ref_count = 1; hdbp->handles[handle].flags = 0; hdbp->n_handles_used++; /* * Adding 1 to handle to ensure the non-zero handle interface. */ if (compAddr == NULL) *handle_out = CL_HDL_MAKE_ADDR(ASP_NODEADDR,gEOIocPort,hdbp->id, handle + 1); else *handle_out = CL_HDL_MAKE_ADDR(compAddr->nodeAddress,compAddr->portId, hdbp->id, handle + 1); hdbp->handles[handle].handle = *handle_out; // Log uses handle so handle cant' use log // clDbgResourceNotify(clDbgHandleResource, clDbgAllocate, hdbp, handle+1, ("Handle [%p:%#llX] allocated", (ClPtrT)hdbp, handle+1)); rc = pthread_mutex_unlock (&hdbp->mutex); if (rc != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } #if 0 clLogTrace(CL_HDL_AREA, CL_HDL_CTX_CREATE, "Handle [%p:%#llX] has been created", (ClPtrT) hdbp, (handle + 1)); #endif return rc; }
ClRcT _clTxnAgentProcessClientCmd( CL_IN ClBufferHandleT inMsgHandle, CL_OUT ClBufferHandleT outMsgHandle, CL_IN ClTxnMessageHeaderT *pMsgHdr) { ClRcT rc = CL_OK; ClUint32T mCount = pMsgHdr->msgCount; ClTxnStartStopT startstop = CL_TXN_DEFAULT; ClTxnCommHandleT commHandle; CL_FUNC_ENTER(); CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("To processing %d messages", pMsgHdr->msgCount)); rc = clTxnCommIfcNewSessionCreate(CL_TXN_MSG_AGNT_RESP_TO_CLIENT, pMsgHdr->srcAddr, CL_TXN_CLIENT_MGR_RESP_RECV, NULL, CL_TXN_RMD_DFLT_TIMEOUT, CL_TXN_COMMON_ID, &commHandle); clLogTrace("AGT", "RDT", "[%d] Message(s) received from client [0x%x:0x%x]", pMsgHdr->msgCount, pMsgHdr->srcAddr.nodeAddress, pMsgHdr->srcAddr.portId); while ( (CL_OK == rc) && (pMsgHdr->msgCount > 0) ) { ClTxnCmdT tCmd; pMsgHdr->msgCount--; rc = VDECL_VER(clXdrUnmarshallClTxnCmdT, 4, 0, 0)(inMsgHandle, &tCmd); switch (tCmd.cmd) { case CL_TXN_CMD_READ_JOB: if(mCount == (pMsgHdr->msgCount + 1) ) { startstop = CL_TXN_START; } if(!pMsgHdr->msgCount) { startstop |= CL_TXN_STOP; } clLogDebug("AGT", NULL, "Processing stop, startstop[%d], mCount[%d], msgCount[%d]", startstop, mCount, pMsgHdr->msgCount); rc = clTxnAgentReadJob(tCmd, inMsgHandle, commHandle, startstop); if(CL_OK != rc) { clLogError("AGT", NULL, "Failed to process read job, rc=[0x%x]", rc); } startstop = CL_TXN_PHASE; break; default: CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Invalid comamnd received 0x%x", tCmd.cmd)); rc = CL_ERR_INVALID_PARAMETER; break; } } rc = clTxnCommIfcSessionRelease(commHandle); rc = clTxnCommIfcReadMessage(commHandle, outMsgHandle); if (CL_OK != rc) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Failed to process all txn-cmds. rc:0x%x", rc)); rc = CL_GET_ERROR_CODE(rc); } CL_FUNC_EXIT(); return (rc); }
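/*
 * Illustrative sketch (standalone restatement): the start/stop flag handling
 * above tags the first command of a batch CL_TXN_START, OR-s CL_TXN_STOP into
 * the last one (so a single-command batch carries both), and runs everything
 * in between as CL_TXN_PHASE. The helper below reproduces that computation
 * from the total message count and the count remaining after the current one.
 */
static ClTxnStartStopT exampleTxnBatchFlags(ClUint32T totalMsgs, ClUint32T remainingAfterThis)
{
    ClTxnStartStopT flags = CL_TXN_PHASE;

    if(remainingAfterThis + 1 == totalMsgs)   /* first command in the batch */
        flags = CL_TXN_START;
    if(remainingAfterThis == 0)               /* last command in the batch */
        flags |= CL_TXN_STOP;
    return flags;
}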
/** * NOT AN EXTERNAL API. PLEASE DO NOT DOCUMENT. * * This routine creates an object instance in the calling context, and * sets the object type the one specified as the argument. Also, the * object instance is added to the object manager database. */ void * omCommonCreateObj(ClOmClassTypeT classId, ClUint32T numInstances, ClHandleT *handle, void *pExtObj, int flag, ClRcT *rc, void *pUsrData, ClUint32T usrDataLen) { int idx; char *pObjPtr = NULL; ClUint32T **pInst; ClUint32T instIdx = 0; ClOmClassControlBlockT * pTab; char *tmpPtr; ClUint32T instBlkLen = 0; CL_FUNC_ENTER(); if (NULL == rc) { clLogError("OMG", "OMC", "Null value passed for return code"); return (NULL); } if(NULL == handle || ( (flag == CL_OM_ADD_FLAG) && (NULL == pExtObj) ) ) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("NULL handle is passed.")); *rc = CL_OM_SET_RC(CL_OM_ERR_NULL_PTR); return (NULL); } *rc = 0; /* validate the input arguments */ if (omClassTypeValidate(classId) != CL_OK) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Invalid input classId arguments")); *rc = CL_OM_SET_RC(CL_OM_ERR_INVALID_CLASS); return (NULL); } pTab = clOmClassEntryGet(classId); CL_ASSERT(pTab); if (!numInstances) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Invalid input numInstances arguments")); *rc = CL_OM_SET_RC(CL_OM_ERR_INVALID_OBJ_INSTANCE); return (NULL); } /* Get obj memory block length */ instBlkLen = pTab->size * numInstances; Reallocate: /* * Check if the class control structure is initalized with the instance * table. This is done during the initialization of the class table. */ if (!(pInst = pTab->pInstTab)) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Instance table for the class does not exist")); *rc = CL_OM_SET_RC(CL_OM_ERR_INSTANCE_TAB_NOT_EXISTS); return (NULL); } /* Find the first empty slot in the instance table */ *rc = omGetFreeInstSlot(pInst, pTab->maxObjInst, &instIdx); if (CL_GET_ERROR_CODE(*rc) == CL_ERR_NOT_EXIST) { ClUint32T **tmp_ptr = NULL; ClUint32T tmp_size = 0; clLogDebug("OMC", "OBC", "No free slot found in the OM class [0x%x] buffer for this object. " "Reallocating the class buffer size.", classId); /* No free slot found. Need to allocate maInstances number of slots more */ pTab->maxObjInst = pTab->maxObjInst * 2 ; /* Double the size of max instances */ tmp_size = (pTab->maxObjInst * sizeof(ClUint32T *)); tmp_ptr = pTab->pInstTab; tmp_ptr = (ClUint32T **) clHeapRealloc(tmp_ptr, tmp_size); if (NULL == tmp_ptr) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Failed to allocate memory for Instance Table")); *rc = CL_OM_SET_RC(CL_OM_ERR_NO_MEMORY); return (NULL); } pTab->pInstTab = tmp_ptr; goto Reallocate; } clLogTrace("OMC", "OBC", "Allocating the index [%u] in the OM class [0x%x] buffer.", instIdx, classId); /* Check if we have room for the contiguous instance available to * allocate the object instances requested by the user. * NOTE: We could enhance this later to allow dis-contiguous slots */ for (idx = instIdx; idx < (instIdx + numInstances); idx++) { if (mGET_REAL_ADDR(pInst[idx])) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Unable to fit requested num instances")); *rc = CL_OM_SET_RC(CL_OM_ERR_INSTANCE_TAB_NOSLOTS); return (NULL); } } /* Allocate the memory for the object instances */ if (flag == CL_OM_CREATE_FLAG) { pObjPtr = (char*)clHeapAllocate(instBlkLen); if(NULL == pObjPtr) { /* * TODO: To check if we have to go a free the instances * that were allocated when (numInstances > 1) req. 
*/ #if (CW_PROFILER == YES) /* TODO: lockId = osLock(); */ /* TODO: pOM->perfStat.memoryAllocFail++; */ /* TODO: osUnLock(lockId); */ #endif CL_DEBUG_PRINT(CL_DEBUG_CRITICAL, ("unable to allocate memory from heap!!")); CL_FUNC_EXIT(); *rc = CL_ERR_NO_MEMORY; return (NULL); } /* Reset object block contents to 0 */ memset(pObjPtr, 0, instBlkLen); tmpPtr = pObjPtr; } else if (flag == CL_OM_ADD_FLAG) { tmpPtr = pExtObj; } else { CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Unknown flag argument passed")); *rc = CL_ERR_INVALID_PARAMETER; return (NULL); } /* Now, add it to the instance table */ for (idx = instIdx; idx < (instIdx + numInstances); idx++) { /* * Cautionary check, if the address is *NOT* aligned on a four * byte boundry */ if ((ClWordT)tmpPtr & INST_BITS_MASK) { CL_DEBUG_PRINT(CL_DEBUG_CRITICAL, ("Allocated buffer not on word aligned boundry")); *rc = CL_OM_SET_RC(CL_OM_ERR_ALOC_BUF_NOT_ALIGNED); return (NULL); } /* Start adding the object to the instance table */ ((struct CL_OM_BASE_CLASS *)tmpPtr)->__objType = CL_OM_FORM_HANDLE(classId, instIdx); /* TODO: lockId = osLock(); */ if (flag == CL_OM_CREATE_FLAG) pInst[idx] = (ClUint32T *)mSET_ALLOC_BY_OM(tmpPtr); else pInst[idx] = (ClUint32T *)tmpPtr; pTab->curObjCount++; #if (CW_PROFILER == YES) /* pOM->perfStat.objectCreated++; */ #endif /* TODO: osUnLock(lockId); */ /* Now, start calling the initializer method for the class hierarchy */ *rc = omInitObjHierarchy(pTab, classId, (void *)tmpPtr, pUsrData, usrDataLen); tmpPtr += pTab->size; } /* return the handle argument */ *handle = CL_OM_FORM_HANDLE(classId, instIdx); CL_FUNC_EXIT(); if (flag == CL_OM_CREATE_FLAG) return((void *)pObjPtr); else return(NULL); }
/** * [Internal] Transition function * * Transition from current state 'curr' using Transition object (tO) * to the 'next' state. * * @param smThis state machine object handle * @param tO transition object handle * @param curr current state handle * @param next next state handle * @param msg event message * * @returns * */ static ClRcT _transition(ClSmInstancePtrT smThis, ClSmTransitionPtrT tO, ClSmStatePtrT curr, ClSmStatePtrT next, ClSmEventPtrT msg) { ClSmStatePtrT nextParents[MAX_DEPTH]; ClSmStatePtrT tmp=curr; int lvls=-1; ClSmStatePtrT pptr = 0; ClRcT retCode = 0; memset(nextParents, 0, sizeof(ClSmStatePtrT)*MAX_DEPTH); if(tO) { lvls =(int)_findLCA(smThis,curr,next); clLogTrace(HSM_LOG_AREA,CL_LOG_CONTEXT_UNSPECIFIED,"HSM Transition from %d to %d", curr->type, next->type); /* exit till LCA */ for(;tmp && lvls>-1;lvls--) { if(tmp->exit) { retCode = (*tmp->exit)(curr, &next, msg); } tmp = tmp->parent; } pptr = tmp; smThis->current = next; /* run the transition */ if(tO->transitionHandler) { ClSmStatePtrT forced = next; retCode = (*tO->transitionHandler)(curr, &forced, msg); /* do something for conditional transition here */ } /* entry from LCA to start */ lvls=0; tmp = next; while(tmp && tmp!=pptr) { nextParents[lvls++] = tmp; tmp = tmp->parent; } smThis->current = tO->nextState; /* now run thru the entry states */ for(;lvls>-1;lvls--) { tmp = nextParents[lvls]; if(tmp && tmp->entry) { retCode = (*tmp->entry)(curr, &smThis->current, msg); } } /* till its composite state, then set to the init state */ tmp = next->init; while(tmp) { /* fire the entry */ if(tmp->entry) { retCode = (*tmp->entry) (curr, &smThis->current, msg); } smThis->current = tmp; tmp=tmp->init; } } /* note: take care of retCode - if failed !! */ return retCode; }
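/*
 * Illustrative sketch: the exit, entry and transition handlers invoked by
 * _transition() above all share the same call shape -- the state the event was
 * handled in, a pointer to the (overridable) target state, and the event --
 * as seen at the call sites. The handler names below are placeholders; the
 * actual typedef for these callbacks is not shown in this excerpt.
 */
static ClRcT exampleStateExit(ClSmStatePtrT curr, ClSmStatePtrT *next, ClSmEventPtrT msg)
{
    /* release resources owned by the state being left, before the LCA is crossed */
    (void)curr; (void)next; (void)msg;
    return CL_OK;
}

static ClRcT exampleStateEntry(ClSmStatePtrT curr, ClSmStatePtrT *next, ClSmEventPtrT msg)
{
    /* set up the state being entered on the way down from the LCA */
    (void)curr; (void)next; (void)msg;
    return CL_OK;
}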
/** * Event Handling Function. * * Removes the first event from the q and processes it. Apart * from the event handling thats done at the SMType (Simple * State machine), this API also handles like if this state * machine instance is locked / not. Also, checks if its in * history state and if so, then returns back to the previous * state. * * @param smThis Instance Object * * @returns * CL_OK on CL_OK <br/> * SM_ERR_NO_EVENT if there are no events in the q <br/> * SM_ERR_LOCKED if the instance is locked <br/> * SM_ERR_PAUSED if the instance is paused <br/> * CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/> * * @see #clEsmInstanceProcessEvents */ ClRcT clEsmInstanceProcessEvent(ClExSmInstancePtrT smThis) { ClRcT ret = CL_OK; ClSmEventPtrT msg=0; ClSmLogInfoPtrT logBuf; CL_FUNC_ENTER(); CL_ASSERT(smThis); if(smThis && smThis->fsm && smThis->fsm->sm && smThis->fsm->current) { ClUint32T sz; if(ESM_LOCK(smThis)!=CL_OK) { ret = SM_ERR_LOCKED; CL_FUNC_EXIT(); return ret; } SMQ_SIZE(smThis->q, sz); if(sz == 0) { ret = SM_ERR_NO_EVENT; } else if(ESM_IS_PAUSED(smThis)) { ret = SM_ERR_PAUSED; } else if (ESM_IS_BUSY(smThis)) { ret = SM_ERR_BUSY; } else { ClSmQueueItemPtrT item; ClRcT rc; /* dequeue the message */ rc = SMQ_DEQUEUE(smThis->q, item); if(rc!=CL_OK || !item) { ret = CL_SM_RC(CL_ERR_NULL_POINTER); } else { ClSmStatePtrT history = smThis->fsm->current; ClSmTransitionPtrT trans = 0; ESM_SET_BUSY_STATE(smThis); ESM_UNLOCK(smThis); msg = &item->event; /* if its in history state, then * need to take care of the next state * (use previous state, if not configured) */ if(history->type == ESM_HISTORY_STATE) { if(history->maxEventTransitions >(ClUint16T) msg->eventId && msg->eventId >= 0 && history->eventTransitionTable[msg->eventId].transition) { trans = history->eventTransitionTable[msg->eventId].transition; if(!trans->nextState) { /* This is not such a good idea, what happens another * instance uses the type, it will reject it as if though * there is a predefined next state and will not go to * history state */ trans->nextState = smThis->previous; clLogTrace(ESM_LOG_AREA,ESM_LOG_CTX_EVENT,"History State Set as Next State!"); } else { trans = 0; } } else { clLogTrace(ESM_LOG_AREA,ESM_LOG_CTX_EVENT,"Unknown Event in History State"); } } #ifdef DEBUG clLogTrace(ESM_LOG_AREA,ESM_LOG_CTX_EVENT,"StateMachine [%s] OnEvent [%d]", smThis->fsm->name, msg->eventId); #else clLogTrace(ESM_LOG_AREA,ESM_LOG_CTX_EVENT,"OnEvent %d", msg->eventId); #endif ret = clSmInstanceOnEvent(smThis->fsm, msg); if(ret == CL_OK) { /* update the history state */ smThis->previous = history; /* record log */ logBuf = ESM_LOG_BUF(smThis->log); logBuf->eventId = msg->eventId; logBuf->from = smThis->previous; logBuf->to = smThis->fsm->current; ESM_LOG_IDX_INCR(smThis->log); } /* restore the original state machine, if history * state is set */ if(trans) { trans->nextState = 0; } /* free the dequeued item */ mFREE(item); ESM_LOCK(smThis); ESM_SET_IDL_STATE(smThis); } } ESM_UNLOCK(smThis); } else { ret = CL_SM_RC(CL_ERR_NULL_POINTER); } CL_FUNC_EXIT(); return ret; }
ClRcT clHandleCheckout( ClHandleDatabaseHandleT databaseHandle, ClHandleT handleArg, void **instance) { ClRcT rc = CL_OK; ClHdlDatabaseT *hdbp = (ClHdlDatabaseT*)databaseHandle; ClHdlStateT state = HANDLE_STATE_EMPTY; ClRcT ec = CL_OK; ClHandleT handle; hdlDbValidityChk(hdbp); /* sometimes people want to create the same handle across multiple nodes hdlValidityChk(handle,hdbp); */ handle = CL_HDL_IDX(handleArg); /* once we've verified it, we only care about the index */ nullChkRet(instance); /* * Decrementing handle to ensure the non-zero handle interface. */ if (CL_HANDLE_INVALID_VALUE == handle--) { clDbgCodeError(CL_HANDLE_RC(CL_ERR_INVALID_HANDLE), ("Passed Invalid Handle [0x0]")); return CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); /* 0 no longer allowed */ } ec = pthread_mutex_lock (&hdbp->mutex); if (ec != 0) { return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); } if (handle >= (ClHandleT)hdbp->n_handles) { rc = CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); pthread_mutex_unlock(&hdbp->mutex); clDbgCodeError(rc, ("Passed Invalid Handle [%p:%#llx]", (ClPtrT) hdbp, handleArg)); return rc; } if ( ( state = hdbp->handles[handle].state ) != HANDLE_STATE_USED) { pthread_mutex_unlock(&hdbp->mutex); if (state == HANDLE_STATE_EMPTY) { /* In some of our ASP components the assumption made, * like checkout handle returns CL_ERR_INVALID_HANDLE * to verify the handle does exist or not. * so removing the debug pause */ #if 0 clDbgCodeError(rc, ("Handle [%p:%#llX] is not allocated", (ClPtrT) hdbp, (handle+1))); #endif } else if (state == HANDLE_STATE_PENDINGREMOVAL) { clDbgCodeError(rc, ("Handle [%p:%#llX] is being removed", (ClPtrT) hdbp, handleArg)); } else { clDbgCodeError(rc, ("Handle [%p:%#llX] invalid state %d", (ClPtrT) hdbp, handleArg, state)); } rc = CL_HANDLE_RC(CL_ERR_INVALID_HANDLE); clDbgCodeError(rc, ("Handle [%p:%#llX] is invalid", (ClPtrT) hdbp, handleArg)); return rc; } *instance = hdbp->handles[handle].instance; hdbp->handles[handle].ref_count += 1; ec = pthread_mutex_unlock (&hdbp->mutex); if (ec != 0) { clDbgCodeError(CL_HANDLE_RC(CL_ERR_MUTEX_ERROR), ("Mutex unlock failed errno %d", errno)); return CL_HANDLE_RC(CL_ERR_MUTEX_ERROR); /* This can be devastating */ } #if 0 clLogTrace(CL_HDL_AREA, CL_HDL_CTX_CHECKOUT, "Checked out handle [%p:%#llX]", (ClPtrT) hdbp, handleArg); #endif return rc; }
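/*
 * Illustrative sketch of the handle-database life cycle using the APIs in this
 * section: create a database, add an instance, check the handle out and back
 * in, then destroy it. The destructor and instance argument are placeholders
 * for illustration; no database-teardown call is shown because none appears in
 * this excerpt.
 */
static void exampleInstanceDestructor(void *instance)
{
    /* invoked when the last reference to a handle is dropped */
    (void)instance;
}

static ClRcT exampleHandleLifeCycle(void *myInstance)
{
    ClHandleDatabaseHandleT db = 0;
    ClHandleT handle = CL_HANDLE_INVALID_VALUE;
    void *checkedOut = NULL;
    ClRcT rc;

    rc = clHandleDatabaseCreate(exampleInstanceDestructor, &db);
    if(rc != CL_OK) return rc;

    /* NULL compAddr builds the handle from the local node address and EO port */
    rc = clHandleAdd(db, myInstance, NULL, &handle);
    if(rc != CL_OK) return rc;

    rc = clHandleCheckout(db, handle, &checkedOut);   /* ref_count 1 -> 2 */
    if(rc == CL_OK)
        rc = clHandleCheckin(db, handle);             /* ref_count 2 -> 1 */

    /* marks the entry PENDINGREMOVAL and checks in the creation reference;
     * the destructor runs once the count reaches zero */
    if(rc == CL_OK)
        rc = clHandleDestroy(db, handle);
    return rc;
}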
/**
 * Delete an extended state machine instance.
 *
 * API to delete a previously created extended state machine instance.
 * Also frees any events still pending in the queue.
 *
 * @param smThis Extended State machine Instance to be deleted
 *
 * @returns
 *  CL_OK on success <br/>
 *  SM_ERR_LOCKED if the instance could not be locked <br/>
 *  CL_SM_RC(CL_ERR_NULL_POINTER) on invalid/null instance handle <br/>
 *
 * @see #clEsmInstanceCreate
 *
 */
ClRcT clEsmInstanceDelete(ClExSmInstancePtrT smThis)
{
    ClRcT ret = CL_OK;

    CL_FUNC_ENTER();
    CL_ASSERT(smThis);

    clLogTrace(ESM_LOG_AREA, ESM_LOG_CTX_DELETE, "Delete Extended State Machine Instance");
    if(smThis)
    {
        ClUint32T sz = 0;

        if(ESM_LOCK(smThis) != CL_OK)
        {
            ret = SM_ERR_LOCKED;
            CL_FUNC_EXIT();
            return ret;
        }
        /* free the fsm first */
        ret = clSmInstanceDelete(smThis->fsm);
        SMQ_SIZE(smThis->q, sz);
        /* if the queue is not empty, dequeue and free the pending items */
        if(sz > 0)
        {
            ClSmQueueItemPtrT item;
            ClRcT rc;

            rc = SMQ_DEQUEUE(smThis->q, item);
            while(rc == CL_OK && item)
            {
                mFREE(item);
                rc = SMQ_DEQUEUE(smThis->q, item);
            }
            clLogInfo(ESM_LOG_AREA, ESM_LOG_CTX_DELETE, "***Delete: Events are present in Q! Dropped to floor!!! ***");
        }
        /* delete the queue */
        clQueueDelete(&smThis->q);
        /* free the history buffer */
        mFREE(smThis->log.buffer);
        /* unlock before deleting the mutex */
        ESM_UNLOCK(smThis);
        /* delete the mutex */
        clOsalMutexDelete(smThis->lock);
        /* free the object */
        mFREE(smThis);
    }
    else
    {
        ret = CL_SM_RC(CL_ERR_NULL_POINTER);
    }

    CL_FUNC_EXIT();
    return ret;
}
ClRcT VDECL(_corObjectWalkOp) (ClEoDataT cData, ClBufferHandleT inMsgHandle, ClBufferHandleT outMsgHandle) { ClRcT rc = CL_OK; corObjFlagNWalkInfoT* pData = NULL; CL_FUNC_ENTER(); if(gCorInitStage == CL_COR_INIT_INCOMPLETE) { clLogError("OBW", "EOF", "The COR server Initialization is in progress...."); return CL_COR_SET_RC(CL_COR_ERR_TRY_AGAIN); } pData = clHeapAllocate(sizeof(corObjFlagNWalkInfoT)); if(!pData) { clLogWrite(CL_LOG_HANDLE_APP, CL_LOG_DEBUG, gCorClientLibName, CL_LOG_MESSAGE_0_MEMORY_ALLOCATION_FAILED); CL_DEBUG_PRINT(CL_DEBUG_ERROR,(CL_COR_ERR_STR(CL_COR_ERR_NO_MEM))); return (CL_COR_SET_RC(CL_COR_ERR_NO_MEM)); } if((rc = VDECL_VER(clXdrUnmarshallcorObjFlagNWalkInfoT, 4, 0, 0)(inMsgHandle, (void *)pData)) != CL_OK) { CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Failed to Unmarshall corObjFlagNWalkInfoT")); clHeapFree(pData); return rc; } clCorClientToServerVersionValidate(pData->version, rc); if(rc != CL_OK) { clHeapFree(pData); return CL_COR_SET_RC(CL_COR_ERR_VERSION_UNSUPPORTED); } switch(pData->operation) { case COR_OBJ_WALK_DATA_GET: clOsalMutexLock(gCorMutexes.gCorServerMutex); #if 0 objHdlCount = 0; rc = _corObjectCountGet(&iCount); pObjHdlList = (char *) clHeapAllocate(iCount*sizeof(ClCorObjectHandleT)); if(pObjHdlList == NULL) { clHeapFree(pData); clOsalMutexUnlock(gCorMutexes.gCorServerMutex); clLogWrite(CL_LOG_HANDLE_APP, CL_LOG_DEBUG, NULL, CL_LOG_MESSAGE_0_MEMORY_ALLOCATION_FAILED); CL_DEBUG_PRINT(CL_DEBUG_ERROR,(CL_COR_ERR_STR(CL_COR_ERR_NO_MEM))); return CL_COR_SET_RC(CL_COR_ERR_NO_MEM); } clLogTrace("OBW", "EFN", "Going for the object walk now"); #endif rc = _clCorObjectWalk(&pData->moId, &pData->moIdWithWC, _corObjHdlListGet, pData->flags, outMsgHandle); if (CL_OK != rc) { clLogError("OBW", "EFN", "Failed to do the object walk on server. rc[0x%x]", rc); } #if 0 else { rc = clBufferNBytesWrite(outMsgHandle, (ClUint8T *)pObjHdlList, (ClUint32T)objHdlCount * sizeof(ClCorObjectHandleT)); if (CL_OK != rc) clLogError("OBW", "EFN", "Failed to write the object walk information into the out buffer. rc[0x%x]", rc); } clLogTrace("OBW", "EFN", "Done with the object walk"); clHeapFree(pObjHdlList); #endif clOsalMutexUnlock(gCorMutexes.gCorServerMutex); break; case COR_OBJ_SUBTREE_DELETE: clOsalMutexLock(gCorMutexes.gCorServerMutex); rc = _clCorSubTreeDelete(pData->moId); clOsalMutexUnlock(gCorMutexes.gCorServerMutex); break; default: CL_DEBUG_PRINT(CL_DEBUG_ERROR, ( "INVALID OPERATION, rc = %x", rc) ); rc = CL_COR_SET_RC(CL_COR_ERR_INVALID_PARAM); break; } CL_FUNC_EXIT(); clHeapFree(pData); return rc; }