/*
 * clDebugPrintExtended
 *
 * Appends printf-style formatted text to a growable heap string.
 *
 * @param retstr   In/out: heap buffer (may point to NULL; grown via
 *                 clHeapRealloc as needed). Caller owns and frees it.
 * @param maxBytes In/out: current capacity of *retstr (0 => pick a default).
 * @param curBytes In/out: number of bytes already written into *retstr.
 * @param format   printf-style format string followed by its arguments.
 *
 * @return CL_OK on success (including "nothing to append"), or
 *         CL_ERR_INVALID_PARAMETER when a required pointer is NULL.
 */
ClRcT clDebugPrintExtended(ClCharT **retstr, ClInt32T *maxBytes, ClInt32T *curBytes,
                           const ClCharT *format, ...)
{
    va_list ap;
    ClInt32T len;
    ClCharT c;

    if (!retstr || !maxBytes || !curBytes)
        return CL_DEBUG_RC(CL_ERR_INVALID_PARAMETER);

    /* Probe pass: measure the formatted length without emitting output. */
    va_start(ap, format);
    len = vsnprintf(&c, 1, format, ap);
    va_end(ap);

    /*
     * Fix: vsnprintf() returns a NEGATIVE value on an encoding/format
     * error. The old test (!len) only caught 0, so a negative return fed
     * bogus sizes into the growth arithmetic below (++len, len << 1).
     */
    if (len <= 0)
        return CL_OK;

    ++len; /* room for the terminating NUL */

    if (!*maxBytes)
        *maxBytes = CL_MAX(512, len << 1);

    if (!*retstr || (*curBytes + len) >= *maxBytes)
    {
        if (!*retstr)
            *curBytes = 0;
        /* Double the capacity only once some data is present. */
        *maxBytes *= (*curBytes ? 2 : 1);
        if (*curBytes + len >= *maxBytes)
            *maxBytes += (len << 1);
        /* Project convention: allocation failure is fatal (CL_ASSERT). */
        *retstr = clHeapRealloc(*retstr, *maxBytes);
        CL_ASSERT(*retstr != NULL);
    }

    /* Second pass: format for real into the (now large enough) buffer. */
    va_start(ap, format);
    *curBytes += vsnprintf(*retstr + *curBytes, *maxBytes - *curBytes, format, ap);
    va_end(ap);
    return CL_OK;
}
/*
 * clBitmapCreate()
 *
 * Allocates a bitmap sized to cover bit index 'bitNum' and returns its
 * handle in *phBitmap. On success the caller owns the handle.
 *
 * @return CL_OK, CL_ERR_NULL_POINTER, CL_ERR_NO_MEMORY, or the failure
 *         code from clBitmapInfoInitialize().
 */
ClRcT clBitmapCreate(ClBitmapHandleT *phBitmap, ClUint32T bitNum)
{
    ClRcT rc = CL_OK;

    CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Enter"));

    if (NULL == phBitmap)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Null Pointer"));
        return CL_BITMAP_RC(CL_ERR_NULL_POINTER);
    }

    /* clHeapRealloc(NULL, n) acts as a plain allocation. */
    *phBitmap = clHeapRealloc(NULL, sizeof(ClBitmapInfoT));
    if (NULL == *phBitmap)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("clHeapRealloc() failed"));
        return CL_BITMAP_RC(CL_ERR_NO_MEMORY);
    }

    rc = clBitmapInfoInitialize(*phBitmap, bitNum);
    if (rc != CL_OK)
    {
        /*
         * Fix: free the allocated bitmap (*phBitmap), not the caller's
         * handle variable (phBitmap), and clear the handle so the caller
         * is not left holding a dangling value.
         */
        clHeapFree(*phBitmap);
        *phBitmap = NULL;
        return rc;
    }

    CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Exit"));
    return rc;
}
/*
 * clBitmapRealloc
 *
 * Grows pBitmapInfo->pBitmap so it can hold bit index 'bitNum' (0-based)
 * and zero-fills the newly added bytes. A request that fits within the
 * current buffer is a no-op.
 *
 * @return CL_OK or CL_ERR_NO_MEMORY (existing buffer left intact).
 */
static ClRcT clBitmapRealloc(ClBitmapInfoT *pBitmapInfo, ClUint32T bitNum)
{
    ClRcT rc = CL_OK;
    ClUint32T nBytes = 0;
    ClUint32T nInit = 0;
    void *pNewBitmap = NULL;

    CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Enter: %u", bitNum));

    nBytes = bitNum / CL_BM_BITS_IN_BYTE;
    ++nBytes; /* bitNum is 0-based */

    /*
     * Fix: bail out early when the buffer already covers bitNum. The old
     * code would shrink the buffer and then compute an underflowed
     * (huge) unsigned memset length from nBytes - nBytes(old).
     */
    if (nBytes <= pBitmapInfo->nBytes)
        return CL_OK;

    /*
     * Fix: realloc through a temporary so the original buffer is neither
     * leaked nor clobbered when clHeapRealloc() fails.
     */
    pNewBitmap = clHeapRealloc(pBitmapInfo->pBitmap, nBytes);
    if (NULL == pNewBitmap)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("clHeapRealloc() failed"));
        return CL_BITMAP_RC(CL_ERR_NO_MEMORY);
    }
    pBitmapInfo->pBitmap = pNewBitmap;

    CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Reallocated: %u from %u", nBytes, pBitmapInfo->nBytes));

    /* Zero only the freshly grown tail; existing bits are preserved. */
    nInit = nBytes - pBitmapInfo->nBytes;
    memset(&(pBitmapInfo->pBitmap[pBitmapInfo->nBytes]), 0, nInit);
    pBitmapInfo->nBytes = nBytes;

    CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Exit"));
    return rc;
}
/*
 * clEoNotificationCallbackInstall
 *
 * Registers a notification callback for the given component address in
 * the global callback database, returning the slot index as the handle.
 * The table is doubled (and the new half zeroed) when no free slot
 * exists, then the scan resumes from where it left off.
 *
 * @param compAddr Component (node, port) the callback is tied to.
 * @param pFunc    Callback, smuggled through a ClPtrT (see cast below).
 * @param pArg     Opaque cookie handed back to the callback.
 * @param pHandle  Out: slot index usable for a later uninstall.
 *
 * @return CL_OK, CL_ERR_INVALID_PARAMETER, CL_ERR_NO_MEMORY, or a mutex
 *         locking failure code.
 */
ClRcT clEoNotificationCallbackInstall(ClIocPhysicalAddressT compAddr, ClPtrT pFunc,
                                      ClPtrT pArg, ClHandleT *pHandle)
{
    ClRcT rc = CL_OK;
    ClUint32T i = 0;
    ClEoCallbackRecT **tempDb;

    if (pFunc == NULL || pHandle == NULL)
    {
        return CL_EO_RC(CL_ERR_INVALID_PARAMETER);
    }

    rc = clOsalMutexLock(&gpCallbackDb.lock);
    if (rc != CL_OK)
    {
        /* Print an error message here */
        return rc;
    }

    /*
     * NOTE(review): if gpCallbackDb.numRecs could ever be 0 here the
     * doubling below would never grow the table — presumed initialized
     * to a non-zero size elsewhere; confirm against the module init.
     */
re_check:
    /* 'i' deliberately persists across the grow-and-retry pass. */
    for (; i < gpCallbackDb.numRecs; i++)
    {
        if (gpCallbackDb.pDb[i] == NULL)
        {
            /* Fix: '= {0}' was a malformed brace initializer for a pointer. */
            ClEoCallbackRecT *pRec = NULL;
            pRec = (ClEoCallbackRecT *)clHeapAllocate(sizeof(ClEoCallbackRecT));
            /* Fix: the allocation was previously dereferenced unchecked. */
            if (pRec == NULL)
            {
                rc = CL_EO_RC(CL_ERR_NO_MEMORY);
                goto out;
            }
            pRec->node = compAddr.nodeAddress;
            pRec->port = compAddr.portId;
            /* Reinterpret the data pointer as a function pointer (project idiom). */
            pRec->pFunc = *((ClCpmNotificationFuncT *)&pFunc);
            pRec->pArg = pArg;
            *pHandle = i;
            gpCallbackDb.pDb[i] = pRec;
            goto out;
        }
    }

    /* No free slot: double the table, zero the new half, and rescan. */
    tempDb = clHeapRealloc(gpCallbackDb.pDb,
                           sizeof(ClEoCallbackRecT *) * gpCallbackDb.numRecs * 2);
    if (tempDb == NULL)
    {
        rc = CL_EO_RC(CL_ERR_NO_MEMORY);
        goto out;
    }
    memset(tempDb + gpCallbackDb.numRecs, 0,
           sizeof(ClEoCallbackRecT *) * gpCallbackDb.numRecs);
    gpCallbackDb.pDb = tempDb;
    gpCallbackDb.numRecs *= 2;
    goto re_check;

out:
    clOsalMutexUnlock(&gpCallbackDb.lock);
    return rc;
}
/*
 * clSnmpCommit
 *
 * Executes the batch of SNMP SET operations accumulated in the global
 * gOper store (see clSnmpJobTableAttrAdd) exactly once. On execution
 * failure, builds gOper.medErrList — one (errId, OID) entry per failed
 * varbind — and copies it out through *pMedErrList. If the batch was
 * already committed, only the stored error list is returned.
 *
 * Must run with gOper guarded; takes gOper.mtx for the whole call.
 * Ownership: gOper.medErrList (and the OID buffers inside it) is freed
 * later at clSnmpUndo(), per the inline comments below.
 */
ClRcT clSnmpCommit( ClMedErrorListT *pMedErrList)
{
    ClMedOpT opInfo = {0};
    ClRcT errorCode = CL_OK;
    ClUint32T count = 0;

    clSnmpMutexLock(gOper.mtx);
    /* Under the lock: run the batch only if it has not been committed yet. */
    if(!gOper.cmtd) /* CL_FALSE means not committed */
    {
        opInfo = gOper.opInfo;
        gOper.cmtd = CL_TRUE; /* Indicates that the request is executed */
        clLogDebug("SNP","OPE",
                   "Calling med to execute operation, no of ops[%d]", opInfo.varCount);
        errorCode = clMedOperationExecute (gSubAgentInfo.medHdl, &opInfo);
        if(CL_OK != errorCode)
        {
            clLogError("SNM","OPE",
                       "Failed to execute operation, rc=[0x%x]", errorCode);
            /* Collect the per-varbind failures into the global error list. */
            for(count = 0; count < opInfo.varCount; count++)
            {
                if(opInfo.varInfo[count].errId != CL_OK)
                {
                    clLogError("SNM","OPE",
                               "Setting errorCode [0x%x] to OID [%s]",
                               opInfo.varInfo[count].errId,
                               opInfo.varInfo[count].attrId.id);
                    /*pErrCode = opInfo.varInfo[count].errId;*/
                    /* Construct error list here */
                    /* NOTE(review): on realloc failure the previous list is
                     * lost (leak) before the NO_MEMORY bail-out below. */
                    gOper.medErrList.pErrorList =
                        (ClMedErrorIdT *)clHeapRealloc(gOper.medErrList.pErrorList,
                                (gOper.medErrList.count + 1)*sizeof(ClMedErrorIdT));
                    if(!gOper.medErrList.pErrorList)
                    {
                        errorCode = CL_ERR_NO_MEMORY;
                        goto exitOnError;
                    }
                    gOper.medErrList.pErrorList[gOper.medErrList.count].errId =
                        opInfo.varInfo[count].errId;
                    /* Deep-copy the OID so the entry outlives opInfo. */
                    gOper.medErrList.pErrorList[gOper.medErrList.count].oidInfo.id =
                        (ClUint8T*)clHeapAllocate(opInfo.varInfo[count].attrId.len);
                    if(!gOper.medErrList.pErrorList[gOper.medErrList.count].oidInfo.id)
                    {
                        errorCode = CL_ERR_NO_MEMORY;
                        goto exitOnError;
                    }
                    memcpy(gOper.medErrList.pErrorList[gOper.medErrList.count].oidInfo.id,
                           opInfo.varInfo[count].attrId.id,
                           opInfo.varInfo[count].attrId.len);
                    gOper.medErrList.pErrorList[gOper.medErrList.count].oidInfo.len =
                        opInfo.varInfo[count].attrId.len;
                    gOper.medErrList.count++;
                }
            }
            *pMedErrList = gOper.medErrList; /* Copy the error list, this is freed at clSnmpUndo() */
        }
#if 0
        for(arrIndex = 0; arrIndex < opInfo.varCount; arrIndex++)
        {
            clHeapFree(opInfo.varInfo[arrIndex].pVal);
            clHeapFree(opInfo.varInfo[arrIndex].pInst);
        }
        gOper.opInfo.varCount = 0;
        clHeapFree (gOper.opInfo.varInfo);
        gOper.opInfo.varInfo = NULL; /* Next operation would see this and return */
#endif
    }
    else if (gOper.medErrList.count)
    {
        /* Already executed: just hand back the recorded failures. */
        clLogDebug("SNM","OPE", "Already committed, return error list");
        *pMedErrList = gOper.medErrList; /* Copy the error list, this is freed at clSnmpUndo() */
        errorCode = CL_SNMP_ERR_GENERR; /* Return a general error, this is not set to the varbin var */
    }
exitOnError:
    clSnmpMutexUnlock(gOper.mtx);
    return errorCode; /* Return error code */
}
/*
 * Dead code retained for reference: clSetTableAttr was the pre-batching
 * implementation, superseded by clSnmpJobTableAttrAdd/clSnmpCommit below.
 *
ClUint32T clSetTableAttr( ClSnmpReqInfoT* reqInfo, void* data, ClInt32T *pOpNum, ClInt32T *pErrCode)
{
    ClMedOpT opInfo;
    static ClMedVarBindT *tempVarInfo = NULL;
    static ClInt32T arrIndex = 0;
    ClInt32T count = 0;
    ClInt32T errorCode = CL_OK;
    if(NULL == tempVarInfo)
    {
        tempVarInfo = (ClMedVarBindT *) clHeapCalloc(1,*pOpNum * sizeof (ClMedVarBindT));
        if(NULL == tempVarInfo)
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Could not allocate memory. Bulk set for %d objects", *pOpNum) );
            return CL_SNMP_ERR_NOCREATION;
        }
    }
    opInfo.varInfo = tempVarInfo;
    if(arrIndex < *pOpNum)
    {
        opInfo.varInfo[arrIndex].errId = CL_OK;
        opInfo.varInfo[arrIndex].attrId.len = (ClUint32T)strlen(reqInfo->oid) + 1;
        opInfo.varInfo[arrIndex].attrId.id = (ClUint8T *)clHeapCalloc(1,opInfo.varInfo[arrIndex].attrId.len);
        if(NULL == opInfo.varInfo[arrIndex].attrId.id)
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Could not allocate memory. Size asked for %d", opInfo.varInfo[arrIndex].attrId.len) );
            return CL_SNMP_ERR_NOCREATION;
        }
        memcpy(opInfo.varInfo[arrIndex].attrId.id, reqInfo->oid, opInfo.varInfo[arrIndex].attrId.len);
        opInfo.varInfo[arrIndex].len = reqInfo->dataLen;
        opInfo.varInfo[arrIndex].pVal = clHeapCalloc(1,reqInfo->dataLen);
        if(NULL == opInfo.varInfo[arrIndex].pVal)
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Could not allocate memory. Size asked for %d",reqInfo->dataLen) );
            return CL_SNMP_ERR_NOCREATION;
        }
        memcpy(opInfo.varInfo[arrIndex].pVal, data, reqInfo->dataLen);
        opInfo.varInfo[arrIndex].pInst = (ClSnmpReqInfoT *)clHeapCalloc(1,sizeof(ClSnmpReqInfoT) );
        if(NULL == opInfo.varInfo[arrIndex].pInst)
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Could not allocate memory. Size asked for %u", (ClUint32T)sizeof(ClSnmpReqInfoT) ) );
            return CL_SNMP_ERR_NOCREATION;
        }
        memcpy(opInfo.varInfo[arrIndex].pInst, reqInfo, sizeof(ClSnmpReqInfoT) );
        if(arrIndex == (*pOpNum - 1) )
        {
            opInfo.opCode = reqInfo->opCode;
            opInfo.varCount = *pOpNum;
            errorCode = clMedOperationExecute (gSubAgentInfo.medHdl, &opInfo);
            if(CL_OK != errorCode)
            {
                for(count = 0; count <= arrIndex; count++)
                {
                    if(opInfo.varInfo[count].errId != CL_OK)
                    {
                        *pErrCode = opInfo.varInfo[count].errId;
                        *pOpNum = count;
                        break;
                    }
                }
            }
            arrIndex = 0;
            for(arrIndex = 0; arrIndex < *pOpNum; arrIndex++)
            {
                clHeapFree(opInfo.varInfo[arrIndex].pVal);
                clHeapFree(opInfo.varInfo[arrIndex].pInst);
            }
            arrIndex = 0;
            clHeapFree (tempVarInfo);
            tempVarInfo = NULL;
            return errorCode;
        }
        arrIndex++;
        return CL_OK;
    }
    return errorCode;
}
*/
/*
 * clSnmpJobTableAttrAdd
 *
 * Appends one SNMP varbind (OID, value, request snapshot) to the global
 * gOper batch store; the batch is later executed by clSnmpCommit(). For
 * non-CREATE requests the instance is validated immediately, and GET
 * requests additionally record their net-snmp request bookkeeping in
 * gOper.pOpAddData.
 *
 * Holds gOper.mtx for the whole call. Returns CL_OK, a validation rc,
 * or CL_SNMP_ERR_NOCREATION on any allocation failure.
 */
ClUint32T clSnmpJobTableAttrAdd( ClSnmpReqInfoT* reqInfo, void* data, ClInt32T *pOpNum, ClInt32T *pErrCode, ClPtrT pReqAddInfo)
{
    ClInt32T arrIndex = 0;
    ClRcT rc = CL_OK;

    clSnmpMutexLock(gOper.mtx);
    /* Grow the varbind array by one slot for this request.
     * NOTE(review): on realloc failure the previous array is lost (leak). */
    gOper.opInfo.varInfo = (ClMedVarBindT *) clHeapRealloc(gOper.opInfo.varInfo, (gOper.opInfo.varCount+1) * sizeof (ClMedVarBindT));
    if(NULL == gOper.opInfo.varInfo)
    {
        clLogError("SNP","OPE", "Could not allocate memory. Bulk set for [%d] objects", *pOpNum) ;
        clSnmpMutexUnlock(gOper.mtx);
        return CL_SNMP_ERR_NOCREATION;
    }
    arrIndex = gOper.opInfo.varCount;
    clLogDebug("SNP","OPE", "Adding request[%d] into store", arrIndex);
    gOper.opInfo.varInfo[arrIndex].errId = CL_OK;
    /* Deep-copy the OID string (length includes the NUL terminator). */
    gOper.opInfo.varInfo[arrIndex].attrId.len = (ClUint32T)strlen(reqInfo->oid) + 1;
    gOper.opInfo.varInfo[arrIndex].attrId.id = (ClUint8T *)clHeapCalloc(1, gOper.opInfo.varInfo[arrIndex].attrId.len);
    if(NULL == gOper.opInfo.varInfo[arrIndex].attrId.id)
    {
        clLogError("SNP","OPE", "Could not allocate memory. Size asked for [%d]", gOper.opInfo.varInfo[arrIndex].attrId.len);
        clSnmpMutexUnlock(gOper.mtx);
        return CL_SNMP_ERR_NOCREATION;
    }
    memcpy(gOper.opInfo.varInfo[arrIndex].attrId.id, reqInfo->oid, gOper.opInfo.varInfo[arrIndex].attrId.len);
    /* Copy the value payload; zeroed when the caller passes no data. */
    gOper.opInfo.varInfo[arrIndex].len = reqInfo->dataLen;
    gOper.opInfo.varInfo[arrIndex].pVal = clHeapAllocate(reqInfo->dataLen);
    if(NULL == gOper.opInfo.varInfo[arrIndex].pVal)
    {
        clLogError("SNP","OPE", "Could not allocate memory. Size asked for [%d]", reqInfo->dataLen);
        clSnmpMutexUnlock(gOper.mtx);
        return CL_SNMP_ERR_NOCREATION;
    }
    memset(gOper.opInfo.varInfo[arrIndex].pVal, 0, reqInfo->dataLen);
    if (data)
    {
        memcpy(gOper.opInfo.varInfo[arrIndex].pVal, data, reqInfo->dataLen);
    }
    /* Snapshot the full request so commit/undo can replay it later. */
    gOper.opInfo.varInfo[arrIndex].pInst = (ClSnmpReqInfoT *)clHeapCalloc(1,sizeof(ClSnmpReqInfoT) );
    if(NULL == gOper.opInfo.varInfo[arrIndex].pInst)
    {
        clLogError("SNP","OPE", "Could not allocate memory. Size asked for [%u]", (ClUint32T)sizeof(ClSnmpReqInfoT) );
        clSnmpMutexUnlock(gOper.mtx);
        return CL_SNMP_ERR_NOCREATION;
    }
    memcpy(gOper.opInfo.varInfo[arrIndex].pInst, reqInfo, sizeof(ClSnmpReqInfoT) );
    gOper.opInfo.opCode = reqInfo->opCode; /* This could be a problem if a SET and a CREATE comes */
    /* Validate here */
    clLogDebug("SNP","OPE", "Validating instance, tableType [%d],oid [%s], oidLen[%d], opCode [%d]", reqInfo->tableType, reqInfo->oid, reqInfo->oidLen, reqInfo->opCode);
    if(CL_SNMP_CREATE != reqInfo->opCode) /* Dont validate instance for CREATE as it wont be there */
    {
        rc = clMedInstValidate(gSubAgentInfo.medHdl, &gOper.opInfo.varInfo[arrIndex]);
        if(CL_OK != rc)
        {
            clLogError("SNM","OPE", "Failed to validate instance information, rc=[0x%x], errorCode=[0x%x]", rc, gOper.opInfo.varInfo[arrIndex].errId);
            *pErrCode = gOper.opInfo.varInfo[arrIndex].errId;
            gErrorHappened = CL_TRUE;
        }
        if (reqInfo->opCode == CL_SNMP_GET)
        {
            /* GETs also carry net-snmp request bookkeeping for the reply. */
            clLogDebug("SNM","OPE", "Adding the get job.");
            gOper.pOpAddData = (_ClSnmpGetReqInfoT *) clHeapRealloc (gOper.pOpAddData, (arrIndex + 1)* sizeof(_ClSnmpGetReqInfoT));
            if (NULL == gOper.pOpAddData)
            {
                clLogError("SNP","OPE", "Could not allocate memory for the get info object. ");
                clSnmpMutexUnlock(gOper.mtx);
                return CL_SNMP_ERR_NOCREATION;
            }
            (((_ClSnmpGetReqInfoT *)gOper.pOpAddData) + arrIndex)->pRequest = ((_ClSnmpGetReqInfoT *)pReqAddInfo)->pRequest;
            (((_ClSnmpGetReqInfoT *)gOper.pOpAddData) + arrIndex)->columnType = ((_ClSnmpGetReqInfoT *)pReqAddInfo)->columnType;
        }
    }
    arrIndex++; /* Should this be incremented when inst validation failed? */
    gOper.opInfo.varCount = arrIndex;
    gOper.cmtd = CL_FALSE; /* Not committed */
    gOper.refCount = arrIndex; /* Ref count to delete this global structure */
    clLogDebug("SNP","OPE", "No of operations in store [%d]", gOper.opInfo.varCount);
    clSnmpMutexUnlock(gOper.mtx);
    return rc;
}
static ClUint32T __differenceVectorGet(ClDifferenceBlockT *block, ClUint8T *data, ClOffsetT offset, ClSizeT dataSize, ClDifferenceVectorT *differenceVector) { ClUint32T i; ClMD5T *md5CurList = block->md5List; ClMD5T *md5List ; ClUint32T md5CurBlocks = block->md5Blocks; ClUint32T md5Blocks = 0; ClUint32T dataBlocks, sectionBlocks ; ClSizeT sectionSize; ClSizeT vectorSize = 0; ClSizeT lastDataSize = dataSize; ClUint8T *pLastData = NULL; ClInt32T lastMatch = -1; ClBoolT doMD5 = CL_FALSE; ClUint32T startBlock, endBlock; ClOffsetT startOffset, nonZeroOffset = 0; sectionSize = offset + dataSize; /* * First align to md5 block size */ dataBlocks = ((dataSize + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK) >> CL_MD5_BLOCK_SHIFT; sectionBlocks = ((sectionSize + CL_MD5_BLOCK_MASK) & ~CL_MD5_BLOCK_MASK) >> CL_MD5_BLOCK_SHIFT; startOffset = offset & CL_MD5_BLOCK_MASK; startBlock = ( offset & ~CL_MD5_BLOCK_MASK ) >> CL_MD5_BLOCK_SHIFT; /* align the start block*/ endBlock = sectionBlocks; md5Blocks = CL_MAX(sectionBlocks, md5CurBlocks); md5List = (ClMD5T *) clHeapCalloc(md5Blocks, sizeof(*md5List)); CL_ASSERT(md5List != NULL); if(md5CurList) memcpy(md5List, md5CurList, md5CurBlocks); else { md5CurList = md5List; md5CurBlocks = md5Blocks; } /* * If the specified vector blocks don't match the data blocks. * refill. Reset the md5 for offsetted writes considering its cheaper to * just recompute the md5 for this block on a subsequent write to this block */ if(differenceVector && differenceVector->md5Blocks && differenceVector->md5Blocks == dataBlocks) { memcpy(md5List + startBlock, differenceVector->md5List, sizeof(*md5List) * differenceVector->md5Blocks); /* *If data vector already specified, then just update the md5 list and exit. 
*/ if(differenceVector->numDataVectors) { clLogTrace("DIFF", "MD5", "Difference vector already specified with md5 list of size [%d] " "with [%d] difference data vectors", dataBlocks, differenceVector->numDataVectors); goto out_set; } } else doMD5 = CL_TRUE; data += offset; pLastData = data; /* * If we are going to allocate datavectors, free the existing set to be overridden * with a fresh set. */ if(differenceVector && differenceVector->dataVectors) { clHeapFree(differenceVector->dataVectors); differenceVector->dataVectors = NULL; differenceVector->numDataVectors = 0; } nonZeroOffset |= startOffset; for(i = startBlock; i < endBlock; ++i) { ClSizeT c = CL_MIN(CL_MD5_BLOCK_SIZE - startOffset, dataSize); nonZeroOffset &= startOffset; if(doMD5) { if(!startOffset) clMD5Compute(data, c, md5List[i].md5sum); else memset(&md5List[i], 0, sizeof(md5List[i])); } dataSize -= c; data += c; if(startOffset) startOffset = 0; /* * Just gather the new md5 list if there is no vector to be accumulated */ if(!differenceVector) continue; /* * Just gather md5s if we hit the limit for the current data size or if * we didnt have an md5 to start with */ if(md5List == md5CurList) { if(lastMatch < 0) lastMatch = i; continue; } if(i < md5CurBlocks) { /* * Always store offsetted blocks in the difference vector. whose md5 wasnt computed. */ if(!nonZeroOffset && memcmp(md5List[i].md5sum, md5CurList[i].md5sum, sizeof(md5List[i].md5sum)) == 0) { /* * Blocks are the same. Skip the add for this block. 
*/ clLogTrace("DIFF", "MD5", "Skipping copying block [%d] to replica", i); continue; } } else { if(lastMatch < 0) { lastMatch = i; pLastData = data - c; lastDataSize = dataSize + c; } continue; } clLogTrace("DIFF", "MD5", "Copying block [%d] to replica of size [%lld]", i, c); if(!(differenceVector->numDataVectors & 7 ) ) { differenceVector->dataVectors = (ClDataVectorT*) clHeapRealloc(differenceVector->dataVectors, sizeof(*differenceVector->dataVectors) * (differenceVector->numDataVectors + 8)); CL_ASSERT(differenceVector->dataVectors != NULL); memset(differenceVector->dataVectors + differenceVector->numDataVectors, 0, sizeof(*differenceVector->dataVectors) * 8); } differenceVector->dataVectors[differenceVector->numDataVectors].dataBlock = i; /* block mismatched */ differenceVector->dataVectors[differenceVector->numDataVectors].dataBase = data - c; differenceVector->dataVectors[differenceVector->numDataVectors].dataSize = c; ++differenceVector->numDataVectors; vectorSize += c; } CL_ASSERT(dataSize == 0); if(lastMatch >= 0 && differenceVector) /* impossible but coverity killer : Who knows! 
*/ { if(!(differenceVector->numDataVectors & 7)) { differenceVector->dataVectors = (ClDataVectorT*) clHeapRealloc(differenceVector->dataVectors, sizeof(*differenceVector->dataVectors) * (differenceVector->numDataVectors + 8)); CL_ASSERT(differenceVector->dataVectors != NULL); memset(differenceVector->dataVectors + differenceVector->numDataVectors, 0, sizeof(*differenceVector->dataVectors) * 8); } clLogTrace("DIFF", "MD5", "Copying block [%d] to replica of size [%lld]", lastMatch, lastDataSize); differenceVector->dataVectors[differenceVector->numDataVectors].dataBlock = lastMatch; differenceVector->dataVectors[differenceVector->numDataVectors].dataBase = pLastData; differenceVector->dataVectors[differenceVector->numDataVectors].dataSize = lastDataSize; ++differenceVector->numDataVectors; vectorSize += lastDataSize; } if(differenceVector) { clLogTrace("DIFF", "MD5", "Vector has [%lld] bytes to be written. Skipped [%lld] bytes.", vectorSize, sectionSize - vectorSize); } out_set: block->md5List = md5List; block->md5Blocks = md5Blocks; if(doMD5 && differenceVector) { clLogTrace("DIFF", "MD5", "Copying md5 list preloaded with [%d] blocks to the difference vector " "with [%d] data difference vectors", dataBlocks, differenceVector->numDataVectors); if(differenceVector->md5List) clHeapFree(differenceVector->md5List); differenceVector->md5List = (ClMD5T*) clHeapCalloc(dataBlocks, sizeof(*differenceVector->md5List)); CL_ASSERT(differenceVector->md5List != NULL); memcpy(differenceVector->md5List, md5List + startBlock, sizeof(*differenceVector->md5List) * dataBlocks); differenceVector->md5Blocks = dataBlocks; } if(md5CurList != md5List) clHeapFree(md5CurList); return sectionBlocks; }
/*
 * clLogFlusherRecordsGetMcast
 *
 * Pulls up to nRecords un-acked records out of the stream's shared-memory
 * ring buffer into a freshly appended ClLogFlushBufferT on pFlushRecord,
 * and decides (once per flush record) whether the batch must also be
 * multicast to remote subscribers. The ring wraps, so the copy is done
 * in up to two batches: [startIdx, maxRecordCount) then [0, remainder).
 *
 * Returns CL_OK; a corrupted shared header is reset rather than treated
 * as an error, and "nobody interested" is a silent successful no-op.
 */
static ClRcT clLogFlusherRecordsGetMcast(ClLogSvrStreamDataT *pStreamData,
                                         ClUint32T nRecords,
                                         ClLogFlushRecordT *pFlushRecord)
{
    ClRcT rc = CL_OK;
    ClLogStreamHeaderT *pHeader = pStreamData->pStreamHeader;
    ClUint8T *pRecords = pStreamData->pStreamRecords;
    ClUint32T startIdx = 0;
    ClUint32T buffLen = 0;
    ClIocNodeAddressT localAddr = 0;
    ClUint8T *pBuffer = NULL;
    ClUint32T firstBatch = 0;
    ClBoolT doMulticast = CL_FALSE;
    ClUint32T secondBatch = 0;

    if ((CL_LOG_STREAM_HEADER_STRUCT_ID != pHeader->struct_id) ||
        (CL_LOG_STREAM_HEADER_UPDATE_COMPLETE != pHeader->update_status))
    {/* Stream Header is corrupted so reset Header parameters */
        clLogStreamHeaderReset(pHeader);
    }

    /* multicast < 0 means "not decided yet" for this flush record. */
    if(pFlushRecord->multicast < 0 )
    {
        /* Multicast only when there are listeners and a multicast address. */
        doMulticast = ( (0 < (pStreamData->ackersCount + pStreamData->nonAckersCount))
                        &&
                        (pHeader->streamMcastAddr.iocMulticastAddress != 0) )?
            CL_TRUE: CL_FALSE;
        /* Sole listener is the local file owner: no need to multicast. */
        if( (pStreamData->ackersCount + pStreamData->nonAckersCount) == 1
            &&
            (pStreamData->fileOwnerAddr == clIocLocalAddressGet()) )
        {
            doMulticast = CL_FALSE;
        }
        /* Cache the decision and addressing info on the flush record. */
        pFlushRecord->multicast = doMulticast;
        pFlushRecord->mcastAddr = pHeader->streamMcastAddr;
        pFlushRecord->ackersCount = pStreamData->ackersCount;
    }
    else
    {
        doMulticast = (pFlushRecord->multicast ? CL_TRUE : CL_FALSE) ;
    }

    localAddr = clIocLocalAddressGet();
    if((!doMulticast) && (pStreamData->fileOwnerAddr != localAddr))
    {
        /*Nobody is interested in these records and they are not for me then skip them */
        /* clLogDebug("SVR", "FLU", "Nobody is Interested in These records, So skipping them");*/
        return rc;
    }

    /* Oldest un-acked record; the ring index wraps at maxRecordCount. */
    startIdx = pHeader->startAck % pHeader->maxRecordCount;
    if(nRecords > pHeader->maxRecordCount)
        nRecords = pHeader->maxRecordCount;
    CL_ASSERT(pHeader->recordSize < 4*1024); // Sanity check the log record size
    buffLen = nRecords * pHeader->recordSize;
    clLogTrace(CL_LOG_AREA_SVR, "FLU",
               "startIdx: %u maxRec: %u nRecords: %u startIdx: %d recordIdx: %d",
               startIdx, pHeader->maxRecordCount, nRecords,
               pHeader->startAck, pHeader->recordIdx);

    /* FirstBatch is from startIdx towards maxRecordCount and SecondBatch is from 0 to startIdx
     * SecondBatch is only valid if number of records are greater than (maxRecordCount - startIdx) */
    if ( (startIdx + nRecords) <= pHeader->maxRecordCount )
    {
        firstBatch = nRecords;
        secondBatch = 0;
    }
    else
    {
        firstBatch = pHeader->maxRecordCount - startIdx;
        secondBatch = nRecords + startIdx - pHeader->maxRecordCount;
    }

    /* Computed firstBatch and secondBatch number of records, now verify and flush them */
    pBuffer = pRecords + (startIdx * pHeader->recordSize);
    /* Append one ClLogFlushBufferT to the flush record (assert-on-OOM style). */
    pFlushRecord->pBuffs = (ClLogFlushBufferT*)
        clHeapRealloc(pFlushRecord->pBuffs,
                      (pFlushRecord->numBufs+1)*sizeof(*pFlushRecord->pBuffs));
    CL_ASSERT(pFlushRecord->pBuffs != NULL);
    memset(pFlushRecord->pBuffs+pFlushRecord->numBufs, 0, sizeof(*pFlushRecord->pBuffs));
    pFlushRecord->pBuffs[pFlushRecord->numBufs].pRecord = (ClUint8T*)
        clHeapCalloc(sizeof(ClUint8T), buffLen);
    CL_ASSERT(pFlushRecord->pBuffs[pFlushRecord->numBufs].pRecord != NULL);
    pFlushRecord->pBuffs[pFlushRecord->numBufs].numRecords = 0;
    if (firstBatch)
    {
        clLogVerifyAndFlushRecords(pBuffer, pHeader, pFlushRecord, firstBatch);
    }
    if (secondBatch)
    {
        /* Wrapped portion starts at the base of the ring. */
        pBuffer = pRecords;
        clLogVerifyAndFlushRecords(pBuffer, pHeader, pFlushRecord, secondBatch);
    }
    pFlushRecord->numBufs++;
    CL_LOG_DEBUG_TRACE(("Exit"));
    return rc;
}
static ClRcT clAmsMgmtSGMigrateMPlusN(ClAmsSGRedundancyModelT model, ClAmsEntityT *sgName, const ClCharT *prefix, ClUint32T numActiveSUs, ClUint32T numStandbySUs, ClAmsMgmtMigrateListT *migrateList) { ClUint32T i; ClRcT rc = CL_OK; ClAmsEntityBufferT siBuffer = {0}; ClAmsEntityBufferT suBuffer = {0}; ClAmsEntityBufferT nodeBuffer = {0}; ClInt32T extraSIs = 0; ClInt32T extraSUs = 0; ClInt32T extraNodes = 0; ClAmsEntityT *nodeList = NULL; ClAmsEntityT *nodes = NULL; ClAmsEntityT *sus = NULL; ClAmsEntityT *comps = NULL; ClAmsEntityT *sis = NULL; ClAmsEntityT *csis = NULL; ClInt32T numNodes = 0; ClAmsEntityConfigT *pSURefComp = NULL; ClAmsEntityConfigT *pSGRefSI = NULL; ClAmsEntityConfigT *pSIRefCSI = NULL; ClAmsEntityConfigT *pSGConfig = NULL; ClAmsSGConfigT sgConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClUint32T numSupportedCSITypes = 0; SaNameT *pNumSupportedCSITypes = NULL; ClAmsMgmtCCBHandleT ccbHandle = 0; ClAmsMgmtMigrateListT *unlockList = NULL; rc = clAmsMgmtEntityGetConfig(gHandle, sgName, &pSGConfig); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SG [%.*s] config get returned [%#x]", sgName->name.length-1, sgName->name.value, rc); goto out; } memcpy(&sgConfig, pSGConfig, sizeof(sgConfig)); clHeapFree(pSGConfig); /* * If scaling down actives, ensure that those many service units are locked. 
*/ if(numActiveSUs < sgConfig.numPrefActiveSUs) { ClInt32T numShrinkSUs = sgConfig.numPrefActiveSUs - numActiveSUs; ClAmsEntityBufferT suList = {0}; ClInt32T numOutOfServiceSUs = 0; rc = clAmsMgmtGetSGSUList(gHandle, sgName, &suList); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SG [%.*s] su list returned [%#x]", sgName->name.length-1, sgName->name.value, rc); goto out; } for(i = 0; i < suList.count; ++i) { ClAmsSUConfigT *pSUConfig = NULL; rc = clAmsMgmtEntityGetConfig(gHandle, suList.entity+i, (ClAmsEntityConfigT**)&pSUConfig); if(rc != CL_OK) { clHeapFree(suList.entity); clLogError("AMS", "MIGRATE", "SU [%.*s] get config returned [%#x]", suList.entity[i].name.length-1, suList.entity[i].name.value, rc); goto out; } if(pSUConfig->adminState == CL_AMS_ADMIN_STATE_LOCKED_A || pSUConfig->adminState == CL_AMS_ADMIN_STATE_LOCKED_I) { ++numOutOfServiceSUs; } clHeapFree(pSUConfig); } clHeapFree(suList.entity); if(numOutOfServiceSUs < numShrinkSUs) { clLogError("AMS", "MIGRATE", "Expected a minimum of [%d] SUs to be out of service to satisfy SG. " "redundancy model shrink. Got [%d] out of service", numShrinkSUs, numOutOfServiceSUs); rc = CL_AMS_RC(CL_AMS_ERR_INVALID_ENTITY_STATE); goto out; } } rc = clAmsMgmtSGRedundancyModelEstimate(model, sgName, numActiveSUs, numStandbySUs, &extraSIs, &extraSUs, &extraNodes); if(rc != CL_OK) { goto out; } rc = clAmsMgmtCCBInitialize(gHandle, &ccbHandle); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS ccb initialize returned [%#x]", rc); goto out; } /* * Add the existing SI CSI list to the supported list. 
*/ rc = clAmsMgmtGetSGSIList(gHandle, sgName, &siBuffer); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS sg si list returned [%#x]", rc); goto out; } if(siBuffer.count) { rc = clAmsMgmtEntityGetConfig(gHandle, siBuffer.entity, &pSGRefSI); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS reference si get config returned [%#x]", rc); goto out_free; } } for(i = 0; i < siBuffer.count; ++i) { ClUint32T j; ClAmsEntityBufferT csiBuffer = {CL_AMS_ENTITY_TYPE_ENTITY}; ClAmsSIConfigT siConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClUint64T mask = 0; memcpy(&siConfig.entity, siBuffer.entity+i, sizeof(siConfig.entity)); mask |= SI_CONFIG_NUM_STANDBY_ASSIGNMENTS; siConfig.numStandbyAssignments = numStandbySUs; if(numActiveSUs > 1) siConfig.numStandbyAssignments = (numStandbySUs+1)&~1; siConfig.numStandbyAssignments = CL_MAX(1, siConfig.numStandbyAssignments/ (numActiveSUs?numActiveSUs:1)); /* * Update the num standby assignments. */ rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &siConfig.entity, mask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SI [%.*s] num standby set returned [%#x]", siConfig.entity.name.length-1, siConfig.entity.name.value, rc); } rc = clAmsMgmtGetSICSIList(gHandle, siBuffer.entity+i, &csiBuffer); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS get si csi list returned [%#x]", rc); goto out_free; } pNumSupportedCSITypes = (SaNameT*) clHeapRealloc(pNumSupportedCSITypes, (numSupportedCSITypes+csiBuffer.count)*sizeof(SaNameT)); for(j = 0; j < csiBuffer.count ; ++j) { ClAmsEntityConfigT *entityConfig = NULL; ClAmsCSIConfigT csiConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClUint32T k; rc = clAmsMgmtEntityGetConfig(gHandle, csiBuffer.entity+j, &entityConfig); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS csi get config returned [%#x]", rc); goto out_free; } memcpy(&csiConfig, entityConfig, sizeof(csiConfig)); if(!pSIRefCSI) { pSIRefCSI = entityConfig; } else { clHeapFree(entityConfig); } /* * Search for this csi type in the list to see if its * already 
present */ for(k = 0; k < numSupportedCSITypes; ++k) { if(!memcmp(pNumSupportedCSITypes[k].value, csiConfig.type.value, pNumSupportedCSITypes[k].length)) break; } if(k == numSupportedCSITypes) { memcpy(pNumSupportedCSITypes+numSupportedCSITypes, &csiConfig.type, sizeof(csiConfig.type)); ++numSupportedCSITypes; } } clHeapFree(csiBuffer.entity); } if(extraSIs) { sis = (ClAmsEntityT*) clHeapCalloc(extraSIs, sizeof(ClAmsEntityT)); CL_ASSERT(sis != NULL); csis = (ClAmsEntityT*) clHeapCalloc(extraSIs, sizeof(ClAmsEntityT)); for(i = siBuffer.count; i < siBuffer.count + extraSIs; ++i) { ClAmsEntityT si ={CL_AMS_ENTITY_TYPE_ENTITY}; ClAmsEntityT csi = {CL_AMS_ENTITY_TYPE_ENTITY}; ClUint64T bitMask = 0; ClAmsSIConfigT siConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClAmsCSIConfigT csiConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; si.type = CL_AMS_ENTITY_TYPE_SI; snprintf((ClCharT*)si.name.value, sizeof(si.name.value)-1, "%s_%.*s_SI%d", prefix, sgName->name.length-1, (const ClCharT*)sgName->name.value, i); clLogNotice("AMS", "MIGRATE", "Creating SI [%s]", si.name.value); si.name.length = strlen((const ClCharT*)si.name.value)+1; rc = clAmsMgmtCCBEntityCreate(ccbHandle, &si); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS entity create returned [%#x]", rc); goto out_free; } memcpy(&sis[i-siBuffer.count], &si, sizeof(si)); rc = clAmsMgmtCCBSetSGSIList(ccbHandle, sgName, &si); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS set sg silist returned [%#x]", rc); goto out_free; } if(pSGRefSI) { /* * Set config to the base SI. 
*/ bitMask = CL_AMS_CONFIG_ATTR_ALL; memcpy(&siConfig, pSGRefSI, sizeof(siConfig)); memcpy(&siConfig.entity, &si, sizeof(siConfig.entity)); siConfig.numStandbyAssignments = numStandbySUs; if(numActiveSUs > 1 ) siConfig.numStandbyAssignments = (numStandbySUs+1)&~1; siConfig.numStandbyAssignments = CL_MAX(1,siConfig.numStandbyAssignments/ (numActiveSUs?numActiveSUs:1)); siConfig.numCSIs = 1; siConfig.adminState = CL_AMS_ADMIN_STATE_LOCKED_A; rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &siConfig.entity, bitMask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS entity set config returned [%#x]", rc); goto out_free; } } csi.type = CL_AMS_ENTITY_TYPE_CSI; snprintf((ClCharT*)csi.name.value, sizeof(csi.name.value), "%s_CSI%d", (const ClCharT*)si.name.value, i-siBuffer.count); csi.name.length = strlen((const ClCharT*)csi.name.value)+1; clLogNotice("AMS", "MIGRATE", "Creating CSI [%s]", csi.name.value); rc = clAmsMgmtCCBEntityCreate(ccbHandle, &csi); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS csi create returned [%#x]", rc); goto out_free; } memcpy(&csis[i-siBuffer.count], &csi, sizeof(csi)); rc = clAmsMgmtCCBSetSICSIList(ccbHandle, &si, &csi); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SET si csi list returned [%#x]", rc); goto out_free; } if(pSIRefCSI) { /* * Load the config. for the base csi type. */ memcpy(&csiConfig, pSIRefCSI, sizeof(csiConfig)); memcpy(&csiConfig.entity, &csi, sizeof(csiConfig.entity)); csiConfig.isProxyCSI = CL_FALSE; memcpy(&csiConfig.type, &csiConfig.entity.name, sizeof(csiConfig.type)); bitMask = CL_AMS_CONFIG_ATTR_ALL; rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &csiConfig.entity, bitMask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS ref csi set config returned [%#x]", rc); goto out_free; } } /* * Add this to the supported list. 
*/ pNumSupportedCSITypes = (SaNameT*) clHeapRealloc(pNumSupportedCSITypes, (numSupportedCSITypes+1)*sizeof(SaNameT)); CL_ASSERT(pNumSupportedCSITypes != NULL); memcpy(pNumSupportedCSITypes+numSupportedCSITypes, &csi.name, sizeof(SaNameT)); ++numSupportedCSITypes; } } if(extraNodes) { nodes = (ClAmsEntityT*) clHeapCalloc(extraNodes, sizeof(ClAmsEntityT)); CL_ASSERT(nodes != NULL); rc = clAmsMgmtGetNodeList(gHandle, &nodeBuffer); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS get node list returned [%#x]", rc); goto out_free; } for(i = nodeBuffer.count ; i < nodeBuffer.count + extraNodes; ++i) { ClAmsEntityT node = {CL_AMS_ENTITY_TYPE_ENTITY}; node.type = CL_AMS_ENTITY_TYPE_NODE; snprintf((ClCharT*) node.name.value, sizeof(node.name.value), "%s_Node%d", prefix, i); node.name.length = strlen((const ClCharT*) node.name.value) + 1; clLogNotice("AMS", "MIGRATE", "Creating node [%s]", node.name.value); rc = clAmsMgmtCCBEntityCreate(ccbHandle, &node); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS ccb create returned [%#x]", rc); goto out_free; } memcpy(&nodes[i-nodeBuffer.count], &node, sizeof(node)); } } rc = clAmsMgmtGetSGSUList(gHandle, sgName, &suBuffer); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "Get SG su list returned [%#x]", rc); goto out_free; } for(i = 0 ; i < suBuffer.count; ++i) { ClUint32T j; ClAmsEntityBufferT compBuffer= { 0 } ; rc = clAmsMgmtGetSUCompList(gHandle, &suBuffer.entity[i], &compBuffer); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "Get SU comp list returned [%#x]", rc); goto out_free; } /* * Get the first component properties. */ if(!pSURefComp) { rc = clAmsMgmtEntityGetConfig(gHandle, compBuffer.entity, &pSURefComp); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS base comp get config returned [%#x]", rc); goto out_free; } } /* * Update all config. with supported csi types. 
* and correct comp config whereever appropriate */ for(j = 0; j < compBuffer.count; ++j) { ClAmsEntityConfigT *entityConfig =NULL; ClAmsCompConfigT compConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClUint64T bitMask = 0; ClUint32T k ; rc = clAmsMgmtEntityGetConfig(gHandle, compBuffer.entity+j, &entityConfig); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS comp get config returned [%#x]", rc); goto out_free; } memcpy(&compConfig, entityConfig, sizeof(compConfig)); clHeapFree(entityConfig); /* * update supported CSI type incase of SI additions. */ if(extraSIs) { bitMask |= COMP_CONFIG_SUPPORTED_CSI_TYPE; compConfig.pSupportedCSITypes = (SaNameT*) clHeapRealloc(compConfig.pSupportedCSITypes, (compConfig.numSupportedCSITypes + extraSIs)* sizeof(SaNameT)); CL_ASSERT(compConfig.pSupportedCSITypes); for(k = compConfig.numSupportedCSITypes; k < compConfig.numSupportedCSITypes + extraSIs; ++k) { memcpy(compConfig.pSupportedCSITypes+k, &csis[k-compConfig.numSupportedCSITypes].name, sizeof(SaNameT)); } compConfig.numSupportedCSITypes += extraSIs; } bitMask |= COMP_CONFIG_NUM_MAX_STANDBY_CSIS; /* * take active to standby ratio */ compConfig.numMaxStandbyCSIs = numActiveSUs; if(numStandbySUs > 1 ) compConfig.numMaxStandbyCSIs = (numActiveSUs+1)&~1; compConfig.numMaxStandbyCSIs = CL_MAX(1, compConfig.numMaxStandbyCSIs/ (numStandbySUs?numStandbySUs:1)); rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &compConfig.entity, bitMask); clHeapFree(compConfig.pSupportedCSITypes); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS entity set config returned [%#x]", rc); goto out_free; } } clHeapFree(compBuffer.entity); } if(extraSUs) { sus = (ClAmsEntityT*) clHeapCalloc(extraSUs, sizeof(ClAmsEntityT)); CL_ASSERT(sus != NULL); comps = (ClAmsEntityT*) clHeapCalloc(extraSUs, sizeof(ClAmsEntityT)); CL_ASSERT(comps != NULL); nodeList = (ClAmsEntityT*) clHeapCalloc(extraSUs + extraNodes, sizeof(ClAmsEntityT)); CL_ASSERT(nodeList != NULL); rc = clAmsMgmtGetSUFreeNodes(sgName, prefix, extraSUs, 
extraNodes, nodeList, &numNodes); for(i = suBuffer.count; i < suBuffer.count + extraSUs; ++i) { ClAmsEntityT su = {CL_AMS_ENTITY_TYPE_ENTITY}; ClAmsEntityT comp = {CL_AMS_ENTITY_TYPE_ENTITY}; ClAmsSUConfigT suConfig = { { CL_AMS_ENTITY_TYPE_ENTITY } } ; ClAmsCompConfigT compConfig = {{CL_AMS_ENTITY_TYPE_ENTITY}}; ClUint64T bitMask = 0; su.type = CL_AMS_ENTITY_TYPE_SU; snprintf((ClCharT*)su.name.value, sizeof(su.name.value), "%s_%s_SU%d", prefix, (const ClCharT*)nodeList[i-suBuffer.count].name.value, i); su.name.length = strlen((const ClCharT*)su.name.value)+1; clLogNotice("AMS", "MIGRATE", "Creating SU [%s]", su.name.value); rc = clAmsMgmtCCBEntityCreate(ccbHandle, &su); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SU create returned [%#x]", rc); goto out_free; } memcpy(&sus[i-suBuffer.count], &su, sizeof(su)); /* * Assign this SU under the parent node and SG */ rc = clAmsMgmtCCBSetNodeSUList(ccbHandle, &nodeList[i-suBuffer.count], &su); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "Node su list set returned [%#x]", rc); goto out_free; } /* * Give the parent SG. 
for this SU */ rc = clAmsMgmtCCBSetSGSUList(ccbHandle, sgName, &su); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "Set SG su list returned [%#x]", rc); goto out_free; } bitMask = SU_CONFIG_NUM_COMPONENTS; memcpy(&suConfig.entity, &su, sizeof(suConfig.entity)); suConfig.numComponents = 1; rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &suConfig.entity, bitMask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "SU set config returned [%#x]", rc); goto out_free; } comp.type = CL_AMS_ENTITY_TYPE_COMP; snprintf((ClCharT*) comp.name.value, sizeof(comp.name.value), "%s_Comp%d", su.name.value, i - suBuffer.count); comp.name.length = strlen((const ClCharT*) comp.name.value) + 1; clLogNotice("AMS", "MIGRATE", "Creating component [%s]", comp.name.value); rc = clAmsMgmtCCBEntityCreate(ccbHandle, &comp); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "Comp create returned [%#x]", rc); goto out_free; } memcpy(&comps[i-suBuffer.count], &comp, sizeof(comp)); rc = clAmsMgmtCCBSetSUCompList(ccbHandle, &su, &comp); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS set su comp list returned [%#x]", rc); goto out_free; } if(pSURefComp) { /* * At this stage, we have created the hierarchy. * Set the comp property to the base component type and * add the num supported CSI types to be part of every component * added to the SU. */ bitMask = CL_AMS_CONFIG_ATTR_ALL; memcpy(&compConfig, pSURefComp, sizeof(compConfig)); memcpy(&compConfig.entity, &comp, sizeof(compConfig.entity)); compConfig.numSupportedCSITypes = numSupportedCSITypes; compConfig.pSupportedCSITypes = pNumSupportedCSITypes; memcpy(&compConfig.parentSU.entity, &su, sizeof(compConfig.parentSU.entity)); /* * Distribute the standbys based on the active/standby ratio. 
*/ compConfig.numMaxStandbyCSIs = numActiveSUs; if(numStandbySUs > 1 ) compConfig.numMaxStandbyCSIs = (numActiveSUs+1)&~1; compConfig.numMaxStandbyCSIs = CL_MAX(1, compConfig.numMaxStandbyCSIs/ (numStandbySUs?numStandbySUs:1)); rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &compConfig.entity, bitMask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS set config returned [%#x]", rc); goto out_free; } } } } /* * At this stage, we are all set to commit. after updating SG config. */ { ClUint64T bitMask = 0; bitMask |= SG_CONFIG_REDUNDANCY_MODEL; sgConfig.redundancyModel = model; sgConfig.numPrefActiveSUs = numActiveSUs; bitMask |= SG_CONFIG_NUM_PREF_ACTIVE_SUS; sgConfig.numPrefStandbySUs = numStandbySUs; bitMask |= SG_CONFIG_NUM_PREF_STANDBY_SUS; if(sgConfig.numPrefInserviceSUs < numActiveSUs + numStandbySUs) { sgConfig.numPrefInserviceSUs = numActiveSUs + numStandbySUs; bitMask |= SG_CONFIG_NUM_PREF_INSERVICE_SUS; } sgConfig.numPrefAssignedSUs = numActiveSUs + numStandbySUs; bitMask |= SG_CONFIG_NUM_PREF_ASSIGNED_SUS; /* * Active standby ratio. */ sgConfig.maxStandbySIsPerSU = numActiveSUs; if(numStandbySUs > 1 ) sgConfig.maxStandbySIsPerSU = (numActiveSUs+1)&~1; sgConfig.maxStandbySIsPerSU = CL_MAX(1,sgConfig.maxStandbySIsPerSU/ (numStandbySUs?numStandbySUs:1)); bitMask |= SG_CONFIG_MAX_STANDBY_SIS_PER_SU; rc = clAmsMgmtCCBEntitySetConfig(ccbHandle, &sgConfig.entity, bitMask); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS sg set config returned [%#x]", rc); goto out_free; } } rc = clAmsMgmtCCBCommit(ccbHandle); if(rc != CL_OK) { clLogError("AMS", "MIGRATE", "AMS database commit returned [%#x]", rc); } /* * Okay, the commit is successful. Now unlock all added entities * except SU so that other attributes could be updated before unlocking * Do that in a separate thread as there could be pending invocations. 
*/ unlockList = (ClAmsMgmtMigrateListT*) clHeapCalloc(1, sizeof(*unlockList)); CL_ASSERT(unlockList != NULL); unlockList->si.count = extraSIs; unlockList->node.count = extraNodes; unlockList->su.count = extraSUs; unlockList->si.entity = (ClAmsEntityT*) clHeapCalloc(extraSIs, sizeof(*sis)); unlockList->node.entity = (ClAmsEntityT*) clHeapCalloc(extraNodes, sizeof(*nodes)); unlockList->su.entity = (ClAmsEntityT*) clHeapCalloc(extraSUs, sizeof(*sus)); CL_ASSERT(unlockList->si.entity && unlockList->node.entity && unlockList->su.entity); memcpy(unlockList->si.entity, sis, sizeof(*sis)*extraSIs); memcpy(unlockList->node.entity, nodes, sizeof(*nodes)*extraNodes); memcpy(unlockList->su.entity, sus, sizeof(*sus) * extraSUs); clOsalTaskCreateDetached("MIGRATE-UNLOCK-THREAD", CL_OSAL_SCHED_OTHER, 0, 0, clAmsMgmtMigrateListUnlock, (void*)unlockList); /* * Return the newly created info. in the migrated list. */ if(migrateList) { if(extraSIs) { migrateList->si.count = extraSIs; migrateList->si.entity = sis; migrateList->csi.count = extraSIs; migrateList->csi.entity = csis; sis = csis = NULL; } if(extraNodes) { migrateList->node.count = extraNodes; migrateList->node.entity = nodes; nodes = NULL; } if(extraSUs) { migrateList->su.count = extraSUs; migrateList->su.entity = sus; migrateList->comp.count = extraSUs; migrateList->comp.entity = comps; sus = comps = NULL; } } out_free: clAmsMgmtCCBFinalize(ccbHandle); if(siBuffer.entity) clHeapFree(siBuffer.entity); if(nodeBuffer.entity) clHeapFree(nodeBuffer.entity); if(suBuffer.entity) clHeapFree(suBuffer.entity); if(nodeList) clHeapFree(nodeList); if(nodes) clHeapFree(nodes); if(sus) clHeapFree(sus); if(comps) clHeapFree(comps); if(sis) clHeapFree(sis); if(csis) clHeapFree(csis); if(pSGRefSI) clHeapFree(pSGRefSI); if(pSIRefCSI) clHeapFree(pSIRefCSI); if(pSURefComp) clHeapFree(pSURefComp); if(pNumSupportedCSITypes) clHeapFree(pNumSupportedCSITypes); out: return rc; }
/**
 * NOT AN EXTERNAL API. PLEASE DO NOT DOCUMENT.
 *
 * This routine creates an object instance in the calling context, and
 * sets the object type the one specified as the argument. Also, the
 * object instance is added to the object manager database.
 *
 * \param classId      OM class whose instance(s) are being created.
 * \param numInstances Number of contiguous instances to create (must be > 0).
 * \param handle       [out] Receives the OM handle of the first instance.
 * \param pExtObj      Externally allocated object; required for CL_OM_ADD_FLAG.
 * \param flag         CL_OM_CREATE_FLAG (allocate here) or CL_OM_ADD_FLAG
 *                     (register caller-supplied memory).
 * \param rc           [out] Detailed return code; must be non-NULL.
 * \param pUsrData     Opaque user data passed to the class initializers.
 * \param usrDataLen   Length of pUsrData.
 *
 * \return Pointer to the newly allocated object block for CL_OM_CREATE_FLAG,
 *         NULL otherwise (including all error paths; inspect *rc).
 */
void *omCommonCreateObj(ClOmClassTypeT classId, ClUint32T numInstances,
                        ClHandleT *handle, void *pExtObj, int flag, ClRcT *rc,
                        void *pUsrData, ClUint32T usrDataLen)
{
    int idx;
    char *pObjPtr = NULL;
    ClUint32T **pInst;
    ClUint32T instIdx = 0;
    ClOmClassControlBlockT *pTab;
    char *tmpPtr;
    ClUint32T instBlkLen = 0;

    CL_FUNC_ENTER();

    if (NULL == rc)
    {
        clLogError("OMG", "OMC", "Null value passed for return code");
        return (NULL);
    }

    /* CL_OM_ADD_FLAG registers caller memory, so pExtObj is mandatory then. */
    if (NULL == handle || ((flag == CL_OM_ADD_FLAG) && (NULL == pExtObj)))
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("NULL handle is passed."));
        *rc = CL_OM_SET_RC(CL_OM_ERR_NULL_PTR);
        return (NULL);
    }

    *rc = 0;

    /* validate the input arguments */
    if (omClassTypeValidate(classId) != CL_OK)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Invalid input classId arguments"));
        *rc = CL_OM_SET_RC(CL_OM_ERR_INVALID_CLASS);
        return (NULL);
    }

    pTab = clOmClassEntryGet(classId);
    CL_ASSERT(pTab);

    if (!numInstances)
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR, ("Invalid input numInstances arguments"));
        *rc = CL_OM_SET_RC(CL_OM_ERR_INVALID_OBJ_INSTANCE);
        return (NULL);
    }

    /* Get obj memory block length */
    instBlkLen = pTab->size * numInstances;

Reallocate:
    /*
     * Check if the class control structure is initalized with the instance
     * table. This is done during the initialization of the class table.
     */
    if (!(pInst = pTab->pInstTab))
    {
        CL_DEBUG_PRINT(CL_DEBUG_ERROR,
                       ("Instance table for the class does not exist"));
        *rc = CL_OM_SET_RC(CL_OM_ERR_INSTANCE_TAB_NOT_EXISTS);
        return (NULL);
    }

    /* Find the first empty slot in the instance table */
    *rc = omGetFreeInstSlot(pInst, pTab->maxObjInst, &instIdx);
    if (CL_GET_ERROR_CODE(*rc) == CL_ERR_NOT_EXIST)
    {
        ClUint32T **tmp_ptr = NULL;
        ClUint32T tmp_size = 0;
        ClUint32T oldMaxInst = pTab->maxObjInst;

        clLogDebug("OMC", "OBC",
                   "No free slot found in the OM class [0x%x] buffer for this object. "
                   "Reallocating the class buffer size.", classId);

        /*
         * No free slot found: grow the table to twice its current size.
         * Reallocate first and commit maxObjInst/pInstTab only on success,
         * so a failed realloc leaves the table consistent.
         */
        tmp_size = (oldMaxInst * 2 * sizeof(ClUint32T *));
        tmp_ptr = (ClUint32T **) clHeapRealloc(pTab->pInstTab, tmp_size);
        if (NULL == tmp_ptr)
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR,
                           ("Failed to allocate memory for Instance Table"));
            *rc = CL_OM_SET_RC(CL_OM_ERR_NO_MEMORY);
            return (NULL);
        }
        /*
         * realloc leaves the grown region indeterminate: zero it so the
         * free-slot scan and the mGET_REAL_ADDR() occupancy checks below do
         * not mistake stale heap bytes for live object pointers.
         */
        memset(tmp_ptr + oldMaxInst, 0, oldMaxInst * sizeof(ClUint32T *));
        pTab->maxObjInst = oldMaxInst * 2; /* Double the size of max instances */
        pTab->pInstTab = tmp_ptr;
        goto Reallocate;
    }

    clLogTrace("OMC", "OBC",
               "Allocating the index [%u] in the OM class [0x%x] buffer.",
               instIdx, classId);

    /* Check if we have room for the contiguous instance available to
     * allocate the object instances requested by the user.
     * NOTE: We could enhance this later to allow dis-contiguous slots */
    for (idx = instIdx; idx < (instIdx + numInstances); idx++)
    {
        if (mGET_REAL_ADDR(pInst[idx]))
        {
            CL_DEBUG_PRINT(CL_DEBUG_ERROR,
                           ("Unable to fit requested num instances"));
            *rc = CL_OM_SET_RC(CL_OM_ERR_INSTANCE_TAB_NOSLOTS);
            return (NULL);
        }
    }

    /* Allocate the memory for the object instances */
    if (flag == CL_OM_CREATE_FLAG)
    {
        pObjPtr = (char*)clHeapAllocate(instBlkLen);
        if (NULL == pObjPtr)
        {
            /*
             * TODO: To check if we have to go a free the instances
             * that were allocated when (numInstances > 1) req.
             */
#if (CW_PROFILER == YES)
            /* TODO: lockId = osLock(); */
            /* TODO: pOM->perfStat.memoryAllocFail++; */
            /* TODO: osUnLock(lockId); */
#endif
            CL_DEBUG_PRINT(CL_DEBUG_CRITICAL,
                           ("unable to allocate memory from heap!!"));
            CL_FUNC_EXIT();
            /* NOTE(review): raw CL_ERR_NO_MEMORY (no CL_OM_SET_RC wrapper),
             * unlike the other error paths — callers may depend on the raw
             * code, so it is left as-is; confirm before normalizing. */
            *rc = CL_ERR_NO_MEMORY;
            return (NULL);
        }
        /* Reset object block contents to 0 */
        memset(pObjPtr, 0, instBlkLen);
        tmpPtr = pObjPtr;
    }
    else if (flag == CL_OM_ADD_FLAG)
    {
        tmpPtr = pExtObj;
    }
    else
    {
        CL_DEBUG_PRINT(CL_DEBUG_TRACE, ("Unknown flag argument passed"));
        *rc = CL_ERR_INVALID_PARAMETER;
        return (NULL);
    }

    /* Now, add it to the instance table */
    for (idx = instIdx; idx < (instIdx + numInstances); idx++)
    {
        /*
         * Cautionary check, if the address is *NOT* aligned on a four
         * byte boundry
         */
        if ((ClWordT)tmpPtr & INST_BITS_MASK)
        {
            CL_DEBUG_PRINT(CL_DEBUG_CRITICAL,
                           ("Allocated buffer not on word aligned boundry"));
            *rc = CL_OM_SET_RC(CL_OM_ERR_ALOC_BUF_NOT_ALIGNED);
            return (NULL);
        }

        /* Start adding the object to the instance table */
        ((struct CL_OM_BASE_CLASS *)tmpPtr)->__objType =
            CL_OM_FORM_HANDLE(classId, instIdx);

        /* TODO: lockId = osLock(); */
        /* Tag OM-allocated slots so teardown knows who owns the memory. */
        if (flag == CL_OM_CREATE_FLAG)
            pInst[idx] = (ClUint32T *)mSET_ALLOC_BY_OM(tmpPtr);
        else
            pInst[idx] = (ClUint32T *)tmpPtr;
        pTab->curObjCount++;
#if (CW_PROFILER == YES)
        /* pOM->perfStat.objectCreated++; */
#endif
        /* TODO: osUnLock(lockId); */

        /* Now, start calling the initializer method for the class hierarchy */
        *rc = omInitObjHierarchy(pTab, classId, (void *)tmpPtr, pUsrData,
                                 usrDataLen);
        tmpPtr += pTab->size;
    }

    /* return the handle argument */
    *handle = CL_OM_FORM_HANDLE(classId, instIdx);

    CL_FUNC_EXIT();
    if (flag == CL_OM_CREATE_FLAG)
        return ((void *)pObjPtr);
    else
        return (NULL);
}
/*
 * clAmsPeSGAssignSUCustom
 * -----------------------
 * Custom-redundancy assignment pass for an SG: first gives active
 * assignments to every SI that needs one, then standby assignments.
 * Both phases loop until the "find SI" helper reports CL_ERR_NOT_EXIST
 * (normal termination) or no forward progress is made.
 *
 * Returns CL_OK on success; otherwise the first non-CL_ERR_NOT_EXIST
 * error from the find/assign helpers.
 */
ClRcT clAmsPeSGAssignSUCustom(CL_IN ClAmsSGT *sg)
{
    /* SIs skipped in the standby phase (e.g. fixed-slot protection), so the
     * find helper does not hand them back again. Grown on demand. */
    ClAmsSIT **scannedSIList = NULL;
    ClUint32T numScannedSIs = 0;
    ClUint32T numMaxSIs = 0;   /* current capacity of scannedSIList */

    AMS_CHECK_SG ( sg );
    AMS_FUNC_ENTER ( ("SG [%s]\n",sg->config.entity.name.value) );

    /*
     * Find SU assignments for SIs requiring active assignments
     */
    {
        ClRcT rc1 = CL_OK;
        ClRcT rc2 = CL_OK;
        /* Remember the previous (SI, SU) pick to detect a livelock where the
         * helpers keep returning the same pair without progress. */
        ClAmsSIT *lastSI = NULL;
        ClAmsSUT *lastSU = NULL;
        while ( 1 )
        {
            ClAmsSUT *su = NULL;
            ClAmsSIT *si=NULL;
            /* CL_ERR_NOT_EXIST here means "no more SIs need active" — the
             * normal way out of this loop. */
            rc1 = clAmsPeSGFindSIForActiveAssignmentCustom(sg, &si, &su);
            if ( rc1 != CL_OK )
            {
                break;
            }
            clLogInfo("SG", "ASI",
                      "SI [%.*s] needs assignment...",
                      si->config.entity.name.length-1,
                      si->config.entity.name.value);
            /* The SI lookup may already have picked an SU; otherwise find one. */
            if(!su)
            {
                rc2 = clAmsPeSGFindSUForActiveAssignmentCustom(sg, &su, si);
                if ( rc2 != CL_OK )
                {
                    break;
                }
            }
            /* Same pair as last iteration => no progress; bail out. */
            if( (lastSI == si) && (lastSU == su) )
            {
                AMS_LOG(CL_DEBUG_ERROR,
                        ("Assign active to SG - Current SI and SU same as "\
                         "last selection. Breaking out of assignment\n"));
                break;
            }
            lastSI = si;
            lastSU = su;
            su->status.numWaitAdjustments = 0;
            /* AMS_CALL returns from this function on failure. */
            AMS_CALL ( clAmsPeSUAssignSI(su, si, CL_AMS_HA_STATE_ACTIVE) );
        }
        /* CL_ERR_NOT_EXIST is the expected terminator; anything else is real. */
        if ( (rc1 != CL_OK) && (CL_GET_ERROR_CODE(rc1) != CL_ERR_NOT_EXIST) )
        {
            return rc1;
        }
        if ( (rc2 != CL_OK) && (CL_GET_ERROR_CODE(rc2) != CL_ERR_NOT_EXIST) )
        {
            return rc2;
        }
    }

    /*
     * Find SU assignments for SIs requiring standby assignments
     */
    {
        ClRcT rc1 = CL_OK;
        ClRcT rc2 = CL_OK;
        ClAmsSIT *lastSI = NULL;
        ClAmsSUT *lastSU = NULL;
        while ( 1 )
        {
            ClAmsSIT *si = NULL;
            ClAmsSUT *su = NULL;
            /* scannedSIList tells the helper which SIs were already tried
             * and skipped, so they are not selected again. */
            rc1 = clAmsPeSGFindSIForStandbyAssignmentCustom(sg, &si, &su,
                                                            scannedSIList,
                                                            numScannedSIs);
            if ( rc1 != CL_OK )
            {
                break;
            }
            if(!su)
            {
                rc2 = clAmsPeSGFindSUForStandbyAssignmentCustom(sg, &su, si);
                if ( rc2 != CL_OK )
                {
                    break;
                }
            }
            /* Livelock guard, same as the active phase. */
            if( (lastSI == si) && (lastSU == su) )
            {
                AMS_LOG(CL_DEBUG_ERROR,
                        ("Assign standby to SG - Current SI and SU same as "\
                         "last selection. Breaking out of assignment step\n"));
                break;
            }
            lastSI = si;
            lastSU = su;
            rc2 = clAmsPeSUAssignSI(su, si, CL_AMS_HA_STATE_STANDBY);
            if(rc2 != CL_OK)
            {
                if(CL_GET_ERROR_CODE(rc2) == CL_ERR_DOESNT_EXIST
                   ||
                   CL_GET_ERROR_CODE(rc2) == CL_ERR_NOT_EXIST)
                {
                    /*
                     * We could be encountering fixed slot protection config.
                     * So skip this SI and check for other SIs that could be
                     * assigned as standby
                     */
                    ClUint32T numSIs = sg->config.siList.numEntities;
                    if(!numSIs)
                    {
                        if(scannedSIList) clHeapFree(scannedSIList);
                        return rc2;
                    }
                    /* Grow the skip list to hold one entry per SI in the SG.
                     * Each SI can be skipped at most once, so numSIs slots
                     * suffice. */
                    if(numSIs > numMaxSIs)
                    {
                        numMaxSIs = numSIs;
                        scannedSIList = clHeapRealloc(scannedSIList,
                                                      numSIs * sizeof(*scannedSIList));
                        CL_ASSERT(scannedSIList != NULL);
                    }
                    scannedSIList[numScannedSIs++] = si;
                    rc2 = CL_OK;  /* skip is not an error; keep looping */
                    continue;
                }
                else
                {
                    if(scannedSIList) clHeapFree(scannedSIList);
                    return rc2;
                }
            }
        }
        if ( (rc1 != CL_OK) && (CL_GET_ERROR_CODE(rc1) != CL_ERR_NOT_EXIST) )
        {
            if(scannedSIList) clHeapFree(scannedSIList);
            return rc1;
        }
        if ( (rc2 != CL_OK) && (CL_GET_ERROR_CODE(rc2) != CL_ERR_NOT_EXIST) )
        {
            if(scannedSIList) clHeapFree(scannedSIList);
            return rc2;
        }
    }

    if(scannedSIList) clHeapFree(scannedSIList);
    return CL_OK;
}