/*
 *  ======== HeapStd_alloc ========
 *  This heap uses the 'C' rts malloc call.
 *
 *  Only support alignment requests that will be honored by malloc.
 *
 *  Parameters:
 *    obj   - heap object; obj->remainSize tracks the bytes still available
 *            and is only touched while holding the system gate.
 *    size  - number of bytes requested.
 *    align - requested alignment; asserted to be <= the maximum default
 *            type alignment (i.e. whatever malloc already guarantees).
 *    eb    - error block; not written by this function (failure is
 *            reported by returning NULL).
 *
 *  Returns the allocated buffer, or NULL if the heap's remaining-size
 *  budget is exhausted or malloc itself fails.
 */
Ptr HeapStd_alloc(HeapStd_Object *obj, SizeT size, SizeT align,
        Error_Block *eb)
{
    Ptr buf;
    IArg key;

    /* Make sure the specified alignment is not too large */
    Assert_isTrue((align <= Memory_getMaxDefaultTypeAlign()),
            HeapStd_A_invalidAlignment);

    /* Determine if there is enough memory */
    key = Gate_enterSystem();

    if ((SizeT)(obj->remainSize) < size) {
        Gate_leaveSystem(key);
        return (NULL);
    }

    /* Reserve the bytes first, then drop the gate before calling malloc,
     * so the (potentially slow) rts allocation runs outside the lock.
     */
    obj->remainSize -= size;

    Gate_leaveSystem(key);

    /* malloc the buffer! */
    if ((buf = malloc(size)) == NULL) {
        /* Undo the size change in case of a failure */
        key = Gate_enterSystem();
        obj->remainSize += size;
        Gate_leaveSystem(key);
        return (NULL);
    }

    return (buf);
}
/* * ======== ListMP_sharedMemReq ======== */ SizeT ListMP_sharedMemReq(const ListMP_Params *params) { SizeT memReq, minAlign; UInt16 regionId; if (params->sharedAddr == NULL) { regionId = params->regionId; } else { regionId = SharedRegion_getId(params->sharedAddr); } /* Assert that the region is valid */ Assert_isTrue(regionId != SharedRegion_INVALIDREGIONID, ti_sdo_ipc_Ipc_A_addrNotInSharedRegion); minAlign = Memory_getMaxDefaultTypeAlign(); if (SharedRegion_getCacheLineSize(regionId) > minAlign) { minAlign = SharedRegion_getCacheLineSize(regionId); } memReq = _Ipc_roundup(sizeof(ti_sdo_ipc_ListMP_Attrs), minAlign); return (memReq); }
/* * ======== TransportShm_sharedMemReq ======== */ SizeT TransportShm_sharedMemReq(const TransportShm_Params *params) { SizeT memReq, minAlign; UInt16 regionId; ListMP_Params listMPParams; regionId = SharedRegion_getId(params->sharedAddr); minAlign = Memory_getMaxDefaultTypeAlign(); if (SharedRegion_getCacheLineSize(regionId) > minAlign) { minAlign = SharedRegion_getCacheLineSize(regionId); } /* for the Attrs structure */ memReq = _Ipc_roundup(sizeof(TransportShm_Attrs), minAlign); /* for the second Attrs structure */ memReq += _Ipc_roundup(sizeof(TransportShm_Attrs), minAlign); ListMP_Params_init(&listMPParams); listMPParams.regionId = regionId; /* for localListMP */ memReq += ListMP_sharedMemReq(&listMPParams); /* for remoteListMP */ memReq += ListMP_sharedMemReq(&listMPParams); return(memReq); }
/*
 *  ======== GatePetersonN_Instance_init ========
 *  Initialize a GatePetersonN instance over shared memory.
 *
 *  Carves params->sharedAddr into the protocol's state variables, one
 *  per minAlign-sized slot:
 *      enteredStage[numProcessors]              (numProcessors slots)
 *      lastProcEnteringStage[numProcessors - 1] (numProcessors-1 slots)
 *
 *  Parameters:
 *    obj       - instance object to fill in.
 *    localGate - gate used for local (intra-processor) protection.
 *    params    - creation params; sharedAddr must be non-NULL and inside
 *                the shared region identified by params->regionId.
 *    eb        - error block (not written here; this function cannot fail
 *                and always returns 0).
 *
 *  Returns 0.
 */
Int GatePetersonN_Instance_init(GatePetersonN_Object *obj,
        IGateProvider_Handle localGate,
        const GatePetersonN_Params *params,
        Error_Block *eb)
{
    SizeT offset;
    /* Slot size: larger of default type alignment and cache line size,
     * so each protocol variable sits on its own cache line when caching
     * is enabled.
     */
    SizeT minAlign = Memory_getMaxDefaultTypeAlign();
    SizeT i;

    if (SharedRegion_getCacheLineSize(params->regionId) > minAlign) {
        minAlign = SharedRegion_getCacheLineSize(params->regionId);
    }

    Assert_isTrue(params->sharedAddr != NULL, ti_sdo_ipc_Ipc_A_invParam);
    Assert_isTrue(GatePetersonN_numInstances != 0, ti_sdo_ipc_Ipc_A_invParam);

    obj->localGate = localGate;
    obj->cacheEnabled = SharedRegion_isCacheEnabled(params->regionId);
    obj->cacheLineSize = SharedRegion_getCacheLineSize(params->regionId);
    obj->nested = 0;

    /* This is not cluster aware:
     * obj->numProcessors = MultiProc_getNumProcessors();
     * obj->selfId        = MultiProc_self();
     */

    /* Cluster aware initialization */
    obj->numProcessors = MultiProc_getNumProcsInCluster();

    /* set selfId to 0-based offset within cluster. */
    obj->selfId = MultiProc_self() - MultiProc_getBaseIdOfCluster();

    /* Assign shared memory addresses for the protocol state variables */
    offset = 0;

    /* One enteredStage slot per processor in the cluster */
    for (i = 0; i < obj->numProcessors; i++) {
        obj->enteredStage[i] = (Int32 *)((UArg)(params->sharedAddr) + offset);
        offset += minAlign;
    }

    /* One lastProcEnteringStage slot per protocol stage (numProcessors-1) */
    for (i = 0; i < obj->numProcessors - 1; i++) {
        obj->lastProcEnteringStage[i] =
                (Int32 *)((UArg)(params->sharedAddr) + offset);
        offset += minAlign;
    }

    if (!params->openFlag) {
        /* Creating: initialize the shared state in memory */
        obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC;
        GatePetersonN_postInit(obj);
    }
    else {
        /* Opening: creator already initialized the shared state */
        obj->objType = ti_sdo_ipc_Ipc_ObjType_OPENDYNAMIC;
    }

    return (0);
}
/* Allocates the specified number of bytes.
 *
 * When a heap is supplied, the request is delegated to that heap (with a
 * default alignment substituted for align == 0); otherwise the kernel-side
 * MemoryOS allocator is used. Returns the buffer, or NULL on failure.
 * The eb argument is accepted for interface compatibility but unused.
 */
Ptr
Memory_alloc (IHeap_Handle heap, SizeT size, SizeT align, Ptr eb)
{
    Ptr buffer = NULL;

    GT_4trace (curTrace, GT_ENTER, "Memory_alloc", heap, size, align, eb);

    /* Validate that a non-zero size was requested. */
    GT_assert (curTrace, (size > 0));

    (Void) eb; /* Not used. */

    if (heap != NULL) {
        /* if align == 0, use default alignment */
        if (align == 0) {
            align = Memory_getMaxDefaultTypeAlign ();
        }

        buffer = IHeap_alloc (heap, size, align);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (buffer == NULL) {
            /*! @retval NULL Heap_alloc failed */
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "Memory_alloc",
                                 Memory_E_MEMORY,
                                 "IHeap_alloc failed!");
        }
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        /* Call the kernel API for memory allocation */
        buffer = MemoryOS_alloc (size, align, 0);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (buffer == NULL) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "Memory_alloc",
                                 Memory_E_MEMORY,
                                 "Failed to allocate memory!");
        }
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    GT_1trace (curTrace, GT_LEAVE, "Memory_alloc", buffer);

    return buffer;
}
/*
 *  ======== GatePetersonN_sharedMemReq ========
 *  Report the shared-memory footprint (in bytes) of one GatePetersonN
 *  instance.
 *
 *  Returns room for (2 * numProcessors - 1) protocol state variables:
 *      enteredStage[numProcessors]
 *      lastProcEnteringStage[numProcessors - 1]
 *  each occupying one minAlign-sized slot, matching the layout performed
 *  by GatePetersonN_Instance_init.
 */
SizeT GatePetersonN_sharedMemReq(const IGateMPSupport_Params *params)
{
    SizeT memReq;
    UInt16 numProcessors = MultiProc_getNumProcsInCluster(); /* Cluster aware */
    SizeT minAlign = Memory_getMaxDefaultTypeAlign();

    if (SharedRegion_getCacheLineSize(params->regionId) > minAlign) {
        minAlign = SharedRegion_getCacheLineSize(params->regionId);
    }

    /* Allocate aligned memory for shared state variables used in protocol
     *     enteredStage[NUM_PROCESSORS]
     *     lastProcEnteringStage[NUM_STAGES]
     *
     * BUG FIX: size with minAlign, the same per-variable stride that
     * Instance_init uses when carving up sharedAddr. The previous code
     * multiplied by the raw cache line size, which under-reports the
     * requirement whenever the cache line size is smaller than the
     * default type alignment (e.g. 0 when caching is disabled).
     */
    memReq = ((2 * numProcessors) - 1) * minAlign;

    return (memReq);
}
/*
 *  ======== Memory_alloc ========
 *  If eb has an error already set, we preserve the error if we can. We
 *  could do better by creating a separate error block for the proxy
 *  allocation, but this would cost more than it's worth; the normal case
 *  would pay a constant time overhead for the rare case of allocation
 *  failing when eb already has an error set.
 */
Ptr Memory_alloc(IHeap_Handle heap, SizeT size, SizeT align, Error_Block *eb)
{
    IHeap_Handle allocHeap;
    Ptr block;
    Bool priorError = Error_check(eb);

    /* if align == 0, use default alignment */
    if (align == 0) {
        align = Memory_getMaxDefaultTypeAlign();
    }

    /* substitute the default heap when the caller passed NULL */
    allocHeap = (heap != NULL) ? heap : Memory_defaultHeapInstance;

    block = Memory_HeapProxy_alloc(allocHeap, size, align, eb);

    /* Raise only when the allocation failed AND either eb was already in
     * error beforehand or the allocator did not record an error itself —
     * i.e. never overwrite a fresh error set by the allocator.
     */
    if (block == NULL && (priorError || !Error_check(eb))) {
        Error_raise(eb, Error_E_memory, (IArg)heap, (IArg)size);
    }

    return (block);
}
/* * ======== TransportShm_Instance_init ======== */ Int TransportShm_Instance_init(TransportShm_Object *obj, UInt16 procId, const TransportShm_Params *params, Error_Block *eb) { Int localIndex; Int remoteIndex; Int status; Bool flag; UInt32 minAlign; ListMP_Params listMPParams[2]; Swi_Handle swiHandle; Swi_Params swiParams; Ptr localAddr; swiHandle = TransportShm_Instance_State_swiObj(obj); /* * Determine who gets the '0' slot in shared memory and who gets * the '1' slot. The '0' slot is given to the lower MultiProc id. */ if (MultiProc_self() < procId) { localIndex = 0; remoteIndex = 1; } else { localIndex = 1; remoteIndex = 0; } if (params->openFlag) { /* Open by sharedAddr */ obj->objType = ti_sdo_ipc_Ipc_ObjType_OPENDYNAMIC; obj->self = (TransportShm_Attrs *)params->sharedAddr; obj->regionId = SharedRegion_getId(params->sharedAddr); obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); localAddr = SharedRegion_getPtr(obj->self->gateMPAddr); status = GateMP_openByAddr(localAddr, (GateMP_Handle *)&obj->gate); if (status < 0) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return(1); } } else { /* init the gate for ListMP create below */ if (params->gate != NULL) { obj->gate = params->gate; } else { obj->gate = (ti_sdo_ipc_GateMP_Handle)GateMP_getDefaultRemote(); } /* Creating using sharedAddr */ obj->regionId = SharedRegion_getId(params->sharedAddr); /* Assert that the buffer is in a valid shared region */ Assert_isTrue(obj->regionId != SharedRegion_INVALIDREGIONID, ti_sdo_ipc_Ipc_A_addrNotInSharedRegion); /* Assert that sharedAddr is cache aligned */ Assert_isTrue(((UInt32)params->sharedAddr % SharedRegion_getCacheLineSize(obj->regionId) == 0), ti_sdo_ipc_Ipc_A_addrNotCacheAligned); /* set object's cacheEnabled, type, self */ obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC; obj->self = (TransportShm_Attrs *)params->sharedAddr; } /* determine the minimum alignment to align to */ 
minAlign = Memory_getMaxDefaultTypeAlign(); if (SharedRegion_getCacheLineSize(obj->regionId) > minAlign) { minAlign = SharedRegion_getCacheLineSize(obj->regionId); } /* * Carve up the shared memory. * If cache is enabled, these need to be on separate cache lines. * This is done with minAlign and _Ipc_roundup function. */ obj->other = (TransportShm_Attrs *)((UInt32)(obj->self) + (_Ipc_roundup(sizeof(TransportShm_Attrs), minAlign))); ListMP_Params_init(&(listMPParams[0])); listMPParams[0].gate = (GateMP_Handle)obj->gate; listMPParams[0].sharedAddr = (UInt32 *)((UInt32)(obj->other) + (_Ipc_roundup(sizeof(TransportShm_Attrs), minAlign))); ListMP_Params_init(&listMPParams[1]); listMPParams[1].gate = (GateMP_Handle)obj->gate; listMPParams[1].sharedAddr = (UInt32 *)((UInt32)(listMPParams[0].sharedAddr) + ListMP_sharedMemReq(&listMPParams[0])); obj->priority = params->priority; obj->remoteProcId = procId; Swi_Params_init(&swiParams); swiParams.arg0 = (UArg)obj; Swi_construct(Swi_struct(swiHandle), (Swi_FuncPtr)TransportShm_swiFxn, &swiParams, eb); if (params->openFlag == FALSE) { obj->localList = (ti_sdo_ipc_ListMP_Handle) ListMP_create(&(listMPParams[localIndex])); if (obj->localList == NULL) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (2); } obj->remoteList = (ti_sdo_ipc_ListMP_Handle) ListMP_create(&(listMPParams[remoteIndex])); if (obj->localList == NULL) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (2); } } else { /* Open the local ListMP instance */ status = ListMP_openByAddr(listMPParams[localIndex].sharedAddr, (ListMP_Handle *)&(obj->localList)); if (status < 0) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (2); } /* Open the remote ListMP instance */ status = ListMP_openByAddr(listMPParams[remoteIndex].sharedAddr, (ListMP_Handle *)&(obj->remoteList)); if (status < 0) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (2); } } /* register the event with Notify */ status = Notify_registerEventSingle( procId, /* 
remoteProcId */ 0, /* lineId */ TransportShm_notifyEventId, (Notify_FnNotifyCbck)TransportShm_notifyFxn, (UArg)swiHandle); if (status < 0) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (3); } /* Register the transport with MessageQ */ flag = ti_sdo_ipc_MessageQ_registerTransport( TransportShm_Handle_upCast(obj), procId, params->priority); if (flag == FALSE) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (4); } if (params->openFlag == FALSE) { obj->self->creatorProcId = MultiProc_self(); obj->self->notifyEventId = TransportShm_notifyEventId; obj->self->priority = obj->priority; /* Store the GateMP sharedAddr in the Attrs */ obj->self->gateMPAddr = ti_sdo_ipc_GateMP_getSharedAddr(obj->gate); obj->self->flag = TransportShm_UP; if (obj->cacheEnabled) { Cache_wbInv(obj->self, sizeof(TransportShm_Attrs), Cache_Type_ALL, TRUE); } } else { obj->other->flag = TransportShm_UP; if (obj->cacheEnabled) { Cache_wbInv(&(obj->other->flag), minAlign, Cache_Type_ALL, TRUE); } } obj->status = TransportShm_UP; return (0); }