/* Hand out the next transaction id for this kernel.
 * Ids come from an atomically incremented counter; the value 0 is reserved
 * to mean 'no-transaction', so it is skipped when the counter wraps around. */
c_ulong
v_kernelGetTransactionId (
    v_kernel _this)
{
    c_ulong result;

    result = pa_increment(&_this->transactionCount);
    if (result == 0) {
        /* the value '0' is reserved to specify 'no-transaction' */
        result = pa_increment(&_this->transactionCount);
    }
    return result;
}
/* Report, at most once, that shared memory is running very low.
 * Only compiled in when the registered callback cannot be invoked directly
 * from a different process (see dds1958). */
static void
issueLowMemoryWarning(
    c_voidp arg)
{
#ifdef DDS_1958_CANNOT_CALL_REGISTERED_FUNC_PTR_FROM_DIFF_PROCESS
    v_handleServer server = v_handleServer(arg);

    /* dds1958: ES: Cheap pre-check: once a warning has been issued the
     * counter is non-zero, so every later low-memory notification skips the
     * (more expensive) atomic increment entirely. The check alone is not a
     * definitive yes/no, but the increment below decides which thread
     * actually reports, so the warning is still issued exactly once. Only
     * the very first invocation pays for both the check and the increment. */
    if (server->lowMemWarningCount == 0) {
        /* increment the warning count; only the winner (result 1) reports */
        if (pa_increment(&server->lowMemWarningCount) == 1) {
            OS_REPORT(OS_WARNING,
                      "issueLowMemoryWarning",0,
                      "Shared memory is running very low!");
        }
    }
#endif
}
/** \brief Allocate memory from heap
 *
 * \b os_malloc calls \b ptr_malloc which is a function pointer
 * which defaults to \b malloc, but can be redefined via
 * \b os_heapSetService.
 *
 * In OSPL_STRICT_MEM builds the returned pointer is preceded by a
 * 24-byte header: [allocation size @0][malloc count @8][magic @16],
 * and the payload is zero-filled.
 */
void *
os_malloc (
    os_size_t size)
{
    char *ptr;

    /* Allocation statistics.
     * NOTE(review): these three updates are plain (non-atomic) increments,
     * unlike the pa_increment below — presumably tolerated because they are
     * statistics-only; confirm if exact counts are required under threads. */
    alloc_delta += (os_uint32)size;
    alloc_cum += (os_uint32)size;
    alloc_count++;
#ifdef OSPL_STRICT_MEM
    /* Allow 24 bytes so we can store the allocation size, magic number
       and malloc count, ( and keep alignement ) */
    ptr = ptr_malloc((size_t)size+24);
    if ( ptr != NULL )
    {
        /* Store the size at the start of the header ... */
        *((size_t *)ptr) = size;
        /* ... then advance past the header so the caller never sees it. */
        ptr += 24;
        memset(ptr, 0, size);
        /* Magic signature sits in the last 8 header bytes (ptr-1),
         * the allocation count in the 8 bytes before that (ptr-2). */
        *(((uint64_t*)ptr)-1) = OS_MALLOC_MAGIC_SIG;
        *(((uint64_t*)ptr)-2) = pa_increment(&alloccnt);
    }
#else
    ptr = ptr_malloc((size_t)size);
#endif
    return (ptr);
}
/* Take an additional reference on an in_object and return it.
 * In release builds a NULL argument is tolerated (NULL is returned);
 * in debug builds the asserts below reject it. */
in_object
in_objectKeep(
    in_object _this)
{
    in_object kept = NULL;

    assert(_this);
    assert(_this->confidence == IN_OBJECT_CONFIDENCE);
    assert(_this->refCount >= 1);

    /* Check whether the object is allocated to support a keep on a NULL
     * pointer. */
    if (_this) {
        pa_increment(&(_this->refCount));
        kept = _this;
    }
    return kept;
}
/* Record the allocation of one object of the given kind in the global
 * statistics counters. The very first allocation ever (counter goes 0->1)
 * also zeroes the per-kind counter tables. Always returns TRUE. */
static c_bool
doAdd(
    d_kind kind)
{
    c_ulong idx;

    if (pa_increment(&maxObjectCount) == 1) {
        /* First object ever: initialise the per-kind counter tables. */
        for (idx = 0; idx < D_KINDCOUNT; idx++) {
            typedObjectCount[idx] = 0;
            maxTypedObjectCount[idx] = 0;
        }
    }
    pa_increment(&allocationCount);
    pa_increment(&(typedObjectCount[kind]));
    pa_increment(&(maxTypedObjectCount[kind]));
    return TRUE;
}
/** \brief Reallocate a heap block previously obtained from os_malloc.
 *
 * Delegates to \b ptr_realloc. In OSPL_STRICT_MEM builds the 24-byte
 * header written by os_malloc (size, malloc count, magic signature) is
 * validated, moved along with the block, and rewritten for the new size.
 */
void *
os_realloc(
    void *memblk,
    os_size_t size)
{
    unsigned char *ptr = (unsigned char *)memblk;
#ifdef OSPL_STRICT_MEM
    size_t origsize = 0;

    if ( ptr != NULL )
    {
        size_t i;
        /* Recover the original payload size stored at the start of the
         * 24-byte header that precedes the user pointer. */
        origsize = *((size_t *)(ptr - 24));
        /* Detect double-free / corruption before touching the block. */
        assert (*(((uint64_t*)ptr)-1) != OS_FREE_MAGIC_SIG);
        assert (*(((uint64_t*)ptr)-1) == OS_MALLOC_MAGIC_SIG);
        /* Mark the old block as freed.
         * NOTE(review): if ptr_realloc fails below, the original block keeps
         * this FREE signature — confirm that callers treat a NULL return as
         * fatal, otherwise a later access would trip the asserts above. */
        *(((uint64_t*)ptr)-1) = OS_FREE_MAGIC_SIG;
        /* Scan the payload.
         * NOTE(review): what this detects depends on the semantics of
         * OS_MAGIC_SIG_CHECK (not visible here) — presumably an overrun /
         * corruption pattern check; confirm against its definition. */
        for ( i = 0; i+7 < origsize; i++ )
        {
            assert( OS_MAGIC_SIG_CHECK( &ptr[i] ) );
        }
        /* Hand the real (header-inclusive) start address to ptr_realloc. */
        ptr -= 24;
    }
    if ( size > 0 )
    {
        /* Grow the request to cover the header as well. */
        size += 24;
    }
#endif
    ptr = ptr_realloc(ptr, size);
#ifdef OSPL_STRICT_MEM
    if ( size > 0 && ptr != NULL )
    {
        /* Back to the payload size requested by the caller. */
        size -= 24;
        if ( size > origsize )
        {
            /* Zero the newly grown tail, matching os_malloc's zero-fill. */
            memset( ptr + 24 + origsize, 0, size - origsize );
        }
        /* Rewrite the header for the new size and advance past it. */
        *((size_t *)ptr) = size;
        ptr += 24;
        *(((uint64_t*)ptr)-1) = OS_MALLOC_MAGIC_SIG;
        *(((uint64_t*)ptr)-2) = pa_increment(&alloccnt);
    }
#endif
    return (ptr);
}
/** \brief OS layer initialization
 *
 * \b os_osInit calls:
 * - \b os_sharedMemoryInit
 * - \b os_threadInit
 *
 * Only the first call (nesting counter transitions 0 -> 1) performs the
 * actual module initialization; later calls merely report the depth.
 */
void
os_osInit (
    void)
{
    os_uint32 initCount;

    initCount = pa_increment(&_ospl_osInitCount);
    if (initCount == 1) {
        os_sharedMemoryInit();
        os_threadModuleInit();
    } else {
        /* Fixed format specifier: initCount is unsigned (os_uint32), so it
         * must be printed with %u rather than %d — mismatched printf-style
         * specifiers are undefined behaviour for large values. */
        OS_REPORT_1(OS_INFO, "os_osInit", 1,
                    "OS-layer initialization called %u times", initCount);
    }
    return;
}
/** \brief OS layer initialization
 *
 * \b os_osInit calls:
 * - \b os_sharedMemoryInit
 * - \b os_threadInit
 *
 * Uses an atomic nesting counter so only the first caller performs the
 * module initialization; subsequent calls are no-ops.
 */
void
os_osInit(void)
{
    /* Only the caller that moves the counter from 0 to 1 initializes. */
    if (pa_increment(&_ospl_osInitCount) == 1) {
        os_debugModeInit();
        os_timeModuleInit();
        os_processModuleInit();
        os_sharedMemoryInit();
        os_threadModuleInit();
    }
    /* Else initialization is already done. */
    return;
}
/* Attach to a named kernel object in the database.
 * Returns the kernel with its user count incremented, or NULL when the name
 * is missing, the lookup fails, the object is not a v_kernel, or the kernel
 * turns out to be unreferenced. Errors are reported via OS_REPORT. */
v_kernel
v_kernelAttach(
    c_base base,
    const c_char *name)
{
    v_kernel kernel;
    os_uint32 attachCount;

    if (name == NULL) {
        OS_REPORT(OS_ERROR, "v_kernelAttach",0,
                  "Failed to lookup kernel, specified kernel name = <NULL>");
        return NULL;
    }
    kernel = c_lookup(base,name);
    if (kernel == NULL) {
        OS_REPORT_1(OS_ERROR, "v_kernelAttach",0,
                    "Failed to lookup kernel '%s' in Database", name);
        return NULL;
    }
    if (c_checkType(kernel,"v_kernel") != kernel) {
        c_free(kernel);
        OS_REPORT_1(OS_ERROR, "v_kernelAttach",0,
                    "Object '%s' is apparently not of type 'v_kernel'", name);
        return NULL;
    }
    attachCount = pa_increment(&kernel->userCount);
    if (attachCount == 1) {
        /* Result of the attach may NEVER be 1, as that would mean that an
         * attach to an unreferenced kernel succeeded. If it happens, undo
         * increment and free reference to returned kernel. */
        pa_decrement(&kernel->userCount);
        c_free(kernel);
        OS_REPORT_1(OS_ERROR, "v_kernelAttach",0,
                    "Operation aborted: Object '%s' is apparently an "
                    "unreferenced kernel object.", name);
        return NULL;
    }
    return kernel;
}
/** \brief OS layer initialization * * \b os_osInit calls: * - \b os_sharedMemoryInit * - \b os_threadInit */ void os_osInit (void) { os_uint32 initCount; initCount = pa_increment(&_ospl_osInitCount); if (initCount == 1) { os_mutexModuleInit(); os_reportInit(OS_FALSE); /*os_processModuleInit();*/ os_threadModuleInit(); os_sharedMemoryInit(); } else { #ifndef NDEBUG OS_REPORT_1(OS_INFO, "os_osInit", 1, "OS-layer initialization called %d times", initCount); #endif /* NDEBUG */ } return; }
/** \brief OS layer deinitialization
 *
 * \b os_osExit calls:
 * - \b os_sharedMemoryExit
 * - \b os_threadExit
 *
 * The last matched exit (counter reaches 0) performs the actual teardown.
 * An unmatched exit (counter wraps below 0) is undone and reported.
 */
void
os_osExit (
    void)
{
    os_uint32 remaining;

    remaining = pa_decrement(&_ospl_osInitCount);
    if (remaining == 0) {
        os_sharedMemoryExit();
        os_threadModuleExit();
    } else if ((remaining + 1) < remaining) {
        /* The 0 boundary is passed, so os_osExit is called more often than
         * os_osInit. Therefore undo decrement as nothing happened and warn. */
        remaining = pa_increment(&_ospl_osInitCount);
        OS_REPORT(OS_WARNING, "os_osExit", 1,
                  "OS-layer not initialized");
        /* Fail in case of DEV, as it is incorrect API usage */
        assert(0);
    }
    return;
}
/* Take an additional reference on a d_object and return it.
 * In release builds a NULL argument is tolerated (NULL is returned);
 * the asserts reject it in debug builds. */
d_object
d_objectKeep(
    d_object object)
{
    d_object result = NULL;

    assert(object);
    assert(object->confidence == D_CONFIDENCE);

    if (object) {
#if CHECK_REF
        /* Fixed: the trace previously referenced a bare 'refCount' that is
         * not in scope here. Capture pa_increment's return value instead,
         * which also avoids the race of re-reading object->refCount after
         * the atomic increment. */
        c_ulong newCount;

        newCount = pa_increment(&(object->refCount));
        if (object->kind == CHECK_REF_TYPE) {
            UT_TRACE("\n\n============ Keep(%p): %d -> %d =============\n",
                     (void*)object, newCount-1, newCount);
        }
#else
        pa_increment(&(object->refCount));
#endif
        result = object;
    }
    return result;
}
/* Initialise the user-layer singleton ('user').
 * Reference-counted: the first caller allocates and initialises the admin
 * object and installs the signal handler callbacks; later callers wait
 * briefly for the first caller to finish. Returns U_RESULT_OK on success,
 * U_RESULT_OUT_OF_MEMORY or U_RESULT_INTERNAL_ERROR on failure. */
u_result
u_userInitialise (
    void)
{
    u_user u;
    u_result rm = U_RESULT_OK;
    os_mutexAttr mutexAttr;
    os_uint32 initCount;
    void* initUser;
    os_result osResult;
    os_signalHandlerExitRequestCallback exitRequestCallback;
    os_signalHandlerExceptionCallback exceptionCallback;

    initCount = pa_increment(&_ospl_userInitCount);
    /* If initCount == 0 then an overflow has occurred.
     * This can only realistically happen when u_userDetach()
     * is called more often than u_userInitialize(). */
    assert(initCount != 0);

    os_osInit();
    if (initCount == 1) {
        /* Will start allocating the object, so it should currently be empty. */
        assert(user == NULL);

        /* Use indirection, as user != NULL is a precondition for user-layer
         * functions, so make sure it only holds true when the user-layer is
         * initialized. */
        initUser = os_malloc(sizeof(C_STRUCT(u_user)));
        if (initUser == NULL) {
            /* Initialization failed, so decrement the initialization counter. */
            pa_decrement(&_ospl_userInitCount);
            os_osExit();
            OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                      "Allocation of user admin failed: out of memory.");
            rm = U_RESULT_OUT_OF_MEMORY;
        } else {
            u = u_user(initUser);
            os_mutexAttrInit(&mutexAttr);
            mutexAttr.scopeAttr = OS_SCOPE_PRIVATE;
            os_mutexInit(&u->mutex,&mutexAttr);
            osResult = os_signalHandlerNew();
            if (osResult != os_resultSuccess) {
                /* Initialization did not succeed, undo increment and return error.
                 * NOTE(review): unlike the allocation-failure path above, this
                 * path calls neither os_osExit() nor frees initUser — confirm
                 * whether that is intentional or a leak. */
                initCount = pa_decrement(&_ospl_userInitCount);
                OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                          "Failed to create the signal handler. 
No proper signal handling can be performed.");
                rm = U_RESULT_INTERNAL_ERROR;
            } else {
                /* Install the exit-request callback; finding a different
                 * pre-existing callback is treated as an internal error. */
                exitRequestCallback = os_signalHandlerSetExitRequestCallback(u__userExitRequestCallbackWrapper);
                if (exitRequestCallback && exitRequestCallback != u__userExitRequestCallbackWrapper) {
                    initCount = pa_decrement(&_ospl_userInitCount);
                    OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                              "Replaced an exit request callback on the signal handler while this was not expected.");
                    rm = U_RESULT_INTERNAL_ERROR;
                }
                if (rm == U_RESULT_OK) {
                    /* Same check for the exception callback. */
                    exceptionCallback = os_signalHandlerSetExceptionCallback(u__userExceptionCallbackWrapper);
                    if (exceptionCallback && exceptionCallback != u__userExceptionCallbackWrapper) {
                        initCount = pa_decrement(&_ospl_userInitCount);
                        OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                                  "Replaced an exception callback on the signal handler while this was not expected.");
                        rm = U_RESULT_INTERNAL_ERROR;
                    }
                }
                if (rm == U_RESULT_OK) {
                    u->domainCount = 0;
                    u->protectCount = 0;
                    u->detachThreadId = OS_THREAD_ID_NONE;
                    /* This will mark the user-layer initialized */
                    user = initUser;
                }
            }
        }
    } else {
        if (user == NULL) {
            /* NOTE(review): comment says 100ms, but if os_time is
             * {seconds, nanoseconds} then 100000 ns is 100us — confirm
             * the intended duration against the os_time definition. */
            os_time sleep = {0, 100000}; /* 100ms */
            /* Another thread is currently initializing the user-layer. Since
             * user != NULL is a precondition for calls after u_userInitialise(),
             * a sleep is performed, to ensure that (if succeeded) successive
             * user-layer calls will also actually pass.*/
            os_nanoSleep(sleep);
        }
        if (user == NULL) {
            /* Initialization did not succeed, undo increment and return error */
            initCount = pa_decrement(&_ospl_userInitCount);
            OS_REPORT_1(OS_ERROR,"u_userInitialise",0,
                        "Internal error: User-layer should be initialized "
                        "(initCount = %d), but user == NULL (waited 100ms).",
                        initCount);
            rm = U_RESULT_INTERNAL_ERROR;
        }
    }
    return rm;
}
/* Create a new subscriber in the kernel for participant 'p'.
 * Returns NULL when the qos is inconsistent or when the partition policy
 * names a partition without read access. For shared ('share.enable')
 * subscribers, an existing subscriber with the same share name is reused
 * (its share count incremented) instead of creating a duplicate. */
v_subscriber
v_subscriberNew(
    v_participant p,
    const c_char *name,
    v_subscriberQos qos,
    c_bool enable)
{
    v_kernel kernel;
    v_subscriber s;
    v_subscriberQos q;
    v_entity found;
    v_accessMode access;

    kernel = v_objectKernel(p);
    /* ES, dds1576: If a partition policy was provided then we need to verify
     * if the partition policy does not contain any partition expressions for
     * which read access is not allowed.
     * If read access is not allowed for one of the partitions listed in the
     * partition policy of the qos, then the subscriber will not be created at
     * all. */
    if (qos && qos->partition) {
        access = v_kernelPartitionAccessMode(kernel, qos->partition);
    } else {
        access = V_ACCESS_MODE_READ_WRITE;/* default */
    }
    if (access == V_ACCESS_MODE_READ_WRITE || access == V_ACCESS_MODE_READ) {
        q = v_subscriberQosNew(kernel,qos);
        if (q != NULL) {
            s = v_subscriber(v_objectNew(kernel,K_SUBSCRIBER));
            v_observerInit(v_observer(s),name, NULL, enable);
            s->qos = q;
            c_mutexInit(&s->sharesMutex, SHARED_MUTEX);
            if (q->share.enable) {
                /* Shared subscriber: register under the share name while
                 * holding the kernel shares lock. */
                v_lockShares(kernel);
                found = v_addShareUnsafe(kernel,v_entity(s));
                if (found != v_entity(s)) {
                    /* A subscriber with this share name already exists:
                     * discard the one just created and reuse the existing.
                     * Make sure to set the partition list to NULL, because
                     * v_publicFree will cause a crash in the
                     * v_subscriberDeinit otherwise. */
                    s->partitions = NULL;
                    /*v_publicFree to free reference held by the handle server.*/
                    v_publicFree(v_public(s));
                    /*Now free the local reference as well.*/
                    c_free(s);
                    pa_increment(&(v_subscriber(found)->shareCount));
                    v_unlockShares(kernel);
                    return c_keep(found);
                }
                s->shares = c_tableNew(v_kernelType(kernel,K_READER),
                                       "qos.share.name");
            } else {
                s->shares = NULL;
            }
            s->shareCount = 1;
            s->partitions = v_partitionAdminNew(kernel);
            s->readers = c_setNew(v_kernelType(kernel,K_READER));
            /* Shared subscribers are owned by the built-in participant so
             * they outlive the creating participant. */
            if (q->share.enable) {
                s->participant = kernel->builtin->participant;
            } else {
                s->participant = p;
            }
            c_lockInit(&s->lock,SHARED_LOCK);
            v_participantAdd(v_participant(s->participant),v_entity(s));
            if (q->share.enable) {
                /* Lock was taken above; release only on this first-creation
                 * path (the reuse path released it before returning). */
                v_unlockShares(kernel);
            }
            if (enable) {
                v_subscriberEnable(s);
            }
        } else {
            OS_REPORT(OS_ERROR, "v_subscriberNew", 0,
                      "Subscriber not created: inconsistent qos");
            s = NULL;
        }
    } else {
        OS_REPORT(OS_ERROR, "v_subscriberNew", 0,
                  "Subscriber not created: Access rights for one of the partitions listed in the partition list was not sufficient (i.e. read or readwrite).");
        s = NULL;
    }
    return s;
}