/* Always returns TRUE so that it can be called from inside assert(): the
 * allocation statistics are then only maintained in debug builds. */
static c_bool
doSub(
    d_kind kind)
{
    pa_decrement(&allocationCount);
    pa_decrement(&(typedObjectCount[kind]));
    return TRUE;
}
void
d_objectFree(
    d_object object,
    d_kind kind)
{
    os_uint32 refCount;

    OS_UNUSED_ARG(kind);
    assert(d_objectIsValid(object, kind) == TRUE);

    if (object) {
        assert(object->confidence == D_CONFIDENCE);
        assert(object->kind == kind);
        assert(object->refCount >= 1);

        refCount = pa_decrement(&(object->refCount));

        if (refCount == 0) {
            if (object->deinit) {
                object->deinit(object);
            }
            object->confidence = D_CONFIDENCE_NULL;
            object->kind = D_BAD_TYPE;
            os_free(object);
            /* doSub() always returns TRUE; wrapping it in assert() restricts
             * the statistics bookkeeping to debug builds. */
            assert(doSub(kind));
        }
#if CHECK_REF
        if (kind == CHECK_REF_TYPE) {
            UT_TRACE("\n\n============ Free(%p): %d -> %d =============\n",
                     (void*)object, refCount + 1, refCount);
        }
#endif
    }
}
void
in_objectFree(
    in_object _this)
{
    os_uint32 refCount;

    /* Check whether the object has been allocated, to support calling this
     * function with a NULL pointer. */
    if (_this) {
        assert(_this->confidence == IN_OBJECT_CONFIDENCE);
        assert(_this->refCount >= 1);

        refCount = pa_decrement(&(_this->refCount));

        if (refCount == 0) {
            if (_this->deinit) {
                _this->deinit(_this);
            }
            /* doSub() only has an effect in debug builds; see above. */
            assert(doSub(_this->kind));
            _this->confidence = IN_OBJECT_CONFIDENCE_NULL;
            _this->kind = IN_OBJECT_KIND_INVALID;
            os_free(_this);
        }
    }
}
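/* A minimal, self-contained sketch of the reference-counting pattern shared
 * by d_objectFree() and in_objectFree(), written with C11 atomics instead of
 * the pa_/os_ abstractions. All names here (ref_object, ref_release,
 * live_objects, count_down) are hypothetical. Two points the sketch makes
 * explicit: only the caller whose decrement takes the count to zero runs the
 * destructor, so it runs exactly once; and the live-object statistic is
 * decremented inside assert(), so, like doSub() above, it is maintained in
 * debug builds only. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

static atomic_uint live_objects;

typedef struct ref_object {
    atomic_uint refCount;
    void (*deinit)(struct ref_object *);
} ref_object;

/* Always returns true so it can live inside assert(), like doSub(). */
bool
count_down(void)
{
    atomic_fetch_sub(&live_objects, 1);
    return true;
}

void
ref_release(ref_object *o)
{
    if (o) {
        /* atomic_fetch_sub returns the value before the decrement. */
        unsigned before = atomic_fetch_sub(&o->refCount, 1);
        assert(before >= 1);
        if (before == 1) {
            /* We dropped the last reference; destroy exactly once. */
            if (o->deinit) {
                o->deinit(o);
            }
            free(o);
            assert(count_down()); /* debug-only statistics, as in doSub() */
        }
    }
}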
void
v_subscriberFree(
    v_subscriber s)
{
    v_kernel kernel;
    v_participant p;
    v_reader o;
    v_entity found;
    c_long sc;

    kernel = v_objectKernel(s);

    /* pa_decrement returns the new value; a negative result after the cast
     * means the unsigned shareCount wrapped around, i.e. the subscriber was
     * freed more often than it was shared. */
    sc = (c_long)pa_decrement(&(s->shareCount));
    if (sc > 0) {
        return;
    }

    if (sc == 0) {
        v_observableRemoveObserver(v_observable(kernel->groupSet),
                                   v_observer(s), NULL);
        if (s->qos->share.enable) {
            found = v_removeShare(kernel, v_entity(s));
            assert(found == v_entity(s));
            c_free(found);
        }
        while ((o = c_take(s->readers)) != NULL) {
            switch (v_objectKind(o)) {
            case K_DATAREADER:
                v_dataReaderFree(v_dataReader(o));
                break;
            case K_DELIVERYSERVICE:
                v_deliveryServiceFree(v_deliveryService(o));
                break;
            case K_GROUPQUEUE:
                v_groupQueueFree(v_groupQueue(o));
                break;
            case K_NETWORKREADER:
                v_networkReaderFree(v_networkReader(o));
                break;
            default:
                OS_REPORT_1(OS_ERROR, "v_subscriber", 0,
                            "Unknown reader kind %d", v_objectKind(o));
                assert(FALSE);
                break;
            }
            c_free(o);
        }
        p = v_participant(s->participant);
        if (p != NULL) {
            v_participantRemove(p, v_entity(s));
            s->participant = NULL;
        }
        v_publicFree(v_public(s));
    } else {
        OS_REPORT_1(OS_ERROR, "v_subscriberFree", 0,
                    "Subscriber already freed (shareCount is now %d).", sc);
        assert(sc == 0);
    }
}
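/* A minimal, self-contained sketch of the drain-and-dispatch idiom used by
 * v_subscriberFree() above: repeatedly take an element from the container
 * until it is empty, release it according to its runtime kind, and treat an
 * unknown kind as a programming error. All names (reader, reader_take,
 * subscriber_drain, the KIND_* values) are hypothetical stand-ins for
 * c_take() and the v_objectKind() dispatch, not part of the original API. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { KIND_DATAREADER, KIND_GROUPQUEUE } reader_kind;

typedef struct reader {
    reader_kind kind;
    struct reader *next;
} reader;

/* Pop the head of the list, as c_take() pops an element of the collection. */
reader *
reader_take(reader **head)
{
    reader *r = *head;
    if (r) {
        *head = r->next;
    }
    return r;
}

void
subscriber_drain(reader **readers)
{
    reader *r;
    while ((r = reader_take(readers)) != NULL) {
        switch (r->kind) {
        case KIND_DATAREADER:
        case KIND_GROUPQUEUE:
            /* Kind-specific cleanup would go here. */
            break;
        default:
            /* Unknown kind: report, and fail fast in debug builds, as the
             * default branch of v_subscriberFree() does. */
            fprintf(stderr, "Unknown reader kind %d\n", (int)r->kind);
            assert(0);
            break;
        }
        free(r);
    }
}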
void
v_kernelDetach(
    v_kernel k)
{
    os_uint32 attachCount;

    assert(C_TYPECHECK(k,v_kernel));

    attachCount = pa_decrement(&k->userCount);
    /* Assert on zero-boundary crossing: if userCount was already 0, the
     * decrement wraps to the maximum os_uint32 value, for which
     * attachCount + 1 overflows back to 0. */
    assert(attachCount + 1 > attachCount);
}
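/* Why assert(attachCount + 1 > attachCount) detects the zero-boundary
 * crossing: pa_decrement() returns the new value, so decrementing a counter
 * that is already 0 wraps the unsigned value around to its maximum, and
 * maximum + 1 wraps again to 0, which is not greater than the maximum. A
 * stand-alone illustration using C11 atomics (userCount and detach are
 * hypothetical names): */
#include <assert.h>
#include <stdatomic.h>

static atomic_uint userCount;

void
detach(void)
{
    /* atomic_fetch_sub returns the old value; subtract 1 to get the new
     * value, like pa_decrement(). Decrementing 0 yields UINT_MAX. */
    unsigned after = atomic_fetch_sub(&userCount, 1) - 1;
    /* Holds for every value of 'after' except UINT_MAX, i.e. fails exactly
     * when detach() is called more often than the matching attach. */
    assert(after + 1 > after);
}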
v_kernel
v_kernelAttach(
    c_base base,
    const c_char *name)
{
    v_kernel kernel = NULL;
    os_uint32 attachCount;

    if (name == NULL) {
        OS_REPORT(OS_ERROR, "v_kernelAttach", 0,
                  "Failed to lookup kernel: specified kernel name is <NULL>.");
    } else {
        kernel = c_lookup(base, name);
        if (kernel == NULL) {
            OS_REPORT_1(OS_ERROR, "v_kernelAttach", 0,
                        "Failed to lookup kernel '%s' in database.", name);
        } else if (c_checkType(kernel, "v_kernel") != kernel) {
            c_free(kernel);
            kernel = NULL;
            OS_REPORT_1(OS_ERROR, "v_kernelAttach", 0,
                        "Object '%s' is not of type 'v_kernel'.", name);
        } else {
            attachCount = pa_increment(&kernel->userCount);
            if (attachCount == 1) {
                /* The result of the attach may NEVER be 1, as that would mean
                 * that an attach to an unreferenced kernel succeeded. If it
                 * happens, undo the increment and free the reference to the
                 * returned kernel. */
                pa_decrement(&kernel->userCount);
                c_free(kernel);
                kernel = NULL;
                OS_REPORT_1(OS_ERROR, "v_kernelAttach", 0,
                            "Operation aborted: object '%s' is an "
                            "unreferenced kernel object.", name);
            }
        }
    }
    return kernel;
}
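/* The attachCount == 1 rejection above prevents resurrecting a dead kernel:
 * if the increment yields 1, the count must have been 0, meaning the object
 * was already fully detached and may be in the middle of teardown. A sketch
 * of the same invariant with C11 atomics; shared_t and shared_attach are
 * hypothetical names, not part of the kernel API. */
#include <stdatomic.h>
#include <stddef.h>

typedef struct {
    atomic_uint userCount;
} shared_t;

shared_t *
shared_attach(shared_t *s)
{
    /* atomic_fetch_add returns the value before the increment. */
    unsigned before = atomic_fetch_add(&s->userCount, 1);
    if (before == 0) {
        /* The object was unreferenced: undo the increment and refuse the
         * attach, exactly as v_kernelAttach() does for attachCount == 1. */
        atomic_fetch_sub(&s->userCount, 1);
        return NULL;
    }
    return s;
}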
/** \brief OS layer deinitialization
 *
 * \b os_osExit calls:
 * - \b os_sharedMemoryExit
 * - \b os_threadModuleExit
 */
void
os_osExit(
    void)
{
    os_uint32 initCount;

    initCount = pa_decrement(&_ospl_osInitCount);

    if (initCount == 0) {
        os_sharedMemoryExit();
        os_threadModuleExit();
    } else if ((initCount + 1) < initCount) {
        /* The zero boundary has been crossed, meaning os_osExit was called
         * more often than os_osInit. Undo the decrement, as nothing actually
         * happened, and warn. */
        initCount = pa_increment(&_ospl_osInitCount);
        OS_REPORT(OS_WARNING, "os_osExit", 1, "OS-layer not initialized");
        /* Fail in development builds, as this is incorrect API usage. */
        assert(0);
    }
    return;
}
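/* os_osInit/os_osExit form a nesting pair: only the exit that brings the
 * count back to zero performs the real teardown, and an unmatched exit (the
 * count wrapping below zero) is undone and reported. A self-contained sketch
 * of that discipline with C11 atomics; module_init and module_exit are
 * hypothetical names: */
#include <assert.h>
#include <stdatomic.h>

static atomic_uint moduleInitCount;

void
module_init(void)
{
    if (atomic_fetch_add(&moduleInitCount, 1) == 0) {
        /* First caller: perform the real one-time initialization here. */
    }
}

void
module_exit(void)
{
    /* New value after the decrement, like pa_decrement(). */
    unsigned after = atomic_fetch_sub(&moduleInitCount, 1) - 1;
    if (after == 0) {
        /* Last caller: perform the real teardown here. */
    } else if (after + 1 < after) {
        /* The zero boundary was crossed: an exit without a matching init.
         * Undo the decrement, as os_osExit() does, and fail in debug
         * builds. */
        atomic_fetch_add(&moduleInitCount, 1);
        assert(0);
    }
}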
u_result
u_userInitialise(
    void)
{
    u_user u;
    u_result rm = U_RESULT_OK;
    os_mutexAttr mutexAttr;
    os_uint32 initCount;
    void* initUser;
    os_result osResult;
    os_signalHandlerExitRequestCallback exitRequestCallback;
    os_signalHandlerExceptionCallback exceptionCallback;

    initCount = pa_increment(&_ospl_userInitCount);
    /* If initCount == 0, an overflow has occurred. This can only
     * realistically happen when u_userDetach() is called more often than
     * u_userInitialise(). */
    assert(initCount != 0);

    os_osInit();

    if (initCount == 1) {
        /* We will now start allocating the object, so it should currently be
         * empty. */
        assert(user == NULL);

        /* Use an indirection: user != NULL is a precondition for user-layer
         * functions, so make sure it only holds true once the user-layer is
         * fully initialized. */
        initUser = os_malloc(sizeof(C_STRUCT(u_user)));
        if (initUser == NULL) {
            /* Initialization failed, so decrement the initialization
             * counter. */
            pa_decrement(&_ospl_userInitCount);
            os_osExit();
            OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                      "Allocation of user admin failed: out of memory.");
            rm = U_RESULT_OUT_OF_MEMORY;
        } else {
            u = u_user(initUser);
            os_mutexAttrInit(&mutexAttr);
            mutexAttr.scopeAttr = OS_SCOPE_PRIVATE;
            os_mutexInit(&u->mutex, &mutexAttr);

            osResult = os_signalHandlerNew();
            if (osResult != os_resultSuccess) {
                /* Initialization did not succeed; undo the increment and
                 * return an error. */
                initCount = pa_decrement(&_ospl_userInitCount);
                OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                          "Failed to create the signal handler. No proper "
                          "signal handling can be performed.");
                rm = U_RESULT_INTERNAL_ERROR;
            } else {
                exitRequestCallback = os_signalHandlerSetExitRequestCallback(
                        u__userExitRequestCallbackWrapper);
                if (exitRequestCallback &&
                    exitRequestCallback != u__userExitRequestCallbackWrapper) {
                    initCount = pa_decrement(&_ospl_userInitCount);
                    OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                              "Unexpectedly replaced an exit request callback "
                              "on the signal handler.");
                    rm = U_RESULT_INTERNAL_ERROR;
                }
                if (rm == U_RESULT_OK) {
                    exceptionCallback = os_signalHandlerSetExceptionCallback(
                            u__userExceptionCallbackWrapper);
                    if (exceptionCallback &&
                        exceptionCallback != u__userExceptionCallbackWrapper) {
                        initCount = pa_decrement(&_ospl_userInitCount);
                        OS_REPORT(OS_ERROR, "u_userInitialise", 0,
                                  "Unexpectedly replaced an exception callback "
                                  "on the signal handler.");
                        rm = U_RESULT_INTERNAL_ERROR;
                    }
                }
                if (rm == U_RESULT_OK) {
                    u->domainCount = 0;
                    u->protectCount = 0;
                    u->detachThreadId = OS_THREAD_ID_NONE;
                    /* This marks the user-layer as initialized. */
                    user = initUser;
                }
            }
        }
    } else {
        if (user == NULL) {
            os_time sleep = {0, 100000000}; /* 100ms */
            /* Another thread is currently initializing the user-layer. Since
             * user != NULL is a precondition for calls made after
             * u_userInitialise(), sleep to give that thread time to finish,
             * so that (if it succeeds) successive user-layer calls will also
             * actually pass. */
            os_nanoSleep(sleep);
        }
        if (user == NULL) {
            /* Initialization did not succeed; undo the increment and return
             * an error. */
            initCount = pa_decrement(&_ospl_userInitCount);
            OS_REPORT_1(OS_ERROR, "u_userInitialise", 0,
                        "Internal error: user-layer should be initialized "
                        "(initCount = %d), but user == NULL (waited 100ms).",
                        initCount);
            rm = U_RESULT_INTERNAL_ERROR;
        }
    }
    return rm;
}
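/* u_userInitialise() uses the increment to elect a single initializing
 * thread: the caller whose increment yields 1 constructs the singleton,
 * while concurrent callers observe a higher count, sleep briefly, and
 * re-check the published pointer before concluding failure. A compact sketch
 * of that election using C11 atomics and POSIX nanosleep(); all names here
 * (admin_t, admin_init, useCount) are hypothetical. Note the publication
 * order: the pointer is stored only after the object is fully constructed,
 * so admin != NULL implies a usable admin. */
#include <stdatomic.h>
#include <stdlib.h>
#include <time.h>

typedef struct {
    int domainCount;
} admin_t;

static atomic_uint useCount;
static admin_t *_Atomic admin; /* NULL until fully initialized */

int
admin_init(void)
{
    if (atomic_fetch_add(&useCount, 1) == 0) {
        /* We are the elected initializer. */
        admin_t *a = malloc(sizeof(*a));
        if (a == NULL) {
            atomic_fetch_sub(&useCount, 1); /* undo, as the original does */
            return -1;
        }
        a->domainCount = 0;
        atomic_store(&admin, a); /* publish last */
    } else if (atomic_load(&admin) == NULL) {
        /* Another thread is still initializing: wait ~100 ms, as
         * u_userInitialise() does, then re-check before giving up. */
        struct timespec delay = { 0, 100 * 1000 * 1000 };
        nanosleep(&delay, NULL);
        if (atomic_load(&admin) == NULL) {
            atomic_fetch_sub(&useCount, 1);
            return -1;
        }
    }
    return 0;
}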