/** Destroy @a lock and free its storage.
 * The lock must not be held when this is called. */
void _ump_osu_lock_term( _ump_osu_lock_t *lock )
{
    int err;

    UMP_DEBUG_ASSERT_POINTER( lock );

    /* Debug lock checking: the lock should be signalled (unheld) on terminate.
     * Not a hard guarantee, since another thread could have locked it
     * immediately beforehand. */
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
                      ("cannot terminate held lock\n") );

    err = pthread_mutex_destroy( &lock->mutex );
    UMP_DEBUG_ASSERT( 0 == err,
                      ("Incorrect mutex use detected: pthread_mutex_destroy call failed with error code %d\n", err) );

    /* ANY_UNLOCK locks carry extra state (condition variable + state flag)
     * that must be torn down too. */
    if ( lock->flags & _UMP_OSU_LOCKFLAG_ANYUNLOCK )
    {
        UMP_DEBUG_ASSERT( UMP_FALSE == lock->state,
                          ("terminate called on locked object %p\n", lock) );

        err = pthread_cond_destroy( &lock->condition );
        UMP_DEBUG_ASSERT( 0 == err,
                          ("Incorrect condition-variable use detected: pthread_cond_destroy call failed with error code %d\n", err) );
    }

    UMP_IGNORE( err ); /* err only read by the debug asserts above */

    _ump_osu_free( lock );
}
/** Perform a CPU cache-sync operation @a op on the region [@a address,
 * @a address + @a size) of the UMP memory behind @a memh, forwarding the
 * request to the arch layer via ump_arch_msync().
 * address == NULL means the start of the mapping; size == 0 means the whole
 * mapping. Returns (and records in the handle) the cached state reported by
 * the arch layer. */
UMP_API_EXPORT int ump_cpu_msync_now(ump_handle memh, ump_cpu_msync_op op, void* address, int size)
{
    int offset;
    ump_mem * mem = (ump_mem*)memh;

    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));

    /* Readout requests always go to the device driver; any other op is a
     * no-op when the handle already says the memory is uncached. */
    if ((UMP_MSYNC_READOUT_CACHE_ENABLED != op) && (0 == mem->is_cached))
    {
        return 0;
    }

    /* NULL address selects the base of the mapping. */
    if (NULL == address)
    {
        address = mem->mapped_mem;
    }
    offset = (int)((unsigned long)address - (unsigned long)mem->mapped_mem);

    /* A zero size selects the full mapping. */
    if (0 == size)
    {
        size = (int)mem->size;
    }

    UMP_DEBUG_ASSERT(0 < (mem->ref_count), ("Reference count too low"));
    UMP_DEBUG_ASSERT((size >= 0) && (size <= (int)mem->size), ("Memory size of passed handle too low"));
    UMP_DEBUG_ASSERT(NULL != mem->mapped_mem, ("Error in mapping pointer (not mapped)"));

    /* Clamp so the region does not extend past the end of the mapping. */
    if ((offset + size) > (int)mem->size)
    {
        size = mem->size - offset;
    }

    mem->is_cached = ump_arch_msync(mem->secure_id, mem->mapped_mem, mem->cookie, address, size, op);
    return mem->is_cached;
}
/** Obtain @a lock in mode @a mode, blocking until it becomes available.
 * Only _UMP_OSU_LOCKMODE_RW is supported. Dispatches on lock->flags:
 * plain/static locks use the pthread mutex directly; ANYUNLOCK locks use a
 * mutex-protected state flag plus a condition variable so any thread may
 * later unlock. Always returns _UMP_OSU_ERR_OK. */
_ump_osu_errcode_t _ump_osu_lock_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode)
{
    /* Parameter validation */
    UMP_DEBUG_ASSERT_POINTER( lock );
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode, ("unrecognised mode, %.8X\n", mode) );

    /** @note since only one flag can be set, we use a switch statement here.
     * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
     * implemented lock type */
    switch ( lock->flags )
    {
    case _UMP_OSU_LOCKFLAG_STATIC:
    case _UMP_OSU_LOCKFLAG_DEFAULT:
        /* Usual Mutex type */
        {
            int call_result;
            call_result = pthread_mutex_lock( &lock->mutex );
            UMP_DEBUG_ASSERT( 0 == call_result,
                              ("pthread_mutex_lock call failed with error code %d\n", call_result));
            UMP_IGNORE( call_result );
        }

        /* DEBUG tracking of previously locked state - occurs while lock is obtained */
        UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
                          ("This lock was already locked\n") );
        UMP_DEBUG_CODE( lock->locked_as = mode );
        break;

    case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
        /** @note Use of bitflags in a case statement ONLY works because this
         * is the ONLY flag that is supported */

        /* lock the mutex protecting access to the state field */
        pthread_mutex_lock( &lock->mutex );

        /* loop while locked (state is UMP_TRUE) */
        /* pthread_cond_wait unlocks the mutex, wait, and locks the mutex once unblocked */
        while ( UMP_TRUE == lock->state ) pthread_cond_wait( &lock->condition, &lock->mutex );

        /* DEBUG tracking of previously locked state - occurs while lock is obtained */
        UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
                          ("This lock was already locked\n") );
        UMP_DEBUG_CODE( lock->locked_as = mode );

        /* the state is UMP_FALSE (unlocked), so we set it to UMP_TRUE to indicate that
           it's locked and can return knowing that we own the lock */
        lock->state = UMP_TRUE;

        /* final unlock of the mutex */
        pthread_mutex_unlock(&lock->mutex);
        break;

    default:
        UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
        break;
    }

    return _UMP_OSU_ERR_OK;
}
/** Release @a lock, which must previously have been obtained in mode @a mode.
 * For plain/static locks this unlocks the pthread mutex; for ANYUNLOCK locks
 * it clears the state flag under the mutex and wakes one waiter on the
 * condition variable. */
void _ump_osu_lock_signal( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode )
{
    /* Parameter validation */
    UMP_DEBUG_ASSERT_POINTER( lock );
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode, ("unrecognised mode, %.8X\n", mode) );

    /** @note since only one flag can be set, we use a switch statement here.
     * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
     * implemented lock type */
    switch ( lock->flags )
    {
    case _UMP_OSU_LOCKFLAG_STATIC:
    case _UMP_OSU_LOCKFLAG_DEFAULT:
        /* Usual Mutex type */

        /* DEBUG tracking of previously locked state - occurs while lock is obtained */
        UMP_DEBUG_ASSERT( mode == lock->locked_as,
                          ("This lock was locked as==%.8X, but tried to unlock as mode==%.8X\n", lock->locked_as, mode));
        UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );

        {
            int call_result;
            call_result = pthread_mutex_unlock( &lock->mutex );
            /* Fixed diagnostic: this is the unlock call, not pthread_mutex_lock. */
            UMP_DEBUG_ASSERT( 0 == call_result,
                              ("pthread_mutex_unlock call failed with error code %d\n", call_result));
            UMP_IGNORE( call_result );
        }
        break;

    case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
        /** @note Use of bitflags in a case statement ONLY works because this
         * is the ONLY flag that is supported */

        pthread_mutex_lock(&lock->mutex);
        UMP_DEBUG_ASSERT( UMP_TRUE == lock->state,
                          ("Unlocking a _ump_osu_lock_t %p which is not locked\n", lock));

        /* DEBUG tracking of previously locked state - occurs while lock is obtained */
        UMP_DEBUG_ASSERT( mode == lock->locked_as,
                          ("This lock was locked as==%.8X, but tried to unlock as %.8X\n", lock->locked_as, mode ));
        UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );

        /* mark as unlocked */
        lock->state = UMP_FALSE;

        /* signal the condition, only wake a single thread */
        pthread_cond_signal(&lock->condition);

        pthread_mutex_unlock(&lock->mutex);
        break;

    default:
        UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
        break;
    }
}
/** Thread-safe one-time initialization of *pplock.
 * If *pplock is already non-NULL, returns immediately. Otherwise creates the
 * lock via _ump_osu_lock_init() under a process-global mutex, so exactly one
 * thread performs the initialization even when several race here.
 * Returns _UMP_OSU_ERR_OK on success (or if already initialized),
 * _UMP_OSU_ERR_FAULT if lock creation failed. */
_ump_osu_errcode_t _ump_osu_lock_auto_init( _ump_osu_lock_t **pplock, _ump_osu_lock_flags_t flags, u32 initial, u32 order )
{
    _ump_osu_errcode_t ret = _UMP_OSU_ERR_OK;
    int call_result;

    /* Validate parameters: */
    UMP_DEBUG_ASSERT_POINTER( pplock );

    /** @opt fast path: no locking at all when the pointer is already set */
    if ( NULL != *pplock )
    {
        return _UMP_OSU_ERR_OK;
    }

    /* We MIGHT need to initialize it: take the Critical Section and re-check. */
    call_result = pthread_mutex_lock(&static_auto_init_mutex);
    /* It would be a programming error for this to fail: */
    UMP_DEBUG_ASSERT( 0 == call_result, ("failed to lock critical section\n") );

    if ( NULL == *pplock )
    {
        /* We won the init race (or there was none): create the osu_lock. */
        *pplock = _ump_osu_lock_init( flags, initial, order );
        if ( NULL == *pplock )
        {
            ret = _UMP_OSU_ERR_FAULT; /* osu_lock creation failed */
        }
    }
    /* else: another thread won the race and already initialized *pplock. */

    /* Single unlock point for all outcomes. */
    call_result = pthread_mutex_unlock(&static_auto_init_mutex);
    UMP_DEBUG_ASSERT(0 == call_result, ("failed to unlock critical section\n"));
    UMP_IGNORE( call_result );

    return ret;
}
/** Create a new UMP handle for the existing allocation identified by
 * @a secure_id. Queries the size, maps the memory (uncached), allocates and
 * populates a ump_mem descriptor with ref_count 1, then probes the real
 * cache setting via ump_cpu_msync_now(). Returns UMP_INVALID_MEMORY_HANDLE
 * on any failure, undoing the mapping if it was established. */
UMP_API_EXPORT ump_handle ump_handle_create_from_secure_id(ump_secure_id secure_id)
{
    unsigned long size;

    UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != secure_id, ("Secure ID is invalid"));

    size = ump_arch_size_get(secure_id);
    if (0 != size)
    {
        unsigned long cookie;
        /*
         * The UMP memory which the secure_id referes to could now be deleted and re-created
         * since we don't have any references to it yet. The mapping below will however fail if
         * we have supplied incorrect size, so we are safe.
         */
        void *mapping = ump_arch_map(secure_id, size, UMP_CACHE_DISABLE, &cookie);
        if (NULL != mapping)
        {
            ump_mem *mem = _ump_osu_calloc(1, sizeof(*mem));
            if (NULL != mem)
            {
                mem->secure_id = secure_id;
                mem->mapped_mem = mapping;
                mem->size = size;
                mem->cookie = cookie;
                mem->is_cached = UMP_CACHE_ENABLE; /* Is set to actually check in the ump_cpu_msync_now() function */

                _ump_osu_lock_auto_init(&mem->ref_lock, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);
                UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
                mem->ref_count = 1;

                /* This is called only to set the cache settings in this handle */
                ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);

                UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));

                return (ump_handle)mem;
            }
            /* descriptor allocation failed: drop the mapping again */
            ump_arch_unmap(mapping, size, cookie);
        }
    }

    UMP_DEBUG_PRINT(2, ("UMP handle creation failed for ID %u", secure_id));
    return UMP_INVALID_MEMORY_HANDLE;
}
/* Allocate a buffer which can be used directly by hardware, 4kb aligned */
static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache)
{
    ump_secure_id secure_id;
    unsigned long allocated_size = size;

    UMP_DEBUG_PRINT(4, ("Allocating UMP memory of size %lu", size));

    secure_id = ump_arch_allocate(&allocated_size, constraints);
    if (secure_id != UMP_INVALID_SECURE_ID)
    {
        unsigned long cookie;
        void * mapping;

        mapping = ump_arch_map(secure_id, allocated_size, cache, &cookie);
        if (NULL != mapping)
        {
            /*
             * PS: By now we have actually increased the ref count in the device driver by 2,
             * one for the allocation iteself, and one for the mapping.
             */
            ump_mem * mem;
            mem = _ump_osu_calloc(1, sizeof(*mem));
            if (NULL != mem)
            {
                mem->secure_id = secure_id;
                mem->mapped_mem = mapping;
                mem->size = allocated_size;
                mem->cookie = cookie;
                mem->is_cached = 1; /* Default to ON, is disabled later if not */

                _ump_osu_lock_auto_init(&mem->ref_lock, 0, 0, 0);
                UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
                mem->ref_count = 1;

                /*
                 * ump_arch_allocate() gave us a kernel space reference, and the same did ump_arch_map()
                 * We release the one from ump_arch_allocate(), and rely solely on the one from the ump_arch_map()
                 * That is, ump_arch_unmap() should now do the final release towards the UMP kernel space driver.
                 */
                ump_arch_reference_release(secure_id);

                /* This is called only to set the cache settings in this handle */
                ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);

                UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));

                return (ump_handle)mem;
            }

            /*
             * _ump_osu_calloc() failed: undo the mapping. ump_arch_unmap()
             * releases the reference taken by ump_arch_map(); the reference
             * from ump_arch_allocate() is released once on the common failure
             * path below. (Previously it was also released here, which
             * over-released the allocation reference on this path.)
             */
            ump_arch_unmap(mapping, allocated_size, cookie); /* Unmap the memory */
        }

        ump_arch_reference_release(secure_id); /* Release reference added when we allocated the UMP memory */
    }

    UMP_DEBUG_PRINT(4, ("Allocation of UMP memory failed"));
    return UMP_INVALID_MEMORY_HANDLE;
}
/** Allocate UMP memory of (at least) *size bytes through the device driver.
 * On success, *size is updated to the actual allocated size and the new
 * allocation's secure ID is returned; UMP_INVALID_SECURE_ID on failure. */
ump_secure_id ump_arch_allocate(unsigned long *size, ump_alloc_constraints constraints)
{
    _ump_uk_allocate_s call_arg;

    if (NULL == size)
    {
        return UMP_INVALID_SECURE_ID;
    }

    call_arg.ctx = ump_uk_ctx;
    call_arg.secure_id = UMP_INVALID_SECURE_ID;
    call_arg.size = *size;

#ifndef UMP_DEBUG_SKIP_CODE
    /* Was "#ifdef": this run-time check must compile exactly when debug code
     * is NOT skipped, otherwise it never runs in a normal debug build. */
    /** Run-time ASSERTing that _ump_uk_api_version_s and ump_alloc_constraints are
     * interchangable */
    switch (constraints)
    {
    case UMP_REF_DRV_CONSTRAINT_NONE:
        UMP_DEBUG_ASSERT(UMP_REF_DRV_UK_CONSTRAINT_NONE == constraints, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints"));
        break;
    case UMP_REF_DRV_CONSTRAINT_PHYSICALLY_LINEAR:
        UMP_DEBUG_ASSERT(UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR == constraints, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints"));
        break;
    default:
        /* Always-false condition so the assert actually fires for an
         * unrecognized value (a constant 1 here could never trigger). */
        UMP_DEBUG_ASSERT(0, ("ump_uk_alloc_constraints out of sync with ump_alloc_constraints: %d unrecognized", constraints));
        break;
    }
#endif

    call_arg.constraints = (ump_uk_alloc_constraints)constraints;

    if (_UMP_OSU_ERR_OK != _ump_uku_allocate(&call_arg))
    {
        return UMP_INVALID_SECURE_ID;
    }

    *size = call_arg.size;

    /* "%lu" (the original "%ul" printed the value as unsigned int followed by
     * a literal 'l'). */
    UMP_DEBUG_PRINT(4, ("UMP: Allocated ID %u, size %lu", call_arg.secure_id, (unsigned long)call_arg.size));

    return call_arg.secure_id;
}
/** Drop one reference to the UMP device-driver interface.
 * Decrements the global ump_ref_count under ump_lock_arch; when the count
 * reaches zero, closes the kernel-side context and destroys the lock itself.
 * Failures to initialize or acquire the lock are logged and the call is
 * abandoned (no count change). */
void ump_arch_close(void)
{
    /* Ensure ump_lock_arch exists even if close is called before open. */
    _ump_osu_lock_auto_init(&ump_lock_arch, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);

    /* Check that the lock was initialized */
    if (NULL == ump_lock_arch)
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to init lock\n"));
        return;
    }

    /* Attempt to obtain a lock */
    if (_UMP_OSU_ERR_OK != _ump_osu_lock_wait(ump_lock_arch, _UMP_OSU_LOCKMODE_RW))
    {
        UMP_DEBUG_PRINT(1, ("UMP: ump_arch_close() failed to acquire lock\n"));
        return;
    }

    UMP_DEBUG_ASSERT(0 < ump_ref_count, ("UMP: ump_arch_close() called while no references exist"));
    if (ump_ref_count > 0)
    {
        ump_ref_count--;
        if (0 == ump_ref_count)
        {
            /* Last reference: close the kernel-side context and tear the lock down. */
            _ump_osu_errcode_t retval = _ump_uku_close(&ump_uk_ctx);
            UMP_DEBUG_ASSERT(retval == _UMP_OSU_ERR_OK, ("UMP: Failed to close UMP interface"));
            UMP_IGNORE(retval);
            ump_uk_ctx = NULL;
            /* Release the lock before destroying it. */
            _ump_osu_lock_signal(ump_lock_arch, _UMP_OSU_LOCKMODE_RW);
            _ump_osu_lock_term(ump_lock_arch); /* Not 100% thread safe, since another thread can already be waiting for this lock in ump_arch_open() */
            ump_lock_arch = NULL;
            return;
        }
    }

    /* Signal the lock so someone else can use it */
    _ump_osu_lock_signal(ump_lock_arch, _UMP_OSU_LOCKMODE_RW);
}
/** Return a pointer to static lock number @a nr.
 * @a nr must be below UMP_OSU_STATIC_LOCK_COUNT; nr is unsigned, so only the
 * upper bound needs a run-time check. */
_ump_osu_lock_t *_ump_osu_lock_static( u32 nr )
{
    /* Message fixed: 0 is a valid index, so the range is 0 <= nr < count. */
    UMP_DEBUG_ASSERT( nr < UMP_OSU_STATIC_LOCK_COUNT,
                      ("provided static lock index (%d) out of bounds (0 <= nr < %d)\n", nr, UMP_OSU_STATIC_LOCK_COUNT) );

    return &_ump_osu_static_locks[nr];
}
/** Create a new lock object.
 * @a flags selects the lock type (only _UMP_OSU_LOCKFLAG_ANYUNLOCK, or none,
 * may be set); @a initial must be zero; @a order is not implemented yet.
 * Returns the new lock, or NULL on any failure. */
_ump_osu_lock_t *_ump_osu_lock_init( _ump_osu_lock_flags_t flags, u32 initial, u32 order )
{
    _ump_osu_lock_t *lock = NULL;
    pthread_mutexattr_t mutex_attributes;

    UMP_IGNORE(order); /* order isn't implemented yet, for now callers should set it to zero. */

    /* Validate parameters: */
    /* Flags acceptable */
    UMP_DEBUG_ASSERT( 0 == ( flags & ~( _UMP_OSU_LOCKFLAG_ANYUNLOCK)),
                      ("incorrect flags or trying to initialise a statically initialized lock, %.8X\n", flags) );
    /* Parameter initial SBZ - for future expansion */
    UMP_DEBUG_ASSERT( 0 == initial, ("initial must be zero\n") );

    if (0 != pthread_mutexattr_init(&mutex_attributes))
    {
        return NULL;
    }

#if UMP_DEBUG_EXTENDED_MUTEX_LOCK_CHECKING
#define UMP_PTHREADS_MUTEX_TYPE PTHREAD_MUTEX_ERRORCHECK
#else
#define UMP_PTHREADS_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
#endif

    if (0 != pthread_mutexattr_settype(&mutex_attributes, UMP_PTHREADS_MUTEX_TYPE))
    {
        goto attr_cleanup; /* lock is still NULL: returns NULL below */
    }

#undef UMP_PTHREADS_MUTEX_TYPE

    /** @opt use containing structures for the ANY_UNLOCK type, to
     * save 2 DWORDS when not in use */
    lock = _ump_osu_malloc( sizeof(_ump_osu_lock_t) );
    if ( NULL == lock )
    {
        goto attr_cleanup;
    }

    if (0 != pthread_mutex_init( &lock->mutex, &mutex_attributes ))
    {
        _ump_osu_free( lock );
        lock = NULL;
        goto attr_cleanup;
    }

    /* done with the mutexattr object */
    pthread_mutexattr_destroy(&mutex_attributes);

    /* ANY_UNLOCK type needs a condition variable and a state flag as well. */
    if ( flags & _UMP_OSU_LOCKFLAG_ANYUNLOCK )
    {
        if (0 != pthread_cond_init( &lock->condition, NULL ))
        {
            /* cleanup */
            pthread_mutex_destroy( &lock->mutex );
            _ump_osu_free( lock );
            return NULL;
        }
        lock->state = UMP_FALSE; /* mark as unlocked by default */
    }

    lock->flags = flags;

    /** Debug lock checking */
    UMP_DEBUG_CODE( lock->locked_as = _UMP_OSU_LOCKMODE_UNDEF );

    return lock;

attr_cleanup:
    /* Shared failure exit for the stages where only the attr object exists. */
    pthread_mutexattr_destroy(&mutex_attributes);
    return NULL;
}
/** Forward an unlock request for the buffer behind @a memh to the arch layer,
 * returning ump_arch_unlock()'s result. */
UMP_API_EXPORT int ump_unlock( ump_handle memh )
{
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
    return ump_arch_unlock(((ump_mem *)memh)->secure_id);
}
/** Forward a hardware-usage switch for the buffer behind @a memh to the arch
 * layer, returning ump_arch_switch_hw_usage()'s result. */
UMP_API_EXPORT int ump_switch_hw_usage( ump_handle memh, ump_hw_usage new_user )
{
    UMP_DEBUG_ASSERT(UMP_INVALID_MEMORY_HANDLE != memh, ("Handle is invalid"));
    return ump_arch_switch_hw_usage(((ump_mem *)memh)->secure_id, new_user);
}
/** Obtain @a lock like _ump_osu_lock_wait(), but give up after @a timeout
 * microseconds. Only implemented for ANYUNLOCK locks.
 * Returns _UMP_OSU_ERR_OK on acquisition, _UMP_OSU_ERR_TIMEOUT on timeout,
 * _UMP_OSU_ERR_FAULT on any other failure.
 * NOTE(review): the deadline is based on gettimeofday() realtime, so a wall
 * clock step affects the effective timeout. */
_ump_osu_errcode_t _ump_osu_lock_timed_wait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode, u64 timeout)
{
    /* absolute time specifier */
    struct timespec ts;
    struct timeval tv;

    /* Parameter validation */
    UMP_DEBUG_ASSERT_POINTER( lock );
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode, ("unrecognised mode, %.8X\n", mode) );
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKFLAG_ANYUNLOCK == lock->flags, ("Timed operations only implemented for ANYUNLOCK type locks"));

    /* calculate the realtime timeout value */
    if (0 != gettimeofday(&tv, NULL))
    {
        UMP_DEBUG_PRINT(1,("Could not get the current realtime value to calculate the absolute value for a timed mutex lock with a timeout"));
        return _UMP_OSU_ERR_FAULT;
    }

#define UMP_USECS_PER_SECOND 1000000LL
#define UMP_NANOSECS_PER_USEC 1000LL

    /* Split the timeout into whole seconds and sub-second microseconds before
     * adding. The previous code added the full 64-bit microsecond count to
     * tv_usec and normalized in a loop, which could overflow a 32-bit
     * suseconds_t for large timeouts. */
    tv.tv_sec += (time_t)(timeout / UMP_USECS_PER_SECOND);
    tv.tv_usec += (suseconds_t)(timeout % UMP_USECS_PER_SECOND);

    /* did we overflow a second in the usec part? (at most once now) */
    if (tv.tv_usec >= UMP_USECS_PER_SECOND)
    {
        tv.tv_usec -= UMP_USECS_PER_SECOND;
        tv.tv_sec++;
    }

    /* copy to the correct struct */
    ts.tv_sec = tv.tv_sec;
    ts.tv_nsec = (tv.tv_usec * UMP_NANOSECS_PER_USEC);

#undef UMP_USECS_PER_SECOND
#undef UMP_NANOSECS_PER_USEC

    /* lock the mutex protecting access to the state field */
    pthread_mutex_lock( &lock->mutex );

    /* loop while locked (state is UMP_TRUE) */
    /* pthread_cond_timedwait unlocks the mutex, wait, and locks the mutex once
       unblocked (either due to the event or the timeout) */
    while ( UMP_TRUE == lock->state )
    {
        int res;
        res = pthread_cond_timedwait( &lock->condition, &lock->mutex, &ts );
        if (0 == res) continue; /* test the state variable again (loop condition) */
        else if (ETIMEDOUT == res)
        {
            /* timeout, need to clean up and return the correct error code */
            pthread_mutex_unlock(&lock->mutex);
            return _UMP_OSU_ERR_TIMEOUT;
        }
        else
        {
            UMP_DEBUG_PRINT(1, ("Unexpected return from pthread_cond_timedwait 0x%08X\n", res));

            pthread_mutex_unlock(&lock->mutex);
            return _UMP_OSU_ERR_FAULT;
        }
    }

    /* DEBUG tracking of previously locked state - occurs while lock is obtained */
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as,
                      ("This lock was already locked\n") );
    UMP_DEBUG_CODE( lock->locked_as = mode );

    /* the state is UMP_FALSE (unlocked), so we set it to UMP_TRUE to indicate that
       it's locked and can return knowing that we own the lock */
    lock->state = UMP_TRUE;

    /* final unlock of the mutex */
    pthread_mutex_unlock(&lock->mutex);

    return _UMP_OSU_ERR_OK;
}
/** Try to obtain @a lock in mode @a mode without blocking.
 * Returns _UMP_OSU_ERR_OK if the lock was obtained, _UMP_OSU_ERR_FAULT if it
 * was already held (or on any other failure). For ANYUNLOCK locks the state
 * flag is tested-and-set under the internal mutex, which is always released
 * before returning. */
_ump_osu_errcode_t _ump_osu_lock_trywait( _ump_osu_lock_t *lock, _ump_osu_lock_mode_t mode)
{
    _ump_osu_errcode_t err = _UMP_OSU_ERR_FAULT;

    /* Parameter validation */
    UMP_DEBUG_ASSERT_POINTER( lock );
    UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_RW == mode, ("unrecognised mode, %.8X\n", mode) );

    /** @note since only one flag can be set, we use a switch statement here.
     * Otherwise, MUST add an enum into the _ump_osu_lock_t to store the
     * implemented lock type */
    switch ( lock->flags )
    {
    case _UMP_OSU_LOCKFLAG_STATIC:
    case _UMP_OSU_LOCKFLAG_DEFAULT:
        /* Usual Mutex type */
        {
            /* This is not subject to UMP_CHECK - overriding the result would cause a programming error */
            if ( 0 == pthread_mutex_trylock( &lock->mutex ) )
            {
                err = _UMP_OSU_ERR_OK;

                /* DEBUG tracking of previously locked state - occurs while lock is obtained */
                UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as || mode == lock->locked_as,
                                  ("tried as mode==%.8X, but was locked as %.8X\n", mode, lock->locked_as) );
                UMP_DEBUG_CODE( lock->locked_as = mode );
            }
        }
        break;

    case _UMP_OSU_LOCKFLAG_ANYUNLOCK:
        /** @note Use of bitflags in a case statement ONLY works because this
         * is the ONLY flag that is supported */

        /* lock the mutex protecting access to the state field */
        pthread_mutex_lock(&lock->mutex);

        if ( UMP_FALSE == lock->state)
        {
            /* unlocked, take the lock */
            lock->state = UMP_TRUE;
            err = _UMP_OSU_ERR_OK;
        }

        /* DEBUG tracking of previously locked state - occurs while lock is obtained */
        /* Can do this regardless of whether we obtained ANYUNLOCK: */
        UMP_DEBUG_ASSERT( _UMP_OSU_LOCKMODE_UNDEF == lock->locked_as || mode == lock->locked_as,
                          ("tried as mode==%.8X, but was locked as %.8X\n", mode, lock->locked_as) );
        /* If we were already locked, this does no harm, because of the above assert: */
        UMP_DEBUG_CODE( lock->locked_as = mode );

        pthread_mutex_unlock(&lock->mutex);
        break;

    default:
        UMP_DEBUG_ERROR( ("lock has incorrect flags==%.8X\n", lock->flags) );
        break;
    }

    return err;
}