void* cas_thread( object_t thread, void* arg ) {
	// Stress-test worker for the atomic compare-and-swap primitives: repeatedly
	// flips the shared test variables (val_32, val_64, val_ptr) between zero and
	// this thread's own values, yielding whenever a CAS attempt fails because
	// another worker currently holds the slot.
	// NOTE(review): exact expected/desired argument order of atomic_cas32/64/ptr
	// is not visible here — verify against the atomic API declarations.
	cas_value_t value = *(cas_value_t*)arg;
	int iteration = 0;

	thread_sleep( 10 );

	while( !thread_should_terminate( thread ) && ( iteration < 65535 ) ) {
		// Each pair swaps the shared value out and then back in, spinning
		// with a yield between failed attempts.
		for( ;; ) { if( atomic_cas32( &val_32, value.val_32, 0 ) ) break; thread_yield(); }
		for( ;; ) { if( atomic_cas32( &val_32, 0, value.val_32 ) ) break; thread_yield(); }
		for( ;; ) { if( atomic_cas64( &val_64, value.val_64, 0 ) ) break; thread_yield(); }
		for( ;; ) { if( atomic_cas64( &val_64, 0, value.val_64 ) ) break; thread_yield(); }
		for( ;; ) { if( atomic_cas_ptr( &val_ptr, value.val_ptr, 0 ) ) break; thread_yield(); }
		for( ;; ) { if( atomic_cas_ptr( &val_ptr, 0, value.val_ptr ) ) break; thread_yield(); }
		++iteration;
		thread_yield();
	}

	return 0;
}
static void* cas_thread(void* arg) { unsigned int loop = 0; cas_value_t val = *(cas_value_t*)arg; thread_sleep(10); while (!thread_try_wait(0) && (loop < 65535)) { while (!atomic_cas32(&val_32, val.val_32, 0)) thread_yield(); while (!atomic_cas32(&val_32, 0, val.val_32)) thread_yield(); while (!atomic_cas64(&val_64, val.val_64, 0)) thread_yield(); while (!atomic_cas64(&val_64, 0, val.val_64)) thread_yield(); while (!atomic_cas_ptr(&val_ptr, val.val_ptr, 0)) thread_yield(); while (!atomic_cas_ptr(&val_ptr, 0, val.val_ptr)) thread_yield(); ++loop; thread_yield(); } return 0; }
/* Reserve a slot in the object map and allocate a fresh object id for it.
 * Returns a combined object handle: (id << size_bits) | slot-index, or 0 when
 * the map is full. Lock-free: the free list is popped with a 64-bit CAS loop.
 * Free-list encoding (as read here): a free slot's map entry holds
 * (next_free_index << 1) | 1 — the low tag bit marks "free", the upper bits
 * are the next free slot's index.
 * NOTE(review): map->map[idx] is read before the CAS that pops idx, so a
 * concurrent reserve/free can make `next` stale (classic ABA window on the
 * free list) — confirm external synchronization or accepted-risk status. */
object_t objectmap_reserve( objectmap_t* map ) {
	uint64_t idx, next, id;

	FOUNDATION_ASSERT( map ); /*lint -esym(613,pool) */

	//Reserve spot in array
	//TODO: Look into double-ended implementation with allocation from tail and free push to head
	do {
		idx = atomic_load64( &map->free );
		if( idx >= map->size ) {
			//Free-list head past capacity means no free slots remain
			log_error( 0, ERROR_OUT_OF_MEMORY, "Pool full, unable to reserve id" );
			return 0;
		}
		//Strip the "free" tag bit to get the next free slot index
		next = ((uintptr_t)map->map[idx]) >> 1;
	} while( !atomic_cas64( &map->free, next, idx ) );

	//Sanity check that slot isn't taken
	FOUNDATION_ASSERT_MSG( (intptr_t)(map->map[idx]) & 1, "Map failed sanity check, slot taken after reserve" );
	//Clear the slot; caller will store the object pointer here
	map->map[idx] = 0;

	//Allocate ID
	id = 0;
	do {
		id = atomic_incr64( &map->id ) & map->id_max; //Wrap-around handled by masking
	} while( !id ); //id 0 is reserved as invalid, skip it

	//Make sure id stays within correct bits (if fails, check objectmap allocation and the mask setup there)
	FOUNDATION_ASSERT( ( ( id << map->size_bits ) & map->mask_id ) == ( id << map->size_bits ) );
	return ( id << map->size_bits ) | idx;
	/*lint +esym(613,pool) */
}
/* Release the slot addressed by object handle `id` back onto the map's
 * lock-free free list (Treiber-style push via 64-bit CAS on map->free).
 * The slot's map entry is rewritten as (previous_head << 1) | 1: the low tag
 * bit marks the slot as free, the upper bits link to the next free slot.
 * No-op if the slot is already tagged free.
 * NOTE(review): the already-free check and the store into map->map[idx] are
 * not atomic with the CAS — a concurrent free of the same id can race here;
 * confirm callers guarantee single ownership at free time. */
void objectmap_free( objectmap_t* map, object_t id ) {
	uint64_t idx, last;

	FOUNDATION_ASSERT( map ); /*lint -esym(613,pool) */

	//Extract the slot index from the low bits of the handle
	idx = (intptr_t)( id & map->mask_index );
	if( (uintptr_t)map->map[idx] & 1 )
		return; //Already free

	do {
		last = atomic_load64( &map->free );
		//Link this slot to the current head, tagged as free
		map->map[idx] = (void*)((uintptr_t)(last<<1)|1);
	} while( !atomic_cas64( &map->free, idx, last ) );
	/*lint +esym(613,pool) */
}