Example No. 1
void
render_context_queue(render_context_t* context, render_command_t* command, uint64_t sort) {
	int32_t idx = atomic_exchange_and_add32(&context->reserved, 1, memory_order_relaxed);
	FOUNDATION_ASSERT_MSG(idx < context->allocated, "Render command overallocation");
	context->keys[idx] = sort;
	memcpy(context->commands + idx, command, sizeof(render_command_t));
}
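The idiom in this example is a lock-free multi-producer append: the fetch-and-add on context->reserved hands the calling thread a unique slot index, and the sort key and command are then written into that slot with no further locking, since no other thread can receive the same index. Below is a minimal sketch of the same pattern in portable C11 atomics; command_t, command_queue_t and command_queue_push are hypothetical illustrations, not part of the library.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical command payload and queue used only to illustrate the idiom. */
typedef struct { uint64_t payload; } command_t;

typedef struct {
	atomic_int reserved;   /* next free slot, bumped with fetch-and-add  */
	int        allocated;  /* capacity of the keys and commands arrays   */
	uint64_t*  keys;       /* sort key per queued command                */
	command_t* commands;   /* command storage, one entry per queued slot */
} command_queue_t;

static void
command_queue_push(command_queue_t* queue, const command_t* command, uint64_t sort) {
	/* Relaxed fetch-and-add is enough to hand out unique indices. */
	int idx = atomic_fetch_add_explicit(&queue->reserved, 1, memory_order_relaxed);
	assert(idx < queue->allocated);
	queue->keys[idx] = sort;
	memcpy(queue->commands + idx, command, sizeof(command_t));
}

Relaxed ordering only guarantees unique indices; making the written slots visible to a consumer still needs a later synchronization point, presumably the flush/sort step that drains the queue once all producers are done.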
Example No. 2
render_command_t*
render_context_reserve(render_context_t* context, uint64_t sort) {
	int32_t idx = atomic_exchange_and_add32(&context->reserved, 1, memory_order_relaxed);
	FOUNDATION_ASSERT_MSG(idx < context->allocated, "Render command overallocation");
	context->keys[idx] = sort;
	return context->commands + idx;
}
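The only difference from Example No. 1 is that the claimed slot is returned to the caller, so the command can be constructed in place rather than built on the stack and copied in. Continuing the hypothetical C11 sketch from above:

/* Reserve-style variant of the sketch above: claim a slot and return its
   address so the caller fills the command in place (no memcpy needed). */
static command_t*
command_queue_reserve(command_queue_t* queue, uint64_t sort) {
	int idx = atomic_fetch_add_explicit(&queue->reserved, 1, memory_order_relaxed);
	assert(idx < queue->allocated);
	queue->keys[idx] = sort;
	return queue->commands + idx;
}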
Example No. 3
static void _memory_tracker_track( void* addr, uint64_t size )
{
	if( addr ) do
	{
		int32_t tag = atomic_exchange_and_add32( &_memory_tag_next, 1 );
		if( tag >= MAX_CONCURRENT_ALLOCATIONS )
		{
			int32_t newtag = tag % MAX_CONCURRENT_ALLOCATIONS;
			atomic_cas32( &_memory_tag_next, newtag, tag + 1 );
			tag = newtag;
		}
		if( !_memory_tags[ tag ].address && atomic_cas_ptr( &_memory_tags[ tag ].address, addr, 0 ) )
		{
			_memory_tags[ tag ].size = (uintptr_t)size;
			stacktrace_capture( _memory_tags[ tag ].trace, 14, 3 );
			hashtable_set( _memory_table, (uintptr_t)addr, (uintptr_t)( tag + 1 ) );
			return;
		}
	} while( true );
}
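The memory tracker claims a slot in a fixed-size tag table without locks: fetch-and-add picks a candidate index, a best-effort compare-and-swap wraps the counter back into range once it overflows the table, and the slot itself is claimed by CAS-ing its address field from null to the tracked pointer, retrying until an empty slot is won. A minimal sketch of that claim loop in plain C11 atomics follows; slot_t, MAX_SLOTS and claim_slot are hypothetical names, not the library's.

#include <stdatomic.h>
#include <stddef.h>

#define MAX_SLOTS 512  /* illustrative table size */

typedef struct {
	_Atomic(void*) address;  /* NULL while the slot is free */
	size_t         size;
} slot_t;

static slot_t     slots[MAX_SLOTS];
static atomic_int slot_next;

static int
claim_slot(void* addr, size_t size) {
	for (;;) {
		/* Pick a candidate index; uniqueness is not required, only progress. */
		int tag = atomic_fetch_add(&slot_next, 1);
		if (tag >= MAX_SLOTS) {
			int wrapped  = tag % MAX_SLOTS;
			int expected = tag + 1;
			/* Best-effort reset of the counter; losing this race is harmless. */
			atomic_compare_exchange_strong(&slot_next, &expected, wrapped);
			tag = wrapped;
		}
		void* empty = NULL;
		/* Claim the slot only if it is still free; otherwise try another one. */
		if (atomic_compare_exchange_strong(&slots[tag].address, &empty, addr)) {
			slots[tag].size = size;
			return tag;
		}
	}
}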
Example No. 4
static void*
add_thread(void* arg) {
	int loop = 0;
	int32_t icount = 0;
	FOUNDATION_UNUSED(arg);
	while (!thread_try_wait(0) && (loop < 65535)) {
		for (icount = 0; icount < 128; ++icount) {
			atomic_add32(&val_32, icount % 2 ? -icount : icount);
			atomic_exchange_and_add64(&val_64, icount % 2 ? -icount : icount);
		}
		for (icount = 0; icount < 128; ++icount) {
			atomic_exchange_and_add32(&val_32, icount % 2 ? icount : -icount);
			atomic_add64(&val_64, icount % 2 ? icount : -icount);
		}

		++loop;
		thread_yield();
	}
	return 0;
}
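The shared counters this test thread hammers are not part of the snippet; presumably they are file-scope foundation atomics along the lines of the assumed declarations below. The two inner loops apply equal and opposite totals, so each full pass nets zero, and since the thread can only exit between passes the counters should read zero again once every worker has stopped. Mixing atomic_add32/64 with atomic_exchange_and_add32/64 presumably just exercises both entry points, as the return values are discarded either way.

/* Assumed file-scope declarations for the shared counters; the real test
   defines them elsewhere in the same file. Statics are zero-initialized. */
static atomic32_t val_32;
static atomic64_t val_64;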
Example No. 5
void* add_thread( object_t thread, void* arg )
{
    int loop = 0;
    int32_t icount = 0;
    while( !thread_should_terminate( thread ) && ( loop < 65535 ) )
    {
        for( icount = 0; icount < 128; ++icount )
        {
            atomic_add32( &val_32, icount % 2 ? -icount : icount );
            atomic_exchange_and_add64( &val_64, icount % 2 ? -icount : icount );
        }
        for( icount = 0; icount < 128; ++icount )
        {
            atomic_exchange_and_add32( &val_32, icount % 2 ? icount : -icount );
            atomic_add64( &val_64, icount % 2 ? icount : -icount );
        }

        ++loop;
        thread_yield();
    }
    return 0;
}