Example #1
DECLARE_TEST(profile, initialize) {
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32(&_test_profile_output_counter, 0);

	profile_initialize(STRING_CONST("test_profile"), _test_profile_buffer, TEST_PROFILE_BUFFER_SIZE);
	profile_enable(true);

	profile_log(STRING_CONST("testing"));

	thread_sleep(1000);

	profile_enable(false);
	profile_finalize();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT(atomic_load32(&_test_profile_output_counter), 0);
#else
	EXPECT_EQ(atomic_load32(&_test_profile_output_counter), 0);
#endif

	err = error();
	EXPECT_EQ(err, ERROR_NONE);

	return 0;
}
Example #2
DECLARE_TEST( profile, initialize )
{
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32( &_test_profile_output_counter, 0 );

	profile_initialize( "test_profile", _test_profile_buffer, _test_profile_buffer_size );
	profile_enable( true );

	profile_log( "testing" );

	thread_sleep( 1000 );

	profile_enable( false );
	profile_shutdown();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT( atomic_load32( &_test_profile_output_counter ), 0 );
#else
	EXPECT_EQ( atomic_load32( &_test_profile_output_counter ), 0 );
#endif

	err = error();
	EXPECT_EQ( err, ERROR_NONE );

	return 0;
}
Example #3
DECLARE_TEST( profile, thread )
{
	object_t thread[32];
	int ith;
	int frame;
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32( &_test_profile_output_counter, 0 );

	profile_initialize( "test_profile", _test_profile_buffer, 30000/*_test_profile_buffer_size*/ );
	profile_enable( true );
	profile_set_output_wait( 1 );

	log_info( HASH_TEST, "This test will intentionally run out of memory in profiling system" );
	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( _profile_fail_thread, "profile_thread", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], 0 );
	}

	test_wait_for_threads_startup( thread, 32 );

	for( frame = 0; frame < 1000; ++frame )
	{
		thread_sleep( 16 );
		profile_end_frame( frame );
	}

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
		thread_yield();
	}

	test_wait_for_threads_exit( thread, 32 );

	thread_sleep( 1000 );

	profile_enable( false );
	profile_shutdown();

	err = error();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT( atomic_load32( &_test_profile_output_counter ), 0 );
	//TODO: Implement parsing output results
#else
	EXPECT_EQ( atomic_load32( &_test_profile_output_counter ), 0 );
#endif
	EXPECT_EQ( err, ERROR_NONE );

	return 0;
}
Example #4
static void* _profile_io( object_t thread, void* arg )
{
	unsigned int system_info_counter = 0;
	profile_block_t system_info;
	FOUNDATION_UNUSED( arg );
	memset( &system_info, 0, sizeof( profile_block_t ) );
	system_info.data.id = PROFILE_ID_SYSTEMINFO;
	system_info.data.start = time_ticks_per_second();
	string_copy( system_info.data.name, "sysinfo", 7 );

	while( !thread_should_terminate( thread ) )
	{
		thread_sleep( _profile_wait );

		if( !atomic_load32( &_profile_root ) )
			continue;

		profile_begin_block( "profile_io" );

		if( atomic_load32( &_profile_root ) )
		{
			profile_begin_block( "process" );

			//This is thread safe in the sense that only completely closed and ended
			//blocks will be put as children of the root block, so no additional blocks
			//will ever be added to child subtrees while we process them here
			_profile_process_root_block();

			profile_end_block();
		}

		if( system_info_counter++ > 10 )
		{
			if( _profile_write )
				_profile_write( &system_info, sizeof( profile_block_t ) );
			system_info_counter = 0;
		}

		profile_end_block();
	}

	if( atomic_load32( &_profile_root ) )
		_profile_process_root_block();

	if( _profile_write )
	{
		profile_block_t terminate;
		memset( &terminate, 0, sizeof( profile_block_t ) );
		terminate.data.id = PROFILE_ID_ENDOFSTREAM;
		_profile_write( &terminate, sizeof( profile_block_t ) );
	}

	return 0;
}
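The comment in the block above captures why _profile_process_root_block can walk the root chain without taking a lock: the producer side only ever attaches fully closed blocks, and the consumer detaches the whole chain before touching it. Below is a minimal, self-contained sketch of that detach-then-walk structure, assuming C11 <stdatomic.h> and a plain pointer-based list; the names (work_item_t, work_root, publish_item, process_pending) are hypothetical, and the real foundation_lib code uses 32-bit block indices and a CAS loop rather than an atomic pointer exchange.

#include <stdatomic.h>
#include <stddef.h>

typedef struct work_item work_item_t;
struct work_item {
	work_item_t* sibling;  /* next fully finished item in the chain */
	int          payload;
};

/* Shared head of the chain of finished items */
static work_item_t* _Atomic work_root;

/* Producer side: attach a fully finished item to the shared chain */
static void
publish_item(work_item_t* item) {
	work_item_t* expected = atomic_load(&work_root);
	do {
		item->sibling = expected;
	} while (!atomic_compare_exchange_weak(&work_root, &expected, item));
}

/* Consumer side: detach the whole chain in one atomic step, then walk it
   without any further synchronization */
static void
process_pending(void (*handle)(work_item_t*)) {
	work_item_t* item = atomic_exchange(&work_root, NULL);
	while (item) {
		work_item_t* next = item->sibling;
		item->sibling = NULL;
		handle(item);
		item = next;
	}
}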
Example #5
DECLARE_TEST(profile, thread) {
	thread_t thread[32];
	int ith;
	uint64_t frame;
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32(&_test_profile_output_counter, 0);

	profile_initialize(STRING_CONST("test_profile"), _test_profile_buffer, 30000);
	profile_enable(true);
	profile_set_output_wait(1);

	log_enable_stdout(false);
	for (ith = 0; ith < 32; ++ith)
		thread_initialize(&thread[ith], _profile_fail_thread, 0, STRING_CONST("profile_thread"),
		                  THREAD_PRIORITY_NORMAL, 0);
	for (ith = 0; ith < 32; ++ith)
		thread_start(&thread[ith]);

	test_wait_for_threads_startup(thread, 32);

	for (frame = 0; frame < 1000; ++frame) {
		thread_sleep(16);
		profile_end_frame(frame);
	}

	for (ith = 0; ith < 32; ++ith)
		thread_signal(&thread[ith]);

	test_wait_for_threads_finish(thread, 32);

	for (ith = 0; ith < 32; ++ith)
		thread_finalize(&thread[ith]);
	log_enable_stdout(true);

	err = error();
	thread_sleep(1000);

	profile_enable(false);
	profile_finalize();

#if BUILD_ENABLE_PROFILE
	EXPECT_INTGT(atomic_load32(&_test_profile_output_counter), 0);
	//TODO: Implement parsing output results
#else
	EXPECT_INTEQ(atomic_load32(&_test_profile_output_counter), 0);
#endif
	EXPECT_INTEQ(err, ERROR_NONE);

	return 0;
}
Example #6
static void _profile_put_root_block( uint32_t block )
{
	uint32_t sibling;
	profile_block_t* self = GET_BLOCK( block );

#if PROFILE_ENABLE_SANITY_CHECKS
	FOUNDATION_ASSERT( self->sibling == 0 );
#endif
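	//Install this block as the root. If another thread has already installed
	//a root chain, atomically steal that chain, append it to this block's
	//sibling list and retry until the install succeeds.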
	while( !atomic_cas32( &_profile_root, block, 0 ) )
	{
		do
		{
			sibling = atomic_load32( &_profile_root );
		} while( sibling && !atomic_cas32( &_profile_root, 0, sibling ) );

		if( sibling )
		{
			if( self->sibling )
			{
				uint32_t leaf = self->sibling;
				while( GET_BLOCK( leaf )->sibling )
					leaf = GET_BLOCK( leaf )->sibling;
				GET_BLOCK( sibling )->previous = leaf;
				GET_BLOCK( leaf )->sibling = sibling;
			}
			else
			{
				self->sibling = sibling;
			}
		}
	}
}
Example #7
static profile_block_t* _profile_allocate_block( void )
{
	//Grab block from free list, avoiding ABA issues by
	//using high 16 bit as a loop counter
	profile_block_t* block;
	uint32_t free_block_tag, free_block, next_block_tag;
	do
	{
		free_block_tag = atomic_load32( &_profile_free );
		free_block = free_block_tag & 0xffff;

		next_block_tag = GET_BLOCK( free_block )->child;
		next_block_tag |= ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16;
	} while( free_block && !atomic_cas32( &_profile_free, next_block_tag, free_block_tag ) );

	if( !free_block )
	{
		static atomic32_t has_warned = {0};
		if( atomic_cas32( &has_warned, 1, 0 ) )
			log_error( 0, ERROR_OUT_OF_MEMORY, ( _profile_num_blocks < 65535 ) ? "Profile blocks exhausted, increase profile memory block size" : "Profile blocks exhausted, decrease profile output wait time" );
		return 0;
	}

	block = GET_BLOCK( free_block );
	memset( block, 0, sizeof( profile_block_t ) );
	return block;
}
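A minimal, self-contained sketch of the tag-in-the-high-bits trick used above, rewritten with C11 <stdatomic.h>; the names (node_t, pool, free_head, loop_tag, pool_pop, pool_push) are hypothetical. The free-list head index is packed into the low 16 bits of a 32-bit word and a monotonically increasing counter into the high 16 bits, so a compare-and-swap against a stale head fails even if the same index has since been popped and pushed back (the ABA problem). pool_push mirrors what _profile_free_block does further down.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

typedef struct {
	uint16_t next;          /* index of the next free node, 0 terminates the list */
	char     payload[62];
} node_t;

static node_t      pool[4096];
static atomic_uint free_head;   /* low 16 bits: head index, high 16 bits: ABA tag */
static atomic_uint loop_tag;

static node_t*
pool_pop(void) {
	unsigned int old_head, head_index, new_head;
	do {
		old_head = atomic_load(&free_head);
		head_index = old_head & 0xffffu;
		if (!head_index)
			return NULL;    /* free list exhausted */
		/* New head: the next index plus a fresh tag in the high bits */
		new_head = pool[head_index].next |
		           ((atomic_fetch_add(&loop_tag, 1) & 0xffffu) << 16);
	} while (!atomic_compare_exchange_weak(&free_head, &old_head, new_head));
	memset(&pool[head_index], 0, sizeof(node_t));
	return &pool[head_index];
}

/* Push a node back onto the free list; the tag again guards the CAS */
static void
pool_push(node_t* node) {
	unsigned int old_head, new_head;
	uint16_t index = (uint16_t)(node - pool);
	do {
		old_head = atomic_load(&free_head);
		node->next = (uint16_t)(old_head & 0xffffu);
		new_head = index | ((atomic_fetch_add(&loop_tag, 1) & 0xffffu) << 16);
	} while (!atomic_compare_exchange_weak(&free_head, &old_head, new_head));
}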
Example #8
DECLARE_TEST(atomic, cas) {
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);
	size_t ithread;
	thread_t threads[32];
	cas_value_t cas_values[32];

	for (ithread = 0; ithread < num_threads; ++ithread) {
		cas_values[ithread].val_32 = (int32_t)ithread;
		cas_values[ithread].val_64 = (int64_t)ithread;
		cas_values[ithread].val_ptr = (void*)(uintptr_t)ithread;
		thread_initialize(&threads[ithread], cas_thread, &cas_values[ithread],
		                  STRING_CONST("cas"), THREAD_PRIORITY_NORMAL, 0);
	}
	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_start(&threads[ithread]);

	test_wait_for_threads_startup(threads, num_threads);
	test_wait_for_threads_finish(threads, num_threads);

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_finalize(&threads[ithread]);

	EXPECT_EQ(atomic_load32(&val_32), 0);
	EXPECT_EQ(atomic_load64(&val_64), 0);
	EXPECT_EQ(atomic_loadptr(&val_ptr), 0);

	return 0;
}
Example #9
void profile_shutdown( void )
{
	profile_enable( 0 );

	while( thread_is_thread( _profile_io_thread ) )
		thread_sleep( 1 );
	_profile_io_thread = 0;

	//Discard and free up blocks remaining in queue
	_profile_thread_finalize();
	if( atomic_load32( &_profile_root ) )
		_profile_process_root_block();

	//Sanity checks
	{
		uint64_t num_blocks = 0;
		uint32_t free_block = atomic_load32( &_profile_free ) & 0xffff;

		if( atomic_load32( &_profile_root ) )
			log_error( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, at least one root block still allocated/active" );

		while( free_block )
		{
			profile_block_t* block = GET_BLOCK( free_block );
			if( block->sibling )
				log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, block %d has sibling set", free_block );
			++num_blocks;
			free_block = GET_BLOCK( free_block )->child;
		}
		if( _profile_num_blocks )
			++num_blocks; //Include the wasted block 0

		if( num_blocks != _profile_num_blocks )
		{
			//If profile output function (user) crashed, this will probably trigger since at least one block will be lost in space
			log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, lost blocks (found %llu of %llu)", num_blocks, _profile_num_blocks );
		}
	}

	atomic_store32( &_profile_root, 0 );
	atomic_store32( &_profile_free, 0 );

	_profile_num_blocks = 0;
	_profile_identifier = 0;
}
Example #10
static void _profile_free_block( uint32_t block, uint32_t leaf )
{
	uint32_t last_tag, block_tag;
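	//Push the block chain back onto the free list by linking the old head
	//behind the leaf; the loop counter in the high 16 bits of the head word
	//guards the compare-and-swap against ABA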
	do
	{
		block_tag = block | ( ( atomic_incr32( &_profile_loopid ) & 0xffff ) << 16 );
		last_tag = atomic_load32( &_profile_free );
		GET_BLOCK( leaf )->child = last_tag & 0xffff;
	} while( !atomic_cas32( &_profile_free, block_tag, last_tag ) );
}
Example #11
object_t _object_unref( object_base_t* obj )
{
	int32_t ref;
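	//Atomically release one reference without ever decrementing below zero;
	//returns 0 when the last reference was dropped, otherwise the object id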
	if( obj ) do
	{
		ref = atomic_load32( &obj->ref );
		if( ( ref > 0 ) && atomic_cas32( &obj->ref, ref - 1, ref ) )
			return ( ref == 1 ) ? 0 : obj->id;
	} while( ref > 0 );
	return 0;
}
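The same release pattern expressed with C11 atomics, as a minimal sketch with hypothetical names (refcounted_t, release_ref). The loop re-reads the count on every failed compare-and-swap, so the count can never be pushed below zero, and the caller learns whether it dropped the last reference; _object_unref above reports the same outcome by returning 0 or the object id instead of a bool.

#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_int ref;
} refcounted_t;

/* Returns true when the caller released the last reference and should free
   the object, false if other references remain or the object was already dead */
static bool
release_ref(refcounted_t* obj) {
	int ref = atomic_load(&obj->ref);
	while (ref > 0) {
		/* On failure the current value is reloaded into ref and we retry */
		if (atomic_compare_exchange_weak(&obj->ref, &ref, ref - 1))
			return ref == 1;
	}
	return false;
}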
Example #12
DECLARE_TEST( mutex, signal )
{
	mutex_t* mutex;
	object_t thread[32];
	int ith;

	mutex = mutex_allocate( "test" );
	mutex_lock( mutex );

	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( thread_wait, "thread_wait", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], mutex );
	}

	mutex_unlock( mutex );

	test_wait_for_threads_startup( thread, 32 );

	while( atomic_load32( &thread_waiting ) < 32 )
		thread_yield();
	thread_sleep( 1000 ); //Hack wait to give threads time to progress from atomic_incr to mutex_wait

	mutex_signal( mutex );

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
	}

	test_wait_for_threads_exit( thread, 32 );

	EXPECT_EQ( atomic_load32( &thread_waited ), 32 );

	EXPECT_FALSE( mutex_wait( mutex, 500 ) );

	mutex_deallocate( mutex );

	return 0;
}
Example #13
File: buffer.c Project: ifzz/render_lib
object_t
render_buffer_ref(object_t id) {
	int32_t ref;
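	//Atomically take a reference on the buffer, but only while it is still
	//live (reference count above zero); returns 0 if the object is gone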
	render_buffer_t* buffer = objectmap_lookup(_render_map_buffer, id);
	if (buffer)
		do {
			ref = atomic_load32(&buffer->ref);
			if ((ref > 0) && atomic_cas32(&buffer->ref, ref + 1, ref))
				return id;
		}
		while (ref > 0);
	return 0;
}
Example #14
DECLARE_TEST(mutex, signal) {
	mutex_t* mutex;
	thread_t thread[32];
	size_t ith;

	mutex = mutex_allocate(STRING_CONST("test"));
	mutex_lock(mutex);

	for (ith = 0; ith < 32; ++ith)
		thread_initialize(&thread[ith], thread_waiter, mutex, STRING_CONST("thread_wait"),
		                  THREAD_PRIORITY_NORMAL, 0);
	for (ith = 0; ith < 32; ++ith)
		thread_start(&thread[ith]);

	mutex_unlock(mutex);

	test_wait_for_threads_startup(thread, 32);

	while (atomic_load32(&thread_waiting) < 32)
		thread_yield();
	thread_sleep(1000);   //Hack wait to give threads time to progress from atomic_incr to mutex_wait

	mutex_signal(mutex);

	test_wait_for_threads_finish(thread, 32);

	for (ith = 0; ith < 32; ++ith)
		thread_finalize(&thread[ith]);

	EXPECT_EQ(atomic_load32(&thread_waited), 32);

	EXPECT_FALSE(mutex_try_wait(mutex, 500));

	mutex_deallocate(mutex);

	return 0;
}
Example #15
void
lua_push_op(lua_t* env, lua_op_t* op) {
	unsigned int ofs, old;
	do {
		old = atomic_load32(&env->queue_tail);
		ofs = old + 1;
		if (ofs >= BUILD_LUA_CALL_QUEUE_SIZE)
			ofs = 0;
	}
	while (!atomic_cas32(&env->queue_tail, ofs, old));

	//Got slot, copy except command
	memcpy(&env->queue[ofs].data, &op->data, sizeof(op->data) + sizeof(lua_arg_t));
	//Now set command, completing insert
	env->queue[ofs].cmd = op->cmd;
}
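The two comments above describe the ordering that makes the insert safe: the slot is reserved with a CAS on the tail index, the payload is copied, and only then is cmd written, so a consumer that observes a non-zero command also observes a complete entry. A minimal sketch of that reserve-then-publish pattern follows, using C11 atomics and hypothetical names (slot_t, queue, queue_tail, queue_push); the real lua_push_op stores Lua-specific op data instead of a raw byte payload.

#include <stdatomic.h>
#include <string.h>

#define QUEUE_SIZE 256

typedef struct {
	atomic_int cmd;        /* 0 means the slot is empty / not yet published */
	char       data[64];
} slot_t;

static slot_t      queue[QUEUE_SIZE];
static atomic_uint queue_tail;

static void
queue_push(int cmd, const void* payload, size_t size) {
	unsigned int old_tail, new_tail;
	/* Reserve a slot by advancing the shared tail index, wrapping at the end */
	do {
		old_tail = atomic_load(&queue_tail);
		new_tail = old_tail + 1;
		if (new_tail >= QUEUE_SIZE)
			new_tail = 0;
	} while (!atomic_compare_exchange_weak(&queue_tail, &old_tail, new_tail));

	/* Copy the payload first, then publish by writing the command last; the
	   (sequentially consistent) atomic store orders the copy before the point
	   where a consumer can observe the slot as used */
	memcpy(queue[new_tail].data, payload, size);
	atomic_store(&queue[new_tail].cmd, cmd);
}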
Example #16
File: buffer.c Project: ifzz/render_lib
void
render_buffer_unlock(object_t id) {
	render_buffer_t* buffer = GET_BUFFER(id);
	if (!atomic_load32(&buffer->locks))
		return;
	if (atomic_decr32(&buffer->locks) == 0) {
		buffer->access = nullptr;
		if ((buffer->flags & RENDERBUFFER_LOCK_WRITE) && !(buffer->flags & RENDERBUFFER_LOCK_NOUPLOAD)) {
			buffer->flags |= RENDERBUFFER_DIRTY;
			if ((buffer->policy == RENDERBUFFER_UPLOAD_ONUNLOCK) ||
			        (buffer->flags & RENDERBUFFER_LOCK_FORCEUPLOAD))
				render_buffer_upload(buffer);
		}
		buffer->flags &= ~RENDERBUFFER_LOCK_BITS;
	}
	render_buffer_destroy(id);
}
Example #17
void* objectmap_lookup_ref( const objectmap_t* map, object_t id )
{
	void* object;
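	//Atomically take a reference on the object mapped to this id; a failed
	//compare-and-swap re-reads the slot and retries, and 0 is returned once
	//the slot no longer holds an object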
	do
	{
		object = map->map[ id & map->mask_index ];
		if( object && !( (uintptr_t)object & 1 ) &&
		   ( ( *( (uint64_t*)object + 1 ) & map->mask_id ) == ( id & map->mask_id ) ) ) //ID in object is offset by 8 bytes
		{
			object_base_t* base_obj = object;
			int32_t ref = atomic_load32( &base_obj->ref );
			if( ref && atomic_cas32( &base_obj->ref, ref + 1, ref ) )
				return object;
		}
	} while( object );
	return 0;
}
Example #18
File: buffer.c Project: ifzz/render_lib
void
render_buffer_destroy(object_t id) {
	int32_t ref;
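	//Release one reference; when the last reference is dropped, free the map
	//slot, the backend resources and the buffer memory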
	render_buffer_t* buffer = GET_BUFFER(id);
	if (buffer) {
		do {
			ref = atomic_load32(&buffer->ref);
			if ((ref > 0) && atomic_cas32(&buffer->ref, ref - 1, ref)) {
				if (ref == 1) {
					objectmap_free(_render_map_buffer, id);
					buffer->backend->vtable.deallocate_buffer(buffer->backend, buffer, true, true);
					memory_deallocate(buffer);
				}
				return;
			}
		}
		while (ref > 0);
	}
}
Example #19
static void _profile_process_root_block( void )
{
	uint32_t block;
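	//Atomically detach the entire chain of pending root blocks so it can be
	//processed and freed without further synchronization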

	do
	{
		block = atomic_load32( &_profile_root );
	} while( block && !atomic_cas32( &_profile_root, 0, block ) );

	while( block )
	{
		profile_block_t* leaf;
		profile_block_t* current = GET_BLOCK( block );
		uint32_t next = current->sibling;

		current->sibling = 0;
		leaf = _profile_process_block( current );
		_profile_free_block( block, BLOCK_INDEX( leaf ) );

		block = next;
	}
}
Example #20
DECLARE_TEST(atomic, add) {
	size_t num_threads = math_clamp(system_hardware_threads() * 4, 4, 32);
	size_t ithread;
	thread_t threads[32];

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_initialize(&threads[ithread], add_thread, 0,
		                  STRING_CONST("add"), THREAD_PRIORITY_NORMAL, 0);
	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_start(&threads[ithread]);

	test_wait_for_threads_startup(threads, num_threads);
	test_wait_for_threads_finish(threads, num_threads);

	for (ithread = 0; ithread < num_threads; ++ithread)
		thread_finalize(&threads[ithread]);

	EXPECT_EQ(atomic_load32(&val_32), 0);
	EXPECT_EQ(atomic_load64(&val_64), 0);

	return 0;
}
Example #21
bool objectmap_lookup_unref( const objectmap_t* map, object_t id, object_deallocate_fn deallocate )
{
	void* object;
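	//Atomically release one reference on the mapped object, calling the
	//deallocator when the last reference is dropped; returns true if the
	//object is still alive after the release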
	do
	{
		object = map->map[ id & map->mask_index ];
		if( object && !( (uintptr_t)object & 1 ) &&
		   ( ( *( (uint64_t*)object + 1 ) & map->mask_id ) == ( id & map->mask_id ) ) ) //ID in object is offset by 8 bytes
		{
			object_base_t* base_obj = object;
			int32_t ref = atomic_load32( &base_obj->ref );
			if( ref && atomic_cas32( &base_obj->ref, ref - 1, ref ) )
			{
				if( ref == 1 )
				{
					deallocate( id, object );
					return false;
				}
				return true;
			}
		}
	} while( object );
	return false;
}
Example #22
File: context.c Project: ifzz/render_lib
size_t
render_context_reserved(render_context_t* context) {
	return atomic_load32(&context->reserved);
}
Example #23
size_t
render_context_reserved(render_context_t* context) {
	return (size_t)atomic_load32(&context->reserved, memory_order_acquire);
}
Example #24
void loom_tick()
{
    if (atomic_load32(&gLoomTicking) < 1)
    {

        // Signal that the app has really stopped execution
        if (atomic_load32(&gLoomPaused) == 0)
        {
            atomic_store32(&gLoomPaused, 1);
        }

        // Sleep for longer while paused.
        // Since graphics aren't running in a paused state, there is no yielding;
        // we would otherwise run in a busy loop without sleeping.
        loom_thread_sleep(30);

        return;
    }

    atomic_store32(&gLoomPaused, 0);

    Telemetry::beginTick();
    
    LOOM_PROFILE_START(loom_tick);

    LSLuaState *vm = NULL;

    vm = LoomApplication::getReloadQueued() ? NULL : LoomApplication::getRootVM();

    // Mark the main thread for NativeDelegates. On some platforms this
    // may change so we remark every frame.
    NativeDelegate::markMainThread();
    if (vm) NativeDelegate::executeDeferredCalls(vm->VM());

    performance_tick();

    profilerBlock_t p = { "loom_tick", platform_getMilliseconds(), 17 };
    
    if (LoomApplication::getReloadQueued())
    {
        LoomApplication::reloadMainAssembly();
    }
    else
    {
        if (vm)
        {
            // https://theengineco.atlassian.net/browse/LOOM-468
            // decouple debugger enabled from connection time
            // as the debugger matures this may change a bit
            if (LoomApplicationConfig::waitForDebugger() > 0)
            {
                vm->invokeStaticMethod("system.debugger.DebuggerClient", "update");
            }

            LoomApplication::ticks.invoke();
        }
    }
    
    loom_asset_pump();
    
    platform_HTTPUpdate();
    
    GFX::Texture::tick();
    
    if (Loom2D::Stage::smMainStage) Loom2D::Stage::smMainStage->invokeRenderStage();
    
    finishProfilerBlock(&p);
    
    LOOM_PROFILE_END(loom_tick);
    
    LOOM_PROFILE_ZERO_CHECK()
    
    Telemetry::endTick();

}