Example #1
void profile_initialize( const char* identifier, void* buffer, uint64_t size )
{
	profile_block_t* root  = buffer;
	profile_block_t* block = root;
	uint64_t num_blocks = size / sizeof( profile_block_t );
	uint32_t i;

	//Block indices are 16-bit (note the 0xffff mask in profile_shutdown), so clamp the count
	if( num_blocks > 65535 )
		num_blocks = 65535;

	//Chain all blocks into a free list through the child index
	for( i = 0; i < ( num_blocks - 1 ); ++i, ++block )
	{
		block->child = ( i + 1 );
		block->sibling = 0;
	}
	//Terminate the list in the last block and mark the root as having no children
	block->child = 0;
	block->sibling = 0;
	root->child = 0;

	atomic_store32( &_profile_root, 0 );

	_profile_num_blocks = num_blocks;
	_profile_identifier = identifier;
	_profile_blocks = root;
	atomic_store32( &_profile_free, 1 ); //TODO: Currently 0 is a no-block identifier, so we waste the first block
	atomic_store32( &_profile_counter, 128 );
	_profile_ground_time = time_current();
	set_thread_profile_block( 0 );

	log_debugf( 0, "Initialize profiling system with %llu blocks (%lluKiB)", num_blocks, size / 1024 );
}
Example #2
DECLARE_TEST( profile, initialize )
{
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32( &_test_profile_output_counter, 0 );

	profile_initialize( "test_profile", _test_profile_buffer, _test_profile_buffer_size );
	profile_enable( true );

	profile_log( "testing" );

	thread_sleep( 1000 );

	profile_enable( false );
	profile_shutdown();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT( atomic_load32( &_test_profile_output_counter ), 0 );
#else
	EXPECT_EQ( atomic_load32( &_test_profile_output_counter ), 0 );
#endif

	err = error();
	EXPECT_EQ( err, ERROR_NONE );

	return 0;
}
Example #3
static void
_mutex_initialize(mutex_t* mutex, const char* name, size_t length) {
	mutex->name = string_to_const(string_copy(mutex->name_buffer, 32, name, length));

#if FOUNDATION_PLATFORM_WINDOWS
	InitializeCriticalSectionAndSpinCount((CRITICAL_SECTION*)mutex->csection, 4000);
	mutex->event = CreateEvent(0, TRUE, FALSE, 0);
	atomic_store32(&mutex->waiting, 0);
#elif FOUNDATION_PLATFORM_POSIX || FOUNDATION_PLATFORM_PNACL
	mutex->pending = false;

	pthread_mutexattr_t attr;
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

	pthread_cond_init(&mutex->cond, 0);
	pthread_mutex_init(&mutex->mutex, &attr);

	pthread_mutexattr_destroy(&attr);
#else
#  error _mutex_initialize not implemented
#endif

	mutex->lockcount = 0;
	mutex->lockedthread = 0;
}
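
A matching teardown is not included in these excerpts; a hypothetical counterpart under the same platform switches (assuming the symmetric name _mutex_finalize, which is not shown in the source) could look like this:

//Hypothetical sketch, not part of the excerpts above
static void
_mutex_finalize(mutex_t* mutex) {
#if FOUNDATION_PLATFORM_WINDOWS
	CloseHandle(mutex->event);
	DeleteCriticalSection((CRITICAL_SECTION*)mutex->csection);
#elif FOUNDATION_PLATFORM_POSIX || FOUNDATION_PLATFORM_PNACL
	pthread_cond_destroy(&mutex->cond);
	pthread_mutex_destroy(&mutex->mutex);
#else
#  error _mutex_finalize not implemented
#endif
}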
Example #4
DECLARE_TEST(profile, initialize) {
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32(&_test_profile_output_counter, 0);

	profile_initialize(STRING_CONST("test_profile"), _test_profile_buffer, TEST_PROFILE_BUFFER_SIZE);
	profile_enable(true);

	profile_log(STRING_CONST("testing"));

	thread_sleep(1000);

	profile_enable(false);
	profile_finalize();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT(atomic_load32(&_test_profile_output_counter), 0);
#else
	EXPECT_EQ(atomic_load32(&_test_profile_output_counter), 0);
#endif

	err = error();
	EXPECT_EQ(err, ERROR_NONE);

	return 0;
}
Example #5
void profile_shutdown( void )
{
	profile_enable( 0 );

	while( thread_is_thread( _profile_io_thread ) )
		thread_sleep( 1 );
	_profile_io_thread = 0;

	//Discard and free up blocks remaining in queue
	_profile_thread_finalize();
	if( atomic_load32( &_profile_root ) )
		_profile_process_root_block();

	//Sanity checks
	{
		uint64_t num_blocks = 0;
		uint32_t free_block = atomic_load32( &_profile_free ) & 0xffff;

		if( atomic_load32( &_profile_root ) )
			log_error( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, at least one root block still allocated/active" );

		while( free_block )
		{
			profile_block_t* block = GET_BLOCK( free_block );
			if( block->sibling )
				log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, block %d has sibling set", free_block );
			++num_blocks;
			free_block = GET_BLOCK( free_block )->child;
		}
		if( _profile_num_blocks )
			++num_blocks; //Include the wasted block 0

		if( num_blocks != _profile_num_blocks )
		{
			//If profile output function (user) crashed, this will probably trigger since at least one block will be lost in space
			log_errorf( 0, ERROR_INTERNAL_FAILURE, "Profile module state inconsistent on shutdown, lost blocks (found %llu of %llu)", num_blocks, _profile_num_blocks );
		}
	}

	atomic_store32( &_profile_root, 0 );
	atomic_store32( &_profile_free, 0 );

	_profile_num_blocks = 0;
	_profile_identifier = 0;
}
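
The shutdown walk above resolves free-list indices through a GET_BLOCK macro that is not part of these excerpts. Since profile_initialize (Example #1) chains blocks by array index and reserves index 0 as the no-block marker, a minimal definition consistent with both functions might be:

//Assumed definition, for illustration only; the real macro is not shown here.
//A 16-bit free-list index maps directly into the block array
#define GET_BLOCK( index ) ( _profile_blocks + (index) )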
Example #6
DECLARE_TEST( profile, thread )
{
	object_t thread[32];
	int ith;
	int frame;
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32( &_test_profile_output_counter, 0 );

	profile_initialize( "test_profile", _test_profile_buffer, 30000/*_test_profile_buffer_size*/ );
	profile_enable( true );
	profile_set_output_wait( 1 );

	log_info( HASH_TEST, "This test will intentionally run out of memory in profiling system" );
	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( _profile_fail_thread, "profile_thread", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], 0 );
	}

	test_wait_for_threads_startup( thread, 32 );

	for( frame = 0; frame < 1000; ++frame )
	{
		thread_sleep( 16 );
		profile_end_frame( frame );
	}

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
		thread_yield();
	}

	test_wait_for_threads_exit( thread, 32 );

	thread_sleep( 1000 );

	profile_enable( false );
	profile_shutdown();

	err = error();

#if BUILD_ENABLE_PROFILE
	EXPECT_GT( atomic_load32( &_test_profile_output_counter ), 0 );
	//TODO: Implement parsing output results
#else
	EXPECT_EQ( atomic_load32( &_test_profile_output_counter ), 0 );
#endif
	EXPECT_EQ( err, ERROR_NONE );

	return 0;
}
Example #7
static void*
objectmap_thread(void* arg) {
	objectmap_t* map;
	object_base_t* objects;
	int obj;
	int loop;
	object_base_t* lookup;

	map = arg;
	objects = memory_allocate(0, sizeof(object_base_t) * 512, 16,
	                          MEMORY_PERSISTENT | MEMORY_ZERO_INITIALIZED);

	thread_sleep(10);

	for (loop = 0; loop < 32; ++loop) {
		thread_yield();

		for (obj = 0; obj < 512; ++obj) {
			atomic_store32(&objects[obj].ref, 1);
			objects[obj].id = objectmap_reserve(map);
			EXPECT_NE_MSGFORMAT(objects[obj].id, 0, "Unable to reserve slot for object num %d", obj);
			EXPECT_EQ_MSGFORMAT(objectmap_lookup(map, objects[obj].id), 0,
			                    "Object %d (%" PRIx64 ") already stored in map in loop %d",
			                    obj, objects[obj].id, loop);
			EXPECT_TRUE(objectmap_set(map, objects[obj].id, objects + obj));
			lookup = objectmap_lookup(map, objects[obj].id);
			EXPECT_NE_MSGFORMAT(lookup, 0, "Object num %d (%" PRIx64 ") not set in map, got null on lookup in loop %d",
			                    obj, objects[obj].id, loop);
			EXPECT_EQ_MSGFORMAT(lookup, objects + obj,
			                    "Object %d (%" PRIx64 ") 0x%" PRIfixPTR " was not set at reserved slot in map, got object 0x%"
			                    PRIfixPTR " in loop %d", obj, objects[obj].id, (uintptr_t)(objects + obj), (uintptr_t)lookup, loop);
		}

		thread_yield();

		for (obj = 0; obj < 512; ++obj) {
			void* raw = map->map[ objects[obj].id & map->mask_index ];
			lookup = objectmap_lookup(map, objects[obj].id);
			EXPECT_NE_MSGFORMAT(lookup, 0, "Object 0x%" PRIfixPTR " num %d (%" PRIx64 ") not set in map, got null on lookup in loop %d (raw 0x%" PRIfixPTR ")",
			                    (uintptr_t)(objects + obj), obj, objects[obj].id, loop, (uintptr_t)raw);
			EXPECT_EQ_MSGFORMAT(lookup, objects + obj,
			                    "Object %d (%" PRIx64 ") 0x%" PRIfixPTR " was not set at reserved slot in map, got object 0x%"
			                    PRIfixPTR " in loop %d", obj, objects[obj].id, (uintptr_t)(objects + obj), (uintptr_t)lookup, loop);
			EXPECT_TRUE(objectmap_free(map, objects[obj].id));
			lookup = objectmap_lookup(map, objects[obj].id);
			EXPECT_EQ_MSGFORMAT(lookup, 0,
			                    "Object %d (%" PRIx64 ") 0x%" PRIfixPTR " still set in map, got non-null (0x%" PRIfixPTR ") on lookup in loop %d", obj,
			                    objects[obj].id, (uintptr_t)(objects + obj), (uintptr_t)lookup, loop);
		}
	}

	memory_deallocate(objects);

	return 0;
}
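
The thread function above expects all workers to share one map. A hypothetical driver reusing the thread API shown in Example #8 (thread_t, thread_initialize, thread_start, test_wait_for_threads_finish) might look like the sketch below; the map size of 4096 is an assumption chosen so four workers can each hold their 512 reservations at once:

//Hypothetical driver, not in the excerpts: share one map across workers
objectmap_t* map = objectmap_allocate(4096);
thread_t workers[4];
int ith;
for (ith = 0; ith < 4; ++ith)
	thread_initialize(&workers[ith], objectmap_thread, map,
	                  STRING_CONST("objectmap_thread"), THREAD_PRIORITY_NORMAL, 0);
for (ith = 0; ith < 4; ++ith)
	thread_start(&workers[ith]);
test_wait_for_threads_finish(workers, 4);
for (ith = 0; ith < 4; ++ith)
	thread_finalize(&workers[ith]);
objectmap_deallocate(map);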
Example #8
DECLARE_TEST(profile, thread) {
	thread_t thread[32];
	int ith;
	uint64_t frame;
	error_t err = error();

	_test_profile_offset = 0;
	atomic_store32(&_test_profile_output_counter, 0);

	profile_initialize(STRING_CONST("test_profile"), _test_profile_buffer, 30000);
	profile_enable(true);
	profile_set_output_wait(1);

	log_enable_stdout(false);
	for (ith = 0; ith < 32; ++ith)
		thread_initialize(&thread[ith], _profile_fail_thread, 0, STRING_CONST("profile_thread"),
		                  THREAD_PRIORITY_NORMAL, 0);
	for (ith = 0; ith < 32; ++ith)
		thread_start(&thread[ith]);

	test_wait_for_threads_startup(thread, 32);

	for (frame = 0; frame < 1000; ++frame) {
		thread_sleep(16);
		profile_end_frame(frame);
	}

	for (ith = 0; ith < 32; ++ith)
		thread_signal(&thread[ith]);

	test_wait_for_threads_finish(thread, 32);

	for (ith = 0; ith < 32; ++ith)
		thread_finalize(&thread[ith]);
	log_enable_stdout(true);

	err = error();
	thread_sleep(1000);

	profile_enable(false);
	profile_finalize();

#if BUILD_ENABLE_PROFILE
	EXPECT_INTGT(atomic_load32(&_test_profile_output_counter), 0);
	//TODO: Implement parsing output results
#else
	EXPECT_INTEQ(atomic_load32(&_test_profile_output_counter), 0);
#endif
	EXPECT_INTEQ(err, ERROR_NONE);

	return 0;
}
Example #9
lua_t*
lua_allocate(void) {
	lua_t* env = memory_allocate(HASH_LUA, sizeof(lua_t), 0,
	                             MEMORY_PERSISTENT | MEMORY_32BIT_ADDRESS | MEMORY_ZERO_INITIALIZED);

	//Foundation allocators can meet demands of luajit on both 32 and 64 bit platforms
	lua_State* state = lua_newstate(lua_allocator, env);
	if (!state) {
		log_error(HASH_LUA, ERROR_INTERNAL_FAILURE, STRING_CONST("Unable to allocate Lua state"));
		memory_deallocate(env);
		return 0;
	}

	lua_atpanic(state, lua_panic);

	//Disable automagic gc
	lua_gc(state, LUA_GCSTOP, 0);

	lua_pushlightuserdata(state, env);
	lua_setlglobal(state, "__environment", 13);

	env->state = state;
	env->calldepth = 0;

#if BUILD_ENABLE_LUA_THREAD_SAFE
	semaphore_initialize(&env->execution_right, 1);
	env->queue_head = 0;
	atomic_store32(&env->queue_tail, 0);
#endif

	int stacksize = lua_gettop(state);

	luaL_openlibs(state);

	lua_module_registry_initialize(state);

	lua_pop(state, lua_gettop(state) - stacksize);

	return env;
}
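
No teardown is shown in these excerpts; a hypothetical counterpart (assuming the symmetric name lua_deallocate and foundation's semaphore_finalize) would release the Lua state and then the environment:

//Hypothetical sketch matching lua_allocate above; assumed name, not shown in the source
void
lua_deallocate(lua_t* env) {
	if (!env)
		return;
	//Closing the state runs remaining finalizers and frees all Lua-owned memory
	lua_close(env->state);
#if BUILD_ENABLE_LUA_THREAD_SAFE
	semaphore_finalize(&env->execution_right);
#endif
	memory_deallocate(env);
}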
Example #10
object_t
render_vertexbuffer_create(render_backend_t* backend, render_usage_t usage, size_t vertices,
                           const render_vertex_decl_t* decl, const void* data) {
	object_t id = objectmap_reserve(_render_map_buffer);
	if (!id) {
		log_error(HASH_RENDER, ERROR_OUT_OF_MEMORY,
		          STRING_CONST("Unable to allocate vertex buffer, out of slots in object map"));
		return 0;
	}

	memory_context_push(HASH_RENDER);

	render_vertexbuffer_t* buffer = memory_allocate(HASH_RENDER, sizeof(render_vertexbuffer_t), 0,
	                                                MEMORY_PERSISTENT | MEMORY_ZERO_INITIALIZED);
	buffer->id         = id;
	buffer->backend    = backend;
	buffer->usage      = usage;
	buffer->buffertype = RENDERBUFFER_VERTEX;
	buffer->policy     = RENDERBUFFER_UPLOAD_ONDISPATCH;
	buffer->size       = decl->size;
	memcpy(&buffer->decl, decl, sizeof(render_vertex_decl_t));
	atomic_store32(&buffer->ref, 1);
	objectmap_set(_render_map_buffer, id, buffer);

	if (vertices) {
		buffer->allocated = vertices;
		buffer->used = vertices;
		buffer->store = backend->vtable.allocate_buffer(backend, (render_buffer_t*)buffer);
		if (data) {
			memcpy(buffer->store, data, vertices * buffer->size);
			buffer->flags |= RENDERBUFFER_DIRTY;
		}
	}

	memory_context_pop();

	return id;
}
Example #11
DECLARE_TEST(objectmap, store) {
	objectmap_t* map;
	object_base_t first;
	object_base_t second;
	object_base_t third;

	map = objectmap_allocate(129);
	
	memset(&first, 0, sizeof(first));
	memset(&second, 0, sizeof(second));
	memset(&third, 0, sizeof(third));
	atomic_store32(&first.ref, 1);
	atomic_store32(&second.ref, 1);
	atomic_store32(&third.ref, 1);
	first.id = 1;
	second.id = 2;
	third.id = 3;

	EXPECT_EQ(objectmap_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, 1), 0);

	first.id = objectmap_reserve(map);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);

	second.id = objectmap_reserve(map);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);

	objectmap_set(map, first.id, &first);
	EXPECT_EQ(objectmap_lookup(map, first.id), &first);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), &first);
	EXPECT_EQ(objectmap_lookup(map, second.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);

	objectmap_set(map, second.id, &second);
	EXPECT_EQ(objectmap_lookup(map, first.id), &first);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), &first);
	EXPECT_EQ(objectmap_lookup(map, second.id), &second);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), &second);

	objectmap_free(map, first.id);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), &second);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), &second);

	objectmap_free(map, first.id);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), &second);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), &second);

	objectmap_free(map, second.id);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);

	objectmap_deallocate(map);

	//Size should be clamped to three
	map = objectmap_allocate(1);

	EXPECT_EQ(objectmap_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, 1), 0);

	first.id = objectmap_reserve(map);
	EXPECT_TYPENE(first.id, 0, object_t, PRIx64);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);

	second.id = objectmap_reserve(map);
	EXPECT_TYPENE(second.id, 0, object_t, PRIx64);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);

	third.id = objectmap_reserve(map);
	EXPECT_TYPENE(third.id, 0, object_t, PRIx64);
	EXPECT_EQ(objectmap_lookup(map, first.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup(map, second.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);
	EXPECT_EQ(objectmap_lookup(map, third.id), 0);
	EXPECT_EQ(objectmap_raw_lookup(map, 2), 0);

	objectmap_set(map, first.id, &first);
	objectmap_set(map, second.id, &second);
	objectmap_set(map, third.id, &third);

	log_enable_stdout(false);
	EXPECT_TYPEEQ(objectmap_reserve(map), 0, object_t, PRIx64);
	EXPECT_TYPEEQ(objectmap_reserve(map), 0, object_t, PRIx64);
	log_enable_stdout(true);

	objectmap_free(map, first.id);
	objectmap_free(map, second.id);
	//Leak one object
	//objectmap_free(map, third.id);
	EXPECT_EQ(objectmap_lookup_ref(map, first.id), nullptr);
	EXPECT_EQ(objectmap_raw_lookup(map, 0), 0);
	EXPECT_EQ(objectmap_lookup_ref(map, second.id), nullptr);
	EXPECT_EQ(objectmap_raw_lookup(map, 1), 0);
	EXPECT_NE(objectmap_lookup_ref(map, third.id), nullptr);
	EXPECT_NE(objectmap_raw_lookup(map, 2), 0);

	log_enable_stdout(false);
	objectmap_deallocate(map);
	log_enable_stdout(true);

	return 0;
}
Example #12
void _object_initialize( object_base_t* obj, object_t id )
{
	obj->id = id;
	atomic_store32( &obj->ref, 1 );
}
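
Examples #7 and #11 perform this setup by hand with memset plus atomic_store32; with the helper, the same setup collapses to the sketch below (the map and its size are assumptions, following Example #11):

object_base_t obj;
objectmap_t* map = objectmap_allocate(129); //as in Example #11
memset(&obj, 0, sizeof(obj));
_object_initialize(&obj, objectmap_reserve(map)); //id from the map, ref count starts at 1
objectmap_set(map, obj.id, &obj);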
void loom_tick()
{
    if (atomic_load32(&gLoomTicking) < 1)
    {

        // Signal that the app has really stopped execution
        if (atomic_load32(&gLoomPaused) == 0)
        {
            atomic_store32(&gLoomPaused, 1);
        }

        // Sleep for longer while paused.
        // Since graphics aren't running in a paused state there is no yielding;
        // without sleeping here we would otherwise spin in a busy loop.
        loom_thread_sleep(30);

        return;
    }

    atomic_store32(&gLoomPaused, 0);

    Telemetry::beginTick();
    
    LOOM_PROFILE_START(loom_tick);

    LSLuaState *vm = NULL;

    vm = LoomApplication::getReloadQueued() ? NULL : LoomApplication::getRootVM();

    // Mark the main thread for NativeDelegates. On some platforms this
    // may change so we remark every frame.
    NativeDelegate::markMainThread();
    if (vm) NativeDelegate::executeDeferredCalls(vm->VM());

    performance_tick();

    profilerBlock_t p = { "loom_tick", platform_getMilliseconds(), 17 };
    
    if (LoomApplication::getReloadQueued())
    {
        LoomApplication::reloadMainAssembly();
    }
    else
    {
        if (vm)
        {
            // https://theengineco.atlassian.net/browse/LOOM-468
            // decouple debugger enabled from connection time
            // as the debugger matures this may change a bit
            if (LoomApplicationConfig::waitForDebugger() > 0)
            {
                vm->invokeStaticMethod("system.debugger.DebuggerClient", "update");
            }

            LoomApplication::ticks.invoke();
        }
    }
    
    loom_asset_pump();
    
    platform_HTTPUpdate();
    
    GFX::Texture::tick();
    
    if (Loom2D::Stage::smMainStage) Loom2D::Stage::smMainStage->invokeRenderStage();
    
    finishProfilerBlock(&p);
    
    LOOM_PROFILE_END(loom_tick);
    
    LOOM_PROFILE_ZERO_CHECK()
    
    Telemetry::endTick();

}