Example #1
/*************************************************************************************************
 * Threads
 */
mt_thread mt_thread_create(
    pfn_mt_thread_kernel kernel_fn, pfn_mt_thread_init init_fn, pfn_mt_thread_release release_fn,
    enum mt_thread_priority pr, size_t local_mem_sz, size_t tmp_mem_sz, void* param1, void* param2)
{
    static uint count = 0;
    result_t r;
    mt_thread thread = (mt_thread)ALLOC(sizeof(struct mt_thread_data), 0);
    if (thread == NULL)
        return NULL;
    memset(thread, 0x00, sizeof(struct mt_thread_data));

    if (local_mem_sz > 0)   {
        r = mem_freelist_create(mem_heap(), &thread->local_mem, local_mem_sz, 0);
        if (IS_FAIL(r)) {
            FREE(thread);
            return NULL;
        }
        mem_freelist_bindalloc(&thread->local_mem, &thread->local_alloc);
    }

    if (tmp_mem_sz > 0) {
        r = mem_stack_create(mem_heap(), &thread->tmp_mem, tmp_mem_sz, 0);
        if (IS_FAIL(r)) {
            FREE(thread);
            return NULL;
        }
        mem_stack_bindalloc(&thread->tmp_mem, &thread->tmp_alloc);
    }

    thread->kernel_fn = kernel_fn;
    thread->init_fn = init_fn;
    thread->release_fn = release_fn;
    thread->param1 = param1;
    thread->param2 = param2;
    thread->pr = pr;

    char e1name[32];
    char e2name[32];
    sprintf(e1name, "stop:t(%u)", count);
    sprintf(e2name, "resume:t(%u)", count);
    count++;

    thread->events[EVENT_STOP] = CreateEvent(NULL, TRUE, FALSE, e1name);
    thread->events[EVENT_RESUME] = CreateEvent(NULL, TRUE, TRUE, e2name);

    if (thread->events[EVENT_STOP] == NULL || thread->events[EVENT_RESUME] == NULL) {
        mt_thread_destroy(thread);
        return NULL;
    }

    HANDLE t = CreateThread(NULL, 0, thread_callback, thread, 0, (DWORD*)&thread->id);
    if (t == NULL) {
        mt_thread_destroy(thread);
        return NULL;
    }

    thread->t = t;

    switch (pr)   {
    case MT_THREAD_NORMAL:    SetThreadPriority(thread->t, THREAD_PRIORITY_NORMAL);    break;
    case MT_THREAD_HIGH:      SetThreadPriority(thread->t, THREAD_PRIORITY_HIGHEST);   break;
    case MT_THREAD_LOW:       SetThreadPriority(thread->t, THREAD_PRIORITY_LOWEST);    break;
    }

    return thread;
}
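
A minimal usage sketch (not part of the listing above): it assumes the kernel callback takes the thread handle and returns a result code, which is only a guess at the pfn_mt_thread_kernel typedef; mt_thread_destroy is the same cleanup call the function itself uses in its error path.

static result_t worker_kernel(mt_thread thread)
{
    /* assumed callback signature: do one unit of work, return a result code */
    return RET_OK;
}

void spawn_worker(void)
{
    /* 64 KB free-list for long-lived allocs, 16 KB stack for per-iteration temps */
    mt_thread t = mt_thread_create(worker_kernel, NULL, NULL, MT_THREAD_NORMAL,
        64*1024, 16*1024, NULL, NULL);
    if (t == NULL)
        return;     /* allocator, event or OS thread creation failed */

    /* ... */

    mt_thread_destroy(t);
}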
Example #2
struct gfx_model* gfx_model_load(struct allocator* alloc, const char* h3dm_filepath,
    uint thread_id)
{
	struct allocator* tmp_alloc = tsk_get_tmpalloc(thread_id);
	A_SAVE(tmp_alloc);

	struct h3d_header header;
    struct h3d_model h3dmodel;
	struct gfx_model* model = NULL;
    uint renderable_idx = 0;
    struct stack_alloc stack_mem;
    struct allocator stack_alloc;
    result_t r;

    memset(&stack_mem, 0x00, sizeof(stack_mem));

	file_t f = fio_openmem(tmp_alloc, h3dm_filepath, FALSE, MID_GFX);
	if (f == NULL)	{
		err_printf(__FILE__, __LINE__, "load model '%s' failed: could not open file", h3dm_filepath);
		goto err_cleanup;
	}

	/* header */
    fio_read(f, &header, sizeof(header), 1);
	if (header.sign != H3D_SIGN || header.type != H3D_MESH)	{
		err_printf(__FILE__, __LINE__, "load model '%s' failed: invalid file format", h3dm_filepath);
		goto err_cleanup;
	}

    if (header.version != H3D_VERSION && header.version != H3D_VERSION_13)  {
        err_printf(__FILE__, __LINE__, "load model '%s' failed: file version not implemented/obsolete",
            h3dm_filepath);
        goto err_cleanup;
    }

    /* model */
    fio_read(f, &h3dmodel, sizeof(h3dmodel), 1);

    /* calculate size and create stack allocator for subsequent allocations */
    size_t total_sz =
        sizeof(struct gfx_model) +
        h3dmodel.node_cnt*sizeof(struct gfx_model_node) + 16 +
        h3dmodel.node_cnt*sizeof(uint) +
        h3dmodel.geo_cnt*sizeof(struct gfx_model_geo) +
        h3dmodel.mesh_cnt*sizeof(struct gfx_model_mesh) +
        h3dmodel.mtl_cnt*sizeof(struct gfx_model_mtl) +
        h3dmodel.has_occ*sizeof(struct gfx_model_occ) +
        h3dmodel.total_childidxs*sizeof(uint) +
        h3dmodel.total_geo_subsets*sizeof(struct gfx_model_geosubset) +
        h3dmodel.total_joints*sizeof(struct gfx_model_joint) +
        h3dmodel.total_joints*sizeof(struct mat3f) +
        h3dmodel.total_submeshes*sizeof(struct gfx_model_submesh) +
        h3dmodel.total_skeletons*sizeof(struct gfx_model_skeleton) +
        h3dmodel.total_skeletons*32 + /* 2 aligned allocs per skeleton */
        h3dmodel.total_maps*sizeof(struct gfx_model_map) +
        h3dmodel.occ_idx_cnt*sizeof(uint16) +
        h3dmodel.occ_vert_cnt*sizeof(struct vec3f) +
        h3dmodel.has_occ*16; /* 1 aligned alloc for occ */
    r = mem_stack_create(alloc, &stack_mem, total_sz, MID_GFX);
    if (IS_FAIL(r)) {
        err_printn(__FILE__, __LINE__, RET_OUTOFMEMORY);
        goto err_cleanup;
    }
    mem_stack_bindalloc(&stack_mem, &stack_alloc);

    /* main model struct, allocated from the model's stack block */
    model = (struct gfx_model*)A_ALLOC(&stack_alloc, sizeof(struct gfx_model), MID_GFX);
    if (model == NULL)	{
        err_printn(__FILE__, __LINE__, RET_OUTOFMEMORY);
        goto err_cleanup;
    }
    memset(model, 0x00, sizeof(struct gfx_model));
    model->alloc = alloc;

	/* nodes */
	if (h3dmodel.node_cnt > 0)	{
		model->nodes = (struct gfx_model_node*)A_ALIGNED_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_node)*h3dmodel.node_cnt, MID_GFX);
        ASSERT(model->nodes);
		memset(model->nodes, 0x00, sizeof(struct gfx_model_node)*h3dmodel.node_cnt);

		for (uint i = 0; i < h3dmodel.node_cnt; i++)	{
			struct gfx_model_node* node = &model->nodes[i];
			if (!model_loadnode(node, f, &stack_alloc))
				goto err_cleanup;

            /* NOTE: we set root matrix to identity and keep the old one as "root_mat" */
            if (i == 0) {
                mat3_setm(&model->root_mat, &node->local_mat);
                mat3_set_ident(&node->local_mat);
            }
			model->node_cnt ++;
		}
	}

	/* meshes */
	if (h3dmodel.mesh_cnt > 0)	{
		model->meshes = (struct gfx_model_mesh*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_mesh)*h3dmodel.mesh_cnt, MID_GFX);
		ASSERT(model->meshes);
		memset(model->meshes, 0x00, sizeof(struct gfx_model_mesh)*h3dmodel.mesh_cnt);
		uint idx = 0;

		for (uint i = 0; i < h3dmodel.mesh_cnt; i++)	{
			struct gfx_model_mesh* mesh = &model->meshes[i];
			if (!model_loadmesh(mesh, f, &stack_alloc))
				goto err_cleanup;

			/* assign global indexes */
			for (uint k = 0; k < mesh->submesh_cnt; k++)
				mesh->submeshes[k].offset_idx = idx++;

			model->mesh_cnt ++;
		}
	}

	/* geos */
	if (h3dmodel.geo_cnt > 0)	{
		model->geos = (struct gfx_model_geo*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_geo)*h3dmodel.geo_cnt, MID_GFX);
		ASSERT(model->geos);

		memset(model->geos, 0x00, sizeof(struct gfx_model_geo)*h3dmodel.geo_cnt);
		for (uint i = 0; i < h3dmodel.geo_cnt; i++)	{
			struct gfx_model_geo* geo = &model->geos[i];
			if (!model_loadgeo(geo, f, &stack_alloc, tmp_alloc, thread_id))
				goto err_cleanup;
			model->geo_cnt ++;
		}
	}

	/* materials */
	if (h3dmodel.mtl_cnt > 0)	{
		model->mtls = (struct gfx_model_mtl*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_mtl)*h3dmodel.mtl_cnt, MID_GFX);
		ASSERT(model->mtls);

		memset(model->mtls, 0x00, sizeof(struct gfx_model_mtl)*h3dmodel.mtl_cnt);
		for (uint i = 0; i < h3dmodel.mtl_cnt; i++)	{
			struct gfx_model_mtl* mtl = &model->mtls[i];
			if (!model_loadmtl(mtl, f, &stack_alloc))
                goto err_cleanup;
			model->mtl_cnt ++;
		}
	}

    if (header.version >= H3D_VERSION_11 && h3dmodel.has_occ)   {
        model->occ = (struct gfx_model_occ*)A_ALLOC(&stack_alloc, sizeof(struct gfx_model_occ),
            MID_GFX);
        ASSERT(model->occ);

        memset(model->occ, 0x00, sizeof(struct gfx_model_occ));
        if (!model_loadocc(model->occ, f, &stack_alloc))
            goto err_cleanup;
    }

    /* populate renderable nodes */
    model->renderable_idxs = (uint*)A_ALLOC(&stack_alloc, sizeof(uint)*h3dmodel.node_cnt,
        MID_GFX);
    ASSERT(model->renderable_idxs);

    for (uint i = 0; i < h3dmodel.node_cnt; i++)	{
        struct gfx_model_node* node = &model->nodes[i];
        if (node->mesh_id != INVALID_INDEX)
            model->renderable_idxs[renderable_idx++] = i;
    }
    model->renderable_cnt = renderable_idx;

    /* calculate sum of aabb(s) from renderable nodes */
	aabb_setzero(&model->bb);
    struct mat3f node_mat;  /* transform matrix, relative to model */
	for (uint i = 0; i < renderable_idx; i++)   {
        struct gfx_model_node* node = &model->nodes[model->renderable_idxs[i]];
        mat3_setm(&node_mat, &node->local_mat);
        struct gfx_model_node* pnode = node;
        while (pnode->parent_id != INVALID_INDEX)	{
            pnode = &model->nodes[pnode->parent_id];
            mat3_mul(&node_mat, &node_mat, &pnode->local_mat);
        }
        if (node->parent_id != INVALID_INDEX)
            mat3_mul(&node_mat, &node_mat, &model->root_mat);

        /* transform local box to model-relative bounding box and merge with final */
        struct aabb bb;
        aabb_xform(&bb, &model->nodes[model->renderable_idxs[i]].bb, &node_mat);
		aabb_merge(&model->bb, &model->bb, &bb);
    }
    /* for empty models, we set a minimal bounding-box */
    if (aabb_iszero(&model->bb))    {
        aabb_pushptf(&model->bb, 0.1f, 0.1f, 0.1f);
        aabb_pushptf(&model->bb, -0.1f, -0.1f, -0.1f);
    }

    fio_close(f);
	A_LOAD(tmp_alloc);

    if (thread_id != 0) {
        gfx_delayed_waitforobjects(thread_id);
        gfx_delayed_fillobjects(thread_id);
    }

	return model;

err_cleanup:
	if (f != NULL)
        fio_close(f);
	if (model != NULL)
		gfx_model_unload(model);
    mem_stack_destroy(&stack_mem);
	A_LOAD(tmp_alloc);
	return NULL;
}
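
A minimal usage sketch, assuming the task manager has already been initialized so tsk_get_tmpalloc(0) is valid; the file path is purely illustrative, and gfx_model_unload is the counterpart the function itself calls on error.

void load_model_example(void)
{
    /* thread_id 0: main/loader thread; its temp allocator backs the in-memory file */
    struct gfx_model* model = gfx_model_load(mem_heap(), "path/to/model.h3dm", 0);
    if (model == NULL)
        return;     /* the error has already been reported via err_printf/err_printn */

    /* ... create instances, render ... */

    gfx_model_unload(model);
}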
Example #3
result_t eng_init(const struct init_params* params)
{
    result_t r = RET_OK;

    ASSERT(g_eng == NULL);
    g_eng = (struct engine*)ALLOC(sizeof(struct engine), 0);
    if (g_eng == NULL)
        return err_printn(__FILE__, __LINE__, RET_OUTOFMEMORY);
    memset(g_eng, 0x00, sizeof(struct engine));

    eng_zero();

    memcpy(&g_eng->params, params, sizeof(struct init_params));

    hw_getinfo(&g_eng->hwinfo, HWINFO_ALL);

    /* console (before anything else) */
    if (BIT_CHECK(params->flags, ENG_FLAG_CONSOLE))	{
		r |= con_init(params->console_lines_max);
		if (IS_FAIL(r))
			return RET_FAIL;
		log_outputfunc(TRUE, con_log, NULL);
    }

    /* show build options */
#if !defined(FULL_VERSION)
#error "must define FULL_VERSION macro"
#endif

    time_t raw_tm;
    time(&raw_tm);

    log_printf(LOG_TEXT, "init darkhammer engine v%s build[%s, %s, %s, %s], time: %s", 
        FULL_VERSION,
#if defined(_DEBUG_)
    		"debug"
#else
    		"release"
#endif
    		,
#if defined(_PROFILE_)
    		"profile"
#else
    		"no-profile"
#endif
    		,
#if defined(_X86_)
    		"x86"
#elif defined(_X64_)
    		"x64"
#endif
    		,
#if defined(_ENABLEASSERT_)
    		"assert"
#else
    		"no-assert"
#endif
            , asctime(localtime(&raw_tm)));

    /* hardware info */
    hw_printinfo(&g_eng->hwinfo, HWINFO_ALL);

    size_t tmp_sz = params->dev.buffsize_tmp;
    size_t data_sz = params->dev.buffsize_data;
    tmp_sz = tmp_sz != 0 ? ((size_t)tmp_sz*1024) : FRAME_STACK_SIZE;
    data_sz = data_sz != 0 ? ((size_t)data_sz*1024) : DATA_SIZE;

    /* allocators */
    /* dynamic allocator for data in dev (editor) mode, stack allocator in game (normal) mode */
    if (BIT_CHECK(params->flags, ENG_FLAG_OPTIMIZEMEMORY))   {
        /* lsr (load-stay-resident) allocator for essential engine data */
        r |= mem_stack_create(mem_heap(), &g_eng->lsr_stack, LSR_SIZE, MID_DATA);
        mem_stack_bindalloc(&g_eng->lsr_stack, &g_eng->lsr_alloc);

        r |= mem_freelist_create(mem_heap(), &g_eng->data_freelist, data_sz, MID_DATA);
        mem_freelist_bindalloc(&g_eng->data_freelist, &g_eng->data_alloc);
    }   else    {
        mem_heap_bindalloc(&g_eng->data_alloc);
        mem_heap_bindalloc(&g_eng->lsr_alloc);

        g_eng->data_alloc.alloc_fn = eng_allocfn_data;
        g_eng->data_alloc.alignedalloc_fn = eng_alignedallocfn_data;
        g_eng->lsr_alloc.alloc_fn = eng_allocfn_lsr;
        g_eng->lsr_alloc.alignedalloc_fn = eng_alignedallocfn_lsr;

        r = RET_OK;
    }

    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: out of memory for allocators");
        return RET_FAIL;
    }

    /* timer manager and frame timer */
    g_eng->timer = timer_createinstance(TRUE);

    /* add engine's own data path to file-mgr */
    if (params->data_path != NULL)   {
        char data_path_ext[DH_PATH_MAX];
        path_getfileext(data_path_ext, params->data_path);
        if (str_isequal_nocase(data_path_ext, "pak"))    {
            if (IS_FAIL(pak_open(&g_eng->data_pak, mem_heap(), params->data_path, 0)))    {
                err_print(__FILE__, __LINE__, "engine init: could not open data pak");
                return RET_FAIL;
            }
        }   else   {
            if (!util_pathisdir(params->data_path)) {
                err_print(__FILE__, __LINE__, "engine init: data path is not valid");
                return RET_FAIL;
            }
            fio_addvdir(params->data_path, FALSE);
        }
        /* assume that the share directory is the same as the data dir */
        path_getdir(g_eng->share_dir, params->data_path);
    }   else    {
        char data_path[DH_PATH_MAX];
        char share_dir[DH_PATH_MAX];
#ifndef SHARE_DIR
        char exe_dir[DH_PATH_MAX];
        path_join(share_dir, util_getexedir(exe_dir), "..", NULL);
        path_norm(share_dir, share_dir);
#else
        path_norm(share_dir, SHARE_DIR);
#endif
        path_join(data_path, share_dir, "data", NULL);
        if (!util_pathisdir(data_path)) {
            err_print(__FILE__, __LINE__, "engine init: data path is not valid");
            return RET_FAIL;
        }

        fio_addvdir(data_path, FALSE);  /* set default (config.h configured on build) data dir */
        strcpy(g_eng->share_dir, share_dir);
    }

    uint rs_flags = 0;
    /* activate hot loading in DEV mode */
    rs_flags |= BIT_CHECK(params->flags, ENG_FLAG_DEV) ? RS_FLAG_HOTLOADING : 0;
    if (!BIT_CHECK(params->flags, ENG_FLAG_DISABLEBGLOAD))  {
        rs_flags |= RS_FLAG_PREPARE_BGLOAD;
    }

    /* task manager */
    uint thread_cnt = maxui(g_eng->hwinfo.cpu_core_cnt - 1, 1);
    r = tsk_initmgr(thread_cnt, 0, tmp_sz, 0);
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init task-mgr");
        return RET_FAIL;
    }
    struct allocator* tmp_alloc = tsk_get_tmpalloc(0);
    A_SAVE(tmp_alloc);

    /* resource manager (with only 1 thread for multi-thread loading) */
    r = rs_initmgr(rs_flags, 1);
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init res-mgr");
        return RET_FAIL;
    }
    rs_set_dataalloc(&g_eng->lsr_alloc);

    /* graphics renderer */
    r = gfx_init(&params->gfx);
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init gfx");
        return RET_FAIL;
    }

    /* debug HUD */
    r = hud_init(BIT_CHECK(params->flags, ENG_FLAG_CONSOLE));
    if (IS_FAIL(r))	{
        err_print(__FILE__, __LINE__, "engine init failed: could not init debug-hud");
        return RET_FAIL;
    }

    /* Physics */
    if (!BIT_CHECK(params->flags, ENG_FLAG_DISABLEPHX)) {
        r = phx_init(params);
        if (IS_FAIL(r)) {
            err_print(__FILE__, __LINE__, "engine init failed: could not init physics");
            return RET_FAIL;
        }
    }

    /* component manager */
    r = cmp_initmgr();
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init cmp-mgr");
        return RET_FAIL;
    }
    cmp_set_globalalloc(&g_eng->data_alloc, tsk_get_tmpalloc(0));

    /* world manager */
    r = wld_initmgr();
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init world-mgr");
        return RET_FAIL;
    }

    /* scene manager */
    r = scn_initmgr();
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init scene-mgr");
        return RET_FAIL;
    }

    /* init lua */
    r = sct_init(&params->sct, BIT_CHECK(params->flags, ENG_FLAG_DEV) ? TRUE : FALSE);
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init script engine");
        return RET_FAIL;
    }

    /* web-server */
#if defined(_PROFILE_)
    r = prf_initmgr();
    if (IS_FAIL(r))	{
    	log_print(LOG_WARNING, "profiler manager init failed: service will not be available");
    	prf_releasemgr();
    }
#endif

    /* lod-scheme */
    r = lod_initmgr();
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init lod-scheme");
        return RET_FAIL;
    }

    /* init basic resources */
    r = rs_init_resources();
    if (IS_FAIL(r)) {
        err_print(__FILE__, __LINE__, "engine init failed: could not init res-mgr resources");
        return RET_FAIL;
    }

    /* switch back to normal data allocator */
    rs_set_dataalloc(&g_eng->data_alloc);

    /* enable background-loading if res-mgr is prepared for it (see rs_initmgr above) */
    if (gfx_check_feature(GFX_FEATURE_THREADED_CREATES))
        rs_add_flags(RS_FLAG_BGLOADING);
    log_print(LOG_TEXT, "init ok: ready.");

    /* init world vars */
    eng_world_regvars();

    /* engine-specific console commands */
    con_register_cmd("showfps", eng_console_showfps, NULL, "showfps [1*/0]");
    con_register_cmd("showft", eng_console_showft, NULL, "showft [1*/0]");
    con_register_cmd("showgraph", eng_console_showgraph, NULL, "showgraph [ft][fps][drawcalls]");
    con_register_cmd("lockfps", eng_console_lockfps, NULL, "lockfps [fps]");

    /* execute console commands - should be the final stage of initialization */
    if (BIT_CHECK(params->flags, ENG_FLAG_CONSOLE))	{
		for (uint i = 0; i < params->console_cmds_cnt; i++)	{
			con_exec(params->console_cmds + i*128);
		}
    }

    A_LOAD(tmp_alloc);
    return RET_OK;
}
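
A minimal usage sketch, assuming init_params can be zero-initialized, that its flags field holds plain bit values (as the BIT_CHECK usage suggests), and that console_lines_max is the only extra field required when the console flag is set; everything else here is an assumption, not the engine's documented setup.

result_t start_engine(void)
{
    struct init_params params;
    memset(&params, 0x00, sizeof(params));

    params.flags |= ENG_FLAG_CONSOLE;   /* enable the in-engine console */
    params.console_lines_max = 1000;    /* console scroll-back, required with the flag above */
    /* data_path left empty: eng_init falls back to the built-in share/data directory */

    if (IS_FAIL(eng_init(&params)))
        return RET_FAIL;    /* details already logged via err_print/log_printf */
    return RET_OK;
}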
Example #4
struct gfx_model_instance* gfx_model_createinstance(struct allocator* alloc,
		struct allocator* tmp_alloc, reshandle_t model)
{
    struct gfx_model* m = rs_get_model(model);
    uint unique_cnt = 0;
    uint joint_cnt = 0;
    uint skeleton_cnt = 0;
    for (uint i = 0; i < m->mesh_cnt; i++)
        unique_cnt += m->meshes[i].submesh_cnt;
    for (uint i = 0; i < m->geo_cnt; i++) {
        if (m->geos[i].skeleton != NULL)    {
            joint_cnt += m->geos[i].skeleton->joint_cnt*3;
            skeleton_cnt ++;
        }
    }

    /* calculate total size and create stack allocator for subsequent allocs */
    struct stack_alloc stack_mem;
    struct allocator stack_alloc;
    size_t total_sz =
        sizeof(struct gfx_model_instance) +
        m->mtl_cnt*sizeof(struct gfx_model_mtlgpu) +
        m->geo_cnt*sizeof(struct gfx_model_posegpu) +
        sizeof(uint)*unique_cnt +
        sizeof(int)*m->renderable_cnt +
        skeleton_cnt*16 +
        joint_cnt*sizeof(struct mat3f)*3;

    if (IS_FAIL(mem_stack_create(alloc, &stack_mem, total_sz, MID_GFX)))
        return NULL;
    mem_stack_bindalloc(&stack_mem, &stack_alloc);

    /* main instance struct, allocated from the instance's stack block */
	struct gfx_model_instance* inst = (struct gfx_model_instance*)A_ALLOC(&stack_alloc,
        sizeof(struct gfx_model_instance), MID_GFX);
    ASSERT(inst);
	memset(inst, 0x00, sizeof(struct gfx_model_instance));

	inst->alloc = alloc;
	inst->model = model;

	/* gpu materials */
	if (m->mtl_cnt > 0)	{
		inst->mtls = (struct gfx_model_mtlgpu**)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_mtlgpu*)*m->mtl_cnt, MID_GFX);
		ASSERT(inst->mtls);
		memset(inst->mtls, 0x00, sizeof(struct gfx_model_mtlgpu*)*m->mtl_cnt);
	}

	if (m->geo_cnt > 0)	{
		inst->poses = (struct gfx_model_posegpu**)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_posegpu*)*m->geo_cnt, MID_GFX);
		ASSERT(inst->poses);
		memset(inst->poses, 0x00, sizeof(struct gfx_model_posegpu*)*m->geo_cnt);
        inst->pose_cnt = m->geo_cnt;
	}

	for (uint i = 0; i < m->renderable_cnt; i++)	{
		struct gfx_model_node* n = &m->nodes[m->renderable_idxs[i]];
		struct gfx_model_mesh* mesh = &m->meshes[n->mesh_id];

		for (uint k = 0; k < mesh->submesh_cnt; k++)	{
			uint geo_id = mesh->geo_id;
			uint mtl_id = mesh->submeshes[k].mtl_id;
			uint rpath_flags = model_make_rpathflags(m, n->mesh_id, k);

			/* create material gpu data, if not created before */
			if (inst->mtls[mtl_id] == NULL)	{
				inst->mtls[mtl_id] = model_load_gpumtl(alloc, &stack_alloc, tmp_alloc,
                    &m->mtls[mtl_id], rpath_flags);
				if (inst->mtls[mtl_id] == NULL)	{
					gfx_model_destroyinstance(inst);
					return NULL;
				}
			}

			if (inst->poses[geo_id] == NULL && BIT_CHECK(rpath_flags, GFX_RPATH_SKINNED))	{
				inst->poses[geo_id] = model_load_gpupose(&stack_alloc, tmp_alloc, &m->geos[geo_id],
						rpath_flags);
				if (inst->poses[geo_id] == NULL)	{
					gfx_model_destroyinstance(inst);
					return NULL;
				}
			}
		}
	}

	/* create unique Ids */
	if (unique_cnt > 0)	{
		inst->unique_ids = (uint*)A_ALLOC(&stack_alloc, sizeof(uint)*unique_cnt, MID_GFX);
		if (inst->unique_ids == NULL)	{
			gfx_model_destroyinstance(inst);
			return NULL;
		}
		memset(inst->unique_ids, 0x00, sizeof(uint)*unique_cnt);
	}

    /* create alpha flags */
    inst->alpha_flags = (int*)A_ALLOC(&stack_alloc, sizeof(int)*m->renderable_cnt, MID_GFX);
    ASSERT(inst->alpha_flags);
    memset(inst->alpha_flags, 0x00, sizeof(int)*m->renderable_cnt);

	/* update data of materials */
	gfx_model_updatemtls(inst);

	return inst;
}
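
A minimal usage sketch; hmodel stands for a reshandle_t obtained from the resource manager (the loading call itself is not shown here), and the allocators mirror the ones used elsewhere in this listing.

void create_instance_example(reshandle_t hmodel)
{
    struct gfx_model_instance* inst = gfx_model_createinstance(
        mem_heap(),             /* owner allocator that backs the instance's stack block */
        tsk_get_tmpalloc(0),    /* per-thread temp allocator (main thread) */
        hmodel);
    if (inst == NULL)
        return;

    /* ... bind the instance to a renderable object and draw it ... */

    gfx_model_destroyinstance(inst);
}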