/* Shuts down the engine and releases every subsystem.
 * Safe to call if the engine was never initialized (g_eng == NULL is a no-op).
 * NOTE(review): the calls below appear to mirror reverse init order and are
 * order-critical -- do not reorder without checking the init path. */
void eng_release()
{
    if (g_eng == NULL)
        return;

    rs_release_resources();
    lod_releasemgr();

#if !defined(_DEBUG_)
    /* the data pak is only opened in non-debug builds -- presumably debug
       builds load loose files instead; confirm against the init path */
    pak_close(&g_eng->data_pak);
#endif

    prf_releasemgr();
    sct_release();
    wld_releasemgr();
    scn_releasemgr();
    cmp_releasemgr();
    phx_release();
    hud_release();
    gfx_release();

    /* report leaked resources before tearing the resource manager down */
    rs_reportleaks();
    rs_releasemgr();
    tsk_releasemgr();

    if (g_eng->timer != NULL)
        timer_destroyinstance(g_eng->timer);

    /* check for main memory leaks (dev builds only) */
    if (BIT_CHECK(g_eng->params.flags, ENG_FLAG_DEV)) {
        int leak_cnt = mem_freelist_getleaks(&g_eng->data_freelist, NULL);
        if (leak_cnt > 0)
            log_printf(LOG_WARNING, "%d leaks found on dynamic 'data' memory", leak_cnt);
    }

    /* engine-owned allocators go last: subsystems above may still reference
       memory carved from them */
    mem_freelist_destroy(&g_eng->data_freelist);
    mem_stack_destroy(&g_eng->lsr_stack);

    log_print(LOG_TEXT, "engine released.");

    /* detach the log output callback before destroying the console it feeds */
    if (BIT_CHECK(g_eng->params.flags, ENG_FLAG_CONSOLE)) {
        log_outputfunc(FALSE, NULL, NULL);
        con_release();
    }

    FREE(g_eng);
    g_eng = NULL; /* allow a clean re-init / guard against double-release */
}
/* Stops a worker thread and destroys all resources owned by it.
 * Signals the thread to stop, blocks until it exits, then closes its Win32
 * handles and releases its per-thread allocators and the thread object.
 * NOTE(review): must be called from a different thread than 'thread' itself,
 * since it waits on the thread handle -- confirm with callers. */
void mt_thread_destroy(mt_thread thread)
{
    /* reset events */
    /* signal STOP so the worker loop exits; clear RESUME so a paused thread
       does not wake into more work. NOTE(review): assumes the worker waits on
       both events together (otherwise a paused thread could block the wait
       below) -- verify against the thread's run loop. */
    if (thread->events[EVENT_STOP] != NULL)
        SetEvent(thread->events[EVENT_STOP]);
    if (thread->events[EVENT_RESUME] != NULL)
        ResetEvent(thread->events[EVENT_RESUME]);

    /* wait for thread to finish */
    if (thread->t != NULL) {
        WaitForSingleObject(thread->t, INFINITE);
        CloseHandle(thread->t);
    }

    /* destroy events (only after the thread has exited and can no longer
       touch them) */
    if (thread->events[EVENT_RESUME] != NULL)
        CloseHandle(thread->events[EVENT_RESUME]);
    if (thread->events[EVENT_STOP] != NULL)
        CloseHandle(thread->events[EVENT_STOP]);

    /* release per-thread allocators, then the descriptor itself */
    mem_freelist_destroy(&thread->local_mem);
    mem_stack_destroy(&thread->tmp_mem);
    FREE(thread);
}
void prf_destroy_samples(struct prf_samples* s) { mem_stack_destroy(&s->alloc); FREE(s); }
/* Loads a model from an .h3dm file and builds its runtime representation.
 * All model sub-allocations are carved from a single pre-sized stack block, so
 * the whole model can be released as one unit (see gfx_model_unload).
 * @param alloc allocator that backs the model's internal stack block
 * @param h3dm_filepath path of the .h3dm file to load
 * @param thread_id id of the calling task thread; selects the temp allocator
 *        and, when non-zero, triggers the gfx_delayed_* flush at the end
 * @return the loaded model, or NULL on any failure (file open, bad
 *         signature/version, out of memory, or a failing sub-loader)
 */
struct gfx_model* gfx_model_load(struct allocator* alloc, const char* h3dm_filepath,
    uint thread_id)
{
    struct allocator* tmp_alloc = tsk_get_tmpalloc(thread_id);
    A_SAVE(tmp_alloc); /* paired with A_LOAD on every exit path */

    struct h3d_header header;
    struct h3d_model h3dmodel;
    struct gfx_model* model = NULL;
    uint renderable_idx = 0;
    struct stack_alloc stack_mem;
    struct allocator stack_alloc;
    result_t r;

    /* zeroed so mem_stack_destroy in err_cleanup is safe even before create */
    memset(&stack_mem, 0x00, sizeof(stack_mem));

    file_t f = fio_openmem(tmp_alloc, h3dm_filepath, FALSE, MID_GFX);
    if (f == NULL) {
        err_printf(__FILE__, __LINE__, "load model '%s' failed: could not open file",
            h3dm_filepath);
        goto err_cleanup;
    }

    /* header */
    fio_read(f, &header, sizeof(header), 1);
    if (header.sign != H3D_SIGN || header.type != H3D_MESH) {
        err_printf(__FILE__, __LINE__, "load model '%s' failed: invalid file format",
            h3dm_filepath);
        goto err_cleanup;
    }
    if (header.version != H3D_VERSION && header.version != H3D_VERSION_13) {
        err_printf(__FILE__, __LINE__,
            "load model '%s' failed: file version not implemented/obsolete", h3dm_filepath);
        goto err_cleanup;
    }

    /* model */
    fio_read(f, &h3dmodel, sizeof(h3dmodel), 1);

    /* calculate size and create stack allocator for proceeding allocations.
     * NOTE(review): this budget must stay in lockstep with every A_ALLOC /
     * A_ALIGNED_ALLOC below (and inside the model_load* helpers) -- the
     * literal +16 / *32 terms pad for aligned allocations. Update both sides
     * together when adding fields. */
    size_t total_sz =
        sizeof(struct gfx_model) +
        h3dmodel.node_cnt*sizeof(struct gfx_model_node) + 16 +
        h3dmodel.node_cnt*sizeof(uint) +
        h3dmodel.geo_cnt*sizeof(struct gfx_model_geo) +
        h3dmodel.mesh_cnt*sizeof(struct gfx_model_mesh) +
        h3dmodel.mtl_cnt*sizeof(struct gfx_model_mtl) +
        h3dmodel.has_occ*sizeof(struct gfx_model_occ) +
        h3dmodel.total_childidxs*sizeof(uint) +
        h3dmodel.total_geo_subsets*sizeof(struct gfx_model_geosubset) +
        h3dmodel.total_joints*sizeof(struct gfx_model_joint) +
        h3dmodel.total_joints*sizeof(struct mat3f) +
        h3dmodel.total_submeshes*sizeof(struct gfx_model_submesh) +
        h3dmodel.total_skeletons*sizeof(struct gfx_model_skeleton) +
        h3dmodel.total_skeletons*32 + /* 2 aligned allocs per skeleton */
        h3dmodel.total_maps*sizeof(struct gfx_model_map) +
        h3dmodel.occ_idx_cnt*sizeof(uint16) +
        h3dmodel.occ_vert_cnt*sizeof(struct vec3f) +
        h3dmodel.has_occ*16; /* 1 aligned alloc for occ */
    r = mem_stack_create(alloc, &stack_mem, total_sz, MID_GFX);
    if (IS_FAIL(r)) {
        err_printn(__FILE__, __LINE__, RET_OUTOFMEMORY);
        goto err_cleanup;
    }
    mem_stack_bindalloc(&stack_mem, &stack_alloc);

    /* the model object itself is the first carve from the stack block */
    model = (struct gfx_model*)A_ALLOC(&stack_alloc, sizeof(struct gfx_model), MID_GFX);
    if (model == NULL) {
        err_printn(__FILE__, __LINE__, RET_OUTOFMEMORY);
        goto err_cleanup;
    }
    memset(model, 0x00, sizeof(struct gfx_model));
    model->alloc = alloc;

    /* nodes */
    if (h3dmodel.node_cnt > 0) {
        model->nodes = (struct gfx_model_node*)A_ALIGNED_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_node)*h3dmodel.node_cnt, MID_GFX);
        ASSERT(model->nodes);
        memset(model->nodes, 0x00, sizeof(struct gfx_model_node)*h3dmodel.node_cnt);
        for (uint i = 0; i < h3dmodel.node_cnt; i++) {
            struct gfx_model_node* node = &model->nodes[i];
            if (!model_loadnode(node, f, &stack_alloc))
                goto err_cleanup;
            /* NOTE: we set root matrix to identity and keep the old one as "root_mat" */
            if (i == 0) {
                mat3_setm(&model->root_mat, &node->local_mat);
                mat3_set_ident(&node->local_mat);
            }
            /* counters track loaded items so partial loads unwind correctly */
            model->node_cnt ++;
        }
    }

    /* meshes */
    if (h3dmodel.mesh_cnt > 0) {
        model->meshes = (struct gfx_model_mesh*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_mesh)*h3dmodel.mesh_cnt, MID_GFX);
        ASSERT(model->meshes);
        memset(model->meshes, 0x00, sizeof(struct gfx_model_mesh)*h3dmodel.mesh_cnt);
        uint idx = 0;
        for (uint i = 0; i < h3dmodel.mesh_cnt; i++) {
            struct gfx_model_mesh* mesh = &model->meshes[i];
            if (!model_loadmesh(mesh, f, &stack_alloc))
                goto err_cleanup;
            /* assign global indexes (submesh offsets run across all meshes) */
            for (uint k = 0; k < mesh->submesh_cnt; k++)
                mesh->submeshes[k].offset_idx = idx++;
            model->mesh_cnt ++;
        }
    }

    /* geos */
    if (h3dmodel.geo_cnt > 0) {
        model->geos = (struct gfx_model_geo*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_geo)*h3dmodel.geo_cnt, MID_GFX);
        ASSERT(model->geos);
        memset(model->geos, 0x00, sizeof(struct gfx_model_geo)*h3dmodel.geo_cnt);
        for (uint i = 0; i < h3dmodel.geo_cnt; i++) {
            struct gfx_model_geo* geo = &model->geos[i];
            if (!model_loadgeo(geo, f, &stack_alloc, tmp_alloc, thread_id))
                goto err_cleanup;
            model->geo_cnt ++;
        }
    }

    /* materials */
    if (h3dmodel.mtl_cnt > 0) {
        model->mtls = (struct gfx_model_mtl*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_mtl)*h3dmodel.mtl_cnt, MID_GFX);
        ASSERT(model->mtls);
        memset(model->mtls, 0x00, sizeof(struct gfx_model_mtl)*h3dmodel.mtl_cnt);
        for (uint i = 0; i < h3dmodel.mtl_cnt; i++) {
            struct gfx_model_mtl* mtl = &model->mtls[i];
            if (!model_loadmtl(mtl, f, &stack_alloc))
                goto err_cleanup;
            model->mtl_cnt ++;
        }
    }

    /* occluder data only exists in file versions >= 11 */
    if (header.version >= H3D_VERSION_11 && h3dmodel.has_occ) {
        model->occ = (struct gfx_model_occ*)A_ALLOC(&stack_alloc,
            sizeof(struct gfx_model_occ), MID_GFX);
        ASSERT(model->occ);
        memset(model->occ, 0x00, sizeof(struct gfx_model_occ));
        if (!model_loadocc(model->occ, f, &stack_alloc))
            goto err_cleanup;
    }

    /* populate renderable nodes (nodes that reference a mesh) */
    model->renderable_idxs = (uint*)A_ALLOC(&stack_alloc, sizeof(uint)*h3dmodel.node_cnt,
        MID_GFX);
    ASSERT(model->renderable_idxs);
    for (uint i = 0; i < h3dmodel.node_cnt; i++) {
        struct gfx_model_node* node = &model->nodes[i];
        if (node->mesh_id != INVALID_INDEX)
            model->renderable_idxs[renderable_idx++] = i;
    }
    model->renderable_cnt = renderable_idx;

    /* calculate sum of aabb(s) from renderable nodes */
    aabb_setzero(&model->bb);
    struct mat3f node_mat; /* transform matrix, relative to model */
    for (uint i = 0; i < renderable_idx; i++) {
        struct gfx_model_node* node = &model->nodes[model->renderable_idxs[i]];
        /* walk up the parent chain to accumulate the node-to-model transform */
        mat3_setm(&node_mat, &node->local_mat);
        struct gfx_model_node* pnode = node;
        while (pnode->parent_id != INVALID_INDEX) {
            pnode = &model->nodes[pnode->parent_id];
            mat3_mul(&node_mat, &node_mat, &pnode->local_mat);
        }
        /* non-root nodes additionally pick up the saved root transform
           (the root's local_mat was reset to identity above) */
        if (node->parent_id != INVALID_INDEX)
            mat3_mul(&node_mat, &node_mat, &model->root_mat);

        /* transform local box to model-relative bounding box and merge with final */
        struct aabb bb;
        aabb_xform(&bb, &model->nodes[model->renderable_idxs[i]].bb, &node_mat);
        aabb_merge(&model->bb, &model->bb, &bb);
    }

    /* for empty models, we set a minimal bounding-box */
    if (aabb_iszero(&model->bb)) {
        aabb_pushptf(&model->bb, 0.1f, 0.1f, 0.1f);
        aabb_pushptf(&model->bb, -0.1f, -0.1f, -0.1f);
    }

    fio_close(f);
    A_LOAD(tmp_alloc);

    /* worker-thread loads flush deferred gpu-object creation before returning
       -- presumably gpu objects must be created on/with the main context;
       confirm against gfx_delayed_* */
    if (thread_id != 0) {
        gfx_delayed_waitforobjects(thread_id);
        gfx_delayed_fillobjects(thread_id);
    }

    return model;

err_cleanup:
    if (f != NULL)
        fio_close(f);
    /* NOTE(review): gfx_model_unload on a partially built model relies on the
       per-section counters above; mem_stack_destroy then releases the block
       itself -- verify unload does not also destroy the stack (double free) */
    if (model != NULL)
        gfx_model_unload(model);
    mem_stack_destroy(&stack_mem);
    A_LOAD(tmp_alloc);
    return NULL;
}