/* Free the executable code buffer owned by *p and reset its
 * bookkeeping fields so the struct can be safely reused or dropped.
 */
void x86_release_func( struct x86_function *p )
{
   _mesa_exec_free(p->store);

   /* Clear everything that referenced the freed buffer. */
   p->store = NULL;
   p->csr   = NULL;
   p->size  = 0;
}
/* Release all vertex-emit state for this context: the aligned vertex
 * buffer and every dynamically generated fastpath on the list.
 */
void _tnl_free_vertices( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   struct tnl_clipspace *vtx;
   struct tnl_clipspace_fastpath *fp;

   if (!tnl)
      return;

   vtx = GET_VERTEX_STATE(ctx);

   _mesa_align_free(vtx->vertex_buf);
   vtx->vertex_buf = NULL;

   fp = vtx->fastpath;
   while (fp) {
      struct tnl_clipspace_fastpath *next = fp->next;

      free(fp->attr);

      /* KW: At the moment, fp->func is constrained to be allocated by
       * _mesa_exec_alloc(), as the hardwired fastpaths in
       * t_vertex_generic.c are handled specially.  It would be nice
       * to unify them, but this probably won't change until this
       * module gets another overhaul.
       */
      _mesa_exec_free((void *) fp->func);
      free(fp);

      fp = next;
   }
   vtx->fastpath = NULL;
}
/* Walk the dynfn list and free every entry: unlink it, release its
 * executable code buffer, then free the node itself.
 *
 * Bug fix: the original definition was missing its closing brace, so
 * the function body was never terminated.
 */
static void free_funcs( struct dynfn *l )
{
   struct dynfn *f, *tmp;

   /* foreach_s is the safe-iteration form: tmp holds the next node so
    * the current one may be removed and freed inside the loop.
    */
   foreach_s (f, tmp, l) {
      remove_from_list( f );
      _mesa_exec_free( f->code );
      _mesa_free( f );
   }
}
/* Ensure *p has an executable code buffer, growing it when needed.
 * First call allocates 1024 bytes; later calls double the capacity,
 * copy the bytes emitted so far, and release the old buffer.
 *
 * NOTE(review): the _mesa_exec_malloc() result is used unchecked, as
 * in the rest of this module — confirm OOM policy before changing.
 */
static void do_realloc( struct x86_function *p )
{
   if (p->size == 0) {
      /* Initial allocation: empty buffer, cursor at the start. */
      p->size = 1024;
      p->store = _mesa_exec_malloc(p->size);
      p->csr = p->store;
      return;
   }

   {
      unsigned used = p->csr - p->store;
      unsigned char *old_store = p->store;

      /* Double capacity, move the emitted code, then free the old
       * buffer (copy must happen before the free).
       */
      p->size *= 2;
      p->store = _mesa_exec_malloc(p->size);
      memcpy(p->store, old_store, used);
      p->csr = p->store + used;

      _mesa_exec_free(old_store);
   }
}
/* Tear down a vertex_fetch object: free every generated fastpath on
 * its list, then the vertex_fetch itself.
 */
void vf_destroy( struct vertex_fetch *vf )
{
   struct vf_fastpath *fp = vf->fastpath;

   while (fp) {
      struct vf_fastpath *next = fp->next;

      FREE(fp->attr);

      /* KW: At the moment, fp->func is constrained to be allocated by
       * _mesa_exec_alloc(), as the hardwired fastpaths in
       * t_vertex_generic.c are handled specially.  It would be nice
       * to unify them, but this probably won't change until this
       * module gets another overhaul.
       */
      _mesa_exec_free((void *) fp->func);
      FREE(fp);

      fp = next;
   }
   vf->fastpath = NULL;

   FREE(vf);
}