/* Returns the current working directory wrapped in a filename_t.
 * Starts with a stack buffer; when getcwd reports ERANGE (buffer too
 * small) it retries with heap buffers of doubling size. Any heap buffer
 * is released before returning. */
filename_t* filename_t::workingDirectory (void) {
  ___CBTPUSH;
#define INITIAL_SIZE 256

  char stackBuf[INITIAL_SIZE];
  char* cwd = getcwd (stackBuf, INITIAL_SIZE);
  if (cwd == NULL && errno == ERANGE) {
    /* Stack buffer too small: retry with growing heap buffers. */
    int capacity = INITIAL_SIZE;
    char* heapBuf;
    do {
      capacity <<= 1;
      heapBuf = (char*) mem_allocate ("char[]", capacity, PTYPE_ORDINARY);
      cwd = getcwd (heapBuf, capacity);
      if (cwd == NULL) {
        if (errno != ERANGE) break; /* hard failure: stop retrying */
        mem_release (heapBuf);      /* still too small: grow and retry */
      }
    } while (cwd == NULL);
    if (cwd == NULL) {
      mem_release (heapBuf); /* getcwd failed for a non-ERANGE reason */
    }
  }

  filename_t* result = filename_t::create (cwd);
  if (cwd != NULL && cwd != stackBuf) {
    mem_release (cwd); /* cwd points at a heap buffer we allocated */
  }

  ___CBTPOP;
  return result;
}
/* rbt_node_t */

/* Destructor callback for an rbt_node_t: drops the reference held on the
 * node's payload and on each child that exists. */
static void rbt_node_free(void* v_node){
    assert(v_node != NULL);
    rbt_node_t* n = (rbt_node_t*) v_node;
    mem_release(n->contents);
    if (NULL != n->left)
        mem_release(n->left);
    if (NULL != n->right)
        mem_release(n->right);
}
/* Tear down an ordered list: release the head and tail sentinels, destroy
 * the backing freelist, then free the list struct itself. If the tail's
 * claimed refcount is not the expected 4, the list is dumped to stdout as
 * a debugging aid before the assert fires. */
void lf_ordlist_destroy(struct lf_ordlist *lst)
{
	if (4 != lst->tail->n.refct_claim)
		lf_ordlist_print(stdout, lst);
	assert(lst->tail->n.refct_claim == 4);

	/* per the asserts, releasing the head drops the tail's refcount
	 * from 4 to 2 */
	mem_release(lst->fl, lst->head);
	assert(lst->tail->n.refct_claim == 2);
	mem_release(lst->fl, lst->tail);

	mem_freelist_destroy(lst->fl);
	free(lst);
}
/* Look up 'value' in the list. Returns the stored element that compares
 * equal to 'value', or NULL when no such element exists. Both node
 * references handed back by search() are released before returning. */
void *lf_ordlist_find(struct lf_ordlist *lst, void *value)
{
	struct lf_ordlist_node *left, *right;
	void *found = NULL;

	right = search(lst, value, &left);
	if (right != lst->tail && 0 == lst->cmp(right->value, value))
		found = right->value;

	mem_release(lst->fl, right);
	mem_release(lst->fl, left);
	return found;
}
/* Read one record of 'n' bytes from the dump stream 'fin' and insert it
 * into 'table' under transaction 'tran'. Grows the shared load buffer
 * geometrically when needed. A corrupted record is logged and skipped
 * rather than aborting the whole load. */
static void load_data_record(Istream& fin, const gcstring& table, int tran, int n)
	{
	try
		{
		if (n > loadbuf_size)
			{
			// grow at least 2x so repeated large records don't realloc each time
			loadbuf_size = max(n, 2 * loadbuf_size);
			mem_release(loadbuf);
			loadbuf = (char*) mem_committed(loadbuf_size);
			verify(loadbuf);
			}
		fin.read(loadbuf, n);
		Record rec(loadbuf);
		// sanity check: the record's own size must match what was read
		if (rec.cursize() != n)
			except_err(table << ": rec size " << rec.cursize() << " not what was read " << n);
		if (table == "views")
			theDB()->add_any_record(tran, table, rec);
		else
			theDB()->add_record(tran, table, rec);
		}
	catch (const Except& e)
		{
		// best-effort load: report and continue with the next record
		errlog("load: skipping corrupted record in: ", table.str(), e.str());
		alert("skipping corrupted record in: " << table << ": " << e);
		alerts = true;
		}
	}
/* Tear down a display: wait for it to finish, blank the window it was
 * last showing on, detach every in-use image from it and drop their
 * references, then free the display handle. */
void egl_disp_free(EGL_DISP_HANDLE_T disp_handle)
{
#ifdef BRCM_V3D_OPT
   return;
#endif
   DISP_T *disp = disp_from_handle(disp_handle);
   uint32_t i;

   finish(disp);

   /* take the current image off the display */
   if (disp->in_use.last_win != EGL_PLATFORM_WIN_NONE) {
      egl_server_platform_display_nothing_sync(disp->in_use.last_win);
   }

   for (i = 0; i != disp->in_use.n; ++i) {
      /* NOTE(review): the interlock detach below is skipped entirely when
       * exactly one image is in use -- the condition is loop-invariant and
       * looks suspicious; confirm it is intentional. */
      if (disp->in_use.n != 1) {
         KHRN_IMAGE_T *image = (KHRN_IMAGE_T *)mem_lock(disp->in_use.images[i]);
         /* wait for all outstanding writes to the image to complete. we do
          * this to make sure there aren't any wait-for-display messages
          * hanging around in the system (we only post wait-for-display
          * messages before writes). we don't really want to flush unflushed
          * writes here, but whatever */
         khrn_interlock_read_immediate(&image->interlock);
         vcos_assert(image->interlock.disp_image_handle == egl_disp_image_handle(disp_handle, i));
         image->interlock.disp_image_handle = EGL_DISP_IMAGE_HANDLE_INVALID;
         mem_unlock(disp->in_use.images[i]);
      }
      mem_release(disp->in_use.images[i]);
   }

   free_disp_handle(disp_handle);
}
/* Remove 'node' from the tree and release it. A node with two children is
 * first replaced by the rightmost descendant of its left subtree (its
 * in-order predecessor), which is itself deleted recursively and then
 * spliced into node's position. A black node with at most one real child
 * either recolors its red child black or triggers a rebalance. */
static void rbt_delete_node(rbt_t* tree, rbt_node_t* node){
    rbt_node_t* descendant = NULL;
    if(node->left && node->right){
        /* two children: detach the predecessor, then give it node's
         * children, color and position */
        descendant = rightmost_descendant(node->left);
        mem_retain(descendant);
        rbt_delete_node(tree, descendant);
        if(node->left) node->left->parent = descendant;
        if(node->right) node->right->parent = descendant;
        descendant->left = node->left;
        descendant->right = node->right;
        descendant->color = node->color;
    }else if(BLACK == rbt_node_color(node)){
        //black node with at most one non-leaf child
        if(RED == rbt_node_color(node->left) || RED == rbt_node_color(node->right)){
            /* red child absorbs node's blackness */
            descendant = node->left ? node->left : node->right;
            descendant->color = BLACK;
        } else {
            rbt_del_rebalance(tree, node);
        }
    }
    rbt_node_replace(tree, node, descendant);
    /* clear links so releasing node does not cascade into the children
     * (rbt_node_free releases left/right when set) */
    node->left = NULL;
    node->right = NULL;
    node->parent = NULL;
    mem_release(node);
}
/** Free a payload buffer previously allocated for this port. Ports with
 * their own payload allocator free through it (under the port lock);
 * otherwise the buffer came from the heap. The port reference taken at
 * allocation time is dropped in all cases. */
void mmal_port_payload_free(MMAL_PORT_T *port, uint8_t *payload)
{
   if (!port || !port->priv)
      return;

   LOG_TRACE("%s(%i:%i) port %p, payload %p", port->component->name,
             (int)port->type, (int)port->index, port, payload);

   if (port->priv->pf_payload_alloc)
   {
      LOCK_PORT(port);
      port->priv->pf_payload_free(port, payload);
      UNLOCK_PORT(port);
   }
   else
   {
      /* Revert to using the heap */
#ifdef _VIDEOCORE
      mem_release((MEM_HANDLE_T)payload);
#else
      vcos_free(payload);
#endif
   }

   mmal_port_release(port);
}
/* Clears the database's loading flag and releases the shared load
 * buffer, resetting its bookkeeping to the empty state. */
~Loading()
	{
	theDB()->loading = false;
	mem_release(loadbuf);
	loadbuf_size = 0;
	loadbuf = 0;
	}
/* Destructor: releases the date object owned by this clock. */
realTimeClock_t::~realTimeClock_t (void) {
  ___CBTPUSH;
  mem_release (m_date);
  ___CBTPOP;
}
/* ----------------------------------------------------------------------
 * destroy a pool of objects. if none of the pool objects are in use,
 * the destroy will happen immediately. otherwise, we will sleep for
 * up to 'timeout' microseconds waiting for the objects to be released.
 *
 * timeout of 0xffffffff means 'wait forever'; 0 means 'do not wait'.
 *
 * returns 0 on success, -1 on error (ie. even after 'timeout' there
 * were still objects in use)
 * -------------------------------------------------------------------- */
int32_t vc_pool_destroy( VC_POOL_T *pool, uint32_t timeout )
{
   int i;
   vcos_assert( pool->magic == POOL_MAGIC );

   // wait for all objects to become free
   for (;;) {
      lock_pool( pool );
      if ( pool->allocated == 0 )
         break; /* NOTE: we leave the loop still holding the pool lock;
                   the pool is torn down below without unlocking */
      unlock_pool( pool );
      if ( wait_event(pool,timeout) )
         return -1; // timed out
   }

   if ( pool->mem != MEM_INVALID_HANDLE ) {
      // just a single memory object to free
      mem_release( pool->mem );
   } else {
      // release individual pool entries back to mempool
      for (i=0; i<pool->nobjects; i++)
         mem_release( pool->object[i].mem );
   }

   // remove from the global list (pool_list_latch guards vc_pool_list)
   rtos_latch_get(&pool_list_latch);
   VC_POOL_T **pp = &vc_pool_list;
   while (*pp != pool) { pp = &((*pp)->next); }
   vcos_assert(*pp);
   *pp = pool->next;
   rtos_latch_put(&pool_list_latch);

   // kill the pool struct; clearing magic invalidates stale handles
   pool->magic = 0;
   destroy_event( pool );
   rtos_priorityfree( pool );
   return 0;
}
/* Self-test for the mem_* block allocator. Returns 0 on success, or a
 * non-zero step number identifying the first failed check. Exercises:
 * initial free count, exhaustive acquire, acquire-on-empty failure, and
 * release of every block in reverse order. */
uint32_t mem_ut(void)
{
#define BLOCK_SIZE MROUND(10)
#define BLOCK_COUNT 10
	uint8_t MALIGN(4) pool[BLOCK_COUNT][BLOCK_SIZE];
	void *mem_free;
	void *mem_used;
	uint16_t mem_free_count;
	void *mem;

	/* step 1: after init the free count must equal the pool size */
	mem_init(pool, BLOCK_SIZE, BLOCK_COUNT, &mem_free);
	mem_free_count = mem_free_count_get(mem_free);
	if (mem_free_count != BLOCK_COUNT) {
		return 1;
	}

	/* step 2: drain the pool, chaining each acquired block onto a
	 * singly linked list (each block stores a pointer to the block
	 * acquired before it), checking the free count after each acquire.
	 * note: the post-decrement leaves mem_free_count wrapped (0xffff)
	 * on loop exit -- the next loop relies on pre-incrementing it
	 * back to 0. */
	mem_used = 0;
	while (mem_free_count--) {
		uint16_t mem_free_count_current;

		mem = mem_acquire(&mem_free);
		mem_free_count_current = mem_free_count_get(mem_free);
		if (mem_free_count != mem_free_count_current) {
			return 2;
		}
		memcpy(mem, &mem_used, sizeof(mem));
		mem_used = mem;
	}

	/* step 3: pool exhausted, a further acquire must fail */
	mem = mem_acquire(&mem_free);
	if (mem) {
		return 3;
	}

	/* step 4: walk the chain, releasing every block and checking the
	 * free count after each release */
	while (++mem_free_count < BLOCK_COUNT) {
		uint16_t mem_free_count_current;

		mem = mem_used;
		memcpy(&mem_used, mem, sizeof(void *));
		mem_release(mem, &mem_free);
		mem_free_count_current = mem_free_count_get(mem_free);
		if ((mem_free_count + 1) != mem_free_count_current) {
			return 4;
		}
	}

	/* step 5: the last block released must now head the free list */
	if (mem != mem_free) {
		return 5;
	}

	/* step 6: everything is back in the free list */
	if (mem_free_count_get(mem_free) != BLOCK_COUNT) {
		return 6;
	}

	return 0;
}
/* Destructor: releases the single buffer backing the name. */
filename_t::~filename_t (void) {
  ___CBTPUSH;
  mem_release ((void*) m_name);
  /* The m_indices are in the same buffer as m_name and should not be
     released separately. */
  ___CBTPOP;
}
/* Destructor: disposes of the collaborating components and releases the
 * input buffer. NOTE(review): dump() appears to be this codebase's way of
 * disposing of these objects -- confirm it drops the reference rather
 * than merely printing state. */
assembler_t::~assembler_t (void) {
  ___CBTPUSH;
  m_instructionBuilder->dump ();
  m_inputReader->dump ();
  m_tokenizer->dump ();
  m_compiler->dump ();
  mem_release (m_input);
  ___CBTPOP;
}
/* Wrap a native VC_IMAGE_T pixmap in a KHRN image. Maps the VC image type
 * to the matching KHRN format, acquires (or wraps) the pixel storage, and
 * returns a handle to the created image, or MEM_INVALID_HANDLE if the
 * storage could not be wrapped. */
MEM_HANDLE_T egl_server_platform_create_pixmap_info(uint32_t pixmap)
{
   /* 'pixmap' is really a pointer to the native image, smuggled through
      a uint32_t */
   VC_IMAGE_T *vcimage = (VC_IMAGE_T *)pixmap;
   KHRN_IMAGE_FORMAT_T format = 0;
   MEM_HANDLE_T data_handle;
   vcos_assert(pixmap);

   switch (vcimage->type) {
   case VC_IMAGE_TF_RGBA32: format = ABGR_8888_TF; break;
   case VC_IMAGE_TF_RGBX32: format = XBGR_8888_TF; break;
   case VC_IMAGE_TF_RGB565: format = RGB_565_TF; break;
   case VC_IMAGE_TF_RGBA5551: format = RGBA_5551_TF; break;
   case VC_IMAGE_TF_RGBA16: format = RGBA_4444_TF; break;
#ifdef __VIDEOCORE4__
   case VC_IMAGE_RGBA32: format = ABGR_8888_RSO; break;
   case VC_IMAGE_RGB565: format = RGB_565_RSO; break;
#endif
   default:
      UNREACHABLE();
   }

   if (vcimage->mem_handle != MEM_INVALID_HANDLE) {
      /* storage is already a managed handle: just take a reference */
      data_handle = vcimage->mem_handle;
      mem_acquire(data_handle);
   } else
      /* raw pixel pointer: wrap it in a handle (64-byte alignment) */
      data_handle = mem_wrap(vcimage->image_data, vcimage->size, 64, MEM_FLAG_NONE, "egl_server_platform_create_pixmap_info");
   if (data_handle == MEM_INVALID_HANDLE) {
      return MEM_INVALID_HANDLE;
   }

   MEM_HANDLE_T handle = khrn_image_create_from_storage(format,
      vcimage->width, vcimage->height, vcimage->pitch,
      MEM_INVALID_HANDLE, data_handle, 0,
      (KHRN_IMAGE_CREATE_FLAG_T)(IMAGE_CREATE_FLAG_TEXTURE | IMAGE_CREATE_FLAG_RENDER_TARGET)); /* todo: are these flags right? */
   /* drop our reference to the storage; presumably the created image
      holds its own -- confirm khrn_image_create_from_storage acquires */
   mem_release(data_handle);

   return handle;
}
/* Drain the buffer, releasing every non-NULL entry, then reset the
 * read/write counters to the empty state. */
void buf_clear(buf_t* buf)
{
    while (!buf_empty(buf)) {
        void* item = buf_read(buf);
        if (item != NULL) {
            mem_release(item);
        }
    }
    buf->writes = 0;
    buf->reads = 0;
}
/* Opens an output stream for the given file name. The name is converted
 * to UTF-8 into a stack buffer; when toUtf8 falls back to a heap buffer
 * instead, that buffer is released after initialisation. */
fileOutputStream_t::fileOutputStream_t (charSequence_t* fileName, bool append) {
  ___CBTPUSH;

  const int capacity = fileName->length () + 1;
  char stackBuf[capacity];
  char* name = fileName->toUtf8 (stackBuf, capacity);
  initialise (name, append);
  if (name != stackBuf) {
    mem_release (name);
  }

  ___CBTPOP;
}
/* (Re)allocate the storage of a buffer object to exactly 'size' bytes and
 * optionally fill it from 'data'. Returns false when the allocation fails,
 * leaving the buffer with zero-size storage. 'is_new_item' distinguishes a
 * freshly created buffer (no existing retained storage) from a resize. */
static bool glxx_buffer_inner_data(GLXX_BUFFER_INNER_T *item, int32_t size, const void *data, bool is_new_item)
{
   uint32_t current_size;
   vcos_assert(size >= 0);

   /* make sure nobody is still using the old storage before we swap it */
   khrn_interlock_write_immediate(&item->interlock);

   current_size = mem_get_size(item->mh_storage);
   if (current_size != (uint32_t)size)
   {
#ifdef __VIDEOCORE4__
      MEM_HANDLE_T handle;
      MEM_FLAG_T flags = MEM_FLAG_DIRECT | MEM_FLAG_DISCARDABLE;
      if(!is_new_item)
      {
         /* unretain existing, retain new */
         if(item->mh_storage!=MEM_ZERO_SIZE_HANDLE)
            mem_unretain(item->mh_storage);
         flags |= MEM_FLAG_RETAINED;
      }
      /* discardable so can be reclaimed if short of memory and no longer retained */
      handle = mem_alloc_ex((uint32_t)size, 4, flags, "GLXX_BUFFER_INNER_T.storage", MEM_COMPACT_DISCARD);
#else
      MEM_HANDLE_T handle = mem_alloc_ex((uint32_t)size, 4, MEM_FLAG_NONE, "GLXX_BUFFER_INNER_T.storage", MEM_COMPACT_DISCARD); // check, no term
#endif
      if (handle == MEM_INVALID_HANDLE)
      {
         /* allocation failed: park the buffer on the zero-size handle */
         MEM_ASSIGN(item->mh_storage, MEM_ZERO_SIZE_HANDLE);
         return false;
      }
      MEM_ASSIGN(item->mh_storage, handle);
      /* MEM_ASSIGN presumably takes its own reference; drop the one from
       * mem_alloc_ex -- same pattern as elsewhere in this file */
      mem_release(handle);
   }
   /* at this point buffer->mh_storage is guaranteed to have size size */
   if (data)
   {
      memcpy(mem_lock(item->mh_storage), data, size);
      mem_unlock(item->mh_storage);
   }
   return true;
}
/* Walk the fmem's chain of interlock tweak chunks and release every
 * recorded interlock on behalf of this render state, then unlock and
 * release the handle that carried it. */
static void do_interlock_release(KHRN_FMEM_T *fmem)
{
   KHRN_FMEM_TWEAK_T *chunk;

   for (chunk = fmem->interlock.start; chunk; chunk = chunk->header.next)
   {
      uint32_t idx;
      for (idx = 1; idx <= chunk->header.count; ++idx)
      {
         /* the interlock lives at an offset inside the locked object */
         KHRN_INTERLOCK_T *interlock = (KHRN_INTERLOCK_T *)
            ((uint8_t *)mem_lock(chunk[idx].interlock.mh_handle) + chunk[idx].interlock.offset);
         khrn_interlock_release(interlock, khrn_interlock_user(fmem->render_state_n));
         mem_unlock(chunk[idx].interlock.mh_handle);
         mem_release(chunk[idx].interlock.mh_handle);
      }
   }
}
/* Creates a filename_t from a character sequence; returns NULL when the
 * sequence itself is NULL. The sequence is converted to UTF-8 via a stack
 * buffer; a heap buffer returned by toUtf8 is released afterwards. */
filename_t* filename_t::create (charSequence_t* filename) {
  ___CBTPUSH;

  filename_t* result;
  if (filename == NULL) {
    result = NULL;
  } else {
    const int capacity = filename->length () + 1;
    char stackBuf[capacity];
    char* utf8Name = filename->toUtf8 (stackBuf, capacity);
    result = filename_t::create (utf8Name);
    if (utf8Name != stackBuf) {
      mem_release (utf8Name);
    }
  }

  ___CBTPOP;
  return result;
}
/* Flushes and closes the stream, releasing the internal buffer. Returns
 * the result of the final flush, or 0 when the stream was already closed.
 * Safe to call more than once. */
int fileOutputStream_t::close (void) {
  ___CBTPUSH;

  int result = 0;
  if (handle != NULL) {
    result = flush ();
    mem_release (buf);
    fclose (handle);
    handle = NULL;
  }

  ___CBTPOP;
  return result;
}
/* Release all core subsystems, the memory manager last.
 * NOTE(review): the comment below says the leak report must run before
 * the log is released, yet log_release() is invoked *before*
 * mem_reportleaks() -- confirm the logger is still usable after
 * log_release(), or move the report earlier. */
void core_release(int report_leaks)
{
	sock_release();
	timer_releasemgr();
	fio_releasemgr();
	json_release();
	err_release();
	log_release();
	/* dump memory leaks before releasing memory manager and log
	 * because memory leak report dumps leakage data to logger */
	if (report_leaks)
		mem_reportleaks();
	mem_release();
}
/* Allocate and initialise an empty lock-free ordered list able to hold
 * 'nbrelm' elements (plus two sentinel nodes); 'cmp' orders the stored
 * values. Returns NULL on any allocation failure, unwinding whatever was
 * already constructed. */
struct lf_ordlist *lf_ordlist_create(size_t nbrelm, int (*cmp)(void *a, void *b))
{
	struct lf_ordlist *lst;

	lst = calloc(1, sizeof(*lst));
	if (!lst)
		return NULL;
	lst->cmp = cmp;
	lst->nelms = nbrelm + 2; /* +2 for the head and tail sentinels */
	lst->fl = mem_freelist_create(lst->nelms, 1, sizeof(struct lf_ordlist_node));
	if (!lst->fl)
		goto err_fl;
	lst->head = mem_new(lst->fl);
	if (!lst->head)
		goto err_head;
	lst->tail = mem_new(lst->fl);
	if (!lst->tail)
		goto err_tail;
	/* the head's next link holds its own reference to the tail */
	mem_incr_ref(lst->tail);
	NEXT(lst->head) = (struct node *) lst->tail;
	return lst;

	/* error unwinding, in reverse order of construction */
err_tail:
	mem_release(lst->fl, lst->head);
err_head:
	mem_freelist_destroy(lst->fl);
err_fl:
	free(lst);
	return NULL;
}
/* Appends the UTF-8 encoding of 'str' to the stream. The text is encoded
 * straight into the free tail of the internal buffer when possible;
 * otherwise toUtf8 returns a heap block, which is written out through
 * writeLargeBlock and then released. Returns 0 on success, or the error
 * code of the failed flush/write. */
int fileOutputStream_t::write (charSequence_t* str) {
  ___CBTPUSH;

  int result = index == IO_BUF_SIZE ? writeBuffer () : 0;
  if (result == 0) {
    int encodedLength;
    char* tail = buf + index;
    char* encoded = str->toUtf8 (tail, IO_BUF_SIZE - index, &encodedLength);
    if (encoded == tail) {
      /* encoded in place: just advance the fill index */
      index += encodedLength;
    } else {
      result = writeLargeBlock (encoded, encodedLength);
      mem_release (encoded);
    }
  }

  ___CBTPOP;
  return result;
}
/* Remove the element equal to 'value'. Returns 1 when an element was
 * removed, 0 when none was found. Removal is two-phase: the victim's next
 * pointer is first marked (logical delete), then the node is unlinked
 * (physical delete) -- by us when the second CAS succeeds, or by a
 * follow-up search() when it does not. */
int lf_ordlist_remove(struct lf_ordlist *lst, void *value)
{
	struct lf_ordlist_node *right_node, *right_node_next, *left_node;

	for ( ; ; ) {
		right_node = search(lst, value, &left_node);
		if ((right_node == lst->tail) || lst->cmp(right_node->value, value) != 0) {
			/* not present */
			mem_release(lst->fl, right_node);
			mem_release(lst->fl, left_node);
			return 0;
		}
		right_node_next = mem_safe_read(lst->fl, &NEXT(right_node));
		if (!IS_MARKED(right_node_next)) {
			assert(right_node != lst->tail);
			/* phase 1: logically delete by marking the next ptr */
			if (compare_and_swap(&NEXT(right_node), (intptr_t) right_node_next, (intptr_t) GET_MARKED(right_node_next)))
				break;
		}
		/* lost a race: drop our references and retry */
		mem_release(lst->fl, right_node_next);
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, left_node);
	}
	/* if the CAS succeeds, NEXT(left_node) gets our ref to
	 * 'right_node_next' */
	assert(left_node != lst->tail);
	if (!compare_and_swap(&NEXT(left_node), (intptr_t) right_node, (intptr_t) right_node_next)) {
		mem_release(lst->fl, right_node_next);
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, left_node);
		/* delete it via a search. */
		right_node = search(lst, value, &left_node);
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, left_node);
	} else {
		/* safely deleted. */
		mem_release(lst->fl, right_node); /* our ref */
		mem_release(lst->fl, right_node); /* NEXT(left_node) ref */
		mem_release(lst->fl, left_node);
	}
	return 1;
}
/* Insert 'value', or conditionally replace an existing equal element.
 * When an equal element is present, f(current, value) decides: non-zero
 * means try to swap the stored value for 'value' (retrying under
 * contention), zero means keep the current one. Returns the value that
 * ends up stored (always 'value' on a fresh insert). */
void *lf_ordlist_cond_update(struct lf_ordlist *lst, int (*f)(void *, void*), void *value)
{
	struct lf_ordlist_node *new_node;
	struct lf_ordlist_node *right_node, *left_node;

	assert(value);
	/* speculatively build a node; released again if an equal element
	 * is found instead */
	new_node = mem_new(lst->fl);
	new_node->value = value;
	for ( ; ; ) {
		right_node = search(lst, value, &left_node);
		if (right_node != lst->tail && lst->cmp(right_node->value, value) == 0) {
			mem_release(lst->fl, left_node);
			mem_release(lst->fl, new_node);
			void *ret = right_node->value;
			while (f /* Never changes */) {
				void *v = right_node->value;
				if (!f(v, value)) {
					/* f vetoed the update: keep current */
					ret = v;
					break;
				}
				if (compare_and_swap(&right_node->value, (intptr_t) v, (intptr_t) value)) {
					ret = value;
					break;
				}
			}
			mem_release(lst->fl, right_node);
			return ret;
		}
		/* NEXT(left_node) gets our reference if CAS succeeds,
		 * and NEXT(new_node) gets the ref to right_node if
		 * CAS succeeds */
		NEXT(new_node) = (struct node *) right_node;
		assert(left_node != lst->tail);
		if (compare_and_swap(&NEXT(left_node), (intptr_t) right_node, (intptr_t) new_node)) {
			mem_release(lst->fl, right_node);
			mem_release(lst->fl, left_node);
			return value;
		}
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, left_node);
		/* If we release 'new_node' later, don't also release
		 * right_node. */
		NEXT(new_node) = NULL;
	}
}
/**
 * Search for a key's position in the ordered list. Upon return,
 * *left_node points to the left node (and it has a reference) and the
 * return value points to a node that is referenced too. The returned
 * (right) node is the first unmarked node whose value is >= key, or the
 * tail; marked (logically deleted) nodes found between the two are
 * physically unlinked along the way.
 */
static struct lf_ordlist_node *search(struct lf_ordlist *lst, void *key,
				      struct lf_ordlist_node **left_node)
{
	struct lf_ordlist_node *left_node_next, *right_node;
	struct lf_ordlist_node *t, *t_next;

search_again:
	for ( ; ; ) {
		*left_node = left_node_next = NULL;
		t = mem_safe_read(lst->fl, &lst->head);
		t_next = mem_safe_read(lst->fl, &NEXT(lst->head));

		/* 1: Find left_node and right_node
		 *
		 * Entering this loop: t and t_next are referenced.
		 *
		 * Leaving this loop: t is referenced, and if t_next
		 * is not marked then *left_node
		 * and left_node_next are set and
		 * have their own references. If t
		 * != lst->tail, then t_next is
		 * also referenced.
		 */
		do {
			if (!IS_MARKED(t_next)) {
				/* these refs may have been copied
				 * before, but we had to loop again */
				if (*left_node) {
					mem_release(lst->fl, *left_node);
					*left_node = NULL;
				}
				if (left_node_next) {
					mem_release(lst->fl, left_node_next);
					left_node_next = NULL;
				}
				/* copy both t and t_next's refs */
				mem_incr_ref(t);
				(*left_node) = t;
				mem_incr_ref(t_next);
				left_node_next = t_next;
			}
			mem_release(lst->fl, t);
			t = GET_UNMARKED(t_next); /* take t_next's ref */
			if (t == lst->tail)
				break;
			t_next = mem_safe_read(lst->fl, &NEXT(t));
		} while (IS_MARKED(t_next) || lst->cmp(t->value, key) < 0);
		if (t != lst->tail)
			mem_release(lst->fl, t_next); /* done with t_next */
		right_node = t; /* takes t's reference */

		/*
		 * At this point, right_node, *left_node and
		 * left_node_next all have references.
		 */

		/* 2: Check nodes are adjacent */
		if (left_node_next == right_node) {
			mem_release(lst->fl, left_node_next);
			/* the right node got marked under us: start over */
			if (right_node != lst->tail && IS_MARKED(NEXT(right_node))) {
				mem_release(lst->fl, right_node);
				mem_release(lst->fl, *left_node);
				goto search_again;
			} else {
				return right_node;
			}
		}

		/* 3: Remove one or more marked nodes
		 *
		 * Here, left_node_next, right_node and *left_node are
		 * referenced. */

		/* in case CAS succeeds. */
		mem_incr_ref(right_node);
		assert(*left_node != lst->tail);
		if (compare_and_swap(&NEXT(*left_node), (intptr_t) left_node_next, (intptr_t) right_node)) {
			/* one for NEXT(*left_node), one for
			 * 'left_node_next' */
			mem_release(lst->fl, left_node_next);
			mem_release(lst->fl, left_node_next);
			if ((right_node != lst->tail) && IS_MARKED(NEXT(right_node))) {
				mem_release(lst->fl, right_node);
				mem_release(lst->fl, *left_node);
				goto search_again;
			} else {
				return right_node;
			}
		}

		/* one for the CAS prep. ref and one for
		 * 'right_node' */
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, right_node);
		mem_release(lst->fl, *left_node);
		mem_release(lst->fl, left_node_next);
	} /* for( ; ; ) */

	/* should not reach here */
	assert(0);
}
/* Payload free callback for a clock port. The payload pointer is actually
 * a MEM_HANDLE_T smuggled through a uint8_t* (see the cast), so it is
 * released via the relocatable-memory API rather than the heap. */
static void mmal_port_clock_payload_free(MMAL_PORT_T *port, uint8_t *payload)
{
   MMAL_PARAM_UNUSED(port);
   mem_release((MEM_HANDLE_T)payload);
}
/**
 * create a fixed pool of relocatable objects.
 *
 * return (opaque) pointer to the newly created pool, or NULL if
 * there was insufficient memory.
 *
 * @param size Size of each sub-object
 * @param num Number of sub-objects
 * @param align Alignment of sub-objects
 * @param flags Flags
 * @param name A name for this pool
 * @param overhead Allocate additional space in the non-moveable heap
 *
 * If flags include VC_POOL_FLAGS_SUBDIVISIBLE we get a single relocatable
 * memory block large enough for all 'n' objects; it can either be used
 * as a single block, or divided up into 'n' of them.
 * -------------------------------------------------------------------- */
VC_POOL_T * vc_pool_create( size_t size, uint32_t num, uint32_t align, VC_POOL_FLAGS_T flags, const char *name, uint32_t overhead_size )
{
   int i;
   int mem_flags = MEM_FLAG_NO_INIT;
   vcos_assert(size != 0);
   vcos_assert(num != 0);
   vcos_assert(name);

   /* round per-object overhead up to the overhead alignment */
   overhead_size = (overhead_size+OVERHEAD_ALIGN-1) & ~(OVERHEAD_ALIGN-1);

   // allocate and zero main struct
   /* one allocation holds: the pool header, the object descriptor array,
      and the per-object overhead area */
   int alloc_len = sizeof(VC_POOL_T) + num * sizeof(VC_POOL_OBJECT_T) + num * overhead_size;
   VC_POOL_T *pool = (VC_POOL_T*)rtos_prioritymalloc( alloc_len, RTOS_ALIGN_DEFAULT, RTOS_PRIORITY_UNIMPORTANT, "vc_pool" );
   if ( !pool )
      return NULL; // failed to allocate pool
   memset( pool, 0, alloc_len );

   // array of pool objects
   pool->object = (VC_POOL_OBJECT_T *)((unsigned char *)pool + sizeof(VC_POOL_T));

   // initialise
   pool->magic = POOL_MAGIC;
   pool->latch = rtos_latch_unlocked();
   /* translate pool flags into memory-allocation flags */
   if ( flags & VC_POOL_FLAGS_DIRECT ) mem_flags |= MEM_FLAG_DIRECT;
   if ( flags & VC_POOL_FLAGS_COHERENT ) mem_flags |= MEM_FLAG_COHERENT;
   if ( flags & VC_POOL_FLAGS_HINT_PERMALOCK ) mem_flags |= MEM_FLAG_HINT_PERMALOCK;
   if ( align == 0 ) align = 32; // minimum 256-bit aligned
   vcos_assert( _count(align) == 1 ); // must be power of 2
   pool->alignment = align;
   pool->overhead = (uint8_t*)(pool+1) + num*sizeof(VC_POOL_OBJECT_T);
   pool->overhead_size = overhead_size;
   pool->name = name;
   pool->max_objects = num;
   pool->pool_flags = flags;

   if ( flags & VC_POOL_FLAGS_SUBDIVISIBLE ) {
      // a single mem_handle, shared between objects
      uint32_t rounded_size = (size + align - 1) & ~(align - 1);
      pool->mem = mem_alloc( rounded_size, align, (MEM_FLAG_T)mem_flags, name );
      if ( pool->mem == MEM_INVALID_HANDLE ) {
         // out of memory... clean up nicely and return error
         rtos_priorityfree( pool );
         return NULL;
      }
      pool->nobjects = 0;
      pool->object_size = 0;
      pool->max_object_size = rounded_size;
   } else {
      // bunch of individual objects
      for (i=0; i<num; i++) {
         MEM_HANDLE_T mem = mem_alloc( size, align, (MEM_FLAG_T)mem_flags, name );
         pool->object[i].mem = mem;
         // all ->offset fields are 0 from the previous memset
         if ( mem == MEM_INVALID_HANDLE ) {
            // out of memory... clean up nicely and return error
            /* release the objects allocated so far, newest first */
            while (i > 0)
               mem_release( pool->object[--i].mem );
            rtos_priorityfree( pool );
            return NULL; // failed to allocate pool
         }
         // pointer to 'overhead' memory for this entry
         pool->object[i].overhead = pool->overhead + i*pool->overhead_size;
      }
      pool->mem = MEM_INVALID_HANDLE;
      pool->nobjects = num;
      pool->object_size = size;
      pool->max_object_size = size;
   }

   create_event( pool );

   // link into global list (pool_list_latch guards vc_pool_list)
   rtos_latch_get(&pool_list_latch);
   pool->next = vc_pool_list;
   vc_pool_list = pool;
   rtos_latch_put(&pool_list_latch);

   // done
   return pool;
}
/* rbt_t */

/* Destructor callback for an rbt_t: drops the reference to the root node
 * (node destruction cascades to the children via rbt_node_free). */
static void rbt_free(void* v_tree){
    assert(v_tree != NULL);
    rbt_t* t = (rbt_t*) v_tree;
    if (NULL != t->root) {
        mem_release(t->root);
    }
}