/* * Frees the memory pointed to by ptr. */ void mm_free(void *ptr) { int bin_index; char *tree; char *slot_ptr = (char *)ptr; /* Find out which tree the pointer belongs to */ char *run = mm_findnodetree(slot_ptr, &tree, &bin_index); if (!run) { fprintf(stderr, "Trying to free an invalid pointer.\n"); return; } if (tree != large_allocations) { /* Tiny or small allocation */ /* Free the slot */ int32_t pos = ((int32_t)run + mmrun_get_size(run) - (int32_t)ptr); mmrun_toggleslot((pos / mmrun_get_slotsize(run)) - 1, run); /* Free the run if it is empty */ if (mmrun_isempty(run)) { rbtree_remove(run, &bins[bin_index]); mmfreerun_add(run); } } else { /* Remove the run from the large allocation list and free it */ rbtree_remove(run, &large_allocations); mmfreerun_add(run); } }
/*
 * Tries to allocate a suitably sized run from the free list.
 *
 * Scans the free list linearly for the first run large enough
 * (address-ordered first fit), removes it, and splits off any excess.
 *
 * Returns the size of the allocated run and a pointer to it via
 * *allocated.  Returns 0 and NULL if no suitably sized run exists.
 */
static size_t mmrun_allocate_freerun(size_t size, char **allocated)
{
    /* Nothing to do if the free list is empty */
    if (free_runs) {
        int run_size;
        char *run = rbtree_first(free_runs);

        /* Address-ordered first fit: take the first run that is big enough */
        while (run && mmrun_get_largesize(run) < size) {
            run = rbtree_next(run);
        }

        /* If no run is found return NULL */
        if (!run) {
            *allocated = NULL;
            return 0;
        }

        /* Remove the run from the free list */
        rbtree_remove(run, &free_runs);

        /*
         * mmrun_split returns the final size of the (possibly trimmed)
         * run; the original code also read mmrun_get_largesize() here,
         * but that value was immediately overwritten (dead store).
         */
        run_size = mmrun_split(size, run);
        *allocated = run;
        return run_size;
    }
    *allocated = NULL;
    return 0;
}
/*
 * Detaches a cache entry from its group (tree, list, and count) and
 * releases its buffer and the entry itself.
 */
static inline void vres_cache_free(vres_cache_group_t *group, vres_cache_t *cache)
{
    group->count--;
    rbtree_remove(&group->tree, &cache->desc);
    list_del(&cache->list);

    /* buffer first, then the entry that owns it */
    free(cache->buf);
    free(cache);
}
static bool _predicate (void) { int i; KeyValuePair_t n; struct rbtree tree; KeyValuePair_t *node; struct rbtree_node *result; rbtree_init (&tree, _compareFn, 0); for (i = 0; i < TreeSize; i++) { node = malloc (sizeof (KeyValuePair_t)); node->key = i; node->val = TreeSize + i; rbtree_insert ((struct rbtree_node *) &node->node, &tree); } // Lookup the nodes. for (i = 0; i < TreeSize; i++) { KeyValuePair_t *kvResult; n.key = i; kvResult = rbtree_container_of (rbtree_lookup ((struct rbtree_node *) &n.node, &tree), KeyValuePair_t, node); if (kvResult->key != i || kvResult->val != TreeSize + i) { return false; } } // This lookup should fail. n.key = TreeSize; result = rbtree_lookup ((struct rbtree_node *) &n.node, &tree); if (result != NULL) { return false; } //iterate (rbtree_first(&tree), iterateFn); result = rbtree_first(&tree); while (result) { KeyValuePair_t *kvResult = rbtree_container_of (result, KeyValuePair_t, node); struct rbtree_node *n = result; result = rbtree_next (result); rbtree_remove (n, &tree); free (kvResult); } // This lookup should fail because we just cleared the tree. n.key = TreeSize; n.key = 0; result = rbtree_lookup ((struct rbtree_node *) &n.node, &tree); if (result != NULL) { return false; } return true; }
/*
 * Removes a file entry from the benchmark file tree and parks its node
 * on the circular "holes" list so the slot can be reused later.
 *
 * The whole operation runs under the write lock, so it is safe against
 * concurrent access to b->files.
 */
void remove_file(struct benchfiles *b, struct ffsb_file *entry)
{
    rw_lock_write(&b->fileslock);
    rbtree_remove(b->files, entry, NULL);
    /* add node to the cir. list of "holes" */
    cl_insert_tail(b->holes, entry);
    rw_unlock_write(&b->fileslock);
}
/*
 * Returns a run to the free list, coalescing with an adjacent free run
 * (before or after) when one exists.
 */
static void mmfreerun_add(char *run)
{
    char *buddy;
    int run_size = mmrun_get_size(run);

    /*
     * Try to merge backwards: look up the byte just before this run.
     * Plain pointer arithmetic replaces the old (int) casts, which
     * truncated pointer values on 64-bit platforms.
     */
    buddy = rbtree_lookup(run - 1, free_runs);
    if (buddy) {
        int buddy_size = mmrun_get_largesize(buddy);

        /* Grow the existing free run in place; no tree update needed */
        mmrun_set_largesize(buddy_size + run_size, buddy);
        return;
    }

    /*
     * Try to merge forwards with the run that follows this one.
     * NOTE(review): the +1 offset matches the forward lookup in
     * mm_realloc — verify against rbtree_lookup's range semantics.
     */
    buddy = rbtree_lookup(run + run_size + 1, free_runs);
    if (buddy) {
        int buddy_size = mmrun_get_largesize(buddy);

        /*
         * Remove the old run and add its size to the new run.
         * Then add the new run to the free list.
         */
        rbtree_remove(buddy, &free_runs);
        mmrun_init(0, 0, run);
        mmrun_set_largesize(buddy_size + run_size, run);
        rbtree_insert(run, &free_runs);
        return;
    }

    /* The run can't be merged so add it to the free list */
    mmrun_init(0, 0, run);
    mmrun_set_largesize(run_size, run);
    rbtree_insert(run, &free_runs);
}
/*
 * Runs an expired alarm's callback, then unlinks the alarm from the
 * global alarm tree and from the task queue.
 *
 * NOTE(review): the handler runs *before* the alarm is removed from
 * `alarms` and the queue — if a handler frees or re-arms `a`, the two
 * removal calls below would operate on stale state.  Confirm the
 * handler contract before relying on this ordering.
 */
void _execute_alarm_task(alarm_t *a)
{
    a->handler(a->data);
    rbtree_remove(alarms, a);
    _remove_alarm_task_from_queue(a);
}
/* * Change the size of an allocation and copy data from the old pointer. * * If ptr is NULL mm_reallloc is the same as mm_malloc. * If size is 0 mm_realloc is the same as mm_free. * * Returns a pointer to the newly allocated memory. */ void *mm_realloc(void *ptr, size_t size) { /* If size is 0 free the pointer */ if (size == 0) { mm_free(ptr); return NULL; } /* If ptr is NULL just allocate */ if (ptr == NULL) { return mm_malloc(size); } /* Find the run the old pointer belongs to */ char *old_run = mm_findnodetree((char *)ptr, NULL, NULL); /* Find out the size of the old pointer */ int old_size; if (mmrun_get_slotcount(old_run)) { old_size = mmrun_get_slotsize(old_run); } else { old_size = mmrun_get_largesize(old_run); } /* See if ptr can be expanded */ if (mmrun_get_slotcount(old_run) == 0) { /* Return if the run is already large enough */ if ((old_size - RUN_HEADER_SIZE) >= size) { return ptr; } /* See if there is a free run after the old run */ if (free_runs) { char *run = rbtree_lookup(old_run + old_size + 1, free_runs); /* Check if it the expanded run can contain the new size */ if (run && (mmrun_get_size(run) + old_size) >= size) { /* Remove the free run from the free list */ rbtree_remove(run, &free_runs); int run_size = mmrun_get_largesize(run); /* Merge it with the old run */ mmrun_init(0, 0, old_run); mmrun_set_largesize(old_size + run_size, old_run); /* Split off any excess */ mmrun_split(size + RUN_HEADER_SIZE, old_run); /* Return the expanded run */ return old_run + RUN_HEADER_SIZE; } } } /* If ptr can't be expanded just allocate a new run and copy */ void *new_ptr = mm_malloc(size); if (new_ptr) { /* Copy data from the old pointer to the new one */ int min_size = (size > old_size) ? old_size : size; memcpy(new_ptr, ptr, min_size); /* Free the old pointer */ mm_free(ptr); } return new_ptr; }
/*
 * Tears down an event: destroys its condition variable, unlinks it
 * from the group's tree, and frees the storage.
 */
static inline void vres_event_free(vres_event_group_t *group, vres_event_t *event)
{
    /* condvar and tree entry are independent; free must come last */
    pthread_cond_destroy(&event->cond);
    rbtree_remove(&group->tree, &event->desc);
    free(event);
}
/*
 * Removes `node` from the global address map.  The removal callback
 * (addrMapNodeDestructor) presumably releases the node's storage —
 * verify against its definition.
 */
static void delNode(addrMapNode* node)
{
    rbtree_remove(&sAddrMap, &node->node, addrMapNodeDestructor);
}
int main() { mmnode ns[10]; int i = 0; for( ; i < 10; ++i) { ns[i].key = i+1; ns[i].base.key = &ns[i].key; } rbtree_t rb = create_rbtree(_comp); for(i = 0; i < 10; ++i) rbtree_insert(rb,(rbnode*)&ns[i]); { mmnode *n = (mmnode*)rbtree_first(rb); while(n) { printf("%d\n",n->key); n = (mmnode*)rbnode_next((rbnode*)n); } } rbtree_check_vaild(rb); mmnode *succ = (mmnode*)rbtree_remove(rb,(void*)&ns[3].key); printf("%d\n",succ->key); rbtree_check_vaild(rb); { mmnode *n = (mmnode*)rbtree_first(rb); while(n) { printf("%d\n",n->key); n = (mmnode*)rbnode_next((rbnode*)n); } } { mmnode *n = (mmnode*)rbtree_last(rb); while(n) { printf("%d\n",n->key); n = (mmnode*)rbnode_pre((rbnode*)n); } } /* map_t m = MAP_CREATE(int,int,_comp,NULL); MAP_INSERT(int,int,m,1,1); MAP_INSERT(int,int,m,2,2); MAP_INSERT(int,int,m,3,3); MAP_INSERT(int,int,m,4,4); MAP_INSERT(int,int,m,5,5); MAP_INSERT(int,int,m,6,6); MAP_INSERT(int,int,m,7,7); MAP_INSERT(int,int,m,8,8); MAP_INSERT(int,int,m,9,9); MAP_INSERT(int,int,m,10,10); printf("------test iter------\n"); map_iter it = map_begin(m); map_iter end = map_end(m); for( ; !IT_EQ(it,end); IT_NEXT(it)) printf("%d\n",IT_GET_VAL(int,it)); printf("------test remove 4------\n"); MAP_REMOVE(int,m,4); it = map_begin(m); end = map_end(m); for( ; !IT_EQ(it,end); IT_NEXT(it)) printf("%d\n",IT_GET_VAL(int,it)); */ return 0; }
/*
 * Removes `path` from the directory tree, holding the writer lock for
 * the duration of the removal.
 */
static inline void vres_file_delete_dir(const char *path)
{
    pthread_rwlock_wrlock(&vres_file_dlock);
    rbtree_remove(&vres_file_dtree, (void *)path);
    pthread_rwlock_unlock(&vres_file_dlock);
}