/* Recursively free the subtree rooted at n in post-order; when free_data is
 * non-zero, the stored keys/values are freed as well. */
void map_node_destroy(map_node *n, int free_data)
{
	if (n) {
		map_node_destroy(n->left, free_data);
		map_node_destroy(n->right, free_data);
		if (free_data) {
			free(n->x->a);
			if (n->x->b != (void*)1) /* (void*)1 is the sentinel value used for sets */
				free(n->x->b);
		}
		free(n->x);
		free(n);
	}
}
/* Note: this function doesn't handle memory deallocation for keys/values. */
int map_delete_node(map_node *node)
{
	if (!node)
		return 0;
	map_remove_node(node);
	map_node_destroy(node);
	return 1;
}
/* Post-order deletion of the whole subtree rooted at node. */
int map_delete_tree(map_node *node)
{
	if (!node)
		return 0;
	if (node->left)
		map_delete_tree(node->left);
	if (node->right)
		map_delete_tree(node->right);
	map_node_destroy(node);
	return 1;
}
/* Trim the global LRU list of map nodes while the total page count exceeds
 * max_map_pages: the oldest node is either re-queued (if its owning map is
 * still within its limits) or destroyed. Bails out early if someone is
 * waiting to destroy the map. */
static void map_lru_scan(void)
{
	int max_loops = atomic_read(&map_pages_nr);

	while (atomic_read(&map_pages_nr) > max_map_pages && --max_loops >= 0) {
		struct ploop_map *map;
		struct map_node *candidate = NULL;

		spin_lock_irq(&map_lru_lock);
		if (!list_empty(&map_lru)) {
			candidate = list_first_entry(&map_lru, struct map_node, lru);
			atomic_inc(&candidate->refcnt);
		}
		spin_unlock_irq(&map_lru_lock);

		if (!candidate)
			break;

		map = candidate->parent;

		spin_lock_irq(&map->plo->lock);
		spin_lock(&map_lru_lock);
		if (waitqueue_active(&map->destroy_waitq)) {
			atomic_dec(&candidate->refcnt);
			wake_up(&map->destroy_waitq);
			spin_unlock(&map_lru_lock);
			spin_unlock_irq(&map->plo->lock);
			return;
		}
		list_del_init(&candidate->lru);
		if (atomic_dec_and_test(&candidate->refcnt)) {
			/* This instance is within its limits, just
			 * re-add the node to the tail of the lru.
			 */
			if (map->pages <= map->plo->tune.min_map_pages &&
			    time_after(map->last_activity +
				       map->plo->tune.max_map_inactivity, jiffies) &&
			    !test_bit(PLOOP_MAP_DEAD, &map->flags)) {
				list_add_tail(&candidate->lru, &map_lru);
			} else {
				map_node_destroy(candidate);
			}
		}
		spin_unlock(&map_lru_lock);
		spin_unlock_irq(&map->plo->lock);

		if (!(max_loops & 16))
			cond_resched();
	}
}
/* Free the whole map, including the node tree rooted at m->x. */
void map_destroy(map *m, int free_data)
{
	map_node_destroy(m->x, free_data);
	free(m);
}
/* Empty the map but keep the map object itself usable. */
void map_clear(map *m, int free_data)
{
	map_node_destroy(m->x, free_data);
	m->x = NULL;
}
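/*
 * Usage sketch (not part of the original sources): illustrates the ownership
 * convention implied by the free_data flag and the (void*)1 set sentinel.
 * The pair/map_node/map layouts and the make_node() helper below are
 * assumptions inferred from the field accesses above (m->x is the root node,
 * n->x->a / n->x->b hold the key/value pair), not the project's real
 * definitions.
 */
#include <stdlib.h>
#include <string.h>

typedef struct pair { void *a, *b; } pair;          /* assumed key/value pair      */
typedef struct map_node {
	pair *x;
	struct map_node *left, *right;
} map_node;
typedef struct map { map_node *x; } map;             /* assumed: x is the root node */

/* Hypothetical helper: allocate a leaf node owning copies of key/value. */
static map_node *make_node(const char *key, const char *val)
{
	map_node *n = malloc(sizeof(*n));
	n->x = malloc(sizeof(*n->x));
	n->x->a = strdup(key);
	n->x->b = val ? (void *)strdup(val) : (void *)1; /* (void*)1 marks a set entry */
	n->left = n->right = NULL;
	return n;
}

int main(void)
{
	map *m = malloc(sizeof(*m));
	m->x = make_node("root", "value");
	m->x->left = make_node("leaf", NULL);            /* set-style entry, no value  */

	map_clear(m, 1);   /* free_data = 1: nodes, keys and values are all freed      */
	map_destroy(m, 1); /* safe: m->x is NULL after map_clear                       */
	return 0;
}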