static void maps__delete(struct rb_root *maps)
{
	struct rb_node *next = rb_first(maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, maps);
		map__delete(pos);
	}
}
static void *nommu_region_list_start(struct seq_file *m, loff_t *_pos)
{
	struct rb_node *p;
	loff_t pos = *_pos;

	down_read(&nommu_region_sem);

	for (p = rb_first(&nommu_region_tree); p; p = rb_next(p))
		if (pos-- == 0)
			return p;
	return NULL;
}
size_t dso__fprintf(struct dso *self, FILE *fp)
{
	size_t ret = fprintf(fp, "dso: %s\n", self->name);
	struct rb_node *nd;

	for (nd = rb_first(&self->syms); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);

		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}
void symbols__delete(struct rb_root *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}
static void dso__delete_symbols(struct dso *self)
{
	struct symbol *pos;
	struct rb_node *next = rb_first(&self->syms);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &self->syms);
		symbol__delete(pos, self->sym_priv_size);
	}
}
void debug_print_digest_tree(struct rb_root *d_tree)
{
	struct rb_node *node;
	struct d_tree *d_node;

	printf("Digests in digest tree:\n");
	for (node = rb_first(d_tree); node; node = rb_next(node)) {
		d_node = rb_entry(node, struct d_tree, t_node);
		debug_print_digest(stdout, d_node->digest);
		printf("\n");
	}
}
/**
 * Free the memory allocated by ion_debug_mem_map_create
 * @param mem_map The mem map to free.
 */
static void ion_debug_mem_map_destroy(struct rb_root *mem_map)
{
	if (mem_map) {
		struct rb_node *n;

		while ((n = rb_first(mem_map)) != NULL) {
			struct mem_map_data *data =
				rb_entry(n, struct mem_map_data, node);

			rb_erase(&data->node, mem_map);
			kfree(data);
		}
	}
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * Traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client and matches the
	 * request of the caller, allocate from it.  Repeat until an allocation
	 * has succeeded or all heaps have been tried.
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);

		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap ID */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference; drop one here.
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);

	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
void rblist__delete(struct rblist *rblist)
{
	if (rblist != NULL) {
		struct rb_node *pos, *next = rb_first(&rblist->entries);

		while (next) {
			pos = next;
			next = rb_next(pos);
			rblist__remove_node(rblist, pos);
		}
		free(rblist);
	}
}
struct str_node *strlist__entry(const struct strlist *self, unsigned int idx)
{
	struct rb_node *nd;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct str_node *pos = rb_entry(nd, struct str_node, rb_node);

		if (!idx--)
			return pos;
	}

	return NULL;
}
/*
 * Find the nearest FN and update s_nearest_time
 */
void osmo_gsm_timers_prepare(void)
{
	struct rb_node *node;
	int current_fn;

	current_fn = get_current_fn();

	node = rb_first(&timer_root);
	if (node) {
		struct osmo_gsm_timer_list *this_timer;

		this_timer = container_of(node, struct osmo_gsm_timer_list, node);
		update_nearest(&this_timer->fn, &current_fn);
	} else {
static void kllds_remove_rbtree(void)
{
	kllds_entry *elem;
	struct rb_node *next;

	write_lock(&kllds_rwlck);
	while ((next = rb_first(&kllds_rbtree))) {
		elem = rb_entry(next, kllds_entry, rb);
		rb_erase(next, &kllds_rbtree);
		kfree(elem->val);
		kmem_cache_free(tmp_kllds_cache, elem);
	}
	write_unlock(&kllds_rwlck);
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
static void remove_all_attrs(struct rb_root *root)
{
	struct rb_node *node, *next_node;
	struct ib_sa_attr_list *attr_list;

	write_lock_irq(&rwlock);
	for (node = rb_first(root); node; node = next_node) {
		next_node = rb_next(node);
		attr_list = rb_entry(node, struct ib_sa_attr_list, node);
		remove_attr(root, attr_list);
	}
	write_unlock_irq(&rwlock);
}
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle, node);

		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}
void o2fsck_icount_free(o2fsck_icount *icount)
{
	struct rb_node *node;
	icount_node *in;

	ocfs2_bitmap_free(icount->ic_single_bm);
	while ((node = rb_first(&icount->ic_multiple_tree)) != NULL) {
		in = rb_entry(node, icount_node, in_node);
		rb_erase(node, &icount->ic_multiple_tree);
		free(in);
	}
	free(icount);
}
rb_node__t *rb_next(const rb_node__t *node)
{
	if (node == NULL)
		return NULL;

	rb_node__t *parent = node->parent;

	/* if there is a right subtree, the successor is its leftmost node */
	if (node->right != NULL)
		return rb_first(node->right);

	/* otherwise climb up until we come out of a left subtree */
	while (parent != NULL && parent->right == node) {
		node = parent;
		parent = node->parent;
	}
	return parent;
}
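/*
 * Note: the rb_next() above assumes an rb_first() that takes a subtree root
 * and returns its leftmost (smallest) node.  That definition is not shown in
 * this collection; a minimal sketch consistent with the usage above might be:
 */
rb_node__t *rb_first(rb_node__t *node)
{
	if (node == NULL)
		return NULL;
	/* keep descending to the left; the leftmost node holds the minimum */
	while (node->left != NULL)
		node = node->left;
	return node;
}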
size_t perf_session__fprintf(struct perf_session *self, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
/**
 * Get first node in the map.
 */
static struct tree_node *map_first(const struct map *tmap)
{
	struct rb_node *node;

	if (!tmap)
		return NULL;

	node = rb_first(&tmap->root);
	if (!node)
		return NULL;

	return container_of(node, struct tree_node, node);
}
int perf_event__synthesize_modules(perf_event__handler_t process,
				   struct perf_session *session,
				   struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc(sizeof(event->mmap) +
					 session->id_hdr_size);
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, session->id_hdr_size);
		event->mmap.header.size += session->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;
		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);

		process(event, &synth_sample, session);
	}

	free(event);
	return 0;
}
static int ion_debug_find_buffer_owner(const struct ion_client *client,
				       const struct ion_buffer *buf)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		const struct ion_handle *handle =
			rb_entry(n, const struct ion_handle, node);

		if (handle->buffer == buf)
			return 1;
	}
	return 0;
}
static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->__bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred;

	cred = nfs4_get_machine_cred(clp);
	if (cred != NULL)
		goto out;
	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		cred = get_rpccred(sp->so_cred);
	}
out:
	return cred;
}
/*
 * The below is leftmost cache rbtree addon
 */
static struct fiops_ioc *fiops_rb_first(struct fiops_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct fiops_ioc, rb_node);

	return NULL;
}
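/*
 * Note (assumption, not part of the snippet above): a cached-leftmost scheme
 * like this stays correct only if erase invalidates the cache when the cached
 * node is removed.  A companion helper in the same style might look like the
 * sketch below; fiops_rb_erase is a hypothetical name used for illustration.
 */
static void fiops_rb_erase(struct rb_node *n, struct fiops_rb_root *root)
{
	/* drop the stale cached leftmost node; fiops_rb_first() rebuilds it lazily */
	if (root->left == n)
		root->left = NULL;
	rb_erase(n, &root->rb);
	root->count--;
}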
unsigned long task_vsize(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	struct rb_node *p;
	unsigned long vsize = 0;

	down_read(&mm->mmap_sem);
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
		vma = rb_entry(p, struct vm_area_struct, vm_rb);
		vsize += vma->vm_end - vma->vm_start;
	}
	up_read(&mm->mmap_sem);
	return vsize;
}
static void kinterval_dump(struct seq_file *m)
{
	struct kinterval *range;
	struct rb_node *node;

	node = rb_first(&kinterval_tree);
	while (node) {
		range = rb_entry(node, struct kinterval, rb);
		seq_printf(m, " start=%llu end=%llu type=%lu (%s)\n",
			   range->start, range->end, range->type,
			   range_attr_name(range->type));
		node = rb_next(&range->rb);
	}
}
static void dso_cache__free(struct rb_root *root)
{
	struct rb_node *next = rb_first(root);

	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
}
static void remove_old_attrs(struct rb_root *root, unsigned long update_id)
{
	struct rb_node *node, *next_node;
	struct ib_sa_attr_list *attr_list;

	write_lock_irq(&rwlock);
	for (node = rb_first(root); node; node = next_node) {
		next_node = rb_next(node);
		attr_list = rb_entry(node, struct ib_sa_attr_list, node);
		if (attr_list->update_id != update_id)
			remove_attr(root, attr_list);
	}
	write_unlock_irq(&rwlock);
}
static void dump_linger_requests(struct seq_file *s, struct ceph_osd *osd)
{
	struct rb_node *n;

	mutex_lock(&osd->lock);
	for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
		struct ceph_osd_linger_request *lreq =
			rb_entry(n, struct ceph_osd_linger_request, node);

		dump_linger_request(s, lreq);
	}
	mutex_unlock(&osd->lock);
}
static void nova_destroy_range_node_tree(struct super_block *sb,
					 struct rb_root *tree)
{
	struct nova_range_node *curr;
	struct rb_node *temp;

	temp = rb_first(tree);
	while (temp) {
		curr = container_of(temp, struct nova_range_node, node);
		temp = rb_next(temp);
		rb_erase(&curr->node, tree);
		nova_free_range_node(curr);
	}
}