Example #1
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_ids id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}
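Every example on this page leans on the same <linux/rbtree.h> idiom: rb_first() yields the leftmost (smallest) node, rb_next() advances in sorted order, and rb_entry() maps the embedded rb_node back to its containing structure. For context, here is a minimal, hedged sketch of how entries usually get linked into such a tree in the first place and how the same walk then consumes them; struct item, its key field and both helpers are hypothetical names, not taken from any of the quoted projects.

/* Minimal sketch (illustrative names only): insert with rb_link_node()/
 * rb_insert_color(), then an in-order walk with rb_first()/rb_next(). */
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node node;	/* embedded rbtree linkage */
	u64 key;
};

static int item_insert(struct rb_root *root, struct item *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	/* descend to the insertion point, ordered by ->key */
	while (*link) {
		struct item *cur = rb_entry(*link, struct item, node);

		parent = *link;
		if (new->key < cur->key)
			link = &(*link)->rb_left;
		else if (new->key > cur->key)
			link = &(*link)->rb_right;
		else
			return -EEXIST;		/* duplicate key */
	}

	rb_link_node(&new->node, parent, link);	/* attach as a leaf */
	rb_insert_color(&new->node, root);	/* rebalance and recolor */
	return 0;
}

static u64 item_sum(struct rb_root *root)
{
	struct rb_node *n;
	u64 sum = 0;

	/* same traversal pattern as ion_debug_heap_total() above */
	for (n = rb_first(root); n; n = rb_next(n))
		sum += rb_entry(n, struct item, node)->key;
	return sum;
}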
Example #2
/* ep_free() is called when the epollfd is closed; all it does is release
 * the remaining resources, so it is fairly simple. */
static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	/* The reason there is no need to call epoll_ctl() to remove the fds that
	 * were added before closing the epollfd is that it is already done here... */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	kfree(ep);
}
Example #3
static void debug_print_tree(struct ext4_sb_info *sbi)
{
	struct rb_node *node;
	struct ext4_system_zone *entry;
	int first = 1;

	printk(KERN_INFO "System zones: ");
	node = rb_first(&sbi->system_blks);
	while (node) {
		entry = rb_entry(node, struct ext4_system_zone, node);
		printk("%s%llu-%llu", first ? "" : ", ",
		       entry->start_blk, entry->start_blk + entry->count - 1);
		first = 0;
		node = rb_next(node);
	}
	printk("\n");
}
Example #4
static void hists__resort_entries(struct hists *self)
{
	unsigned long position = 1;
	struct rb_root tmp = RB_ROOT;
	struct rb_node *next = rb_first(&self->entries);

	while (next != NULL) {
		struct hist_entry *n = rb_entry(next, struct hist_entry, rb_node);

		next = rb_next(&n->rb_node);
		rb_erase(&n->rb_node, &self->entries);
		n->position = position++;
		perf_session__insert_hist_entry_by_name(&tmp, n);
	}

	self->entries = tmp;
}
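Example #4 also shows the standard way to erase while walking: the successor is fetched with rb_next() before rb_erase() rewrites the tree, and each entry is re-inserted into a temporary RB_ROOT that finally replaces the old root. A generic, hedged sketch of that resort-by-rebuild idiom follows; struct item, its node field and insert_by_new_key() are hypothetical.

/* Sketch: re-sort an rbtree under a different key by draining it into a
 * temporary root.  insert_by_new_key() stands in for the new ordering. */
static void resort_tree(struct rb_root *root)
{
	struct rb_root tmp = RB_ROOT;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct item *it = rb_entry(next, struct item, node);

		next = rb_next(next);		/* grab the successor first... */
		rb_erase(&it->node, root);	/* ...then detach the node safely */
		insert_by_new_key(&tmp, it);
	}
	*root = tmp;				/* swap in the rebuilt tree */
}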
Example #5
File: log.c  Project: 7799/linux
/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will need
			 * to replay the journal, in which case the old buds
			 * must be unchanged. Do not release them until post
			 * commit i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}
Example #6
/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct extent_map *entry;
	struct extent_map *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct extent_map, rb_node);
		prev = n;
		prev_entry = entry;

		WARN_ON(!entry->in_tree);

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset >= extent_map_end(entry))
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset >= extent_map_end(prev_entry)) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct extent_map, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct extent_map, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}
Example #7
void rb_delete (struct rb_tree *t, const void *key)
{
	struct rb_node *z;

	assert (t != NULL);
	assert (t->root != NULL);
	assert (key != NULL);

	z = rb_search (t, key);
	
	if (z != &rb_null) {
		struct rb_node *x, *y, *parent;
		
		if (z->left == &rb_null || z->right == &rb_null) 
			y = z;
		else
			y = rb_next (z);

		if (y->left != &rb_null)
			x = y->left;
		else
			x = y->right;

		parent = y->parent;
		if (x != &rb_null)
			x->parent = parent;
		
		if (y->parent == &rb_null)
			t->root = x;
		else {
			if (y == y->parent->left)
				y->parent->left = x;
			else
				y->parent->right = x;
		}

		if (y != z)
			z->data = y->data;

		if (y->color == RB_BLACK)
			rb_delete_fixup (&t->root, x, parent);

		free (y);
	}
}
Example #8
/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that is at least bytes in size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_block_group_cache *block_group,
                   u64 offset, int bitmap_only, int fuzzy)
{
    struct rb_node *n = block_group->free_space_offset.rb_node;
    struct btrfs_free_space *entry, *prev = NULL;

    /* find entry that is closest to the 'offset' */
    while (1) {
        if (!n) {
            entry = NULL;
            break;
        }

        entry = rb_entry(n, struct btrfs_free_space, offset_index);
        prev = entry;

        if (offset < entry->offset)
            n = n->rb_left;
        else if (offset > entry->offset)
            n = n->rb_right;
        else
            break;
    }

    if (bitmap_only) {
        if (!entry)
            return NULL;
        if (entry->bitmap)
            return entry;

        /*
         * bitmap entry and extent entry may share same offset,
         * in that case, bitmap entry comes after extent entry.
         */
        n = rb_next(n);
        if (!n)
            return NULL;
        entry = rb_entry(n, struct btrfs_free_space, offset_index);
        if (entry->offset != offset)
            return NULL;

        WARN_ON(!entry->bitmap);
        return entry;
    } else if (entry) {
Example #9
File: session.c  Project: alphadx/NAT64
int session_for_each(l4_protocol l4_proto, int (*func)(struct session_entry *, void *), void *arg)
{
	struct session_table *table;
	struct rb_node *node;
	int error;

	error = get_session_table(l4_proto, &table);
	if (error)
		return error;

	for (node = rb_first(&table->tree4); node; node = rb_next(node)) {
		error = func(rb_entry(node, struct session_entry, tree4_hook), arg);
		if (error)
			return error;
	}

	return 0;
}
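session_for_each() hides the rb_first()/rb_next() walk behind a callback, so callers never touch the tree directly. A hedged usage sketch follows; the callback, the counter and the L4PROTO_TCP constant are assumptions here, and only session_for_each() itself comes from the quoted project.

/* Hypothetical caller: count the stored TCP sessions. */
static int count_session(struct session_entry *session, void *arg)
{
	(*(unsigned int *)arg)++;
	return 0;	/* returning non-zero would abort the walk */
}

static unsigned int count_tcp_sessions(void)
{
	unsigned int count = 0;

	/* errors from the walk are simply ignored in this sketch */
	session_for_each(L4PROTO_TCP, count_session, &count);
	return count;
}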
Example #10
static void
__sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
		  u64 min_hit)
{
	struct rb_node *n;
	struct callchain_node *child;

	n = rb_first(&node->rb_root_in);
	while (n) {
		child = rb_entry(n, struct callchain_node, rb_node_in);
		n = rb_next(n);

		__sort_chain_flat(rb_root, child, min_hit);
	}

	if (node->hit && node->hit >= min_hit)
		rb_insert_callchain(rb_root, node, CHAIN_FLAT);
}
Example #11
static void ext4_es_print_tree(struct inode *inode)
{
	struct ext4_es_tree *tree;
	struct rb_node *node;

	printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino);
	tree = &EXT4_I(inode)->i_es_tree;
	node = rb_first(&tree->root);
	while (node) {
		struct extent_status *es;
		es = rb_entry(node, struct extent_status, rb_node);
		printk(KERN_DEBUG " [%u/%u) %llu %x",
		       es->es_lblk, es->es_len,
		       ext4_es_pblock(es), ext4_es_status(es));
		node = rb_next(node);
	}
	printk(KERN_DEBUG "\n");
}
Example #12
File: pat_rbtree.c  Project: 08opt/linux
static struct memtype *memtype_rb_exact_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct memtype *match;

	match = memtype_rb_lowest_match(root, start, end);
	while (match != NULL && match->start < end) {
		struct rb_node *node;

		if (match->start == start && match->end == end)
			return match;

		node = rb_next(&match->rb);
		if (node)
			match = container_of(node, struct memtype, rb);
		else
			match = NULL;
	}

	return NULL; /* no exact match found */
}
Example #13
static void *nommu_vma_list_start(struct seq_file *m, loff_t *_pos)
{
	struct rb_node *_rb;
	loff_t pos = *_pos;
	void *next = NULL;

	down_read(&nommu_vma_sem);

	for (_rb = rb_first(&nommu_vma_tree); _rb; _rb = rb_next(_rb)) {
		if (pos == 0) {
			next = _rb;
			break;
		}
		pos--;
	}

	return next;
}
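This is the ->start operation of a seq_file iterator: it takes the semaphore, counts *_pos nodes into the tree and returns the rb_node as the cursor. As a hedged sketch of how such an iterator is usually completed, the matching ->next and ->stop operations would look roughly like this (mirroring the common seq_file pairing rather than quoting the original file):

/* Sketch of the companion seq_file operations; v is the rb_node handed
 * back by nommu_vma_list_start() above. */
static void *nommu_vma_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;				/* advance the position... */
	return rb_next((struct rb_node *)v);	/* ...and the tree cursor */
}

static void nommu_vma_list_stop(struct seq_file *m, void *v)
{
	up_read(&nommu_vma_sem);	/* drop the lock taken in ->start */
}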
Example #14
static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
	const int data_dir = rq_data_dir(drq->request);

	if (dd->next_drq[data_dir] == drq) {
		struct rb_node *rbnext = rb_next(&drq->rb_node);

		dd->next_drq[data_dir] = NULL;
		if (rbnext)
			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
	}

	if (ON_RB(&drq->rb_node)) {
		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
		RB_CLEAR(&drq->rb_node);
	}
}
Example #15
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *free_space_root;
	struct btrfs_block_group_cache *block_group;
	struct rb_node *node;
	int ret;

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	free_space_root = btrfs_create_tree(trans, fs_info,
					    BTRFS_FREE_SPACE_TREE_OBJECTID);
	if (IS_ERR(free_space_root)) {
		ret = PTR_ERR(free_space_root);
		goto abort;
	}
	fs_info->free_space_root = free_space_root;

	node = rb_first(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group_cache,
				       cache_node);
		ret = populate_free_space_tree(trans, block_group);
		if (ret)
			goto abort;
		node = rb_next(node);
	}

	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
	btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);

	return btrfs_commit_transaction(trans);

abort:
	clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
	btrfs_abort_transaction(trans, ret);
	btrfs_end_transaction(trans);
	return ret;
}
Example #16
int PullCache(INT h, int hash, int *width, int *height, int *format, unsigned char ** data)
{
	LPCACHE_NODE pNode;
	LPCACHE_HANDLE handle = (LPCACHE_HANDLE)h;
	int ret = 0;
	if (handle == GNull) {
		return PARAM_INVALID;
	}

	// search in rb-tree
	pNode = rbt_search(&handle->mRBRoot, hash);

	if (pNode != GNull) {
		//remove out.
		dl_remove_node(&(pNode->mDLNode), &(handle->mDLRoot));

		//add node
		dl_insert_node(&(pNode->mDLNode), GNull, &(handle->mDLRoot));

		cache_data_parse(&(pNode->mData), width, height, format, data);

	} else {
		//not found.
#if defined( _DEBUG )
		LPRB_NODE node;
		LPDLL_NODE link;
		LPCACHE_NODE data;
		LOGI("not found %ld\n", hash);
		for (node = rb_first(&(handle->mRBRoot)); node != GNull; node = rb_next(node)) {
			container_of(data, node, CACHE_NODE, mRBNode);
			LOGI("%ld\n", data->mKey);
		}
		LOGI("double link list:\n");
		for (link = dll_first(&(handle->mDLLRoot)); link != GNull; link = dll_next(link)) {
			container_of(data, link, CACHE_NODE, mDLLNode);
			LOGI("%ld\n", data->mKey);
		}

#endif
		return -1;
	}

	return ret;
}
Example #17
// ******************************************
// O(N) traversal
int Traverse(unsigned long *random_seed, param_t *params)
{
    rbnode_t *new_node, *node;
    long key = -1;
    long index = 1;
    int errors = 0;

#ifdef DEBUG
    long values[1000];
    index = 0;
    //printf("TRAVERSAL ******************************************\n");
#endif
    read_lock(My_Tree->lock);
    new_node = rb_first_n(My_Tree);

    while (new_node != NULL)
    {
        node = new_node;
        key = node->key;

        if (key != index)
        {
            if (index & 0x0001) index++;
            errors++;
        }
        if (key != index)
        {
            printf(" ***************** INVALID TRAVERSAL ******************\n");
            printf(" ***************** INVALID TRAVERSAL ******************\n");
            printf("Expected %ld found %ld\n", index, key);
            printf(" ***************** INVALID TRAVERSAL ******************\n");
            printf(" ***************** INVALID TRAVERSAL ******************\n");
            read_unlock(My_Tree->lock);
            exit(-1);
            return 0;
        }
        index++;
        new_node = rb_next(node);
    }

    read_unlock(My_Tree->lock);

    return errors;
}
Example #18
/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct rb_node *p;
	struct sk_buff *skb;
	struct dst_entry *dst;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		p = rb_first(&tp->out_of_order_queue);
		if (p && !rb_next(p)) {
			skb = rb_entry(p, struct sk_buff, rbnode);
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
Example #19
int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				        (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len   = pos->end - pos->start;
		ev.mmap.pid   = machine->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
	}

	return 0;
}
Example #20
File: balloc.c  Project: kzlin129/tt-gpl
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext3_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = list_entry(n, struct ext3_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start:  %d, end:  %d\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	if (bad)
		BUG();
}
Example #21
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	if (timer->state & HRTIMER_STATE_ENQUEUED) {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}
Example #22
File: extent_map.c  Project: 020gzh/linux
static int tree_insert(struct rb_root *root, struct extent_map *em)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *entry = NULL;
	struct rb_node *orig_parent = NULL;
	u64 end = range_end(em->start, em->len);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct extent_map, rb_node);

		if (em->start < entry->start)
			p = &(*p)->rb_left;
		else if (em->start >= extent_map_end(entry))
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	orig_parent = parent;
	while (parent && em->start >= extent_map_end(entry)) {
		parent = rb_next(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	parent = orig_parent;
	entry = rb_entry(parent, struct extent_map, rb_node);
	while (parent && em->start < entry->start) {
		parent = rb_prev(parent);
		entry = rb_entry(parent, struct extent_map, rb_node);
	}
	if (parent)
		if (end > entry->start && em->start < extent_map_end(entry))
			return -EEXIST;

	rb_link_node(&em->rb_node, orig_parent, p);
	rb_insert_color(&em->rb_node, root);
	return 0;
}
Example #23
static void __rsv_window_dump(struct rb_root *root, int verbose,
			      const char *fn)
{
	struct rb_node *n;
	struct ext2_reserve_window_node *rsv, *prev;
	int bad;

restart:
	n = rb_first(root);
	bad = 0;
	prev = NULL;

	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
	while (n) {
		rsv = rb_entry(n, struct ext2_reserve_window_node, rsv_node);
		if (verbose)
			printk("reservation window 0x%p "
			       "start: %lu, end: %lu\n",
			       rsv, rsv->rsv_start, rsv->rsv_end);
		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
			printk("Bad reservation %p (start >= end)\n",
			       rsv);
			bad = 1;
		}
		if (prev && prev->rsv_end >= rsv->rsv_start) {
			printk("Bad reservation %p (prev->end >= start)\n",
			       rsv);
			bad = 1;
		}
		if (bad) {
			if (!verbose) {
				printk("Restarting reservation walk in verbose mode\n");
				verbose = 1;
				goto restart;
			}
		}
		n = rb_next(n);
		prev = rsv;
	}
	printk("Window map complete.\n");
	BUG_ON(bad);
}
Example #24
/*
 * before removing the node, find the deepest node on the rebalance path
 * that will still be there after @node gets removed
 */
struct rb_node *rb_augment_erase_begin(struct rb_node *node)
{
	struct rb_node *deepest;

	if (!node->rb_right && !node->rb_left)
		deepest = rb_parent(node);
	else if (!node->rb_right)
		deepest = node->rb_left;
	else if (!node->rb_left)
		deepest = node->rb_right;
	else {
		deepest = rb_next(node);
		if (deepest->rb_right)
			deepest = deepest->rb_right;
		else if (rb_parent(deepest) != node)
			deepest = rb_parent(deepest);
	}

	return deepest;
}
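The node returned here marks where augmented data has to be recomputed once the erase is done. A hedged sketch of the usual call sequence in the old augmented-rbtree API follows; item_erase_augmented() and item_augment_cb() are hypothetical names, and the rb_augment_erase_end() pairing reflects how this helper was typically used.

/* Sketch: erase from an augmented rbtree with the old helper API. */
static void item_erase_augmented(struct rb_root *root, struct item *it)
{
	/* remember the deepest node that survives the removal... */
	struct rb_node *deepest = rb_augment_erase_begin(&it->node);

	rb_erase(&it->node, root);
	/* ...then recompute the augmented values upward from that node */
	rb_augment_erase_end(deepest, item_augment_cb, NULL);
}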
Example #25
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
Example #26
unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self)
{
	struct rb_node *nd;
	int row = 0;

	if (self->top == NULL)
		self->top = rb_first(self->entries);

	nd = self->top;

	while (nd != NULL) {
		ui_browser__gotorc(self, row, 0);
		self->write(self, nd, row);
		if (++row == self->height)
			break;
		nd = rb_next(nd);
	}

	return row;
}
Example #27
/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
    const int data_dir = rq_data_dir(rq);
    struct rb_node *rbnext = rb_next(&rq->rb_node);

    dd->next_rq[READ] = NULL;
    dd->next_rq[WRITE] = NULL;

    if (rbnext)
        dd->next_rq[data_dir] = rb_entry_rq(rbnext);

    dd->last_sector = rq->sector + rq->nr_sectors;

    /*
     * take it off the sort and fifo list, move
     * to dispatch queue
     */
    deadline_move_to_dispatch(dd, rq);
}
Example #28
int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id)
{
    struct rb_node *current_node = NULL;
    struct rb_node *next_node = rb_first(eventLoop->timeEventRbtreeRoot);
    struct aeTimeEvent *te;

    while (next_node)
    {
        current_node = next_node;
        next_node = rb_next(next_node);
        te = container_of(current_node, struct aeTimeEvent, rb_node);
        if (te->id == id)
        {
            rb_erase(current_node, eventLoop->timeEventRbtreeRoot);
            freeTimeEvent(eventLoop, te);
            return AE_OK;
        }
    }
    return AE_ERR; /* NO event with the specified ID found */
}
Example #29
File: debugfs.c  Project: Lyude/linux
static void dump_backoffs(struct seq_file *s, struct ceph_osd *osd)
{
	struct rb_node *n;

	mutex_lock(&osd->lock);
	for (n = rb_first(&osd->o_backoffs_by_id); n; n = rb_next(n)) {
		struct ceph_osd_backoff *backoff =
		    rb_entry(n, struct ceph_osd_backoff, id_node);

		seq_printf(s, "osd%d\t", osd->o_osd);
		dump_spgid(s, &backoff->spgid);
		seq_printf(s, "\t%llu\t", backoff->id);
		dump_hoid(s, backoff->begin);
		seq_putc(s, '\t');
		dump_hoid(s, backoff->end);
		seq_putc(s, '\n');
	}

	mutex_unlock(&osd->lock);
}
Example #30
unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
{
	struct rb_node *nd;
	int row = 0;

	if (browser->top == NULL)
		browser->top = rb_first(browser->entries);

	nd = browser->top;

	while (nd != NULL) {
		ui_browser__gotorc(browser, row, 0);
		browser->write(browser, nd, row);
		if (++row == browser->rows)
			break;
		nd = rb_next(nd);
	}

	return row;
}