Example 1
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}
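The helper either returns the node already keyed by f or links a freshly allocated one at the exact slot where the descent fell off the tree, so callers see a single ERR_PTR-style result. A minimal caller sketch under that assumption (mark_frag_split is a hypothetical name, not part of the ceph code):

/* Hypothetical caller: record that fragment @f was split @split_by ways. */
static int mark_frag_split(struct ceph_inode_info *ci, u32 f, int split_by)
{
	struct ceph_inode_frag *frag;

	frag = __get_or_create_frag(ci, f);
	if (IS_ERR(frag))
		return PTR_ERR(frag);	/* only -ENOMEM is possible here */

	frag->split_by = split_by;
	return 0;
}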
Example 2
static void
rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
		    enum chain_mode mode)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct callchain_node *rnode;
	u64 chain_cumul = callchain_cumul_hits(chain);

	while (*p) {
		u64 rnode_cumul;

		parent = *p;
		rnode = rb_entry(parent, struct callchain_node, rb_node);
		rnode_cumul = callchain_cumul_hits(rnode);

		switch (mode) {
		case CHAIN_FLAT:
		case CHAIN_FOLDED:
			if (rnode->hit < chain->hit)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_GRAPH_ABS: /* fall through */
		case CHAIN_GRAPH_REL:
			if (rnode_cumul < chain_cumul)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			break;
		case CHAIN_NONE:
		default:
			break;
		}
	}

	rb_link_node(&chain->rb_node, parent, p);
	rb_insert_color(&chain->rb_node, root);
}
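Note that the comparators above never report equality, so callchain nodes with equal weights simply become neighbors in the tree, and because heavier chains are sent left, an in-order walk visits nodes from most to least hits. A hedged dump sketch under that assumption (dump_callchains is an invented name):

/* Hypothetical in-order walk: prints callchains from most to least hits. */
static void dump_callchains(struct rb_root *root)
{
	struct rb_node *n;

	for (n = rb_first(root); n; n = rb_next(n)) {
		struct callchain_node *cnode;

		cnode = rb_entry(n, struct callchain_node, rb_node);
		printf("cumul hits: %llu\n",
		       (unsigned long long)callchain_cumul_hits(cnode));
	}
}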
Example 3
//---------------------------------------------------------------------
/* Insert @node keyed by @sid; returns the existing entry on a duplicate
 * key, or NULL once the new node has been linked and recolored. */
SAWON *insert_data(struct rb_root *root, int sid, struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	SAWON *s;

	while (*p) {
		parent = *p;
		s = rb_entry(parent, SAWON, tree);

		if (sid < s->sid)
			p = &(*p)->rb_left;
		else if (sid > s->sid)
			p = &(*p)->rb_right;
		else
			return s;
	}
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
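Since insert_data hands back the already-present entry instead of linking the new node, the caller decides what to do with the rejected candidate. A hedged usage sketch (add_sawon and the free() policy are assumptions for illustration):

/* Hypothetical caller: insert @s, discarding it if the sid is taken. */
int add_sawon(struct rb_root *root, SAWON *s)
{
	SAWON *old = insert_data(root, s->sid, &s->tree);

	if (old) {
		free(s);	/* duplicate sid: keep the existing entry */
		return -1;
	}
	return 0;		/* @s is now linked into the tree */
}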
Example 4
/*
 * Put a new tmp_dnode_info into the temporary RB-tree, keeping the list in
 * order of increasing version.
 */
static void jffs2_add_tn_to_tree(struct jffs2_tmp_dnode_info *tn, struct rb_root *list)
{
	struct rb_node **p = &list->rb_node;
	struct rb_node * parent = NULL;
	struct jffs2_tmp_dnode_info *this;

	while (*p) {
		parent = *p;
		this = rb_entry(parent, struct jffs2_tmp_dnode_info, rb);

		/* There may actually be a collision here, but it doesn't
		   actually matter. As long as the two nodes with the same
		   version are together, it's all fine. */
		if (tn->version < this->version)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&tn->rb, parent, p);
	rb_insert_color(&tn->rb, list);
}
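Whoever replays the tree then wants the lowest version first, which rb_first provides directly. A hedged consumption sketch (process_tn_tree is an invented name; the per-node action is elided):

/* Hypothetical consumer: drain tmp dnodes in increasing version order. */
static void process_tn_tree(struct rb_root *list)
{
	struct rb_node *n;

	while ((n = rb_first(list)) != NULL) {
		struct jffs2_tmp_dnode_info *tn =
			rb_entry(n, struct jffs2_tmp_dnode_info, rb);

		rb_erase(n, list);
		/* ... apply @tn to the inode being rebuilt ... */
	}
}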
Example 5
/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}
Example 6
/*
 * In the case that an entry with the same offset is found, a pointer to
 * the existing entry is stored in dupentry and the function returns -EEXIST.
 */
static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
			struct zswap_entry **dupentry)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct zswap_entry *myentry;

	while (*link) {
		parent = *link;
		myentry = rb_entry(parent, struct zswap_entry, rbnode);
		if (myentry->offset > entry->offset)
			link = &(*link)->rb_left;
		else if (myentry->offset < entry->offset)
			link = &(*link)->rb_right;
		else {
			*dupentry = myentry;
			return -EEXIST;
		}
	}
	rb_link_node(&entry->rbnode, parent, link);
	rb_insert_color(&entry->rbnode, root);
	return 0;
}
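On -EEXIST the caller owns the conflict: typically it evicts the stale duplicate and retries until the insert sticks. A hedged retry loop in the spirit of zswap (the erase/put helpers are assumptions about the surrounding code, not shown here):

	/* Hypothetical caller: replace any stale entry at the same offset. */
	do {
		ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
		if (ret == -EEXIST) {
			/* assumed helpers: unlink and drop the duplicate */
			zswap_rb_erase(&tree->rbroot, dupentry);
			zswap_entry_put(tree, dupentry);
		}
	} while (ret == -EEXIST);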
Example 7
static void aeInsertTimeEvent(struct rb_root *root, struct aeTimeEvent *data)
{
    struct rb_node **new_node = &(root->rb_node), *parent = NULL;

    /* Figure out where to put new node */
    while (*new_node) {
        struct aeTimeEvent *this_node = container_of(*new_node, struct aeTimeEvent, rb_node);

        parent = *new_node;

        if (data->when_sec < this_node->when_sec
            || (data->when_sec == this_node->when_sec
                && data->when_ms < this_node->when_ms))
            new_node = &((*new_node)->rb_left);
        else
            new_node = &((*new_node)->rb_right);
    }

    /* Add new node and rebalance tree. */
    rb_link_node(&data->rb_node, parent, new_node);
    rb_insert_color(&data->rb_node, root);
}
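Because (when_sec, when_ms) forms the sort key, the leftmost node is always the next event to fire, so the event loop can check it without a full scan. A hedged peek sketch (aePeekDueTimer and the now_* parameters are invented for illustration):

/* Hypothetical peek: return the soonest timer if it is already due. */
static struct aeTimeEvent *aePeekDueTimer(struct rb_root *root,
                                          long now_sec, long now_ms)
{
    struct rb_node *first = rb_first(root);
    struct aeTimeEvent *te;

    if (!first)
        return NULL;
    te = container_of(first, struct aeTimeEvent, rb_node);
    if (te->when_sec < now_sec ||
        (te->when_sec == now_sec && te->when_ms <= now_ms))
        return te;
    return NULL;
}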
Example 8
/*
 * Internal helper to link @node into the rb-tree.
 */
static inline void _mali_vma_offset_add_rb(struct mali_allocation_manager *mgr,
		struct mali_vma_node *node)
{
	struct rb_node **iter = &mgr->allocation_mgr_rb.rb_node;
	struct rb_node *parent = NULL;
	struct mali_vma_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct mali_vma_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			MALI_DEBUG_ASSERT(0);
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->allocation_mgr_rb);
}
Example 9
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			      int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data)
			die("malloc");
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
}
Example 10
/* BINARY TREE */
static inline void enq_timer(struct rt_tasklet_struct *timed_timer)
{
	struct rt_tasklet_struct *timerh, *tmrnxt, *timer;
	rb_node_t **rbtn, *rbtpn = NULL;
	timer = timerh = &timers_list[TIMED_TIMER_CPUID];
	rbtn = &timerh->rbr.rb_node;

	while (*rbtn) {
		rbtpn = *rbtn;
		tmrnxt = rb_entry(rbtpn, struct rt_tasklet_struct, rbn);
		if (timer->firing_time > tmrnxt->firing_time) {
			rbtn = &(rbtpn)->rb_right;
		} else {
			rbtn = &(rbtpn)->rb_left;
			timer = tmrnxt;
		}
	}
	rb_link_node(&timed_timer->rbn, rbtpn, rbtn);
	rb_insert_color(&timed_timer->rbn, &timerh->rbr);
	timer->prev = (timed_timer->prev = timer->prev)->next = timed_timer;
	timed_timer->next = timer;
}
Example 11
int add_rbtree_timer(struct rte_timer *tim, uint32_t expire)
{
	uint32_t thread_idx = rte_get_thread_id();
	struct rbtree_timer_base *base = global_rbt_base + thread_idx;
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct rte_timer *entry = NULL;
	int leftmost = 1;

	if (!(tim->flags & RTE_TIMER_INITED))
		return -1;

	if (tim->flags & RTE_TIMER_ADDED)
		return -1;

	tim->expire = rte_get_cur_time() + expire;
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rte_timer, node);
		if (tim->expire < entry->expire) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}
	if (leftmost)
		base->first = &tim->node;
	rb_link_node(&tim->node, parent, link);
	rb_insert_color(&tim->node, &base->active);
	tim->flags |= RTE_TIMER_ADDED;

	return 0;
}
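Keeping base->first pointed at the leftmost node makes the expiry path cheap: the next deadline is one pointer away. A hedged expiry sketch built on the fields above (expire_rbtree_timers and the callback step are assumptions):

/* Hypothetical expiry path: fire every timer whose deadline has passed. */
int expire_rbtree_timers(struct rbtree_timer_base *base)
{
	int fired = 0;

	while (base->first) {
		struct rte_timer *tim =
			rb_entry(base->first, struct rte_timer, node);

		if (tim->expire > rte_get_cur_time())
			break;				/* nothing due yet */

		base->first = rb_next(&tim->node);	/* next leftmost */
		rb_erase(&tim->node, &base->active);
		tim->flags &= ~RTE_TIMER_ADDED;
		/* ... run the timer callback here ... */
		fired++;
	}
	return fired;
}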
Example 12
struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}
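The last_match cache is only safe if it is invalidated whenever a cached thread leaves the tree; otherwise the fast path returns a dangling pointer. A hedged removal counterpart (machine__remove_thread is an invented name here):

/* Hypothetical removal: keep the front-end cache coherent. */
static void machine__remove_thread(struct machine *self, struct thread *th)
{
	if (self->last_match == th)
		self->last_match = NULL;	/* drop the stale cache */
	rb_erase(&th->rb_node, &self->threads);
}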
Example 13
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
{
	int rc;
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (rc < 0) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
		/* rc == 0 is not expected: callers look the sid up first */
	}

	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}
Example 14
/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we anyway account this space now,
	 * before any data has been written to it, because this is about to
	 * guarantee fixed mount time, and this bud will anyway be read and
	 * scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}
Example 15
/**
 * bfq_insert - generic tree insertion.
 * @root: tree root.
 * @entity: entity to insert.
 *
 * This is used for the idle and the active tree, since they are both
 * ordered by finish time.
 */
static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
{
	struct bfq_entity *entry;
	struct rb_node **node = &root->rb_node;
	struct rb_node *parent = NULL;

	BUG_ON(entity->tree != NULL);

	while (*node != NULL) {
		parent = *node;
		entry = rb_entry(parent, struct bfq_entity, rb_node);

		if (bfq_gt(entry->finish, entity->finish))
			node = &parent->rb_left;
		else
			node = &parent->rb_right;
	}

	rb_link_node(&entity->rb_node, parent, node);
	rb_insert_color(&entity->rb_node, root);

	entity->tree = root;
}
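Entities with equal finish times fall through to the right subtree, so among ties the oldest insertion stays leftmost and service remains FIFO. A hedged extraction counterpart (bfq_extract_first is an invented name; the entity->tree bookkeeping mirrors the insert above):

/* Hypothetical extraction: pop the entity with the smallest finish time. */
static struct bfq_entity *bfq_extract_first(struct rb_root *root)
{
	struct rb_node *node = rb_first(root);
	struct bfq_entity *entity;

	if (!node)
		return NULL;
	entity = rb_entry(node, struct bfq_entity, rb_node);
	rb_erase(node, root);
	entity->tree = NULL;	/* undo the insert-side bookkeeping */
	return entity;
}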
Example 16
int my_insert(struct rb_root *root, struct mynode *data)
{
	struct rb_node **new1 = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new1) {
		struct mynode *this1 = container_of(*new1, struct mynode, node);
		int result = strcmp(data->string, this1->string);

		parent = *new1;
		if (result < 0)
			new1 = &((*new1)->rb_left);
		else
			new1 = &((*new1)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new1);
	rb_insert_color(&data->node, root);

	return 1;
}
Example 17
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = container_of(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}
Example 18
static void io_cache_insert(struct io_cache *ic,
			    struct io_cache_block *insert_icb)
{
	struct rb_node **p = &ic->ic_lookup.rb_node;
	struct rb_node *parent = NULL;
	struct io_cache_block *icb = NULL;

	while (*p) {
		parent = *p;
		icb = rb_entry(parent, struct io_cache_block, icb_node);
		if (insert_icb->icb_blkno < icb->icb_blkno) {
			p = &(*p)->rb_left;
			icb = NULL;
		} else if (insert_icb->icb_blkno > icb->icb_blkno) {
			p = &(*p)->rb_right;
			icb = NULL;
		} else
			assert(0);  /* We erased it, remember? */
	}

	rb_link_node(&insert_icb->icb_node, parent, p);
	rb_insert_color(&insert_icb->icb_node, &ic->ic_lookup);
}
Example 19
static void ion_debug_mem_map_add(struct rb_root *mem_map,
				  struct mem_map_data *data)
{
	struct rb_node **p = &mem_map->rb_node;
	struct rb_node *parent = NULL;
	struct mem_map_data *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct mem_map_data, node);

		if (data->addr < entry->addr) {
			p = &(*p)->rb_left;
		} else if (data->addr > entry->addr) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: mem_map_data already found.", __func__);
			BUG();
		}
	}
	rb_link_node(&data->node, parent, p);
	rb_insert_color(&data->node, mem_map);
}
Example 20
void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
{
	struct rb_node **p = &head->head.rb_node;
	struct rb_node *parent = NULL;
	struct timerqueue_node  *ptr;
 
	/* Make sure we don't add nodes that are already added */
	WARN_ON_ONCE(!RB_EMPTY_NODE(&node->node));

	while (*p) {
		parent = *p;
		ptr = rb_entry(parent, struct timerqueue_node, node);
		if (node->expires.tv64 < ptr->expires.tv64)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &head->head);

	if (!head->next || node->expires.tv64 < head->next->expires.tv64)
		head->next = node;
}
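Removal has to maintain the same invariant: if the cached earliest node is the one going away, the cache advances to its in-order successor before the erase. A minimal sketch close to the kernel's timerqueue_del (shown here as a hedged reconstruction, not a verbatim copy):

void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
	WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));

	/* advance the cached next-expiring node past @node */
	if (head->next == node) {
		struct rb_node *rbn = rb_next(&node->node);

		head->next = rbn ?
			rb_entry(rbn, struct timerqueue_node, node) : NULL;
	}
	rb_erase(&node->node, &head->head);
	RB_CLEAR_NODE(&node->node);
}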
Example 21
static void kllds_insert_by_key(struct rb_root *ver_root, kllds_entry *ins_tn)
{
	struct rb_node **link = &ver_root->rb_node;
	struct rb_node *parent = NULL;
	kllds_entry *tn;

	write_lock(&kllds_rwlck);
	while (*link != NULL) {
		parent = *link;
		tn = rb_entry(parent, kllds_entry, rb);

		if (ins_tn->key < tn->key) {
			link = &parent->rb_left;
		} else if (ins_tn->key > tn->key) {
			link = &parent->rb_right;
		} else {
			/* key already present: keep the existing entry */
			write_unlock(&kllds_rwlck);
			return;
		}
	}
	rb_link_node(&ins_tn->rb, parent, link);
	rb_insert_color(&ins_tn->rb, ver_root);
	write_unlock(&kllds_rwlck);
}
Example 22
int rb_insert(struct rb_root *root, struct rb_node *node,
	      rb_compare_nodes comp)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int ret;

	while (*p) {
		parent = *p;

		ret = comp(parent, node);
		if (ret < 0)
			p = &(*p)->rb_left;
		else if (ret > 0)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return 0;
}
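rb_insert pushes all ordering policy into the callback, which receives two bare rb_nodes and must recover the enclosing structures itself. A hedged comparator sketch (struct device_record is invented for illustration; the sign convention follows the loop above, where a negative return sends the new node left):

/* Hypothetical comparator for a tree keyed by devid, ascending. */
struct device_record {
	struct rb_node node;
	u64 devid;
};

static int device_record_compare(struct rb_node *parent, struct rb_node *new)
{
	struct device_record *a = rb_entry(parent, struct device_record, node);
	struct device_record *b = rb_entry(new, struct device_record, node);

	if (b->devid < a->devid)
		return -1;	/* new key smaller: descend left */
	if (b->devid > a->devid)
		return 1;	/* new key larger: descend right */
	return 0;		/* duplicate: rb_insert returns -EEXIST */
}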
Example 23
/*
 * Returns NULL if the insertion worked, or the existing node whose range
 * already covers @file_offset (in which case @node is not inserted).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
Example 24
struct io_device *insert_iod_with_format(struct modem_shared *msd,
		enum dev_format format, struct io_device *iod)
{
	struct rb_node **p = &msd->iodevs_tree_fmt.rb_node;
	struct rb_node *parent = NULL;

	while (*p) {
		struct io_device *iodev;

		parent = *p;
		iodev = rb_entry(parent, struct io_device, node_fmt);
		if (format < iodev->format)
			p = &(*p)->rb_left;
		else if (format > iodev->format)
			p = &(*p)->rb_right;
		else
			return iodev;
	}

	rb_link_node(&iod->node_fmt, parent, p);
	rb_insert_color(&iod->node_fmt, &msd->iodevs_tree_fmt);
	return NULL;
}
Example 25
static ssize_t o2nm_node_ipv4_address_write(struct o2nm_node *node,
					    const char *page,
					    size_t count)
{
	struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
	int ret, i;
	struct rb_node **p, *parent;
	unsigned int octets[4];
	__be32 ipv4_addr = 0;

	ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
		     &octets[1], &octets[0]);
	if (ret != 4)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(octets); i++) {
		if (octets[i] > 255)
			return -ERANGE;
		be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
	}

	ret = 0;
	write_lock(&cluster->cl_nodes_lock);
	if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
		ret = -EEXIST;
	else {
		rb_link_node(&node->nd_ip_node, parent, p);
		rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
	}
	write_unlock(&cluster->cl_nodes_lock);
	if (ret)
		return ret;

	memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));

	return count;
}
Example 26
static void
sid_rb_insert(struct rb_root *root, unsigned long cid,
		struct cifs_sid_id **psidid, char *typestr)
{
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		if (cid > lsidid->id) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (cid < lsidid->id) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
		/* cid == lsidid->id is not expected: callers search first */
	}

	(*psidid)->id = cid;
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sprintf(strptr, "%ld", cid);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}
Example 27
/*
 * The list of stealable -rtws pjobs is an rb-tree with pjobs ordered by
 * deadline, excluding the current pjob. Pjobs with equal deadlines obey
 * FIFO order.
 */
static void enqueue_stealable_pjob_rtws(struct rtws_rq *rtws_rq, struct sched_rtws_entity *rtws_se)
{
    struct rq *rq = rq_of_rtws_rq(rtws_rq);
    struct task_struct *p = task_of_rtws_se(rtws_se);
    struct rb_node **link = &rtws_rq->stealable_pjobs.rb_node;
    struct rb_node *parent = NULL;
    struct sched_rtws_entity *entry;
    int edf = 1;

    BUG_ON(!RB_EMPTY_NODE(&rtws_se->stealable_pjob_node));

    printk(KERN_INFO "****stealable pjob enqueue, task %d ****\n", p->pid);

    /* TODO: Optimization in case enqueueing task is next edf */

    while (*link) {
        parent = *link;
        entry = rb_entry(parent, struct sched_rtws_entity, stealable_pjob_node);

        if (entity_preempt_rtws(rtws_se, entry))
            link = &parent->rb_left;
        else {
            link = &parent->rb_right;
            edf = 0;
        }
    }

    if (edf) {
        rtws_rq->leftmost_stealable_pjob = &rtws_se->stealable_pjob_node;
        cpudl_set(&rq->rd->rtwss_cpudl, rq->cpu, rtws_se->job.deadline, 0, 1);
        smp_wmb();
    }

    rb_link_node(&rtws_se->stealable_pjob_node, parent, link);
    rb_insert_color(&rtws_se->stealable_pjob_node, &rtws_rq->stealable_pjobs);
}
Example 28
static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}
Example 29
/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to a pthread lock function
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one wasn't found - create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
	struct rb_node **node, *parent;
	struct lock_lookup *l;

	ll_pthread_rwlock_rdlock(&locks_rwlock);
	node = __get_lock_node(lock, &parent);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	if (*node) {
		return rb_entry(*node, struct lock_lookup, node);
	}

	/* We didn't find the lock, let's create it */
	l = alloc_lock();
	if (l == NULL)
		return NULL;

	l->orig = lock;
	/*
	 * Currently the name of the lock is the ptr value of the pthread lock,
	 * while not optimal, it makes debugging a bit easier.
	 *
	 * TODO: Get the real name of the lock using libdwarf
	 */
	sprintf(l->name, "%p", lock);
	lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

	ll_pthread_rwlock_wrlock(&locks_rwlock);
	/* This might have changed since the last time we fetched it */
	node = __get_lock_node(lock, &parent);
	rb_link_node(&l->node, parent, node);
	rb_insert_color(&l->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);

	return l;
}
Example 30
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}