Example #1
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
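Every example in this collection leans on the same invariant: RB_CLEAR_NODE() marks an rb_node as "not linked into any tree", and RB_EMPTY_NODE() tests exactly that, so no separate "am I linked?" flag is needed. As a hedged sketch of why that works (macro and field names follow recent include/linux/rbtree.h; older kernels spell the field rb_parent_color, as Examples #7 and #9 below show), an unlinked node is simply encoded as being its own parent:

/* Sketch only, not a verbatim copy of the kernel header: clearing stores the
 * node's own address in the parent/colour word, and the emptiness test just
 * compares against it. */
#define SKETCH_RB_CLEAR_NODE(node) \
	((node)->__rb_parent_color = (unsigned long)(node))	/* parent := self */
#define SKETCH_RB_EMPTY_NODE(node) \
	((node)->__rb_parent_color == (unsigned long)(node))	/* parent == self? */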
Example #2
static struct itree_node *itnode_init(unsigned long long uuid, unsigned long inmem)
{
	struct itree_node *itnode = NULL;

	itnode = calloc(1, sizeof(*itnode));
	if (!itnode)
		return NULL;

	RB_CLEAR_NODE(&itnode->inodes_node);
	RB_CLEAR_NODE(&itnode->sorted_node);
	itnode->uuid = uuid;
	itnode->inmem = inmem;
	return itnode;
}
Example #3
static struct eam_entry *eamt_create_entry(struct ipv6_prefix *ip6, struct ipv4_prefix *ip4)
{
	struct eam_entry *entry;

	entry = kmem_cache_alloc(entry_cache, GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->pref4 = *ip4;
	entry->pref6 = *ip6;
	RB_CLEAR_NODE(&entry->tree4_hook);
	RB_CLEAR_NODE(&entry->tree6_hook);

	return entry;
}
Example #4
/**
 * bus1_queue_entry_new() - allocate new queue entry
 * @n_files:	number of files to carry
 *
 * This allocates a new queue entry with pre-allocated space to carry the given
 * number of file descriptors. The queue entry is initially unlinked and no
 * slice is associated with it. The caller is free to modify the files array and
 * the slice as they wish.
 *
 * Return: Pointer to the new queue entry, ERR_PTR on failure.
 */
struct bus1_queue_entry *bus1_queue_entry_new(size_t n_files)
{
	struct bus1_queue_entry *entry;

	entry = kzalloc(sizeof(*entry) + n_files * sizeof(struct file *),
			GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	RB_CLEAR_NODE(&entry->transaction.rb);
	RB_CLEAR_NODE(&entry->rb);
	entry->n_files = n_files;

	return entry;
}
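A hypothetical caller for the allocator above might look like the sketch below (not bus1 code; demo_alloc_entry() and the choice of two file slots are made up). It highlights that, unlike the NULL-returning allocators in the other examples, this one follows the ERR_PTR() convention:

static int demo_alloc_entry(struct bus1_queue_entry **out)
{
	struct bus1_queue_entry *entry;

	entry = bus1_queue_entry_new(2);	/* room for two struct file pointers */
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/* entry->rb was cleared above, so RB_EMPTY_NODE(&entry->rb) stays true
	 * until the entry is linked into a queue. */
	*out = entry;
	return 0;
}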
Example #5
static void namei_add_inode(ext2_ino_t ino)
{
	struct target_inode *t, *n;
	struct rb_node **p = &namei_targets.rb_node;
	struct rb_node *parent = NULL;

	n = malloc(sizeof(*n));
	if (!n) {
		fprintf(stderr, "Unable to allocate space for inode\n");
		exit(1);
	}

	RB_CLEAR_NODE(&n->rb_node);
	n->ino = ino;
	n->nlinks = 1;

	while (*p) {
		parent = *p;
		t = rb_entry(parent, struct target_inode, rb_node);

		if (ino < t->ino)
			p = &(*p)->rb_left;
		else if (ino > t->ino)
			p = &(*p)->rb_right;
		else {
			/* already present: drop the unused allocation */
			free(n);
			return;
		}
	}

	rb_link_node(&n->rb_node, parent, p);
	rb_insert_color(&n->rb_node, &namei_targets);
}
Example #6
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
				    unsigned flags)
{
	struct i915_mmu_notifier *mn;
	struct i915_mmu_object *mo;

	if (flags & I915_USERPTR_UNSYNCHRONIZED)
		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

	if (WARN_ON(obj->userptr.mm == NULL))
		return -EINVAL;

	mn = i915_mmu_notifier_find(obj->userptr.mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);

	mo = kzalloc(sizeof(*mo), GFP_KERNEL);
	if (!mo)
		return -ENOMEM;

	mo->mn = mn;
	mo->obj = obj;
	mo->it.start = obj->userptr.ptr;
	mo->it.last = obj->userptr.ptr + obj->base.size - 1;
	RB_CLEAR_NODE(&mo->it.rb);

	obj->userptr.mmu_object = mo;
	return 0;
}
Example #7
void rb_init_node(LPRB_NODE rb)
{
	rb->rb_parent_color = 0;
	rb->rb_right = NULL;
	rb->rb_left = NULL;
	RB_CLEAR_NODE(rb);
}
Example #8
/**
 * bus1_queue_unlink() - unlink entry from sorted queue
 * @queue:	queue to unlink from
 * @entry:	entry to unlink, or NULL
 *
 * This unlinks @entry from the message queue @queue. If the entry was already
 * unlinked (or NULL is passed), this is a no-op.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call. This can happen if
 *         you unlink a staging entry, and thus a waiting entry becomes ready.
 */
bool bus1_queue_unlink(struct bus1_queue *queue,
		       struct bus1_queue_entry *entry)
{
	struct rb_node *node;

	if (!entry || RB_EMPTY_NODE(&entry->rb))
		return false;

	node = rcu_dereference_protected(queue->front,
					 bus1_queue_is_held(queue));
	if (node == &entry->rb) {
		node = rb_next(node);
		if (node && bus1_queue_entry(node)->seq & 1)
			node = NULL;
		rcu_assign_pointer(queue->front, node);
	} else {
		node = NULL;
	}

	rb_erase(&entry->rb, &queue->messages);
	RB_CLEAR_NODE(&entry->rb);

	/*
	 * If this entry was non-ready in front, but the next entry exists and
	 * is ready, then the queue becomes readable if you pop the front.
	 */
	return (entry->seq & 1) && node && !(bus1_queue_entry(node)->seq & 1);
}
Example #9
static void rb_init_node(struct rb_node *rb)
{
	rb->rb_parent_color = 0;
	rb->rb_right = NULL;
	rb->rb_left = NULL;
	RB_CLEAR_NODE(rb);
}
Example #10
static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
{
	if (!RB_EMPTY_NODE(&entry->rbnode)) {
		rb_erase(&entry->rbnode, root);
		RB_CLEAR_NODE(&entry->rbnode);
	}
}
Example #11
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL)
			return -ENOMEM;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}
Example #12
/**
 * bus1_queue_relink() - change sequence number of an entry
 * @queue:	queue to operate on
 * @entry:	entry to relink
 * @seq:	sequence number to set
 *
 * This changes the sequence number of @entry to @seq. The caller must
 * guarantee that the entry was already linked with an odd-numbered sequence
 * number. This will unlink the entry, change the sequence number and link it
 * again.
 *
 * The caller must hold the write-side peer-lock of the parent peer.
 *
 * Return: True if the queue became readable with this call.
 */
bool bus1_queue_relink(struct bus1_queue *queue,
		       struct bus1_queue_entry *entry,
		       u64 seq)
{
	struct rb_node *front;

	if (WARN_ON(seq == 0 ||
		    RB_EMPTY_NODE(&entry->rb) ||
		    !(entry->seq & 1)))
		return false;

	bus1_queue_assert_held(queue);

	/* remember front, cannot point to @entry */
	front = rcu_access_pointer(queue->front);
	WARN_ON(front == &entry->rb);

	/* drop from rb-tree and insert again */
	rb_erase(&entry->rb, &queue->messages);
	RB_CLEAR_NODE(&entry->rb);
	bus1_queue_link(queue, entry, seq);

	/* if this uncovered a front, then the queue became readable */
	return !front && rcu_access_pointer(queue->front);
}
Example #13
static void dequeue_stealable_pjob_rtws(struct rtws_rq *rtws_rq, struct sched_rtws_entity *rtws_se)
{
    struct task_struct *p = task_of_rtws_se(rtws_se);
    struct rq *rq = rq_of_rtws_rq(rtws_rq);

    if (RB_EMPTY_NODE(&rtws_se->stealable_pjob_node))
        return;

    printk(KERN_INFO "****stealable pjob dequeue, task %d ****\n", p->pid);

    if (rtws_rq->leftmost_stealable_pjob == &rtws_se->stealable_pjob_node) {
        struct rb_node *next_node;

        next_node = rb_next(&rtws_se->stealable_pjob_node);
        rtws_rq->leftmost_stealable_pjob = next_node;

        if (next_node)
            cpudl_set(&rq->rd->rtwss_cpudl, rq->cpu, rtws_se->job.deadline, 0, 1);
        else
            cpudl_set(&rq->rd->rtwss_cpudl, rq->cpu, 0, 0, 0);

        smp_wmb();
    }

    rb_erase(&rtws_se->stealable_pjob_node, &rtws_rq->stealable_pjobs);
    RB_CLEAR_NODE(&rtws_se->stealable_pjob_node);
}
Example #14
static void del_object(struct i915_mmu_object *mo)
{
	if (RB_EMPTY_NODE(&mo->it.rb))
		return;

	interval_tree_remove(&mo->it, &mo->mn->objects);
	RB_CLEAR_NODE(&mo->it.rb);
}
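Example #14 above shows only the removal side. A hedged sketch of the matching add-side helper (the i915 driver has its own version, which may differ; demo_add_object() is a made-up name) would mirror the RB_EMPTY_NODE() guard in the other direction:

static void demo_add_object(struct i915_mmu_object *mo)
{
	if (!RB_EMPTY_NODE(&mo->it.rb))		/* already linked: nothing to do */
		return;

	interval_tree_insert(&mo->it, &mo->mn->objects);
}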
Example #15
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	unsigned long *stackend;
	int node = tsk_fork_get_node(orig);
	int err;

	prepare_to_copy(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	err = arch_dup_task_struct(tsk, orig);
	if (err)
		goto out;

	tsk->stack = ti;

	setup_thread_stack(tsk, orig);
	clear_user_return_notifier(tsk);
	clear_tsk_need_resched(tsk);
	stackend = end_of_stack(tsk);
	*stackend = STACK_END_MAGIC;	/* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/*
	 * One for us, one for whoever does the "release_task()" (usually
	 * parent)
	 */
	atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;

	account_kernel_stack(ti, 1);

#ifdef CONFIG_ANDROID_LMK_ADJ_RBTREE
	RB_CLEAR_NODE(&tsk->adj_node);
#endif
	return tsk;

out:
	free_thread_info(ti);
	free_task_struct(tsk);
	return NULL;
}
Example #16
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	if (clone_flags & CLONE_NEWPID)
		sig->flags |= SIGNAL_UNKILLABLE;
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_adj = current->signal->oom_adj;
	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

#ifdef CONFIG_ANDROID_LMK_ADJ_RBTREE
	RB_CLEAR_NODE(&sig->adj_node);
#endif
	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
Example #17
/* Append skb to the last "run". */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
					struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}
Example #18
static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
{
	struct zswap_entry *entry;
	entry = kmem_cache_alloc(zswap_entry_cache, gfp);
	if (!entry)
		return NULL;
	entry->refcount = 1;
	RB_CLEAR_NODE(&entry->rbnode);
	return entry;
}
Example #19
struct session_entry *session_create(struct ipv4_pair *ipv4, struct ipv6_pair *ipv6,
		l4_protocol l4_proto)
{
	struct session_entry *result = kmem_cache_alloc(entry_cache, GFP_ATOMIC);
	if (!result)
		return NULL;

	result->ipv4 = *ipv4;
	result->ipv6 = *ipv6;
	result->dying_time = 0;
	result->bib = NULL;
	INIT_LIST_HEAD(&result->bib_list_hook);
	INIT_LIST_HEAD(&result->expire_list_hook);
	result->l4_proto = l4_proto;
	result->state = 0;
	RB_CLEAR_NODE(&result->tree6_hook);
	RB_CLEAR_NODE(&result->tree4_hook);

	return result;
}
Example #20
/**
 * kdbus_node_init() - initialize a kdbus_node
 * @node:	Pointer to the node to initialize
 * @type:	The type the node will have (KDBUS_NODE_*)
 *
 * The caller is responsible for allocating @node and initializing it to zero.
 * Once this call returns, you must use the node_ref() and node_unref()
 * functions to manage this node.
 */
void kdbus_node_init(struct kdbus_node *node, unsigned int type)
{
	atomic_set(&node->refcnt, 1);
	mutex_init(&node->lock);
	node->id = 0;
	node->type = type;
	RB_CLEAR_NODE(&node->rb);
	node->children = RB_ROOT;
	init_waitqueue_head(&node->waitq);
	atomic_set(&node->active, KDBUS_NODE_NEW);
}
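A hypothetical user of kdbus_node_init() could look like this sketch (demo_domain and demo_domain_new() are invented; it assumes a node type constant such as KDBUS_NODE_DOMAIN from the kdbus tree). The zeroed allocation matters because the function itself does not clear the whole structure:

struct demo_domain {
	struct kdbus_node node;
	/* ... domain-specific members ... */
};

static struct demo_domain *demo_domain_new(void)
{
	struct demo_domain *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);	/* zeroed, as the comment requires */
	if (!d)
		return NULL;

	kdbus_node_init(&d->node, KDBUS_NODE_DOMAIN);
	return d;
}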
Example #21
static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}
Example #22
static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
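Examples #21 and #22 above show only the dequeue side. For context, here is a hedged reconstruction of the matching enqueue from the same era of kernel/locking/rtmutex.c (treat it as an illustration, not a verbatim copy; rt_mutex_waiter_less() is the kernel's deadline/priority comparator):

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/* keep the cached leftmost pointer that rt_mutex_dequeue() relies on */
	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}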
Example #23
/**
 *      kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
 *      @kn: kernfs_node of interest
 *
 *      Try to unlink @kn from its sibling rbtree which starts from
 *      kn->parent->dir.children.  Returns %true if @kn was actually
 *      removed, %false if @kn wasn't on the rbtree.
 *
 *      Locking:
 *      mutex_lock(kernfs_mutex)
 */
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
	if (RB_EMPTY_NODE(&kn->rb))
		return false;

	if (kernfs_type(kn) == KERNFS_DIR)
		kn->parent->dir.subdirs--;

	rb_erase(&kn->rb, &kn->parent->dir.children);
	RB_CLEAR_NODE(&kn->rb);
	return true;
}
Example #24
void map__init(struct map *self, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso)
{
    self->type     = type;
    self->start    = start;
    self->end      = end;
    self->pgoff    = pgoff;
    self->dso      = dso;
    self->map_ip   = map__map_ip;
    self->unmap_ip = map__unmap_ip;
    RB_CLEAR_NODE(&self->rb_node);
}
Example #25
/**
 * alloc_extent_map - allocate new extent map structure
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map()
 */
struct extent_map *alloc_extent_map(void)
{
	struct extent_map *em;
	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
	if (!em)
		return NULL;
	RB_CLEAR_NODE(&em->rb_node);
	em->flags = 0;
	em->compress_type = BTRFS_COMPRESS_NONE;
	em->generation = 0;
	atomic_set(&em->refs, 1);
	INIT_LIST_HEAD(&em->list);
	return em;
}
Example #26
/**
 * timerqueue_del - Removes a timer from the timerqueue.
 *
 * @head: head of timerqueue
 * @node: timer node to be removed
 *
 * Removes the timer node from the timerqueue.
 */
void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
{
	WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));

	/* update next pointer */
	if (head->next == node) {
		struct rb_node *rbn = rb_next(&node->node);

		head->next = rbn ?
			rb_entry(rbn, struct timerqueue_node, node) : NULL;
	}
	rb_erase(&node->node, &head->head);
	RB_CLEAR_NODE(&node->node);
}
Example #27
static void fiops_init_icq(struct io_cq *icq)
{
	struct fiops_data *fiopsd = icq->q->elevator->elevator_data;
	struct fiops_ioc *ioc = icq_to_cic(icq);

	RB_CLEAR_NODE(&ioc->rb_node);
	INIT_LIST_HEAD(&ioc->fifo);
	ioc->sort_list = RB_ROOT;

	ioc->fiopsd = fiopsd;

	ioc->pid = current->pid;
	fiops_mark_ioc_prio_changed(ioc);
}
Example #28
static bool test_add_and_remove(void)
{
	struct rb_root root = RB_ROOT;
	struct node_thing nodes[4];
	bool expecteds[4];
	int i;
	struct node_thing *exists;
	bool success = true;

	for (i = 0; i < ARRAY_SIZE(nodes); i++) {
		nodes[i].i = i;
		RB_CLEAR_NODE(&nodes[i].hook);
		expecteds[i] = false;
	}

	exists = add(&root, &nodes[1]);
	success &= ASSERT_PTR(NULL, exists, "exists 1");
	expecteds[1] = true;
	success &= check_nodes(&root, expecteds);
	if (!success)
		return false;

	exists = add(&root, &nodes[3]);
	success &= ASSERT_PTR(NULL, exists, "exists 2");
	expecteds[3] = true;
	success &= check_nodes(&root, expecteds);
	if (!success)
		return false;

	exists = add(&root, &nodes[0]);
	success &= ASSERT_PTR(NULL, exists, "exists 3");
	expecteds[0] = true;
	success &= check_nodes(&root, expecteds);
	if (!success)
		return false;

	exists = add(&root, &nodes[2]);
	success &= ASSERT_PTR(NULL, exists, "exists 4");
	expecteds[2] = true;
	success &= check_nodes(&root, expecteds);

	rb_erase(&nodes[2].hook, &root);
	expecteds[2] = false;
	success &= check_nodes(&root, expecteds);
	if (!success)
		return false;

	return success;
}
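The add() helper used by this test lives elsewhere in its test suite; a hedged sketch of what it presumably looks like (keyed on ->i, returning the already-present node on collision and NULL on successful insertion, which is what the ASSERT_PTR(NULL, ...) checks expect) is:

static struct node_thing *add(struct rb_root *root, struct node_thing *node)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;
	struct node_thing *entry;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct node_thing, hook);
		if (node->i < entry->i)
			link = &parent->rb_left;
		else if (node->i > entry->i)
			link = &parent->rb_right;
		else
			return entry;	/* collision: report the existing node */
	}

	rb_link_node(&node->hook, parent, link);
	rb_insert_color(&node->hook, root);
	return NULL;
}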
Example #29
void map__init(struct map *self, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	self->type     = type;
	self->start    = start;
	self->end      = end;
	self->pgoff    = pgoff;
	self->dso      = dso;
	self->map_ip   = map__map_ip;
	self->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&self->rb_node);
	self->groups   = NULL;
	self->referenced = false;
	self->erange_warned = false;
}
Example #30
void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type     = type;
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso;
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->referenced = false;
	map->erange_warned = false;
}